public static void GetPoseAtTime ( TangoPoseData poseData, double timeStamp, TangoCoordinateFramePair framePair )

| Parameter | Type | Description |
| --- | --- | --- |
| poseData | TangoPoseData | The pose to return. |
| timeStamp | double | Time specified in seconds. If not set to 0.0, GetPoseAtTime retrieves the interpolated pose closest to this timestamp. If set to 0.0, the most recent pose estimate for the target-base pair is returned. The time of the returned pose is contained in the pose output structure and may differ from the queried timestamp. |
| framePair | TangoCoordinateFramePair | A pair of coordinate frames specifying the transformation to be queried for. For example, typical device motion is given by a target frame of TANGO_COORDINATE_FRAME_DEVICE and a base frame of TANGO_COORDINATE_FRAME_START_OF_SERVICE. |
| return | void | |
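Before the collected examples below, here is a minimal usage sketch (not taken from the SDK source) that follows the parameter descriptions above: a timestamp of 0.0 asks for the most recent pose of the device frame relative to the start-of-service frame. The class name PoseQueryExample and method CheckPose are illustrative only; the Tango Unity SDK types (PoseProvider, TangoPoseData, TangoCoordinateFramePair, TangoEnums) are assumed to be available.

using Tango;
using UnityEngine;

public class PoseQueryExample : MonoBehaviour
{
    // Illustrative helper: query the latest device pose and log its timestamp.
    private void CheckPose()
    {
        TangoPoseData poseData = new TangoPoseData();

        TangoCoordinateFramePair framePair;
        framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
        framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

        // 0.0 requests the most recent pose estimate; a non-zero timestamp (in seconds)
        // requests the interpolated pose closest to that time.
        PoseProvider.GetPoseAtTime(poseData, 0.0, framePair);

        if (poseData.status_code == TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
        {
            // The returned pose carries its own timestamp, which may differ from the query time.
            Debug.Log("Pose timestamp: " + poseData.timestamp);
        }
    }
}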
/// <summary>
/// Raise a Tango pose event if there is new data.
/// </summary>
/// <param name="emulateAreaDescriptions">If set, Area description poses are emulated.</param>
internal void SendPoseIfAvailable(bool emulateAreaDescriptions)
{
#if UNITY_EDITOR
    lock (m_lockObject)
    {
        if (PoseProvider.m_emulationIsDirty)
        {
            PoseProvider.m_emulationIsDirty = false;

            if (m_onTangoPoseAvailable != null)
            {
                TangoCoordinateFramePair framePair;
                framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
                framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
                PoseProvider.GetPoseAtTime(m_motionTrackingData, 0, framePair);
                m_isMotionTrackingPoseAvailable = true;

                if (emulateAreaDescriptions)
                {
                    framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
                    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
                    PoseProvider.GetPoseAtTime(m_areaLearningData, 0, framePair);
                    m_isAreaLearningPoseAvailable = true;
                }
            }
        }
    }
#endif

    if (m_onTangoPoseAvailable != null)
    {
        // NOTE: If this becomes a performance issue, this could be changed to use
        // Interlocked.CompareExchange to "consume" the motion tracking data.
        lock (m_lockObject)
        {
            if (m_isMotionTrackingPoseAvailable)
            {
                m_onTangoPoseAvailable(m_motionTrackingData);
                m_isMotionTrackingPoseAvailable = false;
            }

            if (m_isAreaLearningPoseAvailable)
            {
                m_onTangoPoseAvailable(m_areaLearningData);
                m_isAreaLearningPoseAvailable = false;
            }

            if (m_isRelocalizaitonPoseAvailable)
            {
                m_onTangoPoseAvailable(m_relocalizationData);
                m_isRelocalizaitonPoseAvailable = false;
            }
        }
    }
}
/// <summary>
/// Calculate the camera extrinsics for this device.
/// </summary>
private void _UpdateExtrinsics()
{
    TangoCoordinateFramePair pair;

    TangoPoseData imu_T_devicePose = new TangoPoseData();
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(imu_T_devicePose, 0, pair);

    TangoPoseData imu_T_depthCameraPose = new TangoPoseData();
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_DEPTH;
    PoseProvider.GetPoseAtTime(imu_T_depthCameraPose, 0, pair);

    TangoPoseData imu_T_colorCameraPose = new TangoPoseData();
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_IMU;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
    PoseProvider.GetPoseAtTime(imu_T_colorCameraPose, 0, pair);

    // Convert into matrix form to combine the poses.
    Matrix4x4 device_T_imu = Matrix4x4.Inverse(imu_T_devicePose.ToMatrix4x4());
    m_device_T_depthCamera = device_T_imu * imu_T_depthCameraPose.ToMatrix4x4();
    m_device_T_colorCamera = device_T_imu * imu_T_colorCameraPose.ToMatrix4x4();

    m_unityWorld_T_startService.SetColumn(0, new Vector4(1, 0, 0, 0));
    m_unityWorld_T_startService.SetColumn(1, new Vector4(0, 0, 1, 0));
    m_unityWorld_T_startService.SetColumn(2, new Vector4(0, 1, 0, 0));
    m_unityWorld_T_startService.SetColumn(3, new Vector4(0, 0, 0, 1));

    // Update the camera intrinsics too.
    TangoCameraIntrinsics colorCameraIntrinsics = new TangoCameraIntrinsics();
    VideoOverlayProvider.GetIntrinsics(TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR, colorCameraIntrinsics);
    m_colorCameraIntrinsics.calibration_type = (int)colorCameraIntrinsics.calibration_type;
    m_colorCameraIntrinsics.width = colorCameraIntrinsics.width;
    m_colorCameraIntrinsics.height = colorCameraIntrinsics.height;
    m_colorCameraIntrinsics.cx = colorCameraIntrinsics.cx;
    m_colorCameraIntrinsics.cy = colorCameraIntrinsics.cy;
    m_colorCameraIntrinsics.fx = colorCameraIntrinsics.fx;
    m_colorCameraIntrinsics.fy = colorCameraIntrinsics.fy;
    m_colorCameraIntrinsics.distortion0 = colorCameraIntrinsics.distortion0;
    m_colorCameraIntrinsics.distortion1 = colorCameraIntrinsics.distortion1;
    m_colorCameraIntrinsics.distortion2 = colorCameraIntrinsics.distortion2;
    m_colorCameraIntrinsics.distortion3 = colorCameraIntrinsics.distortion3;
    m_colorCameraIntrinsics.distortion4 = colorCameraIntrinsics.distortion4;
}
/// <summary>
/// This will be called when a new frame is available from the camera.
///
/// The first scan-line of the color image is reserved for metadata instead of image pixels.
/// </summary>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
public void OnTangoImageMultithreadedAvailable(TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
{
    if (!m_enabled)
    {
        return;
    }

    if (cameraId != TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR)
    {
        return;
    }

    // Build World T depth camera
    TangoPoseData world_T_devicePose = new TangoPoseData();
    if (m_useAreaDescriptionPose)
    {
        TangoCoordinateFramePair pair;
        pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(world_T_devicePose, imageBuffer.timestamp, pair);
    }
    else
    {
        TangoCoordinateFramePair pair;
        pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(world_T_devicePose, imageBuffer.timestamp, pair);
    }

    if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        Debug.Log(string.Format("Time {0} has bad status code {1}", imageBuffer.timestamp,
                                world_T_devicePose.status_code) + Environment.StackTrace);
        return;
    }

    // The 3D Reconstruction library can not handle a left handed transformation during update. Instead,
    // transform into the Unity world space via the external_T_tango config.
    Matrix4x4 world_T_colorCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_colorCamera;

    _UpdateColor(imageBuffer, world_T_colorCamera);
}
/// <summary>
/// INTERNAL USE: Update the Tango emulation state for color camera data.
/// </summary>
/// <param name="useByteBufferMethod">Whether to update emulation for byte-buffer method.</param>
internal static void UpdateTangoEmulation(bool useByteBufferMethod)
{
    // Get emulated position and rotation in Unity space.
    TangoPoseData poseData = new TangoPoseData();
    TangoCoordinateFramePair pair;
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

    if (!PoseProvider.GetTimestampForColorEmulation(out m_lastColorEmulationTime))
    {
        Debug.LogError("Couldn't get a valid timestamp with which to emulate color camera. "
                       + "Color camera emulation will be skipped this frame.");
        return;
    }

    PoseProvider.GetPoseAtTime(poseData, m_lastColorEmulationTime, pair);
    if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        return;
    }

    Vector3 position;
    Quaternion rotation;
    TangoSupport.TangoPoseToWorldTransform(poseData, out position, out rotation);

    // Instantiate any resources that we haven't yet.
    if (!m_emulationIsInitialized)
    {
        _InitializeResourcesForEmulation();
        m_emulationIsInitialized = true;
    }

    // Render.
    EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedColorRenderTexture,
        EmulatedEnvironmentRenderHelper.EmulatedDataType.COLOR_CAMERA,
        position, rotation);

    m_emulationIsDirty = true;
}
/// <summary>
/// This is called each time new depth data is available.
///
/// On the Tango tablet, the depth callback occurs at 5 Hz.
/// </summary>
/// <param name="tangoDepth">Tango depth.</param>
public void OnTangoDepthMultithreadedAvailable(TangoXYZij tangoDepth)
{
    if (!m_enabled)
    {
        return;
    }

    // Build World T depth camera
    TangoPoseData world_T_devicePose = new TangoPoseData();
    if (m_useAreaDescriptionPose)
    {
        TangoCoordinateFramePair pair;
        pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(world_T_devicePose, tangoDepth.timestamp, pair);
    }
    else
    {
        TangoCoordinateFramePair pair;
        pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(world_T_devicePose, tangoDepth.timestamp, pair);
    }

    if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        Debug.Log(string.Format("Time {0} has bad status code {1}", tangoDepth.timestamp,
                                world_T_devicePose.status_code) + Environment.StackTrace);
        return;
    }

    // NOTE: The 3D Reconstruction library does not handle left handed matrices correctly. For now, transform
    // into the Unity world space after extraction.
    Matrix4x4 world_T_depthCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_depthCamera;

    _UpdateDepth(tangoDepth, world_T_depthCamera);
}
/// <summary>
/// INTERNAL USE: Update the Tango emulation state for depth data.
/// </summary>
internal static void UpdateTangoEmulation()
{
    m_emulatedPointCloud.Clear();

    // Timestamp shall be something in the past, and we'll emulate the depth cloud based on it.
    if (!PoseProvider.GetTimestampForDepthEmulation(out m_lastDepthEmulationTime))
    {
        Debug.LogError("Couldn't get a valid timestamp with which to emulate depth data. "
                       + "Depth emulation will be skipped this frame.");
        return;
    }

    // Get emulated position and rotation in Unity space.
    TangoPoseData poseData = new TangoPoseData();
    TangoCoordinateFramePair pair;
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

    PoseProvider.GetPoseAtTime(poseData, m_lastDepthEmulationTime, pair);
    if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        return;
    }

    Vector3 position;
    Quaternion rotation;
    TangoSupport.TangoPoseToWorldTransform(poseData, out position, out rotation);

    // Instantiate any resources that we haven't yet.
    _InternResourcesForEmulation();

    // Render emulated depth camera data.
    EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedDepthTexture,
        EmulatedEnvironmentRenderHelper.EmulatedDataType.DEPTH,
        position, rotation);

    // Capture rendered depth points from texture.
    RenderTexture.active = m_emulatedDepthTexture;
    m_emulationCaptureTexture.ReadPixels(new Rect(0, 0, m_emulatedDepthTexture.width, m_emulatedDepthTexture.height),
                                         0, 0);
    m_emulationCaptureTexture.Apply();

    // Extract captured data.
    Color32[] depthDataAsColors = m_emulationCaptureTexture.GetPixels32();

    // Convert depth texture to positions in camera space.
    Matrix4x4 projectionMatrix = GL.GetGPUProjectionMatrix(EmulatedEnvironmentRenderHelper.m_emulationCamera.projectionMatrix,
                                                           false);
    Matrix4x4 reverseProjectionMatrix = projectionMatrix.inverse;

    float width = m_emulationCaptureTexture.width;
    float height = m_emulationCaptureTexture.height;
    for (int yTexel = 0; yTexel < height; yTexel++)
    {
        for (int xTexel = 0; xTexel < width; xTexel++)
        {
            Color32 depthAsColor = depthDataAsColors[xTexel + (yTexel * m_emulationCaptureTexture.width)];
            float clipSpaceZ = (depthAsColor.r - 128f) + (depthAsColor.g / 255f);

            float ndcSpaceZ = (clipSpaceZ - projectionMatrix.m23) / projectionMatrix.m22;
            float perspectiveDivisor = ndcSpaceZ * projectionMatrix.m32;

            float ndcSpaceX = (((xTexel + 0.5f) / width) * 2f) - 1;
            float ndcSpaceY = (((yTexel + 0.5f) / height) * 2f) - 1;

            Vector4 clipSpacePos = new Vector4(ndcSpaceX * perspectiveDivisor, ndcSpaceY * perspectiveDivisor,
                                               clipSpaceZ, perspectiveDivisor);
            Vector4 viewSpacePos = reverseProjectionMatrix * clipSpacePos;

            Vector3 emulatedDepthPos = new Vector3(viewSpacePos.x, -viewSpacePos.y, -viewSpacePos.z);

            if (emulatedDepthPos.z > MIN_POINT_DISTANCE && emulatedDepthPos.z < MAX_POINT_DISTANCE)
            {
                m_emulatedPointCloud.Add(emulatedDepthPos);
            }
        }
    }

    m_emulationIsDirty = true;
}
/// <summary>
/// Detect one or more markers in the input image.
/// </summary>
/// <param name="imageBuffer">
/// The input image buffer.
/// </param>
/// <param name="cameraId">
/// Camera that is used for detecting markers; can be TangoEnums.TangoCameraId.TANGO_CAMERA_FISHEYE or
/// TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR.
/// </param>
/// <param name="markerType">
/// Target marker's type. Currently supported marker types are QR markers and Alvar markers.
/// </param>
/// <param name="markerSize">
/// Physical size (side length) of the marker.
/// </param>
/// <param name="markers">
/// The returned marker list.
/// </param>
/// <returns>
/// True on success; false on invalid input or failure.
/// </returns>
public static bool DetectMarkers(TangoUnityImageData imageBuffer, TangoEnums.TangoCameraId cameraId,
                                 MarkerType markerType, double markerSize, List<Marker> markers)
{
    if (markers == null)
    {
        Debug.Log("markers is null. " + Environment.StackTrace);
        return false;
    }

    // Clear any existing markers.
    markers.Clear();

    // Detect markers.
    TangoImageBuffer buffer = new TangoImageBuffer();
    GCHandle gchandle = GCHandle.Alloc(imageBuffer.data, GCHandleType.Pinned);
    IntPtr ptr = gchandle.AddrOfPinnedObject();
    buffer.data = ptr;

    buffer.format = imageBuffer.format;
    buffer.frame_number = imageBuffer.frame_number;
    buffer.height = imageBuffer.height;
    buffer.stride = imageBuffer.stride;
    buffer.timestamp = imageBuffer.timestamp;
    buffer.width = imageBuffer.width;

    // Get the pose of the color camera at the image timestamp.
    TangoPoseData poseData = new TangoPoseData();
    TangoCoordinateFramePair pair;
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
    PoseProvider.GetPoseAtTime(poseData, buffer.timestamp, pair);

    APIMarkerList rawAPIMarkerList = new APIMarkerList();
    APIMarkerParam rawMarkerParam = new APIMarkerParam(markerType, markerSize);

    int ret = TangoSupportAPI.TangoSupport_detectMarkers(ref buffer, cameraId,
                                                         ref poseData.translation,
                                                         ref poseData.orientation,
                                                         ref rawMarkerParam,
                                                         ref rawAPIMarkerList);

    gchandle.Free();

    if (ret != Common.ErrorType.TANGO_SUCCESS)
    {
        return false;
    }

    if (rawAPIMarkerList.markerCount != 0)
    {
        List<APIMarker> apiMarkers = new List<APIMarker>();

        MarshallingHelper.MarshalUnmanagedStructArrayToList<TangoSupport.APIMarker>(
            rawAPIMarkerList.markers,
            rawAPIMarkerList.markerCount,
            apiMarkers);

        for (int i = 0; i < apiMarkers.Count; ++i)
        {
            APIMarker apiMarker = apiMarkers[i];
            Marker marker = new Marker();
            marker.m_type = apiMarker.m_type;
            marker.m_timestamp = apiMarker.m_timestamp;
            marker.m_content = apiMarker.m_content;

            // Convert 2D corner points from pixel space to UV space.
            marker.m_corner2DP0.x = apiMarker.m_corner2DP0.x / buffer.width;
            marker.m_corner2DP0.y = apiMarker.m_corner2DP0.y / buffer.height;
            marker.m_corner2DP1.x = apiMarker.m_corner2DP1.x / buffer.width;
            marker.m_corner2DP1.y = apiMarker.m_corner2DP1.y / buffer.height;
            marker.m_corner2DP2.x = apiMarker.m_corner2DP2.x / buffer.width;
            marker.m_corner2DP2.y = apiMarker.m_corner2DP2.y / buffer.height;
            marker.m_corner2DP3.x = apiMarker.m_corner2DP3.x / buffer.width;
            marker.m_corner2DP3.y = apiMarker.m_corner2DP3.y / buffer.height;

            // Convert 3D corner points from Start of Service space to Unity World space.
            marker.m_corner3DP0 = GetMarkerInUnitySpace(apiMarker.m_corner3DP0);
            marker.m_corner3DP1 = GetMarkerInUnitySpace(apiMarker.m_corner3DP1);
            marker.m_corner3DP2 = GetMarkerInUnitySpace(apiMarker.m_corner3DP2);
            marker.m_corner3DP3 = GetMarkerInUnitySpace(apiMarker.m_corner3DP3);

            // Convert pose from Start of Service to Unity World space.
            Vector3 translation = new Vector3(
                (float)apiMarker.m_translation.x,
                (float)apiMarker.m_translation.y,
                (float)apiMarker.m_translation.z);
            Quaternion orientation = new Quaternion(
                (float)apiMarker.m_rotation.x,
                (float)apiMarker.m_rotation.y,
                (float)apiMarker.m_rotation.z,
                (float)apiMarker.m_rotation.w);

            Matrix4x4 ss_T_marker = Matrix4x4.TRS(translation, orientation, Vector3.one);

            // Note that UNITY_WORLD_T_START_SERVICE is an involutory matrix. The transform we actually
            // want to multiply on the right-hand side is START_SERVICE_T_UNITY_WORLD.
            Matrix4x4 uw_T_u_marker = TangoSupport.UNITY_WORLD_T_START_SERVICE *
                                      ss_T_marker *
                                      TangoSupport.UNITY_WORLD_T_START_SERVICE;
            marker.m_translation = uw_T_u_marker.GetColumn(3);
            marker.m_orientation = Quaternion.LookRotation(uw_T_u_marker.GetColumn(2), uw_T_u_marker.GetColumn(1));

            // Add the marker to the output list.
            markers.Add(marker);
        }
    }

    TangoSupportAPI.TangoSupport_freeMarkerList(ref rawAPIMarkerList);

    return true;
}
/// <summary>
/// Raise a Tango pose event if there is new data.
/// </summary>
/// <param name="emulateAreaDescriptions">If set, Area description poses are emulated.</param>
internal static void SendIfAvailable(bool emulateAreaDescriptions)
{
    if (m_poseAvailableCallback == null)
    {
        return;
    }

#if UNITY_EDITOR
    lock (m_lockObject)
    {
        if (PoseProvider.m_emulationIsDirty)
        {
            PoseProvider.m_emulationIsDirty = false;

            if (m_onTangoPoseAvailable != null)
            {
                TangoCoordinateFramePair framePair;
                framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
                framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
                PoseProvider.GetPoseAtTime(m_motionTrackingData, 0, framePair);
                m_isMotionTrackingPoseAvailable = true;

                if (emulateAreaDescriptions)
                {
                    framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
                    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
                    PoseProvider.GetPoseAtTime(m_areaLearningData, 0, framePair);
                    if (m_areaLearningData.status_code == TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
                    {
                        m_isAreaLearningPoseAvailable = true;
                    }

                    framePair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
                    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
                    PoseProvider.GetPoseAtTime(m_relocalizationData, 0, framePair);
                    if (m_relocalizationData.status_code == TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID
                        && m_relocalizationData.timestamp != m_mostRecentEmulatedRelocalizationTimestamp)
                    {
                        m_mostRecentEmulatedRelocalizationTimestamp = m_relocalizationData.timestamp;
                        m_isRelocalizationPoseAvailable = true;
                    }
                }
            }
        }
    }
#endif

    if (m_onTangoPoseAvailable != null)
    {
        // NOTE: If this becomes a performance issue, this could be changed to use
        // Interlocked.CompareExchange to "consume" the motion tracking data.
        lock (m_lockObject)
        {
            if (m_isMotionTrackingPoseAvailable)
            {
                m_onTangoPoseAvailable(m_motionTrackingData);
                m_isMotionTrackingPoseAvailable = false;
            }

            if (m_isAreaLearningPoseAvailable)
            {
                m_onTangoPoseAvailable(m_areaLearningData);
                m_isAreaLearningPoseAvailable = false;
            }

            if (m_isRelocalizationPoseAvailable)
            {
                m_onTangoPoseAvailable(m_relocalizationData);
                m_isRelocalizationPoseAvailable = false;
            }

            if (m_isCloudPoseAvailable)
            {
                m_onTangoPoseAvailable(m_cloudPoseData);
                m_isCloudPoseAvailable = false;
            }
        }
    }
}
/// <summary>
/// INTERNAL USE: Update the Tango emulation state for depth data.
///
/// Make sure this is only called once per frame.
/// </summary>
internal static void UpdateTangoEmulation()
{
    m_emulatedPointCloud.Clear();

    // Timestamp shall be something in the past, and we'll emulate the depth cloud based on it.
    m_lastDepthEmulationTime = PoseProvider.GetTimestampForDepthEmulation();

    // Get emulated position and rotation in Unity space.
    TangoPoseData poseData = new TangoPoseData();
    TangoCoordinateFramePair pair;
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

    PoseProvider.GetPoseAtTime(poseData, m_lastDepthEmulationTime, pair);
    if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        return;
    }

    Vector3 position;
    Quaternion rotation;
    TangoSupport.TangoPoseToWorldTransform(poseData, out position, out rotation);

    // Instantiate any resources that we haven't yet.
    if (m_emulatedDepthTexture == null)
    {
        m_emulatedDepthTexture = new RenderTexture(NUM_X_DEPTH_SAMPLES, NUM_Y_DEPTH_SAMPLES, 24,
                                                   RenderTextureFormat.ARGB32);
    }

    if (m_emulationCaptureTexture == null)
    {
        m_emulationCaptureTexture = new Texture2D(NUM_X_DEPTH_SAMPLES, NUM_Y_DEPTH_SAMPLES,
                                                  TextureFormat.ARGB32, false);
    }

    if (m_emulatedDepthShader == null)
    {
        // Find depth shader by searching for it in project.
        string[] foundAssetGuids = UnityEditor.AssetDatabase.FindAssets("DepthEmulation t:Shader");
        if (foundAssetGuids.Length > 0)
        {
            string assetPath = UnityEditor.AssetDatabase.GUIDToAssetPath(foundAssetGuids[0]);
            m_emulatedDepthShader = UnityEditor.AssetDatabase.LoadAssetAtPath(assetPath, typeof(Shader)) as Shader;
        }
    }

    // Render emulated depth camera data.
    EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedDepthTexture, m_emulatedDepthShader,
                                                              position, rotation);

    // Capture rendered depth points from texture.
    RenderTexture.active = m_emulatedDepthTexture;
    m_emulationCaptureTexture.ReadPixels(new Rect(0, 0, m_emulatedDepthTexture.width, m_emulatedDepthTexture.height),
                                         0, 0);
    m_emulationCaptureTexture.Apply();

    // Extract captured data.
    Color32[] depthDataAsColors = m_emulationCaptureTexture.GetPixels32();

    // Convert depth texture to positions in camera space.
    Matrix4x4 projectionMatrix = GL.GetGPUProjectionMatrix(EmulatedEnvironmentRenderHelper.m_emulationCamera.projectionMatrix,
                                                           false);
    Matrix4x4 reverseProjectionMatrix = projectionMatrix.inverse;

    float width = m_emulationCaptureTexture.width;
    float height = m_emulationCaptureTexture.height;
    for (int yTexel = 0; yTexel < height; yTexel++)
    {
        for (int xTexel = 0; xTexel < width; xTexel++)
        {
            Color32 depthAsColor = depthDataAsColors[xTexel + (yTexel * m_emulationCaptureTexture.width)];
            float clipSpaceZ = (depthAsColor.r - 128f) + (depthAsColor.g / 255f);

            float ndcSpaceZ = (clipSpaceZ - projectionMatrix.m23) / projectionMatrix.m22;
            float perspectiveDivisor = ndcSpaceZ * projectionMatrix.m32;

            float ndcSpaceX = (((xTexel + 0.5f) / width) * 2f) - 1;
            float ndcSpaceY = (((yTexel + 0.5f) / height) * 2f) - 1;

            Vector4 clipSpacePos = new Vector4(ndcSpaceX * perspectiveDivisor, ndcSpaceY * perspectiveDivisor,
                                               clipSpaceZ, perspectiveDivisor);
            Vector4 viewSpacePos = reverseProjectionMatrix * clipSpacePos;

            Vector3 emulatedDepthPos = new Vector3(viewSpacePos.x, -viewSpacePos.y, -viewSpacePos.z);

            if (emulatedDepthPos.z > MIN_POINT_DISTANCE && emulatedDepthPos.z < MAX_POINT_DISTANCE)
            {
                m_emulatedPointCloud.Add(emulatedDepthPos);
            }
        }
    }
}