/// <summary>
/// Editor-emulation stub for marker detection: no markers are ever found.
/// </summary>
/// <returns>Always Common.ErrorType.TANGO_SUCCESS.</returns>
public static int TangoSupport_detectMarkers(ref TangoImageBuffer image, TangoEnums.TangoCameraId cameraId, ref DVector3 translation, ref DVector4 orientation, ref APIMarkerParam param, ref APIMarkerList apiMarkerList)
{
    // Hand back a fresh, empty marker list; the editor cannot run the
    // native detector.
    apiMarkerList = new APIMarkerList();
    return Common.ErrorType.TANGO_SUCCESS;
}
/// <summary>
/// Raise a Tango image event if there is new data.
/// </summary>
internal void SendIfVideoOverlayAvailable()
{
#if UNITY_EDITOR
    lock (m_lockObject)
    {
        if (VideoOverlayProvider.m_emulationIsDirty)
        {
            VideoOverlayProvider.m_emulationIsDirty = false;

            if (m_onTangoYUVTextureAvailable != null)
            {
                m_shouldSendYUVTextureIdMethodEvent = true;
            }

            if (m_onTangoCameraTextureAvailable != null)
            {
                m_shouldSendTextureMethodEvent = true;
            }

            // Only produce the emulated color data if someone will consume it.
            if (m_onTangoImageAvailable != null || m_onTangoImageMultithreadedAvailable != null)
            {
                _FillEmulatedColorCameraData(m_previousImageBuffer);
            }

            if (m_onTangoImageMultithreadedAvailable != null)
            {
                GCHandle pinnedColorBuffer = GCHandle.Alloc(m_previousImageBuffer.data, GCHandleType.Pinned);
                try
                {
                    TangoImageBuffer emulatedImageBuffer = _GetEmulatedTangoImageBuffer(m_previousImageBuffer, pinnedColorBuffer);
                    m_onTangoImageMultithreadedAvailable(COLOR_CAMERA_ID, emulatedImageBuffer);
                }
                finally
                {
                    // BUGFIX: the pinned handle was never freed, permanently
                    // pinning the buffer. The callback is invoked synchronously,
                    // so the pin is only needed for the duration of the call —
                    // NOTE(review): confirm no callback retains the raw pointer.
                    pinnedColorBuffer.Free();
                }
            }

            if (m_onTangoImageAvailable != null)
            {
                m_shouldSendByteBufferMethodEvent = true;
            }
        }
    }
#endif
    lock (m_lockObject)
    {
        if (m_onTangoYUVTextureAvailable != null && m_shouldSendYUVTextureIdMethodEvent)
        {
            m_onTangoYUVTextureAvailable(COLOR_CAMERA_ID);
            m_shouldSendYUVTextureIdMethodEvent = false;
        }

        // BUGFIX: this check used the non-short-circuiting '&' operator while
        // its two siblings use '&&'; normalized for consistency.
        if (m_onTangoCameraTextureAvailable != null && m_shouldSendTextureMethodEvent)
        {
            m_onTangoCameraTextureAvailable(COLOR_CAMERA_ID);
            m_shouldSendTextureMethodEvent = false;
        }

        if (m_onTangoImageAvailable != null && m_shouldSendByteBufferMethodEvent)
        {
            m_onTangoImageAvailable(COLOR_CAMERA_ID, m_previousImageBuffer);
            m_shouldSendByteBufferMethodEvent = false;
        }
    }
}
/// <summary>
/// Handle the callback sent by the Tango Service when a new image is sampled.
/// Forwards the raw native buffer to multithreaded listeners immediately, then
/// caches a managed copy (under the lock) for later main-thread delivery.
/// </summary>
/// <param name="callbackContext">Callback context.</param>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
private void _OnImageAvailable(IntPtr callbackContext, TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
{
    // Multithreaded listeners get the raw buffer on this (binder) thread,
    // before any copying takes place.
    if (m_onTangoImageMultithreadedAvailable != null)
    {
        m_onTangoImageMultithreadedAvailable(cameraId, imageBuffer);
    }

    lock (m_lockObject)
    {
        m_previousCameraId = cameraId;

        // Lazily allocate the managed copy. width * height * 3 / 2 bytes
        // matches a 4:2:0 YUV layout (full luma plane plus half-resolution
        // chroma) — NOTE(review): assumes the camera delivers a 4:2:0 format;
        // confirm against the configured image format.
        if (m_previousImageBuffer.data == null)
        {
            m_previousImageBuffer.data = new byte[(imageBuffer.width * imageBuffer.height * 3) / 2];
        }

        m_previousImageBuffer.width = imageBuffer.width;
        m_previousImageBuffer.height = imageBuffer.height;
        m_previousImageBuffer.stride = imageBuffer.stride;
        m_previousImageBuffer.timestamp = imageBuffer.timestamp;
        m_previousImageBuffer.format = imageBuffer.format;
        m_previousImageBuffer.frame_number = imageBuffer.frame_number;

        // Copy the native pixels into the managed array so the data remains
        // valid after this callback returns.
        Marshal.Copy(imageBuffer.data, m_previousImageBuffer.data, 0, m_previousImageBuffer.data.Length);
        m_shouldSendByteBufferMethodEvent = true;
    }
}
/// <summary>
/// Calculates the depth in the color camera space at a user-specified
/// location using bilateral filtering weighted by both spatial distance
/// from the user coordinate and by intensity similarity.
/// </summary>
/// <returns>
/// Common.ErrorType.TANGO_SUCCESS on success,
/// Common.ErrorType.TANGO_INVALID on invalid input, and
/// Common.ErrorType.TANGO_ERROR on failure.
/// </returns>
/// <param name="pointCloud">
/// The point cloud. Cannot be null and must have at least one point.
/// </param>
/// <param name="pointCount">
/// The number of points to read from the point cloud.
/// </param>
/// <param name="timestamp">The timestamp of the depth points.</param>
/// <param name="cameraIntrinsics">
/// The camera intrinsics for the color camera. Cannot be null.
/// </param>
/// <param name="colorImage">
/// The color image buffer. Cannot be null.
/// </param>
/// <param name="matrix">
/// Transformation matrix of the color camera with respect to the Unity
/// World frame.
/// </param>
/// <param name="uvCoordinates">
/// The UV coordinates for the user selection. This is expected to be
/// between (0.0, 0.0) and (1.0, 1.0).
/// </param>
/// <param name="colorCameraPoint">
/// The point (x, y, z), where (x, y) is the back-projection of the UV
/// coordinates to the color camera space and z is the z coordinate of
/// the point in the point cloud nearest to the user selection after
/// projection onto the image plane. If there is not a point cloud point
/// close to the user selection after projection onto the image plane,
/// then the point will be set to (0.0, 0.0, 0.0) and isValidPoint will
/// be set to false.
/// </param>
/// <param name="isValidPoint">
/// A flag valued true if there is a point cloud point close to the user
/// selection after projection onto the image plane and valued false
/// otherwise.
/// </param>
public static int ScreenCoordinateToWorldBilateral(
    Vector3[] pointCloud, int pointCount, double timestamp,
    TangoCameraIntrinsics cameraIntrinsics, TangoImageBuffer colorImage,
    ref Matrix4x4 matrix, Vector2 uvCoordinates,
    out Vector3 colorCameraPoint, out bool isValidPoint)
{
    GCHandle pointCloudHandle = GCHandle.Alloc(pointCloud, GCHandleType.Pinned);
    try
    {
        TangoXYZij pointCloudXyzIj = new TangoXYZij();
        pointCloudXyzIj.timestamp = timestamp;
        pointCloudXyzIj.xyz_count = pointCount;
        pointCloudXyzIj.xyz = pointCloudHandle.AddrOfPinnedObject();

        DMatrix4x4 doubleMatrix = new DMatrix4x4(matrix);

        // Unity has Y pointing screen up; Tango camera has Y pointing
        // screen down.
        Vector2 uvCoordinatesTango = new Vector2(uvCoordinates.x, 1.0f - uvCoordinates.y);

        int isValidPointInteger;
        int returnValue = TangoSupportAPI.TangoSupport_getDepthAtPointBilateralCameraIntrinsicsMatrixTransform(
            pointCloudXyzIj, cameraIntrinsics, colorImage, ref doubleMatrix,
            ref uvCoordinatesTango, out colorCameraPoint, out isValidPointInteger);

        isValidPoint = isValidPointInteger != 0;
        return returnValue;
    }
    finally
    {
        // BUGFIX: free the pinned handle even when the native call throws;
        // previously an exception would leak the pin and keep the point
        // cloud array pinned forever.
        pointCloudHandle.Free();
    }
}
/// <summary>
/// Update the 3D Reconstruction with a new image and pose.
///
/// It is expected this will get called in from the Tango binder thread.
/// </summary>
/// <param name="image">Color image from Tango.</param>
/// <param name="imagePose">Pose matrix the color image corresponds too.</param>
private void _UpdateColor(TangoImageBuffer image, Matrix4x4 imagePose)
{
    if (!m_sendColorToUpdate)
    {
        // Color is not being fed to the reconstruction; nothing to do.
        // (The original comment said "no depth cloud", which did not match
        // the flag actually checked here.)
        return;
    }

    if (m_context == IntPtr.Zero)
    {
        Debug.Log("Update called before creating a reconstruction context." + Environment.StackTrace);
        return;
    }

    lock (m_lockObject)
    {
        // Each color frame is only consumed together with a fresh depth
        // frame; the flag is cleared below once the pair is submitted.
        if (!m_mostRecentDepthIsValid)
        {
            return;
        }

        // Marshal the managed image into the API's plain struct form.
        APIImageBuffer apiImage;
        apiImage.width = image.width;
        apiImage.height = image.height;
        apiImage.stride = image.stride;
        apiImage.timestamp = image.timestamp;
        apiImage.format = (int)image.format;
        apiImage.data = image.data;

        APIPose apiImagePose = APIPose.FromMatrix4x4(ref imagePose);

        // Pin the depth points array and point the cached depth struct at it
        // for the duration of the native call.
        GCHandle mostRecentDepthPointsHandle = GCHandle.Alloc(m_mostRecentDepthPoints, GCHandleType.Pinned);
        m_mostRecentDepth.points = Marshal.UnsafeAddrOfPinnedArrayElement(m_mostRecentDepthPoints, 0);

        GCHandle thisHandle = GCHandle.Alloc(this, GCHandleType.Pinned);

        IntPtr rawUpdatedIndices;
        Status result = (Status)API.Tango3DR_update(
            m_context, ref m_mostRecentDepth, ref m_mostRecentDepthPose,
            ref apiImage, ref apiImagePose, ref m_colorCameraIntrinsics,
            out rawUpdatedIndices);

        // The depth frame is consumed regardless of the update's outcome.
        m_mostRecentDepthIsValid = false;
        thisHandle.Free();
        mostRecentDepthPointsHandle.Free();

        if (result != Status.SUCCESS)
        {
            Debug.Log("Tango3DR_update returned non-success." + Environment.StackTrace);
            return;
        }

        // Record which grid cells changed, then release the native array.
        _AddUpdatedIndices(rawUpdatedIndices);
        API.Tango3DR_GridIndexArray_destroy(rawUpdatedIndices);
    }
}
/// <summary>
/// Editor-emulation stub: reports a "valid" depth sample at the origin.
/// </summary>
/// <returns>Always Common.ErrorType.TANGO_SUCCESS.</returns>
public static int TangoSupport_getDepthAtPointBilateralCameraIntrinsicsMatrixTransform(
    TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics,
    TangoImageBuffer colorImage, ref DMatrix4x4 matrix, ref Vector2 uvCoordinates,
    out Vector3 colorCameraPoint, out int isValidPoint)
{
    // No native library in the editor; pretend the query succeeded.
    isValidPoint = 1;
    colorCameraPoint = Vector3.zero;
    return Common.ErrorType.TANGO_SUCCESS;
}
/// <summary>
/// It's backwards, but fill a tango image buffer with already-emulated data.
/// It is the responsibility of the caller to GC pin/free the colorImageData's data array.
/// </summary>
/// <returns>Emulated raw color buffer.</returns>
/// <param name="colorImageData">Emulated color buffer data.</param>
/// <param name="pinnedColorBuffer">Pinned array of imageBuffer.data.</param>
private static TangoImageBuffer _GetEmulatedTangoImageBuffer(TangoUnityImageData colorImageData, GCHandle pinnedColorBuffer)
{
    // Mirror every field of the managed buffer, pointing data at the
    // caller-supplied pinned array.
    TangoImageBuffer result = new TangoImageBuffer();
    result.data = pinnedColorBuffer.AddrOfPinnedObject();
    result.timestamp = colorImageData.timestamp;
    result.frame_number = colorImageData.frame_number;
    result.format = colorImageData.format;
    result.width = colorImageData.width;
    result.height = colorImageData.height;
    result.stride = colorImageData.stride;
    return result;
}
/// <summary>
/// Handle the callback sent by the Tango Service when a new image is sampled.
/// Caches a managed copy of the image for later delivery.
/// </summary>
/// <param name="callbackContext">Callback context.</param>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
private void _OnImageAvailable(IntPtr callbackContext, TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
{
    m_previousCameraId = cameraId;

    // Lazily allocate the managed copy; 2 bytes per pixel —
    // NOTE(review): implies a packed 16-bpp layout (e.g. YUY2), which
    // differs from the 3/2-byte sizing used by other handlers in this
    // file; confirm against the actual camera format.
    if (m_previousImageBuffer.data == null)
    {
        m_previousImageBuffer.data = new byte[imageBuffer.width * imageBuffer.height * 2];
    }

    m_previousImageBuffer.width = imageBuffer.width;
    m_previousImageBuffer.height = imageBuffer.height;
    m_previousImageBuffer.stride = imageBuffer.stride;
    m_previousImageBuffer.timestamp = imageBuffer.timestamp;
    m_previousImageBuffer.format = imageBuffer.format;
    m_previousImageBuffer.frame_number = imageBuffer.frame_number;

    // Copy the native pixels so the data survives past this callback.
    Marshal.Copy(imageBuffer.data, m_previousImageBuffer.data, 0, m_previousImageBuffer.data.Length);
    m_shouldSendEvent = true;
}
/// <summary>
/// Native entry point of the Tango support library: computes the depth at a
/// UV coordinate in the color camera frame using bilateral filtering.
/// </summary>
/// <param name="pointCloud">The point cloud to sample from.</param>
/// <param name="cameraIntrinsics">Intrinsics of the color camera.</param>
/// <param name="colorImage">The color image buffer.</param>
/// <param name="matrix">Color camera transform (double precision).</param>
/// <param name="uvCoordinates">UV coordinates of the query point.</param>
/// <param name="colorCameraPoint">Receives the resulting camera-space point.</param>
/// <param name="isValidPoint">Receives nonzero if a nearby depth point was found.</param>
/// <returns>A Common.ErrorType status code.</returns>
public static extern int TangoSupport_getDepthAtPointBilateralCameraIntrinsicsMatrixTransform(
    TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics, TangoImageBuffer colorImage,
    ref DMatrix4x4 matrix, ref Vector2 uvCoordinates, out Vector3 colorCameraPoint,
    [Out, MarshalAs(UnmanagedType.I4)] out int isValidPoint);
/// <summary>
/// Handle the callback sent by the Tango Service
/// when a new image is sampled. Caches a managed copy of the image for
/// later delivery.
/// </summary>
/// <param name="callbackContext">Callback context.</param>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
protected void _OnImageAvailable(IntPtr callbackContext, TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
{
    m_previousCameraId = cameraId;

    // Lazily allocate the managed copy; 2 bytes per pixel —
    // NOTE(review): implies a packed 16-bpp layout; other handlers in this
    // file size the buffer as width * height * 3 / 2 instead. Confirm
    // against the actual camera format.
    if(m_previousImageBuffer.data == null)
    {
        m_previousImageBuffer.data = new byte[imageBuffer.width * imageBuffer.height * 2];
    }

    m_previousImageBuffer.width = imageBuffer.width;
    m_previousImageBuffer.height = imageBuffer.height;
    m_previousImageBuffer.stride = imageBuffer.stride;
    m_previousImageBuffer.timestamp = imageBuffer.timestamp;
    m_previousImageBuffer.format = imageBuffer.format;
    m_previousImageBuffer.frame_number = imageBuffer.frame_number;

    // Copy the native pixels so the data survives past this callback.
    Marshal.Copy(imageBuffer.data, m_previousImageBuffer.data, 0, m_previousImageBuffer.data.Length);
    m_shouldSendEvent = true;
}
/// <summary>
/// This will be called when a new frame is available from the camera.
///
/// The first scan-line of the color image is reserved for metadata instead of image pixels.
/// </summary>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
public void OnTangoImageMultithreadedAvailable(TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
{
    // Ignore frames while disabled, and frames from any camera other
    // than the color camera.
    if (!m_enabled || cameraId != TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR)
    {
        return;
    }

    // Build World T device at the image timestamp, basing the pose on the
    // area-description frame when configured, otherwise start-of-service.
    TangoCoordinateFramePair framePair;
    framePair.baseFrame = m_useAreaDescriptionPose
        ? TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION
        : TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

    TangoPoseData world_T_devicePose = new TangoPoseData();
    PoseProvider.GetPoseAtTime(world_T_devicePose, imageBuffer.timestamp, framePair);

    if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        Debug.Log(string.Format("Time {0} has bad status code {1}", imageBuffer.timestamp, world_T_devicePose.status_code) + Environment.StackTrace);
        return;
    }

    // The 3D Reconstruction library can not handle a left handed transformation during update. Instead,
    // transform into the Unity world space via the external_T_tango config.
    Matrix4x4 world_T_colorCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_colorCamera;
    _UpdateColor(imageBuffer, world_T_colorCamera);
}
/// <summary>
/// Native entry point of the Tango support library: detects markers in the
/// given image and fills a native marker list.
/// </summary>
/// <param name="image">The input image buffer.</param>
/// <param name="cameraId">Camera that produced the image.</param>
/// <param name="translation">Camera translation at the image timestamp.</param>
/// <param name="orientation">Camera orientation at the image timestamp.</param>
/// <param name="param">Marker type and physical size parameters.</param>
/// <param name="apiMarkerList">Receives the detected markers (native memory;
/// release with TangoSupport_freeMarkerList).</param>
/// <returns>A Common.ErrorType status code.</returns>
public static extern int TangoSupport_detectMarkers(ref TangoImageBuffer image, TangoEnums.TangoCameraId cameraId, ref DVector3 translation, ref DVector4 orientation, ref APIMarkerParam param, ref APIMarkerList apiMarkerList);
/// <summary>
/// Calculates the depth in the color camera space at a user-specified
/// location using bilateral filtering weighted by both spatial distance
/// from the user coordinate and by intensity similarity.
/// </summary>
/// <returns>
/// Common.ErrorType.TANGO_SUCCESS on success,
/// Common.ErrorType.TANGO_INVALID on invalid input, and
/// Common.ErrorType.TANGO_ERROR on failure.
/// </returns>
/// <param name="pointCloud">
/// The point cloud. Cannot be null and must have at least one point.
/// </param>
/// <param name="pointCount">
/// The number of points to read from the point cloud.
/// </param>
/// <param name="timestamp">The timestamp of the depth points.</param>
/// <param name="cameraIntrinsics">
/// The camera intrinsics for the color camera. Cannot be null.
/// </param>
/// <param name="colorImage">
/// The color image buffer. Cannot be null.
/// </param>
/// <param name="matrix">
/// Transformation matrix of the color camera with respect to the Unity
/// World frame.
/// </param>
/// <param name="uvCoordinates">
/// The UV coordinates for the user selection. This is expected to be
/// between (0.0, 0.0) and (1.0, 1.0).
/// </param>
/// <param name="colorCameraPoint">
/// The point (x, y, z), where (x, y) is the back-projection of the UV
/// coordinates to the color camera space and z is the z coordinate of
/// the point in the point cloud nearest to the user selection after
/// projection onto the image plane. If there is not a point cloud point
/// close to the user selection after projection onto the image plane,
/// then the point will be set to (0.0, 0.0, 0.0) and isValidPoint will
/// be set to false.
/// </param>
/// <param name="isValidPoint">
/// A flag valued true if there is a point cloud point close to the user
/// selection after projection onto the image plane and valued false
/// otherwise.
/// </param>
public static int ScreenCoordinateToWorldBilateral(
    Vector3[] pointCloud, int pointCount, double timestamp,
    TangoCameraIntrinsics cameraIntrinsics, TangoImageBuffer colorImage,
    ref Matrix4x4 matrix, Vector2 uvCoordinates,
    out Vector3 colorCameraPoint, out bool isValidPoint)
{
    GCHandle pointCloudHandle = GCHandle.Alloc(pointCloud, GCHandleType.Pinned);
    try
    {
        TangoXYZij pointCloudXyzIj = new TangoXYZij();
        pointCloudXyzIj.timestamp = timestamp;
        pointCloudXyzIj.xyz_count = pointCount;
        pointCloudXyzIj.xyz = pointCloudHandle.AddrOfPinnedObject();

        DMatrix4x4 doubleMatrix = new DMatrix4x4(matrix);

        // Unity has Y pointing screen up; Tango camera has Y pointing
        // screen down.
        Vector2 uvCoordinatesTango = new Vector2(uvCoordinates.x, 1.0f - uvCoordinates.y);

        int isValidPointInteger;
        int returnValue = TangoSupportAPI.TangoSupport_getDepthAtPointBilateralCameraIntrinsicsMatrixTransform(
            pointCloudXyzIj, cameraIntrinsics, colorImage, ref doubleMatrix,
            ref uvCoordinatesTango, out colorCameraPoint, out isValidPointInteger);

        isValidPoint = isValidPointInteger != 0;
        return returnValue;
    }
    finally
    {
        // BUGFIX: free the pinned handle even when the native call throws;
        // previously an exception would leak the pin and keep the point
        // cloud array pinned forever.
        pointCloudHandle.Free();
    }
}
/// <summary>
/// Handle the callback sent by the Tango Service
/// when a new image is sampled.
/// </summary>
/// <param name="callbackContext">Callback context.</param>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
protected abstract void _OnImageAvailable(IntPtr callbackContext, Tango.TangoEnums.TangoCameraId cameraId, Tango.TangoImageBuffer imageBuffer);
/// <summary>
/// It's backwards, but fill a tango image buffer with already-emulated data.
/// It is the responsibility of the caller to GC pin/free the colorImageData's data array.
/// </summary>
/// <returns>Emulated raw color buffer.</returns>
/// <param name="colorImageData">Emulated color buffer data.</param>
/// <param name="pinnedColorBuffer">Pinned array of imageBuffer.data.</param>
private static TangoImageBuffer _GetEmulatedTangoImageBuffer(TangoUnityImageData colorImageData, GCHandle pinnedColorBuffer)
{
    // Mirror every field of the managed buffer, pointing data at the
    // caller-supplied pinned array.
    TangoImageBuffer nativeBuffer = new TangoImageBuffer();
    nativeBuffer.data = pinnedColorBuffer.AddrOfPinnedObject();
    nativeBuffer.timestamp = colorImageData.timestamp;
    nativeBuffer.frame_number = colorImageData.frame_number;
    nativeBuffer.format = colorImageData.format;
    nativeBuffer.width = colorImageData.width;
    nativeBuffer.height = colorImageData.height;
    nativeBuffer.stride = colorImageData.stride;
    return nativeBuffer;
}
/// <summary>
/// Handle the image callback from the Tango Service: forward the raw native
/// buffer to multithreaded listeners immediately, then cache a managed copy
/// (under the lock) for later main-thread delivery. Frames from cameras other
/// than the color camera are ignored.
/// </summary>
/// <param name="callbackContext">Callback context.</param>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
private static void _OnImageAvailable(
    IntPtr callbackContext, TangoEnums.TangoCameraId cameraId,
    TangoImageBuffer imageBuffer)
{
    if (cameraId != COLOR_CAMERA_ID)
    {
        return;
    }

    // Multithreaded listeners get the raw buffer on this (binder) thread,
    // before any copying takes place.
    if (m_onTangoImageMultithreadedAvailable != null)
    {
        m_onTangoImageMultithreadedAvailable(cameraId, imageBuffer);
    }

    lock (m_lockObject)
    {
        // Lazily allocate the managed copy. width * height * 3 / 2 bytes
        // matches a 4:2:0 YUV layout — NOTE(review): assumes the camera
        // delivers a 4:2:0 format; confirm against the configured format.
        if (m_previousImageBuffer.data == null)
        {
            m_previousImageBuffer.data = new byte[(imageBuffer.width * imageBuffer.height * 3) / 2];
        }

        m_previousImageBuffer.width = imageBuffer.width;
        m_previousImageBuffer.height = imageBuffer.height;
        m_previousImageBuffer.stride = imageBuffer.stride;
        m_previousImageBuffer.timestamp = imageBuffer.timestamp;
        m_previousImageBuffer.format = imageBuffer.format;
        m_previousImageBuffer.frame_number = imageBuffer.frame_number;

        // Copy the native pixels so the data survives past this callback.
        Marshal.Copy(imageBuffer.data, m_previousImageBuffer.data, 0, m_previousImageBuffer.data.Length);
        m_shouldSendByteBufferMethodEvent = true;
    }
}
/// <summary>
/// Counts incoming experimental Tango image callbacks, logging only the
/// very first one.
/// </summary>
/// <param name="id">Camera identifier.</param>
/// <param name="buf">Image buffer.</param>
public void OnTangoImageMultithreadedAvailable(Tango.TangoEnums.TangoCameraId id, Tango.TangoImageBuffer buf)
{
    bool isFirstFrame = fCount == 0;
    if (isFirstFrame)
    {
        JLog("ExperimentalTangoImageAvailable on " + id.ToString());
    }

    fCount = fCount + 1;
}
/// <summary>
/// Detect one or more markers in the input image.
/// </summary>
/// <param name="imageBuffer">
/// The input image buffer.
/// </param>
/// <param name="cameraId">
/// Camera that is used for detecting markers, can be TangoEnums.TangoCameraId.TANGO_CAMERA_FISHEYE or
/// TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR.
/// </param>
/// <param name="markerType">
/// Target marker's type. Current support marker types are QR marker and Alvar marker.
/// </param>
/// <param name="markerSize">
/// Physical size of marker's length.
/// </param>
/// <param name="markers">
/// The returned marker list.
/// </param>
/// <returns>
/// True on success; false if the marker list is null or the native
/// detection call does not return TANGO_SUCCESS.
/// (BUGFIX: the doc previously described int error codes and the method
/// returned false even on success.)
/// </returns>
public static bool DetectMarkers(TangoUnityImageData imageBuffer, TangoEnums.TangoCameraId cameraId,
    MarkerType markerType, double markerSize, List<Marker> markers)
{
    if (markers == null)
    {
        Debug.Log("markers is null. " + Environment.StackTrace);
        return false;
    }

    // Clear any existing marker so the list holds only this frame's results.
    markers.Clear();

    // Marshal the managed image into the native-compatible buffer struct,
    // pinning the pixel array for the duration of the native call.
    TangoImageBuffer buffer = new TangoImageBuffer();
    GCHandle gchandle = GCHandle.Alloc(imageBuffer.data, GCHandleType.Pinned);
    buffer.data = gchandle.AddrOfPinnedObject();
    buffer.format = imageBuffer.format;
    buffer.frame_number = imageBuffer.frame_number;
    buffer.height = imageBuffer.height;
    buffer.stride = imageBuffer.stride;
    buffer.timestamp = imageBuffer.timestamp;
    buffer.width = imageBuffer.width;

    // Get the camera pose at the image timestamp; the detector needs it to
    // express marker poses in the Start of Service frame.
    TangoPoseData poseData = new TangoPoseData();
    TangoCoordinateFramePair pair;
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_CAMERA_COLOR;
    PoseProvider.GetPoseAtTime(poseData, buffer.timestamp, pair);

    APIMarkerList rawAPIMarkerList = new APIMarkerList();
    APIMarkerParam rawMarkerParam = new APIMarkerParam(markerType, markerSize);

    int ret;
    try
    {
        ret = TangoSupportAPI.TangoSupport_detectMarkers(ref buffer, cameraId,
            ref poseData.translation, ref poseData.orientation,
            ref rawMarkerParam, ref rawAPIMarkerList);
    }
    finally
    {
        // BUGFIX: free the pin even if the native call throws; previously an
        // exception would leak the pinned handle.
        gchandle.Free();
    }

    if (ret != Common.ErrorType.TANGO_SUCCESS)
    {
        return false;
    }

    if (rawAPIMarkerList.markerCount != 0)
    {
        List<APIMarker> apiMarkers = new List<APIMarker>();

        MarshallingHelper.MarshalUnmanagedStructArrayToList<TangoSupport.APIMarker>(
            rawAPIMarkerList.markers, rawAPIMarkerList.markerCount, apiMarkers);

        for (int i = 0; i < apiMarkers.Count; ++i)
        {
            APIMarker apiMarker = apiMarkers[i];
            Marker marker = new Marker();
            marker.m_type = apiMarker.m_type;
            marker.m_timestamp = apiMarker.m_timestamp;
            marker.m_content = apiMarker.m_content;

            // Convert 2D corner points from pixel space to UV space.
            marker.m_corner2DP0.x = apiMarker.m_corner2DP0.x / buffer.width;
            marker.m_corner2DP0.y = apiMarker.m_corner2DP0.y / buffer.height;
            marker.m_corner2DP1.x = apiMarker.m_corner2DP1.x / buffer.width;
            marker.m_corner2DP1.y = apiMarker.m_corner2DP1.y / buffer.height;
            marker.m_corner2DP2.x = apiMarker.m_corner2DP2.x / buffer.width;
            marker.m_corner2DP2.y = apiMarker.m_corner2DP2.y / buffer.height;
            marker.m_corner2DP3.x = apiMarker.m_corner2DP3.x / buffer.width;
            marker.m_corner2DP3.y = apiMarker.m_corner2DP3.y / buffer.height;

            // Convert 3D corner points from Start of Service space to Unity World space.
            marker.m_corner3DP0 = GetMarkerInUnitySpace(apiMarker.m_corner3DP0);
            marker.m_corner3DP1 = GetMarkerInUnitySpace(apiMarker.m_corner3DP1);
            marker.m_corner3DP2 = GetMarkerInUnitySpace(apiMarker.m_corner3DP2);
            marker.m_corner3DP3 = GetMarkerInUnitySpace(apiMarker.m_corner3DP3);

            // Convert pose from Start of Service to Unity World space.
            Vector3 translation = new Vector3(
                (float)apiMarker.m_translation.x,
                (float)apiMarker.m_translation.y,
                (float)apiMarker.m_translation.z);
            Quaternion orientation = new Quaternion(
                (float)apiMarker.m_rotation.x,
                (float)apiMarker.m_rotation.y,
                (float)apiMarker.m_rotation.z,
                (float)apiMarker.m_rotation.w);

            Matrix4x4 ss_T_marker = Matrix4x4.TRS(translation, orientation, Vector3.one);

            // Note that UNITY_WORLD_T_START_SERVICE is involutory matrix. The actually transform
            // we wanted to multiply on the right hand side is START_SERVICE_T_UNITY_WORLD.
            Matrix4x4 uw_T_u_marker = TangoSupport.UNITY_WORLD_T_START_SERVICE *
                ss_T_marker * TangoSupport.UNITY_WORLD_T_START_SERVICE;

            marker.m_translation = uw_T_u_marker.GetColumn(3);
            marker.m_orientation = Quaternion.LookRotation(uw_T_u_marker.GetColumn(2), uw_T_u_marker.GetColumn(1));

            // Add the marker to the output list
            markers.Add(marker);
        }
    }

    // Release the native list allocated by the support library.
    TangoSupportAPI.TangoSupport_freeMarkerList(ref rawAPIMarkerList);

    // BUGFIX: previously `return false;` — successful detection reported
    // failure to every caller checking the boolean result.
    return true;
}
/// <summary>
/// Editor-emulation stub: reports a "valid" depth sample at the origin.
/// </summary>
/// <returns>Always Common.ErrorType.TANGO_SUCCESS.</returns>
public static int TangoSupport_getDepthAtPointBilateralCameraIntrinsicsMatrixTransform(
    TangoXYZij pointCloud, TangoCameraIntrinsics cameraIntrinsics,
    TangoImageBuffer colorImage, ref DMatrix4x4 matrix, ref Vector2 uvCoordinates,
    out Vector3 colorCameraPoint, out int isValidPoint)
{
    // No native library in the editor; pretend the query succeeded.
    isValidPoint = 1;
    colorCameraPoint = Vector3.zero;
    return Common.ErrorType.TANGO_SUCCESS;
}
/// <summary>
/// This will be called when a new frame is available from the camera.
///
/// The first scan-line of the color image is reserved for metadata instead of image pixels.
/// </summary>
/// <param name="cameraId">Camera identifier.</param>
/// <param name="imageBuffer">Image buffer.</param>
public void OnTangoImageMultithreadedAvailable(TangoEnums.TangoCameraId cameraId, TangoImageBuffer imageBuffer)
{
    // Ignore frames while disabled, and frames from any camera other
    // than the color camera.
    if (!m_enabled || cameraId != TangoEnums.TangoCameraId.TANGO_CAMERA_COLOR)
    {
        return;
    }

    // Build World T device at the image timestamp, basing the pose on the
    // area-description frame when configured, otherwise start-of-service.
    TangoCoordinateFramePair framePair;
    framePair.baseFrame = m_useAreaDescriptionPose
        ? TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION
        : TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    framePair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

    TangoPoseData world_T_devicePose = new TangoPoseData();
    PoseProvider.GetPoseAtTime(world_T_devicePose, imageBuffer.timestamp, framePair);

    if (world_T_devicePose.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        Debug.Log(string.Format("Time {0} has bad status code {1}", imageBuffer.timestamp, world_T_devicePose.status_code) + Environment.StackTrace);
        return;
    }

    // NOTE: The 3D Reconstruction library does not handle left handed matrices correctly. For now, transform
    // into the Unity world space after extraction.
    Matrix4x4 world_T_colorCamera = world_T_devicePose.ToMatrix4x4() * m_device_T_colorCamera;
    _UpdateColor(imageBuffer, world_T_colorCamera);
}