/// <summary>
/// Callback that gets called when depth is available from the Tango Service.
/// DO NOT USE THE UNITY API FROM INSIDE THIS FUNCTION!
/// </summary>
/// <param name="xyzij">Depth information from Tango.</param>
public void OnTangoDepthAvailable(Tango.TangoUnityDepth xyzij)
{
    // Guard before any member access; the original read m_timestamp before
    // its null check, which would throw on a null callback payload.
    if (xyzij == null)
    {
        return;
    }

    // Calculate the time since the last successful depth data collection.
    if (m_previousDepthDeltaTime == 0.0)
    {
        m_previousDepthDeltaTime = xyzij.m_timestamp;
    }
    else
    {
        m_numberOfDepthSamples++;
        m_timeSinceLastDepthFrame = xyzij.m_timestamp - m_previousDepthDeltaTime;
        m_previousDepthDeltaTime = xyzij.m_timestamp;
    }

    // Fill in the data to draw the point cloud.
    if (m_vertices != null && xyzij.m_points != null)
    {
        int numberOfActiveVertices = xyzij.m_pointCount;
        m_pointsCount = numberOfActiveVertices;
        if (numberOfActiveVertices > 0)
        {
            // Copy straight from the callback payload. The original read from
            // a freshly allocated, never-filled scratch array, which zeroed
            // every active vertex instead of using the incoming depth data.
            for (int i = 0; i < m_vertices.Length; ++i)
            {
                if (i < numberOfActiveVertices)
                {
                    m_vertices[i].x = xyzij.m_points[i * 3];
                    m_vertices[i].y = xyzij.m_points[(i * 3) + 1];
                    m_vertices[i].z = xyzij.m_points[(i * 3) + 2];
                }
                else
                {
                    // Collapse unused vertices to the origin so stale points
                    // from a previous (larger) frame are not rendered.
                    m_vertices[i].x = m_vertices[i].y = m_vertices[i].z = 0.0f;
                }
            }

            m_isDirty = true;
        }
    }
}
/// <summary>
/// Depth callback from the Tango Service.
///
/// On the Tango tablet the depth callback fires at roughly 5 Hz.
/// </summary>
/// <param name="tangoDepth">Tango depth.</param>
public void OnTangoDepthAvailable(TangoUnityDepth tangoDepth)
{
    // The PointCloud may not have consumed this frame yet, so no depth
    // processing happens here; simply release the waiting coroutine.
    m_findPlaneWaitingForDepth = false;
}
/// <summary>
/// Callback that gets called when depth is available from the Tango Service.
/// Transforms the point cloud into Unity world coordinates and forwards it
/// to the per-frame callback sink.
/// </summary>
/// <param name="tangoDepth">Depth information from Tango.</param>
public void OnTangoDepthAvailable(TangoUnityDepth tangoDepth)
{
    // Guard before any member access; the original dereferenced
    // tangoDepth.m_timestamp before its null check.
    if (tangoDepth == null || tangoDepth.m_points == null)
    {
        return;
    }

    // Calculate the time (ms) since the last successful depth collection.
    if (m_previousDepthDeltaTime == 0.0)
    {
        m_previousDepthDeltaTime = tangoDepth.m_timestamp;
    }
    else
    {
        m_depthDeltaTime = (float)((tangoDepth.m_timestamp - m_previousDepthDeltaTime) * 1000.0);
        m_previousDepthDeltaTime = tangoDepth.m_timestamp;
    }

    m_pointsCount = tangoDepth.m_pointCount;
    if (m_pointsCount <= 0)
    {
        m_overallZ = 0;
        return;
    }

    _SetUpExtrinsics();

    // Query the device pose at the depth frame's timestamp so the point
    // cloud can be transformed into world coordinates.
    TangoCoordinateFramePair pair;
    TangoPoseData poseData = new TangoPoseData();
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(poseData, m_previousDepthDeltaTime, pair);
    if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        return;
    }

    Vector3 position = new Vector3((float)poseData.translation[0],
                                   (float)poseData.translation[1],
                                   (float)poseData.translation[2]);
    Quaternion quat = new Quaternion((float)poseData.orientation[0],
                                     (float)poseData.orientation[1],
                                     (float)poseData.orientation[2],
                                     (float)poseData.orientation[3]);
    m_startServiceTDevice = Matrix4x4.TRS(position, quat, Vector3.one);

    // Transformation that takes depth-camera-frame points into the Unity
    // world frame; this object's own transform is reset to identity and the
    // matrix is applied per point instead.
    Matrix4x4 unityWorldTDepthCamera =
        m_unityWorldTStartService * m_startServiceTDevice * Matrix4x4.Inverse(m_imuTDevice) * m_imuTDepthCamera;

    transform.position = Vector3.zero;
    transform.rotation = Quaternion.identity;

    // Convert the raw float triples into world space, accumulating the
    // camera-space z to compute the average depth of this frame.
    m_overallZ = 0;
    for (int i = 0; i < m_pointsCount; ++i)
    {
        float x = tangoDepth.m_points[(i * 3) + 0];
        float y = tangoDepth.m_points[(i * 3) + 1];
        float z = tangoDepth.m_points[(i * 3) + 2];
        m_points[i] = unityWorldTDepthCamera.MultiplyPoint(new Vector3(x, y, z));
        m_overallZ += z;
    }

    m_overallZ = m_overallZ / m_pointsCount;

    // Hand the world-space points to the per-depth-frame consumer.
    PerDepthFrameCallBack.Instance.CallBack(m_points, m_pointsCount);
}
/// <summary>
/// Callback that gets called when depth is available from the Tango Service.
/// Transforms the point cloud into Unity world coordinates (including the
/// TangoDeltaPoseController drift offset) and forwards it for voxel
/// extraction.
/// </summary>
/// <param name="tangoDepth">Depth information from Tango.</param>
public void OnTangoDepthAvailable(TangoUnityDepth tangoDepth)
{
    // Guard before any member access; the original dereferenced
    // tangoDepth.m_timestamp before its null check.
    if (tangoDepth == null || tangoDepth.m_points == null)
    {
        return;
    }

    // Calculate the time (ms) since the last successful depth collection.
    if (m_previousDepthDeltaTime == 0.0)
    {
        m_previousDepthDeltaTime = tangoDepth.m_timestamp;
    }
    else
    {
        m_depthDeltaTime = (float)((tangoDepth.m_timestamp - m_previousDepthDeltaTime) * 1000.0);
        m_previousDepthDeltaTime = tangoDepth.m_timestamp;
    }

    m_pointsCount = tangoDepth.m_pointCount;
    if (m_pointsCount <= 0)
    {
        m_overallZ = 0;
        return;
    }

    _SetUpCameraData();

    // Query the device pose at the depth frame's timestamp so the point
    // cloud can be transformed into world coordinates.
    TangoCoordinateFramePair pair;
    TangoPoseData poseData = new TangoPoseData();
    pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
    pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
    PoseProvider.GetPoseAtTime(poseData, m_previousDepthDeltaTime, pair);
    if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        return;
    }

    Vector3 position = new Vector3((float)poseData.translation[0],
                                   (float)poseData.translation[1],
                                   (float)poseData.translation[2]);
    Quaternion quat = new Quaternion((float)poseData.orientation[0],
                                     (float)poseData.orientation[1],
                                     (float)poseData.orientation[2],
                                     (float)poseData.orientation[3]);
    m_startServiceTDevice = Matrix4x4.TRS(position, quat, Vector3.one);

    // Transformation that takes depth-camera-frame points into the Unity
    // world frame; this object's own transform is reset to identity and the
    // matrix is applied per point instead.
    Matrix4x4 unityWorldTDepthCamera =
        m_unityWorldTStartService * m_startServiceTDevice * Matrix4x4.Inverse(m_imuTDevice) * m_imuTDepthCamera;

    transform.position = Vector3.zero;
    transform.rotation = Quaternion.identity;

    // Apply the drift-correction offset from TangoDeltaPoseController, when present.
    Matrix4x4 unityWorldOffsetTDepthCamera;
    if (m_tangoDeltaPoseController != null)
    {
        unityWorldOffsetTDepthCamera = m_tangoDeltaPoseController.UnityWorldOffset * unityWorldTDepthCamera;
    }
    else
    {
        unityWorldOffsetTDepthCamera = unityWorldTDepthCamera;
    }

    // Convert the raw float triples into world space, accumulating the
    // camera-space z to compute the average depth of this frame.
    m_overallZ = 0;
    for (int i = 0; i < m_pointsCount; ++i)
    {
        float x = tangoDepth.m_points[(i * 3) + 0];
        float y = tangoDepth.m_points[(i * 3) + 1];
        float z = tangoDepth.m_points[(i * 3) + 2];
        m_points[i] = unityWorldOffsetTDepthCamera.MultiplyPoint(new Vector3(x, y, z));
        m_overallZ += z;
    }

    m_overallZ = m_overallZ / m_pointsCount;
    m_pointsTimestamp = tangoDepth.m_timestamp;

    VoxelExtractionPointCloud.Instance.addAndRender(this);
}
/// <summary>
/// An event notifying when new depth data is available. OnTangoDepthAvailable events are thread safe.
/// Copies the incoming frame into the local buffer and records the pose at
/// the frame's timestamp.
/// </summary>
/// <param name="tangoDepth">Depth data that we get from API.</param>
public void OnTangoDepthAvailable(TangoUnityDepth tangoDepth)
{
    // Fill in the data to draw the point cloud.
    if (tangoDepth == null)
    {
        return;
    }

    if (tangoDepth.m_points == null)
    {
        Debug.Log("Depth points are null");
        return;
    }

    // Each point occupies 3 floats, so compare against the required float
    // count. The original compared the point count against the float-array
    // length (3x larger), so a growing frame could overrun the buffer in
    // the copy loop below. Reallocate with 50% headroom.
    if (tangoDepth.m_pointCount * 3 > m_currTangoDepth.m_points.Length)
    {
        m_currTangoDepth.m_points = new float[3 * (int)(1.5f * tangoDepth.m_pointCount)];
    }

    // Copy one point (3 floats) per iteration. The original stepped the
    // index by 3 while also multiplying it by 3, which copied only every
    // third point and left gaps in the buffer.
    for (int i = 0; i < tangoDepth.m_pointCount; i++)
    {
        m_currTangoDepth.m_points[(3 * i) + 0] = tangoDepth.m_points[(i * 3) + 0];
        m_currTangoDepth.m_points[(3 * i) + 1] = tangoDepth.m_points[(i * 3) + 1];
        m_currTangoDepth.m_points[(3 * i) + 2] = tangoDepth.m_points[(i * 3) + 2];
    }

    m_currTangoDepth.m_timestamp = tangoDepth.m_timestamp;
    m_currTangoDepth.m_pointCount = tangoDepth.m_pointCount;

    // Only mark the frame usable when a valid pose exists at its timestamp.
    PoseProvider.GetPoseAtTime(m_poseAtDepthTimestamp, m_currTangoDepth.m_timestamp, m_coordinatePair);
    if (m_poseAtDepthTimestamp.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
    {
        return;
    }

    m_isDirty = true;
}
/// <summary>
/// Read depth data from file. When the end of the log is reached, the
/// stream is rewound and replayed from the beginning.
/// </summary>
/// <returns>0 on success, -1 if the frame marker could not be read.</returns>
/// <param name="reader">File reader.</param>
/// <param name="depthFrame">Tango depth data; its points buffer is grown as needed.</param>
public int ReadDepthFromFile(BinaryReader reader, ref TangoUnityDepth depthFrame)
{
    string frameMarker;
    try
    {
        frameMarker = reader.ReadString();
    }
    catch (EndOfStreamException x)
    {
        // Loop the log file: rewind to the start and replay.
        reader.BaseStream.Position = 0;
        Reset();
        print("Restarting log file: " + x.ToString());
        frameMarker = reader.ReadString();
    }

    if (frameMarker.CompareTo("depthframe\n") != 0)
    {
        m_debugText = "Failed to read depth";
        return -1;
    }

    // NOTE(review): Parse uses the current culture, matching the
    // culture-dependent formatting in WriteDepthToFile, so files only
    // round-trip on devices with the same culture settings — confirm
    // whether invariant formatting is wanted on both sides.
    depthFrame.m_timestamp = double.Parse(reader.ReadString());
    depthFrame.m_pointCount = int.Parse(reader.ReadString());

    // Each point occupies 3 floats, so compare against the required float
    // count. The original compared the point count against the float-array
    // length (3x larger), so a growing frame could overrun the buffer in
    // the read loop below. Reallocate with 50% headroom.
    if (depthFrame.m_pointCount * 3 > depthFrame.m_points.Length)
    {
        depthFrame.m_points = new float[3 * (int)(1.5f * depthFrame.m_pointCount)];
    }

    // Load the xyz triples.
    for (int i = 0; i < depthFrame.m_pointCount; i++)
    {
        depthFrame.m_points[(3 * i) + 0] = reader.ReadSingle();
        depthFrame.m_points[(3 * i) + 1] = reader.ReadSingle();
        depthFrame.m_points[(3 * i) + 2] = reader.ReadSingle();
    }

    return 0;
}
/// <summary>
/// Write one depth frame to file: a "depthframe\n" marker, the timestamp
/// and point count as strings, then the raw xyz floats.
/// </summary>
/// <param name="writer">File writer; ignored when null.</param>
/// <param name="depthFrame">Tango depth data.</param>
public void WriteDepthToFile(BinaryWriter writer, TangoUnityDepth depthFrame)
{
    if (writer == null)
    {
        return;
    }

    writer.Write("depthframe\n");
    writer.Write(depthFrame.m_timestamp + "\n");
    writer.Write(depthFrame.m_pointCount + "\n");

    // Each point is three consecutive floats; emit them in order.
    int floatCount = depthFrame.m_pointCount * 3;
    for (int idx = 0; idx < floatCount; idx++)
    {
        writer.Write(depthFrame.m_points[idx]);
    }

    writer.Flush();
}
/// <summary>
/// Register to get Tango depth callbacks.
///
/// NOTE: Tango depth callbacks happen on a different thread than the main
/// Unity thread.
/// </summary>
internal virtual void SetCallback()
{
    // Keep the delegate in a field so a live managed reference exists while
    // the native side holds the callback.
    m_onDepthAvailableCallback = new Tango.DepthProvider.TangoService_onDepthAvailable(_OnDepthAvailable);

    // Scratch depth object reused by the callback machinery.
    m_tangoDepth = new TangoUnityDepth();

    Tango.DepthProvider.SetCallback(m_onDepthAvailableCallback);
}
/// <summary>
/// Callback that gets called when depth is available from the Tango Service.
/// DO NOT USE THE UNITY API FROM INSIDE THIS FUNCTION!
/// </summary>
/// <param name="tangoDepth">Depth information from Tango.</param>
public void OnTangoDepthAvailable(TangoUnityDepth tangoDepth)
{
    // Guard before any member access; the original dereferenced
    // tangoDepth.m_timestamp before its null check.
    if (tangoDepth == null || tangoDepth.m_points == null)
    {
        return;
    }

    // Calculate the time (ms) since the last successful depth collection.
    if (m_previousDepthDeltaTime == 0.0)
    {
        m_previousDepthDeltaTime = tangoDepth.m_timestamp;
    }
    else
    {
        m_depthDeltaTime = (float)((tangoDepth.m_timestamp - m_previousDepthDeltaTime) * 1000.0);
        m_previousDepthDeltaTime = tangoDepth.m_timestamp;
    }

    int numberOfActiveVertices = tangoDepth.m_pointCount;
    m_pointsCount = numberOfActiveVertices;
    float validPointCount = 0;

    // Reset the accumulator each frame; the original never cleared it, so
    // the average depth carried residue from previous callbacks.
    m_overallZ = 0;

    if (numberOfActiveVertices > 0)
    {
        _SetUpExtrinsics();

        // Query the device pose at the depth frame's timestamp.
        TangoCoordinateFramePair pair;
        TangoPoseData poseData = new TangoPoseData();
        pair.baseFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
        pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
        PoseProvider.GetPoseAtTime(poseData, m_previousDepthDeltaTime, pair);

        Vector3 position = new Vector3((float)poseData.translation[0],
                                       (float)poseData.translation[1],
                                       (float)poseData.translation[2]);
        Quaternion quat = new Quaternion((float)poseData.orientation[0],
                                         (float)poseData.orientation[1],
                                         (float)poseData.orientation[2],
                                         (float)poseData.orientation[3]);
        m_ssTd = Matrix4x4.TRS(position, quat, Vector3.one);

        // uwTuc takes the RGB-camera-frame cloud into the Unity world frame;
        // its translation and rotation are applied to this transform.
        Matrix4x4 uwTuc = m_uwTss * m_ssTd * Matrix4x4.Inverse(m_imuTd) * m_imuTc * m_cTuc;
        transform.position = uwTuc.GetColumn(3);
        transform.rotation = Quaternion.LookRotation(uwTuc.GetColumn(2), uwTuc.GetColumn(1));

        // Convert the raw float triples into vertices for the point mesh.
        Vector3[] pointCloudVertices = new Vector3[VERT_COUNT];
        for (int i = 0; i < numberOfActiveVertices; ++i)
        {
            pointCloudVertices[i] = new Vector3(tangoDepth.m_points[i * 3],
                                                tangoDepth.m_points[(i * 3) + 1],
                                                tangoDepth.m_points[(i * 3) + 2]);
            m_overallZ += pointCloudVertices[i].z;
            ++validPointCount;
        }

        m_mesh.Clear();
        m_mesh.vertices = pointCloudVertices;

        // NOTE(review): the triangles assignment is immediately superseded by
        // SetIndices(..., MeshTopology.Points, 0); it looks redundant but is
        // kept to preserve the original behavior exactly.
        m_mesh.triangles = m_triangles;
        m_mesh.SetIndices(m_triangles, MeshTopology.Points, 0);
    }

    // Don't divide by zero!
    if (validPointCount != 0)
    {
        m_overallZ = m_overallZ / validPointCount;
    }
    else
    {
        m_overallZ = 0;
    }
}
/// <summary>
/// Callback that gets called when depth is available from the Tango Service.
/// Transforms the frame's points into Unity world space, keeps those inside
/// the scan bounds, and spawns a points-mesh object for them.
/// </summary>
/// <param name="tangoDepth">Depth information from Tango.</param>
public void OnTangoDepthAvailable( TangoUnityDepth tangoDepth )
{
    if( tangoDepth == null || tangoDepth.m_points == null || tangoDepth.m_pointCount <= 0 ) {
        return;
    }

    TangoUtility.InitExtrinsics( m_request );
    TangoUtility.GetPose( tangoDepth.m_timestamp );

    // The transformation matrix that puts the depth-camera-frame point
    // cloud into the Unity world's coordinate system.
    Matrix4x4 unityWorldTDepthCamera = TangoUtility.GetUnityWorldToDepthCamera();

    // Convert the raw float triples to world space; points outside the scan
    // bounds are left at Vector3.zero in the output array.
    int pointCount = tangoDepth.m_pointCount;
    Vector3[] points = new Vector3[pointCount];
    float[] from = tangoDepth.m_points;
    Vector3 point = Vector3.zero;
    int index = 0;
    for( int i = 0; i < pointCount; ++i ) {
        index = i * 3;
        point.Set( from[index + 0], from[index + 1], from[index + 2] );
        point = unityWorldTDepthCamera.MultiplyPoint( point );
        if( m_scanBounds.bounds.Contains( point ) ) {
            points[i] = point;

            // NOTE(review): the original computed a colour-camera-space
            // position (m_cameraProjection * TangoUtility.GetDepthToColourCamera())
            // here solely to Debug.Log it for every contained point of every
            // depth frame, which throttled the app; that dead per-point
            // logging was removed. Reintroduce the projection when the
            // colour lookup is actually implemented.
        }
    }

    // Spawn a mesh holding this frame's retained points and track it.
    GameObject obj = Instantiate<GameObject>( m_pointsMeshPrefab );
    obj.GetComponent<PointsMesh>().AddPoints( points );
    m_meshes.Push( obj.GetComponent<MeshFilter>() );
}
/// <summary>
/// It's backwards, but fill an emulated TangoXYZij instance from an emulated TangoPointCloudData
/// instance. It is the responsibility of the caller to GC pin/free the pointCloudData's m_points.
/// </summary>
/// <returns>Emulated raw xyzij data.</returns>
/// <param name="depth">Emulated point cloud data.</param>
/// <param name="pinnedPoints">Pinned array of pointCloudData.m_points.</param>
private static TangoXYZij _GetEmulatedRawXyzijData(TangoUnityDepth depth, GCHandle pinnedPoints)
{
    TangoXYZij raw = new TangoXYZij();
    raw.timestamp = depth.m_timestamp;
    raw.xyz = pinnedPoints.AddrOfPinnedObject();
    raw.xyz_count = depth.m_pointCount;

    // Emulation provides no ij image-index table.
    raw.ij = IntPtr.Zero;
    raw.ij_rows = 0;
    raw.ij_cols = 0;

    return raw;
}
/// <summary>
/// Raise a Tango depth event if there is new data.
/// </summary>
internal static void SendIfAvailable()
{
    if (m_onPointCloudAvailableCallback == null)
    {
        return;
    }

#if UNITY_EDITOR
    lock (m_lockObject)
    {
        if (DepthProvider.m_emulationIsDirty)
        {
            DepthProvider.m_emulationIsDirty = false;

            // Regenerate the emulated cloud only when someone is listening.
            // (The original used a bitwise '|' between the last two operands
            // by mistake; short-circuit '||' is the intended operator, as
            // used by every other listener check in this method.)
            if (m_onTangoDepthAvailable != null || m_onTangoDepthMultithreadedAvailable != null ||
                m_onPointCloudAvailable != null || m_onPointCloudMultithreadedAvailable != null)
            {
                _FillEmulatedPointCloud(ref m_pointCloud);
            }

            if (m_onTangoDepthMultithreadedAvailable != null)
            {
                // Pretend to be making a call from unmanaged code: pin the
                // points array and hand out a raw xyzij view of it.
                TangoUnityDepth depth = new TangoUnityDepth(m_pointCloud);
                GCHandle pinnedPoints = GCHandle.Alloc(depth.m_points, GCHandleType.Pinned);
                TangoXYZij emulatedXyzij = _GetEmulatedRawXyzijData(depth, pinnedPoints);
                m_onTangoDepthMultithreadedAvailable(emulatedXyzij);
                pinnedPoints.Free();
            }

            if (m_onPointCloudMultithreadedAvailable != null)
            {
                // Pretend to be making a call from unmanaged code.
                GCHandle pinnedPoints = GCHandle.Alloc(m_pointCloud.m_points, GCHandleType.Pinned);
                TangoPointCloudIntPtr rawData = _GetEmulatedRawData(m_pointCloud, pinnedPoints);
                m_onPointCloudMultithreadedAvailable(ref rawData);
                pinnedPoints.Free();
            }

            // Defer main-thread delivery to the block below.
            if (m_onTangoDepthAvailable != null || m_onPointCloudAvailable != null)
            {
                m_isDirty = true;
            }
        }
    }
#endif
    if (m_isDirty && (m_onTangoDepthAvailable != null || m_onPointCloudAvailable != null))
    {
        lock (m_lockObject)
        {
            // Thin the cloud before dispatching to the main-thread listeners.
            _ReducePointCloudPoints(m_pointCloud, m_maxNumReducedDepthPoints);
            if (m_onTangoDepthAvailable != null)
            {
                m_onTangoDepthAvailable(new TangoUnityDepth(m_pointCloud));
            }

            if (m_onPointCloudAvailable != null)
            {
                m_onPointCloudAvailable(m_pointCloud);
            }
        }

        m_isDirty = false;
    }
}