示例#1
0
        /// <summary>
        /// Scans a depth point cloud for a horizontal floor plane below the camera.
        /// Points are grouped into buckets by their y value rounded to SENSITIVITY;
        /// a bucket qualifies as the floor only when it holds more than
        /// RECOGNITION_THRESHOLD points, lies below <paramref name="cameraY"/>,
        /// and is the lowest non-noise bucket found.
        /// </summary>
        /// <param name="pointCloud">Depth points packed as x,y,z,confidence float quadruples.</param>
        /// <param name="cameraY">World-space y of the camera; the floor must lie below it.</param>
        /// <returns>
        /// The detected floor plane's y value; float.MinValue when the first qualifying
        /// bucket is not the lowest one; otherwise the previously stored m_floorPlaneY.
        /// </returns>
        public float FindFloor(TangoPointCloudData pointCloud, float cameraY)
        {
            m_numPointsAtY.Clear();
            m_nonNoiseBuckets.Clear();
            var m_pointsCount = pointCloud.NumPoints;

            // Count each depth point into a bucket based on its world position y value.
            // Note: the confidence component (i * 4 + 3) is intentionally ignored here.
            for (int i = 0; i < m_pointsCount; i++)
            {
                var x     = pointCloud.Points.Get(i * 4);
                var y     = pointCloud.Points.Get(i * 4 + 1);
                var z     = pointCloud.Points.Get(i * 4 + 2);
                var point = new Vector3(x, y, z);

                if (!point.Equals(Vector3.Zero))
                {
                    // Group similar points into buckets based on sensitivity.
                    float roundedY = (float)Math.Round(point.Y / SENSITIVITY) * SENSITIVITY;

                    // Single TryGetValue lookup instead of ContainsKey + Add + indexer.
                    int count;
                    m_numPointsAtY.TryGetValue(roundedY, out count);
                    count++;
                    m_numPointsAtY[roundedY] = count;

                    // Promote the bucket to non-noise once it crosses the threshold.
                    if (count > NOISE_THRESHOLD && !m_nonNoiseBuckets.Contains(roundedY))
                    {
                        m_nonNoiseBuckets.Add(roundedY);
                    }
                }
            }

            // Find a plane at the y value. The y value must be below the camera y position.
            m_nonNoiseBuckets.Sort();
            for (int i = 0; i < m_nonNoiseBuckets.Count; i++)
            {
                float yBucket   = m_nonNoiseBuckets[i];
                int   numPoints = m_numPointsAtY[yBucket];
                if (numPoints > RECOGNITION_THRESHOLD && yBucket < cameraY)
                {
                    // Reject the plane if it is not the lowest non-noise bucket.
                    if (yBucket > m_nonNoiseBuckets[0])
                    {
                        return float.MinValue;
                    }

                    m_floorFound         = true;
                    m_findFloorWithDepth = false;
                    m_floorPlaneY        = yBucket;

                    // Reset the scratch collections and stop scanning explicitly.
                    // (The original cleared these inside the loop, which terminated it
                    // only because Count dropped to zero — an accident of the condition.)
                    m_numPointsAtY.Clear();
                    m_nonNoiseBuckets.Clear();
                    break;
                }
            }
            return m_floorPlaneY;
        }
示例#2
0
        /// <summary>
        /// Computes the mean z (depth) component over all points in the cloud.
        /// The buffer packs points as x,y,z,confidence quadruples, so every
        /// fourth float starting at offset 2 is a z value.
        /// </summary>
        /// <param name="pointCloud">Depth data delivered by the Tango service.</param>
        /// <returns>The average depth, or 0 when the cloud contains no points.</returns>
        private float calculateAveragedDepth(TangoPointCloudData pointCloud)
        {
            var pointCount = pointCloud.NumPoints;
            if (pointCount == 0)
            {
                // Empty cloud: report zero depth rather than dividing by zero.
                return 0;
            }

            float sumZ       = 0;
            int   floatCount = 4 * pointCount;
            for (int index = 2; index < floatCount; index += 4)
            {
                sumZ += pointCloud.Points.Get(index);
            }
            return sumZ / pointCount;
        }
示例#3
0
        /// <summary>
        /// Tango depth callback — invoked whenever a new point cloud arrives
        /// (requires depth mode to be enabled in the Tango configuration).
        /// Logs a summary and notifies the owning activity.
        /// </summary>
        /// <param name="pointCloud">The freshly captured depth points.</param>
        public void OnPointCloudAvailable(TangoPointCloudData pointCloud)
        {
            var averagedDepth = calculateAveragedDepth(pointCloud);

            // var f = new FloorFinder();
            // var floor = f.FindFloor(pointCloud,0);
            //Log.Debug(Tag, $"Floor: {floor}");
            Log.Debug(Tag, $"Point Cloud Available! Points:{pointCloud.NumPoints}, z: {averagedDepth}");

            _activity.PointCloudIsAvailable();

            // WritePointCloudData(pointCloud);

            // why do we need this?
            // _activity.UpdatePointCloud(pointCloud);
        }
示例#4
0
        /// <summary>
        /// Dumps every depth point (x, y, z, confidence) to the debug log, one
        /// comma-separated line per point. Intended for ad-hoc inspection of the
        /// raw point buffer, which packs points as consecutive float quadruples.
        /// </summary>
        /// <param name="pointCloud">Depth data delivered by the Tango service.</param>
        public void WritePointCloudData(TangoPointCloudData pointCloud)
        {
            var pointCount = pointCloud.NumPoints;

            Log.Debug("Test", $"{pointCount}");

            // Log each point as: x,y,z,confidence.
            // (The original also built an unused Vector3 per point and a dead
            // local `e` at the end; both removed.)
            for (int i = 0; i < pointCount; i++)
            {
                var x = pointCloud.Points.Get(i * 4);
                var y = pointCloud.Points.Get(i * 4 + 1);
                var z = pointCloud.Points.Get(i * 4 + 2);
                var c = pointCloud.Points.Get(i * 4 + 3);

                Log.Debug("bertho", $"{x},{y},{z},{c}");
            }
        }
示例#5
0
 /// <summary>
 /// Tango depth callback; only logs that a point cloud arrived.
 /// </summary>
 /// <param name="p0">The point cloud delivered by the Tango service (unused).</param>
 public void OnPointCloudAvailable(TangoPointCloudData p0)
 {
     // Fixed the typo in the log message ("OnPointCloudAvaiable") and dropped
     // the needless interpolation of a constant string.
     Log.Debug(Tag, "Navigate OnPointCloudAvailable");
 }
示例#6
0
    /// <summary>
    /// Callback that gets called when depth is available from the Tango Service.
    /// Transforms the incoming point cloud into Unity world space, optionally
    /// rebuilds the debug mesh, and kicks off floor detection when requested.
    /// </summary>
    /// <param name="pointCloud">Depth information from Tango.</param>
    public void OnTangoPointCloudAvailable(TangoPointCloudData pointCloud)
    {
        m_mostRecentPointCloud = pointCloud;

        // Calculate the time since the last successful depth data
        // collection, in milliseconds. Skipped on the very first callback
        // (m_depthTimestamp is still 0.0 then).
        if (m_depthTimestamp != 0.0)
        {
            m_depthDeltaTime = (float)((pointCloud.m_timestamp - m_depthTimestamp) * 1000.0);
        }

        // Fill in the data to draw the point cloud.
        m_pointsCount = pointCloud.m_numPoints;
        if (m_pointsCount > 0)
        {
            _SetUpCameraData();

            DMatrix4x4 globalTLocal;
            bool       globalTLocalSuccess = m_tangoApplication.GetGlobalTLocal(out globalTLocal);
            if (!globalTLocalSuccess)
            {
                // Bail out without touching the mesh or timestamps; the
                // previous frame's data stays on screen.
                return;
            }

            DMatrix4x4 unityWorldTGlobal = DMatrix4x4.FromMatrix4x4(TangoSupport.UNITY_WORLD_T_START_SERVICE) * globalTLocal.Inverse;

            TangoPoseData poseData;

            // Query pose to transform point cloud to world coordinates, here we are using the timestamp that we get from depth.
            bool poseSuccess = _GetDevicePose(pointCloud.m_timestamp, out poseData);
            if (!poseSuccess)
            {
                return;
            }

            DMatrix4x4 unityWorldTDevice = unityWorldTGlobal * DMatrix4x4.TR(poseData.translation, poseData.orientation);

            // The transformation matrix that represents the point cloud's pose.
            // Explanation:
            // The point cloud, which is in Depth camera's frame, is put in Unity world's
            // coordinate system(wrt Unity world).
            // Then we are extracting the position and rotation from uwTuc matrix and applying it to
            // the point cloud's transform.
            DMatrix4x4 unityWorldTDepthCamera = unityWorldTDevice * m_deviceTDepthCamera;

            // Keep this GameObject at the origin; the world transform is baked
            // into the vertices below instead of the Transform component.
            transform.position = Vector3.zero;
            transform.rotation = Quaternion.identity;

            // Add offset to the point cloud depending on the offset from TangoDeltaPoseController.
            if (m_tangoDeltaPoseController != null)
            {
                m_mostRecentUnityWorldTDepthCamera = m_tangoDeltaPoseController.UnityWorldOffset * unityWorldTDepthCamera.ToMatrix4x4();
            }
            else
            {
                m_mostRecentUnityWorldTDepthCamera = unityWorldTDepthCamera.ToMatrix4x4();
            }

            // Converting points array to world space, accumulating the raw
            // (camera-space) z values for the average-depth stat as we go.
            m_overallZ = 0;
            for (int i = 0; i < m_pointsCount; ++i)
            {
                Vector3 point = pointCloud[i];
                m_points[i] = m_mostRecentUnityWorldTDepthCamera.MultiplyPoint3x4(point);
                m_overallZ += point.z;
            }

            m_overallZ       = m_overallZ / m_pointsCount;
            m_depthTimestamp = pointCloud.m_timestamp;

            if (m_updatePointsMesh)
            {
                // Need to update indices too! The mesh renders each vertex as
                // an individual point, so indices are simply 0..N-1.
                int[] indices = new int[m_pointsCount];
                for (int i = 0; i < m_pointsCount; ++i)
                {
                    indices[i] = i;
                }

                m_mesh.Clear();
                m_mesh.vertices = m_points;
                m_mesh.SetIndices(indices, MeshTopology.Points, 0);
            }

            // The color should be pose relative; we need to store enough info to go back to pose values.
            m_renderer.material.SetMatrix("depthCameraTUnityWorld", m_mostRecentUnityWorldTDepthCamera.inverse);

            // Try to find the floor using this set of depth points if requested.
            if (m_findFloorWithDepth)
            {
                _FindFloorWithDepth();
            }
        }
        else
        {
            // No points this frame: report zero average depth.
            m_overallZ = 0;
        }
    }
示例#7
0
    /// <summary>
    /// Callback that gets called when depth is available from the Tango Service.
    /// Looks up the device pose at the depth timestamp, transforms the point
    /// cloud into Unity world space, optionally rebuilds the debug mesh, and
    /// kicks off floor detection when requested.
    /// </summary>
    /// <param name="pointCloud">Depth information from Tango.</param>
    public void OnTangoPointCloudAvailable(TangoPointCloudData pointCloud)
    {
        // Calculate the time since the last successful depth data
        // collection, in milliseconds. Skipped on the very first callback
        // (m_depthTimestamp is still 0.0 then).
        if (m_depthTimestamp != 0.0)
        {
            m_depthDeltaTime = (float)((pointCloud.m_timestamp - m_depthTimestamp) * 1000.0);
        }

        // Fill in the data to draw the point cloud.
        m_pointsCount = pointCloud.m_numPoints;
        if (m_pointsCount > 0)
        {
            _SetUpCameraData();
            TangoCoordinateFramePair pair;
            TangoPoseData            poseData = new TangoPoseData();

            // Query pose to transform point cloud to world coordinates, here we are using the timestamp
            // that we get from depth. The base frame depends on whether an area
            // description (ADF) is driving localization.
            if (m_useAreaDescriptionPose)
            {
                pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_AREA_DESCRIPTION;
                pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            }
            else
            {
                pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
                pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;
            }

            PoseProvider.GetPoseAtTime(poseData, pointCloud.m_timestamp, pair);
            if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                // Invalid pose: skip this frame entirely; the previous frame's
                // data stays on screen.
                return;
            }

            Matrix4x4 startServiceTDevice = poseData.ToMatrix4x4();

            // The transformation matrix that represents the pointcloud's pose.
            // Explanation:
            // The pointcloud which is in Depth camera's frame, is put in unity world's
            // coordinate system(wrt unity world).
            // Then we are extracting the position and rotation from uwTuc matrix and applying it to
            // the PointCloud's transform.
            Matrix4x4 unityWorldTDepthCamera = m_unityWorldTStartService * startServiceTDevice * Matrix4x4.Inverse(m_imuTDevice) * m_imuTDepthCamera;

            // Keep this GameObject at the origin; the world transform is baked
            // into the vertices below instead of the Transform component.
            transform.position = Vector3.zero;
            transform.rotation = Quaternion.identity;

            // Add offset to the pointcloud depending on the offset from TangoDeltaPoseController
            Matrix4x4 unityWorldOffsetTDepthCamera;
            if (m_tangoDeltaPoseController != null)
            {
                unityWorldOffsetTDepthCamera = m_tangoDeltaPoseController.UnityWorldOffset * unityWorldTDepthCamera;
            }
            else
            {
                unityWorldOffsetTDepthCamera = unityWorldTDepthCamera;
            }

            // Converting points array to world space, accumulating the raw
            // (camera-space) z values for the average-depth stat as we go.
            m_overallZ = 0;
            for (int i = 0; i < m_pointsCount; ++i)
            {
                Vector3 point = pointCloud[i];
                m_points[i] = unityWorldOffsetTDepthCamera.MultiplyPoint3x4(point);
                m_overallZ += point.z;
            }

            m_overallZ       = m_overallZ / m_pointsCount;
            m_depthTimestamp = pointCloud.m_timestamp;

            if (m_updatePointsMesh)
            {
                // Need to update indices too! The mesh renders each vertex as
                // an individual point, so indices are simply 0..N-1.
                int[] indices = new int[m_pointsCount];
                for (int i = 0; i < m_pointsCount; ++i)
                {
                    indices[i] = i;
                }

                m_mesh.Clear();
                m_mesh.vertices = m_points;
                m_mesh.SetIndices(indices, MeshTopology.Points, 0);
            }

            // The color should be pose relative, we need to store enough info to go back to pose values.
            m_renderer.material.SetMatrix("depthCameraTUnityWorld", unityWorldOffsetTDepthCamera.inverse);

            // Try to find the floor using this set of depth points if requested.
            if (m_findFloorWithDepth)
            {
                _FindFloorWithDepth();
            }
        }
        else
        {
            // No points this frame: report zero average depth.
            m_overallZ = 0;
        }
    }
示例#8
0
 /// <summary>
 /// Tango depth callback; intentionally a no-op — the point cloud is ignored
 /// by this listener.
 /// </summary>
 /// <param name="p0">The point cloud delivered by the Tango service (unused).</param>
 public void OnPointCloudAvailable(TangoPointCloudData p0)
 {
 }