        /// <summary>
        /// INTERNAL USE: Update the Tango emulation state for color camera data.
        /// </summary>
        /// <param name="useByteBufferMethod">Whether to update emulation for byte-buffer method.</param>
        internal static void UpdateTangoEmulation(bool useByteBufferMethod)
        {
            // Get emulated position and rotation in Unity space.
            TangoPoseData            poseData = new TangoPoseData();
            TangoCoordinateFramePair pair;

            pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

            if (!PoseProvider.GetTimestampForColorEmulation(out m_lastColorEmulationTime))
            {
                Debug.LogError("Couldn't get a valid timestamp with which to emulate color camera. "
                               + "Color camera emulation will be skipped this frame.");
                return;
            }

            PoseProvider.GetPoseAtTime(poseData, m_lastColorEmulationTime, pair);
            if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                return;
            }

            Vector3    position;
            Quaternion rotation;

            TangoSupport.TangoPoseToWorldTransform(poseData, out position, out rotation);

            // Instantiate any resources that haven't been created yet.
            if (!m_emulationIsInitialized)
            {
                _InitializeResourcesForEmulation();
                m_emulationIsInitialized = true;
            }

            // Render.
            EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedColorRenderTexture,
                                                                      EmulatedEnvironmentRenderHelper.EmulatedDataType.COLOR_CAMERA,
                                                                      position, rotation);

            m_emulationIsDirty = true;
        }
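
        // --- Consumption sketch (not part of the SDK source above) ---
        // A minimal illustration of how the emulated color state set above might be consumed,
        // assuming m_emulationIsDirty means "a new emulated color frame has been rendered".
        // TryGetEmulatedColorTexture is a hypothetical helper; only m_emulationIsDirty and
        // m_emulatedColorRenderTexture appear in the code above.
        internal static bool TryGetEmulatedColorTexture(out RenderTexture colorTexture)
        {
            colorTexture = m_emulatedColorRenderTexture;

            if (!m_emulationIsDirty)
            {
                // Nothing new has been rendered since the last successful read.
                return false;
            }

            m_emulationIsDirty = false;
            return true;
        }
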
        /// <summary>
        /// INTERNAL USE: Update the Tango emulation state for depth data.
        /// </summary>
        internal static void UpdateTangoEmulation()
        {
            m_emulatedPointCloud.Clear();

            // The timestamp should be something in the past; the depth cloud is emulated based on it.
            if (!PoseProvider.GetTimestampForDepthEmulation(out m_lastDepthEmulationTime))
            {
                Debug.LogError("Couldn't get a valid timestamp with which to emulate depth data. "
                               + "Depth emulation will be skipped this frame.");
                return;
            }

            // Get emulated position and rotation in Unity space.
            TangoPoseData            poseData = new TangoPoseData();
            TangoCoordinateFramePair pair;

            pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

            PoseProvider.GetPoseAtTime(poseData, m_lastDepthEmulationTime, pair);
            if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                return;
            }

            Vector3    position;
            Quaternion rotation;

            TangoSupport.TangoPoseToWorldTransform(poseData, out position, out rotation);

            // Instantiate any resources that haven't been created yet.
            _InternResourcesForEmulation();

            // Render emulated depth camera data.
            EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedDepthTexture,
                                                                      EmulatedEnvironmentRenderHelper.EmulatedDataType.DEPTH,
                                                                      position, rotation);

            // Capture rendered depth points from texture.
            RenderTexture.active = m_emulatedDepthTexture;
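            // ReadPixels copies from the currently active RenderTexture into the CPU-readable
            // capture texture so the encoded depth values can be read back with GetPixels32.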
            m_emulationCaptureTexture.ReadPixels(new Rect(0, 0, m_emulatedDepthTexture.width, m_emulatedDepthTexture.height), 0, 0);
            m_emulationCaptureTexture.Apply();

            // Extract captured data.
            Color32[] depthDataAsColors = m_emulationCaptureTexture.GetPixels32();

            // Convert depth texture to positions in camera space.
            Matrix4x4 projectionMatrix        = GL.GetGPUProjectionMatrix(EmulatedEnvironmentRenderHelper.m_emulationCamera.projectionMatrix, false);
            Matrix4x4 reverseProjectionMatrix = projectionMatrix.inverse;
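
            // Reconstruct a view-space point for each texel below. The emulated depth shader is
            // assumed to pack pre-divide clip-space z into the R and G channels (integer part
            // offset by 128 in R, fractional part in G). Inverting the projection's z row,
            // (clipZ - m23) / m22, recovers view-space depth, and multiplying that by m32 gives
            // the clip-space w used as the perspective divisor. Scaling the texel's NDC x/y by
            // that divisor rebuilds the full clip-space position, which the inverse projection
            // matrix maps back to view space; the sign flips on y and z then bring the point
            // into the convention expected by the emulated point cloud.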

            float width  = m_emulationCaptureTexture.width;
            float height = m_emulationCaptureTexture.height;

            for (int yTexel = 0; yTexel < height; yTexel++)
            {
                for (int xTexel = 0; xTexel < width; xTexel++)
                {
                    Color32 depthAsColor = depthDataAsColors[xTexel + (yTexel * m_emulationCaptureTexture.width)];
                    float   clipSpaceZ   = (depthAsColor.r - 128f) + (depthAsColor.g / 255f);

                    float ndcSpaceZ          = (clipSpaceZ - projectionMatrix.m23) / projectionMatrix.m22;
                    float perspectiveDivisor = ndcSpaceZ * projectionMatrix.m32;

                    float ndcSpaceX = (((xTexel + 0.5f) / width) * 2f) - 1;
                    float ndcSpaceY = (((yTexel + 0.5f) / height) * 2f) - 1;

                    Vector4 clipSpacePos = new Vector4(ndcSpaceX * perspectiveDivisor, ndcSpaceY * perspectiveDivisor, clipSpaceZ, perspectiveDivisor);
                    Vector4 viewSpacePos = reverseProjectionMatrix * clipSpacePos;

                    Vector3 emulatedDepthPos = new Vector3(viewSpacePos.x, -viewSpacePos.y, -viewSpacePos.z);

                    if (emulatedDepthPos.z > MIN_POINT_DISTANCE && emulatedDepthPos.z < MAX_POINT_DISTANCE)
                    {
                        m_emulatedPointCloud.Add(emulatedDepthPos);
                    }
                }
            }

            m_emulationIsDirty = true;
        }

        /// <summary>
        /// INTERNAL USE: Update the Tango emulation state for depth data.
        ///
        /// Make sure this is only called once per frame.
        /// </summary>
        internal static void UpdateTangoEmulation()
        {
            m_emulatedPointCloud.Clear();

            // The timestamp should be something in the past; the depth cloud is emulated based on it.
            m_lastDepthEmulationTime = PoseProvider.GetTimestampForDepthEmulation();

            // Get emulated position and rotation in Unity space.
            TangoPoseData            poseData = new TangoPoseData();
            TangoCoordinateFramePair pair;

            pair.baseFrame   = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_START_OF_SERVICE;
            pair.targetFrame = TangoEnums.TangoCoordinateFrameType.TANGO_COORDINATE_FRAME_DEVICE;

            PoseProvider.GetPoseAtTime(poseData, m_lastDepthEmulationTime, pair);
            if (poseData.status_code != TangoEnums.TangoPoseStatusType.TANGO_POSE_VALID)
            {
                return;
            }

            Vector3    position;
            Quaternion rotation;

            TangoSupport.TangoPoseToWorldTransform(poseData, out position, out rotation);

            // Instantiate any resources that haven't been created yet.
            if (m_emulatedDepthTexture == null)
            {
                m_emulatedDepthTexture = new RenderTexture(NUM_X_DEPTH_SAMPLES, NUM_Y_DEPTH_SAMPLES, 24, RenderTextureFormat.ARGB32);
            }

            if (m_emulationCaptureTexture == null)
            {
                m_emulationCaptureTexture = new Texture2D(NUM_X_DEPTH_SAMPLES, NUM_Y_DEPTH_SAMPLES, TextureFormat.ARGB32, false);
            }

            if (m_emulatedDepthShader == null)
            {
                // Find depth shader by searching for it in project.
                string[] foundAssetGuids = UnityEditor.AssetDatabase.FindAssets("DepthEmulation t:Shader");
                if (foundAssetGuids.Length > 0)
                {
                    string assetPath = UnityEditor.AssetDatabase.GUIDToAssetPath(foundAssetGuids[0]);
                    m_emulatedDepthShader = UnityEditor.AssetDatabase.LoadAssetAtPath(assetPath, typeof(Shader)) as Shader;
                }
            }
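
            // Note: UnityEditor.AssetDatabase is an Editor-only API, so this lookup is expected
            // to live in Editor-only code (an Editor assembly or an #if UNITY_EDITOR block).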

            // Render emulated depth camera data.
            EmulatedEnvironmentRenderHelper.RenderEmulatedEnvironment(m_emulatedDepthTexture, m_emulatedDepthShader,
                                                                      position, rotation);

            // Capture rendered depth points from texture.
            RenderTexture.active = m_emulatedDepthTexture;
            m_emulationCaptureTexture.ReadPixels(new Rect(0, 0, m_emulatedDepthTexture.width, m_emulatedDepthTexture.height), 0, 0);
            m_emulationCaptureTexture.Apply();

            // Extract captured data.
            Color32[] depthDataAsColors = m_emulationCaptureTexture.GetPixels32();

            // Convert depth texture to positions in camera space.
            Matrix4x4 projectionMatrix        = GL.GetGPUProjectionMatrix(EmulatedEnvironmentRenderHelper.m_emulationCamera.projectionMatrix, false);
            Matrix4x4 reverseProjectionMatrix = projectionMatrix.inverse;
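
            // As in the other depth-emulation variant, the loop below assumes the R and G
            // channels encode pre-divide clip-space depth; each texel is unprojected through
            // the inverse projection matrix back to a view-space point before being
            // range-checked and added to the point cloud.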

            float width  = m_emulationCaptureTexture.width;
            float height = m_emulationCaptureTexture.height;

            for (int yTexel = 0; yTexel < height; yTexel++)
            {
                for (int xTexel = 0; xTexel < width; xTexel++)
                {
                    Color32 depthAsColor = depthDataAsColors[xTexel + (yTexel * m_emulationCaptureTexture.width)];
                    float   clipSpaceZ   = (depthAsColor.r - 128f) + (depthAsColor.g / 255f);

                    float ndcSpaceZ          = (clipSpaceZ - projectionMatrix.m23) / projectionMatrix.m22;
                    float perspectiveDivisor = ndcSpaceZ * projectionMatrix.m32;

                    float ndcSpaceX = (((xTexel + 0.5f) / width) * 2f) - 1;
                    float ndcSpaceY = (((yTexel + 0.5f) / height) * 2f) - 1;

                    Vector4 clipSpacePos = new Vector4(ndcSpaceX * perspectiveDivisor, ndcSpaceY * perspectiveDivisor, clipSpaceZ, perspectiveDivisor);
                    Vector4 viewSpacePos = reverseProjectionMatrix * clipSpacePos;

                    Vector3 emulatedDepthPos = new Vector3(viewSpacePos.x, -viewSpacePos.y, -viewSpacePos.z);

                    if (emulatedDepthPos.z > MIN_POINT_DISTANCE && emulatedDepthPos.z < MAX_POINT_DISTANCE)
                    {
                        m_emulatedPointCloud.Add(emulatedDepthPos);
                    }
                }
            }
        }
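
        // --- Usage sketch (not part of the SDK source above) ---
        // A minimal illustration of how the depth emulation update might be driven, assuming it
        // should run exactly once per rendered frame as the summary requires. The
        // EmulationUpdateForFrame method and m_lastEmulationUpdateTime field are hypothetical;
        // only UpdateTangoEmulation() comes from the code above.
#if UNITY_EDITOR
        private static double m_lastEmulationUpdateTime = -1.0;

        /// <summary>
        /// Hypothetical per-frame driver that guards against calling the emulation update
        /// more than once for the same frame timestamp.
        /// </summary>
        internal static void EmulationUpdateForFrame(double frameTimestamp)
        {
            if (frameTimestamp <= m_lastEmulationUpdateTime)
            {
                // Already updated for this frame; skip the redundant work.
                return;
            }

            m_lastEmulationUpdateTime = frameTimestamp;
            UpdateTangoEmulation();
        }
#endif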