private void RefreshCameraFeedTexture()
    {
        if (!m_cameraManager.TryGetLatestImage(out XRCameraImage cameraImage))
        {
            return;
        }

        if (m_cameraFeedTexture == null || m_cameraFeedTexture.width != cameraImage.width ||
            m_cameraFeedTexture.height != cameraImage.height)
        {
            m_cameraFeedTexture = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RGBA32, false);
            // m_cameraFeedTexture = new Texture2D(Screen.width, Screen.height, TextureFormat.RGBA32, false);
        }

        // Pick a mirror transformation to compensate for the native camera
        // image's orientation relative to the screen.
        CameraImageTransformation imageTransformation = Input.deviceOrientation == DeviceOrientation.LandscapeRight
            ? CameraImageTransformation.MirrorY
            : CameraImageTransformation.MirrorX;
        XRCameraImageConversionParams conversionParams =
            new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);

        NativeArray<byte> rawTextureData = m_cameraFeedTexture.GetRawTextureData<byte>();

        try
        {
            unsafe
            {
                cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
        }
        finally
        {
            cameraImage.Dispose();
        }

        m_cameraFeedTexture.Apply();
        m_material.SetTexture("_CameraFeed", m_cameraFeedTexture);
    }
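Note: Convert writes straight into the CPU-side buffer returned by GetRawTextureData<byte>(), so the texture's dimensions and RGBA32 format must match the conversion output (the resize check above guarantees this). The unsafe block also requires "Allow 'unsafe' Code" to be enabled in the project's Player Settings.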
    private void RefreshCameraFeedTexture()
    {
        if (!_cameraManager.TryGetLatestImage(out XRCameraImage cameraImage))
        {
            Debug.Log("Failed to get the last image.");
            return;
        }

        RecreateTextureIfNeeded(cameraImage);

        CameraImageTransformation imageTransformation = (Input.deviceOrientation == DeviceOrientation.LandscapeRight)
            ? CameraImageTransformation.MirrorY
            : CameraImageTransformation.MirrorX;
        XRCameraImageConversionParams conversionParams =
            new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);

        NativeArray<byte> rawTextureData = _texture.GetRawTextureData<byte>();

        try
        {
            unsafe
            {
                cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
        }
        finally
        {
            cameraImage.Dispose();
        }

        _texture.Apply();
        PreviewTexture(_texture);
    }
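The RecreateTextureIfNeeded and PreviewTexture helpers called above are not shown in this example. A minimal sketch of the former, assuming _texture is the Texture2D field used by the rest of the method (the same resize-on-demand logic appears inline in the other examples on this page):

    // Hypothetical helper: (re)allocate the RGBA32 texture whenever the
    // incoming camera image resolution changes.
    private void RecreateTextureIfNeeded(XRCameraImage cameraImage)
    {
        if (_texture == null || _texture.width != cameraImage.width || _texture.height != cameraImage.height)
        {
            _texture = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RGBA32, false);
        }
    }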
Example #3
 /// <summary>
 /// Constructs a <see cref="CameraImageConversionParams"/> using the <paramref name="image"/>'s full resolution. That is,
 /// it sets <see cref="inputRect"/> to <c>(0, 0, image.width, image.height)</c> and <see cref="outputDimensions"/> to <c>(image.width, image.height)</c>.
 /// </summary>
 /// <param name="image">The source <see cref="CameraImage"/>.</param>
 /// <param name="format">The <c>TextureFormat</c> to convert to.</param>
 /// <param name="transformation">A <see cref="CameraImageTransformation"/> to apply (optional).</param>
 public CameraImageConversionParams(
     CameraImage image,
     TextureFormat format,
     CameraImageTransformation transformation = CameraImageTransformation.None)
 {
     m_InputRect        = new RectInt(0, 0, image.width, image.height);
     m_OutputDimensions = new Vector2Int(image.width, image.height);
     m_Format           = format;
     m_Transformation   = transformation;
 }
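For reference, a minimal call-site sketch, assuming a valid CameraImage named cameraImage; later AR Foundation versions expose this struct as XRCameraImageConversionParams, which the other examples on this page use:

 // Convert the full image to RGBA32 with no extra transformation applied.
 var conversionParams = new CameraImageConversionParams(cameraImage, TextureFormat.RGBA32);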
Example #4

    private void RefreshCameraFeedTexture()
    {
        if (!m_cameraManager.TryGetLatestImage(out XRCameraImage cameraImage))
        {
            return;
        }

        if (m_cameraFeedTexture == null || m_cameraFeedTexture.width != cameraImage.width || m_cameraFeedTexture.height != cameraImage.height)
        {
            m_cameraFeedTexture = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RGBA32, false);
        }

        CameraImageTransformation     imageTransformation = Input.deviceOrientation == DeviceOrientation.LandscapeRight ? CameraImageTransformation.MirrorY : CameraImageTransformation.MirrorX;
        XRCameraImageConversionParams conversionParams    = new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);

        NativeArray<byte> rawTextureData = m_cameraFeedTexture.GetRawTextureData<byte>();

        try
        {
            unsafe
            {
                cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
        }
        finally
        {
            cameraImage.Dispose();
        }

        m_cameraFeedTexture.Apply();
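        // First frame only: cache the materials of every mesh renderer under
        // the echoAR object so their textures can be sampled below.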
        if (idx == 0)
        {
            meshRenderers = m_echoAR.GetComponentsInChildren<MeshRenderer>();
            materials.Clear();
            foreach (MeshRenderer mr in meshRenderers)
            {
                materials.Add(mr.material);
            }
            idx++;
        }
        m_material.SetTexture("_CameraFeed", materials[currentTexture].mainTexture as Texture2D);
        //m_material.SetTexture("_CameraFeed", humanOverlayTextures[currentTexture]);
    }
Example #5
File: ARCamFeed.cs Project: weacw/Cloak
    unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        if (!arCameraManager.TryGetLatestImage(out XRCameraImage image))
        {
            return;
        }

        //figure out cam transform
        CameraImageTransformation camTransform = CameraImageTransformation.None;

        //assume portrait only for now
        camTransform = CameraImageTransformation.MirrorX;
        camImageScreen.localEulerAngles = new Vector3(0, 0, -90);

        //downsample to save fps if needed
        Vector2Int outputSize;

        if (image.width > 1280)
        {
            outputSize = new Vector2Int(image.width / 2, image.height / 2);
        }
        else
        {
            outputSize = new Vector2Int(image.width, image.height);
        }

        XRCameraImageConversionParams conversionParams = new XRCameraImageConversionParams {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Downsample if needed
            outputDimensions = outputSize,

            // Choose RGB format
            outputFormat = openCV.sendFormat,

            transformation = camTransform
        };

        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image
        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        image.Dispose();

        if (textureToSend == null)
        {
            textureToSend = new Texture2D(
                conversionParams.outputDimensions.x,
                conversionParams.outputDimensions.y,
                conversionParams.outputFormat,
                false);
        }

        textureToSend.LoadRawTextureData(buffer);
        textureToSend.Apply();

        if (!texturesCreated)
        {
            texturesCreated = true;
            //init textures here
            openCV.CreateWritableTexture(textureToSend.width, textureToSend.height);

            // Dispose the temporary buffer on this early-return path too.
            buffer.Dispose();
            return;
        }

        //process the image
        openCV.ProcessImage(textureToSend);

        // Done with our temporary data
        buffer.Dispose();
    }
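Note: buffer is allocated with Allocator.Temp, which Unity intends for allocations that live no longer than one frame, so it should be disposed on every exit path, including the early return taken on the first frame when the writable textures are created.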