private void RefreshCameraFeedTexture()
    {
        if (!_cameraManager.TryGetLatestImage(out XRCameraImage cameraImage))
        {
            Debug.Log("Failed to get the last image.");
            return;
        }

        RecreateTextureIfNeeded(cameraImage);

        CameraImageTransformation imageTransformation = (Input.deviceOrientation == DeviceOrientation.LandscapeRight)
            ? CameraImageTransformation.MirrorY
            : CameraImageTransformation.MirrorX;
        XRCameraImageConversionParams conversionParams =
            new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);

        NativeArray <byte> rawTextureData = _texture.GetRawTextureData <byte>();

        try
        {
            unsafe
            {
                cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
        }
        finally
        {
            cameraImage.Dispose();
        }

        _texture.Apply();
        PreviewTexture(_texture);
    }
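The RefreshCameraFeedTexture examples here presumably run once per rendered camera frame. A minimal sketch of that wiring, assuming the same _cameraManager field as in the example above (the handler and method names are hypothetical):

    // Sketch only: drive RefreshCameraFeedTexture from ARCameraManager's frameReceived event.
    void OnEnable()
    {
        _cameraManager.frameReceived += OnCameraFrameReceived;
    }

    void OnDisable()
    {
        _cameraManager.frameReceived -= OnCameraFrameReceived;
    }

    void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        RefreshCameraFeedTexture();
    }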
    private void RefreshCameraFeedTexture()
    {
        if (!m_cameraManager.TryGetLatestImage(out XRCameraImage cameraImage))
        {
            return;
        }

        if (m_cameraFeedTexture == null || m_cameraFeedTexture.width != cameraImage.width ||
            m_cameraFeedTexture.height != cameraImage.height)
        {
            m_cameraFeedTexture = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RGBA32, false);
            // m_cameraFeedTexture = new Texture2D(Screen.width, Screen.height, TextureFormat.RGBA32, false);
        }

        CameraImageTransformation imageTransformation = Input.deviceOrientation == DeviceOrientation.LandscapeRight
            ? CameraImageTransformation.MirrorY
            : CameraImageTransformation.MirrorX;
        XRCameraImageConversionParams conversionParams =
            new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);

        NativeArray <byte> rawTextureData = m_cameraFeedTexture.GetRawTextureData <byte>();

        try
        {
            unsafe
            {
                cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
        }
        finally
        {
            cameraImage.Dispose();
        }

        m_cameraFeedTexture.Apply();
        m_material.SetTexture("_CameraFeed", m_cameraFeedTexture);
    }
Example #3
    unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        // Attempt to get the latest camera image. If this method succeeds,
        // it acquires a native resource that must be disposed (see below).
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }

        var format = TextureFormat.RGBA32;

        if (m_Texture == null || m_Texture.width != image.width || m_Texture.height != image.height)
        {
            m_Texture = new Texture2D(image.width, image.height, format, false);
        }

        var conversionParams = new XRCameraImageConversionParams(image, format);

        var rawTextureData = m_Texture.GetRawTextureData <byte>();

        try
        {
            image.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
        }
        finally
        {
            image.Dispose();
        }

        m_Texture.Apply();
        isPlaying = true;
    }
Example #4
    unsafe void SaveImageCPU()
    {
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }

        var conversionParams = new XRCameraImageConversionParams
        {
            // Get the entire image.
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Downsample by 2.
            outputDimensions = new Vector2Int(image.width / 2, image.height / 2),

            // Choose RGBA format.
            outputFormat = TextureFormat.RGBA32,

            // Flip across the vertical axis (mirror image).
            transformation = CameraImageTransformation.MirrorY
        };

        // See how many bytes you need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image.
        var buffer = new NativeArray <byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        // The image was converted to RGBA32 format and written into the provided buffer
        // so you can dispose of the XRCameraImage. You must do this or it will leak resources.
        image.Dispose();

        // At this point, you can process the image, pass it to a computer vision algorithm, etc.
        // In this example, you apply it to a texture to visualize it.

        // You've got the data; let's put it into a texture so you can visualize it.
        m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        m_Texture.LoadRawTextureData(buffer);
        m_Texture.Apply();

        var bytes = m_Texture.EncodeToPNG();

        filePath = Application.persistentDataPath + "/camera_texture.png";
        File.WriteAllBytes(filePath, bytes);

        // Done with your temporary data, so you can dispose it.
        buffer.Dispose();
    }
 /// <summary>
 /// Convert the image with handle <paramref name="nativeHandle"/> using the provided
 /// <paramref cref="conversionParams"/>.
 /// </summary>
 /// <param name="nativeHandle">A unique identifier for the camera image to convert.</param>
 /// <param name="conversionParams">The parameters to use during the conversion.</param>
 /// <param name="destinationBuffer">A buffer to write the converted image to.</param>
 /// <param name="bufferLength">The number of bytes available in the buffer.</param>
 /// <returns>
 /// <c>true</c> if the image was converted and stored in <paramref name="destinationBuffer"/>.
 /// </returns>
 public override bool TryConvert(
     int nativeHandle,
     XRCameraImageConversionParams conversionParams,
     IntPtr destinationBuffer,
     int bufferLength)
 {
     return(NativeApi.UnityARCore_Camera_TryConvert(
                nativeHandle, conversionParams, destinationBuffer, bufferLength));
 }
 /// <summary>
 /// Similar to <see cref="ConvertAsync(int, XRCameraImageConversionParams)"/> but takes a delegate to
 /// invoke when the request is complete, rather than returning a request id.
 /// </summary>
 /// <remarks>
 /// If the first parameter to <paramref name="callback"/> is
 /// <see cref="AsyncCameraImageConversionStatus.Ready"/> then the <c>dataPtr</c> parameter must be valid
 /// for the duration of the invocation. The data may be destroyed immediately upon return. The
 /// <paramref name="context"/> parameter must be passed back to the <paramref name="callback"/>.
 /// </remarks>
 /// <param name="nativeHandle">A unique identifier for the camera image to convert.</param>
 /// <param name="conversionParams">The parameters to use during the conversion.</param>
 /// <param name="callback">A delegate which must be invoked when the request is complete, whether the
 /// conversion was successfully or not.</param>
 /// <param name="context">A native pointer which must be passed back unaltered to
 /// <paramref name="callback"/>.</param>
 public override void ConvertAsync(
     int nativeHandle,
     XRCameraImageConversionParams conversionParams,
     OnImageRequestCompleteDelegate callback,
     IntPtr context)
 {
     NativeApi.UnityARCore_Camera_CreateAsyncConversionRequestWithCallback(
         nativeHandle, conversionParams, callback, context);
 }
Example #7
    unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        // Attempt to get the latest camera image. If this method succeeds,
        // it acquires a native resource that must be disposed (see below).
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }

        // Display some information about the camera image
        m_ImageInfo.text = string.Format(
            "Image info:\n\twidth: {0}\n\theight: {1}\n\tplaneCount: {2}\n\ttimestamp: {3}\n\tformat: {4}",
            image.width, image.height, image.planeCount, image.timestamp, image.format);

        // Once we have a valid XRCameraImage, we can access the individual image "planes"
        // (the separate channels in the image). XRCameraImage.GetPlane provides
        // low-overhead access to this data. This could then be passed to a
        // computer vision algorithm. Here, we will convert the camera image
        // to an RGBA texture and draw it on the screen.

        // Choose an RGBA format.
        // See XRCameraImage.FormatSupported for a complete list of supported formats.
        var format = TextureFormat.RGBA32;

        if (m_Texture == null || m_Texture.width != image.width || m_Texture.height != image.height)
        {
            m_Texture = new Texture2D(image.width, image.height, format, false);
        }

        // Convert the image to format, flipping the image across the Y axis.
        // We can also get a sub rectangle, but we'll get the full image here.
        var conversionParams = new XRCameraImageConversionParams(image, format, CameraImageTransformation.MirrorY);

        // Texture2D allows us write directly to the raw texture data
        // This allows us to do the conversion in-place without making any copies.
        var rawTextureData = m_Texture.GetRawTextureData <byte>();

        try
        {
            image.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
        }
        finally
        {
            // We must dispose of the XRCameraImage after we're finished
            // with it to avoid leaking native resources.
            image.Dispose();
        }

        // Apply the updated texture data to our texture
        m_Texture.Apply();

        // Set the RawImage's texture so we can visualize it.
        m_RawImage.texture = m_Texture;
    }
        /// <summary>
        /// Gets the image data from ARFoundation, preps it, and drops it into captureTex.
        /// </summary>
        /// <returns>
        /// A <see cref="Task"/> that yields <c>true</c> if the capture was successful; otherwise <c>false</c>.
        /// </returns>
        private Task <bool> GrabScreenAsync()
        {
            // Grab the latest image from ARFoundation
            XRCameraImage image;

            if (!cameraManager.TryGetLatestImage(out image))
            {
                Debug.LogError("[CameraCaptureARFoundation] Could not get latest image!");
                return(Task.FromResult <bool>(false));
            }

            // Set up resizing parameters
            Vector2Int size             = resolution.AdjustSize(new Vector2Int(image.width, image.height));
            var        conversionParams = new XRCameraImageConversionParams
            {
                inputRect        = new RectInt(0, 0, image.width, image.height),
                outputDimensions = new Vector2Int(size.x, size.y),
                outputFormat     = TextureFormat.RGB24,
                transformation   = CameraImageTransformation.MirrorY,
            };

            // make sure we have a texture to store the resized image
            if (captureTex == null || captureTex.width != size.x || captureTex.height != size.y)
            {
                if (captureTex != null)
                {
                    GameObject.Destroy(captureTex);
                }
                captureTex = new Texture2D(size.x, size.y, TextureFormat.RGB24, false);
            }

            // Create a completion source to wait for the async operation
            TaskCompletionSource <bool> tcs = new TaskCompletionSource <bool>();

            // And do the resize!
            image.ConvertAsync(conversionParams, (status, p, data) =>
            {
                if (status == AsyncCameraImageConversionStatus.Ready)
                {
                    captureTex.LoadRawTextureData(data);
                    captureTex.Apply();
                }
                if (status == AsyncCameraImageConversionStatus.Ready || status == AsyncCameraImageConversionStatus.Failed)
                {
                    image.Dispose();

                    // TODO: Should we log the failure or fail the task? Previously this completed no matter what.
                    tcs.SetResult(status == AsyncCameraImageConversionStatus.Ready);
                }
            });

            // Return the completion source task so callers can await
            return(tcs.Task);
        }
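A short usage sketch for the Task-based variant above, assuming it is awaited from within the same class (the caller method name is hypothetical):

            // Sketch only: await the capture from an async caller.
            private async Task CaptureOnceAsync()
            {
                bool captured = await GrabScreenAsync();
                Debug.Log(captured ? "Screen grab succeeded." : "Screen grab failed.");
            }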
Example #9
    private unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        XRCameraImage image;

        if (!xrCameraSubsystem.TryGetLatestImage(out image))
        {
            return;
        }

        var conversionParams = new XRCameraImageConversionParams
        {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Downsample by 2
            outputDimensions = new Vector2Int(image.width / 2, image.height / 2),

            // Choose RGBA format
            outputFormat = TextureFormat.RGBA32,

            // Flip across the vertical axis (mirror image)
            //transformation = CameraImageTransformation.MirrorY
        };

        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image
        var buffer = new NativeArray <byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        // The image was converted to RGBA32 format and written into the provided buffer
        // so we can dispose of the CameraImage. We must do this or it will leak resources.
        image.Dispose();

        // At this point, we could process the image, pass it to a computer vision algorithm, etc.
        // In this example, we'll just apply it to a texture to visualize it.

        // We've got the data; let's put it into a texture so we can visualize it.
        m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        m_Texture.LoadRawTextureData(buffer);
        m_Texture.Apply();

        // Done with our temporary data
        buffer.Dispose();
        UnRegister();
    }
Example #10
    public unsafe void GetScreenShot()
    {
        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }
        var format = TextureFormat.RGBA32;

        Texture2D texture = new Texture2D(image.width, image.height, format, false);

        var conversionParams = new XRCameraImageConversionParams {
            inputRect        = new RectInt(0, 0, image.width, image.height),
            outputDimensions = new Vector2Int(image.width, image.height),
            outputFormat     = TextureFormat.RGBA32,
            transformation   = CameraImageTransformation.MirrorY
        };

        var rawTextureData = texture.GetRawTextureData <byte>();

        try
        {
            IntPtr ptr = new IntPtr(rawTextureData.GetUnsafePtr());
            image.Convert(conversionParams, ptr, rawTextureData.Length);
        }
        finally
        {
            // We must dispose of the XRCameraImage after we're finished
            // with it to avoid leaking native resources.
            image.Dispose();
        }
        // Apply the updated texture data to our texture
        texture.Apply();

        // Set the RawImage's texture so we can visualize it
        float ratio = (float)texture.height / texture.width;

        if (Fit != null)
        {
            Fit.aspectRatio = 1f / ratio;
            Fit.aspectMode  = AspectRatioFitter.AspectMode.EnvelopeParent;
        }
        if (background != null)
        {
            background.texture = texture;
            background.rectTransform.localEulerAngles = new Vector3(0, 0, 90);
            background.rectTransform.localScale       = new Vector3(ratio, ratio, 1f);
            background.enabled = true;
        }
        StartCoroutine(SaveTexture(texture));
    }
    private byte[] ConvertBufferToJpg(NativeArray <byte> buffer, XRCameraImageConversionParams conversionParams)
    {
        var texture = new Texture2D(conversionParams.outputDimensions.x,
                                    conversionParams.outputDimensions.y,
                                    conversionParams.outputFormat,
                                    false);

        texture.LoadRawTextureData(buffer);
        texture.Apply();

        var jpgData = texture.EncodeToJPG();

        Destroy(texture);

        return(jpgData);
    }
    // Get Image from the AR Camera, extract the raw data from the image
    private unsafe void CaptureARBuffer()
    {
        // Get the image in the ARSubsystemManager.cameraFrameReceived callback

        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            Debug.LogWarning("Capture AR Buffer returns nothing!!!!!!");
            return;
        }

        var conversionParams = new XRCameraImageConversionParams
        {
            // Get the full image
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Downsample by 2
            outputDimensions = new Vector2Int(image.width, image.height),

            // Color image format
            outputFormat = ConvertFormat,

            // Flip across the x axis
            transformation = CameraImageTransformation.MirrorX
        };
        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        Debug.Log("OnCameraFrameReceived, size == " + size + "w:" + image.width + " h:" + image.height + " planes=" + image.planeCount);


        // Allocate a buffer to store the image
        var buffer = new NativeArray <byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new System.IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        // The image data has been copied into the buffer. The XRCameraImage and the buffer
        // are disposed in the completion callback passed to PushFrame below, so they are not leaked.

        byte[] bytes = buffer.ToArray();
        monoProxy.StartCoroutine(PushFrame(bytes, image.width, image.height,
                                           () => { image.Dispose(); buffer.Dispose(); }));
    }
    unsafe void convertCPUImage()
    {
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            Debug.Log("Cant get image");
            return;
        }

        if (float.IsNegativeInfinity(ALPHA))
        {
            ALPHA      = (float)image.height / image.width;
            imageRatio = (float)(BETA / ALPHA);
        }

        var conversionParams = new XRCameraImageConversionParams {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),
            // Downsample by 2
            outputDimensions = new Vector2Int(image.width / 2, image.height / 2),
            // Choose RGBA format
            outputFormat = TextureFormat.RGBA32,
            // Flip across the vertical axis (mirror image)
            transformation = CameraImageTransformation.MirrorY
        };

        int size = image.GetConvertedDataSize(conversionParams);

        var buffer = new NativeArray <byte>(size, Allocator.Temp);

        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
        image.Dispose();

        Texture2D m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        m_Texture.LoadRawTextureData(buffer);
        m_Texture.Apply();
        buffer.Dispose();
        // pass image for mediapipe
        handProcessor.addFrameTexture(m_Texture);
    }
        /// <summary>
        /// Gets the image data from ARFoundation, preps it, and drops it into captureTex.
        /// </summary>
        /// <param name="aOnFinished">Gets called when this method is finished with getting the image.</param>
        private void GrabScreen(Action aOnFinished)
        {
            // Grab the latest image from ARFoundation
            XRCameraImage image;

            if (!cameraManager.TryGetLatestImage(out image))
            {
                Debug.LogError("[CameraCaptureARFoundation] Could not get latest image!");
                return;
            }

            // Set up resizing parameters
            Vector2Int size             = resolution.AdjustSize(new Vector2Int(image.width, image.height));
            var        conversionParams = new XRCameraImageConversionParams
            {
                inputRect        = new RectInt(0, 0, image.width, image.height),
                outputDimensions = new Vector2Int(size.x, size.y),
                outputFormat     = TextureFormat.RGB24,
                transformation   = CameraImageTransformation.MirrorY,
            };

            // make sure we have a texture to store the resized image
            if (captureTex == null || captureTex.width != size.x || captureTex.height != size.y)
            {
                if (captureTex != null)
                {
                    GameObject.Destroy(captureTex);
                }
                captureTex = new Texture2D(size.x, size.y, TextureFormat.RGB24, false);
            }

            // And do the resize!
            image.ConvertAsync(conversionParams, (status, p, data) =>
            {
                if (status == AsyncCameraImageConversionStatus.Ready)
                {
                    captureTex.LoadRawTextureData(data);
                    captureTex.Apply();
                }
                if (status == AsyncCameraImageConversionStatus.Ready || status == AsyncCameraImageConversionStatus.Failed)
                {
                    image.Dispose();
                    aOnFinished();
                }
            });
        }
        public static void GetPlaneDataRGB(out byte[] pixels, XRCameraImage image)
        {
            var conversionParams = new XRCameraImageConversionParams
            {
                inputRect        = new RectInt(0, 0, image.width, image.height),
                outputDimensions = new Vector2Int(image.width, image.height),
                outputFormat     = TextureFormat.RGB24,
                transformation   = CameraImageTransformation.None
            };

            int size = image.GetConvertedDataSize(conversionParams);

            pixels = new byte[size];
            GCHandle bufferHandle = GCHandle.Alloc(pixels, GCHandleType.Pinned);

            image.Convert(conversionParams, bufferHandle.AddrOfPinnedObject(), pixels.Length);
            bufferHandle.Free();
        }
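A brief usage sketch for GetPlaneDataRGB, assuming the caller has already acquired a valid XRCameraImage (the helper method and texture names are hypothetical):

        // Sketch only: turn the RGB24 bytes from GetPlaneDataRGB into a viewable texture.
        // Note: the caller still owns the XRCameraImage and must Dispose() it afterwards.
        public static Texture2D ToRgbTexture(XRCameraImage image)
        {
            byte[] pixels;
            GetPlaneDataRGB(out pixels, image);

            var rgbTexture = new Texture2D(image.width, image.height, TextureFormat.RGB24, false);
            rgbTexture.LoadRawTextureData(pixels);   // byte[] overload of LoadRawTextureData
            rgbTexture.Apply();
            return rgbTexture;
        }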
    private void RefreshCameraFeedTexture()
    {
        if (!m_cameraManager.TryGetLatestImage(out XRCameraImage cameraImage))
        {
            return;
        }

        if (m_cameraFeedTexture == null || m_cameraFeedTexture.width != cameraImage.width || m_cameraFeedTexture.height != cameraImage.height)
        {
            m_cameraFeedTexture = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RGBA32, false);
        }

        CameraImageTransformation     imageTransformation = Input.deviceOrientation == DeviceOrientation.LandscapeRight ? CameraImageTransformation.MirrorY : CameraImageTransformation.MirrorX;
        XRCameraImageConversionParams conversionParams    = new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);

        NativeArray <byte> rawTextureData = m_cameraFeedTexture.GetRawTextureData <byte>();

        try
        {
            unsafe
            {
                cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
        }
        finally
        {
            cameraImage.Dispose();
        }

        m_cameraFeedTexture.Apply();
        if (idx == 0)
        {
            meshRenderers = m_echoAR.GetComponentsInChildren <MeshRenderer>();
            materials.Clear();
            foreach (MeshRenderer mr in meshRenderers)
            {
                materials.Add(mr.material);
            }
            idx++;
        }
        m_material.SetTexture("_CameraFeed", materials[currentTexture].mainTexture as Texture2D);
        //m_material.SetTexture("_CameraFeed", humanOverlayTextures[currentTexture]);
    }
        private void ProcessImage(AsyncCameraImageConversionStatus status,
                                  XRCameraImageConversionParams conversionParams,
                                  NativeArray <byte> imageBuffer)
        {
            if (status != AsyncCameraImageConversionStatus.Ready)
            {
                // Calling getMeasurements with an empty image compels the SDK core to report an error,
                // triggering the session to request another frame.
                ScapeNative.citf_setYChannelPtr(this.scapeClient, IntPtr.Zero, 0, 0);
                ScapeNative.citf_getMeasurements(this.scapeClient);
                return;
            }

            CopyImageBuffer(imageBuffer);

            ScapeNative.citf_setYChannelPtr(this.scapeClient, imagePtr, ScapeImgWidth, ScapeImgHeight);
            ScapeNative.citf_getMeasurements(this.scapeClient);

            ScapeLogging.LogDebug("citf_getMeasurements() " + (Time.time - requestTime));
        }
Example #18
    void ProcessImage(AsyncCameraImageConversionStatus status, XRCameraImageConversionParams conversionParams, NativeArray <byte> data)
    {
        if (status != AsyncCameraImageConversionStatus.Ready)
        {
            Debug.LogErrorFormat("Async request failed with status {0}", status);
            return;
        }

        // Do something useful, like copy to a Texture2D or pass to a computer vision algorithm
        m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);
        Debug.Log("生成图片!");
        m_Texture.LoadRawTextureData(data);
        m_Texture.Apply();

        byte[] bytes = m_Texture.EncodeToPNG();
        //SaveImage("temp", bytes);
        ImageRecognition(bytes);
        // data is destroyed upon return; no need to dispose
    }
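ProcessImage above matches the callback signature expected by XRCameraImage.ConvertAsync. A minimal sketch of how such a request might be issued, assuming the same cameraManager field used in the other examples (the method name is hypothetical):

    // Sketch only: request an async conversion that invokes ProcessImage when it completes.
    void RequestAsyncConversion()
    {
        if (!cameraManager.TryGetLatestImage(out XRCameraImage image))
        {
            return;
        }

        // The NativeArray handed to ProcessImage is only valid for the duration of the callback.
        image.ConvertAsync(new XRCameraImageConversionParams(image, TextureFormat.RGBA32), ProcessImage);

        // Safe to dispose immediately; the pending request holds its own reference to the image data.
        image.Dispose();
    }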
    public static unsafe byte[] GetJpgBytesSync(ARCameraManager cameraManager)
    {
        if (!cameraManager.TryGetLatestImage(out var image))
        {
            return(null);
        }

        var conversionParams = new XRCameraImageConversionParams
        {
            inputRect        = new RectInt(0, 0, image.width, image.height),
            outputDimensions = new Vector2Int(image.width, image.height),
            outputFormat     = TextureFormat.RGBA32,
            transformation   = CameraImageTransformation.MirrorY
        };

        var size   = image.GetConvertedDataSize(conversionParams);
        var buffer = new NativeArray <byte>(size, Allocator.Temp);

        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
        image.Dispose();

        var texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        texture.LoadRawTextureData(buffer);
        texture.Apply();

        var bytes = texture.EncodeToJPG();

        buffer.Dispose();
        Destroy(texture);

        return(bytes);
    }
Example #20
File: ARCamFeed.cs Project: weacw/Cloak
    unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        if (!arCameraManager.TryGetLatestImage(out XRCameraImage image))
        {
            return;
        }

        //figure out cam transform
        CameraImageTransformation camTransform = CameraImageTransformation.None;

        //assume portrait only for now
        camTransform = CameraImageTransformation.MirrorX;
        camImageScreen.localEulerAngles = new Vector3(0, 0, -90);

        //downsample to save fps if needed
        Vector2Int outputSize;

        if (image.width > 1280)
        {
            outputSize = new Vector2Int(image.width / 2, image.height / 2);
        }
        else
        {
            outputSize = new Vector2Int(image.width, image.height);
        }

        XRCameraImageConversionParams conversionParams = new XRCameraImageConversionParams {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Downsample if needed
            outputDimensions = outputSize,

            // Choose RGB format
            outputFormat = openCV.sendFormat,

            transformation = camTransform
        };

        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image
        var buffer = new NativeArray <byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        image.Dispose();

        if (textureToSend == null)
        {
            textureToSend = new Texture2D(
                conversionParams.outputDimensions.x,
                conversionParams.outputDimensions.y,
                conversionParams.outputFormat,
                false);
        }

        textureToSend.LoadRawTextureData(buffer);
        textureToSend.Apply();

        if (!texturesCreated)
        {
            texturesCreated = true;
            //init textures here
            openCV.CreateWritableTexture(textureToSend.width, textureToSend.height);
            return;
        }

        //process the image
        openCV.ProcessImage(textureToSend);

        // Done with our temporary data
        buffer.Dispose();
    }
 public static void UnityARCore_Camera_CreateAsyncConversionRequestWithCallback(
     int nativeHandle, XRCameraImageConversionParams conversionParams,
     XRCameraSubsystem.OnImageRequestCompleteDelegate callback, IntPtr context)
 {
     callback(AsyncCameraImageConversionStatus.Disposed, conversionParams, IntPtr.Zero, 0, context);
 }
 public static int UnityARCore_Camera_CreateAsyncConversionRequest(
     int nativeHandle, XRCameraImageConversionParams conversionParams)
 {
     return(0);
 }
 public static bool UnityARCore_Camera_TryConvert(
     int nativeHandle, XRCameraImageConversionParams conversionParams,
     IntPtr buffer, int bufferLength)
 {
     return(false);
 }
 public static extern void UnityARCore_Camera_CreateAsyncConversionRequestWithCallback(
     int nativeHandle, XRCameraImageConversionParams conversionParams,
     XRCameraSubsystem.OnImageRequestCompleteDelegate callback, IntPtr context);
 public static extern bool UnityARCore_Camera_TryConvert(
     int nativeHandle, XRCameraImageConversionParams conversionParams,
     IntPtr buffer, int bufferLength);
 /// <summary>
 /// Create an asynchronous request to convert a camera image, similar to <see cref="TryConvert"/> except
 /// the conversion should happen on a thread other than the calling (main) thread.
 /// </summary>
 /// <param name="nativeHandle">A unique identifier for the camera image to convert.</param>
 /// <param name="conversionParams">The parameters to use during the conversion.</param>
 /// <returns>A unique identifier for this request.</returns>
 public override int ConvertAsync(
     int nativeHandle,
     XRCameraImageConversionParams conversionParams)
 {
     return(NativeApi.UnityARCore_Camera_CreateAsyncConversionRequest(nativeHandle, conversionParams));
 }
Example #27
        private IEnumerator Capture(bool anchor)
        {
            yield return(new WaitForSeconds(0.25f));

            XRCameraImage image;

            if (m_CameraManager.TryGetLatestImage(out image))
            {
                CoroutineJobCapture j = new CoroutineJobCapture();
                j.onConnect         = onConnect;
                j.onFailedToConnect = onFailedToConnect;
                j.server            = this.server;
                j.token             = this.token;
                j.bank   = this.bank;
                j.run    = (int)(this.imageRun & 0xEFFFFFFF);
                j.index  = this.imageIndex++;
                j.anchor = anchor;

                Camera     cam = Camera.main;
                Quaternion _q  = cam.transform.rotation;
                Matrix4x4  r   = Matrix4x4.Rotate(new Quaternion(_q.x, _q.y, -_q.z, -_q.w));
                Vector3    _p  = cam.transform.position;
                Vector3    p   = new Vector3(_p.x, _p.y, -_p.z);
                j.rotation   = r;
                j.position   = p;
                j.intrinsics = ARHelper.GetIntrinsics(m_CameraManager);
                j.width      = image.width;
                j.height     = image.height;

                if (rgbCapture)
                {
                    var conversionParams = new XRCameraImageConversionParams
                    {
                        inputRect        = new RectInt(0, 0, image.width, image.height),
                        outputDimensions = new Vector2Int(image.width, image.height),
                        outputFormat     = TextureFormat.RGB24,
                        transformation   = CameraImageTransformation.None
                    };
                    int size = image.GetConvertedDataSize(conversionParams);
                    j.pixels   = new byte[size];
                    j.channels = 3;
                    GCHandle bufferHandle = GCHandle.Alloc(j.pixels, GCHandleType.Pinned);
                    image.Convert(conversionParams, bufferHandle.AddrOfPinnedObject(), j.pixels.Length);
                    bufferHandle.Free();
                }
                else
                {
                    XRCameraImagePlane plane = image.GetPlane(0);                     // use the Y plane
                    j.pixels   = new byte[plane.data.Length];
                    j.channels = 1;
                    plane.data.CopyTo(j.pixels);
                }

                j.sessionFirstImage = sessionFirstImage;
                if (sessionFirstImage)
                {
                    sessionFirstImage = false;
                }

                jobs.Add(j);
                image.Dispose();

                m_cameraShutterClick.Play();
            }
        }
    // NOTE: some of the following steps to set up CPU image access come from
    // the official AR Foundation documentation. However, some functions have
    // different names than those described in the documentation,
    // e.g. cameraManager.frameReceived instead of cameraManager.cameraFrameReceived
    // https://docs.unity3d.com/Packages/[email protected]/manual/cpu-camera-image.html
    public unsafe void DetectOnImage(Vector2 userInput)
    {
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }

        var conversionParams = new XRCameraImageConversionParams
        {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),

            outputDimensions = new Vector2Int(Convert.ToInt32(image.width * scale.detectionScaleFactor), Convert.ToInt32(image.height * scale.detectionScaleFactor)),

            // NOTE: converting directly into a single channel could be an option,
            // but it is not certain that R8 represents grayscale in one channel.
            // NOTE 2: RGBA32 is not listed in the documentation as a supported format.
            outputFormat = TextureFormat.RGBA32,

            // Flip across the vertical axis (mirror image)
            transformation = CameraImageTransformation.None
        };

        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image
        var buffer = new NativeArray <byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        // The image was converted to RGBA32 format and written into the provided buffer,
        // so we can dispose of the XRCameraImage. We must do this or it will leak resources.
        image.Dispose();

        // At this point, we could process the image, pass it to a computer vision algorithm, etc.
        if (camTexture == null)
        {
            camTexture = new Texture2D(
                conversionParams.outputDimensions.x,
                conversionParams.outputDimensions.y,
                conversionParams.outputFormat,
                false
                );
        }

        camTexture.LoadRawTextureData(buffer);
        camTexture.Apply();

        Color32[] rawPixels   = camTexture.GetPixels32();
        Vector2[] resultArray = new Vector2[CORNERS];

        // Call to C++ Code
        float startT  = Time.realtimeSinceStartup;
        bool  success = ProcessImage(resultArray, rawPixels, userInput, conversionParams.outputDimensions.x, conversionParams.outputDimensions.y, true);
        float endT    = Time.realtimeSinceStartup;

        Debug.Log("DetectionTime: ");
        Debug.Log(endT - startT);

        imageToWorld.TransferIntoWorld(success, resultArray);
        //imageToWorld.ShowIndicator(success, resultArray);

        // Done with our temporary data
        buffer.Dispose();
    }
Example #29
    //   private void setupRawImage(Color color)
    //{
    //       if(colorImage.texture != null)
    //	{
    //		Texture2D oldTexture = (Texture2D) colorImage.texture;
    //		Destroy(oldTexture);
    //	}
    //	Debug.Log("SELECTED COLOR WAS " + color);

    //	var format = TextureFormat.RGBA32;
    //	Texture2D colorTexture = new Texture2D(selectedColorSide, selectedColorSide, format, false);
    //	var fillColorArray = colorTexture.GetPixels();
    //	for (var i = 0; i < fillColorArray.Length; ++i)
    //	{
    //		fillColorArray[i] = color;
    //	}

    //	colorTexture.SetPixels(fillColorArray);
    //	colorTexture.Apply();
    //	colorImage.texture = colorTexture;
    //}



    unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        if (!enableKeypointDetection)
        {
            return;
        }
        // Attempt to get the latest camera image. If this method succeeds,
        // it acquires a native resource that must be disposed (see below).
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }


        // Once we have a valid XRCameraImage, we can access the individual image "planes"
        // (the separate channels in the image). XRCameraImage.GetPlane provides
        // low-overhead access to this data. This could then be passed to a
        // computer vision algorithm. Here, we will convert the camera image
        // to an RGBA texture and draw it on the screen.

        // Choose an RGBA format.
        // See XRCameraImage.FormatSupported for a complete list of supported formats.
        var format = TextureFormat.RGBA32;

        if (m_Texture == null || m_Texture.width != image.width || m_Texture.height != image.height)
        {
            m_Texture   = new Texture2D(image.width, image.height, format, false);
            imageHeight = image.height;
            imageWidth  = image.width;
        }

        // Convert the image to format, flipping the image across the Y axis.
        // We can also get a sub rectangle, but we'll get the full image here.
        var conversionParams = new XRCameraImageConversionParams(image, format,
                                                                 CameraImageTransformation.None);

        // Texture2D allows us write directly to the raw texture data
        // This allows us to do the conversion in-place without making any copies.
        var rawTextureData = m_Texture.GetRawTextureData <byte>();

        try
        {
            image.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
        }
        finally
        {
            // We must dispose of the XRCameraImage after we're finished
            // with it to avoid leaking native resources.
            image.Dispose();
        }


        // Apply the updated texture data to our texture
        m_Texture.Apply();
        m_rotated = m_Texture;

        // set scaling for camera width to screen height
        scaleFactor = (double)Screen.height / image.width;
        Debug.Log("Scale factor is " + scaleFactor.ToString() + " Resized height should be " + (scaleFactor * image.height).ToString());
        // resize texture
        TextureScale.Point(m_rotated, (int)(m_rotated.width * scaleFactor) / downsample, (int)(m_rotated.height * scaleFactor) / downsample);
        m_rotated.Apply();
        Debug.Log("actual resized height is " + m_rotated.height.ToString());

        // get cropped from resized texture & crop
        pixels = m_rotated.GetPixels(0, (int)(m_rotated.height - Screen.width / downsample) / 2, (int)Screen.height / downsample, (int)Screen.width / downsample, 0);
        Destroy(m_rotated);

        // load cropped pixels into new texture
        Texture2D cropped_tex = new Texture2D((int)Screen.height / downsample, (int)Screen.width / downsample, TextureFormat.RGBA32, false);

        cropped_tex.SetPixels(pixels, 0);
        cropped_tex.Apply();

        for (int i = 0; i < cubes.Count; i++)
        {
            Destroy(cubes[i]);
        }
        cubes.Clear();

        // create mat of contours using scaled down cropped image
        matImg = new Mat(cropped_tex.height, cropped_tex.width, CvType.CV_8UC4);
        OpenCVForUnity.UnityUtils.Utils.texture2DToMat(cropped_tex, matImg);
        Destroy(cropped_tex);
        contourMat = getHolds(matImg);

        // create texture
        imgText = new Texture2D(contourMat.cols(), contourMat.rows(), TextureFormat.RGBA32, false);

        getKeyPoints(contourMat);
        Debug.Log("Length of keypoints is " + keyPix.Count.ToString());
        for (int i = 0; i < keyPix.Count; i++)
        {
            Debug.Log("Mat img is " + matImg);
            submats.Add(getSubmat(keyPix[i], matImg));
            // Color col = quantize(keyPix[i], matImg);
            // Imgproc.rectangle(matImg, new OpenCVForUnity.CoreModule.Rect((int)(keyPix[i][0] - keyPix[i][2] / 2), (int)(keyPix[i][1] - keyPix[i][2] / 2), (int)keyPix[i][2], (int)keyPix[i][2]), new Scalar(col.r, col.g, col.b, col.a), -4, 8);
            // colors.Add(col);
        }
        Debug.Log("Length of submats is " + submats.Count.ToString());
        addKeyPixCubes();

        Utils.matToTexture2D(matImg, imgText, true);

        //disposal
        if (m_RawImage.texture != null)
        {
            Destroy(m_RawImage.texture);
            // Destroy(cropped_tex);
            // Destroy(m_rotated);
        }

        // Set the RawImage's texture so we can visualize it.
        m_RawImage.texture = imgText;
        rawImage.SetNativeSize();
    }
Example #30
 public static extern int UnityARKit_Camera_CreateAsyncConversionRequest(
     int nativeHandle, XRCameraImageConversionParams conversionParams);