unsafe void OnCameraFrameReceied(ARCameraFrameEventArgs eventArgs)
    {
        // Grab the most recent CPU camera image; bail out when none is ready.
        // NOTE(review): TryGetLatestImage is the deprecated name of
        // TryAcquireLatestCpuImage in newer AR Foundation versions — confirm the
        // package version before renaming.
        XRCpuImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }

        try
        {
            // Convert the full image to RGBA32 with no transformation.
            var conversionParams = new XRCpuImage.ConversionParams
                                   (
                image,
                TextureFormat.RGBA32,
                XRCpuImage.Transformation.None
                                   );

            // (Re)create the destination texture when the incoming frame size changes.
            if (mTexture == null || mTexture.width != image.width || mTexture.height != image.height)
            {
                mTexture = new Texture2D(
                    conversionParams.outputDimensions.x,
                    conversionParams.outputDimensions.y,
                    conversionParams.outputFormat,
                    false);
            }

            // Convert directly into the texture's raw pixel buffer (no extra copy).
            // Fix: do NOT Dispose this NativeArray — it is a view into the texture's
            // own memory (the original's buffer.Dispose() was incorrect).
            var buffer = mTexture.GetRawTextureData<byte>();

            image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

            mTexture.Apply();
            mRenderer.material.mainTexture = mTexture;
        }
        finally
        {
            // Fix: release the native image even when the conversion throws,
            // otherwise the XRCpuImage leaks native resources.
            image.Dispose();
        }
    }
    /// <summary>
    /// 获取摄像头原始数据 (grabs the latest raw camera frame and feeds it to the
    /// native video recorder while a recording is in progress).
    /// </summary>
    private unsafe void ScreenRecordRawImage()
    {
        if (!cameraManager.TryAcquireLatestCpuImage(out XRCpuImage image))
        {
            return;
        }

        // Fix: the original leaked the native image whenever isRecording was false.
        if (!isRecording)
        {
            image.Dispose();
            return;
        }

        // Lazily create the staging texture and start native recording on the first frame.
        if (renderTexture == null)
        {
            renderTexture    = new Texture2D(image.width, image.height, TextureFormat.BGRA32, false);
            conversionParams = new XRCpuImage.ConversionParams(image, TextureFormat.BGRA32);
            // NOTE(review): caching a raw pointer into the texture's pixel data assumes
            // the backing buffer is never reallocated — confirm this holds across Apply().
            intPtr           = new IntPtr(renderTexture.GetRawTextureData<byte>().GetUnsafePtr());
            NativeAPI.StartRecordVideo(image.width, image.height, recordType);
        }

        var rawTextureData = renderTexture.GetRawTextureData<byte>();
        try
        {
            // Convert directly into the texture's pixel buffer.
            image.Convert(conversionParams, intPtr, rawTextureData.Length);
        }
        finally
        {
            // Release the native image regardless of conversion success.
            image.Dispose();
        }

        renderTexture.Apply();

        NativeAPI.SendVideoData(renderTexture.GetRawTextureData(), rawTextureData.Length);
    }
    /// <summary>
    /// 获取摄像头原始数据 (captures one raw camera frame, encodes it as PNG and
    /// hands it to native code via NativeAPI.ScreenDidShot).
    /// </summary>
    private unsafe void ScreenShotRawImage()
    {
        if (!cameraManager.TryAcquireLatestCpuImage(out XRCpuImage image))
        {
            return;
        }

        // Lazily create the staging texture and cache the conversion state.
        if (renderTexture == null)
        {
            renderTexture    = new Texture2D(image.width, image.height, TextureFormat.BGRA32, false);
            conversionParams = new XRCpuImage.ConversionParams(image, TextureFormat.BGRA32);
            // NOTE(review): caching a raw pointer into the texture's pixel data assumes
            // the backing buffer is never reallocated — confirm this holds.
            intPtr           = new IntPtr(renderTexture.GetRawTextureData<byte>().GetUnsafePtr());
        }

        var rawTextureData = renderTexture.GetRawTextureData<byte>();

        try
        {
            image.Convert(conversionParams, intPtr, rawTextureData.Length);
        }
        finally
        {
            // Always release the native image, even if the conversion throws.
            image.Dispose();
        }

        renderTexture.Apply();

        byte[] rawData = renderTexture.EncodeToPNG();
        NativeAPI.ScreenDidShot(rawData, rawData.Length, recordType);

        // Fix: destroy the texture before dropping the reference; merely nulling it
        // leaked the native texture memory until Unity unloaded unused assets.
        UnityEngine.Object.Destroy(renderTexture);
        renderTexture = null;
    }
Example #4
0
    /// <summary>
    /// Per-frame camera callback: converts the latest CPU camera image to RGBA32,
    /// loads it into m_Texture and runs Detect() on it.
    /// </summary>
    unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        if (!arCameraManager.TryAcquireLatestCpuImage(out XRCpuImage image))
        {
            return;
        }

        // Full-resolution RGBA32 conversion, no transformation.
        var conversionParams = new XRCpuImage.ConversionParams
        {
            inputRect        = new RectInt(0, 0, image.width, image.height),
            outputDimensions = new Vector2Int(image.width, image.height),
            outputFormat     = TextureFormat.RGBA32,
            transformation   = XRCpuImage.Transformation.None
        };
        int imageSize = image.GetConvertedDataSize(conversionParams);
        var buffer    = new NativeArray<byte>(imageSize, Allocator.Temp);

        try
        {
            image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
        }
        finally
        {
            // Fix: dispose in finally so a failed conversion cannot leak the native image.
            image.Dispose();
        }

        // Fix: reuse the texture across frames instead of allocating a new Texture2D
        // every callback, which leaked native texture memory and churned the GC.
        if (this.m_Texture == null ||
            this.m_Texture.width  != conversionParams.outputDimensions.x ||
            this.m_Texture.height != conversionParams.outputDimensions.y)
        {
            this.m_Texture = new Texture2D(
                conversionParams.outputDimensions.x,
                conversionParams.outputDimensions.y,
                conversionParams.outputFormat,
                false);
        }

        this.m_Texture.LoadRawTextureData(buffer);
        this.m_Texture.Apply();
        buffer.Dispose();

        Detect();
    }
Example #5
0
        /// <summary>
        /// Converts the latest CPU camera image into a half-resolution RGBA32 texture
        /// (mirrored about the Y axis) stored in RGB_Texture.
        /// Reference: https://docs.unity3d.com/Packages/[email protected]/manual/cpu-camera-image.html
        /// </summary>
        unsafe void OnARCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
        {
            if (!CameraManager.TryAcquireLatestCpuImage(out XRCpuImage image))
            {
                return;
            }

            var conversionParams = new XRCpuImage.ConversionParams
            {
                inputRect        = new RectInt(0, 0, image.width, image.height),
                outputDimensions = new Vector2Int(image.width / 2, image.height / 2),
                outputFormat     = TextureFormat.RGBA32,
                transformation   = XRCpuImage.Transformation.MirrorY
            };

            int size   = image.GetConvertedDataSize(conversionParams);
            var buffer = new NativeArray<byte>(size, Allocator.Temp);

            try
            {
                image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
            }
            finally
            {
                // Fix: dispose in finally so a failed conversion cannot leak the native image.
                image.Dispose();
            }

            // Lazily create the destination texture, then upload the converted pixels.
            if (RGB_Texture == null)
            {
                RGB_Texture = new Texture2D(conversionParams.outputDimensions.x, conversionParams.outputDimensions.y, conversionParams.outputFormat, false);
            }

            RGB_Texture.LoadRawTextureData(buffer);
            RGB_Texture.Apply();

            buffer.Dispose();
        }
Example #6
0
 /// <summary>
 /// Convert the image with handle <paramref name="nativeHandle"/> using the provided
 /// <paramref name="conversionParams"/>.
 /// </summary>
 /// <param name="nativeHandle">A unique identifier for the camera image to convert.</param>
 /// <param name="conversionParams">The parameters to use during the conversion.</param>
 /// <param name="destinationBuffer">A buffer to write the converted image to.</param>
 /// <param name="bufferLength">The number of bytes available in the buffer.</param>
 /// <returns>
 /// <c>true</c> if the image was converted and stored in <paramref name="destinationBuffer"/>.
 /// </returns>
 public override bool TryConvert(
     int nativeHandle,
     XRCpuImage.ConversionParams conversionParams,
     IntPtr destinationBuffer,
     int bufferLength)
 {
     // Delegate synchronously to the native implementation.
     return(Native.TryConvert(nativeHandle, conversionParams, destinationBuffer, bufferLength));
 }
Example #7
0
 /// <summary>
 /// Similar to <see cref="ConvertAsync(int, XRCpuImage.ConversionParams)"/> but takes a delegate to
 /// invoke when the request is complete, rather than returning a request id.
 /// </summary>
 /// <remarks>
 /// If the first parameter to <paramref name="callback"/> is
 /// <see cref="XRCpuImage.AsyncConversionStatus.Ready"/> then the <c>dataPtr</c> parameter must be valid
 /// for the duration of the invocation. The data may be destroyed immediately upon return. The
 /// <paramref name="context"/> parameter must be passed back to the <paramref name="callback"/>.
 /// </remarks>
 /// <param name="nativeHandle">A unique identifier for the camera image to convert.</param>
 /// <param name="conversionParams">The parameters to use during the conversion.</param>
 /// <param name="callback">A delegate which must be invoked when the request is complete, whether the
 /// conversion was successful or not.</param>
 /// <param name="context">A native pointer which must be passed back unaltered to
 /// <paramref name="callback"/>.</param>
 public override void ConvertAsync(
     int nativeHandle,
     XRCpuImage.ConversionParams conversionParams,
     OnImageRequestCompleteDelegate callback,
     IntPtr context)
 {
     // Queue the request with the native layer; completion is reported via callback.
     Native.CreateAsyncConversionRequestWithCallback(
         nativeHandle, conversionParams, callback, context);
 }
    /// <summary>
    /// Captures the latest CPU camera frame, converts it to RGBA32 (mirrored about Y)
    /// and returns it encoded as a JPG. Returns <c>null</c> when no CPU image is available.
    /// </summary>
    /// <returns>JPG-encoded frame bytes, or <c>null</c> if no frame could be acquired.</returns>
    public unsafe byte[] CamGetFrame()
    {
        XRCpuImage image;

        if (!m_CameraManager.TryAcquireLatestCpuImage(out image))
        {
            return null;
        }

        var conversionParams = new XRCpuImage.ConversionParams
        {
            // Get the entire image.
            inputRect = new RectInt(0, 0, image.width, image.height),

            outputDimensions = new Vector2Int(image.width, image.height),

            // Choose RGBA format.
            outputFormat = TextureFormat.RGBA32,

            // Flip across the vertical axis (mirror image).
            transformation = XRCpuImage.Transformation.MirrorY
        };

        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image.
        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        try
        {
            // Extract the image data.
            image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
            Debug.Log("buffer.Length" + buffer.Length);
        }
        finally
        {
            // Fix: dispose in finally so a failed conversion cannot leak the native image.
            image.Dispose();
        }

        // Put the converted pixels into a temporary texture so we can JPG-encode them.
        Texture2D m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        m_Texture.LoadRawTextureData(buffer);
        m_Texture.Apply();
        buffer.Dispose();

        byte[] bb = m_Texture.EncodeToJPG(100);
        // Destroy the temporary texture so its native memory is released promptly.
        Destroy(m_Texture);
        return bb;
    }
        /// <summary>
        /// Updates m_cameraTexture with the latest CPU camera frame, downsampled by 4,
        /// converted to RGBA32 and mirrored about the Y axis.
        /// https://docs.unity3d.com/Packages/[email protected]/manual/cpu-camera-image.html
        /// </summary>
        private unsafe void UpdateCameraTexture()
        {
            if (!CameraManager.TryAcquireLatestCpuImage(out XRCpuImage img))
            {
                return;
            }

            // Fix: `using` guarantees the native image is released even when the
            // conversion throws (the original disposed it manually, leaking on error).
            using (img)
            {
                XRCpuImage.ConversionParams conversionParams = new XRCpuImage.ConversionParams
                {
                    // Get the entire image.
                    inputRect = new RectInt(0, 0, img.width, img.height),

                    // Downsample by 4.
                    outputDimensions = new Vector2Int(img.width / 4, img.height / 4),

                    // Choose RGBA format.
                    outputFormat = TextureFormat.RGBA32,

                    // Flip across the vertical axis (mirror image).
                    transformation = XRCpuImage.Transformation.MirrorY
                };

                // See how many bytes you need to store the final image.
                int size = img.GetConvertedDataSize(conversionParams);

                // Allocate a buffer to store the image.
                NativeArray<byte> buffer = new NativeArray<byte>(size, Allocator.Temp);

                // Extract the image data.
                img.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

                // Lazily create the destination texture, then upload the pixels.
                if (m_cameraTexture == null)
                {
                    m_cameraTexture = new Texture2D(
                        conversionParams.outputDimensions.x,
                        conversionParams.outputDimensions.y,
                        conversionParams.outputFormat,
                        false);
                }

                m_cameraTexture.LoadRawTextureData(buffer);
                m_cameraTexture.Apply();

                // Done with the temporary buffer.
                buffer.Dispose();
            }
        }
    // Converts cpuImage into texture's pixel buffer and uploads it to the GPU.
    // NOTE(review): the texture is created as RGB565 while the conversion outputs R16 —
    // both are 2 bytes per pixel so the buffer sizes line up, but the formats differ.
    // This looks like a deliberate trick for displaying single-channel (e.g. depth)
    // data through a displayable format — confirm before changing either format.
    // The caller retains ownership of cpuImage; it is not disposed here.
    private void UpdateRawImage(ref Texture2D texture, XRCpuImage cpuImage)
    {
        // (Re)create the destination texture when the incoming image size changes.
        if (texture == null || texture.width != cpuImage.width || texture.height != cpuImage.height)
        {
            texture = new Texture2D(cpuImage.width, cpuImage.height, TextureFormat.RGB565, false);
        }

        var conversionParams = new XRCpuImage.ConversionParams(cpuImage, TextureFormat.R16);
        var rawTextureData   = texture.GetRawTextureData <byte>();

        // Convert directly into the texture's raw buffer, then upload.
        cpuImage.Convert(conversionParams, rawTextureData);
        texture.Apply();
    }
Example #11
0
        /// <summary>
        /// Copies <paramref name="cpuImage"/> into <paramref name="texture"/>'s raw pixel
        /// buffer — mirrored about the Y axis, keeping the image's native format — and
        /// uploads the result to the GPU. The caller owns both arguments.
        /// </summary>
        private static void UpdateRawImage(Texture2D texture, XRCpuImage cpuImage)
        {
            var convParams = new XRCpuImage.ConversionParams(
                cpuImage, cpuImage.format.AsTextureFormat(), XRCpuImage.Transformation.MirrorY);

            var destination = texture.GetRawTextureData <byte>();

            // The destination buffer and the converted image must be the same size.
            Debug.Assert(
                destination.Length ==
                cpuImage.GetConvertedDataSize(convParams.outputDimensions, convParams.outputFormat),
                "The Texture2D is not the same size as the converted data.");

            cpuImage.Convert(convParams, destination);
            texture.Apply();
        }
Example #12
0
    /// <summary>
    /// Acquires the latest CPU camera image, converts a half-resolution RGBA32 copy
    /// (mirrored about Y) into a new texture and forwards it to the MediaPipe hand processor.
    /// </summary>
    unsafe void convertCPUImage()
    {
        XRCpuImage image;

        if (!cameraManager.TryAcquireLatestCpuImage(out image))
        {
            Debug.Log("Cant get image");
            return;
        }

        // Cache the image aspect ratio on the first frame.
        if (float.IsNegativeInfinity(ALPHA))
        {
            ALPHA      = (float)image.height / image.width;
            imageRatio = (float)(BETA / ALPHA);
        }

        var conversionParams = new XRCpuImage.ConversionParams {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),
            // Downsample by 2
            outputDimensions = new Vector2Int(image.width / 2, image.height / 2),
            // Choose RGBA format
            outputFormat = TextureFormat.RGBA32,
            // Flip across the vertical axis (mirror image)
            transformation = XRCpuImage.Transformation.MirrorY
        };

        int size = image.GetConvertedDataSize(conversionParams);

        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        try
        {
            image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
        }
        finally
        {
            // Fix: dispose in finally so a failed conversion cannot leak the native image.
            image.Dispose();
        }

        // NOTE(review): a new Texture2D is allocated every frame and handed to
        // handProcessor — confirm the processor destroys it, otherwise this leaks.
        Texture2D m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        m_Texture.LoadRawTextureData(buffer);
        m_Texture.Apply();
        buffer.Dispose();

        // Pass image (with a millisecond timestamp) to MediaPipe.
        long time = new DateTimeOffset(DateTime.Now).ToUnixTimeMilliseconds();

        Debug.Log("Texture loaded at: " + time.ToString());
        handProcessor.addFrameTexture(m_Texture);
    }
    /// <summary>
    /// Writes <paramref name="cpuImage"/> into <paramref name="texture"/>'s pixel buffer,
    /// mirrored about the vertical axis for display, then uploads the result to the GPU.
    /// </summary>
    void UpdateRawImage(Texture2D texture, XRCpuImage cpuImage)
    {
        // Mirror about the vertical axis so the on-screen image reads correctly.
        var convParams = new XRCpuImage.ConversionParams(cpuImage, cpuImage.format.AsTextureFormat(), XRCpuImage.Transformation.MirrorY);

        // Destination: the Texture2D's underlying pixel buffer.
        var destination = texture.GetRawTextureData <byte>();

        // The destination must exactly fit the converted image (same format, same size).
        Debug.Assert(destination.Length == cpuImage.GetConvertedDataSize(convParams.outputDimensions, convParams.outputFormat),
                     "The Texture2D is not the same size as the converted data.");

        // Convert in place, then push the new pixels to the GPU.
        cpuImage.Convert(convParams, destination);
        texture.Apply();
    }
        /// <summary>
        /// Converts <paramref name="image"/> to a tightly-packed RGB24 byte array.
        /// The caller retains ownership of <paramref name="image"/>; it is not disposed here.
        /// </summary>
        /// <param name="pixels">Receives the converted RGB24 pixel data.</param>
        /// <param name="image">The CPU image to convert.</param>
        public static void GetPlaneDataRGB(out byte[] pixels, XRCpuImage image)
        {
            var conversionParams = new XRCpuImage.ConversionParams
            {
                inputRect        = new RectInt(0, 0, image.width, image.height),
                outputDimensions = new Vector2Int(image.width, image.height),
                outputFormat     = TextureFormat.RGB24,
                transformation   = XRCpuImage.Transformation.None
            };

            int size = image.GetConvertedDataSize(conversionParams);

            pixels = new byte[size];
            // Pin the managed array so the native converter can write into it.
            GCHandle bufferHandle = GCHandle.Alloc(pixels, GCHandleType.Pinned);

            try
            {
                image.Convert(conversionParams, bufferHandle.AddrOfPinnedObject(), pixels.Length);
            }
            finally
            {
                // Fix: free in finally so the pinned handle cannot leak if Convert throws.
                bufferHandle.Free();
            }
        }
    /// <summary>
    /// Acquires the latest CPU camera image, converts it to RGBA32 (mirrored about the
    /// Y axis) directly into m_CameraTexture's pixel buffer, and displays it on m_cameraView.
    /// </summary>
    unsafe void UpdateCameraImage()
    {
        // Acquiring a CPU image claims a native resource; the `using` below releases it.
        if (!m_CameraManager.TryAcquireLatestCpuImage(out XRCpuImage image))
        {
            return;
        }

        using (image)
        {
            // RGBA32 is broadly supported; see XRCpuImage.FormatSupported for others.
            const TextureFormat format = TextureFormat.RGBA32;

            // Recreate the destination texture whenever the frame size changes.
            bool needsNewTexture = m_CameraTexture == null
                                   || m_CameraTexture.width != image.width
                                   || m_CameraTexture.height != image.height;
            if (needsNewTexture)
            {
                m_CameraTexture = new Texture2D(image.width, image.height, format, false);
            }

            // Full-frame conversion, flipped across the Y axis.
            var convParams = new XRCpuImage.ConversionParams(image, format, XRCpuImage.Transformation.MirrorY);

            // Convert straight into the texture's raw buffer — no intermediate copy.
            var pixelBuffer = m_CameraTexture.GetRawTextureData <byte>();
            image.Convert(convParams, new IntPtr(pixelBuffer.GetUnsafePtr()), pixelBuffer.Length);

            // Upload to the GPU and show the result on the RawImage.
            m_CameraTexture.Apply();
            m_cameraView.texture = m_CameraTexture;
        }
    }
Example #16
0
        /// <summary>
        /// Pulls the newest CPU camera frame into _cameraTexture (RGBA32, mirrored about
        /// the Y axis) and shows it on the debug camera view. When no frame is available,
        /// raises CloudUpdateEvent with a debug cloud instead.
        /// </summary>
        private unsafe void UpdateCameraImage()
        {
            if (!_cameraManager.TryAcquireLatestCpuImage(out var image))
            {
                CloudUpdateEvent?.Invoke(GenerateDebugCloud());
                return;
            }

            using (image)
            {
                // Recreate the texture when missing or its size no longer matches the frame.
                bool recreate = _cameraTexture == null || !HasEqualDimensions(_cameraTexture, image);
                if (recreate)
                {
                    _cameraTexture = new Texture2D(image.width, image.height, TextureFormat.RGBA32, false);
                }

                var convParams = new XRCpuImage.ConversionParams(image, TextureFormat.RGBA32, XRCpuImage.Transformation.MirrorY);
                var pixels     = _cameraTexture.GetRawTextureData <byte>();

                // Convert straight into the texture's pixel buffer, then upload.
                image.Convert(convParams, new IntPtr(pixels.GetUnsafePtr()), pixels.Length);
                _cameraTexture.Apply();

                _debugViews.Camera.texture = _cameraTexture;
            }
        }
Example #17
0
 // P/Invoke into the ARCore native plugin: queues an async conversion request for the
 // image identified by nativeHandle and returns the request id.
 // NOTE(review): the [DllImport] attribute is not visible in this excerpt — presumably
 // declared on the preceding line(s); confirm before relying on this declaration.
 public static extern int UnityARCore_CpuImage_CreateAsyncConversionRequest(int nativeHandle,
                                                                            XRCpuImage.ConversionParams conversionParams);
Example #18
0
 // P/Invoke into the ARCore native plugin: synchronously converts the image identified
 // by nativeHandle into the caller-supplied buffer; returns true on success.
 // NOTE(review): the [DllImport] attribute is not visible in this excerpt — presumably
 // declared on the preceding line(s); confirm before relying on this declaration.
 public static extern bool UnityARCore_CpuImage_TryConvert(int nativeHandle,
                                                           XRCpuImage.ConversionParams conversionParams,
                                                           IntPtr buffer, int bufferLength);
Example #19
0
 /// <summary>
 /// Similar to <see cref="ConvertAsync(int, XRCpuImage.ConversionParams)"/> but takes a delegate to
 /// invoke when the request is complete, rather than returning a request id.
 /// </summary>
 /// <remarks>
 /// If the first parameter to <paramref name="callback"/> is
 /// <see cref="XRCpuImage.AsyncConversionStatus.Ready"/> then the <c>dataPtr</c> parameter must be valid
 /// for the duration of the invocation. The data may be destroyed immediately upon return. The
 /// <paramref name="context"/> parameter must be passed back to the <paramref name="callback"/>.
 /// </remarks>
 /// <param name="nativeHandle">A unique identifier for the camera image to convert.</param>
 /// <param name="conversionParams">The parameters to use during the conversion.</param>
 /// <param name="callback">A delegate which must be invoked when the request is complete, whether the
 /// conversion was successful or not.</param>
 /// <param name="context">A native pointer which must be passed back unaltered to
 /// <paramref name="callback"/>.</param>
 public override void ConvertAsync(int nativeHandle, XRCpuImage.ConversionParams conversionParams,
                                   XRCpuImage.Api.OnImageRequestCompleteDelegate callback, IntPtr context)
 => NativeApi.UnityARCore_CpuImage_CreateAsyncConversionRequestWithCallback(nativeHandle, conversionParams,
                                                                            callback, context);
Example #20
0
 /// <summary>
 /// Create an asynchronous request to convert a camera image, similar to <see cref="TryConvert"/> except
 /// the conversion should happen on a thread other than the calling (main) thread.
 /// </summary>
 /// <param name="nativeHandle">A unique identifier for the camera image to convert.</param>
 /// <param name="conversionParams">The parameters to use during the conversion.</param>
 /// <returns>A unique identifier for this request.</returns>
 public override int ConvertAsync(int nativeHandle, XRCpuImage.ConversionParams conversionParams)
 {
     // Delegate directly to the ARCore native plugin.
     return NativeApi.UnityARCore_CpuImage_CreateAsyncConversionRequest(nativeHandle, conversionParams);
 }
Example #21
0
 /// <summary>
 /// Convert the image with handle <paramref name="nativeHandle"/> using the provided
 /// <paramref name="conversionParams"/>.
 /// </summary>
 /// <param name="nativeHandle">A unique identifier for the camera image to convert.</param>
 /// <param name="conversionParams">The parameters to use during the conversion.</param>
 /// <param name="destinationBuffer">A buffer to write the converted image to.</param>
 /// <param name="bufferLength">The number of bytes available in the buffer.</param>
 /// <returns>
 /// <c>true</c> if the image was converted and stored in <paramref name="destinationBuffer"/>.
 /// </returns>
 public override bool TryConvert(int nativeHandle, XRCpuImage.ConversionParams conversionParams,
                                 IntPtr destinationBuffer, int bufferLength)
 => NativeApi.UnityARCore_CpuImage_TryConvert(nativeHandle, conversionParams, destinationBuffer,
                                              bufferLength);
 /// <summary>
 /// Stub for platforms without CPU-image support: always throws
 /// <see cref="System.NotImplementedException"/> carrying <c>k_ExceptionMsg</c>.
 /// </summary>
 public static void CreateAsyncConversionRequestWithCallback(
     int nativeHandle, XRCpuImage.ConversionParams conversionParams,
     OnImageRequestCompleteDelegate callback, IntPtr context)
 {
     throw new System.NotImplementedException(k_ExceptionMsg);
 }
Example #23
0
    /// <summary>
    /// Coroutine: asynchronously converts the camera image to RGBA32, warps the region
    /// bounded by the anchor's four world points to a flat 1500x1500 image via a
    /// homography, and sends the result as JPG over the websocket.
    /// NOTE(review): the incoming <paramref name="image"/> is never disposed here —
    /// presumably the caller owns it; confirm, otherwise this leaks a native resource.
    /// </summary>
    IEnumerator ProcessImage(XRCpuImage image, Vector3 viewportScaling)
    {
        // Create the async conversion request.

        XRCpuImage.ConversionParams conv_params = new XRCpuImage.ConversionParams
        {
            // Use the full image.
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Full resolution (the original "Downsample by 2" comment was wrong —
            // the output dimensions equal the input dimensions).
            outputDimensions = new Vector2Int(image.width, image.height),

            // Color image format.
            outputFormat = TextureFormat.RGBA32,

            // Flip across the Y axis.
            transformation = XRCpuImage.Transformation.MirrorY
        };

        var request = image.ConvertAsync(conv_params);

        // Wait for the conversion to complete.
        while (!request.status.IsDone())
        {
            yield return(null);
        }

        // Check status to see if the conversion completed successfully.
        if (request.status != XRCpuImage.AsyncConversionStatus.Ready)
        {
            // Something went wrong.
            Debug.LogErrorFormat("Request failed with status {0}", request.status);

            // Dispose even if there is an error.
            request.Dispose();
            yield break;
        }

        // Image data is ready. Let's apply it to a Texture2D.
        var rawData = request.GetData <byte>();

        // Create a texture if necessary.
        // NOTE(review): the texture is created once and reused; if the camera image
        // size ever changes, LoadRawTextureData below would receive a mismatched
        // buffer — confirm the frame size is constant.
        if (m_Texture == null)
        {
            m_Texture = new Texture2D(
                request.conversionParams.outputDimensions.x,
                request.conversionParams.outputDimensions.y,
                request.conversionParams.outputFormat,
                false);
        }

        // Copy the image data into the texture.
        m_Texture.LoadRawTextureData(rawData);
        m_Texture.Apply();

        Debug.Log("TEX: " + m_Texture.height + "h " + m_Texture.width + "w");
        Debug.Log("Screen: " + m_Texture.height + "h " + m_Texture.width + "w");


        // Source (camera frame) and destination (warped, flattened) OpenCV matrices.
        Mat inputMat  = new Mat(image.height, image.width, CvType.CV_8UC4);
        Mat outputMat = new Mat(1500, 1500, CvType.CV_8UC4);

        Utils.fastTexture2DToMat(m_Texture, inputMat);

        if (tex2d == null)
        {
            tex2d = new Texture2D(1500,
                                  1500, conv_params.outputFormat, false);
        }

        Debug.Log("positionAnchor");
        Debug.Log(positionAnchor);

        Debug.Log("anchorRef");
        Debug.Log(anchorRef);

        int counter = 0;

        // Project the anchor's world points into scaled screen coordinates.
        // NOTE(review): srcPointsVec has room for exactly 4 entries — if
        // anchorRef.getWorldPoints() ever yields more, this throws IndexOutOfRange;
        // the axis swap and the magic constants (/ 3, 100 -) are unexplained — verify.
        Point[] srcPointsVec = new Point[4];
        foreach (var point in anchorRef.getWorldPoints())
        {
            Vector3 screenPoint = mainCam.WorldToScreenPoint(point);
            srcPointsVec[counter] = new Point(screenPoint.y * viewportScaling.y / 3,
                                              100 - screenPoint.x * viewportScaling.x / 3);
            counter += 1;
        }


        MatOfPoint2f srcPoints = new MatOfPoint2f(new[]
        {
            srcPointsVec[0],
            srcPointsVec[1],
            srcPointsVec[2],
            srcPointsVec[3]
        });


        // Target rectangle: 195x280 units scaled by 1.25 (presumably a physical
        // document/marker aspect ratio — TODO confirm).
        MatOfPoint2f dstPoints = new MatOfPoint2f(new[]
        {
            new Point(195 * 1.25, 0),
            new Point(0, 0),
            new Point(0, 280 * 1.25),
            new Point(195 * 1.25, 280 * 1.25),
        });

        // Homography mapping the projected anchor quad onto the target rectangle.
        Mat H = Calib3d.findHomography(srcPoints, dstPoints);


        Imgproc.warpPerspective(inputMat, outputMat, H, new Size(1500, 1500));

        Utils.fastMatToTexture2D(outputMat, tex2d);


        // Throttled send: canProcess gates how often frames go over the socket.
        if (websocket.State == WebSocketState.Open && canProcess)
        {
            websocket.Send(ImageConversion.EncodeToJPG(tex2d, 50));
            canProcess = false;
        }

        // Release native OpenCV memory. NOTE(review): these are only reached on the
        // success path — an exception above would leak the Mats; H is never disposed.
        inputMat.Dispose();
        inputMat = null;
        outputMat.Dispose();
        outputMat = null;
        request.Dispose();
    }
 /// <summary>
 /// Stub for platforms without CPU-image support: always throws
 /// <see cref="System.NotImplementedException"/> carrying <c>k_ExceptionMsg</c>.
 /// </summary>
 public static bool TryConvert(
     int nativeHandle, XRCpuImage.ConversionParams conversionParams,
     IntPtr buffer, int bufferLength)
 {
     throw new System.NotImplementedException(k_ExceptionMsg);
 }
Example #25
0
        /// <summary>
        /// Per-frame camera callback: copies the latest CPU camera image into
        /// m_CameraTexture (RGBA32, mirrored about Y), then derives a rotation-only
        /// display matrix from eventArgs.displayMatrix for the picture-in-picture shader.
        /// </summary>
        unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
        {
            // Attempt to get the latest camera image. If this method succeeds,
            // it acquires a native resource that must be disposed (see below).
            XRCpuImage image;

            if (!m_CameraManager.TryAcquireLatestCpuImage(out image))
            {
                return;
            }

            var format = TextureFormat.RGBA32;

            // Recreate the destination texture whenever the frame size changes.
            if (m_CameraTexture == null || m_CameraTexture.width != image.width || m_CameraTexture.height != image.height)
            {
                m_CameraTexture = new Texture2D(image.width, image.height, format, false);
            }

            // Convert the image to format, flipping the image across the Y axis.
            // We can also get a sub rectangle, but we'll get the full image here.
            var conversionParams = new XRCpuImage.ConversionParams(image, format, XRCpuImage.Transformation.MirrorY);
            // Texture2D allows us write directly to the raw texture data
            // This allows us to do the conversion in-place without making any copies.
            var rawTextureData = m_CameraTexture.GetRawTextureData <byte>();

            try
            {
                image.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
            finally
            {
                // We must dispose of the XRCpuImage after we're finished
                // with it to avoid leaking native resources.
                image.Dispose();
            }

            // Apply the updated texture data to our texture
            m_CameraTexture.Apply();

            // Set the RawImage's texture so we can visualize it.
            m_RawImage.material.SetTexture("_CameraTex", m_CameraTexture);

            ///////////////////////////////////////

            // Display matrix maps camera-image UVs for the device screen;
            // falls back to identity when the frame carries no matrix.
            Matrix4x4 cameraMatrix = eventArgs.displayMatrix ?? Matrix4x4.identity;

            Vector2 affineBasisX      = new Vector2(1.0f, 0.0f);
            Vector2 affineBasisY      = new Vector2(0.0f, 1.0f);
            Vector2 affineTranslation = new Vector2(0.0f, 0.0f);

            // Extract the 2D affine basis vectors and translation from the display matrix.
            affineBasisX      = new Vector2(cameraMatrix[0, 0], cameraMatrix[1, 0]);
            affineBasisY      = new Vector2(cameraMatrix[0, 1], cameraMatrix[1, 1]);
            affineTranslation = new Vector2(cameraMatrix[2, 0], cameraMatrix[2, 1]);

            // The camera display matrix includes scaling and offsets to fit the aspect ratio of the device. In most
            // cases, the camera display matrix should be used directly without modification when applying depth to
            // the scene because that will line up the depth image with the camera image. However, for this demo,
            // we want to show the full depth image as a picture-in-picture, so we remove these scaling and offset
            // factors while preserving the orientation.
            affineBasisX                  = affineBasisX.normalized;
            affineBasisY                  = affineBasisY.normalized;
            m_DisplayRotationMatrix       = Matrix4x4.identity;
            m_DisplayRotationMatrix[0, 0] = affineBasisX.x;
            m_DisplayRotationMatrix[0, 1] = affineBasisY.x;
            m_DisplayRotationMatrix[1, 0] = affineBasisX.y;
            m_DisplayRotationMatrix[1, 1] = affineBasisY.y;
            m_DisplayRotationMatrix[2, 0] = Mathf.Round(affineTranslation.x);
            m_DisplayRotationMatrix[2, 1] = Mathf.Round(affineTranslation.y);

            // Set the matrix to the raw image material.
            m_RawImage.material.SetMatrix(k_DisplayRotationPerFrameId, m_DisplayRotationMatrix);
            m_RawImage.material.SetMatrix(k_InverseMatrixId, m_DisplayRotationMatrix.inverse);
        }
Example #26
0
        /// <summary>
        /// Per-frame camera callback. On the first usable frame it chooses the output
        /// size (clamped height, aspect-preserving width) and starts the platform
        /// MediaPipe hand-tracking graph; on later frames (throttled to INTERVAL_TIME)
        /// it converts the CPU image and feeds it to the running graph.
        /// </summary>
        /// <param name="eventArgs">Frame event data from ARCameraManager (not read directly).</param>
        void onCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
        {
            if (isStart_)
            {
                // Throttle: skip frames until the next scheduled update time.
                if (nextUpdateFrameTime_ > Time.realtimeSinceStartup)
                {
                    return;
                }
                nextUpdateFrameTime_ = Time.realtimeSinceStartup + INTERVAL_TIME;
            }

            if (!cameraManager_.TryAcquireLatestCpuImage(out XRCpuImage image))
            {
                return;
            }

            if (!isStart_)
            {
                // One-time initialization path. The XRCpuImage owns a native resource,
                // so it must be released even if graph startup throws.
                try
                {
                    height_ = RESIZE_HEIGHT;
                    if (image.height < height_)
                    {
                        height_ = image.height;
                    }

                    width_ = height_ * image.width / image.height;

#if UNITY_ANDROID && !UNITY_EDITOR
                    using (AndroidJavaClass unityPlayer = new AndroidJavaClass("com.unity3d.player.UnityPlayer"))
                        using (AndroidJavaObject currentUnityActivity = unityPlayer.GetStatic <AndroidJavaObject>("currentActivity"))
                        {
                            multiHandMain_ = new AndroidJavaObject("online.mumeigames.mediapipe.apps.multihandtrackinggpu.MultiHandMain", currentUnityActivity, width_, height_, BothHand ? 2 : 1, 0);
                        }

                    multiHandMain_.Call("startRunningGraph");
                    nextUpdateFrameTime_ = Time.realtimeSinceStartup + START_DELAY_TIME;
#endif

#if UNITY_IOS && !UNITY_EDITOR
                    IntPtr graphName = Marshal.StringToHGlobalAnsi("handcputogpu");
                    multiHandSetup(graphName, width_, height_, BothHand ? 2 : 1, 0);
                    Marshal.FreeHGlobal(graphName);

                    multiHandStartRunningGraph();
                    nextUpdateFrameTime_ = Time.realtimeSinceStartup + START_DELAY_TIME;
#endif

                    // Pinhole model: normalized focal length from the fixed vertical FOV.
                    focalLength_ = 0.5 / Mathf.Tan(FixedFieldOfView * Mathf.Deg2Rad * 0.5f);
                    hand3dInitWithValues(focalLength_, focalLength_, 0.5, 0.5);

                    resetHandValues();

                    isStart_ = true;
                }
                finally
                {
                    image.Dispose();
                }
                return;
            }

            var conversionParams = new XRCpuImage.ConversionParams
            {
                inputRect        = new RectInt(0, 0, image.width, image.height),
                outputDimensions = new Vector2Int(width_, height_),
#if UNITY_IOS && !UNITY_EDITOR
                outputFormat = TextureFormat.BGRA32,
#else
                outputFormat = TextureFormat.ARGB32,
#endif
                transformation = XRCpuImage.Transformation.None
            };

            unsafe
            {
                int size   = image.GetConvertedDataSize(conversionParams);
                var buffer = new NativeArray <byte>(size, Allocator.Temp);
                try
                {
                    // Convert into the temp buffer, then release the native image as soon
                    // as possible — even when Convert throws (it previously leaked here).
                    try
                    {
                        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
                    }
                    finally
                    {
                        image.Dispose();
                    }

                    byte[] frameImage = new byte[size];
                    buffer.CopyTo(frameImage);

#if UNITY_ANDROID && !UNITY_EDITOR
                    // JNI expects signed bytes; reinterpret the bit patterns unchanged.
                    sbyte[] frameImageSigned = Array.ConvertAll(frameImage, b => unchecked ((sbyte)b));
                    multiHandMain_.Call("setFrame", frameImageSigned);
#endif

#if UNITY_IOS && !UNITY_EDITOR
                    IntPtr frameIntPtr = Marshal.AllocHGlobal(frameImage.Length * Marshal.SizeOf <byte>());
                    Marshal.Copy(frameImage, 0, frameIntPtr, frameImage.Length);
                    multiHandSetFrame(frameIntPtr, frameImage.Length);
                    Marshal.FreeHGlobal(frameIntPtr);
#endif
                }
                finally
                {
                    buffer.Dispose();
                }
            }
        }
 /// <summary>
 /// Asynchronous conversion requests are not supported by this provider.
 /// </summary>
 /// <exception cref="System.NotImplementedException">Always thrown.</exception>
 public static int CreateAsyncConversionRequest(
     int nativeHandle, XRCpuImage.ConversionParams conversionParams)
     => throw new System.NotImplementedException(k_ExceptionMsg);
// Example #28 (scraper artifact separator — not part of the original source)
 /// <summary>
 /// Native ARCore entry point that queues an asynchronous CPU-image conversion and,
 /// presumably, invokes <paramref name="callback"/> with <paramref name="context"/>
 /// on completion. NOTE(review): the [DllImport] attribute for this extern declaration
 /// is not visible in this chunk — confirm it is present immediately above.
 /// </summary>
 public static extern void UnityARCore_CpuImage_CreateAsyncConversionRequestWithCallback(
     int nativeHandle, XRCpuImage.ConversionParams conversionParams,
     XRCpuImage.Api.OnImageRequestCompleteDelegate callback, IntPtr context);
// Example #29 (scraper artifact separator — not part of the original source)
    /// <summary>
    /// Mirrors the latest CPU camera image into m_CameraTexture; when a touch just
    /// ended (and spawning is allowed), averages a centered 160x160 pixel patch and
    /// forwards the result to ConvertColor.
    /// </summary>
    unsafe void UpdateCameraImage()
    {
        // Acquiring the CPU image takes ownership of a native resource (disposed below).
        XRCpuImage cpuImage;
        if (!m_CameraManager.TryAcquireLatestCpuImage(out cpuImage))
        {
            return;
        }

        // Target an RGBA format; see XRCpuImage.FormatSupported for alternatives.
        const TextureFormat targetFormat = TextureFormat.RGBA32;

        // (Re)allocate the destination texture whenever the incoming dimensions change.
        bool needsNewTexture = m_CameraTexture == null
                               || m_CameraTexture.width != cpuImage.width
                               || m_CameraTexture.height != cpuImage.height;
        if (needsNewTexture)
        {
            m_CameraTexture = new Texture2D(cpuImage.width, cpuImage.height, targetFormat, false);
        }

        // Full-frame conversion, flipped across the Y axis, written straight into the
        // texture's raw buffer so no intermediate copy is needed.
        var convParams  = new XRCpuImage.ConversionParams(cpuImage, targetFormat, XRCpuImage.Transformation.MirrorY);
        var destination = m_CameraTexture.GetRawTextureData <byte>();

        try
        {
            cpuImage.Convert(convParams, new IntPtr(destination.GetUnsafePtr()), destination.Length);
        }
        finally
        {
            // Release the native image whether or not the conversion succeeded.
            cpuImage.Dispose();
        }

        m_CameraTexture.Apply();

        // Only react when a touch has just finished and the spawn cooldown is over.
        bool touchEnded = Input.touchCount > 0 && Input.touches[0].phase == TouchPhase.Ended;
        if (!touchEnded || !canSpawn)
        {
            return;
        }

        canSpawn  = false;
        nextSpawn = Time.time + timeToWait;

        int patchX = (m_CameraTexture.width / 2) - 80;
        int patchY = (m_CameraTexture.height / 2) - 80;
        cols = m_CameraTexture.GetPixels(patchX, patchY, 160, 160);

        Color average = Color.black;
        foreach (Color sample in cols)
        {
            average += sample;
        }
        average /= cols.Length;

        ConvertColor(average);
    }
        /// <summary>
        /// Requests localization: captures the latest camera frame and intrinsics,
        /// encodes the frame as a base64 JPEG, and posts a LocalizationRequest
        /// (via sendCameraImages) along with the camera pose at capture time.
        /// </summary>
        public unsafe void RequestLocalization()
        {
            XRCameraIntrinsics intr;
            ARCameraManager    cameraManager = m_Sdk.cameraManager;
            var cameraSubsystem = cameraManager.subsystem;

            if (cameraSubsystem != null && cameraSubsystem.TryGetIntrinsics(out intr) && cameraManager.TryAcquireLatestCpuImage(out XRCpuImage image))
            {
                loaderText.text = "Localizing...";
                loaderPanel.SetActive(true);

                LocalizationRequest lr = new LocalizationRequest();
                lr.cloud_Ids = cloudMaps;
                lr.width     = image.width;
                lr.height    = image.height;
                lr.channel   = 3; // RGB24 conversion below => 3 channels per pixel
                lr.Camera_fx = intr.focalLength.x;
                lr.Camera_fy = intr.focalLength.y;
                lr.Camera_px = intr.principalPoint.x;
                lr.Camera_py = intr.principalPoint.y;
                lr.version   = m_Sdk.arwaysdkversion;
                // FIX: read the timestamp while the image is still valid. The previous
                // code read image.timestamp after image.Dispose() — fragile use-after-
                // dispose even though the struct caches the value.
                lr.timestamp = image.timestamp;

                // Capture the camera pose corresponding to this frame.
                Vector3    camPos = ARCamera.transform.position;
                Quaternion camRot = ARCamera.transform.rotation;

                var format = TextureFormat.RGB24;

                if (m_Texture == null || m_Texture.width != image.width || m_Texture.height != image.height)
                {
                    m_Texture = new Texture2D(image.width, image.height, format, false);
                }

                // Convert the full image to RGB24, flipping across the X axis.
                var conversionParams = new XRCpuImage.ConversionParams(image, format, XRCpuImage.Transformation.MirrorX);

                // Texture2D lets us write directly into the raw texture data, so the
                // conversion happens in place without extra copies.
                var rawTextureData = m_Texture.GetRawTextureData <byte>();
                try
                {
                    image.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
                }
                finally
                {
                    // We must dispose of the XRCpuImage after we're finished
                    // with it to avoid leaking native resources.
                    image.Dispose();
                }

                // Apply the updated texture data to our texture.
                m_Texture.Apply();

                byte[] _bytesjpg = m_Texture.EncodeToJPG();
                lr.image = Convert.ToBase64String(_bytesjpg);

                // Show request counts.
                loc_attempts_txt.GetComponent <TMP_Text>().enabled = true;

                string output = JsonUtility.ToJson(lr);
                StartCoroutine(sendCameraImages(output, camPos, camRot));
            }
        }