unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        XRCpuImage image;

        // XRCpuImage is the AR Foundation 4.x type, so the matching call is
        // TryAcquireLatestCpuImage (TryGetLatestImage is the older 3.x name).
        if (!cameraManager.TryAcquireLatestCpuImage(out image))
        {
            return;
        }

        var conversionParams = new XRCpuImage.ConversionParams(
            image,
            TextureFormat.RGBA32,
            XRCpuImage.Transformation.None);

        if (mTexture == null || mTexture.width != image.width || mTexture.height != image.height)
        {
            mTexture = new Texture2D(
                conversionParams.outputDimensions.x,
                conversionParams.outputDimensions.y,
                conversionParams.outputFormat,
                false);
        }

        var buffer = mTexture.GetRawTextureData<byte>();

        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        mTexture.Apply();
        mRenderer.material.mainTexture = mTexture;

        // Note: the NativeArray returned by GetRawTextureData is a view into the
        // texture's own memory and must not be disposed.
        image.Dispose();
    }
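
For reference, a minimal sketch of how such a callback is typically wired up. The cameraManager field name is an assumption, but ARCameraManager.frameReceived is the actual AR Foundation event:

    void OnEnable()
    {
        // Invoked once per rendered frame while the AR session is running.
        cameraManager.frameReceived += OnCameraFrameReceived;
    }

    void OnDisable()
    {
        cameraManager.frameReceived -= OnCameraFrameReceived;
    }
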
    private void RefreshCameraFeedTexture()
    {
        XRCameraImage cameraImage;

        if (!m_cameraManager.TryGetLatestImage(out cameraImage))
        {
            return;
        }

        if (m_cameraFeedTexture == null || m_cameraFeedTexture.width != cameraImage.width ||
            m_cameraFeedTexture.height != cameraImage.height)
        {
            m_cameraFeedTexture = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RGBA32, false);
            // m_cameraFeedTexture = new Texture2D(Screen.width, Screen.height, TextureFormat.RGBA32, false);
        }

        CameraImageTransformation imageTransformation = Input.deviceOrientation == DeviceOrientation.LandscapeRight
            ? CameraImageTransformation.MirrorY
            : CameraImageTransformation.MirrorX;
        XRCameraImageConversionParams conversionParams =
            new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);

        NativeArray<byte> rawTextureData = m_cameraFeedTexture.GetRawTextureData<byte>();

        try
        {
            unsafe
            {
                cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
        }
        finally
        {
            cameraImage.Dispose();
        }

        m_cameraFeedTexture.Apply();
        m_material.SetTexture("_CameraFeed", m_cameraFeedTexture);
    }
Example #3
    void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        // CAMERA IMAGE HANDLING
        XRCameraImage image;

        if (!m_ARCameraManager.TryGetLatestImage(out image))
        {
            Debug.Log("Uh OH");
        }
        return;

        Debug.Log("FRAME");

        XRCameraImagePlane greyscale = image.GetPlane(0);

        if (m_Texture == null || m_cachedWidth != image.width || m_cachedHeight != image.height)
        {
            Debug.Log("Updating Texture Parameters");
            // TODO: Check for orientation
            // Update Cached Values
            m_cachedWidth  = image.width;
            m_cachedHeight = image.height;

            // Make new Texture
            var format = TextureFormat.R8; // CAUSES WHITESCREENING IF RGB24
            m_Texture = new Texture2D(image.width, image.height, format, false);
        }

        int w = image.width;
        int h = image.height;

        // Process the image here. The greyscale plane points into the native
        // image buffer, so the image must stay alive until processing is done.
        unsafe {
            Debug.Log("Processing the image");
            IntPtr greyPtr = (IntPtr)greyscale.data.GetUnsafePtr();
            ComputerVisionAlgo(greyPtr);
            Utils.fastMatToTexture2D(imageMat, m_Texture, true, 0);
        }

        image.Dispose();

        if (m_CachedOrientation != Screen.orientation)
        {
            Debug.Log("Configuring RawImage");
            m_CachedOrientation = Screen.orientation;
            ConfigureRawImageInSpace(w, h);
        }

        // BuildGreyscaleTexture(out YTexture);

        // Note: resizing the texture to Screen dimensions here would discard the
        // pixels written above, so the texture keeps the image's native size.

        m_Texture.Apply();
        m_RawImage.texture = (Texture)m_Texture;
        Debug.Log(m_Texture.GetPixel(300, 300));
        Debug.LogFormat("Texture Dimensions: {0} x {1}", m_Texture.width, m_Texture.height);
        // m_RawImage.SetNativeSize();
    }
Example #4
    unsafe void SaveImageCPU()
    {
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }

        var conversionParams = new XRCameraImageConversionParams
        {
            // Get the entire image.
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Downsample by 2.
            outputDimensions = new Vector2Int(image.width / 2, image.height / 2),

            // Choose RGBA format.
            outputFormat = TextureFormat.RGBA32,

            // Flip across the vertical axis (mirror image).
            transformation = CameraImageTransformation.MirrorY
        };

        // See how many bytes you need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image.
        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        // The image was converted to RGBA32 format and written into the provided buffer
        // so you can dispose of the XRCameraImage. You must do this or it will leak resources.
        image.Dispose();

        // At this point, you can process the image, pass it to a computer vision
        // algorithm, etc. In this example, you put it into a texture to visualize it.
        m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        m_Texture.LoadRawTextureData(buffer);
        m_Texture.Apply();

        var bytes = m_Texture.EncodeToPNG();

        filePath = Application.persistentDataPath + "/camera_texture.png";
        File.WriteAllBytes(filePath, bytes);

        // Done with your temporary data, so you can dispose it.
        buffer.Dispose();
    }
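
SaveImageCPU allocates a fresh Texture2D on every call. For repeated captures, a cached variant along these lines (a sketch reusing the existing m_Texture field) avoids the per-call allocation:

    // Sketch: only reallocate when the converted output size changes.
    if (m_Texture == null ||
        m_Texture.width  != conversionParams.outputDimensions.x ||
        m_Texture.height != conversionParams.outputDimensions.y)
    {
        m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);
    }
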
        /// <summary>
        /// Gets the image data from ARFoundation, preps it, and drops it into captureTex.
        /// </summary>
        /// <returns>
        /// A <see cref="Task"/> that yields <c>true</c> if the capture was successful; otherwise <c>false</c>.
        /// </returns>
        private Task<bool> GrabScreenAsync()
        {
            // Grab the latest image from ARFoundation
            XRCameraImage image;

            if (!cameraManager.TryGetLatestImage(out image))
            {
                Debug.LogError("[CameraCaptureARFoundation] Could not get latest image!");
                return Task.FromResult(false);
            }

            // Set up resizing parameters
            Vector2Int size             = resolution.AdjustSize(new Vector2Int(image.width, image.height));
            var        conversionParams = new XRCameraImageConversionParams
            {
                inputRect        = new RectInt(0, 0, image.width, image.height),
                outputDimensions = new Vector2Int(size.x, size.y),
                outputFormat     = TextureFormat.RGB24,
                transformation   = CameraImageTransformation.MirrorY,
            };

            // make sure we have a texture to store the resized image
            if (captureTex == null || captureTex.width != size.x || captureTex.height != size.y)
            {
                if (captureTex != null)
                {
                    GameObject.Destroy(captureTex);
                }
                captureTex = new Texture2D(size.x, size.y, TextureFormat.RGB24, false);
            }

            // Create a completion source to wait for the async operation
            TaskCompletionSource<bool> tcs = new TaskCompletionSource<bool>();

            // And do the resize!
            image.ConvertAsync(conversionParams, (status, p, data) =>
            {
                if (status == AsyncCameraImageConversionStatus.Ready)
                {
                    captureTex.LoadRawTextureData(data);
                    captureTex.Apply();
                }
                if (status == AsyncCameraImageConversionStatus.Ready || status == AsyncCameraImageConversionStatus.Failed)
                {
                    image.Dispose();

                    // TODO: Should we log the failure or fail the task? Previously this completed no matter what.
                    tcs.SetResult(status == AsyncCameraImageConversionStatus.Ready);
                }
            });

            // Return the completion source task so callers can await
            return tcs.Task;
        }
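
Callers can then await the returned task; a hypothetical usage sketch:

        private async void OnCaptureButtonPressed()
        {
            bool success = await GrabScreenAsync();
            if (!success)
            {
                Debug.LogWarning("Screen grab failed.");
            }
        }
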
Example #6
    public unsafe void GetScreenShot()
    {
        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }
        var format = TextureFormat.RGBA32;

        Texture2D texture = new Texture2D(image.width, image.height, format, false);

        var conversionParams = new XRCameraImageConversionParams {
            inputRect        = new RectInt(0, 0, image.width, image.height),
            outputDimensions = new Vector2Int(image.width, image.height),
            outputFormat     = TextureFormat.RGBA32,
            transformation   = CameraImageTransformation.MirrorY
        };

        var rawTextureData = texture.GetRawTextureData<byte>();

        try
        {
            IntPtr ptr = new IntPtr(rawTextureData.GetUnsafePtr());
            image.Convert(conversionParams, ptr, rawTextureData.Length);
        }
        finally
        {
            // We must dispose of the XRCameraImage after we're finished
            // with it to avoid leaking native resources.
            image.Dispose();
        }
        // Apply the updated texture data to our texture
        texture.Apply();

        // Set the RawImage's texture so we can visualize it
        float ratio = (float)texture.height / texture.width;

        if (Fit != null)
        {
            Fit.aspectRatio = 1f / ratio;
            Fit.aspectMode  = AspectRatioFitter.AspectMode.EnvelopeParent;
        }
        if (background != null)
        {
            background.texture = texture;
            background.rectTransform.localEulerAngles = new Vector3(0, 0, 90);
            background.rectTransform.localScale       = new Vector3(ratio, ratio, 1f);
            background.enabled = true;
        }
        StartCoroutine(SaveTexture(texture));
    }
Example #7
    public void GetImageAsync()
    {
        // Get information about the device camera image.
        XRCameraImage image;

        if (cameraManager.TryGetLatestImage(out image))
        {
            // If successful, launch a coroutine that waits for the image
            // to be ready, then apply it to a texture.
            StartCoroutine(ProcessImage(image));

            // It's safe to dispose the image before the async operation completes.
            image.Dispose();
        }
    }
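
The ProcessImage coroutine used above isn't shown; a sketch of what it might look like, following the AR Foundation async-conversion pattern (the m_Texture field and the conversion parameters are assumptions):

    IEnumerator ProcessImage(XRCameraImage image)
    {
        // StartCoroutine runs this code up to the first yield synchronously,
        // so ConvertAsync is called before the caller disposes the image.
        var request = image.ConvertAsync(new XRCameraImageConversionParams
        {
            inputRect        = new RectInt(0, 0, image.width, image.height),
            outputDimensions = new Vector2Int(image.width, image.height),
            outputFormat     = TextureFormat.RGBA32,
            transformation   = CameraImageTransformation.MirrorY
        });

        // Wait for the conversion to complete.
        while (request.status == AsyncCameraImageConversionStatus.Pending ||
               request.status == AsyncCameraImageConversionStatus.Processing)
        {
            yield return null;
        }

        if (request.status == AsyncCameraImageConversionStatus.Ready)
        {
            // m_Texture is assumed to match the output dimensions and format.
            m_Texture.LoadRawTextureData(request.GetData<byte>());
            m_Texture.Apply();
        }

        // The conversion owns native memory and must always be disposed.
        request.Dispose();
    }
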
    // Get Image from the AR Camera, extract the raw data from the image
    private unsafe void CaptureARBuffer()
    {
        // Get the image in the ARSubsystemManager.cameraFrameReceived callback

        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            Debug.LogWarning("Capture AR Buffer returns nothing!!!!!!");
            return;
        }

        var conversionParams = new XRCameraImageConversionParams
        {
            // Get the full image
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Keep the full resolution (no downsampling)
            outputDimensions = new Vector2Int(image.width, image.height),

            // Color image format
            outputFormat = ConvertFormat,

            // Flip across the x axis
            transformation = CameraImageTransformation.MirrorX
        };
        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        Debug.Log("OnCameraFrameReceived, size == " + size + "w:" + image.width + " h:" + image.height + " planes=" + image.planeCount);


        // Allocate a buffer to store the image
        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new System.IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        // The image was converted to the requested format and written into the provided
        // buffer. The CameraImage is disposed in the PushFrame callback below; we must
        // do this or it will leak resources.

        byte[] bytes = buffer.ToArray();
        monoProxy.StartCoroutine(PushFrame(bytes, image.width, image.height,
                                           () => { image.Dispose(); buffer.Dispose(); }));
    }
    unsafe void convertCPUImage()
    {
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            Debug.Log("Cant get image");
            return;
        }

        if (float.IsNegativeInfinity(ALPHA))
        {
            ALPHA      = (float)image.height / image.width;
            imageRatio = (float)(BETA / ALPHA);
        }

        var conversionParams = new XRCameraImageConversionParams {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),
            // Downsample by 2
            outputDimensions = new Vector2Int(image.width / 2, image.height / 2),
            // Choose RGBA format
            outputFormat = TextureFormat.RGBA32,
            // Flip across the vertical axis (mirror image)
            transformation = CameraImageTransformation.MirrorY
        };

        int size = image.GetConvertedDataSize(conversionParams);

        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
        image.Dispose();

        Texture2D m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        m_Texture.LoadRawTextureData(buffer);
        m_Texture.Apply();
        buffer.Dispose();
        // pass image for mediapipe
        handProcessor.addFrameTexture(m_Texture);
    }
        /// <summary>
        /// Gets the image data from ARFoundation, preps it, and drops it into captureTex.
        /// </summary>
        /// <param name="aOnFinished">Gets called when this method is finished with getting the image.</param>
        private void GrabScreen(Action aOnFinished)
        {
            // Grab the latest image from ARFoundation
            XRCameraImage image;

            if (!cameraManager.TryGetLatestImage(out image))
            {
                Debug.LogError("[CameraCaptureARFoundation] Could not get latest image!");
                return;
            }

            // Set up resizing parameters
            Vector2Int size             = resolution.AdjustSize(new Vector2Int(image.width, image.height));
            var        conversionParams = new XRCameraImageConversionParams
            {
                inputRect        = new RectInt(0, 0, image.width, image.height),
                outputDimensions = new Vector2Int(size.x, size.y),
                outputFormat     = TextureFormat.RGB24,
                transformation   = CameraImageTransformation.MirrorY,
            };

            // make sure we have a texture to store the resized image
            if (captureTex == null || captureTex.width != size.x || captureTex.height != size.y)
            {
                if (captureTex != null)
                {
                    GameObject.Destroy(captureTex);
                }
                captureTex = new Texture2D(size.x, size.y, TextureFormat.RGB24, false);
            }

            // And do the resize!
            image.ConvertAsync(conversionParams, (status, p, data) =>
            {
                if (status == AsyncCameraImageConversionStatus.Ready)
                {
                    captureTex.LoadRawTextureData(data);
                    captureTex.Apply();
                }
                if (status == AsyncCameraImageConversionStatus.Ready || status == AsyncCameraImageConversionStatus.Failed)
                {
                    image.Dispose();
                    aOnFinished();
                }
            });
        }
        private void GetImageAsync()
        {
            if (cameraManager.TryGetLatestImage(out currentXRImage))
            {
                currentXRImage.ConvertAsync(new XRCameraImageConversionParams
                {
                    inputRect        = new RectInt(0, 0, currentXRImage.width, currentXRImage.height),
                    outputDimensions = new Vector2Int(ScapeImgWidth, ScapeImgHeight),
                    outputFormat     = TextureFormat.R8
                }, ProcessImage);

                currentXRImage.Dispose();

                ScapeLogging.LogDebug("GetImageAsync() " + (Time.time - requestTime));
                measurementsRequested = false;
            }
        }
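
ProcessImage here is the completion callback handed to ConvertAsync; its expected signature is shown below (the body is an illustrative assumption):

        private void ProcessImage(AsyncCameraImageConversionStatus status,
                                  XRCameraImageConversionParams conversionParams,
                                  NativeArray<byte> data)
        {
            if (status != AsyncCameraImageConversionStatus.Ready)
            {
                Debug.LogWarningFormat("Image conversion failed with status {0}", status);
                return;
            }

            // 'data' is only valid for the duration of this callback,
            // so copy or consume it before returning.
            byte[] grayscalePixels = data.ToArray();
            // ... hand the R8 buffer to whatever consumes it ...
        }
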
    private void RefreshCameraFeedTexture()
    {
        XRCameraImage cameraImage;

        if (!m_cameraManager.TryGetLatestImage(out cameraImage))
        {
            return;
        }

        if (m_cameraFeedTexture == null || m_cameraFeedTexture.width != cameraImage.width || m_cameraFeedTexture.height != cameraImage.height)
        {
            m_cameraFeedTexture = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RGBA32, false);
        }

        CameraImageTransformation     imageTransformation = Input.deviceOrientation == DeviceOrientation.LandscapeRight ? CameraImageTransformation.MirrorY : CameraImageTransformation.MirrorX;
        XRCameraImageConversionParams conversionParams    = new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);

        NativeArray<byte> rawTextureData = m_cameraFeedTexture.GetRawTextureData<byte>();

        try
        {
            unsafe
            {
                cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
            }
        }
        finally
        {
            cameraImage.Dispose();
        }

        m_cameraFeedTexture.Apply();
        if (idx == 0)
        {
            meshRenderers = m_echoAR.GetComponentsInChildren<MeshRenderer>();
            materials.Clear();
            foreach (MeshRenderer mr in meshRenderers)
            {
                materials.Add(mr.material);
            }
            idx++;
        }
        m_material.SetTexture("_CameraFeed", materials[currentTexture].mainTexture as Texture2D);
        //m_material.SetTexture("_CameraFeed", humanOverlayTextures[currentTexture]);
    }
    private void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        XRCameraImage cameraImage;

        if (!m_cameraManager.TryGetLatestImage(out cameraImage))
        {
            return;
        }

        camW = cameraImage.width;
        camH = cameraImage.height;

        cameraImage.Dispose();
        if (camW != 0)
        {
            m_cameraManager.frameReceived -= OnCameraFrameReceived;
        }

        /* // If you want to do something with camera feed, you can tweak the following the code.
         * if (m_cameraFeedTexture == null || m_cameraFeedTexture.width != cameraImage.width || m_cameraFeedTexture.height != cameraImage.height)
         * {
         *  m_cameraFeedTexture = new Texture2D(cameraImage.width, cameraImage.height, TextureFormat.RGBA32, false);
         *  // m_cameraFeedTexture = new Texture2D(Screen.width, Screen.height, TextureFormat.RGBA32, false);
         * }
         *
         * CameraImageTransformation imageTransformation = Input.deviceOrientation == DeviceOrientation.LandscapeRight ? CameraImageTransformation.MirrorY : CameraImageTransformation.MirrorX;
         * XRCameraImageConversionParams conversionParams = new XRCameraImageConversionParams(cameraImage, TextureFormat.RGBA32, imageTransformation);
         *
         * NativeArray<byte> rawTextureData = m_cameraFeedTexture.GetRawTextureData<byte>();
         *
         * try
         * {
         *  unsafe
         *  {
         *      cameraImage.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
         *  }
         * }
         * finally
         * {
         *  cameraImage.Dispose();
         * }
         *
         * m_cameraFeedTexture.Apply();
         * m_material.SetTexture("_CameraFeed", testTexture);
         */
    }
Example #14
    /**
     * Get image and save it
     */
    public unsafe string GetImage(string dirname)   //works
    {
        XRCameraImage image;

        if (!arCameraManager.TryGetLatestImage(out image))
        {
            ToastHelper.ShowToast("Error getting lastest image");
            return(null);
        }

        XRCameraIntrinsics intrinsics;

        if (!arCameraManager.TryGetIntrinsics(out intrinsics))
        {
            ToastHelper.ShowToast("Error getting intrinsics");
        }

        float hfov = focalLenghToHFov(intrinsics);

        Debug.Log("Take picture " + image.width + "x" + image.height + " hfov: " + hfov);

        var conversionParams = new XRCameraImageConversionParams {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),

            outputDimensions = new Vector2Int(image.width, image.height),

            // Choose RGBA format
            outputFormat = TextureFormat.RGBA32,

            // Flip across the vertical axis (mirror image)
            transformation = CameraImageTransformation.MirrorY,
        };

        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image
        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr<byte>()), buffer.Length);

        // The image was converted to RGBA32 format and written into the provided buffer
        // so we can dispose of the CameraImage. We must do this or it will leak resources.
        image.Dispose();

        // At this point, we could process the image, pass it to a computer vision algorithm, etc.
        // In this example, we'll just apply it to a texture to visualize it.

        // We've got the data; let's put it into a texture so we can visualize it.
        Texture2D m_Texture = new Texture2D(
            conversionParams.outputDimensions.x,
            conversionParams.outputDimensions.y,
            conversionParams.outputFormat,
            false);

        m_Texture.LoadRawTextureData(buffer);
        m_Texture.Apply();

        if (!Directory.Exists(Application.persistentDataPath + "/" + dirname))
        {
            Directory.CreateDirectory(Application.persistentDataPath + "/" + dirname); //duplicated code
        }
        string fn = System.DateTime.Now.ToString("yyyy-MM-dd_HHmmss") + "_fov_" + hfov + "_photo.jpg";

        File.WriteAllBytes(Application.persistentDataPath + "/" + dirname + "/" + fn, m_Texture.EncodeToJPG());

        // Done with our temporary data
        buffer.Dispose();

        return fn;
    }
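
The focalLenghToHFov helper isn't shown above; a plausible implementation (an assumption, not the original) derives the horizontal field of view from the pinhole model, hfov = 2 * atan(w / (2 * fx)):

    // Sketch: horizontal FOV in degrees, computed from the camera intrinsics.
    private float focalLenghToHFov(XRCameraIntrinsics intrinsics)
    {
        return 2f * Mathf.Atan(intrinsics.resolution.x / (2f * intrinsics.focalLength.x)) * Mathf.Rad2Deg;
    }
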
Example #15
    public unsafe string CamGetFrame()
    {
        XRCameraImage image;

        if (m_CameraManager.TryGetLatestImage(out image))
        {
            var conversionParams = new XRCameraImageConversionParams
            {
                // Get the entire image
                inputRect = new RectInt(0, 0, image.width, image.height),

                // Keep the full resolution (no downsampling)
                outputDimensions = new Vector2Int(image.width, image.height),

                // Choose RGBA format
                outputFormat = TextureFormat.RGBA32,

                // Flip across the vertical axis (mirror image)
                transformation = CameraImageTransformation.MirrorY
            };

            // See how many bytes we need to store the final image.
            int size = image.GetConvertedDataSize(conversionParams);

            // Allocate a buffer to store the image
            var buffer = new NativeArray<byte>(size, Allocator.Temp);

            // Extract the image data
            image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);
            Debug.Log("buffer.Length: " + buffer.Length);
            // The image was converted to RGBA32 format and written into the provided buffer
            // so we can dispose of the CameraImage. We must do this or it will leak resources.
            image.Dispose();

            // At this point, we could process the image, pass it to a computer vision algorithm, etc.
            // In this example, we'll just apply it to a texture to visualize it.

            // We've got the data; let's put it into a texture so we can visualize it.
            Texture2D m_Texture = new Texture2D(
                conversionParams.outputDimensions.x,
                conversionParams.outputDimensions.y,
                conversionParams.outputFormat,
                false);

            m_Texture.LoadRawTextureData(buffer);
            m_Texture.Apply();

            Texture2D normTex = rotateTexture(m_Texture, false);

            byte[] bb           = normTex.EncodeToJPG();
            string pathToScreen = Application.persistentDataPath + "/augframe.jpg";
            if (File.Exists(pathToScreen))
            {
                File.Delete(pathToScreen);
            }
            File.WriteAllBytes(pathToScreen, bb);
            Debug.Log(pathToScreen);
            return pathToScreen;
        }
        return null;
    }
Example #16
    // Get Image from the AR Camera, extract the raw data from the image
    private unsafe void CaptureARBuffer(ARCameraFrameEventArgs eventArgs)
    {
        // Get the image in the ARSubsystemManager.cameraFrameReceived callback

        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            Debug.LogError("Capture AR Buffer returns nothing!!!!!!");
            return;
        }

        CameraImageFormat  ddd1 = image.format;
        XRCameraImagePlane ss   = image.GetPlane(0);



        Matrix4x4 ddd2 = eventArgs.projectionMatrix.Value;

        Vector3    position1 = new Vector3();
        Quaternion rotation1 = new Quaternion();

        SerializedCameraData serializedCameraData = new SerializedCameraData()
        {
            Timestamp        = eventArgs.timestampNs.Value,
            Position         = position1,
            Rotation         = rotation1,
            ProjectionMatrix = eventArgs.projectionMatrix.Value
        };


        byte[] augmentByteArray = serializedCameraData.Serialize();

        //Matrix4x4 ddd22 = eventArgs.projectionMatrix;



        var conversionParams = new XRCameraImageConversionParams
        {
            // Get the full image
            inputRect = new RectInt(0, 0, image.width, image.height),

            // Keep the full resolution (no downsampling)
            outputDimensions = new Vector2Int(image.width, image.height),

            // Color image format
            outputFormat = ConvertFormat,

            // Flip across the x axis
            transformation = CameraImageTransformation.MirrorX
        };
        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        Debug.LogError("OnCameraFrameReceived, size == " + size + "w:" + image.width + " h:" + image.height + " planes=" + image.planeCount);


        // Allocate a buffer to store the image
        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new System.IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        // The image was converted to the requested format and written into the provided
        // buffer. The CameraImage is disposed in the PushFrame callback below; we must
        // do this or it will leak resources.

        byte[] bytes = buffer.ToArray();
        monoProxy.StartCoroutine(PushFrame(bytes, image.width, image.height,
                                           () => { image.Dispose(); buffer.Dispose(); }));
    }
Example #17
        private IEnumerator Capture(bool anchor)
        {
            yield return new WaitForSeconds(0.25f);

            XRCameraImage image;

            if (m_CameraManager.TryGetLatestImage(out image))
            {
                CoroutineJobCapture j = new CoroutineJobCapture();
                j.onConnect         = onConnect;
                j.onFailedToConnect = onFailedToConnect;
                j.server            = this.server;
                j.token             = this.token;
                j.bank   = this.bank;
                j.run    = (int)(this.imageRun & 0xEFFFFFFF);
                j.index  = this.imageIndex++;
                j.anchor = anchor;

                Camera     cam = Camera.main;
                Quaternion _q  = cam.transform.rotation;
                Matrix4x4  r   = Matrix4x4.Rotate(new Quaternion(_q.x, _q.y, -_q.z, -_q.w));
                Vector3    _p  = cam.transform.position;
                Vector3    p   = new Vector3(_p.x, _p.y, -_p.z);
                j.rotation   = r;
                j.position   = p;
                j.intrinsics = ARHelper.GetIntrinsics(m_CameraManager);
                j.width      = image.width;
                j.height     = image.height;

                if (rgbCapture)
                {
                    var conversionParams = new XRCameraImageConversionParams
                    {
                        inputRect        = new RectInt(0, 0, image.width, image.height),
                        outputDimensions = new Vector2Int(image.width, image.height),
                        outputFormat     = TextureFormat.RGB24,
                        transformation   = CameraImageTransformation.None
                    };
                    int size = image.GetConvertedDataSize(conversionParams);
                    j.pixels   = new byte[size];
                    j.channels = 3;
                    GCHandle bufferHandle = GCHandle.Alloc(j.pixels, GCHandleType.Pinned);
                    image.Convert(conversionParams, bufferHandle.AddrOfPinnedObject(), j.pixels.Length);
                    bufferHandle.Free();
                }
                else
                {
                    XRCameraImagePlane plane = image.GetPlane(0);                     // use the Y plane
                    j.pixels   = new byte[plane.data.Length];
                    j.channels = 1;
                    plane.data.CopyTo(j.pixels);
                }

                j.sessionFirstImage = sessionFirstImage;
                if (sessionFirstImage)
                {
                    sessionFirstImage = false;
                }

                jobs.Add(j);
                image.Dispose();

                m_cameraShutterClick.Play();
            }
        }
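
ARHelper.GetIntrinsics isn't shown; a minimal sketch of what it presumably wraps (the Vector4 packing of fx, fy, cx, cy is an assumption):

        public static Vector4 GetIntrinsics(ARCameraManager manager)
        {
            XRCameraIntrinsics intrinsics;
            if (manager.TryGetIntrinsics(out intrinsics))
            {
                // Pack focal lengths and principal point.
                return new Vector4(intrinsics.focalLength.x, intrinsics.focalLength.y,
                                   intrinsics.principalPoint.x, intrinsics.principalPoint.y);
            }
            return Vector4.zero;
        }
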
    // NOTE: part of the following steps to set up image access on the CPU comes from
    // the official AR Foundation documentation, although some members have been renamed
    // since it was written, e.g. cameraManager.frameReceived instead of
    // cameraManager.cameraFrameReceived:
    // https://docs.unity3d.com/Packages/com.unity.xr.arfoundation@<version>/manual/cpu-camera-image.html
    public unsafe void DetectOnImage(Vector2 userInput)
    {
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }

        var conversionParams = new XRCameraImageConversionParams
        {
            // Get the entire image
            inputRect = new RectInt(0, 0, image.width, image.height),

            outputDimensions = new Vector2Int(
                Convert.ToInt32(image.width * scale.detectionScaleFactor),
                Convert.ToInt32(image.height * scale.detectionScaleFactor)),

            // NOTE: directly converting into single channel could be an option,
            // but it is not sure that R8 represents grayscale in one channel
            // NOTE 2: RGBA32 is not listed in the documentation as supported format
            outputFormat = TextureFormat.RGBA32,

            // Flip across the vertical axis (mirror image)
            transformation = CameraImageTransformation.None
        };

        // See how many bytes we need to store the final image.
        int size = image.GetConvertedDataSize(conversionParams);

        // Allocate a buffer to store the image
        var buffer = new NativeArray<byte>(size, Allocator.Temp);

        // Extract the image data
        image.Convert(conversionParams, new IntPtr(buffer.GetUnsafePtr()), buffer.Length);

        // The image was converted to RGBA32 format and written into the provided buffer
        // so we can dispose the CameraImage. We must do this or it will leak resources.
        image.Dispose();

        // At this point, we could process the image, pass it to a computer vision algorithm, etc.
        if (camTexture == null)
        {
            camTexture = new Texture2D(
                conversionParams.outputDimensions.x,
                conversionParams.outputDimensions.y,
                conversionParams.outputFormat,
                false
                );
        }

        camTexture.LoadRawTextureData(buffer);
        camTexture.Apply();

        Color32[] rawPixels   = camTexture.GetPixels32();
        Vector2[] resultArray = new Vector2[CORNERS];

        // Call to C++ Code
        float startT  = Time.realtimeSinceStartup;
        bool  success = ProcessImage(resultArray, rawPixels, userInput, conversionParams.outputDimensions.x, conversionParams.outputDimensions.y, true);
        float endT    = Time.realtimeSinceStartup;

        Debug.Log("DetectionTime: ");
        Debug.Log(endT - startT);

        imageToWorld.TransferIntoWorld(success, resultArray);
        //imageToWorld.ShowIndicator(success, resultArray);

        // Done with our temporary data
        buffer.Dispose();
    }
Example #19
    unsafe void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        //Get the latest image
        XRCameraImage image;

        if (!cameraManager.TryGetLatestImage(out image))
        {
            return;
        }

        timeCount += Time.deltaTime;

        //select the format of the texture
        var format = TextureFormat.RGBA32;

        //check if the texture changed, and only if so create a new one with the new changes
        if (texture == null || texture.width != image.width || texture.height != image.height)
        {
            texture = new Texture2D(image.width, image.height, format, false);
        }

        // Mirror on the Y axis so that it fits OpenCV standards
        var conversionParams = new XRCameraImageConversionParams(image, format, CameraImageTransformation.MirrorY);

        // try to apply raw texture data to the texture
        var rawTextureData = texture.GetRawTextureData<byte>();

        try
        {
            image.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
        }
        finally
        {
            //every Mat must be released before new data is assigned!
            image.Dispose();
        }

        //apply texture
        texture.Apply();

        texParam.FlipHorizontally = false;

        //create a Mat class from the texture
        imgBuffer = ARucoUnityHelper.TextureToMat(texture, texParam);

        // Increment thread counter
        if (threadCounter == 0 && timeCount >= markerDetectorPauseTime &&
            arCamera.velocity.magnitude <= maxPositionChangePerFrame && cameraPoseTracker.rotationChange <= maxRotationChangePerFrameDegrees)
        {
            //copy the buffer data to the img Mat
            imgBuffer.CopyTo(img);
            Interlocked.Increment(ref threadCounter);
            timeCount = 0;
        }

        updateThread = true;

        //Show the texture if needed
        if (showOpenCvTexture)
        {
            openCvTexture.texture = ARucoUnityHelper.MatToTexture(imgBuffer, texture);
        }

        //release imgBuffer Mat
        imgBuffer.Release();
    }