Example #1
    private Color GetColourAt(CameraImageBytes cim, int cx, int cy, out bool foundColour)
    {
        Color colour = new Color(0, 0, 0);

        foundColour = false;

        if ((cx >= 0) && (cy >= 0) && (cx < cim.Width) && (cy < cim.Height))
        {
            byte[] buf = new byte[1];
            byte   y;
            byte   u;
            byte   v;

            Marshal.Copy(new IntPtr(cim.Y.ToInt64() + cy * cim.YRowStride + cx * 1), buf, 0, 1);
            y = buf[0];
            // The UV planes are at quarter resolution; keep that in mind when indexing.
            Marshal.Copy(new IntPtr(cim.U.ToInt64() + (cy / 2) * cim.UVRowStride + (cx / 2) * cim.UVPixelStride), buf, 0, 1);
            u = buf[0];
            Marshal.Copy(new IntPtr(cim.V.ToInt64() + (cy / 2) * cim.UVRowStride + (cx / 2) * cim.UVPixelStride), buf, 0, 1);
            v           = buf[0];
            colour      = YUV2RGB(y, u, v);
            foundColour = true;
        }
        return(colour);
    }
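The YUV2RGB helper called above is not shown. A minimal sketch, assuming the same BT.601-style constants used in Example #2 and clamped output:

    // Hypothetical conversion from YUV bytes to a Unity Color in [0, 1].
    private Color YUV2RGB(byte y, byte u, byte v)
    {
        float Y = y;
        float U = u - 128.0f;
        float V = v - 128.0f;

        return new Color(
            Mathf.Clamp01((Y + 1.370705f * V) / 255.0f),
            Mathf.Clamp01((Y - 0.698001f * V - 0.337633f * U) / 255.0f),
            Mathf.Clamp01((Y + 1.732446f * U) / 255.0f),
            1.0f);
    }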
Example #2
    void OnImageAvailableC(int width, int height, CameraImageBytes image, int bufferSize)
    {
        // bufferSize is width * height for the full-resolution Y plane
        // (indexing below assumes YRowStride == width). The U and V planes are
        // quarter resolution with their own row and pixel strides, so they
        // need smaller buffers; this assumes the last UV row is padded to the
        // full row stride.
        byte[] bufferY = new byte[bufferSize];
        int    uvSize  = image.UVRowStride * (height / 2);
        byte[] bufferU = new byte[uvSize];
        byte[] bufferV = new byte[uvSize];

        Marshal.Copy(image.Y, bufferY, 0, bufferSize);
        Marshal.Copy(image.U, bufferU, 0, uvSize);
        Marshal.Copy(image.V, bufferV, 0, uvSize);

        // Packed YUV buffer for the commented-out variant below; copying
        // 1.5 * width * height bytes straight from image.Y assumes the three
        // planes are contiguous in memory.
        int bufferSizeYUV = width * height * 3 / 2;

        byte[] bufferYUV = new byte[bufferSizeYUV];
        Marshal.Copy(image.Y, bufferYUV, 0, bufferSizeYUV);

        Texture2D m_TextureRender = new Texture2D(width, height, TextureFormat.RGBA32, false, false);

        Color c = new Color();

        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                float Y = bufferY[y * width + x];
                // U and V are subsampled 2x2, so index them at half resolution
                // using the UV row and pixel strides.
                int   uvIndex = (y / 2) * image.UVRowStride + (x / 2) * image.UVPixelStride;
                float U = bufferU[uvIndex];
                float V = bufferV[uvIndex];

                // Earlier experiments:
                // c.r = Y; // + 1.4075f * (V - 128);
                // c.g = 0f; // Y - 0.3455f * (U - 128) - (0.7169f * (V - 128));
                // c.b = 0f; // Y + 1.7790f * (U - 128);

                // float Yvalue = bufferYUV[y * width + x];
                // float Uvalue = bufferYUV[(y / 2) * (width / 2) + x / 2 + (width * height)];
                // float Vvalue = bufferYUV[(y / 2) * (width / 2) + x / 2 + (width * height) + (width * height) / 4];
                c.r = Y + (float)(1.370705 * (V - 128.0f));
                c.g = Y - (float)(0.698001 * (V - 128.0f)) - (float)(0.337633 * (U - 128.0f));
                c.b = Y + (float)(1.732446 * (U - 128.0f));


                c.r /= 255.0f;
                c.g /= 255.0f;
                c.b /= 255.0f;

                MinMaxColor(ref c);

                c.a = 1.0f;
                m_TextureRender.SetPixel(width - 1 - x, y, c);
            }
        }

        string fn   = System.DateTime.Now.ToString("yyyy-MM-dd-HH-mm-ss") + "_photo_color.jpg";
        string path = Application.persistentDataPath + "/" + fn;

        Utils.Toast("Image " + width + "x" + height + " " + fn);
        File.WriteAllBytes(path, m_TextureRender.EncodeToJPG());
    }
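Both OnImageAvailableC above and OnImageAvailableBW in Example #4 call a MinMaxColor helper that is not shown. A minimal sketch, assuming it simply clamps each channel into [0, 1]:

    // Hypothetical clamp for YUV-to-RGB conversion overshoot.
    static void MinMaxColor(ref Color c)
    {
        c.r = Mathf.Clamp01(c.r);
        c.g = Mathf.Clamp01(c.g);
        c.b = Mathf.Clamp01(c.b);
    }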
Example #3
        /// <summary>
        /// The standard Unity Update() method; in this particular case it acquires a camera image when needed and enqueues it for sending.
        /// </summary>
        public void Update()
        {
            if (nextFrameId == long.MinValue)
            {
                return;
            }

            // Method 1 (some problems on Xiaomi devices, resolved in version 1.8)
            using (CameraImageBytes image = Frame.CameraImage.AcquireCameraImageBytes())
            {
                if (!image.IsAvailable)
                {
                    return;
                }

                Debug.Log("Acquired frame nextFrameId: " + nextFrameId);

                byte[] yBuffer = new byte[image.Width * image.Height];

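                // ARCore's CPU image is NV21-like: image.V points at the start
                // of the interleaved VU plane, so Width * (Height / 2) bytes
                // from it cover both chroma channels.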
                byte[] vuBuffer = new byte[image.Width * (image.Height / 2)];

                Marshal.Copy(image.Y, yBuffer, 0, yBuffer.Length);

                Marshal.Copy(image.V, vuBuffer, 0, vuBuffer.Length);

                Vector3 position = trsmatrix.MultiplyPoint3x4(Frame.Pose.position);

                Quaternion rotation = Frame.Pose.rotation * Quaternion.LookRotation(
                    trsmatrix.GetColumn(2), trsmatrix.GetColumn(1));
                Quaternion rotationNoEdit = new Quaternion(rotation.x, rotation.y, rotation.z, rotation.w);

                Vector3    positionOri = cameraTransform.position;
                Quaternion rotationOri = cameraTransform.rotation;

                FrameNode frame = new FrameNode
                {
                    frameId        = nextFrameId,
                    imageHeight    = image.Height,
                    imageWidth     = image.Width,
                    focalLength    = Frame.CameraImage.ImageIntrinsics.FocalLength,
                    principalPoint = Frame.CameraImage.ImageIntrinsics.PrincipalPoint,
                    position       = position,
                    positionOri    = positionOri,
                    rotation       = rotation,
                    rotationOri    = rotationOri,
                    vuBuffer       = vuBuffer,
                    yBuffer        = yBuffer
                };

                sendingSocket.EnquequeFrame(ref frame);

                frameAcquired.Add(nextFrameId);

                nextFrameId = long.MinValue;
            }
        }
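The FrameNode type constructed above is not shown. A minimal sketch of its shape, inferred from the object initializer; the exact field types are assumptions (ARCore's ImageIntrinsics exposes FocalLength and PrincipalPoint as Vector2):

        // Hypothetical definition inferred from the initializer in Update().
        public class FrameNode
        {
            public long       frameId;
            public int        imageHeight;
            public int        imageWidth;
            public Vector2    focalLength;
            public Vector2    principalPoint;
            public Vector3    position;
            public Vector3    positionOri;
            public Quaternion rotation;
            public Quaternion rotationOri;
            public byte[]     vuBuffer;   // interleaved VU (NV21) chroma bytes
            public byte[]     yBuffer;    // luminance bytes
        }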
Example #4
    /* public Texture2D CameraToTexture() {
     *   // Create the object for the result - this has to be done before the
     *   // using {} clause.
     *   Texture2D result;
     *
     *   // Use using to make sure that C# disposes of the CameraImageBytes afterwards
     *   using (CameraImageBytes camBytes = Frame.CameraImage.AcquireCameraImageBytes()) {
     *
     *       // If acquiring failed, return null
     *       if (!camBytes.IsAvailable) {
     *           Debug.LogWarning("camBytes not available");
     *           return null;
     *       }
     *
     *       // To save a YUV_420_888 image, you need 1.5*pixelCount bytes.
     *       // I will explain later, why.
     *
     *       byte[] YUVimage = new byte[(int)(camBytes.Width * camBytes.Height * 1.5f)];
     *
     *       // As CameraImageBytes keep the Y, U and V data in three separate
     *       // arrays, we need to put them in a single array. This is done using
     *       // native pointers, which are considered unsafe in C#.
     *       unsafe {
     *           for (int i = 0; i < camBytes.Width * camBytes.Height; i++) {
     *               YUVimage[i] = *((byte*)camBytes.Y.ToPointer() + (i * sizeof(byte)));
     *           }
     *
     *           for (int i = 0; i < camBytes.Width * camBytes.Height / 4; i++) {
     *               YUVimage[(camBytes.Width * camBytes.Height) + 2 * i] = *((byte*)camBytes.U.ToPointer() + (i * camBytes.UVPixelStride * sizeof(byte)));
     *               YUVimage[(camBytes.Width * camBytes.Height) + 2 * i + 1] = *((byte*)camBytes.V.ToPointer() + (i * camBytes.UVPixelStride * sizeof(byte)));
     *           }
     *       }
     *
     *       // Create the output byte array. RGB is three channels, therefore
     *       // we need 3 times the pixel count
     *       byte[] RGBimage = new byte[camBytes.Width * camBytes.Height * 3];
     *
     *       // GCHandles help us "pin" the arrays in the memory, so that we can
     *       // pass them to the C++ code.
     *       GCHandle YUVhandle = GCHandle.Alloc(YUVimage, GCHandleType.Pinned);
     *       GCHandle RGBhandle = GCHandle.Alloc(RGBimage, GCHandleType.Pinned);
     *
     *       // Call the C++ function that we created.
     *       int k = ConvertYUV2RGBA(YUVhandle.AddrOfPinnedObject(), RGBhandle.AddrOfPinnedObject(), camBytes.Width, camBytes.Height);
     *
     *       // If OpenCV conversion failed, return null
     *       if (k != 0) {
     *           Debug.LogWarning("Color conversion - k != 0");
     *           return null;
     *       }
     *
     *       // Create a new texture object
     *       result = new Texture2D(camBytes.Width, camBytes.Height, TextureFormat.RGB24, false);
     *
     *       // Load the RGB array to the texture, send it to GPU
     *       result.LoadRawTextureData(RGBimage);
     *       result.Apply();
     *
     *       // Save the texture as a PNG file. End the using {} clause to
     *       // dispose of the CameraImageBytes.
     *       File.WriteAllBytes(Application.persistentDataPath + "/tex.png", result.EncodeToPNG());
     *   }
     *
     *   // Return the texture.
     *   return result;
     * }*/

    void takePhotoGrayscale640x480()
    {
        Utils.Log(_CameraIntrinsicsToString(Frame.CameraImage.ImageIntrinsics, "ff"));



        using (CameraImageBytes image = Frame.CameraImage.AcquireCameraImageBytes()) {
            if (!image.IsAvailable)
            {
                Utils.Toast("not available");
                return;
            }
            OnImageAvailableBW(image.Width, image.Height, image.YRowStride, image.Y, image.YRowStride * image.Height); // works, but sometimes the camera is not available, and the output is B&W only
            //OnImageAvailableC(image.Width, image.Height, image, image.Width * image.Height);
        }


        void OnImageAvailableBW(int width, int height, int rowStride, IntPtr pixelBuffer, int bufferSize)
        {
            byte[] bufferY = new byte[bufferSize];

            System.Runtime.InteropServices.Marshal.Copy(pixelBuffer, bufferY, 0, bufferSize);

            Texture2D m_TextureRender = new Texture2D(width, height, TextureFormat.RGBA32, false, false);

            Color c = new Color();

            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    // Index with rowStride, since rows may be padded beyond width.
                    float Y = bufferY[y * rowStride + x];

                    c.r = Y;
                    c.g = Y;
                    c.b = Y;

                    c.r /= 255.0f;
                    c.g /= 255.0f;
                    c.b /= 255.0f;

                    MinMaxColor(ref c);

                    c.a = 1.0f;
                    m_TextureRender.SetPixel(width - 1 - x, y, c);
                }
            }

            string fn   = System.DateTime.Now.ToString("yyyy-MM-dd-HH-mm-ss") + "_photo_bw.jpg";
            string path = Application.persistentDataPath + "/" + fn;

            Utils.Toast("Image " + width + "x" + height + " " + fn);
            File.WriteAllBytes(path, m_TextureRender.EncodeToJPG());
        }
    }
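The commented-out CameraToTexture block above calls a native ConvertYUV2RGBA function. A hypothetical P/Invoke declaration for it; the plugin name and exact signature are assumptions:

    // Hypothetical binding to the author's C++/OpenCV conversion plugin;
    // returns 0 on success, matching the k != 0 check above.
    [DllImport("yuv2rgba")]
    private static extern int ConvertYUV2RGBA(IntPtr yuvPtr, IntPtr rgbPtr, int width, int height);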
Example #5
        /// <summary>
        /// Captures a YUV_420_888 image from the device's CPU using ARCore's CameraImageBytes class.
        /// The image is converted to RGB using OpenCV plus Unity, a free plugin based on OpenCvSharp,
        /// then rotated and flipped to align with the viewport.
        /// Adds the image to a list of Mats, which can be converted to JPG via Unity textures later.
        /// NOTE: No Unity classes are used in this function so that it remains thread-safe (Unity's API is not thread-safe).
        /// </summary>
        public static void GetCameraImage()
        {
            // Use using to make sure that C# disposes of the CameraImageBytes afterwards
            using (CameraImageBytes camBytes = Frame.CameraImage.AcquireCameraImageBytes())
            {
                // If acquiring failed, return null
                if (!camBytes.IsAvailable)
                {
                    return;
                }

                // To save a YUV_420_888 image, you need 1.5*pixelCount bytes.
                byte[] YUVimage = new byte[(int)(camBytes.Width * camBytes.Height * 1.5f)];

                // As CameraImageBytes keep the Y, U and V data in three separate
                // arrays, we need to put them in a single array. This is done using
                // native pointers, which are considered unsafe in C#.
                unsafe
                {
                    for (int i = 0; i < camBytes.Width * camBytes.Height; i++)
                    {
                        YUVimage[i] = *((byte *)camBytes.Y.ToPointer() + (i * sizeof(byte)));
                    }

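                    // Interleave U and V as U,V,U,V... (NV12 order) after the
                    // Y plane, matching the YUV2BGR_NV12 conversion below.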
                    for (int i = 0; i < camBytes.Width * camBytes.Height / 4; i++)
                    {
                        YUVimage[(camBytes.Width * camBytes.Height) + 2 * i]     = *((byte *)camBytes.U.ToPointer() + (i * camBytes.UVPixelStride * sizeof(byte)));
                        YUVimage[(camBytes.Width * camBytes.Height) + 2 * i + 1] = *((byte *)camBytes.V.ToPointer() + (i * camBytes.UVPixelStride * sizeof(byte)));
                    }
                }

                // GCHandles help us "pin" the arrays in the memory, so that we can
                // pass them to the C++ code.
                GCHandle pinnedArray = GCHandle.Alloc(YUVimage, GCHandleType.Pinned);

                IntPtr pointerYUV = pinnedArray.AddrOfPinnedObject();

                Mat input  = new Mat(camBytes.Height + camBytes.Height / 2, camBytes.Width, MatType.CV_8UC1, pointerYUV);
                Mat output = new Mat(camBytes.Height, camBytes.Width, MatType.CV_8UC3);

                Cv2.CvtColor(input, output, ColorConversionCodes.YUV2BGR_NV12);

                // FLIP AND TRANSPOSE TO VERTICAL
                Cv2.Transpose(output, output);
                Cv2.Flip(output, output, FlipMode.Y);

                Mesh3DController.AllData.Add(output);
                pinnedArray.Free();
                Mesh3DController.ErrorString = "Thread Success!";
            }
        }
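The Mats queued above can be turned into JPGs on the main thread later, as the summary notes. A minimal sketch, assuming the plugin's Unity.MatToTexture helper (used in the next example) and the Mesh3DController.AllData list:

        // Hypothetical main-thread consumer for the queued Mats.
        public static void FlushCapturedFrames()
        {
            for (int i = 0; i < Mesh3DController.AllData.Count; i++)
            {
                Texture2D tex  = Unity.MatToTexture(Mesh3DController.AllData[i]);
                string    path = Application.persistentDataPath + "/frame_" + i + ".jpg";
                File.WriteAllBytes(path, tex.EncodeToJPG());
                UnityEngine.Object.Destroy(tex);
            }
            Mesh3DController.AllData.Clear();
        }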
Example #6
    public static void GetCameraImage()
    {
        Texture2D result;

        // Use using to make sure that C# disposes of the CameraImageBytes afterwards
        using (CameraImageBytes camBytes = Frame.CameraImage.AcquireCameraImageBytes())
        {
            // If acquiring failed, return
            if (!camBytes.IsAvailable)
            {
                Debug.LogWarning("camBytes not available");
                return;
            }

            // To save a YUV_420_888 image, you need 1.5 * pixelCount bytes.
            byte[] YUVimage = new byte[(int)(camBytes.Width * camBytes.Height * 1.5f)];

            // As CameraImageBytes keeps the Y, U and V data in three separate
            // arrays, we need to put them in a single array. This is done using
            // native pointers, which are considered unsafe in C#.
            unsafe
            {
                for (int i = 0; i < camBytes.Width * camBytes.Height; i++)
                {
                    YUVimage[i] = *((byte *)camBytes.Y.ToPointer() + (i * sizeof(byte)));
                }

                for (int i = 0; i < camBytes.Width * camBytes.Height / 4; i++)
                {
                    YUVimage[(camBytes.Width * camBytes.Height) + 2 * i]     = *((byte *)camBytes.U.ToPointer() + (i * camBytes.UVPixelStride * sizeof(byte)));
                    YUVimage[(camBytes.Width * camBytes.Height) + 2 * i + 1] = *((byte *)camBytes.V.ToPointer() + (i * camBytes.UVPixelStride * sizeof(byte)));
                }
            }

            // Create the output byte array. RGB is three channels, therefore
            // we need 3 times the pixel count. (Unused in this variant: the
            // output Mat below allocates its own buffer.)
            byte[] RGBimage = new byte[camBytes.Width * camBytes.Height * 3];

            // GCHandles help us "pin" the arrays in the memory, so that we can
            // pass them to the C++ code.
            GCHandle pinnedArray = GCHandle.Alloc(YUVimage, GCHandleType.Pinned);
            IntPtr   pointer     = pinnedArray.AddrOfPinnedObject();

            Mat input  = new Mat(camBytes.Height + camBytes.Height / 2, camBytes.Width, MatType.CV_8UC1, pointer);
            Mat output = new Mat(camBytes.Height, camBytes.Width, MatType.CV_8UC3);

            Cv2.CvtColor(input, output, ColorConversionCodes.YUV2BGR_NV12); // or YUV2RGB_NV12

            Cv2.Transpose(output, output);
            Cv2.Flip(output, output, FlipMode.Y);

            result = Unity.MatToTexture(output);
            result.Apply();

            byte[] im = result.EncodeToJPG(100);
            AllData.Add(im);

            Destroy(result);
            pinnedArray.Free();
        }
    }
Example #7
        public override Texture2D captureCurrentFrame()
        {
            Texture2D texture = null;

            using (CameraImageBytes image = Frame.CameraImage.AcquireCameraImageBytes())
            {
                if (!image.IsAvailable)
                {
                    return texture;
                }

                // The camera image arrives as YUV; load just the Y (luminance)
                // plane into a single-channel R8 texture. This assumes
                // YRowStride == Width, i.e. no row padding.
                texture = new Texture2D(image.Width, image.Height, TextureFormat.R8, false, false);
                byte[] imageBinary = new byte[image.Width * image.Height];
                System.Runtime.InteropServices.Marshal.Copy(image.Y, imageBinary, 0, imageBinary.Length);
                texture.LoadRawTextureData(imageBinary);
                texture.Apply();
            }
            return(texture);
        }
Example #8
    // Update is called once per frame
    void Update()
    {
        if (Frame.PointCloud.IsUpdatedThisFrame)
        {
            if (logger != null)
            {
                logger.text = "Have Points";
            }
            if (AddVoxels)
            {
                CameraImageBytes cim = Frame.CameraImage.AcquireCameraImageBytes();

                for (int i = 0; i < Frame.PointCloud.PointCount; i++)
                {
                    Color colour      = new Color(0, 0, 0);
                    bool  foundColour = false;

                    PointCloudPoint p = Frame.PointCloud.GetPointAsStruct(i);
                    Vector3         cameraCoordinates = arCamera.WorldToViewportPoint(p);

                    if (cim.IsAvailable)
                    {
                        var uvQuad = Frame.CameraImage.DisplayUvCoords;
                        int cx     = (int)(cameraCoordinates.x * cim.Width);
                        int cy     = (int)((1.0f - cameraCoordinates.y) * cim.Height);
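                        // GetColourAt (Example #1) samples the YUV planes at (cx, cy).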
                        colour = GetColourAt(cim, cx, cy, out foundColour);
                    }
                    if (foundColour)
                    {
                        tree.addPoint(p, colour);
                    }
                }
                cim.Release();
            }
            tree.renderOctTree(voxelParent);
        }
        else
        {
            if (logger != null)
            {
                logger.text = "No Points";
            }
        }
    }
Example #9
    /**
     * Takes a snapshot of the current camera image and converts it into a JPG
     * to be processed by an OCR service.
     * The response can arrive at any time; the default timeout is 10 seconds.
     * The average response time from Google Cloud Vision is 1 second.
     */
    private void InitiateOCRDetection()
    {
        image = Frame.CameraImage.AcquireCameraImageBytes();

        // Detection can only proceed once enough CPU resources have been freed.
        // If acquisition fails, the user has to start the whole process again.
        if (!image.IsAvailable)
        {
            _SystemStatePresenter.DisplayUserMessage("Couldn't access camera image! Please try again.");
        }

        else
        {
            /**
             * The camera image is split into its brightness (Y) channel and the
             * metadata needed for calculations.
             * This data is needed to create a Texture2D that can be converted into a JPG.
             */
            _TextDetection.DetectText(image.Width, image.Height, image.Y, image.YRowStride);
        }
        image.Release();
    }
Example #10
        /// <summary>
        /// The Unity Update() method.
        /// </summary>
        public void Update()
        {
            if (Input.GetKey(KeyCode.Escape))
            {
                Application.Quit();
            }

            _QuitOnConnectionErrors();
            _UpdateFrameRate();

            // Change the CPU resolution checkbox visibility.
            LowResConfigToggle.gameObject.SetActive(EdgeDetectionBackgroundImage.enabled);
            HighResConfigToggle.gameObject.SetActive(EdgeDetectionBackgroundImage.enabled);
            m_ImageTextureToggleText.text = EdgeDetectionBackgroundImage.enabled ?
                                            "Switch to GPU Texture" : "Switch to CPU Image";

            if (!Session.Status.IsValid())
            {
                return;
            }

            using (CameraImageBytes image = Frame.CameraImage.AcquireCameraImageBytes())
            {
                if (!image.IsAvailable)
                {
                    return;
                }

                _OnImageAvailable(image.Width, image.Height, image.YRowStride, image.Y, 0);
            }

            var cameraIntrinsics = EdgeDetectionBackgroundImage.enabled
                ? Frame.CameraImage.ImageIntrinsics : Frame.CameraImage.TextureIntrinsics;
            string intrinsicsType =
                EdgeDetectionBackgroundImage.enabled ? "CPU Image" : "GPU Texture";

            CameraIntrinsicsOutput.text =
                _CameraIntrinsicsToString(cameraIntrinsics, intrinsicsType);
        }
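Both this example and Example #4 call a _CameraIntrinsicsToString helper that is not shown. A minimal sketch, assuming GoogleARCore's CameraIntrinsics exposes FocalLength, PrincipalPoint and ImageDimensions; the actual formatting may differ:

        // Hypothetical formatter for CameraIntrinsics diagnostics.
        private string _CameraIntrinsicsToString(CameraIntrinsics intrinsics, string intrinsicsType)
        {
            return string.Format(
                "Camera intrinsics ({0}):\nFocal length: {1}\nPrincipal point: {2}\nImage dimensions: {3}",
                intrinsicsType, intrinsics.FocalLength, intrinsics.PrincipalPoint, intrinsics.ImageDimensions);
        }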
Example #11
    private string takeSnapshot2()
    {
        string fn       = System.DateTime.Now.ToString("yyyy-MM-dd-HH-mm-ss") + "_screenshot.jpg";
        string pathSnap = Application.persistentDataPath + "/" + fn;

        //ScreenCapture.CaptureScreenshot(fn);

        Utils.Toast("Photo " + pathSnap);

        using (CameraImageBytes image = Frame.CameraImage.AcquireCameraImageBytes())
        {
            if (!image.IsAvailable)
            {
                Utils.Toast("not available");
                return null;
            }

            int width  = image.Width;
            int height = image.Height;

            // The Y plane holds one luminance byte per pixel, so it can be
            // loaded directly into an R8 texture (shaders read it through the
            // red channel). Copying width * height bytes assumes
            // YRowStride == width.
            Texture2D tx = new Texture2D(width, height, TextureFormat.R8, false, false);

            byte[] yBuffer = new byte[width * height];
            System.Runtime.InteropServices.Marshal.Copy(image.Y, yBuffer, 0, yBuffer.Length);

            tx.LoadRawTextureData(yBuffer);
            tx.Apply();

            var encodedJpg = tx.EncodeToJPG();

            File.WriteAllBytes(pathSnap, encodedJpg);
        }

        return(fn);
    }
Example #12
        private static IEnumerator UpdateLoop(string adbPath)
        {
            Debug.Log("Entering UpdateLoop");
            var renderEventFunc          = NativeApi.GetRenderEventFunc();
            var shouldConvertToBgra      = SystemInfo.graphicsDeviceType == GraphicsDeviceType.Direct3D11;
            var loggedAspectRatioWarning = false;

            // Waits until the end of the first frame before capturing the screen
            // size, because it might be incorrect when first queried.
            yield return(k_WaitForEndOfFrame);

            var currentWidth        = 0;
            var currentHeight       = 0;
            var needToStartActivity = true;
            var prevFrameLandscape  = false;

            RenderTexture screenTexture = null;
            RenderTexture targetTexture = null;
            RenderTexture bgrTexture    = null;

            // Begins update loop. The coroutine will cease when the
            // ARCoreSession component it's called from is destroyed.
            for (; ;)
            {
                yield return(k_WaitForEndOfFrame);

                var curFrameLandscape = Screen.width > Screen.height;
                if (prevFrameLandscape != curFrameLandscape)
                {
                    needToStartActivity = true;
                }

                prevFrameLandscape = curFrameLandscape;
                if (needToStartActivity)
                {
                    Debug.Log("Need to start InstantPreview activity");
                    string activityName = curFrameLandscape ? "InstantPreviewLandscapeActivity" :
                                          "InstantPreviewActivity";
                    string output;
                    string errors;
                    ShellHelper.RunCommand(adbPath,
                                           "shell am start -S -n com.google.ar.core.instantpreview/." + activityName,
                                           out output, out errors);
                    needToStartActivity = false;
                }

                // Creates a target texture to capture the preview window onto.
                // Some video encoders prefer the dimensions to be a multiple of 16.
                var targetWidth  = RoundUpToNearestMultipleOf16(Screen.width);
                var targetHeight = RoundUpToNearestMultipleOf16(Screen.height);

                if (targetWidth != currentWidth || targetHeight != currentHeight)
                {
                    screenTexture = new RenderTexture(targetWidth, targetHeight, 0);
                    targetTexture = screenTexture;

                    if (shouldConvertToBgra)
                    {
                        bgrTexture    = new RenderTexture(screenTexture.width, screenTexture.height, 0, RenderTextureFormat.BGRA32);
                        targetTexture = bgrTexture;
                    }

                    currentWidth  = targetWidth;
                    currentHeight = targetHeight;
                    Debug.Log("Created target texture as needed");
                }

                Debug.Log("Native Api update");
                NativeApi.Update();
                Debug.Log("InstantPreviewInput update");
                InstantPreviewInput.Update();
                AddInstantPreviewTrackedPoseDriverWhenNeeded();

                Graphics.Blit(null, screenTexture);

                if (shouldConvertToBgra)
                {
                    Graphics.Blit(screenTexture, bgrTexture);
                }

                var cameraTexture = Frame.CameraImage.Texture;
                if (cameraTexture == null)
                {
                    Debug.Log("cameraTexture is null");
                    Debug.Log("Trying to acquire camera image bytes.");
                    // Acquire inside using so the CPU image is released again.
                    using (CameraImageBytes cameraBytes = Frame.CameraImage.AcquireCameraImageBytes())
                    {
                        if (cameraBytes.IsAvailable)
                        {
                            Debug.Log("Acquire cpu was successful. Retrying to assign cameraTexture");
                            cameraTexture = Frame.CameraImage.Texture;
                            if (cameraTexture == null)
                            {
                                Debug.Log("cameraTexture STILL NULL");
                            }
                        }
                        else
                        {
                            Debug.Log("Acquire failed");
                        }
                    }
                }
                if (!loggedAspectRatioWarning && cameraTexture != null)
                {
                    var sourceWidth            = cameraTexture.width;
                    var sourceHeight           = cameraTexture.height;
                    var sourceAspectRatio      = (float)sourceWidth / sourceHeight;
                    var destinationWidth       = Screen.width;
                    var destinationHeight      = Screen.height;
                    var destinationAspectRatio = (float)destinationWidth / destinationHeight;

                    if (Mathf.Abs(sourceAspectRatio - destinationAspectRatio) >
                        k_MaxTolerableAspectRatioDifference)
                    {
                        Debug.LogWarningFormat(k_MismatchedAspectRatioWarningFormatString, sourceWidth,
                                               sourceHeight);
                        loggedAspectRatioWarning = true;
                    }
                }

                NativeApi.SendFrame(targetTexture.GetNativeTexturePtr());
                GL.IssuePluginEvent(renderEventFunc, 1);
            }
        }
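UpdateLoop relies on a RoundUpToNearestMultipleOf16 helper that is not shown. A minimal sketch matching the name; the actual implementation may differ:

        // Hypothetical: rounds a dimension up to the next multiple of 16,
        // since some video encoders prefer such sizes (see above).
        private static int RoundUpToNearestMultipleOf16(int value)
        {
            return (value + 15) & ~15;
        }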