protected virtual void OnFrameSampleAcquired(VideoCaptureSample sample)
        {
            lock (latestImageBytesLockObject)
            {
                //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
                //You can reuse this byte[] until you need to resize it (for whatever reason).
                if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
                {
                    _latestImageBytes = new byte[sample.dataLength];
                }
                sample.CopyRawImageDataIntoBuffer(_latestImageBytes);
            }

            float[] cameraToWorldMatrixAsFloat;
            if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
            {
                sample.Dispose();
                return;
            }

            float[] projectionMatrixAsFloat;
            if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
            {
                sample.Dispose();
                return;
            }

            CameraIntrinsics camIntrinsics = sample.GetCameraIntrinsics();

            // Right now we pass things across the pipe as a float array and then convert them back into a UnityEngine.Matrix4x4 using a utility method
            projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);
            cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);

            sample.Dispose();

            didUpdateThisFrame = true;
            didUpdateImageBufferInCurrentFrame = true;

            if (hasInitEventCompleted && frameMatAcquired != null)
            {
                Mat mat = new Mat(cameraParams.cameraResolutionHeight, cameraParams.cameraResolutionWidth, CvType.CV_8UC4);
                Utils.copyToMat <byte> (latestImageBytes, mat);

                if (_rotate90Degree)
                {
                    Mat rotatedFrameMat = new Mat(cameraParams.cameraResolutionWidth, cameraParams.cameraResolutionHeight, CvType.CV_8UC4);
                    Core.rotate(mat, rotatedFrameMat, Core.ROTATE_90_CLOCKWISE);
                    mat.Dispose();

                    FlipMat(rotatedFrameMat, _flipVertical, _flipHorizontal);

                    frameMatAcquired.Invoke(rotatedFrameMat, projectionMatrix, cameraToWorldMatrix, camIntrinsics);
                }
                else
                {
                    FlipMat(mat, _flipVertical, _flipHorizontal);

                    frameMatAcquired.Invoke(mat, projectionMatrix, cameraToWorldMatrix, camIntrinsics);
                }
            }
        }
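A minimal consumer sketch for the frameMatAcquired event invoked above (the parameter list follows the Invoke call; the handler name and the _previewTexture/_previewRenderer fields are assumptions, and Utils.matToTexture2D is the OpenCVForUnity helper that copies a Mat into a Texture2D):

void OnFrameMatAcquired(Mat mat, Matrix4x4 projectionMatrix, Matrix4x4 cameraToWorldMatrix, CameraIntrinsics intrinsics)
{
    UnityEngine.WSA.Application.InvokeOnAppThread(() =>
    {
        // Size the preview texture once to match the (possibly rotated) Mat.
        if (_previewTexture == null)
        {
            _previewTexture = new Texture2D(mat.cols(), mat.rows(), TextureFormat.RGBA32, false);
            _previewRenderer.sharedMaterial.SetTexture("_MainTex", _previewTexture);
        }

        Utils.matToTexture2D(mat, _previewTexture);
        mat.Dispose(); // assumption: the subscriber is responsible for releasing the Mat
    }, false);
}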
Example #2
    void ApplyCapture(byte[] data, HoloLensCameraStream.Resolution size, float[] camera2WorldFloat, float[] projectionFloat, bool setPostion = false)
    {
        this.camera2WorldFloat = camera2WorldFloat;
        this.projectionFloat   = projectionFloat;

        this.camera2WorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(camera2WorldFloat);
        this.projectionMatrix   = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionFloat);

        var pictureRenderer = gameObject.GetComponent <Renderer>();

        pictureRenderer.material = new Material(Shader.Find("AR/HolographicImageBlend"));
        var pictureTexture = new Texture2D(size.width, size.height, TextureFormat.BGRA32, false);

        // Upload bytes to texture
        pictureTexture.LoadRawTextureData(data);
        pictureTexture.wrapMode = TextureWrapMode.Clamp;
        pictureTexture.Apply();

        // Set material parameters
        pictureRenderer.sharedMaterial.SetTexture("_MainTex", pictureTexture);
        pictureRenderer.sharedMaterial.SetMatrix("_WorldToCameraMatrix", camera2WorldMatrix.inverse);
        pictureRenderer.sharedMaterial.SetMatrix("_CameraProjectionMatrix", projectionMatrix);
        pictureRenderer.sharedMaterial.SetFloat("_VignetteScale", 0f);

        this.Resolution = new HoloLensCameraStream.Resolution(pictureTexture.width, pictureTexture.height);
        this.HeadPos    = Camera.main.transform.position;

        // time to enable tap-to-place
        pictureRenderer.enabled = true;
    }
Example #3
 /// <summary>
 /// For each detection, returns a tuple of label, confidence, and the world-space direction
 /// in which to shoot a ray toward the detection's center. camera2WorldMatrix, projectionMatrix,
 /// Resolution and Predictions are members of this class rather than parameters.
 /// </summary>
 /// <returns>One (label, confidence, ray direction) tuple per prediction.</returns>
 private IEnumerable <Tuple <string, float, Vector3> > GetRectCentersInWorldCoordinates()
 {
     foreach (var p in Predictions)
     {
         var centerX   = p.X + p.Width / 2;
         var centerY   = p.Y + p.Height / 2;
         var direction = LocatableCameraUtils.PixelCoordToWorldCoord(camera2WorldMatrix, projectionMatrix, Resolution, new Vector2(centerX, centerY));
         yield return(new Tuple <string, float, Vector3>(p.Label, p.Confidence, direction));
     }
 }
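The directions yielded above can be anchored in the scene the same way other examples on this page do it: raycast from the camera position (column 3 of camera2WorldMatrix) along each direction. A hedged sketch; labelPrefab is an assumed field:

 private void PlaceDetectionMarkers()
 {
     Vector3 cameraPosition = camera2WorldMatrix.GetColumn(3);

     foreach (Tuple<string, float, Vector3> detection in GetRectCentersInWorldCoordinates())
     {
         RaycastHit hit;
         if (Physics.Raycast(new Ray(cameraPosition, detection.Item3), out hit))
         {
             GameObject marker = Instantiate(labelPrefab, hit.point, Quaternion.identity);
             marker.GetComponentInChildren<TextMesh>().text = detection.Item1 + " " + detection.Item2.ToString("P0");
         }
     }
 }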
Example #4
    static Tuple <Vector3, Quaternion> GetPositionFromCamera(float[] camera2WorldFloat)
    {
        Matrix4x4 camera2WorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(camera2WorldFloat);

        Vector3 inverseNormal = -camera2WorldMatrix.GetColumn(2);

        // Position the canvas object slightly in front of the real world web camera.
        Vector3 position = camera2WorldMatrix.GetColumn(3) - camera2WorldMatrix.GetColumn(2);
        var     rotation = Quaternion.LookRotation(inverseNormal, camera2WorldMatrix.GetColumn(1));

        return(new Tuple <Vector3, Quaternion>(position, rotation));
    }
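Hedged usage of the helper above; previewQuad is an assumed scene object and camera2WorldFloat is the float[] returned by TryGetCameraToWorldMatrix:

    Tuple<Vector3, Quaternion> cameraPose = GetPositionFromCamera(camera2WorldFloat);
    previewQuad.transform.position = cameraPose.Item1;
    previewQuad.transform.rotation = cameraPose.Item2;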
Example #5
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrixAsFloat;
        if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            return;
        }

        // Right now we pass things across the pipe as a float array and then convert them back into a UnityEngine.Matrix4x4 using a utility method
        Matrix4x4 cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        Matrix4x4 projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        //This is where we actually use the image data
        //TODO: Create a class like VideoPanel for the next code
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            _videoTexture.LoadRawTextureData(_latestImageBytes);
            _videoTexture.wrapMode = TextureWrapMode.Clamp;
            _videoTexture.Apply();

            _videoPanelUIRenderer.sharedMaterial.SetTexture("_MainTex", _videoTexture);
            _videoPanelUIRenderer.sharedMaterial.SetMatrix("_WorldToCameraMatrix", cameraToWorldMatrix.inverse);
            _videoPanelUIRenderer.sharedMaterial.SetMatrix("_CameraProjectionMatrix", projectionMatrix);
            _videoPanelUIRenderer.sharedMaterial.SetFloat("_VignetteScale", 1.3f);


            Vector3 inverseNormal = -cameraToWorldMatrix.GetColumn(2);
            // Position the canvas object slightly in front of the real world web camera.
            Vector3 imagePosition = cameraToWorldMatrix.GetColumn(3) - cameraToWorldMatrix.GetColumn(2);

            _videoPanelUI.gameObject.transform.position = imagePosition;
            _videoPanelUI.gameObject.transform.rotation = Quaternion.LookRotation(inverseNormal, cameraToWorldMatrix.GetColumn(1));
        }, false);
    }
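These handlers are wired to the plugin's VideoCapture instance in one of two ways; a hedged sketch (the FrameSampleAcquired event name is recalled from the HoloLensCameraStream plugin and should be verified against the version in use, while RequestNextFrameSample appears verbatim in the recording example further below):

    // Push model: the plugin raises the event for every captured frame (assumed event name).
    _videoCapture.FrameSampleAcquired += OnFrameSampleAcquired;

    // Pull model: request frames one at a time, as the recording example below does.
    _videoCapture.RequestNextFrameSample(OnFrameSampleAcquired);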
Example #6
    // Everything above here is boilerplate from the VideoPanelApp.cs project
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrixAsFloat;
        if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            return;
        }

        // Right now we pass things across the pipe as a float array and then convert them back into a UnityEngine.Matrix4x4 using a utility method
        Matrix4x4 cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        Matrix4x4 projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            _videoPanelUI.SetBytes(_latestImageBytes);

            Vector3 inverseNormal = LocatableCameraUtils.GetNormalOfPose(cameraToWorldMatrix);
            Vector3 imagePosition = cameraToWorldMatrix.MultiplyPoint(Vector3.zero);

            // Throw out an indicator in the composite space 2 meters in front of us using the corresponding view matrices
            float distanceToMarker      = 2f;
            Vector3 pointOnFaceBoxPlane = imagePosition - inverseNormal * distanceToMarker;
            Plane surfacePlane          = new Plane(inverseNormal, pointOnFaceBoxPlane);

            Vector2 targetPoint = new Vector2(_resolution.width * 0.5f, _resolution.height * 0.5f);
            Vector3 mdPoint     = LocatableCameraUtils.PixelCoordToWorldCoord(cameraToWorldMatrix, projectionMatrix, _resolution, targetPoint, surfacePlane);

            _targetIndicator.SetPosition(mdPoint);
            _targetIndicator.SetText("P");

            _videoPanelUI.gameObject.SetActive(false);
        }, false);
    }
    private void trackFilteredObject(ObjectTracker ot, Mat threshold)
    {
        Mat temp = new Mat();

        threshold.copyTo(temp);

        List <MatOfPoint> contours = new List <MatOfPoint>();
        Mat hierarchy = new Mat();

        Imgproc.findContours(temp, contours, hierarchy, Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);

        if (hierarchy.rows() > 0)
        {
            for (int index = 0; index >= 0; index = (int)hierarchy.get(0, index)[0])
            {
                Moments moment = Imgproc.moments(contours[index]);
                double  area   = moment.m00;

                if (area > 10 * 10)
                {
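                    // Centroid of the contour from its image moments: x = m10 / m00, y = m01 / m00 (m00 is the area).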
                    int x = (int)(moment.get_m10() / area);
                    int y = (int)(moment.get_m01() / area);

                    Vector2 point  = new Vector2(x, y);
                    Vector3 dirRay = LocatableCameraUtils.PixelCoordToWorldCoord(_cameraToWorldMatrix, _projectionMatrix, _resolution, point);

                    Application.InvokeOnAppThread(() => {
                        ot.Sphere.transform.position = Camera.main.transform.position + new Vector3(0, ot.offset, 0);
                        SphereCollider collider      = ot.Sphere.GetComponent <SphereCollider>();

                        // Invert the ray origin and direction so the raycast can hit the sphere collider from outside
                        Vector3 newPosRay = Camera.main.transform.position + dirRay * (collider.radius * 2);

                        Ray ray = new Ray(newPosRay, -dirRay);
                        RaycastHit hit;

                        if (Physics.Raycast(ray, out hit, collider.radius * 3))
                        {
                            Vector3 pos = hit.point;
                            ot.gameObject.transform.position = pos;
                        }
                    }, false);
                }
            }
        }
    }
Example #8
 void OnFrameCaptured(HoloLensCameraStream.VideoCaptureSample sample)
 {
     if (VideoFrameBuffer == null)
     {
         VideoFrameBuffer = new byte[sample.dataLength];
     }
     float[] matrix = null;
     if (File.Exists(Path.Combine(OutputDirectory, "proj.txt")) == false)
     {
         if (sample.TryGetProjectionMatrix(out matrix) == false)
         {
             return;
         }
         Matrix4x4 m       = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(matrix);
         Vector4   column2 = m.GetColumn(2);
         column2.x = -column2.x;
         column2.y = -column2.y;
         m.SetColumn(2, column2);
         float halfWidth  = VideoResolution.width / 2f;
         float halfHeight = VideoResolution.height / 2f;
         float Fx         = m.GetColumn(0).x * halfWidth;
         float Fy         = m.GetColumn(1).y * halfHeight;
         float offsetX    = m.GetColumn(2).x;
         float offsetY    = m.GetColumn(2).y;
         float Cx         = halfWidth + offsetX * halfWidth;
         float Cy         = halfHeight + offsetY * halfHeight;
         Utilities.SaveFile(string.Format("{0} {1}\n{2} {3} {4} {5}", VideoResolution.width, VideoResolution.height, Fx, Fy, Cx, Cy),
                            Path.Combine(OutputDirectory, "proj.txt"));
     }
     if (sample.TryGetCameraToWorldMatrix(out matrix) == false)
     {
         return;
     }
     sample.CopyRawImageDataIntoBuffer(VideoFrameBuffer);
     for (int i = 0; i < 16; ++i)
     {
         VideoBinaryWriter.Write(matrix[i]);
     }
     VideoBinaryWriter.Write(VideoFrameBuffer);
 }
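The proj.txt block above rescales the normalized projection matrix into pixel-unit intrinsics. A small helper sketch that factors out that math (an assumed helper, not part of the sample; it mirrors the sample's negation of column 2):

 // fx, fy: focal lengths in pixels; cx, cy: principal point in pixels.
 static Vector4 GetPixelIntrinsics(Matrix4x4 projection, float width, float height)
 {
     float halfWidth  = width / 2f;
     float halfHeight = height / 2f;
     float fx = projection.m00 * halfWidth;
     float fy = projection.m11 * halfHeight;
     float cx = halfWidth + (-projection.m02) * halfWidth;   // column 2 negated, as above
     float cy = halfHeight + (-projection.m12) * halfHeight;
     return new Vector4(fx, fy, cx, cy);
 }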
    private void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        float[] cameraToWorldMatrixAsFloat;
        float[] projectionMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false || sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            Debug.Log("Failed to get camera to world or projection matrix");
            return;
        }

        _cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        _projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        sample.Dispose();

        Mat frameBGRA = new Mat(_resolution.height, _resolution.width, CvType.CV_8UC4);

        frameBGRA.put(0, 0, _latestImageBytes);
        Mat frameBGR = new Mat(_resolution.height, _resolution.width, CvType.CV_8UC3);

        Imgproc.cvtColor(frameBGRA, frameBGR, Imgproc.COLOR_BGRA2BGR);

        Mat HSV       = new Mat();
        Mat threshold = new Mat();

        // Track objects
        foreach (ObjectTracker ot in _trackers)
        {
            Imgproc.cvtColor(frameBGR, HSV, Imgproc.COLOR_BGR2HSV);
            Core.inRange(HSV, new Scalar(ot.minH, ot.minSaturation, ot.minLight), new Scalar(ot.maxH, 255, 255), threshold);
            morphOps(threshold);
            trackFilteredObject(ot, threshold);
        }
    }
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (latestImageBytes == null || latestImageBytes.Length < sample.dataLength)
        {
            latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] outMatrix;
        if (sample.TryGetCameraToWorldMatrix(out outMatrix) == false)
        {
            return;
        }
        Matrix4x4 cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(outMatrix);

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        if (sample.TryGetProjectionMatrix(out outMatrix) == false)
        {
            return;
        }
        Matrix4x4 projectionMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(outMatrix);

        sample.Dispose();

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            int length   = latestImageBytes.Length;
            int size     = Marshal.SizeOf(latestImageBytes[0]) * length;
            IntPtr inPtr = Marshal.AllocHGlobal(size);
            try
            {
                Marshal.Copy(latestImageBytes, 0, inPtr, length);
                IntPtr outPtr = ProcessFrame(inPtr, resolution.width, resolution.height);
                if (outPtr != IntPtr.Zero)
                {
                    int[] cvOutput = new int[4];
                    Marshal.Copy(outPtr, cvOutput, 0, 4);

                    // Get the world coordinates of the keyboard center
                    Vector2 pixelCoordinates = new Vector2(cvOutput[0], cvOutput[1]);
                    Vector3 worldDirection   = LocatableCameraUtils.PixelCoordToWorldCoord(cameraToWorldMatrix, projectionMatrix, resolution, pixelCoordinates);
                    Ray ray = new Ray(cameraToWorldMatrix.GetColumn(3), worldDirection);
                    RaycastHit hitInfo;
                    if (Physics.Raycast(ray, out hitInfo))
                    {
                        keyboardPosition = hitInfo.point;
                    }

                    // Get the world coordinates of the keyboard's eigenvector
                    pixelCoordinates = new Vector2(cvOutput[2], cvOutput[3]);
                    worldDirection   = LocatableCameraUtils.PixelCoordToWorldCoord(cameraToWorldMatrix, projectionMatrix, resolution, pixelCoordinates);
                    ray = new Ray(cameraToWorldMatrix.GetColumn(3), worldDirection);
                    if (Physics.Raycast(ray, out hitInfo))
                    {
                        keyboardOrientation = hitInfo.point - keyboardPosition;
                    }
                }

                /*byte[] outBytes = new byte[length];
                 * Marshal.Copy(outPtr, outBytes, 0, length);
                 * videoPanelUI.SetBytes(outBytes);*/
            }
            finally
            {
                // Clean memory
                Marshal.FreeHGlobal(inPtr);
                FreeMemory();
                GC.Collect();
            }
        }, false);

        if (stopVideoMode)
        {
            videoCapture.StopVideoModeAsync(OnVideoModeStopped);
        }
    }
Example #11
    private void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        // Allocate byteBuffer
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }

        // Fill frame struct
        SampleStruct s = new SampleStruct();

        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);
        s.data = _latestImageBytes;

        // Get the cameraToWorldMatrix and projectionMatrix
        if (!sample.TryGetCameraToWorldMatrix(out s.camera2WorldMatrix) || !sample.TryGetProjectionMatrix(out s.projectionMatrix))
        {
            return;
        }

        sample.Dispose();

        Matrix4x4 camera2WorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(s.camera2WorldMatrix);
        Matrix4x4 projectionMatrix   = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(s.projectionMatrix);

        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            // Upload bytes to texture
            _pictureTexture.LoadRawTextureData(s.data);
            _pictureTexture.wrapMode = TextureWrapMode.Clamp;
            _pictureTexture.Apply();

            // Set material parameters
            _pictureRenderer.sharedMaterial.SetTexture("_MainTex", _pictureTexture);
            _pictureRenderer.sharedMaterial.SetMatrix("_WorldToCameraMatrix", camera2WorldMatrix.inverse);
            _pictureRenderer.sharedMaterial.SetMatrix("_CameraProjectionMatrix", projectionMatrix);
            _pictureRenderer.sharedMaterial.SetFloat("_VignetteScale", 0f);

            Vector3 inverseNormal = -camera2WorldMatrix.GetColumn(2);
            // Position the canvas object slightly in front of the real world web camera.
            Vector3 imagePosition = camera2WorldMatrix.GetColumn(3) - camera2WorldMatrix.GetColumn(2);

            _picture.transform.position = imagePosition;
            _picture.transform.rotation = Quaternion.LookRotation(inverseNormal, camera2WorldMatrix.GetColumn(1));
        }, false);

        // Stop the video and reproject five reference pixels (the image center and the four corners)
        if (stopVideo)
        {
            _videoCapture.StopVideoModeAsync(onVideoModeStopped);

            // Get the ray directions
            Vector3 imageCenterDirection   = LocatableCameraUtils.PixelCoordToWorldCoord(camera2WorldMatrix, projectionMatrix, _resolution, new Vector2(_resolution.width / 2, _resolution.height / 2));
            Vector3 imageTopLeftDirection  = LocatableCameraUtils.PixelCoordToWorldCoord(camera2WorldMatrix, projectionMatrix, _resolution, new Vector2(0, 0));
            Vector3 imageTopRightDirection = LocatableCameraUtils.PixelCoordToWorldCoord(camera2WorldMatrix, projectionMatrix, _resolution, new Vector2(_resolution.width, 0));
            Vector3 imageBotLeftDirection  = LocatableCameraUtils.PixelCoordToWorldCoord(camera2WorldMatrix, projectionMatrix, _resolution, new Vector2(0, _resolution.height));
            Vector3 imageBotRightDirection = LocatableCameraUtils.PixelCoordToWorldCoord(camera2WorldMatrix, projectionMatrix, _resolution, new Vector2(_resolution.width, _resolution.height));

            UnityEngine.WSA.Application.InvokeOnAppThread(() =>
            {
                // Paint the rays on the 3d world
                _laser.shootLaserFrom(camera2WorldMatrix.GetColumn(3), imageCenterDirection, 10f, _centerMaterial);
                _laser.shootLaserFrom(camera2WorldMatrix.GetColumn(3), imageTopLeftDirection, 10f, _topLeftMaterial);
                _laser.shootLaserFrom(camera2WorldMatrix.GetColumn(3), imageTopRightDirection, 10f, _topRightMaterial);
                _laser.shootLaserFrom(camera2WorldMatrix.GetColumn(3), imageBotLeftDirection, 10f, _botLeftMaterial);
                _laser.shootLaserFrom(camera2WorldMatrix.GetColumn(3), imageBotRightDirection, 10f, _botRightMaterial);
            }, false);
        }
    }
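SampleStruct is not shown in this excerpt; an assumed shape, inferred from how its fields are filled above:

    public struct SampleStruct
    {
        public byte[]  data;                // raw BGRA32 frame bytes
        public float[] camera2WorldMatrix;  // 16 floats, converted with ConvertFloatArrayToMatrix4x4
        public float[] projectionMatrix;    // 16 floats, converted the same way
    }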
    private void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        // upload image bytes
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        // transform matrix
        float[] cameraToWorldMatrixAsFloat;
        float[] projectionMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false || sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            Debug.Log("Failed to get camera to world or projection matrix");
            return;
        }

        _cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        _projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        //focal_x = _projectionMatrix[0, 0] * _resolution.width / 2;
        //focal_y = _projectionMatrix[1, 1] * _resolution.height / 2;
        //cx = _resolution.width / 2 + _resolution.width / 2 * _projectionMatrix[0, 2];
        //cy = _resolution.height / 2 + _resolution.height / 2 * _projectionMatrix[1, 2];
        // Debug.Log("focal_x: " + focal_x.ToString("f5") + " focal_y: " + focal_y.ToString("f5") + " cx: " + cx.ToString("f5") + " cy: " + cy.ToString("f5"));

        sample.Dispose();

        // Opencv mat conversion: to RGB mat
        Mat frameBGRA = new Mat(_resolution.height, _resolution.width, CvType.CV_8UC4);

        //Array.Reverse(_latestImageBytes);
        frameBGRA.put(0, 0, _latestImageBytes);
        //Core.flip(frameBGRA, frameBGRA, 0);

        Mat frameBGR = new Mat(_resolution.height, _resolution.width, CvType.CV_8UC3);

        Imgproc.cvtColor(frameBGRA, frameBGR, Imgproc.COLOR_BGRA2BGR);
        Mat RGB = new Mat();

        Imgproc.cvtColor(frameBGR, RGB, Imgproc.COLOR_BGR2RGB);
        // Target detection: marker location in webcam
        Vector4 location = markerDetection.DetectMarkers(RGB);

        // Run on the application (main) thread, because some Unity operations are not allowed on the capture event thread
        Application.InvokeOnAppThread(() =>
        {
            if (MarkerDetection.success == true)// if detected
            {
                Debug.Log("catch!");
                // target: marker -> webcam -> world.
                Vector3 target = _cameraToWorldMatrix * location;
                Debug.Log("target: " + target.ToString("f5"));
                Target.transform.position = target;

                // render with the right matrix
                Matrix4x4 V = Camera.main.GetStereoViewMatrix(Camera.StereoscopicEye.Right);
                Matrix4x4 P = GL.GetGPUProjectionMatrix(Camera.main.GetStereoProjectionMatrix(Camera.StereoscopicEye.Right), false); // openGL to DX
                Matrix4x4 M = Target.transform.localToWorldMatrix;
                Target.GetComponent <Renderer>().material.SetMatrix("MATRIX_MVP", P * V * M);                                        // render with custom pipeline
                Target.GetComponent <Renderer>().enabled = true;                                                                     // show now
                Vector4 vertex = P * V * M * new Vector4(0f, 0f, 0f, 1f);                                                            // vertex location on the rendering plane

                //Vector3 cam = V.GetColumn(3);

                //// ----------------------------------------------------
                //// collide a cam-target ray with display collider
                //Vector3 camToTarget = target - cam;
                //Ray ray = new Ray(cam, camToTarget);
                //RaycastHit hit;

                //if (Physics.Raycast(ray, out hit, 2f))
                //{
                //    Vector3 pos = hit.point; // physical hit point
                //    Target.transform.position = pos;
                //    Debug.Log("hit pos: " + pos.ToString("f5"));

                //    Matrix4x4 M = Target.transform.localToWorldMatrix;
                //    Target.GetComponent<Renderer>().material.SetMatrix("MATRIX_MVP", P * V * M); // render with custom pipeline
                //    Target.GetComponent<Renderer>().enabled = true; // show now
                //    // Vector4 vertex = P * V * M * new Vector4(0f, 0f, 0f, 1f); // vertex location on the rendering plane
                //}

                // ---------------------------------------------------
                // get eye position in eyecam
                string[] eyedata  = UDPCommunication.message.Split(',');
                Vector4 eye_pos_e = new Vector4(float.Parse(eyedata[0]) / 1000, float.Parse(eyedata[1]) / 1000, float.Parse(eyedata[2]) / 1000, 1.0f); // in [m]
                Debug.Log("eye in eyecam: " + eye_pos_e.ToString("f5"));

                // eye: eyecam -> webcam -> world.
                Vector3 eye_pos_w = _cameraToWorldMatrix * _eyecamTowebcam * eye_pos_e;
                Debug.Log("eye in world: " + eye_pos_w.ToString("f5"));

                // ----------------------------------------------------
                // collide a eye-target ray with display collider
                Vector3 eyeToTarget = target - eye_pos_w;
                Ray ray_revised     = new Ray(eye_pos_w, eyeToTarget);
                RaycastHit hit_revised;

                if (Physics.Raycast(ray_revised, out hit_revised, 2f))
                {
                    Vector4 pos_revised = hit_revised.point; // physical hit point
                    //Revised.transform.position = pos_revised;
                    Debug.Log("hit_revised pos: " + pos_revised.ToString("f5"));
                    pos_revised.w = 1.0f;
                    // calculate the hit vertex and scale w to the same depth
                    Vector4 vertex_hit        = P * V * pos_revised;
                    float scale               = vertex.w / vertex_hit.w;
                    Vector4 vertex_hit_scaled = new Vector4(vertex_hit.x * scale, vertex_hit.y * scale, vertex_hit.z, vertex_hit.w * scale);

                    // retrieve the world location
                    Vector3 pos_scaled         = V.inverse * P.inverse * vertex_hit_scaled;
                    Revised.transform.position = pos_scaled;
                    // position the revised target and render it
                    Matrix4x4 M_revised = Revised.transform.localToWorldMatrix;
                    Revised.GetComponent <Renderer>().material.SetMatrix("MATRIX_MVP", P * V * M_revised);
                    Revised.GetComponent <Renderer>().enabled = true;

                    Debug.Log("webcameraToWorldMatrix:\n" + _cameraToWorldMatrix.ToString("f5"));
                    Debug.Log("WorldToRightMatrix:\n" + V.ToString("f5"));
                    Debug.Log("RightGLProjectionMatrix:\n" + P.ToString("f5"));

                    Debug.Log("detected target location: " + Target.transform.position.ToString("f5"));
                    Debug.Log("revised target location: " + Revised.transform.position.ToString("f5"));

                    Debug.Log("rendering vertex: " + vertex.ToString("f5"));
                    Debug.Log("hit vertex: " + vertex_hit.ToString("f5"));
                    Debug.Log("revised rendering vertex: " + vertex_hit_scaled.ToString("f5"));
                }
            }
            else
            {
                Revised.GetComponent <Renderer>().enabled = false;
                Target.GetComponent <Renderer>().enabled  = false; // hide
            }
        }, false);
    }
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        m_NumFrames++;

        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrixAsFloat;
        if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            return;
        }

        // Right now we pass things across the pipe as a float array and then convert them back into a UnityEngine.Matrix4x4 using a utility method
        Matrix4x4 cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        Matrix4x4 projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            int numFrames = m_NumFrames;

            _videoPanelUI.SetBytes(_latestImageBytes);

            Texture2D tex   = _videoPanelUI.rawImage.texture as Texture2D;
            byte[] jpgBytes = tex.EncodeToJPG();

            /*
             * Vector3 inverseNormal = LocatableCameraUtils.GetNormalOfPose(cameraToWorldMatrix);
             * Vector3 imagePosition = cameraToWorldMatrix.MultiplyPoint(Vector3.zero);
             *
             * // Throw out an indicator in the composite space 2 meters in front of us using the corresponding view matrices
             * float distanceToMarker = 2f;
             * Vector3 pointOnFaceBoxPlane = imagePosition - inverseNormal * distanceToMarker;
             * Plane surfacePlane = new Plane(inverseNormal, pointOnFaceBoxPlane);
             *
             * Vector2 targetPoint = new Vector2(_resolution.width * 0.5f, _resolution.height * 0.5f);
             * Vector3 mdPoint = LocatableCameraUtils.PixelCoordToWorldCoord(cameraToWorldMatrix, projectionMatrix, _resolution, targetPoint, surfacePlane);
             */

            //string infoText = String.Format("Position: {0}\t{1}\t{2}\nRotation: {3}\t{4}\t{5}", position.x, position.y, position.z, eulerAngles.x, eulerAngles.y, eulerAngles.z);
            //Debug.Log(infoText);


            float timestamp = Time.time - m_RecordingStartTime;

            PoseData pose  = new PoseData();
            pose.timestamp = timestamp;
            pose.position  = cameraToWorldMatrix.MultiplyPoint(Vector3.zero);

            Quaternion rotation = Quaternion.LookRotation(-cameraToWorldMatrix.GetColumn(2), cameraToWorldMatrix.GetColumn(1));
            pose.eulerAngles    = rotation.eulerAngles;

            if (PlacingAxisMarkers && AxisPrefab != null)
            {
                Instantiate(AxisPrefab, pose.position, rotation);
            }


            m_CurrentPoses.Add(pose);

            SaveFrame(jpgBytes, numFrames);

            if (m_CurrentRecordingState == RecordingState.Recording)
            {
                this._videoCapture.RequestNextFrameSample(OnFrameSampleAcquired);
            }
        }, false);
    }
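PoseData is not shown in this excerpt; an assumed shape that matches the fields written above:

    public class PoseData
    {
        public float   timestamp;    // seconds since recording started
        public Vector3 position;     // camera position in world space
        public Vector3 eulerAngles;  // camera rotation as Euler angles
    }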
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrixAsFloat;
        if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            return;
        }

        // Right now we pass things across the pipe as a float array and then convert them back into a UnityEngine.Matrix4x4 using a utility method
        Matrix4x4 cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        Matrix4x4 projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            Vector3 cameraWorldPosition    = cameraToWorldMatrix.MultiplyPoint(Vector3.zero);
            Quaternion cameraWorldRotation = Quaternion.LookRotation(-cameraToWorldMatrix.GetColumn(2), cameraToWorldMatrix.GetColumn(1));


            _videoPanelUI.SetBytes(_latestImageBytes);

            Texture2D tex = _videoPanelUI.rawImage.texture as Texture2D;

            Color32[] c        = tex.GetPixels32();
            IntPtr imageHandle = getImageHandle(c);
            newImage(imageHandle);

            // do any detection here...

            int chessX = topDownCameraCalibrationData["chess_x"].AsInt;
            int chessY = topDownCameraCalibrationData["chess_y"].AsInt;
            float chessSquareMeters = topDownCameraCalibrationData["chess_square_size_meters"].AsFloat;
            double cam_mtx_fx       = HoloCam_fx;
            double cam_mtx_fy       = HoloCam_fy;
            double cam_mtx_cx       = HoloCam_cx;
            double cam_mtx_cy       = HoloCam_cy;
            double dist_k1          = HoloCam_k1;
            double dist_k2          = HoloCam_k2;
            double dist_p1          = HoloCam_p1;
            double dist_p2          = HoloCam_p2;
            double dist_k3          = HoloCam_k3;

            bool gotValidPose = findExtrinsics(chessX, chessY, chessSquareMeters, cam_mtx_fx, cam_mtx_fy, cam_mtx_cx, cam_mtx_cy, dist_k1, dist_k2, dist_p1, dist_p2, dist_k3);

            if (gotValidPose)
            {
                Debug.Log("Got valid pose!");

                List <double> rvec = new List <double>();
                rvec.Add(GetRvec0());
                rvec.Add(GetRvec1());
                rvec.Add(GetRvec2());

                List <double> tvec = new List <double>();
                tvec.Add(GetTvec0());
                tvec.Add(GetTvec1());
                tvec.Add(GetTvec2());

                Debug.Log("rvec between HoloLens camera and chessboard is " + rvec[0] + " " + rvec[1] + " " + rvec[2]);
                Debug.Log("tvec between HoloLens camera and chessboard is " + tvec[0] + " " + tvec[1] + " " + tvec[2]);



                // NOTE: At this point, we should STOP the HoloLens frame processing by:
                // 1: setting _processingCameraFrames to FALSE
                // 2: calling StopCameraProcessing()

                // the data we have available is:
                // --- rvec and tvec (the pose data between the HoloLens camera and the chessboard)
                // --- topDownCameraCalibrationData ... the received data from the top-down camera app, which includes:
                // ------ topDownCameraCalibrationData["rvec"] ... the pose between the top-down camera and the chessboard
                // ------ topDownCameraCalibrationData["tvec"]
                // ------ topDownCameraCalibrationData["fx"], fy, cx, cy (the top-down camera matrix)
                // ------ topDownCameraCalibrationData["k1"], k2, p1, p2, k3 (the top-down camera distortion coefficients)
                // --- cameraWorldPosition and cameraWorldRotation (the pose data in Unity's coord system between the HoloLens camera and the world)


                // StopCameraProcessing();
                // _processingCameraFrames = false;
            }


            //// Fetch the processed image and render
            imageHandle = getProcessedImage();
            Marshal.Copy(imageHandle, processedImageData, 0, _resolution.width * _resolution.height * 4);
            tex.LoadRawTextureData(processedImageData);
            tex.Apply();



            if (_processingCameraFrames)
            {
                this._videoCapture.RequestNextFrameSample(OnFrameSampleAcquired);
            }
        }, false);
    }
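As a follow-up to the comment block above, a hedged sketch of converting the recovered rvec/tvec into OpenCV matrices with OpenCVForUnity before combining them with the Unity-side pose (handedness conversion between OpenCV and Unity coordinates still has to be handled separately):

    // Sketch only: rotation vector -> 3x3 rotation matrix via Rodrigues.
    Mat rvecMat = new Mat(3, 1, CvType.CV_64FC1);
    rvecMat.put(0, 0, rvec[0], rvec[1], rvec[2]);

    Mat rotationMat = new Mat(3, 3, CvType.CV_64FC1);
    Calib3d.Rodrigues(rvecMat, rotationMat);

    Mat tvecMat = new Mat(3, 1, CvType.CV_64FC1);
    tvecMat.put(0, 0, tvec[0], tvec[1], tvec[2]);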