Code example #1
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);
        sample.Dispose();

        _videoPanelUI.SetBytes(_latestImageBytes);
    }
Code example #2
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrix;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrix) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrix;
        if (sample.TryGetProjectionMatrix(out projectionMatrix) == false)
        {
            return;
        }

        sample.Dispose();

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            _videoPanelUI.SetBytes(_latestImageBytes);
            //text.text = _resolution.width + "x"+ _resolution.height+"+"+ _latestImageBytes.Length;
            RGBUpdated = true;
        }, false);
    }
Code example #3
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }

        // Copy the frame into the buffer; the pixel data is edited on the app thread below.
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrix;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrix) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrix;
        if (sample.TryGetProjectionMatrix(out projectionMatrix) == false)
        {
            return;
        }

        sample.Dispose();

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            int PixelSize = 4;                      // BGRA32: four bytes per pixel
            int width     = _resolution.width;
            int height    = _resolution.height;
            int Line      = width * PixelSize;      // byte stride of one row

            // The frame arrives mirrored; swap pixels across each row's midpoint to recover the normal image
            for (int i = 0; i < height; ++i)
            {
                for (int j = 0; j + 4 < Line / 2; j += 4)
                {
                    Swap<byte>(ref _latestImageBytes[Line * i + j],     ref _latestImageBytes[Line * i + Line - j - 4]);
                    Swap<byte>(ref _latestImageBytes[Line * i + j + 1], ref _latestImageBytes[Line * i + Line - j - 3]);
                    Swap<byte>(ref _latestImageBytes[Line * i + j + 2], ref _latestImageBytes[Line * i + Line - j - 2]);
                    Swap<byte>(ref _latestImageBytes[Line * i + j + 3], ref _latestImageBytes[Line * i + Line - j - 1]);

                    // reduce the alpha
                    //_latestImageBytes[Line * i + j + 3] = _latestImageBytes[Line * i + Line - j - 1] = 100;
                }
            }


            _videoPanelUI.SetBytes(_latestImageBytes);
        }, true);
    }
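
Code example #3 relies on a Swap<T> helper that the snippet does not include. A minimal sketch of what such a generic by-ref swap could look like, with the name and signature inferred from the call sites above:

    // Hypothetical helper, inferred from the Swap<byte>(ref a, ref b) call sites.
    // Not part of the original file.
    static void Swap<T>(ref T a, ref T b)
    {
        T tmp = a;
        a     = b;
        b     = tmp;
    }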
Code example #4
    // Everything above here is boilerplate from the VideoPanelApp.cs project
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrixAsFloat;
        if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            return;
        }

        // Right now we pass things across the pipe as a float array then convert them back into UnityEngine.Matrix using a utility method
        Matrix4x4 cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        Matrix4x4 projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        // Done reading from the sample; dispose it so the capture pipeline can release the frame.
        sample.Dispose();

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            _videoPanelUI.SetBytes(_latestImageBytes);

            Vector3 inverseNormal = LocatableCameraUtils.GetNormalOfPose(cameraToWorldMatrix);
            Vector3 imagePosition = cameraToWorldMatrix.MultiplyPoint(Vector3.zero);

            // Throw out an indicator in the composite space 2 meters in front of us using the corresponding view matrices
            float distanceToMarker      = 2f;
            Vector3 pointOnFaceBoxPlane = imagePosition - inverseNormal * distanceToMarker;
            Plane surfacePlane          = new Plane(inverseNormal, pointOnFaceBoxPlane);

            Vector2 targetPoint = new Vector2(_resolution.width * 0.5f, _resolution.height * 0.5f);
            Vector3 mdPoint     = LocatableCameraUtils.PixelCoordToWorldCoord(cameraToWorldMatrix, projectionMatrix, _resolution, targetPoint, surfacePlane);

            _targetIndicator.SetPosition(mdPoint);
            _targetIndicator.SetText("P");

            _videoPanelUI.gameObject.SetActive(false);
        }, false);
    }
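
LocatableCameraUtils.ConvertFloatArrayToMatrix4x4 belongs to the sample project and is not shown here. A plausible sketch, assuming the plugin returns the 16 matrix values in the column-major order that UnityEngine.Matrix4x4 uses:

    // Sketch only: rebuild a Matrix4x4 from a flat float[16].
    // Assumes column-major ordering; the real utility lives in LocatableCameraUtils.
    static Matrix4x4 ConvertFloatArrayToMatrix4x4(float[] values)
    {
        Matrix4x4 matrix = new Matrix4x4();
        for (int i = 0; i < 16; i++)
        {
            matrix[i] = values[i]; // Matrix4x4's indexer is column-major: index = row + column * 4
        }
        return matrix;
    }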
Code example #5
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);
        sample.Dispose();

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            _videoPanelUI.SetBytes(_latestImageBytes);
        }, false);
    }
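
For context, a handler like the one above is typically registered on the plugin's VideoCapture object. A sketch of that wiring, assuming the HoloLensCameraStream API as used in its sample scenes; the resolution and frame-rate values are placeholders:

    // Called once a VideoCapture instance has been created (e.g. via VideoCapture.CreateAsync).
    // OnVideoModeStarted is the usual completion callback (not shown here).
    void OnVideoCaptureCreated(VideoCapture videoCapture)
    {
        _videoCapture = videoCapture;

        CameraParameters cameraParams = new CameraParameters();
        cameraParams.cameraResolutionWidth  = 1280; // placeholder
        cameraParams.cameraResolutionHeight = 720;  // placeholder
        cameraParams.frameRate              = 30;   // placeholder
        cameraParams.pixelFormat            = CapturePixelFormat.BGRA32;

        videoCapture.FrameSampleAcquired += OnFrameSampleAcquired;
        videoCapture.StartVideoModeAsync(cameraParams, OnVideoModeStarted);
    }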
Code example #6
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        lock (_mainThreadActions)
        {
            if (_mainThreadActions.Count > 2)
            {
                return;
            }
        }
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrix;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrix) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrix;
        if (sample.TryGetProjectionMatrix(out projectionMatrix) == false)
        {
            return;
        }

        sample.Dispose();

        //Debug.Log("Got frame: " + sample.FrameWidth + "x" + sample.FrameHeight + " | " + sample.pixelFormat);

        Enqueue(() => _videoPanelUI.SetBytes(_latestImageBytes));
    }
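
Code example #6 throttles frames with a _mainThreadActions queue and an Enqueue helper that are not part of the snippet. One minimal way to implement that pattern (the names come from the call sites; the Update drain loop is an assumption), requiring using System; and using System.Collections.Generic;:

    // Hypothetical companions to the snippet above, inferred from its call sites.
    private readonly Queue<Action> _mainThreadActions = new Queue<Action>();

    private void Enqueue(Action action)
    {
        lock (_mainThreadActions)
        {
            _mainThreadActions.Enqueue(action);
        }
    }

    // Drain the queued actions on Unity's main thread.
    private void Update()
    {
        lock (_mainThreadActions)
        {
            while (_mainThreadActions.Count > 0)
            {
                _mainThreadActions.Dequeue().Invoke();
            }
        }
    }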
Code example #7
File: TestScript.cs Project: ethanttbui/pianow
    // Update is called once per frame
    void Update()
    {
        byte[] imgData = ProcessFrame(640, 480);
        videoPanelUI.SetBytes(imgData);
        FreeMemory();
    }
Code example #8
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        m_NumFrames++;

        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrixAsFloat;
        if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            return;
        }

        // Right now we pass things across the pipe as a float array then convert them back into UnityEngine.Matrix using a utility method
        Matrix4x4 cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        Matrix4x4 projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        // Done reading from the sample; dispose it so the capture pipeline can release the frame.
        sample.Dispose();

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            int numFrames = m_NumFrames;

            _videoPanelUI.SetBytes(_latestImageBytes);

            Texture2D tex   = _videoPanelUI.rawImage.texture as Texture2D;
            byte[] jpgBytes = tex.EncodeToJPG();

            /*
             * Vector3 inverseNormal = LocatableCameraUtils.GetNormalOfPose(cameraToWorldMatrix);
             * Vector3 imagePosition = cameraToWorldMatrix.MultiplyPoint(Vector3.zero);
             *
             * // Throw out an indicator in the composite space 2 meters in front of us using the corresponding view matrices
             * float distanceToMarker = 2f;
             * Vector3 pointOnFaceBoxPlane = imagePosition - inverseNormal * distanceToMarker;
             * Plane surfacePlane = new Plane(inverseNormal, pointOnFaceBoxPlane);
             *
             * Vector2 targetPoint = new Vector2(_resolution.width * 0.5f, _resolution.height * 0.5f);
             * Vector3 mdPoint = LocatableCameraUtils.PixelCoordToWorldCoord(cameraToWorldMatrix, projectionMatrix, _resolution, targetPoint, surfacePlane);
             */

            //string infoText = String.Format("Position: {0}\t{1}\t{2}\nRotation: {3}\t{4}\t{5}", position.x, position.y, position.z, eulerAngles.x, eulerAngles.y, eulerAngles.z);
            //Debug.Log(infoText);


            float timestamp = Time.time - m_RecordingStartTime;

            PoseData pose  = new PoseData();
            pose.timestamp = timestamp;
            pose.position  = cameraToWorldMatrix.MultiplyPoint(Vector3.zero);

            Quaternion rotation = Quaternion.LookRotation(-cameraToWorldMatrix.GetColumn(2), cameraToWorldMatrix.GetColumn(1));
            pose.eulerAngles    = rotation.eulerAngles;

            if (PlacingAxisMarkers && AxisPrefab != null)
            {
                Instantiate(AxisPrefab, pose.position, rotation);
            }


            m_CurrentPoses.Add(pose);

            SaveFrame(jpgBytes, numFrames);

            if (m_CurrentRecordingState == RecordingState.Recording)
            {
                this._videoCapture.RequestNextFrameSample(OnFrameSampleAcquired);
            }
        }, false);
    }
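
PoseData is defined elsewhere in the recording project. Judging only by the fields the example assigns, a minimal serializable version (using System and UnityEngine) might look like this; the original class may carry more members:

    // Minimal sketch of the PoseData container populated above.
    [Serializable]
    public class PoseData
    {
        public float   timestamp;   // seconds since recording started
        public Vector3 position;    // camera position in Unity world space
        public Vector3 eulerAngles; // camera orientation as Euler angles
    }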
Code example #9
    void OnFrameSampleAcquired(VideoCaptureSample sample)
    {
        //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
        //You can reuse this byte[] until you need to resize it (for whatever reason).
        if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
        {
            _latestImageBytes = new byte[sample.dataLength];
        }
        sample.CopyRawImageDataIntoBuffer(_latestImageBytes);

        //If you need to get the cameraToWorld matrix for purposes of compositing you can do it like this
        float[] cameraToWorldMatrixAsFloat;
        if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
        {
            return;
        }

        //If you need to get the projection matrix for purposes of compositing you can do it like this
        float[] projectionMatrixAsFloat;
        if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
        {
            return;
        }

        // Right now we pass things across the pipe as a float array then convert them back into UnityEngine.Matrix using a utility method
        Matrix4x4 cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);
        Matrix4x4 projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);

        // Done reading from the sample; dispose it so the capture pipeline can release the frame.
        sample.Dispose();

        //This is where we actually use the image data
        UnityEngine.WSA.Application.InvokeOnAppThread(() =>
        {
            Vector3 cameraWorldPosition    = cameraToWorldMatrix.MultiplyPoint(Vector3.zero);
            Quaternion cameraWorldRotation = Quaternion.LookRotation(-cameraToWorldMatrix.GetColumn(2), cameraToWorldMatrix.GetColumn(1));


            _videoPanelUI.SetBytes(_latestImageBytes);

            Texture2D tex = _videoPanelUI.rawImage.texture as Texture2D;

            Color32[] c        = tex.GetPixels32();
            IntPtr imageHandle = getImageHandle(c);
            newImage(imageHandle);

            // do any detection here...

            int chessX = topDownCameraCalibrationData["chess_x"].AsInt;
            int chessY = topDownCameraCalibrationData["chess_y"].AsInt;
            float chessSquareMeters = topDownCameraCalibrationData["chess_square_size_meters"].AsFloat;
            double cam_mtx_fx       = HoloCam_fx;
            double cam_mtx_fy       = HoloCam_fy;
            double cam_mtx_cx       = HoloCam_cx;
            double cam_mtx_cy       = HoloCam_cy;
            double dist_k1          = HoloCam_k1;
            double dist_k2          = HoloCam_k2;
            double dist_p1          = HoloCam_p1;
            double dist_p2          = HoloCam_p2;
            double dist_k3          = HoloCam_k3;

            bool gotValidPose = findExtrinsics(chessX, chessY, chessSquareMeters, cam_mtx_fx, cam_mtx_fy, cam_mtx_cx, cam_mtx_cy, dist_k1, dist_k2, dist_p1, dist_p2, dist_k3);

            if (gotValidPose)
            {
                Debug.Log("Got valid pose!");

                List<double> rvec = new List<double>();
                rvec.Add(GetRvec0());
                rvec.Add(GetRvec1());
                rvec.Add(GetRvec2());

                List<double> tvec = new List<double>();
                tvec.Add(GetTvec0());
                tvec.Add(GetTvec1());
                tvec.Add(GetTvec2());

                Debug.Log("rvec between HoloLens camera and chessboard is " + rvec[0] + " " + rvec[1] + " " + rvec[2]);
                Debug.Log("tvec between HoloLens camera and chessboard is " + tvec[0] + " " + tvec[1] + " " + tvec[2]);



                // NOTE: At this point, we should STOP the HoloLens frame processing by:
                // 1: setting _processingCameraFrames to FALSE
                // 2: calling StopCameraProcessing()

                // the data we have available is:
                // --- rvec and tvec (the pose data between the HoloLens camera and the chessboard)
                // --- topDownCameraCalibrationData ... the received data from the top-down camera app, which includes:
                // ------ topDownCameraCalibrationData["rvec"] ... the pose between the top-down camera and the chessboard
                // ------ topDownCameraCalibrationData["tvec"]
                // ------ topDownCameraCalibrationData["fx"], fy, cx, cy (the top-down camera matrix)
                // ------ topDownCameraCalibrationData["k1"], k2, p1, p2, k3 (the top-down camera distortion coefficients)
                // --- cameraWorldPosition and cameraWorldRotation (the pose data in Unity's coord system between the HoloLens camera and the world)


                // StopCameraProcessing();
                // _processingCameraFrames = false;
            }


            // Fetch the processed image and render it
            imageHandle = getProcessedImage();
            Marshal.Copy(imageHandle, processedImageData, 0, _resolution.width * _resolution.height * 4);
            tex.LoadRawTextureData(processedImageData);
            tex.Apply();



            if (_processingCameraFrames)
            {
                this._videoCapture.RequestNextFrameSample(OnFrameSampleAcquired);
            }
        }, false);
    }
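
The calls getImageHandle, newImage, findExtrinsics, GetRvec0..2, GetTvec0..2, and getProcessedImage are native plugin entry points declared elsewhere in the project. Purely as an illustration, their managed declarations could look like the following P/Invoke sketch; the plugin name "HoloOpenCV" is a placeholder and the real signatures may differ:

    using System;
    using System.Runtime.InteropServices;
    using UnityEngine;

    public static class NativeCalibrationBindings
    {
        // Hand the managed pixel buffer to the native side and receive an opaque handle.
        [DllImport("HoloOpenCV")] public static extern IntPtr getImageHandle(Color32[] image);

        // Tell the native side a new frame is available behind the handle.
        [DllImport("HoloOpenCV")] public static extern void newImage(IntPtr imageHandle);

        // Run chessboard pose estimation with the given intrinsics and distortion coefficients.
        [DllImport("HoloOpenCV")] public static extern bool findExtrinsics(
            int chessX, int chessY, float chessSquareMeters,
            double fx, double fy, double cx, double cy,
            double k1, double k2, double p1, double p2, double k3);

        // Read back the rotation and translation vectors component by component.
        [DllImport("HoloOpenCV")] public static extern double GetRvec0();
        [DllImport("HoloOpenCV")] public static extern double GetRvec1();
        [DllImport("HoloOpenCV")] public static extern double GetRvec2();
        [DllImport("HoloOpenCV")] public static extern double GetTvec0();
        [DllImport("HoloOpenCV")] public static extern double GetTvec1();
        [DllImport("HoloOpenCV")] public static extern double GetTvec2();

        // Fetch a pointer to the processed BGRA image for readback via Marshal.Copy.
        [DllImport("HoloOpenCV")] public static extern IntPtr getProcessedImage();
    }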