// Example #1
// 0
        /// <summary>
        /// Caches the Kinect sensor from the manager, allocates the depth/color
        /// buffers and display textures at the sensor's native resolutions, converts
        /// the device calibration, and generates the point-cloud mesh.
        /// </summary>
        void Start()
        {
            _KinectSensor = _AzureKinectManager.Sensor;
            if (_KinectSensor == null)
            {
                return;
            }

            int depthWidth  = _KinectSensor.DepthImageWidth;
            int depthHeight = _KinectSensor.DepthImageHeight;
            int colorWidth  = _KinectSensor.ColorImageWidth;
            int colorHeight = _KinectSensor.ColorImageHeight;

            Debug.Log("ColorResolution: " + colorWidth + "x" + colorHeight);
            Debug.Log("DepthResolution: " + depthWidth + "x" + depthHeight);

            // Buffers/textures in depth-camera space (color transformed into depth).
            _TransformedColorImageTexture = new Texture2D(depthWidth, depthHeight, TextureFormat.BGRA32, false);
            _DepthRawData      = new byte[depthWidth * depthHeight * sizeof(ushort)];
            _DepthImageTexture = new Texture2D(depthWidth, depthHeight, TextureFormat.R16, false);

            // Buffers/textures in color-camera space (depth transformed into color).
            _ColorImageTexture            = new Texture2D(colorWidth, colorHeight, TextureFormat.BGRA32, false);
            _TransformedDepthRawData      = new byte[colorWidth * colorHeight * sizeof(ushort)];
            _TransformedDepthImageTexture = new Texture2D(colorWidth, colorHeight, TextureFormat.R16, false);

            _PointCloudRenderer = GetComponent<PointCloudRenderer>();

            // Convert the device calibration into the local K4A calibration type.
            K4A.Calibration calibration = new K4A.Calibration();
            calibration.DepthCameraCalibration = CreateCalibrationCamera(_KinectSensor.DeviceCalibration.DepthCameraCalibration, depthWidth, depthHeight);
            calibration.ColorCameraCalibration = CreateCalibrationCamera(_KinectSensor.DeviceCalibration.ColorCameraCalibration, colorWidth, colorHeight);

#if COLOR_TO_DEPTH
            _PointCloudRenderer.GenerateMesh(calibration, K4A.CalibrationType.Depth);
#else
            _PointCloudRenderer.GenerateMesh(calibration, K4A.CalibrationType.Color);
#endif
        }
 /// <summary>
 /// One-time setup: stores the sensor reference and creates the Butterworth
 /// low-pass filter sized to the tracked position array. Calls after a
 /// successful initialization (or with a null sensor) are no-ops.
 /// </summary>
 /// <param name="kinectSensor">Sensor to read from; ignored when null.</param>
 public void Initialize(AzureKinectSensor kinectSensor)
 {
     if (_Initialized)
     {
         return;
     }

     _KinectSensor = kinectSensor;
     if (_KinectSensor == null)
     {
         return;
     }

     _LowPassFilter = new ButterworthFilter(_Order, _SamplingFrequency, _CutoffFrequency, (uint)_KinectPos.Length);
     _Initialized   = true;
 }
// Example #3
// 0
        /// <summary>
        /// Main entry point. Builds a Microsoft \psi pipeline around an Azure Kinect
        /// sensor (color + depth + IMU + body tracking) and fuses the streams into a
        /// single message for per-frame processing.
        /// </summary>
        public static void Main()
        {
            // camera resolution settings
            const ColorResolution resolution   = ColorResolution.R720p;
            const int             widthSource  = 1280;
            const int             heightSource = 720;

            // down sampled resolution
            const int    widthOutput       = 80;
            const int    heightOutput      = 45;
            const double scaleFactorWidth  = (double)widthOutput / widthSource;
            const double scaleFactorHeight = (double)heightOutput / heightSource;

            // background subtraction beyond this depth
            const double maxDepth = 1.0; // meters

            const SensorOrientation initialOrientation = SensorOrientation.Default;

            // LatestMessage: drop stale frames instead of queueing when processing lags.
            using (var pipeline = Pipeline.Create("AzureKinectSample", DeliveryPolicy.LatestMessage))
            {
                var azureKinect = new AzureKinectSensor(
                    pipeline,
                    new AzureKinectSensorConfiguration()
                {
                    OutputImu                = true,
                    ColorResolution          = resolution,
                    DepthMode                = DepthMode.WFOV_Unbinned,
                    CameraFPS                = FPS.FPS15,
                    BodyTrackerConfiguration = new AzureKinectBodyTrackerConfiguration()
                    {
                        CpuOnlyMode       = true, // false if CUDA supported GPU available
                        SensorOrientation = initialOrientation,
                    },
                });

                StringBuilder     sb = new StringBuilder();
                SensorOrientation lastOrientation = (SensorOrientation)(-1); // detect orientation changes

                // consuming color, depth, IMU, body tracking, calibration
                // Join = synchronized stream fusion (IMU matched within a 10 ms tolerance);
                // Pair = best-effort pairing with the most recent bodies/calibration message.
                azureKinect.ColorImage.Resize(widthOutput, heightOutput)
                .Join(azureKinect.DepthImage)
                .Join(azureKinect.Imu, TimeSpan.FromMilliseconds(10))
                .Pair(azureKinect.Bodies)
                .Pair(azureKinect.DepthDeviceCalibrationInfo)
                .Do(message =>
                {
                    var(color, depth, imu, bodies, calib) = message;
                    // NOTE(review): the remainder of this lambda — and of Main() — is not
                    // visible in this excerpt; the method continues past this point, and
                    // scaleFactorWidth/scaleFactorHeight/maxDepth are presumably used there.
        /// <summary>
        /// Grabs the sensor, allocates the raw/encoded/decoded depth buffers and the
        /// display textures, binds each texture to its preview renderer, and
        /// constructs the native-plugin Temporal RVL encoder/decoder pair.
        /// </summary>
        void Start()
        {
            _KinectSensor = _AzureKinectManager.Sensor;
            if (_KinectSensor == null)
            {
                return;
            }

            int depthWidth     = _KinectSensor.DepthImageWidth;
            int depthHeight    = _KinectSensor.DepthImageHeight;
            int depthImageSize = depthWidth * depthHeight;

            Debug.Log("ColorResolution: " + _KinectSensor.ColorImageWidth + "x" + _KinectSensor.ColorImageHeight);
            Debug.Log("DepthResolution: " + depthWidth + "x" + depthHeight);

            // Per-frame working buffers: raw 16-bit depth, encoded stream,
            // decode output, and the encoder/decoder frame difference.
            _DepthRawData     = new byte[depthImageSize * sizeof(ushort)];
            _EncodedDepthData = new byte[depthImageSize];
            _DecodedDepthData = new short[depthImageSize];
            _Diff             = new short[depthImageSize];

            // Display textures; depth-derived ones use the single-channel 16-bit format.
            _DepthImageTexture        = new Texture2D(depthWidth, depthHeight, TextureFormat.R16, false);
            _DecodedDepthImageTexture = new Texture2D(depthWidth, depthHeight, TextureFormat.R16, false);
            _DiffImageTexture         = new Texture2D(depthWidth, depthHeight, TextureFormat.R16, false);
            _ColorImageTexture        = new Texture2D(_KinectSensor.ColorImageWidth, _KinectSensor.ColorImageHeight, TextureFormat.BGRA32, false);

            // Give each preview object its own material instance and bind its texture.
            MeshRenderer depthRenderer = _DepthImageObject.GetComponent<MeshRenderer>();
            depthRenderer.sharedMaterial = new Material(_DepthVisualizer);
            depthRenderer.sharedMaterial.SetTexture("_DepthTex", _DepthImageTexture);

            MeshRenderer decodedRenderer = _DecodedDepthImageObject.GetComponent<MeshRenderer>();
            decodedRenderer.sharedMaterial = new Material(_DepthVisualizer);
            decodedRenderer.sharedMaterial.SetTexture("_DepthTex", _DecodedDepthImageTexture);

            MeshRenderer diffRenderer = _DiffImageObject.GetComponent<MeshRenderer>();
            diffRenderer.sharedMaterial = new Material(_DiffVisualizer);
            diffRenderer.sharedMaterial.SetTexture("_DepthTex", _DiffImageTexture);

            MeshRenderer colorRenderer = _ColorImageObject.GetComponent<MeshRenderer>();
            colorRenderer.sharedMaterial = new Material(_UnlitTextureMaterial);
            colorRenderer.sharedMaterial.SetTexture("_MainTex", _ColorImageTexture);

            // Native-plugin Temporal RVL codec, sized for one depth frame.
            _TrvlEncoder = new NativePlugin.TemporalRVLEncoder(depthImageSize, 10, 2);
            _TrvlDecoder = new NativePlugin.TemporalRVLDecoder(depthImageSize);
        }
        /// <summary>
        /// Caches the Kinect sensor, allocates the depth/diff/encoded buffers and the
        /// display textures, creates the Temporal RVL encoder/decoder pair, and
        /// converts the device calibration for color-to-depth registration.
        /// Logs an error and leaves <c>_Initialized</c> false when no sensor is available.
        /// </summary>
        public void Initialize()
        {
            _KinectSensor = _AzureKinectManager.Sensor;
            if (_KinectSensor != null)
            {
                Debug.Log("ColorResolution: " + _KinectSensor.ColorImageWidth + "x" + _KinectSensor.ColorImageHeight);
                Debug.Log("DepthResolution: " + _KinectSensor.DepthImageWidth + "x" + _KinectSensor.DepthImageHeight);

                _DepthImageSize        = _KinectSensor.DepthImageWidth * _KinectSensor.DepthImageHeight;
                // Depth pixels are 16-bit; sizeof(ushort) matches the other initializers
                // in this file (previously sizeof(short) — same byte count, now consistent).
                _DepthRawData          = new byte[_DepthImageSize * sizeof(ushort)];
                _Diff                  = new short[_DepthImageSize];
                // NOTE(review): sized by the depth image, presumably because color is
                // registered into depth space before encoding — confirm against the encoder.
                _EncodedColorImageData = new byte[_DepthImageSize];

                _DepthImageTexture        = new Texture2D(_KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight, TextureFormat.R16, false);
                _DecodedDepthImageTexture = new Texture2D(_KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight, TextureFormat.R16, false);
                _DiffImageTexture         = new Texture2D(_KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight, TextureFormat.R16, false);
                // NOTE(review): deliberately depth-sized (color-to-depth transform below),
                // unlike the color-resolution texture used elsewhere — verify intended.
                _ColorImageTexture        = new Texture2D(_KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight, TextureFormat.BGRA32, false);

                // Temporal RVL codec; the (10, 2) parameters match the other setup in this file.
                _TrvlEncoder = new TemporalRVLEncoder(_DepthImageSize, 10, 2);
                _TrvlDecoder = new TemporalRVLDecoder(_DepthImageSize);

                CameraCalibration deviceDepthCameraCalibration = _KinectSensor.DeviceCalibration.DepthCameraCalibration;
                CameraCalibration deviceColorCameraCalibration = _KinectSensor.DeviceCalibration.ColorCameraCalibration;

                // Convert the device calibration into the local K4A calibration type.
                _Calibration = new K4A.Calibration();
                _Calibration.DepthCameraCalibration = CreateCalibrationCamera(deviceDepthCameraCalibration, _KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight);
                _Calibration.ColorCameraCalibration = CreateCalibrationCamera(deviceColorCameraCalibration, _KinectSensor.ColorImageWidth, _KinectSensor.ColorImageHeight);

                _CalibrationType = K4A.CalibrationType.Depth; // Color to depth

                _Initialized = true;
            }
            else
            {
                Debug.LogError("KinectSensor is null!");
            }
        }