// P/Invoke binding for the native projection call. The declaration needs a
// DllImport attribute (and using System.Runtime.InteropServices) to resolve
// against the k4a native library; the calling convention follows the C SDK.
[DllImport("k4a", CallingConvention = CallingConvention.Cdecl)]
public static extern k4a_result_t k4a_calibration_3d_to_2d(
    [In] ref Calibration calibration,
    ref Vector3 source_point3d,
    CalibrationDeviceType source_camera,
    CalibrationDeviceType target_camera,
    out Vector2 target_point2d,
    out bool valid);
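A minimal call sketch for this binding (hedged: it assumes the declaration sits in a NativeMethods class and that a Calibration struct has already been read from the device, e.g. via Device.GetCalibration):

// Hypothetical usage: project a point 1 m in front of the depth camera
// into color camera pixel coordinates.
Vector3 point3d = new Vector3(0f, 0f, 1000f); // millimeters
k4a_result_t result = NativeMethods.k4a_calibration_3d_to_2d(
    ref calibration,
    ref point3d,
    CalibrationDeviceType.Depth,
    CalibrationDeviceType.Color,
    out Vector2 pixel2d,
    out bool valid);
// valid is false when the point does not project onto the target camera.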
Example #2
// Inverse binding: the int return value carries the native k4a_result_t.
// Float2/Float3 are this wrapper's own vector structs.
[DllImport("k4a", CallingConvention = CallingConvention.Cdecl)]
static extern int k4a_calibration_2d_to_3d(
    [In] ref Calibration calibration,
    ref Float2 source_point2d,
    float source_depth,
    CalibrationDeviceType source_camera,
    CalibrationDeviceType target_camera,
    out Float3 target_point3d,
    out bool valid);
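The inverse binding additionally needs the pixel's depth value in millimeters. A hedged sketch (the Float2 field names are assumptions):

// Hypothetical usage: unproject a depth pixel back into 3D depth-camera space.
Float2 pixel = new Float2 { X = 320f, Y = 288f }; // field names assumed
int result = k4a_calibration_2d_to_3d(
    ref calibration,
    ref pixel,
    1500f, // depth at that pixel, in millimeters
    CalibrationDeviceType.Depth,
    CalibrationDeviceType.Depth,
    out Float3 point3d,
    out bool valid);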
Example #3
        /// <summary>
        /// Transform a 3D point of a source coordinate system into a 3D point of the target coordinate system.
        /// </summary>
        /// <param name="sourcePoint3D">The 3D coordinates in millimeters representing a point in <paramref name="sourceCamera"/>.</param>
        /// <param name="sourceCamera">The current camera.</param>
        /// <param name="targetCamera">The target camera.</param>
        /// <returns>A point in 3D coordinates of <paramref name="targetCamera"/> stored in millimeters.</returns>
        public Vector3? TransformTo3D(Vector3 sourcePoint3D, CalibrationDeviceType sourceCamera, CalibrationDeviceType targetCamera)
        {
            using (LoggingTracer tracer = new LoggingTracer())
            {
                AzureKinectException.ThrowIfNotSuccess(tracer, NativeMethods.k4a_calibration_3d_to_3d(
                                                           ref this,
                                                           ref sourcePoint3D,
                                                           sourceCamera,
                                                           targetCamera,
                                                           out Vector3 target_point3d));

                return target_point3d;
            }
        }
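A hedged usage sketch (the ref this argument suggests this is an instance method on the Calibration struct; the device setup call is an assumption):

// Hypothetical usage: map a depth-space point into color camera space.
Calibration calibration = device.GetCalibration(depthMode, colorResolution);
Vector3? colorPoint = calibration.TransformTo3D(
    new Vector3(0f, 0f, 1000f), // 1000 mm in front of the depth camera
    CalibrationDeviceType.Depth,
    CalibrationDeviceType.Color);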
Example #4
        /// <summary>
        /// Transform a 3D point of a source coordinate system into a 2D pixel coordinate of the target camera.
        /// </summary>
        /// <param name="sourcePoint3D">The 3D coordinate in millimeters representing a point in <paramref name="sourceCamera"/> coordinate system.</param>
        /// <param name="sourceCamera">The current camera.</param>
        /// <param name="targetCamera">The target camera.</param>
        /// <returns>The 2D pixel coordinate in <paramref name="targetCamera"/> coordinates or null if the point is not valid in that coordinate system.</returns>
        public Vector2? TransformTo2D(Vector3 sourcePoint3D, CalibrationDeviceType sourceCamera, CalibrationDeviceType targetCamera)
        {
            using (LoggingTracer tracer = new LoggingTracer())
            {
                AzureKinectException.ThrowIfNotSuccess(tracer, NativeMethods.k4a_calibration_3d_to_2d(
                                                           ref this,
                                                           ref sourcePoint3D,
                                                           sourceCamera,
                                                           targetCamera,
                                                           out Vector2 target_point2d,
                                                           out bool valid));

                return valid ? (Vector2?)target_point2d : null;
            }
        }
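Unlike TransformTo3D, this method can return null, so callers should check HasValue. A hedged sketch:

// Hypothetical usage: project a depth-space point into color image pixels.
Vector2? pixel = calibration.TransformTo2D(
    new Vector3(0f, 0f, 1000f),
    CalibrationDeviceType.Depth,
    CalibrationDeviceType.Color);
if (pixel.HasValue)
{
    Console.WriteLine($"Projected to ({pixel.Value.X}, {pixel.Value.Y})");
}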
Example #5
        /// <summary>
        /// Creates a point cloud from a depth image.
        /// </summary>
        /// <param name="depth">The depth map to generate the point cloud from.</param>
        /// <param name="camera">The perspective the depth map is from.</param>
        /// <remarks>
        /// If the depth map is from the original depth perspective, <paramref name="camera"/> should be Depth. If it has
        /// been transformed to the color camera perspective, <paramref name="camera"/> should be Color.
        ///
        /// The returned image will be of format Custom. Each pixel will be an XYZ set of 16 bit values,
        /// therefore its stride is 2 (bytes) * 3 (x, y, z) * the width of the <paramref name="depth"/> image in pixels.
        /// </remarks>
        /// <returns>A point cloud image.</returns>
        public Image DepthImageToPointCloud(Image depth, CalibrationDeviceType camera = CalibrationDeviceType.Depth)
        {
            if (depth == null)
            {
                throw new ArgumentNullException(nameof(depth));
            }

            Image pointCloud = new Image(
                ImageFormat.Custom,
                depth.WidthPixels,
                depth.HeightPixels,
                sizeof(short) * 3 * depth.WidthPixels);

            this.DepthImageToPointCloud(depth, pointCloud, camera);

            return pointCloud;
        }
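A hedged usage sketch (it assumes a Transformation created from the device calibration and a capture pulled from the device, as in the official C# SDK):

// Hypothetical usage: build a point cloud from the latest depth frame.
using (Transformation transformation = calibration.CreateTransformation())
using (Capture capture = device.GetCapture())
using (Image pointCloud = transformation.DepthImageToPointCloud(capture.Depth))
{
    // Each pixel of pointCloud is a 16-bit XYZ triplet in millimeters.
}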
Example #6
        /// <summary>
        /// Creates a point cloud from a depth image.
        /// </summary>
        /// <param name="depth">The depth map to generate the point cloud from.</param>
        /// <param name="pointCloud">The image to store the output point cloud.</param>
        /// <param name="camera">The perspective the depth map is from.</param>
        /// <remarks>
        /// If the depth map is from the original depth perspective, <paramref name="camera"/> should be Depth. If it has
        /// been transformed to the color camera perspective, <paramref name="camera"/> should be Color.
        ///
        /// The <paramref name="pointCloud"/> image must be of format Custom. Each pixel will be an XYZ set of 16 bit values,
        /// therefore its stride must be 2 (bytes) * 3 (x, y, z) * the width of the <paramref name="depth"/> image in pixels.
        /// </remarks>
        public void DepthImageToPointCloud(Image depth, Image pointCloud, CalibrationDeviceType camera = CalibrationDeviceType.Depth)
        {
            if (depth == null)
            {
                throw new ArgumentNullException(nameof(depth));
            }

            if (pointCloud == null)
            {
                throw new ArgumentNullException(nameof(pointCloud));
            }

            lock (this)
            {
                if (this.disposedValue)
                {
                    throw new ObjectDisposedException(nameof(Transformation));
                }

                // Create a new reference to the Image objects so that they cannot be disposed while
                // we are performing the transformation
                using (Image depthReference = depth.Reference())
                using (Image pointCloudReference = pointCloud.Reference())
                {
                    // Ensure changes made to the managed memory are visible to the native layer
                    depthReference.FlushMemory();

                    AzureKinectException.ThrowIfNotSuccess(() => NativeMethods.k4a_transformation_depth_image_to_point_cloud(
                        this.handle,
                        depthReference.DangerousGetHandle(),
                        camera,
                        pointCloudReference.DangerousGetHandle()));

                    // Copy the native memory back to managed memory if required
                    pointCloudReference.InvalidateMemory();
                }
            }
        }
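Reading values back out of the Custom-format image means reinterpreting its buffer as 16-bit integers; a hedged sketch (it assumes the SDK's Image.Memory property exposes the backing buffer):

// Hypothetical sketch: read the XYZ triplet at pixel (x, y), in millimeters.
// Requires: using System.Runtime.InteropServices; (for MemoryMarshal)
Span<short> xyz = MemoryMarshal.Cast<byte, short>(pointCloud.Memory.Span);
int x = 320, y = 288;
int i = (y * pointCloud.WidthPixels + x) * 3;
short xMm = xyz[i], yMm = xyz[i + 1], zMm = xyz[i + 2];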
Example #7
        // gets the given camera intrinsics
        private void GetCameraIntrinsics(CalibrationDeviceType camType, CameraCalibration camParams, ref KinectInterop.CameraIntrinsics intr)
        {
            Intrinsics camIntr = camParams.Intrinsics;

            if (camIntr.Parameters.Length < 15)
            {
                throw new System.Exception("Intrinsics length is less than expected: " + camIntr.Parameters.Length);
            }

            intr = new KinectInterop.CameraIntrinsics();

            intr.cameraType = (int)camType;
            intr.width      = camParams.ResolutionWidth;
            intr.height     = camParams.ResolutionHeight;

            // 0        float cx;
            // 1        float cy;
            intr.ppx = camIntr.Parameters[0];
            intr.ppy = camIntr.Parameters[1];

            // 2        float fx;            /**< Focal length x */
            // 3        float fy;            /**< Focal length y */
            intr.fx = camIntr.Parameters[2];
            intr.fy = camIntr.Parameters[3];

            // 4        float k1;
            // 5        float k2;
            // 6        float k3;
            // 7        float k4;
            // 8        float k5;
            // 9        float k6;
            intr.distCoeffs    = new float[6];
            intr.distCoeffs[0] = camIntr.Parameters[4];
            intr.distCoeffs[1] = camIntr.Parameters[5];
            intr.distCoeffs[2] = camIntr.Parameters[6];
            intr.distCoeffs[3] = camIntr.Parameters[7];
            intr.distCoeffs[4] = camIntr.Parameters[8];
            intr.distCoeffs[5] = camIntr.Parameters[9];

            if (camIntr.Type == CalibrationModelType.Theta)
            {
                intr.distType = KinectInterop.DistortionType.Theta;
            }
            else if (camIntr.Type == CalibrationModelType.Polynomial3K)
            {
                intr.distType = KinectInterop.DistortionType.Polynomial3K;
            }
            else if (camIntr.Type == CalibrationModelType.Rational6KT)
            {
                intr.distType = KinectInterop.DistortionType.Rational6KT;
            }
            else
            {
                intr.distType = (KinectInterop.DistortionType)camIntr.Type;
            }

            // 10            float codx;
            // 11            float cody;
            intr.codx = camIntr.Parameters[10];
            intr.cody = camIntr.Parameters[11];

            // 12            float p2;
            // 13            float p1;
            intr.p2 = camIntr.Parameters[12];
            intr.p1 = camIntr.Parameters[13];

            // 14           float metric_radius;
            intr.maxRadius = camIntr.Parameters[14];

            EstimateFOV(intr);
        }
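A hedged call sketch for this helper (ColorCameraCalibration is assumed to be the per-camera calibration field on the SDK's Calibration struct):

// Hypothetical usage, inside the same class:
KinectInterop.CameraIntrinsics colorIntr = new KinectInterop.CameraIntrinsics();
GetCameraIntrinsics(CalibrationDeviceType.Color, calibration.ColorCameraCalibration, ref colorIntr);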
// Native binding behind the DepthImageToPointCloud wrappers above.
[DllImport("k4a", CallingConvention = CallingConvention.Cdecl)]
public static extern k4a_result_t k4a_transformation_depth_image_to_point_cloud(
    k4a_transformation_t transformation_handle,
    k4a_image_t depth_image,
    CalibrationDeviceType camera,
    k4a_image_t xyz_image);
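Strung together, the raw native workflow behind the managed wrapper in Example #6 looks roughly like this (the k4a_transformation_create and k4a_image_create binding signatures are assumptions modeled on the C SDK):

// Hypothetical sketch of the native call sequence.
k4a_transformation_t handle = NativeMethods.k4a_transformation_create(ref calibration);
NativeMethods.k4a_image_create(
    ImageFormat.Custom,
    depthWidth,
    depthHeight,
    depthWidth * 3 * sizeof(short), // stride: 2 bytes * 3 channels * width
    out k4a_image_t xyzImage);
k4a_result_t result = NativeMethods.k4a_transformation_depth_image_to_point_cloud(
    handle,
    depthImage,
    CalibrationDeviceType.Depth,
    xyzImage);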