# unknownuser6666's picture
# Upload folder using huggingface_hub
# 663494c verified
import copy
from typing import Dict, Optional
import numpy as np
import utils.calibration as calib_utils
################## box in the object's own coordinate
def create_bb_points(size: np.ndarray) -> np.ndarray:
    """Create bounding box's 8 corners in the object coordinate
    Note: we assume that the location of the object is at the center
    of the object, not the bottom center
    Args:
        size: (3, ), length, width and height, obtained from
            carla_utils.vec2np(obj.bounding_box.extent)
    Returns:
        corners: 8 x 4, homogeneous points in the object coordinate
    Carla use left-handed coordinate, world coordinate:
        +x -> front
        +y -> right
        +z -> up
          4 -------- 5
         /|         /|
        7 -------- 6 .
        | |        | |
        . 0 -------- 1
        |/         |/
        3 -------- 2
    e.g., if take points of 0, 1, 2, 3, it will return the box in bottom
    if take points of 0, 3, 4, 7, it will return the box on the left
    """
    # half extents along length (x), width (y), height (z)
    half_l, half_w, half_h = size[0] / 2, size[1] / 2, size[2] / 2
    # sign pattern of (x, y, z) for corner indices 0..7, matching the
    # ASCII diagram above
    signs = np.array(
        [
            [+1, -1, -1],  # 0
            [+1, +1, -1],  # 1
            [-1, +1, -1],  # 2
            [-1, -1, -1],  # 3
            [+1, -1, +1],  # 4
            [+1, +1, +1],  # 5
            [-1, +1, +1],  # 6
            [-1, -1, +1],  # 7
        ],
        dtype=np.float64,
    )
    xyz = signs * np.array([half_l, half_w, half_h], dtype=np.float64)
    # append the homogeneous coordinate (w = 1) as the 4th column
    corners = np.hstack((xyz, np.ones((8, 1))))
    return corners
def box_in_obj_to_world(box_in_object: np.ndarray, transform: np.ndarray) -> np.ndarray:
    """Map box corners from the object frame into the world frame.

    Args:
        box_in_object: (8, 4) homogeneous corners in the object coordinate
        transform: (4, 4) object-to-world transformation
    Returns:
        (8, 4) homogeneous corners in the world coordinate
    """
    # (4, 4) @ (4, 8) -> (4, 8), then transpose back to (8, 4)
    return (transform @ box_in_object.T).T
################## box in the world coordinate
def get_box_in_world(
    location: np.ndarray, rotation: np.ndarray, size: np.ndarray
) -> np.ndarray:
    """Build the 8-corner 3D bounding box directly in the world coordinate.

    Args:
        location: object position in the world, as accepted by
            calib_utils.create_transform
        rotation: object orientation, as accepted by
            calib_utils.create_transform
        size: (3, ), length, width and height of the object
    Returns:
        (8, 4) homogeneous corners in the world coordinate
    """
    # object -> world transformation from the calibration helper
    obj_to_world: np.ndarray = calib_utils.create_transform(location, rotation)  # 4 x 4
    # corners in the object's own frame, then mapped into the world
    corners_obj: np.ndarray = create_bb_points(size)  # 8 x 4
    return box_in_obj_to_world(corners_obj, obj_to_world)  # 8 x 4
################## box in the ego vehicle's coordinate
def box_in_world_to_ego(
    box_in_world: np.ndarray, ego_transform: np.ndarray
) -> np.ndarray:
    """Convert bbox in the world coordinate to the ego coordinate

    Args:
        box_in_world: (8, 4), 8 corners in the world coordinate
        ego_transform: (4, 4), transformation from ego to the world coordinate
    Returns:
        box_in_ego: (8, 4), 8 corners in the ego coordinate
    """
    # invert ego->world to obtain world->ego
    world2ego = np.linalg.inv(ego_transform)  # 4 x 4
    # work on a copy so the caller's array is never modified in place
    corners = copy.copy(box_in_world)  # 8 x 4
    # (4, 4) @ (4, 8) -> (4, 8), transposed back to (8, 4)
    return (world2ego @ corners.T).T
################## box in the LiDAR's coordinate
def box_in_ego_to_lidar(box_in_ego: np.ndarray, ego2lidar: Optional[np.ndarray] = None) -> np.ndarray:
    """Convert bbox in the ego coordinate to the lidar coordinate

    Args:
        box_in_ego: (8, 4), 8 corners in the ego coordinate
        ego2lidar: (4, 4) ego-to-lidar transformation; when omitted, it is
            derived by inverting the calibration's lidar->ego transform
    Returns:
        (8, 4), 8 corners in the lidar coordinate
    """
    if ego2lidar is None:
        # default: invert the calibrated lidar -> ego extrinsics
        ego2lidar = np.linalg.inv(calib_utils.lidar_to_ego_transform())  # 4 x 4
    # copy first so the input array is left untouched
    corners = copy.copy(box_in_ego)  # 8 x 4
    # (4, 4) @ (4, 8) -> (4, 8), transposed back to (8, 4)
    return (ego2lidar @ corners.T).T
def box_in_lidar_to_ego(box_in_lidar: np.ndarray, lidar2ego: Optional[np.ndarray] = None) -> np.ndarray:
    """Convert bbox in the lidar coordinate to the ego coordinate

    Args:
        box_in_lidar: (8, 4), 8 corners in the lidar coordinate
        lidar2ego: (4, 4) lidar-to-ego transformation; must be provided
    Returns:
        (8, 4), 8 corners in the ego coordinate
    Raises:
        ValueError: if ``lidar2ego`` is not provided
    """
    # The original code computed a calib_utils default here and then
    # unconditionally assert-failed (dead code).  Keep the "must be
    # provided" contract, but raise a real exception instead of using
    # ``assert``, which is stripped under ``python -O``.
    if lidar2ego is None:
        raise ValueError('No lidar2ego is provided!!!')
    # avoid in-place operation on the caller's array
    box_in_lidar = copy.copy(box_in_lidar)  # 8 x 4
    # apply transformation: (4, 4) @ (4, 8) -> (4, 8) -> (8, 4)
    box_in_ego = np.dot(lidar2ego, np.transpose(box_in_lidar)).transpose()  # 8 x 4
    return box_in_ego
################## box in the camera/image's coordinate
def box_in_ego_to_camera(box_in_ego: np.ndarray, cam_param: Dict) -> np.ndarray:
    """Convert bbox in the ego coordinate to the camera coordinate

    Args:
        box_in_ego: (8, 4), 8 corners in the ego coordinate
        cam_param: camera parameter dict; if it contains an 'ego2cam'
            (4, 4) matrix that transform is used directly, otherwise the
            transform is derived by inverting the camera-to-ego extrinsics
            from calib_utils.
    Returns:
        (8, 4), 8 corners in the camera coordinate
    """
    # avoid in-place operation on the caller's array
    box_in_ego = copy.copy(box_in_ego)  # 8 x 4
    # get transformation by default
    if 'ego2cam' in cam_param:
        # fixed annotation typo: np.narray -> np.ndarray
        ego2cam: np.ndarray = cam_param['ego2cam']
    else:
        ego2cam = np.linalg.inv(
            calib_utils.camera_to_ego_transform(cam_param)
        )  # 4 x 4
    # apply transformation: (4, 4) @ (4, 8) -> (4, 8) -> (8, 4)
    box_in_camera = np.dot(ego2cam, np.transpose(box_in_ego)).transpose()  # 8 x 4
    return box_in_camera
def box_projection_to_image(
    box_in_camera: np.ndarray,
    intrinsics: np.ndarray,
    front_dist_threshold: float = 1.5,
    left_hand_system: bool = True,
) -> Optional[np.ndarray]:
    """Project 3D box in camera coordinate to 2D box in image coordinate

    Args:
        box_in_camera: (8, 4), 3D box in the camera coordinate
        intrinsics: (4, 4) camera intrinsic matrix
        front_dist_threshold: minimum depth used to filter out bad
            projections due to the box being too close / behind the camera
        left_hand_system: if True, reorder axes from Carla's left-handed
            camera frame to the right-handed (KITTI-style) camera frame
            before projecting
    Returns:
        box_2d_in_image: (8, 2), 2D box in the image coordinate, or None
        if no corner lies farther than ``front_dist_threshold`` in front
        of the camera
    """
    box_in_camera = copy.copy(box_in_camera).transpose()  # 4 x 8
    # camera rotation correction, from left to right hand coordinate system
    # change to camera coordinate as in other datasets such as KITTI
    if left_hand_system:
        box_in_camera = np.vstack(
            (
                box_in_camera[1, :],
                -box_in_camera[2, :],
                box_in_camera[0, :],
                box_in_camera[3, :],
            )
        )  # 4 x 8
    # apply projection and normalization of the homogeneous coordinate
    box_2d_in_image = (intrinsics @ box_in_camera).T  # 8 x 4
    box_2d_in_image[:, 0] = box_2d_in_image[:, 0] / box_2d_in_image[:, 2]
    box_2d_in_image[:, 1] = box_2d_in_image[:, 1] / box_2d_in_image[:, 2]
    # accept the box when at least one corner is clearly in front of the
    # camera.  NOTE(review): ``np.any`` keeps partially-behind boxes —
    # confirm this is intended vs. requiring all corners in front.
    if np.any(box_2d_in_image[:, 2] > front_dist_threshold):
        return box_2d_in_image[:, :2]  # 8 x 2
    return None