file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
pcdet/models/backbones_3d/__init__.py | Python | from pcdet.models.backbones_3d.pointnet2_backbone import PointNet2Backbone, PointNet2MSG
from pcdet.models.backbones_3d.spconv_backbone import (
VoxelBackBone8x,
VoxelResBackBone8x,
)
from pcdet.models.backbones_3d.spconv_unet import UNetV2
# Registry mapping config NAME strings to 3D-backbone classes.  Despite the
# name, this `__all__` is used as a lookup dict by the model builder, not as
# the conventional export list.
__all__ = {
    "VoxelBackBone8x": VoxelBackBone8x,
    "UNetV2": UNetV2,
    "PointNet2Backbone": PointNet2Backbone,
    "PointNet2MSG": PointNet2MSG,
    "VoxelResBackBone8x": VoxelResBackBone8x,
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/pfe/__init__.py | Python | from pcdet.models.backbones_3d.pfe.voxel_set_abstraction import VoxelSetAbstraction
# Registry (NAME -> class) used by the model builder to instantiate PFE modules.
__all__ = {"VoxelSetAbstraction": VoxelSetAbstraction}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py | Python | import torch
import torch.nn as nn
from pcdet.ops.pointnet2.pointnet2_stack import (
pointnet2_modules as pointnet2_stack_modules,
)
from pcdet.ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from pcdet.utils import common_utils
def bilinear_interpolate_torch(im, x, y):
    """Bilinearly sample a 2D feature map at fractional pixel coordinates.

    Args:
        im: (H, W, C) feature map, indexed as [y, x].
        x: (N) fractional x (column) coordinates.
        y: (N) fractional y (row) coordinates.

    Returns:
        (N, C) interpolated features, one row per query point.
    """
    # Integer corner indices around each query point, clamped to the map
    # bounds (points outside the map degenerate to the nearest edge texel).
    x_lo = torch.clamp(torch.floor(x).long(), 0, im.shape[1] - 1)
    x_hi = torch.clamp(torch.floor(x).long() + 1, 0, im.shape[1] - 1)
    y_lo = torch.clamp(torch.floor(y).long(), 0, im.shape[0] - 1)
    y_hi = torch.clamp(torch.floor(y).long() + 1, 0, im.shape[0] - 1)
    # Gather the four corner feature vectors, each (N, C).
    f_tl = im[y_lo, x_lo]
    f_bl = im[y_hi, x_lo]
    f_tr = im[y_lo, x_hi]
    f_br = im[y_hi, x_hi]
    # Bilinear weights: area of the rectangle opposite each corner.
    w_tl = (x_hi.type_as(x) - x) * (y_hi.type_as(y) - y)
    w_bl = (x_hi.type_as(x) - x) * (y - y_lo.type_as(y))
    w_tr = (x - x_lo.type_as(x)) * (y_hi.type_as(y) - y)
    w_br = (x - x_lo.type_as(x)) * (y - y_lo.type_as(y))
    # Broadcast each (N,) weight across the channel dimension and blend.
    return (
        f_tl * w_tl.unsqueeze(-1)
        + f_bl * w_bl.unsqueeze(-1)
        + f_tr * w_tr.unsqueeze(-1)
        + f_br * w_br.unsqueeze(-1)
    )
class VoxelSetAbstraction(nn.Module):
    """Voxel Set Abstraction module (PV-RCNN style).

    Samples a fixed number of keypoints per scene and aggregates features
    for each keypoint from the sources listed in model_cfg.FEATURES_SOURCE:
    the BEV feature map ("bev"), the raw point cloud ("raw_points"), and/or
    multi-scale sparse-conv voxel features, then fuses the concatenation
    with a Linear+BN+ReLU head.
    """
    def __init__(
        self,
        model_cfg,
        voxel_size,
        point_cloud_range,
        num_bev_features=None,
        num_rawpoint_features=None,
        **kwargs
    ):
        super().__init__()
        self.model_cfg = model_cfg
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        SA_cfg = self.model_cfg.SA_LAYER
        self.SA_layers = nn.ModuleList()
        self.SA_layer_names = []
        # src_name -> voxel-grid downsample factor, used to recover metric
        # voxel-center coordinates in forward().
        self.downsample_times_map = {}
        c_in = 0  # running channel count of all concatenated feature sources
        for src_name in self.model_cfg.FEATURES_SOURCE:
            # "bev" and "raw_points" are handled by dedicated branches below.
            if src_name in ["bev", "raw_points"]:
                continue
            self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
            mlps = SA_cfg[src_name].MLPS
            # Prepend the input channel count to every MLP spec.
            # NOTE(review): this mutates the cfg lists in place, so building
            # the module twice from the same cfg object would double-prepend
            # — confirm the cfg is not reused.
            for k in range(len(mlps)):
                mlps[k] = [mlps[k][0]] + mlps[k]
            cur_layer = pointnet2_stack_modules.StackSAModuleMSG(
                radii=SA_cfg[src_name].POOL_RADIUS,
                nsamples=SA_cfg[src_name].NSAMPLE,
                mlps=mlps,
                use_xyz=True,
                pool_method="max_pool",
            )
            self.SA_layers.append(cur_layer)
            self.SA_layer_names.append(src_name)
            # Each MSG scale contributes its last MLP width to the output.
            c_in += sum([x[-1] for x in mlps])
        if "bev" in self.model_cfg.FEATURES_SOURCE:
            c_bev = num_bev_features
            c_in += c_bev
        if "raw_points" in self.model_cfg.FEATURES_SOURCE:
            mlps = SA_cfg["raw_points"].MLPS
            for k in range(len(mlps)):
                # Raw point feature channels exclude the 3 xyz coordinates.
                mlps[k] = [num_rawpoint_features - 3] + mlps[k]
            self.SA_rawpoints = pointnet2_stack_modules.StackSAModuleMSG(
                radii=SA_cfg["raw_points"].POOL_RADIUS,
                nsamples=SA_cfg["raw_points"].NSAMPLE,
                mlps=mlps,
                use_xyz=True,
                pool_method="max_pool",
            )
            c_in += sum([x[-1] for x in mlps])
        # Fuse all concatenated per-keypoint features down to a fixed width.
        self.vsa_point_feature_fusion = nn.Sequential(
            nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
            nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
            nn.ReLU(),
        )
        self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
        self.num_point_features_before_fusion = c_in
    def interpolate_from_bev_features(
        self, keypoints, bev_features, batch_size, bev_stride
    ):
        """Bilinearly sample BEV features at the keypoints' xy positions.

        Args:
            keypoints: (B, N, 3) keypoint coordinates in the lidar frame.
            bev_features: per-batch BEV maps; bev_features[k] is (C, H, W).
            batch_size: int
            bev_stride: downsample factor between voxel grid and BEV map.
        Returns:
            (B, N, C) per-keypoint BEV features.
        """
        # Metric xy -> fractional BEV pixel indices.
        x_idxs = (keypoints[:, :, 0] - self.point_cloud_range[0]) / self.voxel_size[0]
        y_idxs = (keypoints[:, :, 1] - self.point_cloud_range[1]) / self.voxel_size[1]
        x_idxs = x_idxs / bev_stride
        y_idxs = y_idxs / bev_stride
        point_bev_features_list = []
        for k in range(batch_size):
            cur_x_idxs = x_idxs[k]
            cur_y_idxs = y_idxs[k]
            cur_bev_features = bev_features[k].permute(1, 2, 0)  # (H, W, C)
            point_bev_features = bilinear_interpolate_torch(
                cur_bev_features, cur_x_idxs, cur_y_idxs
            )
            point_bev_features_list.append(point_bev_features.unsqueeze(dim=0))
        point_bev_features = torch.cat(point_bev_features_list, dim=0)  # (B, N, C0)
        return point_bev_features
    def get_sampled_points(self, batch_dict):
        """Sample NUM_KEYPOINTS keypoints per batch element.

        Source points are either the raw point cloud or the voxel centers,
        selected by model_cfg.POINT_SOURCE; sampling uses furthest point
        sampling (only "FPS" is implemented).

        Returns:
            keypoints: (B, NUM_KEYPOINTS, 3)
        """
        batch_size = batch_dict["batch_size"]
        if self.model_cfg.POINT_SOURCE == "raw_points":
            src_points = batch_dict["points"][:, 1:4]
            batch_indices = batch_dict["points"][:, 0].long()
        elif self.model_cfg.POINT_SOURCE == "voxel_centers":
            src_points = common_utils.get_voxel_centers(
                batch_dict["voxel_coords"][:, 1:4],
                downsample_times=1,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range,
            )
            batch_indices = batch_dict["voxel_coords"][:, 0].long()
        else:
            raise NotImplementedError
        keypoints_list = []
        for bs_idx in range(batch_size):
            bs_mask = batch_indices == bs_idx
            sampled_points = src_points[bs_mask].unsqueeze(dim=0)  # (1, N, 3)
            if self.model_cfg.SAMPLE_METHOD == "FPS":
                cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
                    sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
                ).long()
                # If the scene has fewer points than NUM_KEYPOINTS, tile the
                # valid indices until the keypoint quota is filled.
                if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
                    times = (
                        int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1
                    )
                    non_empty = cur_pt_idxs[0, : sampled_points.shape[1]]
                    cur_pt_idxs[0] = non_empty.repeat(times)[
                        : self.model_cfg.NUM_KEYPOINTS
                    ]
                keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
            elif self.model_cfg.SAMPLE_METHOD == "FastFPS":
                raise NotImplementedError
            else:
                raise NotImplementedError
            keypoints_list.append(keypoints)
        keypoints = torch.cat(keypoints_list, dim=0)  # (B, M, 3)
        return keypoints
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                keypoints: (B, num_keypoints, 3)
                multi_scale_3d_features: {
                        'x_conv4': ...
                    }
                points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
                spatial_features: optional
                spatial_features_stride: optional
        Returns:
            point_features: (N, C)
            point_coords: (N, 4)
        """
        keypoints = self.get_sampled_points(batch_dict)
        point_features_list = []
        # Source 1: bilinearly interpolated BEV features.
        if "bev" in self.model_cfg.FEATURES_SOURCE:
            point_bev_features = self.interpolate_from_bev_features(
                keypoints,
                batch_dict["spatial_features"],
                batch_dict["batch_size"],
                bev_stride=batch_dict["spatial_features_stride"],
            )
            point_features_list.append(point_bev_features)
        batch_size, num_keypoints, _ = keypoints.shape
        # Flatten keypoints to the "stacked" layout used by the stack SA ops.
        new_xyz = keypoints.view(-1, 3)
        new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
        # Source 2: set abstraction directly over the raw point cloud.
        if "raw_points" in self.model_cfg.FEATURES_SOURCE:
            raw_points = batch_dict["points"]
            xyz = raw_points[:, 1:4]
            xyz_batch_cnt = xyz.new_zeros(batch_size).int()
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (raw_points[:, 0] == bs_idx).sum()
            point_features = (
                raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None
            )
            pooled_points, pooled_features = self.SA_rawpoints(
                xyz=xyz.contiguous(),
                xyz_batch_cnt=xyz_batch_cnt,
                new_xyz=new_xyz,
                new_xyz_batch_cnt=new_xyz_batch_cnt,
                features=point_features,
            )
            point_features_list.append(
                pooled_features.view(batch_size, num_keypoints, -1)
            )
        # Source 3: set abstraction over each configured sparse-conv scale,
        # using voxel centers as support points.
        for k, src_name in enumerate(self.SA_layer_names):
            cur_coords = batch_dict["multi_scale_3d_features"][src_name].indices
            xyz = common_utils.get_voxel_centers(
                cur_coords[:, 1:4],
                downsample_times=self.downsample_times_map[src_name],
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range,
            )
            xyz_batch_cnt = xyz.new_zeros(batch_size).int()
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
            pooled_points, pooled_features = self.SA_layers[k](
                xyz=xyz.contiguous(),
                xyz_batch_cnt=xyz_batch_cnt,
                new_xyz=new_xyz,
                new_xyz_batch_cnt=new_xyz_batch_cnt,
                features=batch_dict["multi_scale_3d_features"][
                    src_name
                ].features.contiguous(),
            )
            point_features_list.append(
                pooled_features.view(batch_size, num_keypoints, -1)
            )
        # Concatenate all sources along the channel axis.
        point_features = torch.cat(point_features_list, dim=2)
        batch_idx = (
            torch.arange(batch_size, device=keypoints.device)
            .view(-1, 1)
            .repeat(1, keypoints.shape[1])
            .view(-1)
        )
        point_coords = torch.cat(
            (batch_idx.view(-1, 1).float(), keypoints.view(-1, 3)), dim=1
        )
        batch_dict["point_features_before_fusion"] = point_features.view(
            -1, point_features.shape[-1]
        )
        point_features = self.vsa_point_feature_fusion(
            point_features.view(-1, point_features.shape[-1])
        )
        batch_dict["point_features"] = point_features  # (BxN, C)
        batch_dict["point_coords"] = point_coords  # (BxN, 4)
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/pointnet2_backbone.py | Python | import torch
import torch.nn as nn
from pcdet.ops.pointnet2.pointnet2_batch import pointnet2_modules
from pcdet.ops.pointnet2.pointnet2_stack import (
pointnet2_modules as pointnet2_modules_stack,
)
from pcdet.ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_utils_stack
class PointNet2MSG(nn.Module):
    """PointNet++ backbone with multi-scale grouping (MSG).

    Encoder: a stack of set-abstraction (SA) modules that progressively
    downsample the point set.  Decoder: feature-propagation (FP) modules
    that interpolate features back up to the original points.
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        # The first 3 input channels are xyz and are handled separately.
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        skip_channel_list = [input_channels - 3]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                # Prepend the input width; the level's output width is the
                # sum of the last MLP widths across all MSG scales.
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModuleMSG(
                    npoint=self.model_cfg.SA_CONFIG.NPOINTS[k],
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get("USE_XYZ", True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            # FP input comes from the next-coarser FP level, or from the
            # deepest SA level for the coarsest FP module, plus the skip link.
            pre_channel = (
                self.model_cfg.FP_MLPS[k + 1][-1]
                if k + 1 < len(self.model_cfg.FP_MLPS)
                else channel_out
            )
            self.FP_modules.append(
                pointnet2_modules.PointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # Split a (N, 1 + 3 + C) point tensor into batch index, xyz, features.
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = pc[:, 4:].contiguous() if pc.size(-1) > 4 else None
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict["batch_size"]
        points = batch_dict["points"]
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # The batch-style PointNet++ ops require every sample in the batch
        # to contain the same number of points.
        assert xyz_batch_cnt.min() == xyz_batch_cnt.max()
        xyz = xyz.view(batch_size, -1, 3)
        features = (
            features.view(batch_size, -1, features.shape[-1])
            .permute(0, 2, 1)
            .contiguous()
            if features is not None
            else None
        )
        l_xyz, l_features = [xyz], [features]
        # Encoder: each SA module consumes the previous level's output.
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)
        # Decoder: propagate features from coarse levels back to fine ones.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
            )  # (B, C, N)
        point_features = l_features[0].permute(0, 2, 1).contiguous()  # (B, N, C)
        batch_dict["point_features"] = point_features.view(-1, point_features.shape[-1])
        batch_dict["point_coords"] = torch.cat(
            (batch_idx[:, None].float(), l_xyz[0].view(-1, 3)), dim=1
        )
        return batch_dict
class PointNet2Backbone(nn.Module):
    """
    DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723
    """
    def __init__(self, model_cfg, input_channels, **kwargs):
        # Hard guard: the class is intentionally disabled (see docstring and
        # the suspected-bug notes in forward()); instantiation always fails.
        assert (
            False
        ), "DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723"
        super().__init__()
        self.model_cfg = model_cfg
        self.SA_modules = nn.ModuleList()
        channel_in = input_channels - 3
        self.num_points_each_layer = []
        # NOTE(review): unlike PointNet2MSG this seeds the skip list with
        # input_channels (not input_channels - 3) — verify intent if revived.
        skip_channel_list = [input_channels]
        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            self.num_points_each_layer.append(self.model_cfg.SA_CONFIG.NPOINTS[k])
            mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy()
            channel_out = 0
            for idx in range(mlps.__len__()):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            self.SA_modules.append(
                pointnet2_modules_stack.StackSAModuleMSG(
                    radii=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlps=mlps,
                    use_xyz=self.model_cfg.SA_CONFIG.get("USE_XYZ", True),
                )
            )
            skip_channel_list.append(channel_out)
            channel_in = channel_out
        self.FP_modules = nn.ModuleList()
        for k in range(self.model_cfg.FP_MLPS.__len__()):
            pre_channel = (
                self.model_cfg.FP_MLPS[k + 1][-1]
                if k + 1 < len(self.model_cfg.FP_MLPS)
                else channel_out
            )
            self.FP_modules.append(
                pointnet2_modules_stack.StackPointnetFPModule(
                    mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k]
                )
            )
        self.num_point_features = self.model_cfg.FP_MLPS[0][-1]
    def break_up_pc(self, pc):
        # Split a (N, 1 + 3 + C) point tensor into batch index, xyz, features.
        batch_idx = pc[:, 0]
        xyz = pc[:, 1:4].contiguous()
        features = pc[:, 4:].contiguous() if pc.size(-1) > 4 else None
        return batch_idx, xyz, features
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                points: (num_points, 4 + C), [batch_idx, x, y, z, ...]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        batch_size = batch_dict["batch_size"]
        points = batch_dict["points"]
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        l_xyz, l_features, l_batch_cnt = [xyz], [features], [xyz_batch_cnt]
        for i in range(len(self.SA_modules)):
            new_xyz_list = []
            for k in range(batch_size):
                if len(l_xyz) == 1:
                    cur_xyz = l_xyz[0][batch_idx == k]
                else:
                    last_num_points = self.num_points_each_layer[i - 1]
                    cur_xyz = l_xyz[-1][k * last_num_points : (k + 1) * last_num_points]
                cur_pt_idxs = pointnet2_utils_stack.furthest_point_sample(
                    cur_xyz[None, :, :].contiguous(), self.num_points_each_layer[i]
                ).long()[0]
                # NOTE(review): likely bugs — `cur_xyz.shape[1]` is the
                # coordinate dim (3), not the point count (shape[0]); and
                # `cur_pt_idxs` is 1-D here (indexed with [0] above), so the
                # 2-D index `cur_pt_idxs[0, ...]` would raise.  Consistent
                # with the class-level DO-NOT-USE warning.
                if cur_xyz.shape[0] < self.num_points_each_layer[i]:
                    empty_num = self.num_points_each_layer[i] - cur_xyz.shape[1]
                    cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
                new_xyz_list.append(cur_xyz[cur_pt_idxs])
            new_xyz = torch.cat(new_xyz_list, dim=0)
            new_xyz_batch_cnt = (
                xyz.new_zeros(batch_size).int().fill_(self.num_points_each_layer[i])
            )
            li_xyz, li_features = self.SA_modules[i](
                xyz=l_xyz[i],
                features=l_features[i],
                xyz_batch_cnt=l_batch_cnt[i],
                new_xyz=new_xyz,
                new_xyz_batch_cnt=new_xyz_batch_cnt,
            )
            l_xyz.append(li_xyz)
            l_features.append(li_features)
            l_batch_cnt.append(new_xyz_batch_cnt)
        # Replace level-0 features with raw xyz+features for propagation.
        l_features[0] = points[:, 1:]
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                unknown=l_xyz[i - 1],
                unknown_batch_cnt=l_batch_cnt[i - 1],
                known=l_xyz[i],
                known_batch_cnt=l_batch_cnt[i],
                unknown_feats=l_features[i - 1],
                known_feats=l_features[i],
            )
        batch_dict["point_features"] = l_features[0]
        batch_dict["point_coords"] = torch.cat(
            (batch_idx[:, None].float(), l_xyz[0]), dim=1
        )
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/spconv_backbone.py | Python | from functools import partial
import spconv
import torch.nn as nn
def post_act_block(
    in_channels,
    out_channels,
    kernel_size,
    indice_key=None,
    stride=1,
    padding=0,
    conv_type="subm",
    norm_fn=None,
):
    """Build a sparse conv -> norm -> ReLU block.

    conv_type selects the convolution flavor: "subm" (submanifold, keeps
    the active-site set), "spconv" (regular strided sparse conv), or
    "inverseconv" (inverse conv tied to an earlier indice_key).  Any other
    value raises NotImplementedError.  All convolutions are bias-free since
    a normalization layer follows.
    """
    if conv_type == "subm":
        conv_layer = spconv.SubMConv3d(
            in_channels,
            out_channels,
            kernel_size,
            bias=False,
            indice_key=indice_key,
        )
    elif conv_type == "spconv":
        # Only this flavor uses stride/padding; the others ignore them.
        conv_layer = spconv.SparseConv3d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=False,
            indice_key=indice_key,
        )
    elif conv_type == "inverseconv":
        conv_layer = spconv.SparseInverseConv3d(
            in_channels,
            out_channels,
            kernel_size,
            indice_key=indice_key,
            bias=False,
        )
    else:
        raise NotImplementedError
    return spconv.SparseSequential(
        conv_layer,
        norm_fn(out_channels),
        nn.ReLU(),
    )
class SparseBasicBlock(spconv.SparseModule):
    """ResNet-style basic block built from two submanifold sparse convs
    with batch norm, a residual connection and ReLU activations.
    """
    expansion = 1
    def __init__(
        self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None
    ):
        super(SparseBasicBlock, self).__init__()
        assert norm_fn is not None
        # NOTE(review): given the assert above, `bias` is always True even
        # though a norm layer follows each conv (where bias=False is the
        # usual choice).  Possibly meant `norm_fn is None`; changing it now
        # would break existing checkpoints — confirm before touching.
        bias = norm_fn is not None
        self.conv1 = spconv.SubMConv3d(
            inplanes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=bias,
            indice_key=indice_key,
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=bias,
            indice_key=indice_key,
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        # Norm/activation are applied to the sparse tensor's dense feature
        # matrix in place of a SparseSequential wrapper.
        out.features = self.bn1(out.features)
        out.features = self.relu(out.features)
        out = self.conv2(out)
        out.features = self.bn2(out.features)
        if self.downsample is not None:
            identity = self.downsample(x)
        # Residual add on the feature matrices (same indice_key, so the
        # active sites line up).
        out.features += identity.features
        out.features = self.relu(out.features)
        return out
class VoxelBackBone8x(nn.Module):
    """Sparse-conv 3D backbone with 8x spatial downsampling.

    Four conv stages produce multi-scale features (x_conv1..x_conv4 at
    strides 1/2/4/8); a final z-compressing conv produces the tensor used
    by the detection head.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Grid is reversed to (z, y, x); +1 z slot follows the spconv
        # convention for this backbone.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(
                input_channels, 16, 3, padding=1, bias=False, indice_key="subm1"
            ),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key="subm1"),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(
                16,
                32,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=1,
                indice_key="spconv2",
                conv_type="spconv",
            ),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key="subm2"),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key="subm2"),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(
                32,
                64,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=1,
                indice_key="spconv3",
                conv_type="spconv",
            ),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(
                64,
                64,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=(0, 1, 1),
                indice_key="spconv4",
                conv_type="spconv",
            ),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"),
        )
        last_pad = 0
        last_pad = self.model_cfg.get("last_pad", last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(
                64,
                128,
                (3, 1, 1),
                stride=(2, 1, 1),
                padding=last_pad,
                bias=False,
                indice_key="spconv_down2",
            ),
            norm_fn(128),
            nn.ReLU(),
        )
        self.num_point_features = 128
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = (
            batch_dict["voxel_features"],
            batch_dict["voxel_coords"],
        )
        batch_size = batch_dict["batch_size"]
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size,
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update(
            {"encoded_spconv_tensor": out, "encoded_spconv_tensor_stride": 8}
        )
        # Expose every stage for consumers such as VoxelSetAbstraction.
        batch_dict.update(
            {
                "multi_scale_3d_features": {
                    "x_conv1": x_conv1,
                    "x_conv2": x_conv2,
                    "x_conv3": x_conv3,
                    "x_conv4": x_conv4,
                }
            }
        )
        return batch_dict
class VoxelResBackBone8x(nn.Module):
    """Residual variant of VoxelBackBone8x: same 8x-downsampling layout,
    but each stage uses SparseBasicBlock residual blocks and the deepest
    stage is widened to 128 channels.
    """
    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # Grid reversed to (z, y, x); +1 z slot per the spconv convention.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(
                input_channels, 16, 3, padding=1, bias=False, indice_key="subm1"
            ),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key="res1"),
            SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key="res1"),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(
                16,
                32,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=1,
                indice_key="spconv2",
                conv_type="spconv",
            ),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key="res2"),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key="res2"),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(
                32,
                64,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=1,
                indice_key="spconv3",
                conv_type="spconv",
            ),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key="res3"),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key="res3"),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(
                64,
                128,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=(0, 1, 1),
                indice_key="spconv4",
                conv_type="spconv",
            ),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key="res4"),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key="res4"),
        )
        last_pad = 0
        last_pad = self.model_cfg.get("last_pad", last_pad)
        self.conv_out = spconv.SparseSequential(
            # [200, 150, 5] -> [200, 150, 2]
            spconv.SparseConv3d(
                128,
                128,
                (3, 1, 1),
                stride=(2, 1, 1),
                padding=last_pad,
                bias=False,
                indice_key="spconv_down2",
            ),
            norm_fn(128),
            nn.ReLU(),
        )
        self.num_point_features = 128
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        """
        voxel_features, voxel_coords = (
            batch_dict["voxel_features"],
            batch_dict["voxel_coords"],
        )
        batch_size = batch_dict["batch_size"]
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size,
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(x_conv4)
        batch_dict.update(
            {"encoded_spconv_tensor": out, "encoded_spconv_tensor_stride": 8}
        )
        # Expose every stage for consumers such as VoxelSetAbstraction.
        batch_dict.update(
            {
                "multi_scale_3d_features": {
                    "x_conv1": x_conv1,
                    "x_conv2": x_conv2,
                    "x_conv3": x_conv3,
                    "x_conv4": x_conv4,
                }
            }
        )
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/spconv_unet.py | Python | from functools import partial
import spconv
import torch
import torch.nn as nn
from pcdet.models.backbones_3d.spconv_backbone import post_act_block
from pcdet.utils import common_utils
class SparseBasicBlock(spconv.SparseModule):
    """Residual basic block for the UNet decoder: two bias-free submanifold
    sparse convs with batch norm and a residual add.

    Differs from the spconv_backbone version: convs have bias=False, conv2
    always uses stride 1, and the residual is taken from the raw input
    feature matrix.
    """
    expansion = 1
    def __init__(
        self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None
    ):
        super(SparseBasicBlock, self).__init__()
        self.conv1 = spconv.SubMConv3d(
            inplanes,
            planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False,
            indice_key=indice_key,
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(
            planes,
            planes,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
            indice_key=indice_key,
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Keep the input feature matrix for the residual connection.
        identity = x.features
        assert x.features.dim() == 2, "x.features.dim()=%d" % x.features.dim()
        out = self.conv1(x)
        out.features = self.bn1(out.features)
        out.features = self.relu(out.features)
        out = self.conv2(out)
        out.features = self.bn2(out.features)
        if self.downsample is not None:
            identity = self.downsample(x)
        out.features += identity
        out.features = self.relu(out.features)
        return out
class UNetV2(nn.Module):
    """
    Sparse Convolution based UNet for point-wise feature learning.
    Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. al)
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network

    Encoder mirrors VoxelBackBone8x (conv1..conv4); the decoder upsamples
    with inverse sparse convs and lateral skip connections back to full
    voxel resolution (16 point features per voxel).
    """
    def __init__(
        self,
        model_cfg,
        input_channels,
        grid_size,
        voxel_size,
        point_cloud_range,
        **kwargs
    ):
        super().__init__()
        self.model_cfg = model_cfg
        # Grid reversed to (z, y, x); +1 z slot per the spconv convention.
        self.sparse_shape = grid_size[::-1] + [1, 0, 0]
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        self.conv_input = spconv.SparseSequential(
            spconv.SubMConv3d(
                input_channels, 16, 3, padding=1, bias=False, indice_key="subm1"
            ),
            norm_fn(16),
            nn.ReLU(),
        )
        block = post_act_block
        self.conv1 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key="subm1"),
        )
        self.conv2 = spconv.SparseSequential(
            # [1600, 1408, 41] <- [800, 704, 21]
            block(
                16,
                32,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=1,
                indice_key="spconv2",
                conv_type="spconv",
            ),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key="subm2"),
            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key="subm2"),
        )
        self.conv3 = spconv.SparseSequential(
            # [800, 704, 21] <- [400, 352, 11]
            block(
                32,
                64,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=1,
                indice_key="spconv3",
                conv_type="spconv",
            ),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"),
        )
        self.conv4 = spconv.SparseSequential(
            # [400, 352, 11] <- [200, 176, 5]
            block(
                64,
                64,
                3,
                norm_fn=norm_fn,
                stride=2,
                padding=(0, 1, 1),
                indice_key="spconv4",
                conv_type="spconv",
            ),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"),
            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"),
        )
        # The z-compressing output conv is optional; RPN-style heads need it,
        # segmentation-only use does not.
        if self.model_cfg.get("RETURN_ENCODED_TENSOR", True):
            last_pad = self.model_cfg.get("last_pad", 0)
            self.conv_out = spconv.SparseSequential(
                # [200, 150, 5] -> [200, 150, 2]
                spconv.SparseConv3d(
                    64,
                    128,
                    (3, 1, 1),
                    stride=(2, 1, 1),
                    padding=last_pad,
                    bias=False,
                    indice_key="spconv_down2",
                ),
                norm_fn(128),
                nn.ReLU(),
            )
        else:
            self.conv_out = None
        # decoder
        # [400, 352, 11] <- [200, 176, 5]
        self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key="subm4", norm_fn=norm_fn)
        self.conv_up_m4 = block(
            128, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"
        )
        self.inv_conv4 = block(
            64, 64, 3, norm_fn=norm_fn, indice_key="spconv4", conv_type="inverseconv"
        )
        # [800, 704, 21] <- [400, 352, 11]
        self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key="subm3", norm_fn=norm_fn)
        self.conv_up_m3 = block(
            128, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"
        )
        self.inv_conv3 = block(
            64, 32, 3, norm_fn=norm_fn, indice_key="spconv3", conv_type="inverseconv"
        )
        # [1600, 1408, 41] <- [800, 704, 21]
        self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key="subm2", norm_fn=norm_fn)
        self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key="subm2")
        self.inv_conv2 = block(
            32, 16, 3, norm_fn=norm_fn, indice_key="spconv2", conv_type="inverseconv"
        )
        # [1600, 1408, 41] <- [1600, 1408, 41]
        self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key="subm1", norm_fn=norm_fn)
        self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key="subm1")
        self.conv5 = spconv.SparseSequential(
            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key="subm1")
        )
        self.num_point_features = 16
    def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
        # One UNet up-right step: transform the lateral (skip) tensor, concat
        # with the bottom-up tensor, mix, add a channel-reduced residual,
        # then upsample via the inverse conv.
        x_trans = conv_t(x_lateral)
        # NOTE: x aliases x_trans; assigning x.features below mutates the
        # same sparse tensor object.
        x = x_trans
        x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
        x_m = conv_m(x)
        x = self.channel_reduction(x, x_m.features.shape[1])
        x.features = x_m.features + x.features
        x = conv_inv(x)
        return x
    @staticmethod
    def channel_reduction(x, out_channels):
        """Reduce a sparse tensor's feature width by summing channel groups.

        Args:
            x: x.features (N, C1); C1 must be a multiple of out_channels.
            out_channels: C2
        Returns:
            x with features reshaped to (N, C2) by grouped summation.
        """
        features = x.features
        n, in_channels = features.shape
        assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
        x.features = features.view(n, out_channels, -1).sum(dim=2)
        return x
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                vfe_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
                point_features: (N, C)
        """
        voxel_features, voxel_coords = (
            batch_dict["voxel_features"],
            batch_dict["voxel_coords"],
        )
        batch_size = batch_dict["batch_size"]
        input_sp_tensor = spconv.SparseConvTensor(
            features=voxel_features,
            indices=voxel_coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size,
        )
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        if self.conv_out is not None:
            # for detection head
            # [200, 176, 5] -> [200, 176, 2]
            out = self.conv_out(x_conv4)
            batch_dict["encoded_spconv_tensor"] = out
            batch_dict["encoded_spconv_tensor_stride"] = 8
        # for segmentation head
        # [400, 352, 11] <- [200, 176, 5]
        x_up4 = self.UR_block_forward(
            x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4
        )
        # [800, 704, 21] <- [400, 352, 11]
        x_up3 = self.UR_block_forward(
            x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3
        )
        # [1600, 1408, 41] <- [800, 704, 21]
        x_up2 = self.UR_block_forward(
            x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2
        )
        # [1600, 1408, 41] <- [1600, 1408, 41]
        x_up1 = self.UR_block_forward(
            x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5
        )
        batch_dict["point_features"] = x_up1.features
        # Per-voxel metric coordinates with the batch index prepended.
        point_coords = common_utils.get_voxel_centers(
            x_up1.indices[:, 1:],
            downsample_times=1,
            voxel_size=self.voxel_size,
            point_cloud_range=self.point_cloud_range,
        )
        batch_dict["point_coords"] = torch.cat(
            (x_up1.indices[:, 0:1].float(), point_coords), dim=1
        )
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/vfe/__init__.py | Python | from pcdet.models.backbones_3d.vfe.mean_vfe import MeanVFE
from pcdet.models.backbones_3d.vfe.pillar_vfe import PillarVFE
from pcdet.models.backbones_3d.vfe.vfe_template import VFETemplate
# Registry (NAME -> class) used by the model builder to instantiate VFE modules.
__all__ = {"VFETemplate": VFETemplate, "MeanVFE": MeanVFE, "PillarVFE": PillarVFE}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/vfe/mean_vfe.py | Python | import torch
from pcdet.models.backbones_3d.vfe.vfe_template import VFETemplate
class MeanVFE(VFETemplate):
    """Voxel feature encoder that averages the raw points inside each voxel."""

    def __init__(self, model_cfg, num_point_features, **kwargs):
        super().__init__(model_cfg=model_cfg)
        self.num_point_features = num_point_features

    def get_output_feature_dim(self):
        # Averaging preserves the per-point channel count.
        return self.num_point_features

    def forward(self, batch_dict, **kwargs):
        """
        Args:
            batch_dict:
                voxels: (num_voxels, max_points_per_voxel, C)
                voxel_num_points: optional (num_voxels)
            **kwargs:
        Returns:
            vfe_features: (num_voxels, C)
        """
        voxels = batch_dict["voxels"]
        num_points = batch_dict["voxel_num_points"]
        # Sum over the points axis, then divide by the point count clamped
        # to >= 1 so padded/empty slots cannot produce a division by zero.
        summed = voxels[:, :, :].sum(dim=1, keepdim=False)
        divisor = torch.clamp_min(num_points.view(-1, 1), min=1.0).type_as(voxels)
        batch_dict["voxel_features"] = (summed / divisor).contiguous()
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/vfe/pillar_vfe.py | Python | import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.models.backbones_3d.vfe.vfe_template import VFETemplate
class PFNLayer(nn.Module):
def __init__(self, in_channels, out_channels, use_norm=True, last_layer=False):
super().__init__()
self.last_vfe = last_layer
self.use_norm = use_norm
if not self.last_vfe:
out_channels = out_channels // 2
if self.use_norm:
self.linear = nn.Linear(in_channels, out_channels, bias=False)
self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
else:
self.linear = nn.Linear(in_channels, out_channels, bias=True)
self.part = 50000
def forward(self, inputs):
if inputs.shape[0] > self.part:
# nn.Linear performs randomly when batch size is too large
num_parts = inputs.shape[0] // self.part
part_linear_out = [
self.linear(inputs[num_part * self.part : (num_part + 1) * self.part])
for num_part in range(num_parts + 1)
]
x = torch.cat(part_linear_out, dim=0)
else:
x = self.linear(inputs)
torch.backends.cudnn.enabled = False
x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1) if self.use_norm else x
torch.backends.cudnn.enabled = True
x = F.relu(x)
x_max = torch.max(x, dim=1, keepdim=True)[0]
if self.last_vfe:
return x_max
else:
x_repeat = x_max.repeat(1, inputs.shape[1], 1)
x_concatenated = torch.cat([x, x_repeat], dim=2)
return x_concatenated
class PillarVFE(VFETemplate):
def __init__(self, model_cfg, num_point_features, voxel_size, point_cloud_range):
super().__init__(model_cfg=model_cfg)
self.use_norm = self.model_cfg.USE_NORM
self.with_distance = self.model_cfg.WITH_DISTANCE
self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
num_point_features += 6 if self.use_absolute_xyz else 3
if self.with_distance:
num_point_features += 1
self.num_filters = self.model_cfg.NUM_FILTERS
assert len(self.num_filters) > 0
num_filters = [num_point_features] + list(self.num_filters)
pfn_layers = []
for i in range(len(num_filters) - 1):
in_filters = num_filters[i]
out_filters = num_filters[i + 1]
pfn_layers.append(
PFNLayer(
in_filters,
out_filters,
self.use_norm,
last_layer=(i >= len(num_filters) - 2),
)
)
self.pfn_layers = nn.ModuleList(pfn_layers)
self.voxel_x = voxel_size[0]
self.voxel_y = voxel_size[1]
self.voxel_z = voxel_size[2]
self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
def get_output_feature_dim(self):
return self.num_filters[-1]
def get_paddings_indicator(self, actual_num, max_num, axis=0):
actual_num = torch.unsqueeze(actual_num, axis + 1)
max_num_shape = [1] * len(actual_num.shape)
max_num_shape[axis + 1] = -1
max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(
max_num_shape
)
paddings_indicator = actual_num.int() > max_num
return paddings_indicator
def forward(self, batch_dict, **kwargs):
voxel_features, voxel_num_points, coords = (
batch_dict["voxels"],
batch_dict["voxel_num_points"],
batch_dict["voxel_coords"],
)
points_mean = voxel_features[:, :, :3].sum(
dim=1, keepdim=True
) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1)
f_cluster = voxel_features[:, :, :3] - points_mean
f_center = torch.zeros_like(voxel_features[:, :, :3])
f_center[:, :, 0] = voxel_features[:, :, 0] - (
coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x
+ self.x_offset
)
f_center[:, :, 1] = voxel_features[:, :, 1] - (
coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y
+ self.y_offset
)
f_center[:, :, 2] = voxel_features[:, :, 2] - (
coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z
+ self.z_offset
)
if self.use_absolute_xyz:
features = [voxel_features, f_cluster, f_center]
else:
features = [voxel_features[..., 3:], f_cluster, f_center]
if self.with_distance:
points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True)
features.append(points_dist)
features = torch.cat(features, dim=-1)
voxel_count = features.shape[1]
mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
mask = torch.unsqueeze(mask, -1).type_as(voxel_features)
features *= mask
for pfn in self.pfn_layers:
features = pfn(features)
features = features.squeeze()
batch_dict["pillar_features"] = features
return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/backbones_3d/vfe/vfe_template.py | Python | import torch.nn as nn
class VFETemplate(nn.Module):
def __init__(self, model_cfg, **kwargs):
super().__init__()
self.model_cfg = model_cfg
def get_output_feature_dim(self):
raise NotImplementedError
def forward(self, **kwargs):
"""
Args:
**kwargs:
Returns:
batch_dict:
...
vfe_features: (num_voxels, C)
"""
raise NotImplementedError
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/__init__.py | Python | from pcdet.models.dense_heads.anchor_head_multi import AnchorHeadMulti
from pcdet.models.dense_heads.anchor_head_single import AnchorHeadSingle
from pcdet.models.dense_heads.anchor_head_template import AnchorHeadTemplate
from pcdet.models.dense_heads.point_head_box import PointHeadBox
from pcdet.models.dense_heads.point_head_simple import PointHeadSimple
from pcdet.models.dense_heads.point_intra_part_head import PointIntraPartOffsetHead
__all__ = {
"AnchorHeadTemplate": AnchorHeadTemplate,
"AnchorHeadSingle": AnchorHeadSingle,
"PointIntraPartOffsetHead": PointIntraPartOffsetHead,
"PointHeadSimple": PointHeadSimple,
"PointHeadBox": PointHeadBox,
"AnchorHeadMulti": AnchorHeadMulti,
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/anchor_head_multi.py | Python | import numpy as np
import torch
import torch.nn as nn
from pcdet.models.backbones_2d import BaseBEVBackbone
from pcdet.models.dense_heads.anchor_head_template import AnchorHeadTemplate
class SingleHead(BaseBEVBackbone):
def __init__(
self,
model_cfg,
input_channels,
num_class,
num_anchors_per_location,
code_size,
rpn_head_cfg=None,
head_label_indices=None,
separate_reg_config=None,
):
super().__init__(rpn_head_cfg, input_channels)
self.num_anchors_per_location = num_anchors_per_location
self.num_class = num_class
self.code_size = code_size
self.model_cfg = model_cfg
self.separate_reg_config = separate_reg_config
self.register_buffer("head_label_indices", head_label_indices)
if self.separate_reg_config is not None:
code_size_cnt = 0
self.conv_box = nn.ModuleDict()
self.conv_box_names = []
num_middle_conv = self.separate_reg_config.NUM_MIDDLE_CONV
num_middle_filter = self.separate_reg_config.NUM_MIDDLE_FILTER
conv_cls_list = []
c_in = input_channels
for k in range(num_middle_conv):
conv_cls_list.extend(
[
nn.Conv2d(
c_in,
num_middle_filter,
kernel_size=3,
stride=1,
padding=1,
bias=False,
),
nn.BatchNorm2d(num_middle_filter),
nn.ReLU(),
]
)
c_in = num_middle_filter
conv_cls_list.append(
nn.Conv2d(
c_in,
self.num_anchors_per_location * self.num_class,
kernel_size=3,
stride=1,
padding=1,
)
)
self.conv_cls = nn.Sequential(*conv_cls_list)
for reg_config in self.separate_reg_config.REG_LIST:
reg_name, reg_channel = reg_config.split(":")
reg_channel = int(reg_channel)
cur_conv_list = []
c_in = input_channels
for k in range(num_middle_conv):
cur_conv_list.extend(
[
nn.Conv2d(
c_in,
num_middle_filter,
kernel_size=3,
stride=1,
padding=1,
bias=False,
),
nn.BatchNorm2d(num_middle_filter),
nn.ReLU(),
]
)
c_in = num_middle_filter
cur_conv_list.append(
nn.Conv2d(
c_in,
self.num_anchors_per_location * int(reg_channel),
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
)
code_size_cnt += reg_channel
self.conv_box[f"conv_{reg_name}"] = nn.Sequential(*cur_conv_list)
self.conv_box_names.append(f"conv_{reg_name}")
for m in self.conv_box.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode="fan_out", nonlinearity="relu"
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
assert (
code_size_cnt == code_size
), f"Code size does not match: {code_size_cnt}:{code_size}"
else:
self.conv_cls = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.num_class,
kernel_size=1,
)
self.conv_box = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.code_size,
kernel_size=1,
)
if self.model_cfg.get("USE_DIRECTION_CLASSIFIER", None) is not None:
self.conv_dir_cls = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
kernel_size=1,
)
else:
self.conv_dir_cls = None
self.use_multihead = self.model_cfg.get("USE_MULTIHEAD", False)
self.init_weights()
def init_weights(self):
pi = 0.01
if isinstance(self.conv_cls, nn.Conv2d):
nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
else:
nn.init.constant_(self.conv_cls[-1].bias, -np.log((1 - pi) / pi))
def forward(self, spatial_features_2d):
ret_dict = {}
spatial_features_2d = super().forward(
{"spatial_features": spatial_features_2d}
)["spatial_features_2d"]
cls_preds = self.conv_cls(spatial_features_2d)
if self.separate_reg_config is None:
box_preds = self.conv_box(spatial_features_2d)
else:
box_preds_list = []
for reg_name in self.conv_box_names:
box_preds_list.append(self.conv_box[reg_name](spatial_features_2d))
box_preds = torch.cat(box_preds_list, dim=1)
if not self.use_multihead:
box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
else:
H, W = box_preds.shape[2:]
batch_size = box_preds.shape[0]
box_preds = (
box_preds.view(-1, self.num_anchors_per_location, self.code_size, H, W)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
cls_preds = (
cls_preds.view(-1, self.num_anchors_per_location, self.num_class, H, W)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
box_preds = box_preds.view(batch_size, -1, self.code_size)
cls_preds = cls_preds.view(batch_size, -1, self.num_class)
if self.conv_dir_cls is not None:
dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
if self.use_multihead:
dir_cls_preds = (
dir_cls_preds.view(
-1,
self.num_anchors_per_location,
self.model_cfg.NUM_DIR_BINS,
H,
W,
)
.permute(0, 1, 3, 4, 2)
.contiguous()
)
dir_cls_preds = dir_cls_preds.view(
batch_size, -1, self.model_cfg.NUM_DIR_BINS
)
else:
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
else:
dir_cls_preds = None
ret_dict["cls_preds"] = cls_preds
ret_dict["box_preds"] = box_preds
ret_dict["dir_cls_preds"] = dir_cls_preds
return ret_dict
class AnchorHeadMulti(AnchorHeadTemplate):
def __init__(
self,
model_cfg,
input_channels,
num_class,
class_names,
grid_size,
point_cloud_range,
predict_boxes_when_training=True,
**kwargs,
):
super().__init__(
model_cfg=model_cfg,
num_class=num_class,
class_names=class_names,
grid_size=grid_size,
point_cloud_range=point_cloud_range,
predict_boxes_when_training=predict_boxes_when_training,
)
self.model_cfg = model_cfg
self.separate_multihead = self.model_cfg.get("SEPARATE_MULTIHEAD", False)
if self.model_cfg.get("SHARED_CONV_NUM_FILTER", None) is not None:
shared_conv_num_filter = self.model_cfg.SHARED_CONV_NUM_FILTER
self.shared_conv = nn.Sequential(
nn.Conv2d(
input_channels,
shared_conv_num_filter,
3,
stride=1,
padding=1,
bias=False,
),
nn.BatchNorm2d(shared_conv_num_filter, eps=1e-3, momentum=0.01),
nn.ReLU(),
)
else:
self.shared_conv = None
shared_conv_num_filter = input_channels
self.rpn_heads = None
self.make_multihead(shared_conv_num_filter)
def make_multihead(self, input_channels):
rpn_head_cfgs = self.model_cfg.RPN_HEAD_CFGS
rpn_heads = []
class_names = []
for rpn_head_cfg in rpn_head_cfgs:
class_names.extend(rpn_head_cfg["HEAD_CLS_NAME"])
for rpn_head_cfg in rpn_head_cfgs:
num_anchors_per_location = sum(
[
self.num_anchors_per_location[class_names.index(head_cls)]
for head_cls in rpn_head_cfg["HEAD_CLS_NAME"]
]
)
head_label_indices = torch.from_numpy(
np.array(
[
self.class_names.index(cur_name) + 1
for cur_name in rpn_head_cfg["HEAD_CLS_NAME"]
]
)
)
rpn_head = SingleHead(
self.model_cfg,
input_channels,
(
len(rpn_head_cfg["HEAD_CLS_NAME"])
if self.separate_multihead
else self.num_class
),
num_anchors_per_location,
self.box_coder.code_size,
rpn_head_cfg,
head_label_indices=head_label_indices,
separate_reg_config=self.model_cfg.get("SEPARATE_REG_CONFIG", None),
)
rpn_heads.append(rpn_head)
self.rpn_heads = nn.ModuleList(rpn_heads)
def forward(self, data_dict):
spatial_features_2d = data_dict["spatial_features_2d"]
if self.shared_conv is not None:
spatial_features_2d = self.shared_conv(spatial_features_2d)
ret_dicts = []
for rpn_head in self.rpn_heads:
ret_dicts.append(rpn_head(spatial_features_2d))
cls_preds = [ret_dict["cls_preds"] for ret_dict in ret_dicts]
box_preds = [ret_dict["box_preds"] for ret_dict in ret_dicts]
ret = {
"cls_preds": (
cls_preds if self.separate_multihead else torch.cat(cls_preds, dim=1)
),
"box_preds": (
box_preds if self.separate_multihead else torch.cat(box_preds, dim=1)
),
}
if self.model_cfg.get("USE_DIRECTION_CLASSIFIER", False):
dir_cls_preds = [ret_dict["dir_cls_preds"] for ret_dict in ret_dicts]
ret["dir_cls_preds"] = (
dir_cls_preds
if self.separate_multihead
else torch.cat(dir_cls_preds, dim=1)
)
self.forward_ret_dict.update(ret)
if self.training:
targets_dict = self.assign_targets(gt_boxes=data_dict["gt_boxes"])
self.forward_ret_dict.update(targets_dict)
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict["batch_size"],
cls_preds=ret["cls_preds"],
box_preds=ret["box_preds"],
dir_cls_preds=ret.get("dir_cls_preds", None),
)
if isinstance(batch_cls_preds, list):
multihead_label_mapping = []
for idx in range(len(batch_cls_preds)):
multihead_label_mapping.append(
self.rpn_heads[idx].head_label_indices
)
data_dict["multihead_label_mapping"] = multihead_label_mapping
data_dict["batch_cls_preds"] = batch_cls_preds
data_dict["batch_box_preds"] = batch_box_preds
data_dict["cls_preds_normalized"] = False
return data_dict
def get_cls_layer_loss(self):
loss_weights = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
if "pos_cls_weight" in loss_weights:
pos_cls_weight = loss_weights["pos_cls_weight"]
neg_cls_weight = loss_weights["neg_cls_weight"]
else:
pos_cls_weight = neg_cls_weight = 1.0
cls_preds = self.forward_ret_dict["cls_preds"]
box_cls_labels = self.forward_ret_dict["box_cls_labels"]
if not isinstance(cls_preds, list):
cls_preds = [cls_preds]
batch_size = int(cls_preds[0].shape[0])
cared = box_cls_labels >= 0 # [N, num_anchors]
positives = box_cls_labels > 0
negatives = box_cls_labels == 0
negative_cls_weights = negatives * 1.0 * neg_cls_weight
cls_weights = (negative_cls_weights + pos_cls_weight * positives).float()
reg_weights = positives.float()
if self.num_class == 1:
# class agnostic
box_cls_labels[positives] = 1
pos_normalizer = positives.sum(1, keepdim=True).float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
cls_weights /= torch.clamp(pos_normalizer, min=1.0)
cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
one_hot_targets = torch.zeros(
*list(cls_targets.shape),
self.num_class + 1,
dtype=cls_preds[0].dtype,
device=cls_targets.device,
)
one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
one_hot_targets = one_hot_targets[..., 1:]
start_idx = c_idx = 0
cls_losses = 0
for idx, cls_pred in enumerate(cls_preds):
cur_num_class = self.rpn_heads[idx].num_class
cls_pred = cls_pred.view(batch_size, -1, cur_num_class)
if self.separate_multihead:
one_hot_target = one_hot_targets[
:,
start_idx : start_idx + cls_pred.shape[1],
c_idx : c_idx + cur_num_class,
]
c_idx += cur_num_class
else:
one_hot_target = one_hot_targets[
:, start_idx : start_idx + cls_pred.shape[1]
]
cls_weight = cls_weights[:, start_idx : start_idx + cls_pred.shape[1]]
cls_loss_src = self.cls_loss_func(
cls_pred, one_hot_target, weights=cls_weight
) # [N, M]
cls_loss = cls_loss_src.sum() / batch_size
cls_loss = cls_loss * loss_weights["cls_weight"]
cls_losses += cls_loss
start_idx += cls_pred.shape[1]
assert start_idx == one_hot_targets.shape[1]
tb_dict = {"rpn_loss_cls": cls_losses.item()}
return cls_losses, tb_dict
def get_box_reg_layer_loss(self):
box_preds = self.forward_ret_dict["box_preds"]
box_dir_cls_preds = self.forward_ret_dict.get("dir_cls_preds", None)
box_reg_targets = self.forward_ret_dict["box_reg_targets"]
box_cls_labels = self.forward_ret_dict["box_cls_labels"]
positives = box_cls_labels > 0
reg_weights = positives.float()
pos_normalizer = positives.sum(1, keepdim=True).float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
if not isinstance(box_preds, list):
box_preds = [box_preds]
batch_size = int(box_preds[0].shape[0])
if isinstance(self.anchors, list):
if self.use_multihead:
anchors = torch.cat(
[
anchor.permute(3, 4, 0, 1, 2, 5)
.contiguous()
.view(-1, anchor.shape[-1])
for anchor in self.anchors
],
dim=0,
)
else:
anchors = torch.cat(self.anchors, dim=-3)
else:
anchors = self.anchors
anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
start_idx = 0
box_losses = 0
tb_dict = {}
for idx, box_pred in enumerate(box_preds):
box_pred = box_pred.view(
batch_size,
-1,
(
box_pred.shape[-1] // self.num_anchors_per_location
if not self.use_multihead
else box_pred.shape[-1]
),
)
box_reg_target = box_reg_targets[
:, start_idx : start_idx + box_pred.shape[1]
]
reg_weight = reg_weights[:, start_idx : start_idx + box_pred.shape[1]]
# sin(a - b) = sinacosb-cosasinb
if box_dir_cls_preds is not None:
box_pred_sin, reg_target_sin = self.add_sin_difference(
box_pred, box_reg_target
)
loc_loss_src = self.reg_loss_func(
box_pred_sin, reg_target_sin, weights=reg_weight
) # [N, M]
else:
loc_loss_src = self.reg_loss_func(
box_pred, box_reg_target, weights=reg_weight
) # [N, M]
loc_loss = loc_loss_src.sum() / batch_size
loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS["loc_weight"]
box_losses += loc_loss
tb_dict["rpn_loss_loc"] = tb_dict.get("rpn_loss_loc", 0) + loc_loss.item()
if box_dir_cls_preds is not None:
if not isinstance(box_dir_cls_preds, list):
box_dir_cls_preds = [box_dir_cls_preds]
dir_targets = self.get_direction_target(
anchors,
box_reg_targets,
dir_offset=self.model_cfg.DIR_OFFSET,
num_bins=self.model_cfg.NUM_DIR_BINS,
)
box_dir_cls_pred = box_dir_cls_preds[idx]
dir_logit = box_dir_cls_pred.view(
batch_size, -1, self.model_cfg.NUM_DIR_BINS
)
weights = positives.type_as(dir_logit)
weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
weight = weights[:, start_idx : start_idx + box_pred.shape[1]]
dir_target = dir_targets[:, start_idx : start_idx + box_pred.shape[1]]
dir_loss = self.dir_loss_func(dir_logit, dir_target, weights=weight)
dir_loss = dir_loss.sum() / batch_size
dir_loss = (
dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS["dir_weight"]
)
box_losses += dir_loss
tb_dict["rpn_loss_dir"] = (
tb_dict.get("rpn_loss_dir", 0) + dir_loss.item()
)
start_idx += box_pred.shape[1]
return box_losses, tb_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/anchor_head_single.py | Python | import numpy as np
import torch.nn as nn
from pcdet.models.dense_heads.anchor_head_template import AnchorHeadTemplate
class AnchorHeadSingle(AnchorHeadTemplate):
def __init__(
self,
model_cfg,
input_channels,
num_class,
class_names,
grid_size,
point_cloud_range,
predict_boxes_when_training=True,
**kwargs
):
super().__init__(
model_cfg=model_cfg,
num_class=num_class,
class_names=class_names,
grid_size=grid_size,
point_cloud_range=point_cloud_range,
predict_boxes_when_training=predict_boxes_when_training,
)
self.num_anchors_per_location = sum(self.num_anchors_per_location)
self.conv_cls = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.num_class,
kernel_size=1,
)
self.conv_box = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.box_coder.code_size,
kernel_size=1,
)
if self.model_cfg.get("USE_DIRECTION_CLASSIFIER", None) is not None:
self.conv_dir_cls = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
kernel_size=1,
)
else:
self.conv_dir_cls = None
self.init_weights()
def init_weights(self):
pi = 0.01
nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)
def forward(self, data_dict):
spatial_features_2d = data_dict["spatial_features_2d"]
cls_preds = self.conv_cls(spatial_features_2d)
box_preds = self.conv_box(spatial_features_2d)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
self.forward_ret_dict["cls_preds"] = cls_preds
self.forward_ret_dict["box_preds"] = box_preds
if self.conv_dir_cls is not None:
dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict["dir_cls_preds"] = dir_cls_preds
else:
dir_cls_preds = None
if self.training:
targets_dict = self.assign_targets(gt_boxes=data_dict["gt_boxes"])
self.forward_ret_dict.update(targets_dict)
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict["batch_size"],
cls_preds=cls_preds,
box_preds=box_preds,
dir_cls_preds=dir_cls_preds,
)
data_dict["batch_cls_preds"] = batch_cls_preds
data_dict["batch_box_preds"] = batch_box_preds
data_dict["cls_preds_normalized"] = False
return data_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/anchor_head_template.py | Python | import numpy as np
import torch
import torch.nn as nn
from pcdet.models.dense_heads.target_assigner.anchor_generator import AnchorGenerator
from pcdet.models.dense_heads.target_assigner.atss_target_assigner import (
ATSSTargetAssigner,
)
from pcdet.models.dense_heads.target_assigner.axis_aligned_target_assigner import (
AxisAlignedTargetAssigner,
)
from pcdet.utils import box_coder_utils, common_utils, loss_utils
class AnchorHeadTemplate(nn.Module):
def __init__(
self,
model_cfg,
num_class,
class_names,
grid_size,
point_cloud_range,
predict_boxes_when_training,
):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.class_names = class_names
self.predict_boxes_when_training = predict_boxes_when_training
self.use_multihead = self.model_cfg.get("USE_MULTIHEAD", False)
anchor_target_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG
self.box_coder = getattr(box_coder_utils, anchor_target_cfg.BOX_CODER)(
num_dir_bins=anchor_target_cfg.get("NUM_DIR_BINS", 6),
**anchor_target_cfg.get("BOX_CODER_CONFIG", {})
)
anchor_generator_cfg = self.model_cfg.ANCHOR_GENERATOR_CONFIG
anchors, self.num_anchors_per_location = self.generate_anchors(
anchor_generator_cfg,
grid_size=grid_size,
point_cloud_range=point_cloud_range,
anchor_ndim=self.box_coder.code_size,
)
self.anchors = [x.cuda() for x in anchors]
self.target_assigner = self.get_target_assigner(anchor_target_cfg)
self.forward_ret_dict = {}
self.build_losses(self.model_cfg.LOSS_CONFIG)
@staticmethod
def generate_anchors(
anchor_generator_cfg, grid_size, point_cloud_range, anchor_ndim=7
):
anchor_generator = AnchorGenerator(
anchor_range=point_cloud_range, anchor_generator_config=anchor_generator_cfg
)
feature_map_size = [
grid_size[:2] // config["feature_map_stride"]
for config in anchor_generator_cfg
]
anchors_list, num_anchors_per_location_list = anchor_generator.generate_anchors(
feature_map_size
)
if anchor_ndim != 7:
for idx, anchors in enumerate(anchors_list):
pad_zeros = anchors.new_zeros([*anchors.shape[0:-1], anchor_ndim - 7])
new_anchors = torch.cat((anchors, pad_zeros), dim=-1)
anchors_list[idx] = new_anchors
return anchors_list, num_anchors_per_location_list
def get_target_assigner(self, anchor_target_cfg):
if anchor_target_cfg.NAME == "ATSS":
target_assigner = ATSSTargetAssigner(
topk=anchor_target_cfg.TOPK,
box_coder=self.box_coder,
use_multihead=self.use_multihead,
match_height=anchor_target_cfg.MATCH_HEIGHT,
)
elif anchor_target_cfg.NAME == "AxisAlignedTargetAssigner":
target_assigner = AxisAlignedTargetAssigner(
model_cfg=self.model_cfg,
class_names=self.class_names,
box_coder=self.box_coder,
match_height=anchor_target_cfg.MATCH_HEIGHT,
)
else:
raise NotImplementedError
return target_assigner
def build_losses(self, losses_cfg):
self.add_module(
"cls_loss_func",
loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0),
)
reg_loss_name = (
"WeightedSmoothL1Loss"
if losses_cfg.get("REG_LOSS_TYPE", None) is None
else losses_cfg.REG_LOSS_TYPE
)
self.add_module(
"reg_loss_func",
getattr(loss_utils, reg_loss_name)(
code_weights=losses_cfg.LOSS_WEIGHTS["code_weights"]
),
)
self.add_module("dir_loss_func", loss_utils.WeightedCrossEntropyLoss())
def assign_targets(self, gt_boxes):
"""
Args:
gt_boxes: (B, M, 8)
Returns:
"""
targets_dict = self.target_assigner.assign_targets(self.anchors, gt_boxes)
return targets_dict
def get_cls_layer_loss(self):
cls_preds = self.forward_ret_dict["cls_preds"]
box_cls_labels = self.forward_ret_dict["box_cls_labels"]
batch_size = int(cls_preds.shape[0])
cared = box_cls_labels >= 0 # [N, num_anchors]
positives = box_cls_labels > 0
negatives = box_cls_labels == 0
negative_cls_weights = negatives * 1.0
cls_weights = (negative_cls_weights + 1.0 * positives).float()
reg_weights = positives.float()
if self.num_class == 1:
# class agnostic
box_cls_labels[positives] = 1
pos_normalizer = positives.sum(1, keepdim=True).float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
cls_weights /= torch.clamp(pos_normalizer, min=1.0)
cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
cls_targets = cls_targets.unsqueeze(dim=-1)
cls_targets = cls_targets.squeeze(dim=-1)
one_hot_targets = torch.zeros(
*list(cls_targets.shape),
self.num_class + 1,
dtype=cls_preds.dtype,
device=cls_targets.device
)
one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
cls_preds = cls_preds.view(batch_size, -1, self.num_class)
one_hot_targets = one_hot_targets[..., 1:]
cls_loss_src = self.cls_loss_func(
cls_preds, one_hot_targets, weights=cls_weights
) # [N, M]
cls_loss = cls_loss_src.sum() / batch_size
cls_loss = cls_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS["cls_weight"]
tb_dict = {"rpn_loss_cls": cls_loss.item()}
return cls_loss, tb_dict
@staticmethod
def add_sin_difference(boxes1, boxes2, dim=6):
assert dim != -1
rad_pred_encoding = torch.sin(boxes1[..., dim : dim + 1]) * torch.cos(
boxes2[..., dim : dim + 1]
)
rad_tg_encoding = torch.cos(boxes1[..., dim : dim + 1]) * torch.sin(
boxes2[..., dim : dim + 1]
)
boxes1 = torch.cat(
[boxes1[..., :dim], rad_pred_encoding, boxes1[..., dim + 1 :]], dim=-1
)
boxes2 = torch.cat(
[boxes2[..., :dim], rad_tg_encoding, boxes2[..., dim + 1 :]], dim=-1
)
return boxes1, boxes2
@staticmethod
def get_direction_target(
anchors, reg_targets, one_hot=True, dir_offset=0, num_bins=2
):
batch_size = reg_targets.shape[0]
anchors = anchors.view(batch_size, -1, anchors.shape[-1])
rot_gt = reg_targets[..., 6] + anchors[..., 6]
offset_rot = common_utils.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()
dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)
if one_hot:
dir_targets = torch.zeros(
*list(dir_cls_targets.shape),
num_bins,
dtype=anchors.dtype,
device=dir_cls_targets.device
)
dir_targets.scatter_(-1, dir_cls_targets.unsqueeze(dim=-1).long(), 1.0)
dir_cls_targets = dir_targets
return dir_cls_targets
def get_box_reg_layer_loss(self):
box_preds = self.forward_ret_dict["box_preds"]
box_dir_cls_preds = self.forward_ret_dict.get("dir_cls_preds", None)
box_reg_targets = self.forward_ret_dict["box_reg_targets"]
box_cls_labels = self.forward_ret_dict["box_cls_labels"]
batch_size = int(box_preds.shape[0])
positives = box_cls_labels > 0
reg_weights = positives.float()
pos_normalizer = positives.sum(1, keepdim=True).float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
if isinstance(self.anchors, list):
if self.use_multihead:
anchors = torch.cat(
[
anchor.permute(3, 4, 0, 1, 2, 5)
.contiguous()
.view(-1, anchor.shape[-1])
for anchor in self.anchors
],
dim=0,
)
else:
anchors = torch.cat(self.anchors, dim=-3)
else:
anchors = self.anchors
anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
box_preds = box_preds.view(
batch_size,
-1,
(
box_preds.shape[-1] // self.num_anchors_per_location
if not self.use_multihead
else box_preds.shape[-1]
),
)
# sin(a - b) = sinacosb-cosasinb
box_preds_sin, reg_targets_sin = self.add_sin_difference(
box_preds, box_reg_targets
)
loc_loss_src = self.reg_loss_func(
box_preds_sin, reg_targets_sin, weights=reg_weights
) # [N, M]
loc_loss = loc_loss_src.sum() / batch_size
loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS["loc_weight"]
box_loss = loc_loss
tb_dict = {"rpn_loss_loc": loc_loss.item()}
if box_dir_cls_preds is not None:
dir_targets = self.get_direction_target(
anchors,
box_reg_targets,
dir_offset=self.model_cfg.DIR_OFFSET,
num_bins=self.model_cfg.NUM_DIR_BINS,
)
dir_logits = box_dir_cls_preds.view(
batch_size, -1, self.model_cfg.NUM_DIR_BINS
)
weights = positives.type_as(dir_logits)
weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
dir_loss = self.dir_loss_func(dir_logits, dir_targets, weights=weights)
dir_loss = dir_loss.sum() / batch_size
dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS["dir_weight"]
box_loss += dir_loss
tb_dict["rpn_loss_dir"] = dir_loss.item()
return box_loss, tb_dict
def get_loss(self):
cls_loss, tb_dict = self.get_cls_layer_loss()
box_loss, tb_dict_box = self.get_box_reg_layer_loss()
tb_dict.update(tb_dict_box)
rpn_loss = cls_loss + box_loss
tb_dict["rpn_loss"] = rpn_loss.item()
return rpn_loss, tb_dict
def generate_predicted_boxes(
self, batch_size, cls_preds, box_preds, dir_cls_preds=None
):
"""
Args:
batch_size:
cls_preds: (N, H, W, C1)
box_preds: (N, H, W, C2)
dir_cls_preds: (N, H, W, C3)
Returns:
batch_cls_preds: (B, num_boxes, num_classes)
batch_box_preds: (B, num_boxes, 7+C)
"""
if isinstance(self.anchors, list):
if self.use_multihead:
anchors = torch.cat(
[
anchor.permute(3, 4, 0, 1, 2, 5)
.contiguous()
.view(-1, anchor.shape[-1])
for anchor in self.anchors
],
dim=0,
)
else:
anchors = torch.cat(self.anchors, dim=-3)
else:
anchors = self.anchors
num_anchors = anchors.view(-1, anchors.shape[-1]).shape[0]
batch_anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
batch_cls_preds = (
cls_preds.view(batch_size, num_anchors, -1).float()
if not isinstance(cls_preds, list)
else cls_preds
)
batch_box_preds = (
box_preds.view(batch_size, num_anchors, -1)
if not isinstance(box_preds, list)
else torch.cat(box_preds, dim=1).view(batch_size, num_anchors, -1)
)
batch_box_preds = self.box_coder.decode_torch(batch_box_preds, batch_anchors)
if dir_cls_preds is not None:
dir_offset = self.model_cfg.DIR_OFFSET
dir_limit_offset = self.model_cfg.DIR_LIMIT_OFFSET
dir_cls_preds = (
dir_cls_preds.view(batch_size, num_anchors, -1)
if not isinstance(dir_cls_preds, list)
else torch.cat(dir_cls_preds, dim=1).view(batch_size, num_anchors, -1)
)
dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
period = 2 * np.pi / self.model_cfg.NUM_DIR_BINS
dir_rot = common_utils.limit_period(
batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period
)
batch_box_preds[..., 6] = (
dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)
)
if isinstance(self.box_coder, box_coder_utils.PreviousResidualDecoder):
batch_box_preds[..., 6] = common_utils.limit_period(
-(batch_box_preds[..., 6] + np.pi / 2), offset=0.5, period=np.pi * 2
)
return batch_cls_preds, batch_box_preds
def forward(self, **kwargs):
raise NotImplementedError
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/point_head_box.py | Python | import torch
from pcdet.models.dense_heads.point_head_template import PointHeadTemplate
from pcdet.utils import box_coder_utils, box_utils
class PointHeadBox(PointHeadTemplate):
    """Point-wise classification + box-regression head used by PointRCNN.

    Reference: "PointRCNN: 3D Object Proposal Generation and Detection from
    Point Cloud", https://arxiv.org/abs/1812.04244
    """

    def __init__(
        self,
        num_class,
        input_channels,
        model_cfg,
        predict_boxes_when_training=False,
        **kwargs
    ):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training

        # Per-point classification branch.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class,
        )

        # Box coder defined by the target config; its code size fixes the
        # width of the regression branch below.
        coder_cfg = self.model_cfg.TARGET_CONFIG
        self.box_coder = getattr(box_coder_utils, coder_cfg.BOX_CODER)(
            **coder_cfg.BOX_CODER_CONFIG
        )

        # Per-point box regression branch (predicts encoded residuals).
        self.box_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.REG_FC,
            input_channels=input_channels,
            output_channels=self.box_coder.code_size,
        )

    def assign_targets(self, input_dict):
        """Assign per-point classification and box-regression targets.

        Args:
            input_dict: must contain ``point_coords`` (N1+N2+..., 4) as
                [bs_idx, x, y, z] and ``gt_boxes`` (B, M, 8).

        Returns:
            dict with ``point_cls_labels`` (0: background, -1: ignored) and
            ``point_box_labels`` from ``assign_stack_targets``.
        """
        coords = input_dict["point_coords"]
        boxes = input_dict["gt_boxes"]
        assert boxes.shape.__len__() == 3, "gt_boxes.shape=%s" % str(boxes.shape)
        assert coords.shape.__len__() in [2], "points.shape=%s" % str(coords.shape)

        num_scenes = boxes.shape[0]
        # Slightly enlarged boxes define the "ignore" band around each GT box.
        enlarged = box_utils.enlarge_box3d(
            boxes.view(-1, boxes.shape[-1]),
            extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH,
        ).view(num_scenes, -1, boxes.shape[-1])

        return self.assign_stack_targets(
            points=coords,
            gt_boxes=boxes,
            extend_gt_boxes=enlarged,
            set_ignore_flag=True,
            use_ball_constraint=False,
            ret_part_labels=False,
            ret_box_labels=True,
        )

    def get_loss(self, tb_dict=None):
        """Total head loss = point classification loss + box regression loss."""
        if tb_dict is None:
            tb_dict = {}
        cls_loss, cls_tb = self.get_cls_layer_loss()
        box_loss, box_tb = self.get_box_layer_loss()
        tb_dict.update(cls_tb)
        tb_dict.update(box_tb)
        return cls_loss + box_loss, tb_dict

    def forward(self, batch_dict):
        """Run both branches; decode boxes when needed by downstream stages.

        Args:
            batch_dict: holds ``point_features`` (and optionally
                ``point_features_before_fusion``), ``point_coords`` as
                [bs_idx, x, y, z], and ``gt_boxes`` during training.

        Returns:
            batch_dict augmented with ``point_cls_scores`` and, outside pure
            training, decoded ``batch_cls_preds`` / ``batch_box_preds``.
        """
        use_pre_fusion = self.model_cfg.get("USE_POINT_FEATURES_BEFORE_FUSION", False)
        feats = (
            batch_dict["point_features_before_fusion"]
            if use_pre_fusion
            else batch_dict["point_features"]
        )

        cls_preds = self.cls_layers(feats)  # (total_points, num_class)
        box_preds = self.box_layers(feats)  # (total_points, box_code_size)

        # Per-point foreground confidence = sigmoid of the best class logit.
        batch_dict["point_cls_scores"] = torch.sigmoid(cls_preds.max(dim=-1)[0])

        ret_dict = {
            "point_cls_preds": cls_preds,
            "point_box_preds": box_preds,
        }

        if self.training:
            targets = self.assign_targets(batch_dict)
            ret_dict["point_cls_labels"] = targets["point_cls_labels"]
            ret_dict["point_box_labels"] = targets["point_box_labels"]

        # Decode for inference, or when a downstream RoI head needs proposals.
        if not self.training or self.predict_boxes_when_training:
            decoded_cls, decoded_box = self.generate_predicted_boxes(
                points=batch_dict["point_coords"][:, 1:4],
                point_cls_preds=cls_preds,
                point_box_preds=box_preds,
            )
            batch_dict["batch_cls_preds"] = decoded_cls
            batch_dict["batch_box_preds"] = decoded_box
            batch_dict["batch_index"] = batch_dict["point_coords"][:, 0]
            batch_dict["cls_preds_normalized"] = False

        self.forward_ret_dict = ret_dict
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/point_head_simple.py | Python | import torch
from pcdet.models.dense_heads.point_head_template import PointHeadTemplate
from pcdet.utils import box_utils
class PointHeadSimple(PointHeadTemplate):
    """Point-wise foreground segmentation head (PV-RCNN keypoint weighting).

    Reference: "PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object
    Detection", https://arxiv.org/abs/1912.13192
    """

    def __init__(self, num_class, input_channels, model_cfg, **kwargs):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        # Single classification branch; this head has no box regression.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class,
        )

    def assign_targets(self, input_dict):
        """Assign per-point classification targets.

        Args:
            input_dict: must contain ``point_coords`` (N1+N2+..., 4) as
                [bs_idx, x, y, z] and ``gt_boxes`` (B, M, 8).

        Returns:
            dict with ``point_cls_labels`` (0: background, -1: ignored).
        """
        coords = input_dict["point_coords"]
        boxes = input_dict["gt_boxes"]
        assert boxes.shape.__len__() == 3, "gt_boxes.shape=%s" % str(boxes.shape)
        assert coords.shape.__len__() in [2], "points.shape=%s" % str(coords.shape)

        num_scenes = boxes.shape[0]
        # Slightly enlarged boxes define the "ignore" band around each GT box.
        enlarged = box_utils.enlarge_box3d(
            boxes.view(-1, boxes.shape[-1]),
            extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH,
        ).view(num_scenes, -1, boxes.shape[-1])

        return self.assign_stack_targets(
            points=coords,
            gt_boxes=boxes,
            extend_gt_boxes=enlarged,
            set_ignore_flag=True,
            use_ball_constraint=False,
            ret_part_labels=False,
        )

    def get_loss(self, tb_dict=None):
        """Classification loss only — this head has no regression branch."""
        if tb_dict is None:
            tb_dict = {}
        cls_loss, cls_tb = self.get_cls_layer_loss()
        tb_dict.update(cls_tb)
        return cls_loss, tb_dict

    def forward(self, batch_dict):
        """Score every point's foreground likelihood.

        Args:
            batch_dict: holds ``point_features`` (and optionally
                ``point_features_before_fusion``) plus ``gt_boxes`` in training.

        Returns:
            batch_dict augmented with ``point_cls_scores``.
        """
        if self.model_cfg.get("USE_POINT_FEATURES_BEFORE_FUSION", False):
            feats = batch_dict["point_features_before_fusion"]
        else:
            feats = batch_dict["point_features"]

        cls_preds = self.cls_layers(feats)  # (total_points, num_class)

        # Foreground score = best per-class sigmoid probability.
        batch_dict["point_cls_scores"] = torch.sigmoid(cls_preds).max(dim=-1)[0]

        ret_dict = {"point_cls_preds": cls_preds}
        if self.training:
            targets = self.assign_targets(batch_dict)
            ret_dict["point_cls_labels"] = targets["point_cls_labels"]

        self.forward_ret_dict = ret_dict
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/point_head_template.py | Python | import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils
from pcdet.utils import common_utils, loss_utils
class PointHeadTemplate(nn.Module):
    """Base class shared by all point-wise heads.

    Provides FC-branch construction (``make_fc_layers``), per-point target
    assignment (``assign_stack_targets``) and the classification / box /
    intra-part losses. Sub-classes implement ``forward`` and store their
    predictions in ``self.forward_ret_dict`` for the ``get_*_loss`` methods.
    """
    def __init__(self, model_cfg, num_class):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.build_losses(self.model_cfg.LOSS_CONFIG)
        # Populated by the sub-class forward(); consumed by get_*_loss().
        self.forward_ret_dict = None
    def build_losses(self, losses_cfg):
        """Instantiate classification / regression loss functions from config."""
        # Focal loss for the per-point classification branch.
        self.add_module(
            "cls_loss_func",
            loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0),
        )
        reg_loss_type = losses_cfg.get("LOSS_REG", None)
        if reg_loss_type == "smooth-l1":
            self.reg_loss_func = F.smooth_l1_loss
        elif reg_loss_type == "l1":
            self.reg_loss_func = F.l1_loss
        elif reg_loss_type == "WeightedSmoothL1Loss":
            self.reg_loss_func = loss_utils.WeightedSmoothL1Loss(
                code_weights=losses_cfg.LOSS_WEIGHTS.get("code_weights", None)
            )
        else:
            # Default when LOSS_REG is missing or unrecognized.
            self.reg_loss_func = F.smooth_l1_loss
    @staticmethod
    def make_fc_layers(fc_cfg, input_channels, output_channels):
        """Build an MLP: [Linear(no bias) -> BN -> ReLU] per hidden width in
        ``fc_cfg``, followed by a final biased Linear to ``output_channels``."""
        fc_layers = []
        c_in = input_channels
        for k in range(0, fc_cfg.__len__()):
            fc_layers.extend(
                [
                    nn.Linear(c_in, fc_cfg[k], bias=False),
                    nn.BatchNorm1d(fc_cfg[k]),
                    nn.ReLU(),
                ]
            )
            c_in = fc_cfg[k]
        fc_layers.append(nn.Linear(c_in, output_channels, bias=True))
        return nn.Sequential(*fc_layers)
    def assign_stack_targets(
        self,
        points,
        gt_boxes,
        extend_gt_boxes=None,
        ret_box_labels=False,
        ret_part_labels=False,
        set_ignore_flag=True,
        use_ball_constraint=False,
        central_radius=2.0,
    ):
        """
        Assign per-point targets for a batch of stacked point clouds.

        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: [B, M, 8] enlarged boxes defining the ignore band
                (required when set_ignore_flag is True)
            ret_box_labels: also encode box regression targets for fg points
            ret_part_labels: also compute intra-object part labels in [0, 1]
            set_ignore_flag: label points in (extended - original) boxes as -1
            use_ball_constraint: alternative fg rule — keep points within
                ``central_radius`` of the box center (mutually exclusive with
                set_ignore_flag)
            central_radius: radius for the ball constraint
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_box_labels: (N1 + N2 + N3 + ..., code_size)
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, "points.shape=%s" % str(
            points.shape
        )
        assert (
            len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8
        ), "gt_boxes.shape=%s" % str(gt_boxes.shape)
        assert (
            extend_gt_boxes is None
            or len(extend_gt_boxes.shape) == 3
            and extend_gt_boxes.shape[2] == 8
        ), "extend_gt_boxes.shape=%s" % str(extend_gt_boxes.shape)
        assert set_ignore_flag != use_ball_constraint, "Choose one only!"
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = points.new_zeros(points.shape[0]).long()
        point_box_labels = (
            gt_boxes.new_zeros((points.shape[0], 8)) if ret_box_labels else None
        )
        point_part_labels = (
            gt_boxes.new_zeros((points.shape[0], 3)) if ret_part_labels else None
        )
        # Process each scene in the batch independently (points are stacked).
        for k in range(batch_size):
            bs_mask = bs_idx == k
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            # Index of the GT box containing each point (-1 when outside all).
            box_idxs_of_pts = (
                roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0),
                    gt_boxes[k : k + 1, :, 0:7].contiguous(),
                )
                .long()
                .squeeze(dim=0)
            )
            box_fg_flag = box_idxs_of_pts >= 0
            if set_ignore_flag:
                extend_box_idxs_of_pts = (
                    roiaware_pool3d_utils.points_in_boxes_gpu(
                        points_single.unsqueeze(dim=0),
                        extend_gt_boxes[k : k + 1, :, 0:7].contiguous(),
                    )
                    .long()
                    .squeeze(dim=0)
                )
                fg_flag = box_fg_flag
                # Points inside the enlarged box but outside the original one
                # are ambiguous — label them -1 (ignored by the cls loss).
                ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            elif use_ball_constraint:
                # Keep only fg points close to the (bottom-shifted) box center.
                box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
                box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2
                ball_flag = (box_centers - points_single).norm(dim=1) < central_radius
                fg_flag = box_fg_flag & ball_flag
            else:
                raise NotImplementedError
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            point_cls_labels_single[fg_flag] = (
                1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()
            )
            point_cls_labels[bs_mask] = point_cls_labels_single
            if ret_box_labels and gt_box_of_fg_points.shape[0] > 0:
                point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 8))
                fg_point_box_labels = self.box_coder.encode_torch(
                    gt_boxes=gt_box_of_fg_points[:, :-1],
                    points=points_single[fg_flag],
                    gt_classes=gt_box_of_fg_points[:, -1].long(),
                )
                point_box_labels_single[fg_flag] = fg_point_box_labels
                point_box_labels[bs_mask] = point_box_labels_single
            if ret_part_labels:
                point_part_labels_single = point_part_labels.new_zeros(
                    (bs_mask.sum(), 3)
                )
                # Express fg points in the box-local (canonical) frame...
                transformed_points = (
                    points_single[fg_flag] - gt_box_of_fg_points[:, 0:3]
                )
                transformed_points = common_utils.rotate_points_along_z(
                    transformed_points.view(-1, 1, 3), -gt_box_of_fg_points[:, 6]
                ).view(-1, 3)
                offset = (
                    torch.tensor([0.5, 0.5, 0.5]).view(1, 3).type_as(transformed_points)
                )
                # ...then normalize by box size so part labels lie in [0, 1].
                point_part_labels_single[fg_flag] = (
                    transformed_points / gt_box_of_fg_points[:, 3:6]
                ) + offset
                point_part_labels[bs_mask] = point_part_labels_single
        targets_dict = {
            "point_cls_labels": point_cls_labels,
            "point_box_labels": point_box_labels,
            "point_part_labels": point_part_labels,
        }
        return targets_dict
    def get_cls_layer_loss(self, tb_dict=None):
        """Focal classification loss over all points, normalized by #foreground."""
        point_cls_labels = self.forward_ret_dict["point_cls_labels"].view(-1)
        point_cls_preds = self.forward_ret_dict["point_cls_preds"].view(
            -1, self.num_class
        )
        positives = point_cls_labels > 0
        negative_cls_weights = (point_cls_labels == 0) * 1.0
        # Ignored points (label -1) get zero weight; fg/bg weighted equally.
        cls_weights = (negative_cls_weights + 1.0 * positives).float()
        pos_normalizer = positives.sum(dim=0).float()
        cls_weights /= torch.clamp(pos_normalizer, min=1.0)
        # One-hot over (num_class + 1) classes, then drop the background column;
        # ignored labels are clamped to 0 first so scatter_ stays in range.
        one_hot_targets = point_cls_preds.new_zeros(
            *list(point_cls_labels.shape), self.num_class + 1
        )
        one_hot_targets.scatter_(
            -1,
            (point_cls_labels * (point_cls_labels >= 0).long())
            .unsqueeze(dim=-1)
            .long(),
            1.0,
        )
        one_hot_targets = one_hot_targets[..., 1:]
        cls_loss_src = self.cls_loss_func(
            point_cls_preds, one_hot_targets, weights=cls_weights
        )
        point_loss_cls = cls_loss_src.sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_cls = point_loss_cls * loss_weights_dict["point_cls_weight"]
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update(
            {
                "point_loss_cls": point_loss_cls.item(),
                "point_pos_num": pos_normalizer.item(),
            }
        )
        return point_loss_cls, tb_dict
    def get_part_layer_loss(self, tb_dict=None):
        """BCE loss on intra-object part offsets, averaged over fg points."""
        pos_mask = self.forward_ret_dict["point_cls_labels"] > 0
        pos_normalizer = max(1, (pos_mask > 0).sum().item())
        point_part_labels = self.forward_ret_dict["point_part_labels"]
        point_part_preds = self.forward_ret_dict["point_part_preds"]
        point_loss_part = F.binary_cross_entropy(
            torch.sigmoid(point_part_preds), point_part_labels, reduction="none"
        )
        # Divide by 3: each point contributes three (x, y, z) part targets.
        point_loss_part = (point_loss_part.sum(dim=-1) * pos_mask.float()).sum() / (
            3 * pos_normalizer
        )
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_part = point_loss_part * loss_weights_dict["point_part_weight"]
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({"point_loss_part": point_loss_part.item()})
        return point_loss_part, tb_dict
    def get_box_layer_loss(self, tb_dict=None):
        """Regression loss on encoded box residuals, restricted to fg points.

        NOTE(review): the ``weights=`` kwarg assumes ``reg_loss_func`` is
        loss_utils.WeightedSmoothL1Loss; the F.smooth_l1_loss / F.l1_loss
        fallbacks do not accept it — confirm LOSS_REG in the configs.
        """
        pos_mask = self.forward_ret_dict["point_cls_labels"] > 0
        point_box_labels = self.forward_ret_dict["point_box_labels"]
        point_box_preds = self.forward_ret_dict["point_box_preds"]
        reg_weights = pos_mask.float()
        pos_normalizer = pos_mask.sum().float()
        reg_weights /= torch.clamp(pos_normalizer, min=1.0)
        point_loss_box_src = self.reg_loss_func(
            point_box_preds[None, ...],
            point_box_labels[None, ...],
            weights=reg_weights[None, ...],
        )
        point_loss_box = point_loss_box_src.sum()
        loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS
        point_loss_box = point_loss_box * loss_weights_dict["point_box_weight"]
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({"point_loss_box": point_loss_box.item()})
        return point_loss_box, tb_dict
    def generate_predicted_boxes(self, points, point_cls_preds, point_box_preds):
        """
        Decode per-point box residuals into absolute boxes.

        Args:
            points: (N, 3)
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        Returns:
            point_cls_preds: (N, num_class) — passed through unchanged
            point_box_preds: (N, box_code_size) — decoded absolute boxes
        """
        _, pred_classes = point_cls_preds.max(dim=-1)
        # Decode conditioned on the argmax class (+1: labels are 1-based).
        point_box_preds = self.box_coder.decode_torch(
            point_box_preds, points, pred_classes + 1
        )
        return point_cls_preds, point_box_preds
    def forward(self, **kwargs):
        # Template hook: concrete point heads implement their own forward pass.
        raise NotImplementedError
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/point_intra_part_head.py | Python | import torch
from pcdet.models.dense_heads.point_head_template import PointHeadTemplate
from pcdet.utils import box_coder_utils, box_utils
class PointIntraPartOffsetHead(PointHeadTemplate):
    """
    Point-based head for predicting the intra-object part locations.
    Reference Paper: https://arxiv.org/abs/1907.03670
    From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
    """
    def __init__(
        self,
        num_class,
        input_channels,
        model_cfg,
        predict_boxes_when_training=False,
        **kwargs
    ):
        super().__init__(model_cfg=model_cfg, num_class=num_class)
        self.predict_boxes_when_training = predict_boxes_when_training
        # Per-point classification branch.
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.CLS_FC,
            input_channels=input_channels,
            output_channels=num_class,
        )
        # Intra-object part regression branch: 3 values per point, squashed
        # to [0, 1] by sigmoid in forward().
        self.part_reg_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg.PART_FC,
            input_channels=input_channels,
            output_channels=3,
        )
        target_cfg = self.model_cfg.TARGET_CONFIG
        # Optional box branch, enabled only when a box coder is configured.
        if target_cfg.get("BOX_CODER", None) is not None:
            self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(
                **target_cfg.BOX_CODER_CONFIG
            )
            self.box_layers = self.make_fc_layers(
                fc_cfg=self.model_cfg.REG_FC,
                input_channels=input_channels,
                output_channels=self.box_coder.code_size,
            )
        else:
            self.box_layers = None
    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict["point_coords"]
        gt_boxes = input_dict["gt_boxes"]
        assert gt_boxes.shape.__len__() == 3, "gt_boxes.shape=%s" % str(gt_boxes.shape)
        assert point_coords.shape.__len__() in [2], "points.shape=%s" % str(
            point_coords.shape
        )
        batch_size = gt_boxes.shape[0]
        # Slightly enlarged boxes define the ignore band around each GT box.
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]),
            extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH,
        ).view(batch_size, -1, gt_boxes.shape[-1])
        targets_dict = self.assign_stack_targets(
            points=point_coords,
            gt_boxes=gt_boxes,
            extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True,
            use_ball_constraint=False,
            ret_part_labels=True,
            ret_box_labels=(self.box_layers is not None),
        )
        return targets_dict
    def get_loss(self, tb_dict=None):
        """Sum of classification, part-offset and (optional) box losses."""
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict = self.get_cls_layer_loss(tb_dict)
        point_loss_part, tb_dict = self.get_part_layer_loss(tb_dict)
        point_loss = point_loss_cls + point_loss_part
        if self.box_layers is not None:
            point_loss_box, tb_dict = self.get_box_layer_loss(tb_dict)
            point_loss += point_loss_box
        return point_loss, tb_dict
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        point_features = batch_dict["point_features"]
        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
        point_part_preds = self.part_reg_layers(point_features)
        ret_dict = {
            "point_cls_preds": point_cls_preds,
            "point_part_preds": point_part_preds,
        }
        if self.box_layers is not None:
            point_box_preds = self.box_layers(point_features)
            ret_dict["point_box_preds"] = point_box_preds
        point_cls_scores = torch.sigmoid(point_cls_preds)
        point_part_offset = torch.sigmoid(point_part_preds)
        batch_dict["point_cls_scores"], _ = point_cls_scores.max(dim=-1)
        batch_dict["point_part_offset"] = point_part_offset
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict["point_cls_labels"] = targets_dict["point_cls_labels"]
            ret_dict["point_part_labels"] = targets_dict.get("point_part_labels")
            ret_dict["point_box_labels"] = targets_dict.get("point_box_labels")
        # Decode boxes for inference, or when RoI heads need training proposals.
        if self.box_layers is not None and (
            not self.training or self.predict_boxes_when_training
        ):
            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
                points=batch_dict["point_coords"][:, 1:4],
                point_cls_preds=point_cls_preds,
                point_box_preds=ret_dict["point_box_preds"],
            )
            batch_dict["batch_cls_preds"] = point_cls_preds
            batch_dict["batch_box_preds"] = point_box_preds
            batch_dict["batch_index"] = batch_dict["point_coords"][:, 0]
            batch_dict["cls_preds_normalized"] = False
        self.forward_ret_dict = ret_dict
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/target_assigner/anchor_generator.py | Python | import torch
class AnchorGenerator(object):
    """Generate dense 3D anchor grids, one set per anchor class config."""
    def __init__(self, anchor_range, anchor_generator_config):
        super().__init__()
        self.anchor_generator_cfg = anchor_generator_config
        # [x_min, y_min, z_min, x_max, y_max, z_max] of the detection range.
        self.anchor_range = anchor_range
        self.anchor_sizes = [
            config["anchor_sizes"] for config in anchor_generator_config
        ]
        self.anchor_rotations = [
            config["anchor_rotations"] for config in anchor_generator_config
        ]
        # NOTE: configs must use the key "anchor_bottom_heights" (anchor bottom z).
        self.anchor_heights = [
            config["anchor_bottom_heights"] for config in anchor_generator_config
        ]
        self.align_center = [
            config.get("align_center", False) for config in anchor_generator_config
        ]
        assert (
            len(self.anchor_sizes)
            == len(self.anchor_rotations)
            == len(self.anchor_heights)
        )
        self.num_of_anchor_sets = len(self.anchor_sizes)
    def generate_anchors(self, grid_sizes):
        """Create anchor tensors for every configured anchor set.

        Args:
            grid_sizes: per-set BEV grid size [nx, ny]; one entry per set.
        Returns:
            all_anchors: list of CUDA tensors shaped
                (num_heights, ny, nx, num_sizes, num_rotations, 7), boxes as
                [x, y, z_center, dx, dy, dz, rotation].
            num_anchors_per_location: anchors per BEV cell, per set.
        """
        assert len(grid_sizes) == self.num_of_anchor_sets
        all_anchors = []
        num_anchors_per_location = []
        for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip(
            grid_sizes,
            self.anchor_sizes,
            self.anchor_rotations,
            self.anchor_heights,
            self.align_center,
        ):
            num_anchors_per_location.append(
                len(anchor_rotation) * len(anchor_size) * len(anchor_height)
            )
            if align_center:
                # Place anchors at cell centers.
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / grid_size[0]
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / grid_size[1]
                x_offset, y_offset = x_stride / 2, y_stride / 2
            else:
                # Place anchors on cell corners, spanning the range inclusively.
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (
                    grid_size[0] - 1
                )
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (
                    grid_size[1] - 1
                )
                x_offset, y_offset = 0, 0
            # NOTE(review): shift grids are created directly on CUDA, so this
            # generator cannot run on a CPU-only machine.
            x_shifts = torch.arange(
                self.anchor_range[0] + x_offset,
                self.anchor_range[3] + 1e-5,
                step=x_stride,
                dtype=torch.float32,
            ).cuda()
            y_shifts = torch.arange(
                self.anchor_range[1] + y_offset,
                self.anchor_range[4] + 1e-5,
                step=y_stride,
                dtype=torch.float32,
            ).cuda()
            z_shifts = x_shifts.new_tensor(anchor_height)
            num_anchor_size, num_anchor_rotation = (
                anchor_size.__len__(),
                anchor_rotation.__len__(),
            )
            anchor_rotation = x_shifts.new_tensor(anchor_rotation)
            anchor_size = x_shifts.new_tensor(anchor_size)
            x_shifts, y_shifts, z_shifts = torch.meshgrid(
                [x_shifts, y_shifts, z_shifts]
            )  # [x_grid, y_grid, z_grid]
            anchors = torch.stack(
                (x_shifts, y_shifts, z_shifts), dim=-1
            )  # [x, y, z, 3]
            # Broadcast every (x, y, z) location over all anchor sizes...
            anchors = anchors[:, :, :, None, :].repeat(1, 1, 1, anchor_size.shape[0], 1)
            anchor_size = anchor_size.view(1, 1, 1, -1, 3).repeat(
                [*anchors.shape[0:3], 1, 1]
            )
            anchors = torch.cat((anchors, anchor_size), dim=-1)
            # ...and over all rotations.
            anchors = anchors[:, :, :, :, None, :].repeat(
                1, 1, 1, 1, num_anchor_rotation, 1
            )
            anchor_rotation = anchor_rotation.view(1, 1, 1, 1, -1, 1).repeat(
                [*anchors.shape[0:3], num_anchor_size, 1, 1]
            )
            anchors = torch.cat(
                (anchors, anchor_rotation), dim=-1
            )  # [x, y, z, num_size, num_rot, 7]
            # Reorder spatial dims from (x, y, z, ...) to (z, y, x, ...).
            anchors = anchors.permute(2, 1, 0, 3, 4, 5).contiguous()
            # anchors = anchors.view(-1, anchors.shape[-1])
            anchors[..., 2] += anchors[..., 5] / 2  # shift to box centers
            all_anchors.append(anchors)
        return all_anchors, num_anchors_per_location
if __name__ == "__main__":
    # Minimal smoke test for AnchorGenerator. Requires a CUDA device because
    # generate_anchors() creates its shift grids directly on the GPU.
    from easydict import EasyDict

    config = [
        EasyDict(
            {
                "anchor_sizes": [
                    [2.1, 4.7, 1.7],
                    [0.86, 0.91, 1.73],
                    [0.84, 1.78, 1.78],
                ],
                "anchor_rotations": [0, 1.57],
                # Bugfix: AnchorGenerator.__init__ reads "anchor_bottom_heights";
                # the previous key "anchor_heights" raised a KeyError.
                "anchor_bottom_heights": [0, 0.5],
            }
        )
    ]

    A = AnchorGenerator(
        anchor_range=[-75.2, -75.2, -2, 75.2, 75.2, 4], anchor_generator_config=config
    )
    # Leftover `import pdb; pdb.set_trace()` debugger breakpoint removed.
    all_anchors, num_anchors_per_location = A.generate_anchors([[188, 188]])
    print([x.shape for x in all_anchors], num_anchors_per_location)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/target_assigner/atss_target_assigner.py | Python | import torch
from pcdet.ops.iou3d_nms import iou3d_nms_utils
from pcdet.utils import common_utils
class ATSSTargetAssigner(object):
    """
    Reference: https://arxiv.org/abs/1912.02424

    Adaptive Training Sample Selection: for each GT box, take the top-k
    closest anchors, derive an IoU threshold from their mean + std, and keep
    as positives those above it whose BEV centers fall inside the box.
    """
    def __init__(self, topk, box_coder, match_height=False):
        self.topk = topk  # number of candidate anchors per GT box
        self.box_coder = box_coder
        self.match_height = match_height  # True: 3D IoU, False: BEV IoU
    def assign_targets(self, anchors_list, gt_boxes_with_classes, use_multihead=False):
        """
        Args:
            anchors: [(N, 7), ...] one tensor per anchor set
            gt_boxes: (B, M, 8) boxes with the class id in the last column
        Returns:
            dict with box_cls_labels / box_reg_targets / reg_weights, stacked
            over the batch and concatenated over anchor sets when several.
        """
        if not isinstance(anchors_list, list):
            anchors_list = [anchors_list]
            single_set_of_anchor = True
        else:
            single_set_of_anchor = len(anchors_list) == 1
        cls_labels_list, reg_targets_list, reg_weights_list = [], [], []
        for anchors in anchors_list:
            batch_size = gt_boxes_with_classes.shape[0]
            gt_classes = gt_boxes_with_classes[:, :, -1]
            gt_boxes = gt_boxes_with_classes[:, :, :-1]
            if use_multihead:
                # Flatten (z, y, x, size, rot) dims in multihead layout order.
                anchors = (
                    anchors.permute(3, 4, 0, 1, 2, 5)
                    .contiguous()
                    .view(-1, anchors.shape[-1])
                )
            else:
                anchors = anchors.view(-1, anchors.shape[-1])
            cls_labels, reg_targets, reg_weights = [], [], []
            for k in range(batch_size):
                cur_gt = gt_boxes[k]
                # Strip zero-padded (empty) GT rows from the tail.
                cnt = cur_gt.__len__() - 1
                while cnt > 0 and cur_gt[cnt].sum() == 0:
                    cnt -= 1
                cur_gt = cur_gt[: cnt + 1]
                cur_gt_classes = gt_classes[k][: cnt + 1]
                (
                    cur_cls_labels,
                    cur_reg_targets,
                    cur_reg_weights,
                ) = self.assign_targets_single(anchors, cur_gt, cur_gt_classes)
                cls_labels.append(cur_cls_labels)
                reg_targets.append(cur_reg_targets)
                reg_weights.append(cur_reg_weights)
            cls_labels = torch.stack(cls_labels, dim=0)
            reg_targets = torch.stack(reg_targets, dim=0)
            reg_weights = torch.stack(reg_weights, dim=0)
            cls_labels_list.append(cls_labels)
            reg_targets_list.append(reg_targets)
            reg_weights_list.append(reg_weights)
        if single_set_of_anchor:
            ret_dict = {
                "box_cls_labels": cls_labels_list[0],
                "box_reg_targets": reg_targets_list[0],
                "reg_weights": reg_weights_list[0],
            }
        else:
            ret_dict = {
                "box_cls_labels": torch.cat(cls_labels_list, dim=1),
                "box_reg_targets": torch.cat(reg_targets_list, dim=1),
                "reg_weights": torch.cat(reg_weights_list, dim=1),
            }
        return ret_dict
    def assign_targets_single(self, anchors, gt_boxes, gt_classes):
        """
        Assign labels / regression targets for one scene.

        Args:
            anchors: (N, 7) [x, y, z, dx, dy, dz, heading]
            gt_boxes: (M, 7) [x, y, z, dx, dy, dz, heading]
            gt_classes: (M)
        Returns:
            cls_labels (N), reg_targets (N, code_size), reg_weights (N)
        """
        num_anchor = anchors.shape[0]
        num_gt = gt_boxes.shape[0]
        # select topk anchors for each gt_boxes
        if self.match_height:
            ious = iou3d_nms_utils.boxes_iou3d_gpu(
                anchors[:, 0:7], gt_boxes[:, 0:7]
            )  # (N, M)
        else:
            ious = iou3d_nms_utils.boxes_iou_bev(anchors[:, 0:7], gt_boxes[:, 0:7])
        # Candidates = the k anchors closest (center distance) to each GT.
        distance = (anchors[:, None, 0:3] - gt_boxes[None, :, 0:3]).norm(
            dim=-1
        )  # (N, M)
        _, topk_idxs = distance.topk(self.topk, dim=0, largest=False)  # (K, M)
        candidate_ious = ious[topk_idxs, torch.arange(num_gt)]  # (K, M)
        # Adaptive per-GT IoU threshold: mean + std of the candidate IoUs.
        iou_mean_per_gt = candidate_ious.mean(dim=0)
        iou_std_per_gt = candidate_ious.std(dim=0)
        iou_thresh_per_gt = iou_mean_per_gt + iou_std_per_gt + 1e-6
        is_pos = candidate_ious >= iou_thresh_per_gt[None, :]  # (K, M)
        # check whether anchor_center in gt_boxes, only check BEV x-y axes
        candidate_anchors = anchors[topk_idxs.view(-1)]  # (KxM, 7)
        gt_boxes_of_each_anchor = gt_boxes[:, :].repeat(self.topk, 1)  # (KxM, 7)
        xyz_local = candidate_anchors[:, 0:3] - gt_boxes_of_each_anchor[:, 0:3]
        xyz_local = common_utils.rotate_points_along_z(
            xyz_local[:, None, :], -gt_boxes_of_each_anchor[:, 6]
        ).squeeze(dim=1)
        xy_local = xyz_local[:, 0:2]
        lw = gt_boxes_of_each_anchor[:, 3:5][
            :, [1, 0]
        ]  # bugfixed: w ==> y, l ==> x in local coords
        is_in_gt = (
            ((xy_local <= lw / 2) & (xy_local >= -lw / 2)).all(dim=-1).view(-1, num_gt)
        )  # (K, M)
        is_pos = is_pos & is_in_gt  # (K, M)
        # Offset per-GT candidate indices into a flat (M*N) index space.
        for ng in range(num_gt):
            topk_idxs[:, ng] += ng * num_anchor
        # select the highest IoU if an anchor box is assigned with multiple gt_boxes
        INF = -0x7FFFFFFF
        ious_inf = torch.full_like(ious, INF).t().contiguous().view(-1)  # (MxN)
        index = topk_idxs.view(-1)[is_pos.view(-1)]
        ious_inf[index] = ious.t().contiguous().view(-1)[index]
        ious_inf = ious_inf.view(num_gt, -1).t()  # (N, M)
        anchors_to_gt_values, anchors_to_gt_indexs = ious_inf.max(dim=1)
        # match the gt_boxes to the anchors which have maximum iou with them
        max_iou_of_each_gt, argmax_iou_of_each_gt = ious.max(dim=0)
        anchors_to_gt_indexs[argmax_iou_of_each_gt] = torch.arange(
            0, num_gt, device=ious.device
        )
        anchors_to_gt_values[argmax_iou_of_each_gt] = max_iou_of_each_gt
        cls_labels = gt_classes[anchors_to_gt_indexs]
        # Anchors whose best value is still the INF sentinel matched nothing.
        cls_labels[anchors_to_gt_values == INF] = 0
        matched_gts = gt_boxes[anchors_to_gt_indexs]
        pos_mask = cls_labels > 0
        reg_targets = matched_gts.new_zeros((num_anchor, self.box_coder.code_size))
        reg_weights = matched_gts.new_zeros(num_anchor)
        if pos_mask.sum() > 0:
            reg_targets[pos_mask > 0] = self.box_coder.encode_torch(
                matched_gts[pos_mask > 0], anchors[pos_mask > 0]
            )
            reg_weights[pos_mask] = 1.0
        return cls_labels, reg_targets, reg_weights
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py | Python | import numpy as np
import torch
from pcdet.ops.iou3d_nms import iou3d_nms_utils
from pcdet.utils import box_utils
class AxisAlignedTargetAssigner(object):
    def __init__(self, model_cfg, class_names, box_coder, match_height=False):
        """Axis-aligned (max-IoU) anchor-target assigner.

        Args:
            model_cfg: dense-head config; reads ANCHOR_GENERATOR_CONFIG and
                TARGET_ASSIGNER_CONFIG.
            class_names: ordered class names; index + 1 is the class label.
            box_coder: encodes matched GT boxes into regression targets.
            match_height: True → match by 3D IoU; False → nearest-BEV IoU.
        """
        super().__init__()
        anchor_generator_cfg = model_cfg.ANCHOR_GENERATOR_CONFIG
        anchor_target_cfg = model_cfg.TARGET_ASSIGNER_CONFIG
        self.box_coder = box_coder
        self.match_height = match_height
        self.class_names = np.array(class_names)
        self.anchor_class_names = [
            config["class_name"] for config in anchor_generator_cfg
        ]
        # POS_FRACTION < 0 disables foreground subsampling.
        self.pos_fraction = (
            anchor_target_cfg.POS_FRACTION
            if anchor_target_cfg.POS_FRACTION >= 0
            else None
        )
        self.sample_size = anchor_target_cfg.SAMPLE_SIZE
        self.norm_by_num_examples = anchor_target_cfg.NORM_BY_NUM_EXAMPLES
        # Per-class IoU thresholds for positive / negative anchor matching.
        self.matched_thresholds = {}
        self.unmatched_thresholds = {}
        for config in anchor_generator_cfg:
            self.matched_thresholds[config["class_name"]] = config["matched_threshold"]
            self.unmatched_thresholds[config["class_name"]] = config[
                "unmatched_threshold"
            ]
        self.use_multihead = model_cfg.get("USE_MULTIHEAD", False)
        # self.separate_multihead = model_cfg.get('SEPARATE_MULTIHEAD', False)
        # if self.seperate_multihead:
        #     rpn_head_cfgs = model_cfg.RPN_HEAD_CFGS
        #     self.gt_remapping = {}
        #     for rpn_head_cfg in rpn_head_cfgs:
        #         for idx, name in enumerate(rpn_head_cfg['HEAD_CLS_NAME']):
        #             self.gt_remapping[name] = idx + 1
    def assign_targets(self, all_anchors, gt_boxes_with_classes):
        """
        Assign class labels and encoded regression targets for every anchor.

        Args:
            all_anchors: [(N, 7), ...] one anchor tensor per anchor class
            gt_boxes: (B, M, 8) boxes with the class id in the last column
        Returns:
            dict with box_cls_labels (B, num_anchors), box_reg_targets
            (B, num_anchors, code_size) and reg_weights (B, num_anchors).
        """
        bbox_targets = []
        cls_labels = []
        reg_weights = []
        batch_size = gt_boxes_with_classes.shape[0]
        gt_classes = gt_boxes_with_classes[:, :, -1]
        gt_boxes = gt_boxes_with_classes[:, :, :-1]
        for k in range(batch_size):
            cur_gt = gt_boxes[k]
            # Strip zero-padded (empty) GT rows from the tail.
            cnt = cur_gt.__len__() - 1
            while cnt > 0 and cur_gt[cnt].sum() == 0:
                cnt -= 1
            cur_gt = cur_gt[: cnt + 1]
            cur_gt_classes = gt_classes[k][: cnt + 1].int()
            target_list = []
            for anchor_class_name, anchors in zip(self.anchor_class_names, all_anchors):
                # Select only the GT boxes of this anchor's class; abs() keeps
                # boxes carrying negative ("don't care") class labels.
                if cur_gt_classes.shape[0] > 1:
                    mask = torch.from_numpy(
                        self.class_names[cur_gt_classes.cpu().abs() - 1]
                        == anchor_class_name
                    )
                else:
                    mask = torch.tensor(
                        [
                            self.class_names[torch.abs(c) - 1] == anchor_class_name
                            for c in cur_gt_classes
                        ],
                        dtype=torch.bool,
                    )
                if self.use_multihead:
                    # Flatten (z, y, x, size, rot) dims in multihead layout order.
                    anchors = (
                        anchors.permute(3, 4, 0, 1, 2, 5)
                        .contiguous()
                        .view(-1, anchors.shape[-1])
                    )
                    # if self.seperate_multihead:
                    #     selected_classes = cur_gt_classes[mask].clone()
                    #     if len(selected_classes) > 0:
                    #         new_cls_id = self.gt_remapping[anchor_class_name]
                    #         selected_classes[:] = new_cls_id
                    #     else:
                    #         selected_classes = cur_gt_classes[mask]
                    selected_classes = cur_gt_classes[mask]
                else:
                    # Remember the spatial layout so per-class targets can be
                    # reshaped and concatenated along the anchor dimension.
                    feature_map_size = anchors.shape[:3]
                    anchors = anchors.view(-1, anchors.shape[-1])
                    selected_classes = cur_gt_classes[mask]
                single_target = self.assign_targets_single(
                    anchors,
                    cur_gt[mask],
                    gt_classes=selected_classes,
                    matched_threshold=self.matched_thresholds[anchor_class_name],
                    unmatched_threshold=self.unmatched_thresholds[anchor_class_name],
                )
                target_list.append(single_target)
            if self.use_multihead:
                target_dict = {
                    "box_cls_labels": [
                        t["box_cls_labels"].view(-1) for t in target_list
                    ],
                    "box_reg_targets": [
                        t["box_reg_targets"].view(-1, self.box_coder.code_size)
                        for t in target_list
                    ],
                    "reg_weights": [t["reg_weights"].view(-1) for t in target_list],
                }
                target_dict["box_reg_targets"] = torch.cat(
                    target_dict["box_reg_targets"], dim=0
                )
                target_dict["box_cls_labels"] = torch.cat(
                    target_dict["box_cls_labels"], dim=0
                ).view(-1)
                target_dict["reg_weights"] = torch.cat(
                    target_dict["reg_weights"], dim=0
                ).view(-1)
            else:
                target_dict = {
                    "box_cls_labels": [
                        t["box_cls_labels"].view(*feature_map_size, -1)
                        for t in target_list
                    ],
                    "box_reg_targets": [
                        t["box_reg_targets"].view(
                            *feature_map_size, -1, self.box_coder.code_size
                        )
                        for t in target_list
                    ],
                    "reg_weights": [
                        t["reg_weights"].view(*feature_map_size, -1)
                        for t in target_list
                    ],
                }
                # Concatenate per-class targets along the per-cell anchor dim,
                # then flatten back to a single anchor axis.
                target_dict["box_reg_targets"] = torch.cat(
                    target_dict["box_reg_targets"], dim=-2
                ).view(-1, self.box_coder.code_size)
                target_dict["box_cls_labels"] = torch.cat(
                    target_dict["box_cls_labels"], dim=-1
                ).view(-1)
                target_dict["reg_weights"] = torch.cat(
                    target_dict["reg_weights"], dim=-1
                ).view(-1)
            bbox_targets.append(target_dict["box_reg_targets"])
            cls_labels.append(target_dict["box_cls_labels"])
            reg_weights.append(target_dict["reg_weights"])
        bbox_targets = torch.stack(bbox_targets, dim=0)
        cls_labels = torch.stack(cls_labels, dim=0)
        reg_weights = torch.stack(reg_weights, dim=0)
        all_targets_dict = {
            "box_cls_labels": cls_labels,
            "box_reg_targets": bbox_targets,
            "reg_weights": reg_weights,
        }
        return all_targets_dict
def assign_targets_single(
self,
anchors,
gt_boxes,
gt_classes,
matched_threshold=0.6,
unmatched_threshold=0.45,
):
num_anchors = anchors.shape[0]
num_gt = gt_boxes.shape[0]
labels = (
torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
)
gt_ids = (
torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
)
if len(gt_boxes) > 0 and anchors.shape[0] > 0:
anchor_by_gt_overlap = (
iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7])
if self.match_height
else box_utils.boxes3d_nearest_bev_iou(
anchors[:, 0:7], gt_boxes[:, 0:7]
)
)
anchor_to_gt_argmax = torch.from_numpy(
anchor_by_gt_overlap.cpu().numpy().argmax(axis=1)
).cuda()
anchor_to_gt_max = anchor_by_gt_overlap[
torch.arange(num_anchors, device=anchors.device), anchor_to_gt_argmax
]
gt_to_anchor_argmax = torch.from_numpy(
anchor_by_gt_overlap.cpu().numpy().argmax(axis=0)
).cuda()
gt_to_anchor_max = anchor_by_gt_overlap[
gt_to_anchor_argmax, torch.arange(num_gt, device=anchors.device)
]
empty_gt_mask = gt_to_anchor_max == 0
gt_to_anchor_max[empty_gt_mask] = -1
anchors_with_max_overlap = (
anchor_by_gt_overlap == gt_to_anchor_max
).nonzero()[:, 0]
gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
gt_ids[anchors_with_max_overlap] = gt_inds_force.int()
pos_inds = anchor_to_gt_max >= matched_threshold
gt_inds_over_thresh = anchor_to_gt_argmax[pos_inds]
labels[pos_inds] = gt_classes[gt_inds_over_thresh]
gt_ids[pos_inds] = gt_inds_over_thresh.int()
bg_inds = (anchor_to_gt_max < unmatched_threshold).nonzero()[:, 0]
else:
bg_inds = torch.arange(num_anchors, device=anchors.device)
fg_inds = (labels > 0).nonzero()[:, 0]
if self.pos_fraction is not None:
num_fg = int(self.pos_fraction * self.sample_size)
if len(fg_inds) > num_fg:
num_disabled = len(fg_inds) - num_fg
disable_inds = torch.randperm(len(fg_inds))[:num_disabled]
labels[disable_inds] = -1
fg_inds = (labels > 0).nonzero()[:, 0]
num_bg = self.sample_size - (labels > 0).sum()
if len(bg_inds) > num_bg:
enable_inds = bg_inds[torch.randint(0, len(bg_inds), size=(num_bg,))]
labels[enable_inds] = 0
# bg_inds = torch.nonzero(labels == 0)[:, 0]
else:
if len(gt_boxes) == 0 or anchors.shape[0] == 0:
labels[:] = 0
else:
labels[bg_inds] = 0
labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
bbox_targets = anchors.new_zeros((num_anchors, self.box_coder.code_size))
if len(gt_boxes) > 0 and anchors.shape[0] > 0:
fg_gt_boxes = gt_boxes[anchor_to_gt_argmax[fg_inds], :]
fg_anchors = anchors[fg_inds, :]
bbox_targets[fg_inds, :] = self.box_coder.encode_torch(
fg_gt_boxes, fg_anchors
)
reg_weights = anchors.new_zeros((num_anchors,))
if self.norm_by_num_examples:
num_examples = (labels >= 0).sum()
num_examples = num_examples if num_examples > 1.0 else 1.0
reg_weights[labels > 0] = 1.0 / num_examples
else:
reg_weights[labels > 0] = 1.0
ret_dict = {
"box_cls_labels": labels,
"box_reg_targets": bbox_targets,
"reg_weights": reg_weights,
}
return ret_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/detectors/PartA2_net.py | Python | from pcdet.models.detectors.detector3d_template import Detector3DTemplate
class PartA2Net(Detector3DTemplate):
    """Part-A2 detector: runs the module pipeline and sums RPN, point and RCNN losses."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Feed the batch through every sub-module; losses in train mode, detections otherwise."""
        for module in self.module_list:
            batch_dict = module(batch_dict)
        if not self.training:
            return self.post_processing(batch_dict)
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {"loss": loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum the dense-head (RPN), point-head and RoI-head losses."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        return loss_rpn + loss_point + loss_rcnn, tb_dict, disp_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/detectors/__init__.py | Python | from pcdet.models.detectors.detector3d_template import Detector3DTemplate
from pcdet.models.detectors.PartA2_net import PartA2Net
from pcdet.models.detectors.point_rcnn import PointRCNN
from pcdet.models.detectors.pointpillar import PointPillar
from pcdet.models.detectors.pv_rcnn import PVRCNN
from pcdet.models.detectors.second_net import SECONDNet
from pcdet.models.detectors.second_net_iou import SECONDNetIoU
# Registry mapping config NAME strings to detector classes (used by build_detector).
__all__ = {
    "Detector3DTemplate": Detector3DTemplate,
    "SECONDNet": SECONDNet,
    "PartA2Net": PartA2Net,
    "PVRCNN": PVRCNN,
    "PointPillar": PointPillar,
    "PointRCNN": PointRCNN,
    "SECONDNetIoU": SECONDNetIoU,
}
def build_detector(model_cfg, num_class, dataset):
    """Instantiate the detector named by ``model_cfg.NAME`` from the registry above."""
    detector_cls = __all__[model_cfg.NAME]
    return detector_cls(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/detectors/detector3d_template.py | Python | import os
import torch
import torch.nn as nn
from pcdet.config import cfg
from pcdet.models import backbones_2d, backbones_3d, dense_heads, roi_heads
from pcdet.models.backbones_2d import map_to_bev
from pcdet.models.backbones_3d import pfe, vfe
from pcdet.models.model_utils import model_nms_utils
from pcdet.ops.iou3d_nms import iou3d_nms_utils
class Detector3DTemplate(nn.Module):
    """Template base class for 3D detectors.

    Builds the sub-module pipeline listed in ``module_topology`` from
    ``model_cfg`` and provides shared post-processing, recall bookkeeping and
    checkpoint-loading utilities for all concrete detectors.
    """
    def __init__(self, model_cfg, num_class, dataset):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # Registered as a buffer so the step counter is persisted in checkpoints.
        self.register_buffer("global_step", torch.LongTensor(1).zero_())
        # Build order matters: each builder consumes feature sizes produced by
        # the previous one via model_info_dict (see build_networks).
        self.module_topology = [
            "vfe",
            "backbone_3d",
            "map_to_bev_module",
            "pfe",
            "backbone_2d",
            "dense_head",
            "point_head",
            "roi_head",
        ]
    @property
    def mode(self):
        """'TRAIN' or 'TEST' depending on ``self.training``."""
        return "TRAIN" if self.training else "TEST"
    def update_global_step(self):
        """Advance the persistent global training-step counter by one."""
        self.global_step += 1
    def build_networks(self):
        """Instantiate all configured sub-modules in topology order.

        Returns:
            list of instantiated modules; builders whose config key is absent
            contribute nothing.
        """
        model_info_dict = {
            "module_list": [],
            "num_rawpoint_features": self.dataset.point_feature_encoder.num_point_features,
            "num_point_features": self.dataset.point_feature_encoder.num_point_features,
            "grid_size": self.dataset.grid_size,
            "point_cloud_range": self.dataset.point_cloud_range,
            "voxel_size": self.dataset.voxel_size,
        }
        for module_name in self.module_topology:
            module, model_info_dict = getattr(self, "build_%s" % module_name)(
                model_info_dict=model_info_dict
            )
            self.add_module(module_name, module)
        return model_info_dict["module_list"]
    def build_vfe(self, model_info_dict):
        """Build the voxel feature encoder if configured; updates point-feature dim."""
        if self.model_cfg.get("VFE", None) is None:
            return None, model_info_dict
        vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
            model_cfg=self.model_cfg.VFE,
            num_point_features=model_info_dict["num_rawpoint_features"],
            point_cloud_range=model_info_dict["point_cloud_range"],
            voxel_size=model_info_dict["voxel_size"],
        )
        model_info_dict["num_point_features"] = vfe_module.get_output_feature_dim()
        model_info_dict["module_list"].append(vfe_module)
        return vfe_module, model_info_dict
    def build_backbone_3d(self, model_info_dict):
        """Build the 3D backbone if configured; updates point-feature dim."""
        if self.model_cfg.get("BACKBONE_3D", None) is None:
            return None, model_info_dict
        backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
            model_cfg=self.model_cfg.BACKBONE_3D,
            input_channels=model_info_dict["num_point_features"],
            grid_size=model_info_dict["grid_size"],
            voxel_size=model_info_dict["voxel_size"],
            point_cloud_range=model_info_dict["point_cloud_range"],
        )
        model_info_dict["module_list"].append(backbone_3d_module)
        model_info_dict["num_point_features"] = backbone_3d_module.num_point_features
        return backbone_3d_module, model_info_dict
    def build_map_to_bev_module(self, model_info_dict):
        """Build the 3D-to-BEV projection module if configured; sets BEV feature dim."""
        if self.model_cfg.get("MAP_TO_BEV", None) is None:
            return None, model_info_dict
        map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
            model_cfg=self.model_cfg.MAP_TO_BEV, grid_size=model_info_dict["grid_size"]
        )
        model_info_dict["module_list"].append(map_to_bev_module)
        model_info_dict["num_bev_features"] = map_to_bev_module.num_bev_features
        return map_to_bev_module, model_info_dict
    def build_backbone_2d(self, model_info_dict):
        """Build the BEV 2D backbone if configured; updates BEV feature dim."""
        if self.model_cfg.get("BACKBONE_2D", None) is None:
            return None, model_info_dict
        backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
            model_cfg=self.model_cfg.BACKBONE_2D,
            input_channels=model_info_dict["num_bev_features"],
        )
        model_info_dict["module_list"].append(backbone_2d_module)
        model_info_dict["num_bev_features"] = backbone_2d_module.num_bev_features
        return backbone_2d_module, model_info_dict
    def build_pfe(self, model_info_dict):
        """Build the point feature extractor (e.g. VoxelSetAbstraction) if configured."""
        if self.model_cfg.get("PFE", None) is None:
            return None, model_info_dict
        pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
            model_cfg=self.model_cfg.PFE,
            voxel_size=model_info_dict["voxel_size"],
            point_cloud_range=model_info_dict["point_cloud_range"],
            num_bev_features=model_info_dict["num_bev_features"],
            num_rawpoint_features=model_info_dict["num_rawpoint_features"],
        )
        model_info_dict["module_list"].append(pfe_module)
        model_info_dict["num_point_features"] = pfe_module.num_point_features
        model_info_dict["num_point_features_before_fusion"] = (
            pfe_module.num_point_features_before_fusion
        )
        return pfe_module, model_info_dict
    def build_dense_head(self, model_info_dict):
        """Build the dense (first-stage) detection head if configured."""
        if self.model_cfg.get("DENSE_HEAD", None) is None:
            return None, model_info_dict
        dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
            model_cfg=self.model_cfg.DENSE_HEAD,
            input_channels=model_info_dict["num_bev_features"],
            num_class=(
                self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1
            ),
            class_names=self.class_names,
            grid_size=model_info_dict["grid_size"],
            point_cloud_range=model_info_dict["point_cloud_range"],
            # Boxes are decoded during training only when an RoI head will consume them.
            predict_boxes_when_training=self.model_cfg.get("ROI_HEAD", False),
        )
        model_info_dict["module_list"].append(dense_head_module)
        return dense_head_module, model_info_dict
    def build_point_head(self, model_info_dict):
        """Build the per-point head if configured; picks pre- or post-fusion features."""
        if self.model_cfg.get("POINT_HEAD", None) is None:
            return None, model_info_dict
        if self.model_cfg.POINT_HEAD.get("USE_POINT_FEATURES_BEFORE_FUSION", False):
            num_point_features = model_info_dict["num_point_features_before_fusion"]
        else:
            num_point_features = model_info_dict["num_point_features"]
        point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
            model_cfg=self.model_cfg.POINT_HEAD,
            input_channels=num_point_features,
            num_class=(
                self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1
            ),
            predict_boxes_when_training=self.model_cfg.get("ROI_HEAD", False),
        )
        model_info_dict["module_list"].append(point_head_module)
        return point_head_module, model_info_dict
    def build_roi_head(self, model_info_dict):
        """Build the second-stage RoI refinement head if configured."""
        if self.model_cfg.get("ROI_HEAD", None) is None:
            return None, model_info_dict
        point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
            model_cfg=self.model_cfg.ROI_HEAD,
            input_channels=model_info_dict["num_point_features"],
            num_class=(
                self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1
            ),
        )
        model_info_dict["module_list"].append(point_head_module)
        return point_head_module, model_info_dict
    def forward(self, **kwargs):
        """Concrete detectors must implement their own forward pass."""
        raise NotImplementedError
    def post_processing(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois)  1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            (pred_dicts, recall_dict): per-sample final boxes/scores/labels
            after NMS, plus accumulated recall statistics.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict["batch_size"]
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get("batch_index", None) is not None:
                # Stacked (N1+N2+..., ...) predictions: mask this sample's rows.
                assert batch_dict["batch_box_preds"].shape.__len__() == 2
                batch_mask = batch_dict["batch_index"] == index
            else:
                # Dense (B, N, ...) predictions: index the batch dimension.
                assert batch_dict["batch_box_preds"].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict["batch_box_preds"][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict["batch_cls_preds"], list):
                cls_preds = batch_dict["batch_cls_preds"][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict["cls_preds_normalized"]:
                    cls_preds = torch.sigmoid(cls_preds)
            else:
                cls_preds = [x[batch_mask] for x in batch_dict["batch_cls_preds"]]
                src_cls_preds = cls_preds
                if not batch_dict["cls_preds_normalized"]:
                    cls_preds = [torch.sigmoid(x) for x in cls_preds]
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                if not isinstance(cls_preds, list):
                    cls_preds = [cls_preds]
                    # NOTE(review): arange(1, num_class) yields num_class - 1
                    # labels; other codebase variants use num_class + 1 here —
                    # confirm for multi-class single-head configs.
                    multihead_label_mapping = [
                        torch.arange(1, self.num_class, device=cls_preds[0].device)
                    ]
                else:
                    multihead_label_mapping = batch_dict["multihead_label_mapping"]
                cur_start_idx = 0
                pred_scores, pred_labels, pred_boxes = [], [], []
                # Run per-head NMS over the stacked predictions, head by head.
                for cur_cls_preds, cur_label_mapping in zip(
                    cls_preds, multihead_label_mapping
                ):
                    assert cur_cls_preds.shape[1] == len(cur_label_mapping)
                    cur_box_preds = box_preds[
                        cur_start_idx : cur_start_idx + cur_cls_preds.shape[0]
                    ]
                    (
                        cur_pred_scores,
                        cur_pred_labels,
                        cur_pred_boxes,
                    ) = model_nms_utils.multi_classes_nms(
                        cls_scores=cur_cls_preds,
                        box_preds=cur_box_preds,
                        nms_config=post_process_cfg.NMS_CONFIG,
                        score_thresh=post_process_cfg.SCORE_THRESH,
                    )
                    # Map head-local label indices back to global class labels.
                    cur_pred_labels = cur_label_mapping[cur_pred_labels]
                    pred_scores.append(cur_pred_scores)
                    pred_labels.append(cur_pred_labels)
                    pred_boxes.append(cur_pred_boxes)
                    cur_start_idx += cur_cls_preds.shape[0]
                final_scores = torch.cat(pred_scores, dim=0)
                final_labels = torch.cat(pred_labels, dim=0)
                final_boxes = torch.cat(pred_boxes, dim=0)
            else:
                cls_preds, label_preds = torch.max(cls_preds, dim=-1)
                if batch_dict.get("has_class_labels", False):
                    # Two-stage models carry class labels from the RoI stage.
                    label_key = (
                        "roi_labels"
                        if "roi_labels" in batch_dict
                        else "batch_pred_labels"
                    )
                    label_preds = batch_dict[label_key][index]
                else:
                    # Shift argmax indices to 1-based class labels.
                    label_preds = label_preds + 1
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=cls_preds,
                    box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH,
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    # Report pre-sigmoid maxima instead of normalized scores.
                    max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
                    selected_scores = max_cls_preds[selected]
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if "rois" not in batch_dict else src_box_preds,
                recall_dict=recall_dict,
                batch_index=index,
                data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST,
            )
            record_dict = {
                "pred_boxes": final_boxes,
                "pred_scores": final_scores,
                "pred_labels": final_labels,
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
    @staticmethod
    def generate_recall_record(
        box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None
    ):
        """Accumulate GT recall counts for rois and final boxes at each IoU threshold.

        Counters are keyed 'roi_<t>' / 'rcnn_<t>' plus a running 'gt' total.
        """
        if "gt_boxes" not in data_dict:
            return recall_dict
        rois = data_dict["rois"][batch_index] if "rois" in data_dict else None
        gt_boxes = data_dict["gt_boxes"][batch_index]
        if recall_dict.__len__() == 0:
            recall_dict = {"gt": 0}
            for cur_thresh in thresh_list:
                recall_dict["roi_%s" % (str(cur_thresh))] = 0
                recall_dict["rcnn_%s" % (str(cur_thresh))] = 0
        cur_gt = gt_boxes
        k = cur_gt.__len__() - 1
        # GT boxes are zero-padded at the end; trim trailing all-zero rows.
        while k > 0 and cur_gt[k].sum() == 0:
            k -= 1
        cur_gt = cur_gt[: k + 1]
        if cur_gt.shape[0] > 0:
            if box_preds.shape[0] > 0:
                iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(
                    box_preds[:, 0:7], cur_gt[:, 0:7]
                )
            else:
                iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
            if rois is not None:
                iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(
                    rois[:, 0:7], cur_gt[:, 0:7]
                )
            for cur_thresh in thresh_list:
                if iou3d_rcnn.shape[0] == 0:
                    recall_dict["rcnn_%s" % str(cur_thresh)] += 0
                else:
                    rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict["rcnn_%s" % str(cur_thresh)] += rcnn_recalled
                if rois is not None:
                    roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
                    recall_dict["roi_%s" % str(cur_thresh)] += roi_recalled
            recall_dict["gt"] += cur_gt.shape[0]
        else:
            # NOTE(review): gt_iou is assigned but never used — presumably a
            # leftover; confirm before removing.
            gt_iou = box_preds.new_zeros(box_preds.shape[0])
        return recall_dict
    def load_params_from_file(self, filename, logger, to_cpu=False):
        """Load model weights from a checkpoint, keeping only shape-matching keys.

        Raises:
            FileNotFoundError: if ``filename`` does not exist.
        """
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info(
            "==> Loading parameters from checkpoint %s to %s"
            % (filename, "CPU" if to_cpu else "GPU")
        )
        loc_type = torch.device("cpu") if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        model_state_disk = checkpoint["model_state"]
        if "version" in checkpoint:
            logger.info(
                "==> Checkpoint trained from version: %s" % checkpoint["version"]
            )
        update_model_state = {}
        for key, val in model_state_disk.items():
            if (
                key in self.state_dict()
                and self.state_dict()[key].shape == model_state_disk[key].shape
            ):
                update_model_state[key] = val
                # logger.info('Update weight %s: %s' % (key, str(val.shape)))
        if cfg.get("SELF_TRAIN", None) and cfg.SELF_TRAIN.get("DSNORM", None):
            # DSNorm models consume the raw checkpoint dict: DSNorm layers
            # remap vanilla BN keys in their custom _load_from_state_dict.
            self.load_state_dict(model_state_disk)
        else:
            state_dict = self.state_dict()
            state_dict.update(update_model_state)
            self.load_state_dict(state_dict)
        key_not_found = False
        for key in state_dict:
            if key not in update_model_state:
                logger.info(
                    "Not updated weight %s: %s" % (key, str(state_dict[key].shape))
                )
                key_not_found = True
        if key_not_found:
            print(f"Some weights in (unknown) are not loaded!")
            # NOTE(review): interactive debugger breakpoint left in library
            # code — consider removing for non-interactive runs.
            import ipdb

            ipdb.set_trace()
            pass
        logger.info(
            "==> Done (loaded %d/%d)"
            % (len(update_model_state), len(self.state_dict()))
        )
    def load_params_with_optimizer(
        self, filename, to_cpu=False, optimizer=None, logger=None
    ):
        """Load model (strictly) and optimizer state from a checkpoint.

        Falls back to a sibling ``*_optim.*`` file if the checkpoint itself
        carries no optimizer state.

        Returns:
            (it, epoch): iteration and epoch counters stored in the checkpoint.

        Raises:
            FileNotFoundError: if ``filename`` does not exist.
        """
        if not os.path.isfile(filename):
            raise FileNotFoundError
        logger.info(
            "==> Loading parameters from checkpoint %s to %s"
            % (filename, "CPU" if to_cpu else "GPU")
        )
        loc_type = torch.device("cpu") if to_cpu else None
        checkpoint = torch.load(filename, map_location=loc_type)
        epoch = checkpoint.get("epoch", -1)
        it = checkpoint.get("it", 0.0)
        self.load_state_dict(checkpoint["model_state"])
        if optimizer is not None:
            if (
                "optimizer_state" in checkpoint
                and checkpoint["optimizer_state"] is not None
            ):
                logger.info(
                    "==> Loading optimizer parameters from checkpoint %s to %s"
                    % (filename, "CPU" if to_cpu else "GPU")
                )
                optimizer.load_state_dict(checkpoint["optimizer_state"])
            else:
                # Expect a '<name>_optim.<ext>' sibling file next to the checkpoint.
                assert filename[-4] == ".", filename
                src_file, ext = filename[:-4], filename[-3:]
                optimizer_filename = "%s_optim.%s" % (src_file, ext)
                if os.path.exists(optimizer_filename):
                    optimizer_ckpt = torch.load(
                        optimizer_filename, map_location=loc_type
                    )
                    optimizer.load_state_dict(optimizer_ckpt["optimizer_state"])
        if "version" in checkpoint:
            print("==> Checkpoint trained from version: %s" % checkpoint["version"])
        logger.info("==> Done")
        return it, epoch
    def post_processing_multicriterion(self, batch_dict):
        """
        For
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                roi_labels: (B, num_rois)  1 .. num_classes
        Returns:
            (pred_dicts, recall_dict): NMS scored by IoU, cls, or a weighted
            hybrid of the two, per NMS_CONFIG.SCORE_TYPE.
        """
        post_process_cfg = self.model_cfg.POST_PROCESSING
        batch_size = batch_dict["batch_size"]
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get("batch_index", None) is not None:
                assert batch_dict["batch_cls_preds"].shape.__len__() == 2
                batch_mask = batch_dict["batch_index"] == index
            else:
                assert batch_dict["batch_cls_preds"].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict["batch_box_preds"][batch_mask]
            # batch_cls_preds holds the predicted IoU scores for this model family;
            # roi_scores are the first-stage classification scores.
            iou_preds = batch_dict["batch_cls_preds"][batch_mask]
            cls_preds = batch_dict["roi_scores"][batch_mask]
            if isinstance(cls_preds, list):
                cls_preds = torch.cat(cls_preds).squeeze()
            else:
                cls_preds = cls_preds.squeeze()
            src_iou_preds = iou_preds
            src_box_preds = box_preds
            src_cls_preds = cls_preds
            assert iou_preds.shape[1] in [1, self.num_class]
            if not batch_dict["cls_preds_normalized"]:
                iou_preds = torch.sigmoid(iou_preds)
                cls_preds = torch.sigmoid(cls_preds)
            # TODO
            if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                iou_preds, label_preds = torch.max(iou_preds, dim=-1)
                label_preds = (
                    batch_dict["roi_labels"][index]
                    if batch_dict.get("has_class_labels", False)
                    else label_preds + 1
                )
                if isinstance(label_preds, list):
                    label_preds = torch.cat(label_preds, dim=0)
                if post_process_cfg.NMS_CONFIG.get("SCORE_WEIGHTS", None):
                    weight_iou = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.iou
                    weight_cls = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.cls
                # Select the NMS ranking score: IoU (default), cls, or hybrid.
                if (
                    post_process_cfg.NMS_CONFIG.get("SCORE_TYPE", None) == "iou"
                    or post_process_cfg.NMS_CONFIG.get("SCORE_TYPE", None) is None
                ):
                    nms_scores = iou_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == "cls":
                    nms_scores = cls_preds
                elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == "hybrid_iou_cls":
                    assert weight_iou + weight_cls == 1
                    nms_scores = weight_iou * iou_preds + weight_cls * cls_preds
                else:
                    raise NotImplementedError
                selected, selected_scores = model_nms_utils.class_agnostic_nms(
                    box_scores=nms_scores,
                    box_preds=box_preds,
                    nms_config=post_process_cfg.NMS_CONFIG,
                    score_thresh=post_process_cfg.SCORE_THRESH,
                )
                if post_process_cfg.OUTPUT_RAW_SCORE:
                    raise NotImplementedError
                final_scores = selected_scores
                final_labels = label_preds[selected]
                final_boxes = box_preds[selected]
            recall_dict = self.generate_recall_record(
                box_preds=final_boxes if "rois" not in batch_dict else src_box_preds,
                recall_dict=recall_dict,
                batch_index=index,
                data_dict=batch_dict,
                thresh_list=post_process_cfg.RECALL_THRESH_LIST,
            )
            record_dict = {
                "pred_boxes": final_boxes,
                "pred_scores": final_scores,
                "pred_labels": final_labels,
                "pred_cls_scores": cls_preds[selected],
                "pred_iou_scores": iou_preds[selected],
            }
            pred_dicts.append(record_dict)
        return pred_dicts, recall_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/detectors/point_rcnn.py | Python | from pcdet.models.detectors.detector3d_template import Detector3DTemplate
class PointRCNN(Detector3DTemplate):
    """PointRCNN detector: point-head proposals refined by an RCNN head."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run the module pipeline; losses in train mode, detections otherwise."""
        for net in self.module_list:
            batch_dict = net(batch_dict)
        if not self.training:
            pred_dicts, recall_dicts = self.post_processing(batch_dict)
            return pred_dicts, recall_dicts
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {"loss": loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum the point-head and RCNN-head losses."""
        disp_dict = {}
        loss_point, tb_dict = self.point_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        return loss_point + loss_rcnn, tb_dict, disp_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/detectors/pointpillar.py | Python | from pcdet.models.detectors.detector3d_template import Detector3DTemplate
class PointPillar(Detector3DTemplate):
    """PointPillars detector: single-stage, trained with the RPN loss only."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run the module pipeline; losses in train mode, detections otherwise."""
        for stage in self.module_list:
            batch_dict = stage(batch_dict)
        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss()
            return {"loss": loss}, tb_dict, disp_dict
        return self.post_processing(batch_dict)

    def get_training_loss(self):
        """Return the dense-head loss plus tensorboard / display dicts."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {"loss_rpn": loss_rpn.item(), **tb_dict}
        return loss_rpn, tb_dict, disp_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/detectors/pv_rcnn.py | Python | from pcdet.models.detectors.detector3d_template import Detector3DTemplate
class PVRCNN(Detector3DTemplate):
    """PV-RCNN detector; inference uses multi-criterion (IoU/cls) post-processing."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run the module pipeline; losses in train mode, detections otherwise."""
        for component in self.module_list:
            batch_dict = component(batch_dict)
        if not self.training:
            return self.post_processing_multicriterion(batch_dict)
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {"loss": loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum the RPN, point-head and RCNN-head losses."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        return loss_rpn + loss_point + loss_rcnn, tb_dict, disp_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/detectors/second_net.py | Python | from pcdet.models.detectors.detector3d_template import Detector3DTemplate
class SECONDNet(Detector3DTemplate):
    """SECOND detector: single-stage sparse-conv model, RPN loss only."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run the module pipeline; losses in train mode, detections otherwise."""
        for sub_module in self.module_list:
            batch_dict = sub_module(batch_dict)
        if not self.training:
            return self.post_processing(batch_dict)
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {"loss": loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Return the dense-head loss plus tensorboard / display dicts."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        tb_dict = {"loss_rpn": loss_rpn.item(), **tb_dict}
        return loss_rpn, tb_dict, disp_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/detectors/second_net_iou.py | Python | import torch
from pcdet.models.detectors.detector3d_template import Detector3DTemplate
from pcdet.models.model_utils.model_nms_utils import class_agnostic_nms
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils
class SECONDNetIoU(Detector3DTemplate):
    """SECOND with an IoU-scoring RoI head; uses multi-criterion post-processing."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run the module pipeline; losses in train mode, detections otherwise."""
        # Expose the dataset config to downstream modules via the batch dict.
        batch_dict["dataset_cfg"] = self.dataset.dataset_cfg
        for sub_module in self.module_list:
            batch_dict = sub_module(batch_dict)
        if not self.training:
            return self.post_processing_multicriterion(batch_dict)
        loss, tb_dict, disp_dict = self.get_training_loss()
        return {"loss": loss}, tb_dict, disp_dict

    def get_training_loss(self):
        """Sum the dense-head (RPN) and RoI-head losses."""
        disp_dict = {}
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        return loss_rpn + loss_rcnn, tb_dict, disp_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/model_utils/dsnorm.py | Python | """
This is implemented with reference to https://github.com/thuml/TransNorm
Copyright: Jihan Yang from 2020 - present
"""
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torch
import itertools
class DSNorm(Module):
    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.1,
        affine=True,
        track_running_stats=True,
    ):
        """Domain-specific normalization layer.

        Keeps two sets of running statistics (source / target domain) and one
        shared affine transform; ``domain_label`` selects which statistics are
        used in ``forward``.

        Args:
            num_features: number of channels C.
            eps: value added to the denominator for numerical stability.
            momentum: running-stat update factor (None means cumulative
                moving average, see ``forward``).
            affine: if True, learn per-channel weight and bias.
            track_running_stats: if True, register running mean/var buffers.
        """
        super(DSNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        # 0 indicate source, 1 indicate target
        self.domain_label = 0
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        if self.track_running_stats:
            # Separate running stats per domain; the _source/_target suffix is
            # stripped when loading vanilla BN checkpoints (see
            # _load_from_state_dict_ds), so both domains start from the same
            # pretrained statistics.
            self.register_buffer("running_mean_source", torch.zeros(num_features))
            self.register_buffer("running_mean_target", torch.zeros(num_features))
            self.register_buffer("running_var_source", torch.ones(num_features))
            self.register_buffer("running_var_target", torch.ones(num_features))
            self.register_buffer(
                "num_batches_tracked", torch.tensor(0, dtype=torch.long)
            )
        else:
            self.register_parameter("running_mean_source", None)
            self.register_parameter("running_mean_target", None)
            self.register_parameter("running_var_source", None)
            self.register_parameter("running_var_target", None)
        self.reset_parameters()
def reset_running_stats(self):
if self.track_running_stats:
self.running_mean_source.zero_()
self.running_var_source.fill_(1)
self.running_mean_target.zero_()
self.running_var_target.fill_(1)
self.num_batches_tracked.zero_()
def set_domain_label(self, domain_label):
self.domain_label = domain_label
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
    def _check_input_dim(self, input):
        # Hook for subclasses to validate the input rank (cf. torch _BatchNorm).
        # NOTE(review): this returns the NotImplemented constant instead of
        # raising NotImplementedError, so the base-class check is silently a
        # no-op — confirm whether subclasses are always expected to override.
        return NotImplemented
    def forward(self, input):
        """Batch-normalize ``input`` with the current domain's statistics.

        Mirrors torch's ``_BatchNorm.forward``: in training mode (with
        ``track_running_stats``) F.batch_norm updates the selected domain's
        running buffers in place using ``exponential_average_factor``.
        """
        self._check_input_dim(input)
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        # import ipdb; ipdb.set_trace(context=20)
        if self.training and self.track_running_stats:
            # TODO: if statement only here to tell the jit to skip emitting this when it is None
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        return F.batch_norm(
            input,
            # domain_label selects target (1) vs source (0) statistics.
            self.running_mean_target if self.domain_label else self.running_mean_source,
            self.running_var_target if self.domain_label else self.running_var_source,
            self.weight,
            self.bias,
            self.training or not self.track_running_stats,
            exponential_average_factor,
            self.eps,
        )
def extra_repr(self):
return (
"{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
"track_running_stats={track_running_stats}".format(**self.__dict__)
)
    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """Load params/buffers, tolerating pre-v2 checkpoints and DS buffers.

        Follows torch BatchNorm's versioning: checkpoints saved before
        version 2 lack ``num_batches_tracked``, so a zero default is injected.
        The actual copying is delegated to ``_load_from_state_dict_ds``, which
        can also fill the ``*_source``/``*_target`` buffers from vanilla BN
        checkpoint keys.
        """
        version = metadata.get("version", None)
        if (version is None or version < 2) and self.track_running_stats:
            # at version 2: added num_batches_tracked buffer
            # this should have a default value of 0
            num_batches_tracked_key = prefix + "num_batches_tracked"
            if num_batches_tracked_key not in state_dict:
                state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
        self._load_from_state_dict_ds(
            state_dict,
            prefix,
            metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )
def _load_from_state_dict_ds(
    self,
    state_dict,
    prefix,
    local_metadata,
    strict,
    missing_keys,
    unexpected_keys,
    error_msgs,
):
    r"""Domain-aware variant of ``nn.Module._load_from_state_dict``.

    Copies parameters and buffers from :attr:`state_dict` into only this
    module, but not its descendants. The twist versus the stock loader:
    the per-domain buffers (``running_mean_source``/``running_mean_target``
    etc.) can be loaded from a vanilla BatchNorm checkpoint that only
    contains the un-suffixed names — see the key-fallback below.

    Arguments:
        state_dict (dict): a dict containing parameters and
            persistent buffers.
        prefix (str): the prefix for parameters and buffers used in this
            module
        local_metadata (dict): a dict containing the metadata for this module.
        strict (bool): whether to strictly enforce that the keys in
            :attr:`state_dict` with :attr:`prefix` match the names of
            parameters and buffers in this module
        missing_keys (list of str): if ``strict=True``, add missing keys to
            this list
        unexpected_keys (list of str): if ``strict=True``, add unexpected
            keys to this list
        error_msgs (list of str): error messages should be added to this
            list, and will be reported together in
            :meth:`~torch.nn.Module.load_state_dict`
    """
    local_name_params = itertools.chain(
        self._parameters.items(), self._buffers.items()
    )
    local_state = {k: v.data for k, v in local_name_params if v is not None}

    for name, param in local_state.items():
        key = prefix + name
        # Fallback for domain-specific buffers absent from a vanilla
        # checkpoint: strip the 7-character "_source"/"_target" suffix so
        # both domains initialize from the single set of BN statistics.
        if ("source" in key or "target" in key) and (key not in state_dict):
            key = key[:-7]
        if key in state_dict:
            input_param = state_dict[key]

            # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
            if len(param.shape) == 0 and len(input_param.shape) == 1:
                input_param = input_param[0]

            if input_param.shape != param.shape:
                # local shape should match the one in checkpoint
                error_msgs.append(
                    "size mismatch for {}: copying a param with shape {} from checkpoint, "
                    "the shape in current model is {}.".format(
                        key, input_param.shape, param.shape
                    )
                )
                continue

            if isinstance(input_param, Parameter):
                # backwards compatibility for serialized parameters
                input_param = input_param.data
            try:
                param.copy_(input_param)
            except Exception:
                error_msgs.append(
                    'While copying the parameter named "{}", '
                    "whose dimensions in the model are {} and "
                    "whose dimensions in the checkpoint are {}.".format(
                        key, param.size(), input_param.size()
                    )
                )
        elif strict:
            missing_keys.append(key)

    # NOTE(review): unexpected-key collection from the stock loader is
    # intentionally disabled here (the suffix fallback would flag the shared
    # BN keys as unexpected); confirm that is still the desired behavior.
    # if strict:
    #     for key in state_dict.keys():
    #         if key.startswith(prefix):
    #             input_name = key[len(prefix):]
    #             input_name = input_name.split('.', 1)[0]  # get the name of param/buffer/child
    #             if input_name not in self._modules and input_name not in local_state:
    #                 unexpected_keys.append(key)
@classmethod
def convert_dsnorm(cls, module):
    r"""Recursively replace every ``torch.nn.BatchNormNd`` layer inside
    *module* with a :class:`DSNorm` layer.

    Affine weight/bias are cloned (preserving ``requires_grad``) and both
    domains' running statistics are seeded from the original BN buffers.

    Args:
        module (nn.Module): containing module

    Returns:
        The original module tree with BatchNorm layers swapped for DSNorm.

    Example::

        >>> module = torch.nn.Sequential(
        >>>     torch.nn.Linear(20, 100),
        >>>     torch.nn.BatchNorm1d(100)
        >>> ).cuda()
        >>> module = DSNorm.convert_dsnorm(module)
    """
    module_output = module
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module_output = DSNorm(
            module.num_features,
            module.eps,
            module.momentum,
            module.affine,
            module.track_running_stats,
        )
        if module.affine:
            module_output.weight.data = module.weight.data.clone().detach()
            module_output.bias.data = module.bias.data.clone().detach()
            # keep requires_grad unchanged
            module_output.weight.requires_grad = module.weight.requires_grad
            module_output.bias.requires_grad = module.bias.requires_grad
        # NOTE(review): source and target buffers are bound to the SAME
        # tensor objects here, so updates to one domain's statistics also
        # change the other's until the buffers are re-registered separately.
        # Confirm this aliasing is intentional.
        module_output.running_mean_target = module_output.running_mean_source = (
            module.running_mean
        )
        module_output.running_var_target = module_output.running_var_source = (
            module.running_var
        )
        module_output.num_batches_tracked = module.num_batches_tracked
    for name, child in module.named_children():
        module_output.add_module(name, cls.convert_dsnorm(child))
    del module
    return module_output
class DSNorm1d(DSNorm):
    r"""Domain-specific batch normalization over 2D or 3D inputs — the
    :class:`DSNorm` counterpart of :class:`torch.nn.BatchNorm1d`.

    Keeps separate source-/target-domain running statistics (see
    :class:`DSNorm`) while sharing the learnable affine parameters
    :math:`\gamma` and :math:`\beta` of size `C` across domains.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: value used for the running_mean and running_var
            computation; ``None`` selects a cumulative moving average.
            Default: 0.1
        affine: if ``True``, this module has learnable affine parameters.
            Default: ``True``
        track_running_stats: if ``True``, track running mean and variance;
            if ``False``, always use batch statistics. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: same shape as input
    """

    def _check_input_dim(self, input):
        # BatchNorm1d semantics: only (N, C) or (N, C, L) are valid.
        ndim = input.dim()
        if ndim not in (2, 3):
            raise ValueError(
                "expected 2D or 3D input (got {}D input)".format(ndim)
            )
class DSNorm2d(DSNorm):
    r"""Domain-specific batch normalization over 4D inputs — the
    :class:`DSNorm` counterpart of :class:`torch.nn.BatchNorm2d`.

    Keeps separate source-/target-domain running statistics (see
    :class:`DSNorm`) while sharing the learnable affine parameters
    :math:`\gamma` and :math:`\beta` of size `C` across domains.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)`
        eps: value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: value used for the running_mean and running_var
            computation; ``None`` selects a cumulative moving average.
            Default: 0.1
        affine: if ``True``, this module has learnable affine parameters.
            Default: ``True``
        track_running_stats: if ``True``, track running mean and variance;
            if ``False``, always use batch statistics. Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    """

    def _check_input_dim(self, input):
        # BatchNorm2d semantics: only (N, C, H, W) is valid.
        ndim = input.dim()
        if ndim != 4:
            raise ValueError("expected 4D input (got {}D input)".format(ndim))
def set_ds_source(m):
    """Switch module *m* to source-domain statistics if it is a DSNorm layer.

    Intended for ``model.apply(set_ds_source)``; non-DSNorm modules are
    left untouched.
    """
    if "DSNorm" in m.__class__.__name__:
        m.set_domain_label(0)
def set_ds_target(m):
    """Switch module *m* to target-domain statistics if it is a DSNorm layer.

    Intended for ``model.apply(set_ds_target)``; non-DSNorm modules are
    left untouched.
    """
    if "DSNorm" in m.__class__.__name__:
        m.set_domain_label(1)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/model_utils/model_nms_utils.py | Python | import torch
from pcdet.ops.iou3d_nms import iou3d_nms_utils
def class_agnostic_nms(box_scores, box_preds, nms_config, score_thresh=None):
    """Apply class-agnostic 3D NMS to one sample's box predictions.

    Args:
        box_scores: (N,) confidence score per box.
        box_preds: (N, 7 + C) boxes; only the first 7 dims
            (x, y, z, dx, dy, dz, heading) are used for overlap tests.
        nms_config: config providing NMS_TYPE, NMS_PRE_MAXSIZE,
            NMS_POST_MAXSIZE and NMS_THRESH.  # presumably an EasyDict — verify
        score_thresh: optional score filter applied before NMS.

    Returns:
        selected: indices into the ORIGINAL (unfiltered) box set.
        scores of the selected boxes.
    """
    src_box_scores = box_scores
    if score_thresh is not None:
        scores_mask = box_scores >= score_thresh
        box_scores = box_scores[scores_mask]
        box_preds = box_preds[scores_mask]

    selected = []
    if box_scores.shape[0] > 0:
        # Keep only the top NMS_PRE_MAXSIZE scoring boxes before NMS.
        box_scores_nms, indices = torch.topk(
            box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0])
        )
        boxes_for_nms = box_preds[indices]
        # Dispatch to the NMS implementation named in the config
        # (e.g. "nms_gpu" in iou3d_nms_utils).
        keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
            boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
        )
        selected = indices[keep_idx[: nms_config.NMS_POST_MAXSIZE]]

    if score_thresh is not None:
        # Map indices from the filtered set back to the original box set.
        original_idxs = scores_mask.nonzero().view(-1)
        selected = original_idxs[selected]
    return selected, src_box_scores[selected]
def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None):
    """Run 3D NMS independently for each class column and concatenate results.

    Args:
        cls_scores: (N, num_class) per-class confidence scores.
        box_preds: (N, 7 + C) boxes shared across classes.
        nms_config: config providing NMS_TYPE, NMS_PRE_MAXSIZE,
            NMS_POST_MAXSIZE and NMS_THRESH.
        score_thresh: optional per-class score filter applied before NMS.

    Returns:
        pred_scores: (M,) scores of kept boxes across all classes.
        pred_labels: (M,) zero-based class column index of each kept box.
        pred_boxes: (M, 7 + C) kept boxes (a box may appear once per class).
    """
    pred_scores, pred_labels, pred_boxes = [], [], []
    for k in range(cls_scores.shape[1]):
        if score_thresh is not None:
            scores_mask = cls_scores[:, k] >= score_thresh
            box_scores = cls_scores[scores_mask, k]
            cur_box_preds = box_preds[scores_mask]
        else:
            box_scores = cls_scores[:, k]
            cur_box_preds = box_preds

        selected = []
        if box_scores.shape[0] > 0:
            # Keep only the top NMS_PRE_MAXSIZE scoring boxes before NMS.
            box_scores_nms, indices = torch.topk(
                box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0])
            )
            boxes_for_nms = cur_box_preds[indices]
            # Dispatch to the NMS implementation named in the config.
            keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
                boxes_for_nms[:, 0:7],
                box_scores_nms,
                nms_config.NMS_THRESH,
                **nms_config
            )
            selected = indices[keep_idx[: nms_config.NMS_POST_MAXSIZE]]

        pred_scores.append(box_scores[selected])
        # Label is the zero-based class column index.
        pred_labels.append(box_scores.new_ones(len(selected)).long() * k)
        pred_boxes.append(cur_box_preds[selected])

    pred_scores = torch.cat(pred_scores, dim=0)
    pred_labels = torch.cat(pred_labels, dim=0)
    pred_boxes = torch.cat(pred_boxes, dim=0)
    return pred_scores, pred_labels, pred_boxes
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/roi_heads/__init__.py | Python | from pcdet.models.roi_heads.partA2_head import PartA2FCHead
from pcdet.models.roi_heads.pointrcnn_head import PointRCNNHead
from pcdet.models.roi_heads.pvrcnn_head import PVRCNNHead
from pcdet.models.roi_heads.roi_head_template import RoIHeadTemplate
from pcdet.models.roi_heads.second_head import SECONDHead
# Registry mapping config NAME strings to RoI-head classes. Despite the
# ``__all__`` name, this is a lookup dict (project convention), not the
# usual export list.
__all__ = {
    "RoIHeadTemplate": RoIHeadTemplate,
    "PartA2FCHead": PartA2FCHead,
    "PVRCNNHead": PVRCNNHead,
    "SECONDHead": SECONDHead,
    "PointRCNNHead": PointRCNNHead,
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/roi_heads/partA2_head.py | Python | import numpy as np
import spconv
import torch
import torch.nn as nn
from pcdet.models.roi_heads.roi_head_template import RoIHeadTemplate
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils
class PartA2FCHead(RoIHeadTemplate):
    """Part-A2 second-stage RoI head.

    Pools per-point intra-object part predictions and RPN features into a
    sparse 3D grid per RoI (RoI-aware pooling), encodes the two pooled
    volumes with separate sparse-conv branches, then predicts per-RoI class
    confidence and box refinement with shared FC layers.
    """

    def __init__(self, input_channels, model_cfg, num_class=1):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg

        self.SA_modules = nn.ModuleList()
        block = self.post_act_block

        # Each sparse-conv branch outputs half of NUM_FEATURES; their
        # concatenation in forward() restores the full channel count.
        c0 = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES // 2
        # Part branch input: 3 part offsets + 1 segmentation score = 4 ch.
        self.conv_part = spconv.SparseSequential(
            block(4, 64, 3, padding=1, indice_key="rcnn_subm1"),
            block(64, c0, 3, padding=1, indice_key="rcnn_subm1_1"),
        )
        self.conv_rpn = spconv.SparseSequential(
            block(input_channels, 64, 3, padding=1, indice_key="rcnn_subm2"),
            block(64, c0, 3, padding=1, indice_key="rcnn_subm1_2"),
        )

        shared_fc_list = []
        pool_size = self.model_cfg.ROI_AWARE_POOL.POOL_SIZE
        # Flattened size of the pooled (C, pool, pool, pool) volume.
        pre_channel = (
            self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES
            * pool_size
            * pool_size
            * pool_size
        )
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend(
                [
                    nn.Conv1d(
                        pre_channel,
                        self.model_cfg.SHARED_FC[k],
                        kernel_size=1,
                        bias=False,
                    ),
                    nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                    nn.ReLU(),
                ]
            )
            pre_channel = self.model_cfg.SHARED_FC[k]

            # Dropout between FC layers only, never after the last one.
            if (
                k != self.model_cfg.SHARED_FC.__len__() - 1
                and self.model_cfg.DP_RATIO > 0
            ):
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)

        self.cls_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.num_class,
            fc_list=self.model_cfg.CLS_FC,
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC,
        )

        self.roiaware_pool3d_layer = roiaware_pool3d_utils.RoIAwarePool3d(
            out_size=self.model_cfg.ROI_AWARE_POOL.POOL_SIZE,
            max_pts_each_voxel=self.model_cfg.ROI_AWARE_POOL.MAX_POINTS_PER_VOXEL,
        )
        self.init_weights(weight_init="xavier")

    def init_weights(self, weight_init="xavier"):
        """Initialize Conv1d/Conv2d weights with the chosen scheme; the final
        regression layer always gets a small-std normal init on top."""
        if weight_init == "kaiming":
            init_func = nn.init.kaiming_normal_
        elif weight_init == "xavier":
            init_func = nn.init.xavier_normal_
        elif weight_init == "normal":
            init_func = nn.init.normal_
        else:
            raise NotImplementedError

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == "normal":
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)

    def post_act_block(
        self,
        in_channels,
        out_channels,
        kernel_size,
        indice_key,
        stride=1,
        padding=0,
        conv_type="subm",
    ):
        """Build a sparse conv -> BatchNorm1d -> ReLU block.

        conv_type selects the sparse conv flavor: 'subm' (submanifold),
        'spconv' (regular sparse conv) or 'inverseconv'.
        """
        if conv_type == "subm":
            m = spconv.SparseSequential(
                spconv.SubMConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    bias=False,
                    indice_key=indice_key,
                ),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        elif conv_type == "spconv":
            m = spconv.SparseSequential(
                spconv.SparseConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    stride=stride,
                    padding=padding,
                    bias=False,
                    indice_key=indice_key,
                ),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        elif conv_type == "inverseconv":
            m = spconv.SparseSequential(
                spconv.SparseInverseConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    indice_key=indice_key,
                    bias=False,
                ),
                nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            )
        else:
            raise NotImplementedError
        return m

    def roiaware_pool(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)

        Returns:
            pooled_part_features: (B * num_rois, out_x, out_y, out_z, 4)
            pooled_rpn_features: (B * num_rois, out_x, out_y, out_z, C)
        """
        batch_size = batch_dict["batch_size"]
        batch_idx = batch_dict["point_coords"][:, 0]
        point_coords = batch_dict["point_coords"][:, 1:4]
        point_features = batch_dict["point_features"]
        # Part features = (part offsets, or raw xyz when parts are disabled)
        # concatenated with the detached point cls score.
        part_features = torch.cat(
            (
                (
                    batch_dict["point_part_offset"]
                    if not self.model_cfg.get("DISABLE_PART", False)
                    else point_coords
                ),
                batch_dict["point_cls_scores"].view(-1, 1).detach(),
            ),
            dim=1,
        )
        # Zero out the part offsets of low-confidence points.
        part_features[
            part_features[:, -1] < self.model_cfg.SEG_MASK_SCORE_THRESH, 0:3
        ] = 0

        rois = batch_dict["rois"]

        pooled_part_features_list, pooled_rpn_features_list = [], []

        # Pool each sample of the batch separately: the pooling op works on
        # one sample's RoIs and points at a time.
        for bs_idx in range(batch_size):
            bs_mask = batch_idx == bs_idx
            cur_point_coords = point_coords[bs_mask]
            cur_part_features = part_features[bs_mask]
            cur_rpn_features = point_features[bs_mask]
            cur_roi = rois[bs_idx][:, 0:7].contiguous()  # (N, 7)

            # Part offsets are averaged inside a voxel, RPN features maxed.
            pooled_part_features = self.roiaware_pool3d_layer.forward(
                cur_roi, cur_point_coords, cur_part_features, pool_method="avg"
            )  # (N, out_x, out_y, out_z, 4)
            pooled_rpn_features = self.roiaware_pool3d_layer.forward(
                cur_roi, cur_point_coords, cur_rpn_features, pool_method="max"
            )  # (N, out_x, out_y, out_z, C)

            pooled_part_features_list.append(pooled_part_features)
            pooled_rpn_features_list.append(pooled_rpn_features)

        pooled_part_features = torch.cat(
            pooled_part_features_list, dim=0
        )  # (B * N, out_x, out_y, out_z, 4)
        pooled_rpn_features = torch.cat(
            pooled_rpn_features_list, dim=0
        )  # (B * N, out_x, out_y, out_z, C)

        return pooled_part_features, pooled_rpn_features

    @staticmethod
    def fake_sparse_idx(sparse_idx, batch_size_rcnn):
        """Fabricate one non-empty voxel index per RoI when (almost) all
        pooled volumes are empty, so BatchNorm downstream does not crash."""
        print(
            "Warning: Sparse_Idx_Shape(%s) \r" % (str(sparse_idx.shape)),
            end="",
            flush=True,
        )
        # at most one sample is non-empty, then fake the first voxels of each sample(BN needs at least
        # two values each channel) as non-empty for the below calculation
        sparse_idx = sparse_idx.new_zeros((batch_size_rcnn, 3))
        bs_idxs = torch.arange(batch_size_rcnn).type_as(sparse_idx).view(-1, 1)
        sparse_idx = torch.cat((bs_idxs, sparse_idx), dim=1)
        return sparse_idx

    def forward(self, batch_dict):
        """Refine the first-stage proposals.

        In training mode, assigns targets and stashes the head outputs in
        ``self.forward_ret_dict`` for loss computation; in eval mode, writes
        decoded batch predictions back into ``batch_dict``.
        """
        targets_dict = self.proposal_layer(
            batch_dict,
            nms_config=self.model_cfg.NMS_CONFIG["TRAIN" if self.training else "TEST"],
        )
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict["rois"] = targets_dict["rois"]
            batch_dict["roi_labels"] = targets_dict["roi_labels"]

        # RoI aware pooling
        pooled_part_features, pooled_rpn_features = self.roiaware_pool(batch_dict)
        batch_size_rcnn = pooled_part_features.shape[
            0
        ]  # (B * N, out_x, out_y, out_z, 4)

        # transform to sparse tensors
        sparse_shape = np.array(pooled_part_features.shape[1:4], dtype=np.int32)
        sparse_idx = pooled_part_features.sum(
            dim=-1
        ).nonzero()  # (non_empty_num, 4) ==> [bs_idx, x_idx, y_idx, z_idx]

        if sparse_idx.shape[0] < 3:
            # Too few non-empty voxels for BatchNorm: fake indices and mark
            # the corresponding training targets invalid.
            sparse_idx = self.fake_sparse_idx(sparse_idx, batch_size_rcnn)
            if self.training:
                # these are invalid samples
                targets_dict["rcnn_cls_labels"].fill_(-1)
                targets_dict["reg_valid_mask"].fill_(-1)

        part_features = pooled_part_features[
            sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]
        ]
        rpn_features = pooled_rpn_features[
            sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]
        ]
        coords = sparse_idx.int()
        part_features = spconv.SparseConvTensor(
            part_features, coords, sparse_shape, batch_size_rcnn
        )
        rpn_features = spconv.SparseConvTensor(
            rpn_features, coords, sparse_shape, batch_size_rcnn
        )

        # forward rcnn network
        x_part = self.conv_part(part_features)
        x_rpn = self.conv_rpn(rpn_features)

        # Concatenate the two branches' voxel features channel-wise.
        merged_feature = torch.cat((x_rpn.features, x_part.features), dim=1)  # (N, C)
        shared_feature = spconv.SparseConvTensor(
            merged_feature, coords, sparse_shape, batch_size_rcnn
        )
        # Densify and flatten the per-RoI volume for the FC stack.
        shared_feature = shared_feature.dense().view(batch_size_rcnn, -1, 1)

        shared_feature = self.shared_fc_layer(shared_feature)

        rcnn_cls = (
            self.cls_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1)
        )  # (B, 1 or 2)
        rcnn_reg = (
            self.reg_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1)
        )  # (B, C)

        if not self.training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict["batch_size"],
                rois=batch_dict["rois"],
                cls_preds=rcnn_cls,
                box_preds=rcnn_reg,
            )
            batch_dict["batch_cls_preds"] = batch_cls_preds
            batch_dict["batch_box_preds"] = batch_box_preds
            batch_dict["cls_preds_normalized"] = False
        else:
            targets_dict["rcnn_cls"] = rcnn_cls
            targets_dict["rcnn_reg"] = rcnn_reg

            self.forward_ret_dict = targets_dict
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/roi_heads/pointrcnn_head.py | Python | import torch
import torch.nn as nn
from pcdet.models.roi_heads.roi_head_template import RoIHeadTemplate
from pcdet.ops.pointnet2.pointnet2_batch import pointnet2_modules
from pcdet.ops.roipoint_pool3d import roipoint_pool3d_utils
from pcdet.utils import common_utils
class PointRCNNHead(RoIHeadTemplate):
    """PointRCNN second-stage RoI head.

    Pools raw points inside each (enlarged) RoI, canonically transforms them
    into the RoI's local frame, lifts xyz + score/depth cues with a small
    MLP, and refines boxes with a PointNet++ set-abstraction encoder plus FC
    classification/regression heads.
    """

    def __init__(self, input_channels, model_cfg, num_class=1):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg

        use_bn = self.model_cfg.USE_BN
        self.SA_modules = nn.ModuleList()
        channel_in = input_channels

        self.num_prefix_channels = 3 + 2  # xyz + point_scores + point_depth
        xyz_mlps = [self.num_prefix_channels] + self.model_cfg.XYZ_UP_LAYER
        shared_mlps = []
        for k in range(len(xyz_mlps) - 1):
            shared_mlps.append(
                nn.Conv2d(xyz_mlps[k], xyz_mlps[k + 1], kernel_size=1, bias=not use_bn)
            )
            if use_bn:
                shared_mlps.append(nn.BatchNorm2d(xyz_mlps[k + 1]))
            shared_mlps.append(nn.ReLU())
        self.xyz_up_layer = nn.Sequential(*shared_mlps)

        # Fuses the lifted xyz features with the semantic point features.
        c_out = self.model_cfg.XYZ_UP_LAYER[-1]
        self.merge_down_layer = nn.Sequential(
            nn.Conv2d(c_out * 2, c_out, kernel_size=1, bias=not use_bn),
            *[nn.BatchNorm2d(c_out), nn.ReLU()] if use_bn else [nn.ReLU()]
        )

        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            mlps = [channel_in] + self.model_cfg.SA_CONFIG.MLPS[k]

            # NPOINTS of -1 means "group all points" (global SA layer).
            npoint = (
                self.model_cfg.SA_CONFIG.NPOINTS[k]
                if self.model_cfg.SA_CONFIG.NPOINTS[k] != -1
                else None
            )
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModule(
                    npoint=npoint,
                    radius=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsample=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlp=mlps,
                    use_xyz=True,
                    bn=use_bn,
                )
            )
            channel_in = mlps[-1]

        self.cls_layers = self.make_fc_layers(
            input_channels=channel_in,
            output_channels=self.num_class,
            fc_list=self.model_cfg.CLS_FC,
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=channel_in,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC,
        )

        self.roipoint_pool3d_layer = roipoint_pool3d_utils.RoIPointPool3d(
            num_sampled_points=self.model_cfg.ROI_POINT_POOL.NUM_SAMPLED_POINTS,
            pool_extra_width=self.model_cfg.ROI_POINT_POOL.POOL_EXTRA_WIDTH,
        )
        self.init_weights(weight_init="xavier")

    def init_weights(self, weight_init="xavier"):
        """Initialize Conv1d/Conv2d weights with the chosen scheme; the final
        regression layer always gets a small-std normal init on top."""
        if weight_init == "kaiming":
            init_func = nn.init.kaiming_normal_
        elif weight_init == "xavier":
            init_func = nn.init.xavier_normal_
        elif weight_init == "normal":
            init_func = nn.init.normal_
        else:
            raise NotImplementedError

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == "normal":
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)

    def roipool3d_gpu(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)

        Returns:
            pooled_features: (B * num_rois, num_sampled_points,
                3 + 2 + C) points in the RoI's canonical frame, prefixed with
                score and normalized depth channels.
        """
        batch_size = batch_dict["batch_size"]
        batch_idx = batch_dict["point_coords"][:, 0]
        point_coords = batch_dict["point_coords"][:, 1:4]
        point_features = batch_dict["point_features"]
        rois = batch_dict["rois"]  # (B, num_rois, 7 + C)
        batch_cnt = point_coords.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()

        # The pooling op requires the same point count in every sample.
        assert batch_cnt.min() == batch_cnt.max()

        point_scores = batch_dict["point_cls_scores"].detach()
        # Depth normalized to roughly [-0.5, 0.5] around DEPTH_NORMALIZER.
        point_depths = (
            point_coords.norm(dim=1) / self.model_cfg.ROI_POINT_POOL.DEPTH_NORMALIZER
            - 0.5
        )
        point_features_list = [
            point_scores[:, None],
            point_depths[:, None],
            point_features,
        ]
        point_features_all = torch.cat(point_features_list, dim=1)
        batch_points = point_coords.view(batch_size, -1, 3)
        batch_point_features = point_features_all.view(
            batch_size, -1, point_features_all.shape[-1]
        )

        with torch.no_grad():
            pooled_features, pooled_empty_flag = self.roipoint_pool3d_layer(
                batch_points, batch_point_features, rois
            )  # pooled_features: (B, num_rois, num_sampled_points, 3 + C), pooled_empty_flag: (B, num_rois)

            # canonical transformation: translate to RoI center, then rotate
            # by -heading so the box is axis-aligned in its local frame.
            roi_center = rois[:, :, 0:3]
            pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)

            pooled_features = pooled_features.view(
                -1, pooled_features.shape[-2], pooled_features.shape[-1]
            )
            pooled_features[:, :, 0:3] = common_utils.rotate_points_along_z(
                pooled_features[:, :, 0:3], -rois.view(-1, rois.shape[-1])[:, 6]
            )
            # Zero out RoIs that contained no points at all.
            pooled_features[pooled_empty_flag.view(-1) > 0] = 0
        return pooled_features

    def forward(self, batch_dict):
        """Refine the first-stage proposals.

        In training mode, assigns targets and stashes the head outputs in
        ``self.forward_ret_dict`` for loss computation; in eval mode, writes
        decoded batch predictions back into ``batch_dict``.
        """
        targets_dict = self.proposal_layer(
            batch_dict,
            nms_config=self.model_cfg.NMS_CONFIG["TRAIN" if self.training else "TEST"],
        )
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict["rois"] = targets_dict["rois"]
            batch_dict["roi_labels"] = targets_dict["roi_labels"]

        pooled_features = self.roipool3d_gpu(
            batch_dict
        )  # (total_rois, num_sampled_points, 3 + C)

        # Lift the geometric prefix (xyz + score + depth) through the MLP.
        xyz_input = (
            pooled_features[..., 0 : self.num_prefix_channels]
            .transpose(1, 2)
            .unsqueeze(dim=3)
            .contiguous()
        )
        xyz_features = self.xyz_up_layer(xyz_input)
        point_features = (
            pooled_features[..., self.num_prefix_channels :]
            .transpose(1, 2)
            .unsqueeze(dim=3)
        )
        merged_features = torch.cat((xyz_features, point_features), dim=1)
        merged_features = self.merge_down_layer(merged_features)

        l_xyz, l_features = [pooled_features[..., 0:3].contiguous()], [
            merged_features.squeeze(dim=3).contiguous()
        ]
        # Hierarchical PointNet++ set abstraction over the RoI's points.
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)

        shared_features = l_features[-1]  # (total_rois, num_features, 1)
        rcnn_cls = (
            self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)
        )  # (B, 1 or 2)
        rcnn_reg = (
            self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)
        )  # (B, C)

        if not self.training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict["batch_size"],
                rois=batch_dict["rois"],
                cls_preds=rcnn_cls,
                box_preds=rcnn_reg,
            )
            batch_dict["batch_cls_preds"] = batch_cls_preds
            batch_dict["batch_box_preds"] = batch_box_preds
            batch_dict["cls_preds_normalized"] = False
        else:
            targets_dict["rcnn_cls"] = rcnn_cls
            targets_dict["rcnn_reg"] = rcnn_reg

            self.forward_ret_dict = targets_dict
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/roi_heads/pvrcnn_head.py | Python | import torch.nn as nn
from pcdet.models.roi_heads.roi_head_template import RoIHeadTemplate
from pcdet.ops.pointnet2.pointnet2_stack import (
pointnet2_modules as pointnet2_stack_modules,
)
from pcdet.utils import common_utils
class PVRCNNHead(RoIHeadTemplate):
    """PV-RCNN second-stage RoI head.

    Samples a regular 3D grid of points inside each RoI, aggregates keypoint
    features around every grid point with multi-scale stacked set
    abstraction (RoI-grid pooling), then predicts per-RoI class confidence
    and box refinement with shared FC layers.
    """

    def __init__(self, input_channels, model_cfg, num_class=1):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg

        mlps = self.model_cfg.ROI_GRID_POOL.MLPS
        # Prepend the keypoint feature dim to every MLP spec.
        for k in range(len(mlps)):
            mlps[k] = [input_channels] + mlps[k]

        self.roi_grid_pool_layer = pointnet2_stack_modules.StackSAModuleMSG(
            radii=self.model_cfg.ROI_GRID_POOL.POOL_RADIUS,
            nsamples=self.model_cfg.ROI_GRID_POOL.NSAMPLE,
            mlps=mlps,
            use_xyz=True,
            pool_method=self.model_cfg.ROI_GRID_POOL.POOL_METHOD,
        )

        GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        # Multi-scale outputs are concatenated channel-wise, then the whole
        # (GRID^3, C) grid volume is flattened for the FC stack.
        c_out = sum([x[-1] for x in mlps])
        pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out

        shared_fc_list = []
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend(
                [
                    nn.Conv1d(
                        pre_channel,
                        self.model_cfg.SHARED_FC[k],
                        kernel_size=1,
                        bias=False,
                    ),
                    nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                    nn.ReLU(),
                ]
            )
            pre_channel = self.model_cfg.SHARED_FC[k]

            # Dropout between FC layers only, never after the last one.
            if (
                k != self.model_cfg.SHARED_FC.__len__() - 1
                and self.model_cfg.DP_RATIO > 0
            ):
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)

        self.cls_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.num_class,
            fc_list=self.model_cfg.CLS_FC,
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC,
        )
        self.init_weights(weight_init="xavier")

    def init_weights(self, weight_init="xavier"):
        """Initialize Conv1d/Conv2d weights with the chosen scheme; the final
        regression layer always gets a small-std normal init on top."""
        if weight_init == "kaiming":
            init_func = nn.init.kaiming_normal_
        elif weight_init == "xavier":
            init_func = nn.init.xavier_normal_
        elif weight_init == "normal":
            init_func = nn.init.normal_
        else:
            raise NotImplementedError

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == "normal":
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)

    def roi_grid_pool(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)

        Returns:
            pooled_features: (B * num_rois, GRID^3, C) feature per RoI grid
                point, aggregated from nearby keypoints.
        """
        batch_size = batch_dict["batch_size"]
        rois = batch_dict["rois"]
        point_coords = batch_dict["point_coords"]
        point_features = batch_dict["point_features"]

        # Weight keypoint features by their foreground confidence.
        point_features = point_features * batch_dict["point_cls_scores"].view(-1, 1)

        (
            global_roi_grid_points,
            local_roi_grid_points,
        ) = self.get_global_grid_points_of_roi(
            rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        )  # (BxN, 6x6x6, 3)
        global_roi_grid_points = global_roi_grid_points.view(
            batch_size, -1, 3
        )  # (B, Nx6x6x6, 3)

        xyz = point_coords[:, 1:4]
        # Stacked-batch format: per-sample point counts instead of padding.
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        batch_idx = point_coords[:, 0]
        for k in range(batch_size):
            xyz_batch_cnt[k] = (batch_idx == k).sum()

        new_xyz = global_roi_grid_points.view(-1, 3)
        new_xyz_batch_cnt = (
            xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
        )
        pooled_points, pooled_features = self.roi_grid_pool_layer(
            xyz=xyz.contiguous(),
            xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz,
            new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=point_features.contiguous(),
        )  # (M1 + M2 ..., C)

        pooled_features = pooled_features.view(
            -1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE**3, pooled_features.shape[-1]
        )  # (BxN, 6x6x6, C)
        return pooled_features

    def get_global_grid_points_of_roi(self, rois, grid_size):
        """Return RoI grid points in world coordinates and in each RoI's
        local (axis-aligned) frame."""
        rois = rois.view(-1, rois.shape[-1])
        batch_size_rcnn = rois.shape[0]

        local_roi_grid_points = self.get_dense_grid_points(
            rois, batch_size_rcnn, grid_size
        )  # (B, 6x6x6, 3)
        # Rotate by each RoI's heading, then translate to its center.
        global_roi_grid_points = common_utils.rotate_points_along_z(
            local_roi_grid_points.clone(), rois[:, 6]
        ).squeeze(dim=1)
        global_center = rois[:, 0:3].clone()
        global_roi_grid_points += global_center.unsqueeze(dim=1)
        return global_roi_grid_points, local_roi_grid_points

    @staticmethod
    def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
        """Voxel-center grid points of each RoI in its local frame, centered
        on the box origin (coords span [-size/2, size/2])."""
        faked_features = rois.new_ones((grid_size, grid_size, grid_size))
        dense_idx = faked_features.nonzero()  # (N, 3) [x_idx, y_idx, z_idx]
        dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float()  # (B, 6x6x6, 3)

        local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
        roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(
            dim=1
        ) - (
            local_roi_size.unsqueeze(dim=1) / 2
        )  # (B, 6x6x6, 3)
        return roi_grid_points

    def forward(self, batch_dict):
        """Refine the first-stage proposals.

        In training mode, assigns targets and stashes the head outputs in
        ``self.forward_ret_dict`` for loss computation; in eval mode, writes
        decoded batch predictions back into ``batch_dict``.
        """
        targets_dict = self.proposal_layer(
            batch_dict,
            nms_config=self.model_cfg.NMS_CONFIG["TRAIN" if self.training else "TEST"],
        )
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict["rois"] = targets_dict["rois"]
            batch_dict["roi_labels"] = targets_dict["roi_labels"]

        # RoI aware pooling
        pooled_features = self.roi_grid_pool(batch_dict)  # (BxN, 6x6x6, C)

        grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        batch_size_rcnn = pooled_features.shape[0]
        pooled_features = (
            pooled_features.permute(0, 2, 1)
            .contiguous()
            .view(batch_size_rcnn, -1, grid_size, grid_size, grid_size)
        )  # (BxN, C, 6, 6, 6)

        # Flatten the grid volume to a single feature vector per RoI.
        shared_features = self.shared_fc_layer(
            pooled_features.view(batch_size_rcnn, -1, 1)
        )
        rcnn_cls = (
            self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)
        )  # (B, 1 or 2)
        rcnn_reg = (
            self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)
        )  # (B, C)

        if not self.training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict["batch_size"],
                rois=batch_dict["rois"],
                cls_preds=rcnn_cls,
                box_preds=rcnn_reg,
            )
            batch_dict["batch_cls_preds"] = batch_cls_preds
            batch_dict["batch_box_preds"] = batch_box_preds
            batch_dict["cls_preds_normalized"] = False
        else:
            targets_dict["rcnn_cls"] = rcnn_cls
            targets_dict["rcnn_reg"] = rcnn_reg

            self.forward_ret_dict = targets_dict
        return batch_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/roi_heads/roi_head_template.py | Python | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.models.model_utils.model_nms_utils import class_agnostic_nms
from pcdet.models.roi_heads.target_assigner.proposal_target_layer import (
ProposalTargetLayer,
)
from pcdet.utils import box_coder_utils, common_utils, loss_utils
class RoIHeadTemplate(nn.Module):
    """
    Base class for second-stage RoI refinement heads.

    Provides proposal generation (NMS over first-stage predictions), target
    assignment, the classification/regression losses, and final box decoding.
    Subclasses implement forward() and the feature-pooling specifics.
    """
    def __init__(self, num_class, model_cfg):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        # Box coder class is selected by name from box_coder_utils via config.
        self.box_coder = getattr(
            box_coder_utils, self.model_cfg.TARGET_CONFIG.BOX_CODER
        )(**self.model_cfg.TARGET_CONFIG.get("BOX_CODER_CONFIG", {}))
        self.proposal_target_layer = ProposalTargetLayer(
            roi_sampler_cfg=self.model_cfg.TARGET_CONFIG
        )
        self.build_losses(self.model_cfg.LOSS_CONFIG)
        # Set by the subclass forward() during training; consumed by get_loss().
        self.forward_ret_dict = None
    def build_losses(self, losses_cfg):
        """Register the weighted smooth-L1 regression loss as a submodule."""
        self.add_module(
            "reg_loss_func",
            loss_utils.WeightedSmoothL1Loss(
                code_weights=losses_cfg.LOSS_WEIGHTS["code_weights"]
            ),
        )
    def make_fc_layers(self, input_channels, output_channels, fc_list):
        """Build a per-point MLP as 1x1 Conv1d + BN + ReLU stacks, ending in a bias-only Conv1d."""
        fc_layers = []
        pre_channel = input_channels
        for k in range(0, fc_list.__len__()):
            fc_layers.extend(
                [
                    nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False),
                    nn.BatchNorm1d(fc_list[k]),
                    nn.ReLU(),
                ]
            )
            pre_channel = fc_list[k]
            # Dropout only after the first hidden layer.
            # NOTE(review): DP_RATIO == 0 still inserts a Dropout(0) module
            # (a no-op at runtime) — confirm this is intended.
            if self.model_cfg.DP_RATIO >= 0 and k == 0:
                fc_layers.append(nn.Dropout(self.model_cfg.DP_RATIO))
        fc_layers.append(
            nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True)
        )
        fc_layers = nn.Sequential(*fc_layers)
        return fc_layers
    @torch.no_grad()
    def proposal_layer(self, batch_dict, nms_config):
        """
        Turn first-stage predictions into a fixed-size set of ROIs per sample
        via class-agnostic NMS.

        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
            nms_config:
        Returns:
            batch_dict:
                rois: (B, num_rois, 7+C)
                roi_scores: (B, num_rois)
                roi_labels: (B, num_rois)
        """
        batch_size = batch_dict["batch_size"]
        batch_box_preds = batch_dict["batch_box_preds"]
        batch_cls_preds = batch_dict["batch_cls_preds"]
        # Zero-padded outputs: samples with fewer than NMS_POST_MAXSIZE kept
        # boxes leave trailing all-zero rows.
        rois = batch_box_preds.new_zeros(
            (batch_size, nms_config.NMS_POST_MAXSIZE, batch_box_preds.shape[-1])
        )
        roi_scores = batch_box_preds.new_zeros(
            (batch_size, nms_config.NMS_POST_MAXSIZE)
        )
        roi_labels = batch_box_preds.new_zeros(
            (batch_size, nms_config.NMS_POST_MAXSIZE), dtype=torch.long
        )
        for index in range(batch_size):
            # Predictions may arrive either stacked with an explicit batch
            # index (2-D cls preds) or already batched (3-D cls preds).
            if batch_dict.get("batch_index", None) is not None:
                assert batch_cls_preds.shape.__len__() == 2
                batch_mask = batch_dict["batch_index"] == index
            else:
                assert batch_dict["batch_cls_preds"].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_box_preds[batch_mask]
            cls_preds = batch_cls_preds[batch_mask]
            # Best class score/index per box drives the class-agnostic NMS.
            cur_roi_scores, cur_roi_labels = torch.max(cls_preds, dim=1)
            if nms_config.MULTI_CLASSES_NMS:
                raise NotImplementedError
            else:
                selected, selected_scores = class_agnostic_nms(
                    box_scores=cur_roi_scores,
                    box_preds=box_preds,
                    nms_config=nms_config,
                )
            rois[index, : len(selected), :] = box_preds[selected]
            roi_scores[index, : len(selected)] = cur_roi_scores[selected]
            roi_labels[index, : len(selected)] = cur_roi_labels[selected]
        batch_dict["rois"] = rois
        batch_dict["roi_scores"] = roi_scores
        # Shift argmax indices to 1-based class labels.
        batch_dict["roi_labels"] = roi_labels + 1
        batch_dict["has_class_labels"] = (
            True if batch_cls_preds.shape[-1] > 1 else False
        )
        batch_dict.pop("batch_index", None)
        return batch_dict
    def assign_targets(self, batch_dict):
        """Sample ROIs, match them to GT boxes, and express GT in each ROI's canonical frame."""
        batch_size = batch_dict["batch_size"]
        with torch.no_grad():
            targets_dict = self.proposal_target_layer.forward(batch_dict)
        rois = targets_dict["rois"]  # (B, N, 7 + C)
        gt_of_rois = targets_dict["gt_of_rois"]  # (B, N, 7 + C + 1)
        # Keep an untransformed copy for the corner-loss regularization.
        targets_dict["gt_of_rois_src"] = gt_of_rois.clone().detach()
        # canonical transformation: subtract ROI center and heading so GT is
        # expressed relative to its matched ROI.
        roi_center = rois[:, :, 0:3]
        roi_ry = rois[:, :, 6] % (2 * np.pi)
        gt_of_rois[:, :, 0:3] = gt_of_rois[:, :, 0:3] - roi_center
        gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry
        # transfer LiDAR coords to local coords
        gt_of_rois = common_utils.rotate_points_along_z(
            points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1)
        ).view(batch_size, -1, gt_of_rois.shape[-1])
        # flip orientation if rois have opposite orientation
        heading_label = gt_of_rois[:, :, 6] % (2 * np.pi)  # 0 ~ 2pi
        opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5)
        heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (
            2 * np.pi
        )  # (0 ~ pi/2, 3pi/2 ~ 2pi)
        flag = heading_label > np.pi
        heading_label[flag] = heading_label[flag] - np.pi * 2  # (-pi/2, pi/2)
        heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2)
        gt_of_rois[:, :, 6] = heading_label
        targets_dict["gt_of_rois"] = gt_of_rois
        return targets_dict
    def get_box_reg_layer_loss(self, forward_ret_dict):
        """Smooth-L1 regression loss (plus optional corner loss) averaged over foreground ROIs."""
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        code_size = self.box_coder.code_size
        reg_valid_mask = forward_ret_dict["reg_valid_mask"].view(-1)
        gt_boxes3d_ct = forward_ret_dict["gt_of_rois"][..., 0:code_size]
        gt_of_rois_src = forward_ret_dict["gt_of_rois_src"][..., 0:code_size].view(
            -1, code_size
        )
        rcnn_reg = forward_ret_dict["rcnn_reg"]  # (rcnn_batch_size, C)
        roi_boxes3d = forward_ret_dict["rois"]
        rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]
        fg_mask = reg_valid_mask > 0
        fg_sum = fg_mask.long().sum().item()
        tb_dict = {}
        if loss_cfgs.REG_LOSS == "smooth-l1":
            # Anchors are the ROIs in their canonical frame: zero center/heading,
            # only the box sizes remain.
            rois_anchor = roi_boxes3d.clone().detach().view(-1, code_size)
            rois_anchor[:, 0:3] = 0
            rois_anchor[:, 6] = 0
            reg_targets = self.box_coder.encode_torch(
                gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor
            )
            rcnn_loss_reg = self.reg_loss_func(
                rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),
                reg_targets.unsqueeze(dim=0),
            )  # [B, M, 7]
            # Average over foreground ROIs only; max(fg_sum, 1) avoids 0-division.
            rcnn_loss_reg = (
                rcnn_loss_reg.view(rcnn_batch_size, -1)
                * fg_mask.unsqueeze(dim=-1).float()
            ).sum() / max(fg_sum, 1)
            rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS["rcnn_reg_weight"]
            tb_dict["rcnn_loss_reg"] = rcnn_loss_reg.item()
            if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0:
                # TODO: NEED to BE CHECK
                fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask]
                fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask]
                fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size)
                batch_anchors = fg_roi_boxes3d.clone().detach()
                roi_ry = fg_roi_boxes3d[:, :, 6].view(-1)
                roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3)
                batch_anchors[:, :, 0:3] = 0
                # Decode predictions back to global boxes so the corner loss is
                # computed against the untransformed GT (gt_of_rois_src).
                rcnn_boxes3d = self.box_coder.decode_torch(
                    fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size),
                    batch_anchors,
                ).view(-1, code_size)
                rcnn_boxes3d = common_utils.rotate_points_along_z(
                    rcnn_boxes3d.unsqueeze(dim=1), roi_ry
                ).squeeze(dim=1)
                rcnn_boxes3d[:, 0:3] += roi_xyz
                loss_corner = loss_utils.get_corner_loss_lidar(
                    rcnn_boxes3d[:, 0:7], gt_of_rois_src[fg_mask][:, 0:7]
                )
                loss_corner = loss_corner.mean()
                loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS["rcnn_corner_weight"]
                rcnn_loss_reg += loss_corner
                tb_dict["rcnn_loss_corner"] = loss_corner.item()
        else:
            raise NotImplementedError
        return rcnn_loss_reg, tb_dict
    def get_box_cls_layer_loss(self, forward_ret_dict):
        """Classification loss over sampled ROIs; labels of -1 are ignored."""
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        rcnn_cls = forward_ret_dict["rcnn_cls"]
        rcnn_cls_labels = forward_ret_dict["rcnn_cls_labels"].view(-1)
        if loss_cfgs.CLS_LOSS == "BinaryCrossEntropy":
            rcnn_cls_flat = rcnn_cls.view(-1)
            batch_loss_cls = F.binary_cross_entropy(
                torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction="none"
            )
            # Labels < 0 mark ignored ROIs; mask them out of the average.
            cls_valid_mask = (rcnn_cls_labels >= 0).float()
            rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(
                cls_valid_mask.sum(), min=1.0
            )
        elif loss_cfgs.CLS_LOSS == "CrossEntropy":
            batch_loss_cls = F.cross_entropy(
                rcnn_cls, rcnn_cls_labels, reduction="none", ignore_index=-1
            )
            cls_valid_mask = (rcnn_cls_labels >= 0).float()
            rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(
                cls_valid_mask.sum(), min=1.0
            )
        else:
            raise NotImplementedError
        rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS["rcnn_cls_weight"]
        tb_dict = {"rcnn_loss_cls": rcnn_loss_cls.item()}
        return rcnn_loss_cls, tb_dict
    def get_loss(self, tb_dict=None):
        """Total RoI-head loss: classification + regression, using cached forward_ret_dict."""
        tb_dict = {} if tb_dict is None else tb_dict
        rcnn_loss = 0
        rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict)
        rcnn_loss += rcnn_loss_cls
        tb_dict.update(cls_tb_dict)
        rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict)
        rcnn_loss += rcnn_loss_reg
        tb_dict.update(reg_tb_dict)
        tb_dict["rcnn_loss"] = rcnn_loss.item()
        return rcnn_loss, tb_dict
    def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds):
        """
        Decode the head's regression output relative to each ROI into global
        LiDAR-frame boxes.

        Args:
            batch_size:
            rois: (B, N, 7)
            cls_preds: (BN, num_class)
            box_preds: (BN, code_size)
        Returns:
            batch_cls_preds: (B, N, num_class or 1)
            batch_box_preds: (B, N, code_size) decoded boxes in LiDAR frame
        """
        code_size = self.box_coder.code_size
        # batch_cls_preds: (B, N, num_class or 1)
        batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1])
        batch_box_preds = box_preds.view(batch_size, -1, code_size)
        roi_ry = rois[:, :, 6].view(-1)
        roi_xyz = rois[:, :, 0:3].view(-1, 3)
        # Decode in the ROI's canonical frame (center removed), then rotate by
        # the ROI heading and translate back to the ROI center.
        local_rois = rois.clone().detach()
        local_rois[:, :, 0:3] = 0
        batch_box_preds = self.box_coder.decode_torch(batch_box_preds, local_rois).view(
            -1, code_size
        )
        batch_box_preds = common_utils.rotate_points_along_z(
            batch_box_preds.unsqueeze(dim=1), roi_ry
        ).squeeze(dim=1)
        batch_box_preds[:, 0:3] += roi_xyz
        batch_box_preds = batch_box_preds.view(batch_size, -1, code_size)
        return batch_cls_preds, batch_box_preds
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/roi_heads/second_head.py | Python | import torch
import torch.nn as nn
from pcdet.models.roi_heads.roi_head_template import RoIHeadTemplate
from pcdet.utils import common_utils, loss_utils
class SECONDHead(RoIHeadTemplate):
    """
    IoU-prediction RoI head operating on 2-D BEV features.

    Pools a rotated grid of BEV features per ROI via affine_grid/grid_sample
    and regresses a single confidence (IoU) score per ROI; the ROI boxes
    themselves are passed through unrefined.
    """
    def __init__(self, input_channels, model_cfg, num_class=1):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        # Flattened size of one pooled ROI: C * GRID_SIZE * GRID_SIZE.
        pre_channel = self.model_cfg.ROI_GRID_POOL.IN_CHANNEL * GRID_SIZE * GRID_SIZE
        shared_fc_list = []
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend(
                [
                    nn.Conv1d(
                        pre_channel,
                        self.model_cfg.SHARED_FC[k],
                        kernel_size=1,
                        bias=False,
                    ),
                    nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                    nn.ReLU(),
                ]
            )
            pre_channel = self.model_cfg.SHARED_FC[k]
            # Dropout between hidden layers (not after the last one).
            if (
                k != self.model_cfg.SHARED_FC.__len__() - 1
                and self.model_cfg.DP_RATIO > 0
            ):
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)
        self.iou_layers = self.make_fc_layers(
            input_channels=pre_channel, output_channels=1, fc_list=self.model_cfg.IOU_FC
        )
        self.init_weights(weight_init="xavier")
    def init_weights(self, weight_init="xavier"):
        """Initialize all Conv1d/Conv2d weights with the chosen scheme; biases to zero."""
        if weight_init == "kaiming":
            init_func = nn.init.kaiming_normal_
        elif weight_init == "xavier":
            init_func = nn.init.xavier_normal_
        elif weight_init == "normal":
            init_func = nn.init.normal_
        else:
            raise NotImplementedError
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == "normal":
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
    def roi_grid_pool(self, batch_dict):
        """
        Sample a rotated GRID_SIZE x GRID_SIZE patch of BEV features for each
        ROI using affine_grid + grid_sample.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                spatial_features_2d: (B, C, H, W)
        Returns:
            pooled_features: (B*num_rois, C, GRID_SIZE, GRID_SIZE)
        """
        batch_size = batch_dict["batch_size"]
        rois = batch_dict["rois"].detach()
        spatial_features_2d = batch_dict["spatial_features_2d"].detach()
        height, width = spatial_features_2d.size(2), spatial_features_2d.size(3)
        dataset_cfg = batch_dict["dataset_cfg"]
        min_x = dataset_cfg.POINT_CLOUD_RANGE[0]
        min_y = dataset_cfg.POINT_CLOUD_RANGE[1]
        voxel_size_x = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[0]
        voxel_size_y = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[1]
        down_sample_ratio = self.model_cfg.ROI_GRID_POOL.DOWNSAMPLE_RATIO
        pooled_features_list = []
        # NOTE(review): cuDNN is disabled around affine_grid/grid_sample —
        # presumably to work around a cuDNN issue with these ops; confirm.
        torch.backends.cudnn.enabled = False
        for b_id in range(batch_size):
            # Map global boxes coordinates to feature map coordinates
            x1 = (rois[b_id, :, 0] - rois[b_id, :, 3] / 2 - min_x) / (
                voxel_size_x * down_sample_ratio
            )
            x2 = (rois[b_id, :, 0] + rois[b_id, :, 3] / 2 - min_x) / (
                voxel_size_x * down_sample_ratio
            )
            y1 = (rois[b_id, :, 1] - rois[b_id, :, 4] / 2 - min_y) / (
                voxel_size_y * down_sample_ratio
            )
            y2 = (rois[b_id, :, 1] + rois[b_id, :, 4] / 2 - min_y) / (
                voxel_size_y * down_sample_ratio
            )
            angle, _ = common_utils.check_numpy_to_torch(rois[b_id, :, 6])
            cosa = torch.cos(angle)
            sina = torch.sin(angle)
            # 2x3 affine matrices mapping each ROI's (rotated) rectangle into
            # normalized [-1, 1] feature-map coordinates for affine_grid.
            theta = (
                torch.stack(
                    (
                        (x2 - x1) / (width - 1) * cosa,
                        (x2 - x1) / (width - 1) * (-sina),
                        (x1 + x2 - width + 1) / (width - 1),
                        (y2 - y1) / (height - 1) * sina,
                        (y2 - y1) / (height - 1) * cosa,
                        (y1 + y2 - height + 1) / (height - 1),
                    ),
                    dim=1,
                )
                .view(-1, 2, 3)
                .float()
            )
            grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
            grid = nn.functional.affine_grid(
                theta,
                torch.Size(
                    (rois.size(1), spatial_features_2d.size(1), grid_size, grid_size)
                ),
                align_corners=True,
            )
            # Sample the same BEV feature map once per ROI of this sample.
            pooled_features = nn.functional.grid_sample(
                spatial_features_2d[b_id]
                .unsqueeze(0)
                .expand(rois.size(1), spatial_features_2d.size(1), height, width),
                grid,
                align_corners=True,
            )
            pooled_features_list.append(pooled_features)
        torch.backends.cudnn.enabled = True
        pooled_features = torch.cat(pooled_features_list, dim=0)
        return pooled_features
    def forward(self, batch_dict):
        """
        Generate proposals, pool rotated BEV patches per ROI, and predict one
        IoU/confidence score per ROI. Boxes are not refined by this head.

        :param batch_dict: input dict; updated in place and returned
        :return: batch_dict
        """
        targets_dict = self.proposal_layer(
            batch_dict,
            nms_config=self.model_cfg.NMS_CONFIG["TRAIN" if self.training else "TEST"],
        )
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict["rois"] = targets_dict["rois"]
            batch_dict["roi_labels"] = targets_dict["roi_labels"]
        # RoI aware pooling
        pooled_features = self.roi_grid_pool(batch_dict)  # (BxN, C, 7, 7)
        batch_size_rcnn = pooled_features.shape[0]
        shared_features = self.shared_fc_layer(
            pooled_features.view(batch_size_rcnn, -1, 1)
        )
        rcnn_iou = (
            self.iou_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)
        )  # (B*N, 1)
        if not self.training:
            # IoU prediction acts as the confidence score; boxes = input ROIs.
            batch_dict["batch_cls_preds"] = rcnn_iou.view(
                batch_dict["batch_size"], -1, rcnn_iou.shape[-1]
            )
            batch_dict["batch_box_preds"] = batch_dict["rois"]
            batch_dict["cls_preds_normalized"] = False
        else:
            targets_dict["rcnn_iou"] = rcnn_iou
            self.forward_ret_dict = targets_dict
        return batch_dict
    def get_loss(self, tb_dict=None):
        """Total loss for this head: IoU-prediction loss only (no box regression)."""
        tb_dict = {} if tb_dict is None else tb_dict
        rcnn_loss = 0
        rcnn_loss_cls, cls_tb_dict = self.get_box_iou_layer_loss(self.forward_ret_dict)
        rcnn_loss += rcnn_loss_cls
        tb_dict.update(cls_tb_dict)
        tb_dict["rcnn_loss"] = rcnn_loss.item()
        return rcnn_loss, tb_dict
    def get_box_iou_layer_loss(self, forward_ret_dict):
        """IoU-regression loss between predicted and target ROI confidences; labels < 0 are ignored."""
        loss_cfgs = self.model_cfg.LOSS_CONFIG
        rcnn_iou = forward_ret_dict["rcnn_iou"]
        rcnn_iou_labels = forward_ret_dict["rcnn_cls_labels"].view(-1)
        rcnn_iou_flat = rcnn_iou.view(-1)
        if loss_cfgs.IOU_LOSS == "BinaryCrossEntropy":
            batch_loss_iou = nn.functional.binary_cross_entropy_with_logits(
                rcnn_iou_flat, rcnn_iou_labels.float(), reduction="none"
            )
        elif loss_cfgs.IOU_LOSS == "L2":
            batch_loss_iou = nn.functional.mse_loss(
                rcnn_iou_flat, rcnn_iou_labels, reduction="none"
            )
        elif loss_cfgs.IOU_LOSS == "smoothL1":
            diff = rcnn_iou_flat - rcnn_iou_labels
            batch_loss_iou = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(
                diff, 1.0 / 9.0
            )
        elif loss_cfgs.IOU_LOSS == "focalbce":
            batch_loss_iou = loss_utils.sigmoid_focal_cls_loss(
                rcnn_iou_flat, rcnn_iou_labels
            )
        else:
            raise NotImplementedError
        iou_valid_mask = (rcnn_iou_labels >= 0).float()
        rcnn_loss_iou = (batch_loss_iou * iou_valid_mask).sum() / torch.clamp(
            iou_valid_mask.sum(), min=1.0
        )
        rcnn_loss_iou = rcnn_loss_iou * loss_cfgs.LOSS_WEIGHTS["rcnn_iou_weight"]
        tb_dict = {"rcnn_loss_iou": rcnn_loss_iou.item()}
        return rcnn_loss_iou, tb_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/models/roi_heads/target_assigner/proposal_target_layer.py | Python | import numpy as np
import torch
import torch.nn as nn
from pcdet.ops.iou3d_nms import iou3d_nms_utils
class ProposalTargetLayer(nn.Module):
    """
    Samples a fixed number of ROIs per sample (balancing fg/bg), matches them
    to GT boxes by 3D IoU, and produces classification/regression targets for
    second-stage training.
    """
    def __init__(self, roi_sampler_cfg):
        super().__init__()
        self.roi_sampler_cfg = roi_sampler_cfg
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            batch_dict:
                rois: (B, M, 7 + C)
                gt_of_rois: (B, M, 7 + C)
                gt_iou_of_rois: (B, M)
                roi_scores: (B, M)
                roi_labels: (B, M)
                reg_valid_mask: (B, M)
                rcnn_cls_labels: (B, M)
        """
        (
            batch_rois,
            batch_gt_of_rois,
            batch_roi_ious,
            batch_roi_scores,
            batch_roi_labels,
        ) = self.sample_rois_for_rcnn(batch_dict=batch_dict)
        # regression valid mask: only ROIs above REG_FG_THRESH get box targets
        reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long()
        # classification label
        if self.roi_sampler_cfg.CLS_SCORE_TYPE == "cls":
            # Hard labels; ROIs in the (bg, fg) threshold gap are ignored (-1).
            batch_cls_labels = (
                batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH
            ).long()
            ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & (
                batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH
            )
            batch_cls_labels[ignore_mask > 0] = -1
        elif self.roi_sampler_cfg.CLS_SCORE_TYPE == "roi_iou":
            iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH
            iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH
            fg_mask = batch_roi_ious > iou_fg_thresh
            bg_mask = batch_roi_ious < iou_bg_thresh
            interval_mask = (fg_mask == 0) & (bg_mask == 0)
            batch_cls_labels = (fg_mask > 0).float()
            # Soft labels: IoU rescaled linearly between the bg/fg thresholds.
            batch_cls_labels[interval_mask] = (
                batch_roi_ious[interval_mask] - iou_bg_thresh
            ) / (iou_fg_thresh - iou_bg_thresh)
        elif self.roi_sampler_cfg.CLS_SCORE_TYPE == "raw_roi_iou":
            batch_cls_labels = batch_roi_ious
        else:
            raise NotImplementedError
        targets_dict = {
            "rois": batch_rois,
            "gt_of_rois": batch_gt_of_rois,
            "gt_iou_of_rois": batch_roi_ious,
            "roi_scores": batch_roi_scores,
            "roi_labels": batch_roi_labels,
            "reg_valid_mask": reg_valid_mask,
            "rcnn_cls_labels": batch_cls_labels,
        }
        return targets_dict
    def sample_rois_for_rcnn(self, batch_dict):
        """
        For each sample: match ROIs to GT by (class-aware) 3D IoU, then
        subsample a balanced set of ROI_PER_IMAGE foreground/background ROIs.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores,
            batch_roi_labels — each sliced to ROI_PER_IMAGE per sample
        """
        batch_size = batch_dict["batch_size"]
        rois = batch_dict["rois"]
        roi_scores = batch_dict["roi_scores"]
        roi_labels = batch_dict["roi_labels"]
        gt_boxes = batch_dict["gt_boxes"]
        code_size = rois.shape[-1]
        batch_rois = rois.new_zeros(
            batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size
        )
        batch_gt_of_rois = rois.new_zeros(
            batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1
        )
        batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
        batch_roi_scores = rois.new_zeros(
            batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE
        )
        batch_roi_labels = rois.new_zeros(
            (batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long
        )
        for index in range(batch_size):
            cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = (
                rois[index],
                gt_boxes[index],
                roi_labels[index],
                roi_scores[index],
            )
            # Drop the zero-padding rows at the end of the per-sample GT list.
            k = cur_gt.__len__() - 1
            while k > 0 and cur_gt[k].sum() == 0:
                k -= 1
            cur_gt = cur_gt[: k + 1]
            cur_gt = (
                cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt
            )
            if self.roi_sampler_cfg.get("SAMPLE_ROI_BY_EACH_CLASS", False):
                max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
                    rois=cur_roi,
                    roi_labels=cur_roi_labels,
                    gt_boxes=cur_gt[:, 0:7],
                    gt_labels=cur_gt[:, -1].long(),
                )
            else:
                iou3d = iou3d_nms_utils.boxes_iou3d_gpu(
                    cur_roi, cur_gt[:, 0:7]
                )  # (M, N)
                max_overlaps, gt_assignment = torch.max(iou3d, dim=1)
            sampled_inds = self.subsample_rois(max_overlaps=max_overlaps)
            batch_rois[index] = cur_roi[sampled_inds]
            batch_roi_labels[index] = cur_roi_labels[sampled_inds]
            batch_roi_ious[index] = max_overlaps[sampled_inds]
            batch_roi_scores[index] = cur_roi_scores[sampled_inds]
            batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
        return (
            batch_rois,
            batch_gt_of_rois,
            batch_roi_ious,
            batch_roi_scores,
            batch_roi_labels,
        )
    def subsample_rois(self, max_overlaps):
        """Select ROI_PER_IMAGE indices, balancing foreground vs. easy/hard background by IoU."""
        # sample fg, easy_bg, hard_bg
        fg_rois_per_image = int(
            np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE)
        )
        fg_thresh = min(
            self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH
        )
        fg_inds = ((max_overlaps >= fg_thresh)).nonzero().view(-1)
        easy_bg_inds = (
            ((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)
        )
        # hard bg: below the fg threshold but not trivially far from any GT.
        hard_bg_inds = (
            (
                (max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH)
                & (max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)
            )
            .nonzero()
            .view(-1)
        )
        fg_num_rois = fg_inds.numel()
        bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()
        if fg_num_rois > 0 and bg_num_rois > 0:
            # sampling fg
            fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
            rand_num = (
                torch.from_numpy(np.random.permutation(fg_num_rois))
                .type_as(max_overlaps)
                .long()
            )
            fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
            # sampling bg
            bg_rois_per_this_image = (
                self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
            )
            bg_inds = self.sample_bg_inds(
                hard_bg_inds,
                easy_bg_inds,
                bg_rois_per_this_image,
                self.roi_sampler_cfg.HARD_BG_RATIO,
            )
        elif fg_num_rois > 0 and bg_num_rois == 0:
            # sampling fg (with replacement, to fill the whole budget)
            rand_num = np.floor(
                np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois
            )
            rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long()
            fg_inds = fg_inds[rand_num]
            # NOTE(review): bg_inds is a plain Python list here, but torch.cat
            # below expects tensors — this branch would raise if ever hit; confirm.
            bg_inds = []
        elif bg_num_rois > 0 and fg_num_rois == 0:
            # sampling bg
            bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
            bg_inds = self.sample_bg_inds(
                hard_bg_inds,
                easy_bg_inds,
                bg_rois_per_this_image,
                self.roi_sampler_cfg.HARD_BG_RATIO,
            )
        else:
            print(
                "maxoverlaps:(min=%f, max=%f)"
                % (max_overlaps.min().item(), max_overlaps.max().item())
            )
            print("ERROR: FG=%d, BG=%d" % (fg_num_rois, bg_num_rois))
            raise NotImplementedError
        sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
        return sampled_inds
    @staticmethod
    def sample_bg_inds(
        hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio
    ):
        """Sample background indices (with replacement), mixing hard and easy bg by hard_bg_ratio."""
        if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
            hard_bg_rois_num = min(
                int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds)
            )
            easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
            # sampling hard bg
            rand_idx = torch.randint(
                low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)
            ).long()
            hard_bg_inds = hard_bg_inds[rand_idx]
            # sampling easy bg
            rand_idx = torch.randint(
                low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)
            ).long()
            easy_bg_inds = easy_bg_inds[rand_idx]
            bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
        elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
            hard_bg_rois_num = bg_rois_per_this_image
            # sampling hard bg
            rand_idx = torch.randint(
                low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)
            ).long()
            bg_inds = hard_bg_inds[rand_idx]
        elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
            easy_bg_rois_num = bg_rois_per_this_image
            # sampling easy bg
            rand_idx = torch.randint(
                low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)
            ).long()
            bg_inds = easy_bg_inds[rand_idx]
        else:
            raise NotImplementedError
        return bg_inds
    @staticmethod
    def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
        """
        Match each ROI to the highest-IoU GT box restricted to the same class.

        Args:
            rois: (N, 7)
            roi_labels: (N)
            gt_boxes: (M, 7)
            gt_labels: (M)
        Returns:
            max_overlaps: (N) best same-class IoU per ROI (0 when no same-class GT)
            gt_assignment: (N) index into gt_boxes of the matched GT
        """
        max_overlaps = rois.new_zeros(rois.shape[0])
        gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])
        for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
            roi_mask = roi_labels == k
            gt_mask = gt_labels == k
            if roi_mask.sum() > 0 and gt_mask.sum() > 0:
                cur_roi = rois[roi_mask]
                cur_gt = gt_boxes[gt_mask]
                # Map indices within the class subset back to the full GT list.
                original_gt_assignment = gt_mask.nonzero().view(-1)
                iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt)  # (M, N)
                cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1)
                max_overlaps[roi_mask] = cur_max_overlaps
                gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
        return max_overlaps, gt_assignment
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/iou3d_nms/iou3d_nms_utils.py | Python | """
3D IoU Calculation and Rotated NMS
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
"""
import torch
from pcdet.ops.iou3d_nms import iou3d_nms_cuda
from pcdet.utils import common_utils
def boxes_bev_iou_cpu(boxes_a, boxes_b):
    """
    Compute pairwise rotated BEV IoU between two box sets on the CPU.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
    Returns:
        (N, M) BEV IoU matrix; returned as numpy iff boxes_b was numpy.
    """
    boxes_a, _ = common_utils.check_numpy_to_torch(boxes_a)
    # NOTE(review): only the second conversion flag is kept, so the output
    # type follows boxes_b; callers should pass both inputs as the same type.
    boxes_b, is_numpy = common_utils.check_numpy_to_torch(boxes_b)
    assert not (boxes_a.is_cuda or boxes_b.is_cuda), "Only support CPU tensors"
    assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7
    num_a, num_b = boxes_a.shape[0], boxes_b.shape[0]
    # Output buffer filled in place by the C++ routine.
    ans_iou = boxes_a.new_zeros(torch.Size((num_a, num_b)))
    iou3d_nms_cuda.boxes_iou_bev_cpu(
        boxes_a.contiguous(), boxes_b.contiguous(), ans_iou
    )
    return ans_iou.numpy() if is_numpy else ans_iou
def boxes_iou_bev(boxes_a, boxes_b):
    """
    Compute pairwise rotated BEV IoU between two box sets on the GPU.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
    Returns:
        ans_iou: (N, M)
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 7
    num_a, num_b = boxes_a.shape[0], boxes_b.shape[0]
    # GPU output buffer; the CUDA kernel writes the IoUs in place.
    ans_iou = torch.cuda.FloatTensor(torch.Size((num_a, num_b))).zero_()
    iou3d_nms_cuda.boxes_iou_bev_gpu(
        boxes_a.contiguous(), boxes_b.contiguous(), ans_iou
    )
    return ans_iou
def boxes_iou3d_gpu(boxes_a, boxes_b):
    """
    Compute pairwise 3D IoU: rotated BEV overlap (CUDA kernel) times the
    overlap of the vertical (z) intervals, normalized by the union volume.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
    Returns:
        ans_iou: (N, M)
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 7
    # Vertical extents shaped (N, 1) and (1, M) so they broadcast to (N, M).
    a_top = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)
    a_bottom = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1)
    b_top = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1)
    b_bottom = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1)
    # Rotated BEV overlap areas from the CUDA kernel: (N, M).
    overlaps_bev = torch.cuda.FloatTensor(
        torch.Size((boxes_a.shape[0], boxes_b.shape[0]))
    ).zero_()
    iou3d_nms_cuda.boxes_overlap_bev_gpu(
        boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev
    )
    # Height overlap = length of the intersection of the two z intervals.
    overlaps_h = torch.clamp(
        torch.min(a_top, b_top) - torch.max(a_bottom, b_bottom), min=0
    )
    overlaps_3d = overlaps_bev * overlaps_h
    vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1)
    vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1)
    # Clamp the union to avoid division by zero for degenerate boxes.
    return overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6)
def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs):
    """
    Rotated 3D NMS on the GPU.

    :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    :param scores: (N)
    :param thresh: IoU threshold for suppression
    :param pre_maxsize: optionally keep only the top-k scoring boxes beforehand
    :return: (kept indices into the original boxes, None)
    """
    assert boxes.shape[1] == 7
    _, order = scores.sort(0, descending=True)
    if pre_maxsize is not None:
        order = order[:pre_maxsize]
    ranked_boxes = boxes[order].contiguous()
    # Pre-allocated index buffer; the CUDA kernel fills the first num_out slots.
    keep = torch.LongTensor(ranked_boxes.size(0))
    num_out = iou3d_nms_cuda.nms_gpu(ranked_boxes, keep, thresh)
    # Map kept positions (in score order) back to original indices.
    return order[keep[:num_out].cuda()].contiguous(), None
def nms_normal_gpu(boxes, scores, thresh, **kwargs):
    """
    3D NMS on the GPU backed by the nms_normal_gpu kernel (presumably the
    non-rotated variant — confirm against the CUDA source).

    :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    :param scores: (N)
    :param thresh: IoU threshold for suppression
    :return: (kept indices into the original boxes, None)
    """
    assert boxes.shape[1] == 7
    _, order = scores.sort(0, descending=True)
    ranked_boxes = boxes[order].contiguous()
    # Pre-allocated index buffer; the CUDA kernel fills the first num_out slots.
    keep = torch.LongTensor(ranked_boxes.size(0))
    num_out = iou3d_nms_cuda.nms_normal_gpu(ranked_boxes, keep, thresh)
    return order[keep[:num_out].cuda()].contiguous(), None
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/iou3d_nms/src/iou3d_cpu.cpp | C++ | /*
3D Rotated IoU Calculation (CPU)
Written by Shaoshuai Shi
All Rights Reserved 2020.
*/
#include "iou3d_cpu.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdio.h>
#include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include <vector>
// Abort (print to stderr and hard-exit, rather than raising a catchable
// error) when tensor x does not live on the GPU.
// NOTE(review): .type().is_cuda() is the legacy ATen accessor — presumably
// kept for older-PyTorch compatibility; confirm before upgrading.
#define CHECK_CUDA(x)                                                        \
    do {                                                                     \
        if (!x.type().is_cuda()) {                                           \
            fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, \
                    __LINE__);                                               \
            exit(-1);                                                        \
        }                                                                    \
    } while (0)
// Abort when tensor x is not contiguous in memory.
#define CHECK_CONTIGUOUS(x)                                                  \
    do {                                                                     \
        if (!x.is_contiguous()) {                                            \
            fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x,   \
                    __FILE__, __LINE__);                                     \
            exit(-1);                                                        \
        }                                                                    \
    } while (0)
// Combined input validation: on-GPU and contiguous.
#define CHECK_INPUT(x)                                                       \
    CHECK_CUDA(x);                                                           \
    CHECK_CONTIGUOUS(x)
// Scalar min/max helpers used by the geometry routines below.
inline float min(float a, float b) { return a > b ? b : a; }
inline float max(float a, float b) { return a > b ? a : b; }
// Floating-point tolerance for the degenerate-case tests in this file.
const float EPS = 1e-8;
// 2D point (BEV x/y) used by the rotated-box overlap routines.
// NOTE(review): members carry __device__ qualifiers even though this is the
// CPU implementation file — presumably so the same struct compiles under
// nvcc alongside the CUDA version; confirm the build setup.
struct Point {
    float x, y;
    __device__ Point() {}
    // Narrowing double -> float is intentional (matches the float storage).
    __device__ Point(double _x, double _y) { x = _x, y = _y; }
    __device__ void set(float _x, float _y) {
        x = _x;
        y = _y;
    }
    __device__ Point operator+(const Point &b) const {
        return Point(x + b.x, y + b.y);
    }
    __device__ Point operator-(const Point &b) const {
        return Point(x - b.x, y - b.y);
    }
};
// 2D cross product a x b (the z-component of the 3D cross product).
inline float cross(const Point &a, const Point &b) {
    return a.x * b.y - a.y * b.x;
}
// Cross product of (p1 - p0) x (p2 - p0): sign gives the turn direction of
// p0 -> p1 -> p2 (positive = counter-clockwise).
inline float cross(const Point &p1, const Point &p2, const Point &p0) {
    return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
// Fast rejection test: returns 1 when the axis-aligned bounding boxes of
// segments (p1, p2) and (q1, q2) overlap, 0 otherwise.
inline int check_rect_cross(const Point &p1,
                            const Point &p2,
                            const Point &q1,
                            const Point &q2) {
    int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
              min(q1.x, q2.x) <= max(p1.x, p2.x) &&
              min(p1.y, p2.y) <= max(q1.y, q2.y) &&
              min(q1.y, q2.y) <= max(p1.y, p2.y);
    return ret;
}
// Test whether point p lies inside the (rotated) BEV footprint of box,
// with a small MARGIN of slack on each side.
inline int check_in_box2d(const float *box, const Point &p) {
    // params: (7) [x, y, z, dx, dy, dz, heading]
    const float MARGIN = 1e-2;
    float center_x = box[0], center_y = box[1];
    float angle_cos = cos(-box[6]),
          angle_sin = sin(-box[6]);  // rotate the point in the opposite
                                     // direction of box
    // Point expressed in the box's axis-aligned frame.
    float rot_x =
            (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
    float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;
    return (fabs(rot_x) < box[3] / 2 + MARGIN &&
            fabs(rot_y) < box[4] / 2 + MARGIN);
}
// Compute the intersection point of segments (p0, p1) and (q0, q1).
// Returns 1 and writes the point into `ans` when they properly cross;
// returns 0 otherwise (ans untouched).
inline int intersection(const Point &p1,
                        const Point &p0,
                        const Point &q1,
                        const Point &q0,
                        Point &ans) {
    // fast exclusion
    if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
    // check cross standing
    float s1 = cross(q0, p1, p0);
    float s2 = cross(p1, q1, p0);
    float s3 = cross(p0, q1, q0);
    float s4 = cross(q1, p1, q0);
    // The segments properly straddle each other only when both pairs of
    // orientation tests agree in sign.
    if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
    // calculate intersection of two lines
    float s5 = cross(q1, p1, p0);
    if (fabs(s5 - s1) > EPS) {
        ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
        ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
    } else {
        // Near-degenerate fallback: solve the two implicit line equations
        // a*x + b*y + c = 0 via Cramer's rule.
        float a0 = p0.y - p1.y, b0 = p1.x - p0.x,
              c0 = p0.x * p1.y - p1.x * p0.y;
        float a1 = q0.y - q1.y, b1 = q1.x - q0.x,
              c1 = q0.x * q1.y - q1.x * q0.y;
        float D = a0 * b1 - a1 * b0;
        ans.x = (b0 * c1 - b1 * c0) / D;
        ans.y = (a1 * c0 - a0 * c1) / D;
    }
    return 1;
}
// Rotate point p around `center` (cos/sin of the angle precomputed), in place.
inline void rotate_around_center(const Point &center,
                                 const float angle_cos,
                                 const float angle_sin,
                                 Point &p) {
    float new_x = (p.x - center.x) * angle_cos +
                  (p.y - center.y) * (-angle_sin) + center.x;
    float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos +
                  center.y;
    p.set(new_x, new_y);
}
// Comparator for ordering polygon vertices by polar angle around `center`
// (used by the bubble sort inside box_overlap).
inline int point_cmp(const Point &a, const Point &b, const Point &center) {
    return atan2(a.y - center.y, a.x - center.x) >
           atan2(b.y - center.y, b.x - center.x);
}
// Exact overlap area of two rotated boxes in the BEV (x, y) plane via
// polygon clipping: collect edge intersections and contained corners, sort
// them by angle around their centroid, then integrate (shoelace formula).
inline float box_overlap(const float *box_a, const float *box_b) {
    // params: box_a (7) [x, y, z, dx, dy, dz, heading]
    // params: box_b (7) [x, y, z, dx, dy, dz, heading]
    float a_angle = box_a[6], b_angle = box_b[6];
    float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2,
          a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
    float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
    float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
    float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
    float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
    Point center_a(box_a[0], box_a[1]);
    Point center_b(box_b[0], box_b[1]);
    // Axis-aligned corners first; slot [4] duplicates [0] after rotation to
    // close the polygon for the edge loop below.
    Point box_a_corners[5];
    box_a_corners[0].set(a_x1, a_y1);
    box_a_corners[1].set(a_x2, a_y1);
    box_a_corners[2].set(a_x2, a_y2);
    box_a_corners[3].set(a_x1, a_y2);
    Point box_b_corners[5];
    box_b_corners[0].set(b_x1, b_y1);
    box_b_corners[1].set(b_x2, b_y1);
    box_b_corners[2].set(b_x2, b_y2);
    box_b_corners[3].set(b_x1, b_y2);
    // get oriented corners
    float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
    float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
    for (int k = 0; k < 4; k++) {
        rotate_around_center(center_a, a_angle_cos, a_angle_sin,
                             box_a_corners[k]);
        rotate_around_center(center_b, b_angle_cos, b_angle_sin,
                             box_b_corners[k]);
    }
    box_a_corners[4] = box_a_corners[0];
    box_b_corners[4] = box_b_corners[0];
    // Pairwise edge intersections: up to 16 candidate polygon vertices.
    Point cross_points[16];
    Point poly_center;
    int cnt = 0, flag = 0;
    poly_center.set(0, 0);
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                                box_b_corners[j + 1], box_b_corners[j],
                                cross_points[cnt]);
            if (flag) {
                poly_center = poly_center + cross_points[cnt];
                cnt++;
            }
        }
    }
    // Corners of one box contained in the other are polygon vertices too.
    for (int k = 0; k < 4; k++) {
        if (check_in_box2d(box_a, box_b_corners[k])) {
            poly_center = poly_center + box_b_corners[k];
            cross_points[cnt] = box_b_corners[k];
            cnt++;
        }
        if (check_in_box2d(box_b, box_a_corners[k])) {
            poly_center = poly_center + box_a_corners[k];
            cross_points[cnt] = box_a_corners[k];
            cnt++;
        }
    }
    // NOTE(review): when the boxes are disjoint cnt == 0 and these divisions
    // produce inf/nan, but poly_center is then never read and the area loop
    // below returns 0 — confirm this is relied upon.
    poly_center.x /= cnt;
    poly_center.y /= cnt;
    // sort the points of polygon CCW around the centroid (bubble sort;
    // cnt is at most 16 + 8 candidates in theory, small either way)
    Point temp;
    for (int j = 0; j < cnt - 1; j++) {
        for (int i = 0; i < cnt - j - 1; i++) {
            if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
                temp = cross_points[i];
                cross_points[i] = cross_points[i + 1];
                cross_points[i + 1] = temp;
            }
        }
    }
    // Shoelace formula over the triangle fan rooted at vertex 0.
    float area = 0;
    for (int k = 0; k < cnt - 1; k++) {
        area += cross(cross_points[k] - cross_points[0],
                      cross_points[k + 1] - cross_points[0]);
    }
    return fabs(area) / 2.0;
}
// Rotated BEV IoU of two boxes: overlap / (area_a + area_b - overlap),
// with EPS guarding the denominator against division by zero.
inline float iou_bev(const float *box_a, const float *box_b) {
    // params: box_a (7) [x, y, z, dx, dy, dz, heading]
    // params: box_b (7) [x, y, z, dx, dy, dz, heading]
    float sa = box_a[3] * box_a[4];
    float sb = box_b[3] * box_b[4];
    float s_overlap = box_overlap(box_a, box_b);
    return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
// CPU reference implementation of pairwise rotated BEV IoU.
// params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading]
// params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading]
// params ans_iou_tensor: (N, M) output buffer, filled with IoU values
// returns 1 (status flag; failures abort inside the CHECK macros)
int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor,
                      at::Tensor boxes_b_tensor,
                      at::Tensor ans_iou_tensor) {
    CHECK_CONTIGUOUS(boxes_a_tensor);
    CHECK_CONTIGUOUS(boxes_b_tensor);
    // Fix: the output tensor is written through a raw pointer as well, so it
    // must be contiguous too; previously it was never validated and a
    // non-contiguous output would be silently corrupted.
    CHECK_CONTIGUOUS(ans_iou_tensor);
    int num_boxes_a = boxes_a_tensor.size(0);
    int num_boxes_b = boxes_b_tensor.size(0);
    const float *boxes_a = boxes_a_tensor.data<float>();
    const float *boxes_b = boxes_b_tensor.data<float>();
    float *ans_iou = ans_iou_tensor.data<float>();
    for (int i = 0; i < num_boxes_a; i++) {
        for (int j = 0; j < num_boxes_b; j++) {
            ans_iou[i * num_boxes_b + j] =
                iou_bev(boxes_a + i * 7, boxes_b + j * 7);
        }
    }
    return 1;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/iou3d_nms/src/iou3d_cpu.h | C/C++ Header | #ifndef IOU3D_CPU_H
#define IOU3D_CPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
// CPU pairwise rotated BEV IoU: boxes (N, 7)/(M, 7) -> ans_iou (N, M).
int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor,
                      at::Tensor boxes_b_tensor,
                      at::Tensor ans_iou_tensor);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/iou3d_nms/src/iou3d_nms.cpp | C++ | /*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include "iou3d_nms.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include <vector>
// Abort with a message if `x` is not a CUDA tensor.
#define CHECK_CUDA(x) \
    do { \
        if (!x.type().is_cuda()) { \
            fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, \
                    __LINE__); \
            exit(-1); \
        } \
    } while (0)
// Abort with a message if `x` is not contiguous in memory.
#define CHECK_CONTIGUOUS(x) \
    do { \
        if (!x.is_contiguous()) { \
            fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, \
                    __FILE__, __LINE__); \
            exit(-1); \
        } \
    } while (0)
// Combined CUDA-device + contiguity check.
#define CHECK_INPUT(x) \
    CHECK_CUDA(x); \
    CHECK_CONTIGUOUS(x)
// Ceiling division.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Wrap a CUDA runtime call; abort on error with file/line context.
#define CHECK_ERROR(ans) \
    { gpuAssert((ans), __FILE__, __LINE__); }
// Print the CUDA error string and (optionally) exit with the error code.
inline void gpuAssert(cudaError_t code,
                      const char *file,
                      int line,
                      bool abort = true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
                line);
        if (abort) exit(code);
    }
}
// 64 boxes per NMS tile: one bit of an unsigned long long per box.
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
// Kernel launchers implemented in iou3d_nms_kernel.cu.
void boxesoverlapLauncher(const int num_a,
                          const float *boxes_a,
                          const int num_b,
                          const float *boxes_b,
                          float *ans_overlap);
void boxesioubevLauncher(const int num_a,
                         const float *boxes_a,
                         const int num_b,
                         const float *boxes_b,
                         float *ans_iou);
void nmsLauncher(const float *boxes,
                 unsigned long long *mask,
                 int boxes_num,
                 float nms_overlap_thresh);
void nmsNormalLauncher(const float *boxes,
                       unsigned long long *mask,
                       int boxes_num,
                       float nms_overlap_thresh);
// Pairwise rotated BEV overlap area between two sets of boxes on the GPU.
// boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading], CUDA + contiguous
// boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading], CUDA + contiguous
// ans_overlap: (N, M) pre-allocated output buffer, CUDA + contiguous
// returns 1 (failures abort inside the CHECK macros)
int boxes_overlap_bev_gpu(at::Tensor boxes_a,
                          at::Tensor boxes_b,
                          at::Tensor ans_overlap) {
    CHECK_INPUT(boxes_a);
    CHECK_INPUT(boxes_b);
    CHECK_INPUT(ans_overlap);

    const int count_a = boxes_a.size(0);
    const int count_b = boxes_b.size(0);
    const float *ptr_a = boxes_a.data<float>();
    const float *ptr_b = boxes_b.data<float>();
    float *ptr_out = ans_overlap.data<float>();

    boxesoverlapLauncher(count_a, ptr_a, count_b, ptr_b, ptr_out);
    return 1;
}
// Pairwise rotated BEV IoU between two sets of boxes on the GPU.
// boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading], CUDA + contiguous
// boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading], CUDA + contiguous
// ans_iou: (N, M) pre-allocated output buffer, CUDA + contiguous
// returns 1 (failures abort inside the CHECK macros)
int boxes_iou_bev_gpu(at::Tensor boxes_a,
                      at::Tensor boxes_b,
                      at::Tensor ans_iou) {
    CHECK_INPUT(boxes_a);
    CHECK_INPUT(boxes_b);
    CHECK_INPUT(ans_iou);

    const int count_a = boxes_a.size(0);
    const int count_b = boxes_b.size(0);
    const float *ptr_a = boxes_a.data<float>();
    const float *ptr_b = boxes_b.data<float>();
    float *ptr_iou = ans_iou.data<float>();

    boxesioubevLauncher(count_a, ptr_a, count_b, ptr_b, ptr_iou);
    return 1;
}
// Rotated (BEV-IoU) non-maximum suppression.
// boxes: (N, 7) [x, y, z, dx, dy, dz, heading], CUDA + contiguous, assumed
//        pre-sorted by score (kernel only suppresses later boxes).
// keep: (N) int64 output buffer; the first `num_to_keep` entries are filled
//       with indices of surviving boxes.
// returns the number of kept boxes.
int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh) {
    CHECK_INPUT(boxes);
    CHECK_CONTIGUOUS(keep);

    int boxes_num = boxes.size(0);
    const float *boxes_data = boxes.data<float>();
    long *keep_data = keep.data<long>();

    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);

    // Compute the pairwise suppression bitmask on the GPU.
    unsigned long long *mask_data = NULL;
    CHECK_ERROR(
        cudaMalloc((void **)&mask_data,
                   boxes_num * col_blocks * sizeof(unsigned long long)));
    nmsLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh);

    std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks);
    CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data,
                           boxes_num * col_blocks * sizeof(unsigned long long),
                           cudaMemcpyDeviceToHost));
    cudaFree(mask_data);

    // Fix: `unsigned long long remv_cpu[col_blocks]` was a variable-length
    // array, a GCC extension that is not ISO C++; use a zero-initialized
    // std::vector instead (equivalent to the old memset).
    std::vector<unsigned long long> remv_cpu(col_blocks, 0);

    // Sequential scan: keep a box unless an earlier kept box suppresses it,
    // then OR its suppression row into the running mask.
    int num_to_keep = 0;
    for (int i = 0; i < boxes_num; i++) {
        int nblock = i / THREADS_PER_BLOCK_NMS;
        int inblock = i % THREADS_PER_BLOCK_NMS;

        if (!(remv_cpu[nblock] & (1ULL << inblock))) {
            keep_data[num_to_keep++] = i;
            const unsigned long long *p = &mask_cpu[0] + i * col_blocks;
            for (int j = nblock; j < col_blocks; j++) {
                remv_cpu[j] |= p[j];
            }
        }
    }
    if (cudaSuccess != cudaGetLastError()) printf("Error!\n");

    return num_to_keep;
}
// Axis-aligned (heading-ignoring) non-maximum suppression; same protocol as
// nms_gpu but using plain rectangle IoU.
// boxes: (N, 7) [x, y, z, dx, dy, dz, heading], CUDA + contiguous, assumed
//        pre-sorted by score.
// keep: (N) int64 output buffer; the first `num_to_keep` entries are filled
//       with indices of surviving boxes.
// returns the number of kept boxes.
int nms_normal_gpu(at::Tensor boxes,
                   at::Tensor keep,
                   float nms_overlap_thresh) {
    CHECK_INPUT(boxes);
    CHECK_CONTIGUOUS(keep);

    int boxes_num = boxes.size(0);
    const float *boxes_data = boxes.data<float>();
    long *keep_data = keep.data<long>();

    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);

    // Compute the pairwise suppression bitmask on the GPU.
    unsigned long long *mask_data = NULL;
    CHECK_ERROR(
        cudaMalloc((void **)&mask_data,
                   boxes_num * col_blocks * sizeof(unsigned long long)));
    nmsNormalLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh);

    std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks);
    CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data,
                           boxes_num * col_blocks * sizeof(unsigned long long),
                           cudaMemcpyDeviceToHost));
    cudaFree(mask_data);

    // Fix: replace the non-ISO variable-length array with a zero-initialized
    // std::vector (equivalent to the old memset).
    std::vector<unsigned long long> remv_cpu(col_blocks, 0);

    // Sequential scan: keep a box unless an earlier kept box suppresses it.
    int num_to_keep = 0;
    for (int i = 0; i < boxes_num; i++) {
        int nblock = i / THREADS_PER_BLOCK_NMS;
        int inblock = i % THREADS_PER_BLOCK_NMS;

        if (!(remv_cpu[nblock] & (1ULL << inblock))) {
            keep_data[num_to_keep++] = i;
            const unsigned long long *p = &mask_cpu[0] + i * col_blocks;
            for (int j = nblock; j < col_blocks; j++) {
                remv_cpu[j] |= p[j];
            }
        }
    }
    if (cudaSuccess != cudaGetLastError()) printf("Error!\n");

    return num_to_keep;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/iou3d_nms/src/iou3d_nms.h | C/C++ Header | #ifndef IOU3D_NMS_H
#define IOU3D_NMS_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
// Pairwise rotated BEV overlap area: (N, 7) x (M, 7) -> (N, M).
int boxes_overlap_bev_gpu(at::Tensor boxes_a,
                          at::Tensor boxes_b,
                          at::Tensor ans_overlap);
// Pairwise rotated BEV IoU: (N, 7) x (M, 7) -> (N, M).
int boxes_iou_bev_gpu(at::Tensor boxes_a,
                      at::Tensor boxes_b,
                      at::Tensor ans_iou);
// Rotated NMS; returns the number of kept indices written into `keep`.
int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh);
// Axis-aligned NMS; returns the number of kept indices written into `keep`.
int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp | C++ | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "iou3d_cpu.h"
#include "iou3d_nms.h"
// pybind11 entry point: expose the host wrappers above to Python under the
// extension name chosen by the build system.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu,
          "oriented boxes overlap");
    m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou");
    m.def("nms_gpu", &nms_gpu, "oriented nms gpu");
    m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu");
    m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou");
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu | CUDA | /*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
// Tile width for the pairwise overlap/IoU kernels (16x16 threads per block).
#define THREADS_PER_BLOCK 16
// Ceiling division.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// 64 boxes per NMS tile: one bit of an unsigned long long per box.
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
// Numerical floor for IoU denominators and degeneracy tests.
const float EPS = 1e-8;
// Minimal 2D point used by the rotated-box geometry below.
struct Point {
    float x, y;
    __device__ Point() {}
    __device__ Point(double _x, double _y) { x = _x, y = _y; }
    __device__ void set(float _x, float _y) {
        x = _x;
        y = _y;
    }
    __device__ Point operator+(const Point &b) const {
        return Point(x + b.x, y + b.y);
    }
    __device__ Point operator-(const Point &b) const {
        return Point(x - b.x, y - b.y);
    }
};
// 2D cross product (z-component of a x b); the sign encodes orientation.
__device__ inline float cross(const Point &a, const Point &b) {
    return a.x * b.y - a.y * b.x;
}
// Cross product of (p1 - p0) x (p2 - p0): > 0 when p0->p1->p2 turns CCW.
__device__ inline float cross(const Point &p1,
                              const Point &p2,
                              const Point &p0) {
    return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
// Cheap rejection: do the axis-aligned bounding rectangles of segments
// (p1, p2) and (q1, q2) overlap?
__device__ int check_rect_cross(const Point &p1,
                                const Point &p2,
                                const Point &q1,
                                const Point &q2) {
    int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
              min(q1.x, q2.x) <= max(p1.x, p2.x) &&
              min(p1.y, p2.y) <= max(q1.y, q2.y) &&
              min(q1.y, q2.y) <= max(p1.y, p2.y);
    return ret;
}
// Is point p inside the rotated BEV box (within tolerance MARGIN)? The
// point is rotated by -heading into the box frame and compared against the
// half extents.
__device__ inline int check_in_box2d(const float *box, const Point &p) {
    // params: (7) [x, y, z, dx, dy, dz, heading]
    const float MARGIN = 1e-2;
    float center_x = box[0], center_y = box[1];
    // Rotate the point in the opposite direction of the box heading.
    float angle_cos = cos(-box[6]),
          angle_sin = sin(-box[6]);
    float rot_x =
        (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
    float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;
    return (fabs(rot_x) < box[3] / 2 + MARGIN &&
            fabs(rot_y) < box[4] / 2 + MARGIN);
}
// Intersection point `ans` of segments (p0, p1) and (q0, q1). Returns 1 and
// fills `ans` when the segments properly cross, 0 otherwise.
__device__ inline int intersection(const Point &p1,
                                   const Point &p0,
                                   const Point &q1,
                                   const Point &q0,
                                   Point &ans) {
    // fast exclusion: bounding rectangles must overlap
    if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
    // check cross standing: each segment's endpoints must straddle the other
    float s1 = cross(q0, p1, p0);
    float s2 = cross(p1, q1, p0);
    float s3 = cross(p0, q1, q0);
    float s4 = cross(q1, p1, q0);
    if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
    // calculate intersection of two lines via signed-area interpolation
    float s5 = cross(q1, p1, p0);
    if (fabs(s5 - s1) > EPS) {
        ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
        ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
    } else {
        // Near-degenerate ratio: solve the two implicit line equations
        // a*x + b*y + c = 0 directly (Cramer's rule).
        float a0 = p0.y - p1.y, b0 = p1.x - p0.x,
              c0 = p0.x * p1.y - p1.x * p0.y;
        float a1 = q0.y - q1.y, b1 = q1.x - q0.x,
              c1 = q0.x * q1.y - q1.x * q0.y;
        float D = a0 * b1 - a1 * b0;
        ans.x = (b0 * c1 - b1 * c0) / D;
        ans.y = (a1 * c0 - a0 * c1) / D;
    }
    return 1;
}
// Rotate p around `center` (cos/sin of the angle precomputed), in place.
__device__ inline void rotate_around_center(const Point &center,
                                            const float angle_cos,
                                            const float angle_sin,
                                            Point &p) {
    float new_x = (p.x - center.x) * angle_cos +
                  (p.y - center.y) * (-angle_sin) + center.x;
    float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos +
                  center.y;
    p.set(new_x, new_y);
}
// Polar-angle comparator around `center` for sorting polygon vertices.
__device__ inline int point_cmp(const Point &a,
                                const Point &b,
                                const Point &center) {
    return atan2(a.y - center.y, a.x - center.x) >
           atan2(b.y - center.y, b.x - center.x);
}
// Exact overlap area of two rotated boxes in the BEV (x, y) plane: collect
// edge intersections and contained corners, sort them by angle around their
// centroid, then integrate the polygon area (shoelace formula).
__device__ inline float box_overlap(const float *box_a, const float *box_b) {
    // params box_a: [x, y, z, dx, dy, dz, heading]
    // params box_b: [x, y, z, dx, dy, dz, heading]
    float a_angle = box_a[6], b_angle = box_b[6];
    float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2,
          a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
    float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
    float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
    float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
    float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
    Point center_a(box_a[0], box_a[1]);
    Point center_b(box_b[0], box_b[1]);
#ifdef DEBUG
    printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, "
           "%.3f)\n",
           a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
    printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
           center_b.x, center_b.y);
#endif
    // Axis-aligned corners first; slot [4] duplicates [0] after rotation to
    // close the polygon for the edge loop below.
    Point box_a_corners[5];
    box_a_corners[0].set(a_x1, a_y1);
    box_a_corners[1].set(a_x2, a_y1);
    box_a_corners[2].set(a_x2, a_y2);
    box_a_corners[3].set(a_x1, a_y2);
    Point box_b_corners[5];
    box_b_corners[0].set(b_x1, b_y1);
    box_b_corners[1].set(b_x2, b_y1);
    box_b_corners[2].set(b_x2, b_y2);
    box_b_corners[3].set(b_x1, b_y2);
    // get oriented corners
    float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
    float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
    for (int k = 0; k < 4; k++) {
#ifdef DEBUG
        printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
               box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
               box_b_corners[k].y);
#endif
        rotate_around_center(center_a, a_angle_cos, a_angle_sin,
                             box_a_corners[k]);
        rotate_around_center(center_b, b_angle_cos, b_angle_sin,
                             box_b_corners[k]);
#ifdef DEBUG
        printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
               box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
               box_b_corners[k].y);
#endif
    }
    box_a_corners[4] = box_a_corners[0];
    box_b_corners[4] = box_b_corners[0];
    // Pairwise edge intersections: up to 16 candidate polygon vertices.
    Point cross_points[16];
    Point poly_center;
    int cnt = 0, flag = 0;
    poly_center.set(0, 0);
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                                box_b_corners[j + 1], box_b_corners[j],
                                cross_points[cnt]);
            if (flag) {
                poly_center = poly_center + cross_points[cnt];
                cnt++;
#ifdef DEBUG
                // NOTE(review): these prints index box_b_corners with i
                // rather than j — looks like a copy-paste slip; it only
                // affects DEBUG builds.
                printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, "
                       "%.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n",
                       cross_points[cnt - 1].x, cross_points[cnt - 1].y,
                       box_a_corners[i].x, box_a_corners[i].y,
                       box_a_corners[i + 1].x, box_a_corners[i + 1].y,
                       box_b_corners[i].x, box_b_corners[i].y,
                       box_b_corners[i + 1].x, box_b_corners[i + 1].y);
#endif
            }
        }
    }
    // Corners of one box contained in the other are polygon vertices too.
    for (int k = 0; k < 4; k++) {
        if (check_in_box2d(box_a, box_b_corners[k])) {
            poly_center = poly_center + box_b_corners[k];
            cross_points[cnt] = box_b_corners[k];
            cnt++;
#ifdef DEBUG
            printf("b corners in a: corner_b(%.3f, %.3f)",
                   cross_points[cnt - 1].x, cross_points[cnt - 1].y);
#endif
        }
        if (check_in_box2d(box_b, box_a_corners[k])) {
            poly_center = poly_center + box_a_corners[k];
            cross_points[cnt] = box_a_corners[k];
            cnt++;
#ifdef DEBUG
            printf("a corners in b: corner_a(%.3f, %.3f)",
                   cross_points[cnt - 1].x, cross_points[cnt - 1].y);
#endif
        }
    }
    // NOTE(review): when the boxes are disjoint cnt == 0 and these divisions
    // yield inf/nan, but poly_center is then never read and the area loop
    // below returns 0.
    poly_center.x /= cnt;
    poly_center.y /= cnt;
    // sort the points of polygon around the centroid (bubble sort; cnt <= 16)
    Point temp;
    for (int j = 0; j < cnt - 1; j++) {
        for (int i = 0; i < cnt - j - 1; i++) {
            if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
                temp = cross_points[i];
                cross_points[i] = cross_points[i + 1];
                cross_points[i + 1] = temp;
            }
        }
    }
#ifdef DEBUG
    printf("cnt=%d\n", cnt);
    for (int i = 0; i < cnt; i++) {
        printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
               cross_points[i].y);
    }
#endif
    // Shoelace formula over the triangle fan rooted at vertex 0.
    float area = 0;
    for (int k = 0; k < cnt - 1; k++) {
        area += cross(cross_points[k] - cross_points[0],
                      cross_points[k + 1] - cross_points[0]);
    }
    return fabs(area) / 2.0;
}
// Rotated BEV IoU: overlap / (area_a + area_b - overlap), EPS-guarded.
__device__ inline float iou_bev(const float *box_a, const float *box_b) {
    // params box_a: [x, y, z, dx, dy, dz, heading]
    // params box_b: [x, y, z, dx, dy, dz, heading]
    float sa = box_a[3] * box_a[4];
    float sb = box_b[3] * box_b[4];
    float s_overlap = box_overlap(box_a, box_b);
    return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
// One thread per (a, b) box pair: write the rotated BEV overlap area.
__global__ void boxes_overlap_kernel(const int num_a,
                                     const float *boxes_a,
                                     const int num_b,
                                     const float *boxes_b,
                                     float *ans_overlap) {
    // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
    // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
    const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
    const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    if (a_idx >= num_a || b_idx >= num_b) {
        return;
    }
    const float *cur_box_a = boxes_a + a_idx * 7;
    const float *cur_box_b = boxes_b + b_idx * 7;
    float s_overlap = box_overlap(cur_box_a, cur_box_b);
    ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}
// One thread per (a, b) box pair: write the rotated BEV IoU.
__global__ void boxes_iou_bev_kernel(const int num_a,
                                     const float *boxes_a,
                                     const int num_b,
                                     const float *boxes_b,
                                     float *ans_iou) {
    // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
    // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
    const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
    const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    if (a_idx >= num_a || b_idx >= num_b) {
        return;
    }
    const float *cur_box_a = boxes_a + a_idx * 7;
    const float *cur_box_b = boxes_b + b_idx * 7;
    float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
    ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
__global__ void nms_kernel(const int boxes_num,
                           const float nms_overlap_thresh,
                           const float *boxes,
                           unsigned long long *mask) {
    // Rotated-IoU NMS bitmask kernel.
    // boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    // mask: (N, DIVUP(N, 64)); bit j of mask[i][blk] is set when box
    // (blk * 64 + j) overlaps box i above the threshold.
    const int row_start = blockIdx.y;
    const int col_start = blockIdx.x;

    const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                               THREADS_PER_BLOCK_NMS);
    const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                               THREADS_PER_BLOCK_NMS);

    // Stage this column tile of boxes in shared memory, 7 floats per box.
    __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
    if (threadIdx.x < col_size) {
        const float *src =
            boxes + (THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7;
        float *dst = block_boxes + threadIdx.x * 7;
        for (int k = 0; k < 7; k++) {
            dst[k] = src[k];
        }
    }
    __syncthreads();

    if (threadIdx.x < row_size) {
        const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
        const float *cur_box = boxes + cur_box_idx * 7;

        unsigned long long t = 0;
        int start = 0;
        if (row_start == col_start) {
            // Diagonal tile: only compare against strictly later boxes.
            start = threadIdx.x + 1;
        }
        for (int i = start; i < col_size; i++) {
            if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
                t |= 1ULL << i;
            }
        }
        const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
        mask[cur_box_idx * col_blocks + col_start] = t;
    }
}
// Axis-aligned BEV IoU (heading ignored): standard rectangle intersection
// over union with EPS guarding the denominator.
__device__ inline float iou_normal(float const *const a, float const *const b) {
    // params: a: [x, y, z, dx, dy, dz, heading]
    // params: b: [x, y, z, dx, dy, dz, heading]
    float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2),
          right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2);
    float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2),
          bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2);
    float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
    float interS = width * height;
    float Sa = a[3] * a[4];
    float Sb = b[3] * b[4];
    return interS / fmaxf(Sa + Sb - interS, EPS);
}
__global__ void nms_normal_kernel(const int boxes_num,
                                  const float nms_overlap_thresh,
                                  const float *boxes,
                                  unsigned long long *mask) {
    // Axis-aligned-IoU NMS bitmask kernel (heading ignored).
    // boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    // mask: (N, DIVUP(N, 64)); bit j of mask[i][blk] is set when box
    // (blk * 64 + j) overlaps box i above the threshold.
    const int row_start = blockIdx.y;
    const int col_start = blockIdx.x;

    const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                               THREADS_PER_BLOCK_NMS);
    const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                               THREADS_PER_BLOCK_NMS);

    // Stage this column tile of boxes in shared memory, 7 floats per box.
    __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
    if (threadIdx.x < col_size) {
        const float *src =
            boxes + (THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7;
        float *dst = block_boxes + threadIdx.x * 7;
        for (int k = 0; k < 7; k++) {
            dst[k] = src[k];
        }
    }
    __syncthreads();

    if (threadIdx.x < row_size) {
        const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
        const float *cur_box = boxes + cur_box_idx * 7;

        unsigned long long t = 0;
        int start = 0;
        if (row_start == col_start) {
            // Diagonal tile: only compare against strictly later boxes.
            start = threadIdx.x + 1;
        }
        for (int i = start; i < col_size; i++) {
            if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
                t |= 1ULL << i;
            }
        }
        const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
        mask[cur_box_idx * col_blocks + col_start] = t;
    }
}
// Launch the pairwise overlap kernel on a 2D grid of 16x16 tiles.
void boxesoverlapLauncher(const int num_a,
                          const float *boxes_a,
                          const int num_b,
                          const float *boxes_b,
                          float *ans_overlap) {
    dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK),
                DIVUP(num_a,
                      THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
    boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
                                              ans_overlap);
#ifdef DEBUG
    cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Launch the pairwise IoU kernel on a 2D grid of 16x16 tiles.
void boxesioubevLauncher(const int num_a,
                         const float *boxes_a,
                         const int num_b,
                         const float *boxes_b,
                         float *ans_iou) {
    dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK),
                DIVUP(num_a,
                      THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
    boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
                                              ans_iou);
#ifdef DEBUG
    cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Launch the rotated-IoU NMS bitmask kernel: one 64-wide tile per block.
void nmsLauncher(const float *boxes,
                 unsigned long long *mask,
                 int boxes_num,
                 float nms_overlap_thresh) {
    dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
                DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
    dim3 threads(THREADS_PER_BLOCK_NMS);
    nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
// Launch the axis-aligned NMS bitmask kernel: one 64-wide tile per block.
void nmsNormalLauncher(const float *boxes,
                       unsigned long long *mask,
                       int boxes_num,
                       float nms_overlap_thresh) {
    dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
                DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
    dim3 threads(THREADS_PER_BLOCK_NMS);
    nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes,
                                           mask);
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/pointnet2_modules.py | Python | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.pointnet2.pointnet2_batch import pointnet2_utils
class _PointnetSAModuleBase(nn.Module):
    """Shared skeleton for PointNet++ set-abstraction modules.

    Subclasses are expected to populate ``self.npoint``, ``self.groupers``
    and ``self.mlps`` in their ``__init__``.
    """

    def __init__(self):
        super().__init__()
        # Number of sampled centroids; None means "group all points".
        self.npoint = None
        # nn.ModuleList of grouping operators, one per scale (set by subclass).
        self.groupers = None
        # nn.ModuleList of shared MLPs, one per scale (set by subclass).
        self.mlps = None
        # Pooling over the nsample dimension: "max_pool" or "avg_pool".
        self.pool_method = "max_pool"

    def forward(
        self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None
    ) -> (torch.Tensor, torch.Tensor):
        # NOTE(review): the tuple annotation above is nonstandard; the
        # conventional spelling is typing.Tuple[torch.Tensor, torch.Tensor].
        r"""
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, N, C) tensor of the descriptors of the features
        :param new_xyz: optional pre-computed (B, npoint, 3) centroids; when
            None they are obtained by furthest point sampling below
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the
                new feature descriptors
        """
        new_features_list = []
        # (B, 3, N) layout expected by gather_operation.
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if new_xyz is None:
            # Sample npoint centroids by FPS; stay None when grouping all.
            new_xyz = (
                pointnet2_utils.gather_operation(
                    xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
                )
                .transpose(1, 2)
                .contiguous()
                if self.npoint is not None
                else None
            )
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](
                xyz, new_xyz, features
            )  # (B, C, npoint, nsample)
            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)
            # Pool over the nsample dimension: one descriptor per centroid.
            if self.pool_method == "max_pool":
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            elif self.pool_method == "avg_pool":
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            else:
                raise NotImplementedError
            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)
        # Concatenate per-scale descriptors along the channel dimension.
        return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    """Pointnet set abstraction layer with multiscale grouping."""

    def __init__(
        self,
        *,
        npoint: int,
        radii: List[float],
        nsamples: List[int],
        mlps: List[List[int]],
        bn: bool = True,
        use_xyz: bool = True,
        pool_method="max_pool"
    ):
        """
        :param npoint: int, number of sampled centroids (None = group all)
        :param radii: list of float, list of radii to group with
        :param nsamples: list of int, number of samples in each ball query
        :param mlps: list of list of int, spec of the pointnet before the
            global pooling for each scale
        :param bn: whether to use batchnorm
        :param use_xyz: whether to append xyz offsets to the point features
        :param pool_method: max_pool / avg_pool
        """
        super().__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None
                else pointnet2_utils.GroupAll(use_xyz)
            )
            # Fix: copy the per-scale MLP spec before widening its input
            # channel count. Previously `mlp_spec[0] += 3` mutated the
            # caller's `mlps` argument in place, so reusing the same config
            # list (e.g. constructing the module twice) would add 3 twice.
            mlp_spec = list(mlps[i])
            if use_xyz:
                # Grouped features are concatenated with 3 xyz offsets.
                mlp_spec[0] += 3

            shared_mlps = []
            for k in range(len(mlp_spec) - 1):
                shared_mlps.extend(
                    [
                        nn.Conv2d(
                            mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False
                        ),
                        nn.BatchNorm2d(mlp_spec[k + 1]),
                        nn.ReLU(),
                    ]
                )
            self.mlps.append(nn.Sequential(*shared_mlps))

        self.pool_method = pool_method
class PointnetSAModule(PointnetSAModuleMSG):
    """Single-scale Pointnet set abstraction layer.

    Thin convenience wrapper around :class:`PointnetSAModuleMSG` that wraps
    the scalar ``radius``/``nsample``/``mlp`` arguments into one-element
    lists.
    """

    def __init__(
        self,
        *,
        mlp: List[int],
        npoint: int = None,
        radius: float = None,
        nsample: int = None,
        bn: bool = True,
        use_xyz: bool = True,
        pool_method="max_pool"
    ):
        """
        :param mlp: list of int, spec of the pointnet before the global pooling
        :param npoint: int, number of sampled centroids (None = group all)
        :param radius: float, radius of the ball query
        :param nsample: int, number of samples in the ball query
        :param bn: whether to use batchnorm
        :param use_xyz: whether to append xyz offsets to the point features
        :param pool_method: max_pool / avg_pool
        """
        super().__init__(
            npoint=npoint,
            radii=[radius],
            nsamples=[nsample],
            mlps=[mlp],
            bn=bn,
            use_xyz=use_xyz,
            pool_method=pool_method,
        )
class PointnetFPModule(nn.Module):
    r"""Feature propagation: interpolate features from a sparse point set
    onto a denser one and refine them with a shared MLP."""

    def __init__(self, *, mlp: List[int], bn: bool = True):
        """
        :param mlp: list of int, channel sizes of the shared MLP
        :param bn: whether to use batchnorm
        """
        super().__init__()
        layers = []
        for in_ch, out_ch in zip(mlp[:-1], mlp[1:]):
            layers.append(nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU())
        self.mlp = nn.Sequential(*layers)

    def forward(
        self,
        unknown: torch.Tensor,
        known: torch.Tensor,
        unknow_feats: torch.Tensor,
        known_feats: torch.Tensor,
    ) -> torch.Tensor:
        """
        :param unknown: (B, n, 3) xyz positions to propagate features onto
        :param known: (B, m, 3) xyz positions carrying the known features
        :param unknow_feats: (B, C1, n) features already attached to `unknown`
        :param known_feats: (B, C2, m) features to be propagated
        :return: (B, mlp[-1], n) propagated feature descriptors
        """
        if known is not None:
            # Inverse-distance weighting over the 3 nearest known points.
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm
            interpolated_feats = pointnet2_utils.three_interpolate(
                known_feats, idx, weight
            )
        else:
            # No source coordinates: broadcast the features to every point.
            interpolated_feats = known_feats.expand(
                *known_feats.size()[0:2], unknown.size(1)
            )

        if unknow_feats is None:
            fused = interpolated_feats
        else:
            fused = torch.cat([interpolated_feats, unknow_feats], dim=1)  # (B, C2 + C1, n)

        # Shared MLP expects a trailing singleton "sample" dimension.
        return self.mlp(fused.unsqueeze(-1)).squeeze(-1)
if __name__ == "__main__":
    # Library-only module; no standalone entry point.
    pass
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/pointnet2_utils.py | Python | from typing import Tuple
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from pcdet.ops.pointnet2.pointnet2_batch import pointnet2_batch_cuda as pointnet2
class FurthestPointSampling(Function):
    """Autograd wrapper around the CUDA furthest-point-sampling kernel.

    FPS iteratively picks the point that maximizes the minimum distance to
    the already-selected set, yielding a well-spread subset of indices.
    """

    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
        """
        Uses iterative furthest point sampling to select a set of npoint features that have the largest
        minimum distance
        :param ctx:
        :param xyz: (B, N, 3) contiguous CUDA tensor where N > npoint
        :param npoint: int, number of features in the sampled set
        :return:
            output: (B, npoint) int tensor containing the selected indices
        """
        assert xyz.is_contiguous()
        B, N, _ = xyz.size()
        output = torch.cuda.IntTensor(B, npoint)
        # Per-point running min-distance buffer consumed by the kernel;
        # initialized to "infinity" so every point starts as a candidate.
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
        pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output

    @staticmethod
    def backward(ctx, grad_output=None):
        # Index selection is non-differentiable: no gradients flow to xyz or
        # npoint. (First parameter renamed from the misleading `xyz` — it is
        # the autograd context, called positionally by torch.autograd.)
        return None, None


furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
    """Autograd wrapper around the CUDA kernel that gathers feature columns
    by index: output[b, :, i] = features[b, :, idx[b, i]]."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N)
        :param idx: (B, npoint) index tensor of the features to gather
        :return:
            output: (B, C, npoint)
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        B, npoint = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, npoint)
        pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
        # Saved for backward: the gather indices plus the original (C, N)
        # extent of the feature map.
        ctx.for_backwards = (idx, C, N)
        return output

    @staticmethod
    def backward(ctx, grad_out):
        # Scatter-add the incoming gradient back to the gathered source
        # positions; positions never gathered get zero gradient. idx is
        # non-differentiable.
        idx, C, N = ctx.for_backwards
        B, npoint = idx.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.gather_points_grad_wrapper(
            B, C, N, npoint, grad_out_data, idx, grad_features.data
        )
        return grad_features, None


gather_operation = GatherOperation.apply
class ThreeNN(Function):
    """Autograd wrapper around the CUDA 3-nearest-neighbor search."""

    @staticmethod
    def forward(
        ctx, unknown: torch.Tensor, known: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Find the three nearest neighbors of unknown in known
        :param ctx:
        :param unknown: (B, N, 3)
        :param known: (B, M, 3)
        :return:
            dist: (B, N, 3) l2 distance to the three nearest neighbors
            idx: (B, N, 3) index of 3 nearest neighbors
        """
        assert unknown.is_contiguous()
        assert known.is_contiguous()
        B, N, _ = unknown.size()
        m = known.size(1)
        dist2 = torch.cuda.FloatTensor(B, N, 3)
        idx = torch.cuda.IntTensor(B, N, 3)
        pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
        # The kernel returns squared distances; take the root before returning.
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Nearest-neighbor search is non-differentiable.
        return None, None


three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
    """Autograd wrapper around the CUDA weighted 3-neighbor interpolation."""

    @staticmethod
    def forward(
        ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor
    ) -> torch.Tensor:
        """
        Performs weight linear interpolation on 3 features
        :param ctx:
        :param features: (B, C, M) Features descriptors to be interpolated from
        :param idx: (B, n, 3) three nearest neighbors of the target features in features
        :param weight: (B, n, 3) weights
        :return:
            output: (B, C, N) tensor of the interpolated features
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()
        B, c, m = features.size()
        n = idx.size(1)
        # Saved for backward: neighbor indices, weights, and source length m.
        ctx.three_interpolate_for_backward = (idx, weight, m)
        output = torch.cuda.FloatTensor(B, c, n)
        pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
        return output

    @staticmethod
    def backward(
        ctx, grad_out: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, N) tensor with gradients of outputs
        :return:
            grad_features: (B, C, M) tensor with gradients of features
            None:
            None:
        """
        idx, weight, m = ctx.three_interpolate_for_backward
        B, c, n = grad_out.size()
        # Weighted scatter-add of the output gradient back onto the source
        # features; idx and weight receive no gradient.
        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.three_interpolate_grad_wrapper(
            B, c, n, m, grad_out_data, idx, weight, grad_features.data
        )
        return grad_features, None, None


three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
    """Autograd wrapper around the CUDA kernel that gathers per-neighborhood
    feature groups: output[b, :, i, j] = features[b, :, idx[b, i, j]]."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indicies of features to group with
        :return:
            output: (B, C, npoint, nsample) tensor
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
        pointnet2.group_points_wrapper(
            B, C, N, nfeatures, nsample, features, idx, output
        )
        # Saved for backward: the group indices and the source length N.
        ctx.for_backwards = (idx, N)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
        :return:
            grad_features: (B, C, N) gradient of the features
        """
        idx, N = ctx.for_backwards
        B, C, npoint, nsample = grad_out.size()
        # Scatter-add: a source point referenced by several groups accumulates
        # all of their gradients. idx receives no gradient.
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(
            B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data
        )
        return grad_features, None


grouping_operation = GroupingOperation.apply
class BallQuery(Function):
    """Autograd wrapper around the CUDA fixed-radius neighbor search."""

    @staticmethod
    def forward(
        ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor
    ) -> torch.Tensor:
        """
        :param ctx:
        :param radius: float, radius of the balls
        :param nsample: int, maximum number of features in the balls
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centers of the ball query
        :return:
            idx: (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()
        B, N, _ = xyz.size()
        npoint = new_xyz.size(1)
        # Zero-init so a ball containing no in-radius point yields index 0 in
        # every slot (the kernel only writes the slots it fills).
        idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
        pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Index selection is non-differentiable.
        return None, None, None, None


ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
    """Ball-query grouping: for every centroid, gather up to ``nsample``
    neighbors within ``radius`` and return their centered coordinates
    and/or features."""

    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: whether to prepend centered xyz to the grouped features
        """
        super().__init__()
        self.radius = radius
        self.nsample = nsample
        self.use_xyz = use_xyz

    def forward(
        self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None
    ) -> Tuple[torch.Tensor]:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, 3 + C, npoint, nsample)
        """
        neighbor_idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        # (B, 3, npoint, nsample) neighbor coordinates, re-centered on their
        # centroid so downstream layers see translation-invariant offsets.
        grouped_xyz = grouping_operation(
            xyz.transpose(1, 2).contiguous(), neighbor_idx
        )
        grouped_xyz = grouped_xyz - new_xyz.transpose(1, 2).unsqueeze(-1)

        if features is None:
            assert (
                self.use_xyz
            ), "Cannot have not features and not use xyz as a feature!"
            return grouped_xyz

        grouped_features = grouping_operation(features, neighbor_idx)
        if not self.use_xyz:
            return grouped_features
        # (B, C + 3, npoint, nsample)
        return torch.cat([grouped_xyz, grouped_features], dim=1)
class GroupAll(nn.Module):
    """Degenerate grouping that treats the whole point set as one group."""

    def __init__(self, use_xyz: bool = True):
        super().__init__()
        self.use_xyz = use_xyz

    def forward(
        self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None
    ):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: ignored
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, C + 3, 1, N)
        """
        # (B, 3, 1, N): every point belongs to the single group.
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
        if features is None:
            return grouped_xyz

        grouped_features = features.unsqueeze(2)
        if not self.use_xyz:
            return grouped_features
        # (B, 3 + C, 1, N)
        return torch.cat([grouped_xyz, grouped_features], dim=1)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/ball_query.cpp | C++ | /*
batch version of ball query, modified from the original implementation of
official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2018.
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "ball_query_gpu.h"
#define CHECK_CUDA(x) \
do { \
if (!x.type().is_cuda()) { \
fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, \
__LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
if (!x.is_contiguous()) { \
fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, \
__FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
// Python-facing entry point for ball query.
// b: batch size, n: number of source points, m: number of query centers.
// new_xyz_tensor: (B, M, 3) centers, xyz_tensor: (B, N, 3) points,
// idx_tensor: (B, M, nsample) output indices. Returns 1 on success.
int ball_query_wrapper_fast(int b,
                            int n,
                            int m,
                            float radius,
                            int nsample,
                            at::Tensor new_xyz_tensor,
                            at::Tensor xyz_tensor,
                            at::Tensor idx_tensor) {
    CHECK_INPUT(new_xyz_tensor);
    CHECK_INPUT(xyz_tensor);
    const float *new_xyz = new_xyz_tensor.data<float>();
    const float *xyz = xyz_tensor.data<float>();
    int *idx = idx_tensor.data<int>();
    ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz,
                                    idx);
    return 1;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.cu | CUDA | /*
batch version of ball query, modified from the original implementation of
official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2018.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ball_query_gpu.h"
#include "cuda_utils.h"
// One thread per (batch, query-center) pair: linearly scans all n source
// points and records up to nsample in-radius neighbor indices.
__global__ void ball_query_kernel_fast(int b,
                                       int n,
                                       int m,
                                       float radius,
                                       int nsample,
                                       const float *__restrict__ new_xyz,
                                       const float *__restrict__ xyz,
                                       int *__restrict__ idx) {
    // new_xyz: (B, M, 3)
    // xyz: (B, N, 3)
    // output:
    //      idx: (B, M, nsample)
    int bs_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || pt_idx >= m) return;
    // Advance the base pointers to this thread's batch/center slice.
    new_xyz += bs_idx * m * 3 + pt_idx * 3;
    xyz += bs_idx * n * 3;
    idx += bs_idx * m * nsample + pt_idx * nsample;
    // Compare squared distances to avoid a sqrt per candidate.
    float radius2 = radius * radius;
    float new_x = new_xyz[0];
    float new_y = new_xyz[1];
    float new_z = new_xyz[2];
    int cnt = 0;
    for (int k = 0; k < n; ++k) {
        float x = xyz[k * 3 + 0];
        float y = xyz[k * 3 + 1];
        float z = xyz[k * 3 + 2];
        float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
                   (new_z - z) * (new_z - z);
        if (d2 < radius2) {
            if (cnt == 0) {
                // Pre-fill every slot with the first in-radius neighbor so
                // unused slots hold a valid (duplicated) index rather than
                // whatever the caller left in the buffer.
                for (int l = 0; l < nsample; ++l) {
                    idx[l] = k;
                }
            }
            idx[cnt] = k;
            ++cnt;
            if (cnt >= nsample) break;
        }
    }
}
// Host-side launcher: grid.x covers the m query centers, grid.y the batch.
void ball_query_kernel_launcher_fast(int b,
                                     int n,
                                     int m,
                                     float radius,
                                     int nsample,
                                     const float *new_xyz,
                                     const float *xyz,
                                     int *idx) {
    // new_xyz: (B, M, 3)
    // xyz: (B, N, 3)
    // output:
    //      idx: (B, M, nsample)
    cudaError_t err;
    dim3 blocks(DIVUP(m, THREADS_PER_BLOCK),
                b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    ball_query_kernel_fast<<<blocks, threads>>>(b, n, m, radius, nsample,
                                                new_xyz, xyz, idx);
    // cudaDeviceSynchronize();  // for using printf in kernel function
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.h | C/C++ Header | #ifndef _BALL_QUERY_GPU_H
#define _BALL_QUERY_GPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
int ball_query_wrapper_fast(int b,
int n,
int m,
float radius,
int nsample,
at::Tensor new_xyz_tensor,
at::Tensor xyz_tensor,
at::Tensor idx_tensor);
// NOTE: parameter names now match the definition in ball_query_gpu.cu —
// the query centers (new_xyz) come BEFORE the source points (xyz). The
// previous declaration had the two names swapped, which misdocumented the
// argument order for callers (names in a declaration are documentation only,
// so this does not change the ABI).
void ball_query_kernel_launcher_fast(int b,
                                     int n,
                                     int m,
                                     float radius,
                                     int nsample,
                                     const float *new_xyz,
                                     const float *xyz,
                                     int *idx);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/cuda_utils.h | C/C++ Header | #ifndef _CUDA_UTILS_H
#define _CUDA_UTILS_H
#include <cmath>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Pick a CUDA block size: work_size rounded down to the nearest power of
// two, clamped to [1, TOTAL_THREADS].
// NOTE(review): assumes work_size > 0 — std::log(0) is -inf; verify callers.
inline int opt_n_threads(int work_size) {
    const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
    return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/group_points.cpp | C++ | /*
batch version of point grouping, modified from the original implementation of
official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2018.
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "group_points_gpu.h"
// Python-facing backward entry point: scatter-adds grad_out
// (B, C, npoints, nsample) into grad_points (B, C, N) via idx.
// Returns 1 on success.
int group_points_grad_wrapper_fast(int b,
                                   int c,
                                   int n,
                                   int npoints,
                                   int nsample,
                                   at::Tensor grad_out_tensor,
                                   at::Tensor idx_tensor,
                                   at::Tensor grad_points_tensor) {
    float *grad_points = grad_points_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    const float *grad_out = grad_out_tensor.data<float>();
    group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out,
                                           idx, grad_points);
    return 1;
}
// Python-facing forward entry point: gathers points (B, C, N) into grouped
// output (B, C, npoints, nsample) using idx. Returns 1 on success.
int group_points_wrapper_fast(int b,
                              int c,
                              int n,
                              int npoints,
                              int nsample,
                              at::Tensor points_tensor,
                              at::Tensor idx_tensor,
                              at::Tensor out_tensor) {
    const float *points = points_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    float *out = out_tensor.data<float>();
    group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx,
                                      out);
    return 1;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.cu | CUDA | /*
batch version of point grouping, modified from the original implementation of
official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2018.
*/
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "group_points_gpu.h"
// One thread per (batch, channel, point, sample) output element; each
// thread atomically adds its gradient into the shared source position,
// since several groups may reference the same point.
__global__ void group_points_grad_kernel_fast(
    int b,
    int c,
    int n,
    int npoints,
    int nsample,
    const float *__restrict__ grad_out,
    const int *__restrict__ idx,
    float *__restrict__ grad_points) {
    // grad_out: (B, C, npoints, nsample)
    // idx: (B, npoints, nsample)
    // output:
    //      grad_points: (B, C, N)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int pt_idx = index / nsample;
    if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
    int sample_idx = index % nsample;
    grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample +
                pt_idx * nsample + sample_idx;
    idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
    // atomicAdd: multiple samples may map to the same source point.
    atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0], grad_out[0]);
}
// Host-side launcher: grid.x covers npoints*nsample elements, grid.y the
// channels, grid.z the batch.
void group_points_grad_kernel_launcher_fast(int b,
                                            int c,
                                            int n,
                                            int npoints,
                                            int nsample,
                                            const float *grad_out,
                                            const int *idx,
                                            float *grad_points) {
    // grad_out: (B, C, npoints, nsample)
    // idx: (B, npoints, nsample)
    // output:
    //      grad_points: (B, C, N)
    cudaError_t err;
    dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c,
                b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    group_points_grad_kernel_fast<<<blocks, threads>>>(
        b, c, n, npoints, nsample, grad_out, idx, grad_points);
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// One thread per (batch, channel, point, sample) output element; each
// thread copies one feature value selected by idx.
__global__ void group_points_kernel_fast(int b,
                                         int c,
                                         int n,
                                         int npoints,
                                         int nsample,
                                         const float *__restrict__ points,
                                         const int *__restrict__ idx,
                                         float *__restrict__ out) {
    // points: (B, C, N)
    // idx: (B, npoints, nsample)
    // output:
    //      out: (B, C, npoints, nsample)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int pt_idx = index / nsample;
    if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
    int sample_idx = index % nsample;
    idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
    int in_idx = bs_idx * c * n + c_idx * n + idx[0];
    int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample +
                  pt_idx * nsample + sample_idx;
    out[out_idx] = points[in_idx];
}
// Host-side launcher: grid.x covers npoints*nsample elements, grid.y the
// channels, grid.z the batch.
void group_points_kernel_launcher_fast(int b,
                                       int c,
                                       int n,
                                       int npoints,
                                       int nsample,
                                       const float *points,
                                       const int *idx,
                                       float *out) {
    // points: (B, C, N)
    // idx: (B, npoints, nsample)
    // output:
    //      out: (B, C, npoints, nsample)
    cudaError_t err;
    dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c,
                b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    group_points_kernel_fast<<<blocks, threads>>>(b, c, n, npoints, nsample,
                                                  points, idx, out);
    // cudaDeviceSynchronize();  // for using printf in kernel function
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.h | C/C++ Header | #ifndef _GROUP_POINTS_GPU_H
#define _GROUP_POINTS_GPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
int group_points_wrapper_fast(int b,
int c,
int n,
int npoints,
int nsample,
at::Tensor points_tensor,
at::Tensor idx_tensor,
at::Tensor out_tensor);
void group_points_kernel_launcher_fast(int b,
int c,
int n,
int npoints,
int nsample,
const float *points,
const int *idx,
float *out);
int group_points_grad_wrapper_fast(int b,
int c,
int n,
int npoints,
int nsample,
at::Tensor grad_out_tensor,
at::Tensor idx_tensor,
at::Tensor grad_points_tensor);
void group_points_grad_kernel_launcher_fast(int b,
int c,
int n,
int npoints,
int nsample,
const float *grad_out,
const int *idx,
float *grad_points);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/interpolate.cpp | C++ | /*
batch version of point interpolation, modified from the original implementation
of official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2018.
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "interpolate_gpu.h"
// Python-facing entry point for 3-NN search: for each of the n unknown
// points, writes the squared distances and indices of its 3 nearest known
// points into dist2_tensor / idx_tensor (both (B, N, 3)).
void three_nn_wrapper_fast(int b,
                           int n,
                           int m,
                           at::Tensor unknown_tensor,
                           at::Tensor known_tensor,
                           at::Tensor dist2_tensor,
                           at::Tensor idx_tensor) {
    const float *unknown = unknown_tensor.data<float>();
    const float *known = known_tensor.data<float>();
    float *dist2 = dist2_tensor.data<float>();
    int *idx = idx_tensor.data<int>();
    three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx);
}
// Python-facing forward entry point: interpolates points (B, C, M) onto
// (B, C, N) as a weighted sum of 3 neighbors given by idx / weight.
void three_interpolate_wrapper_fast(int b,
                                    int c,
                                    int m,
                                    int n,
                                    at::Tensor points_tensor,
                                    at::Tensor idx_tensor,
                                    at::Tensor weight_tensor,
                                    at::Tensor out_tensor) {
    const float *points = points_tensor.data<float>();
    const float *weight = weight_tensor.data<float>();
    float *out = out_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight,
                                           out);
}
// Python-facing backward entry point: scatter-adds the weighted output
// gradient (B, C, N) back into grad_points (B, C, M) via idx / weight.
void three_interpolate_grad_wrapper_fast(int b,
                                         int c,
                                         int n,
                                         int m,
                                         at::Tensor grad_out_tensor,
                                         at::Tensor idx_tensor,
                                         at::Tensor weight_tensor,
                                         at::Tensor grad_points_tensor) {
    const float *grad_out = grad_out_tensor.data<float>();
    const float *weight = weight_tensor.data<float>();
    float *grad_points = grad_points_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx,
                                                weight, grad_points);
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.cu | CUDA | /*
batch version of point interpolation, modified from the original implementation
of official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2018.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "interpolate_gpu.h"
// One thread per (batch, unknown-point) pair: brute-force scans all m known
// points and keeps the 3 smallest squared distances with a tiny 3-slot
// insertion sort (best1 <= best2 <= best3).
__global__ void three_nn_kernel_fast(int b,
                                     int n,
                                     int m,
                                     const float *__restrict__ unknown,
                                     const float *__restrict__ known,
                                     float *__restrict__ dist2,
                                     int *__restrict__ idx) {
    // unknown: (B, N, 3)
    // known: (B, M, 3)
    // output:
    //      dist2: (B, N, 3)
    //      idx: (B, N, 3)
    int bs_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || pt_idx >= n) return;
    unknown += bs_idx * n * 3 + pt_idx * 3;
    known += bs_idx * m * 3;
    dist2 += bs_idx * n * 3 + pt_idx * 3;
    idx += bs_idx * n * 3 + pt_idx * 3;
    float ux = unknown[0];
    float uy = unknown[1];
    float uz = unknown[2];
    // Sentinels larger than any real squared distance.
    double best1 = 1e40, best2 = 1e40, best3 = 1e40;
    int besti1 = 0, besti2 = 0, besti3 = 0;
    for (int k = 0; k < m; ++k) {
        float x = known[k * 3 + 0];
        float y = known[k * 3 + 1];
        float z = known[k * 3 + 2];
        float d =
            (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
        // Insert d into the sorted (best1, best2, best3) triple.
        if (d < best1) {
            best3 = best2;
            besti3 = besti2;
            best2 = best1;
            besti2 = besti1;
            best1 = d;
            besti1 = k;
        } else if (d < best2) {
            best3 = best2;
            besti3 = besti2;
            best2 = d;
            besti2 = k;
        } else if (d < best3) {
            best3 = d;
            besti3 = k;
        }
    }
    // NOTE: dist2 holds SQUARED distances; the Python wrapper applies sqrt.
    dist2[0] = best1;
    dist2[1] = best2;
    dist2[2] = best3;
    idx[0] = besti1;
    idx[1] = besti2;
    idx[2] = besti3;
}
// Host-side launcher: grid.x covers the n unknown points, grid.y the batch.
void three_nn_kernel_launcher_fast(int b,
                                   int n,
                                   int m,
                                   const float *unknown,
                                   const float *known,
                                   float *dist2,
                                   int *idx) {
    // unknown: (B, N, 3)
    // known: (B, M, 3)
    // output:
    //      dist2: (B, N, 3)
    //      idx: (B, N, 3)
    cudaError_t err;
    dim3 blocks(DIVUP(n, THREADS_PER_BLOCK),
                b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    three_nn_kernel_fast<<<blocks, threads>>>(b, n, m, unknown, known, dist2,
                                              idx);
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// One thread per (batch, channel, target-point) output element: weighted
// sum of the 3 source features selected by idx.
__global__ void three_interpolate_kernel_fast(int b,
                                              int c,
                                              int m,
                                              int n,
                                              const float *__restrict__ points,
                                              const int *__restrict__ idx,
                                              const float *__restrict__ weight,
                                              float *__restrict__ out) {
    // points: (B, C, M)
    // idx: (B, N, 3)
    // weight: (B, N, 3)
    // output:
    //      out: (B, C, N)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
    weight += bs_idx * n * 3 + pt_idx * 3;
    points += bs_idx * c * m + c_idx * m;
    idx += bs_idx * n * 3 + pt_idx * 3;
    out += bs_idx * c * n + c_idx * n;
    out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] +
                  weight[2] * points[idx[2]];
}
// Host-side launcher: grid.x covers the n target points, grid.y the
// channels, grid.z the batch.
void three_interpolate_kernel_launcher_fast(int b,
                                            int c,
                                            int m,
                                            int n,
                                            const float *points,
                                            const int *idx,
                                            const float *weight,
                                            float *out) {
    // points: (B, C, M)
    // idx: (B, N, 3)
    // weight: (B, N, 3)
    // output:
    //      out: (B, C, N)
    cudaError_t err;
    dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c,
                b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    three_interpolate_kernel_fast<<<blocks, threads>>>(b, c, m, n, points, idx,
                                                       weight, out);
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// One thread per (batch, channel, target-point) gradient element: scatters
// the weighted output gradient to the 3 contributing source features.
__global__ void three_interpolate_grad_kernel_fast(
    int b,
    int c,
    int n,
    int m,
    const float *__restrict__ grad_out,
    const int *__restrict__ idx,
    const float *__restrict__ weight,
    float *__restrict__ grad_points) {
    // grad_out: (B, C, N)
    // weight: (B, N, 3)
    // output:
    //      grad_points: (B, C, M)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
    grad_out += bs_idx * c * n + c_idx * n + pt_idx;
    weight += bs_idx * n * 3 + pt_idx * 3;
    grad_points += bs_idx * c * m + c_idx * m;
    idx += bs_idx * n * 3 + pt_idx * 3;
    // atomicAdd: several target points can share a source neighbor.
    atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]);
    atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]);
    atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]);
}
// Host-side launcher: grid.x covers the n target points, grid.y the
// channels, grid.z the batch.
void three_interpolate_grad_kernel_launcher_fast(int b,
                                                 int c,
                                                 int n,
                                                 int m,
                                                 const float *grad_out,
                                                 const int *idx,
                                                 const float *weight,
                                                 float *grad_points) {
    // grad_out: (B, C, N)
    // weight: (B, N, 3)
    // output:
    //      grad_points: (B, C, M)
    cudaError_t err;
    dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c,
                b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    three_interpolate_grad_kernel_fast<<<blocks, threads>>>(
        b, c, n, m, grad_out, idx, weight, grad_points);
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.h | C/C++ Header | #ifndef _INTERPOLATE_GPU_H
#define _INTERPOLATE_GPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
void three_nn_wrapper_fast(int b,
int n,
int m,
at::Tensor unknown_tensor,
at::Tensor known_tensor,
at::Tensor dist2_tensor,
at::Tensor idx_tensor);
void three_nn_kernel_launcher_fast(int b,
int n,
int m,
const float *unknown,
const float *known,
float *dist2,
int *idx);
void three_interpolate_wrapper_fast(int b,
int c,
int m,
int n,
at::Tensor points_tensor,
at::Tensor idx_tensor,
at::Tensor weight_tensor,
at::Tensor out_tensor);
void three_interpolate_kernel_launcher_fast(int b,
int c,
int m,
int n,
const float *points,
const int *idx,
const float *weight,
float *out);
void three_interpolate_grad_wrapper_fast(int b,
int c,
int n,
int m,
at::Tensor grad_out_tensor,
at::Tensor idx_tensor,
at::Tensor weight_tensor,
at::Tensor grad_points_tensor);
void three_interpolate_grad_kernel_launcher_fast(int b,
int c,
int n,
int m,
const float *grad_out,
const int *idx,
const float *weight,
float *grad_points);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/pointnet2_api.cpp | C++ | #include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include "ball_query_gpu.h"
#include "group_points_gpu.h"
#include "interpolate_gpu.h"
#include "sampling_gpu.h"
// Pybind bindings: exposes the *_fast C++ wrappers to Python under the
// names used by pointnet2_utils.py (e.g. pointnet2.ball_query_wrapper).
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("ball_query_wrapper", &ball_query_wrapper_fast,
          "ball_query_wrapper_fast");
    m.def("group_points_wrapper", &group_points_wrapper_fast,
          "group_points_wrapper_fast");
    m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast,
          "group_points_grad_wrapper_fast");
    m.def("gather_points_wrapper", &gather_points_wrapper_fast,
          "gather_points_wrapper_fast");
    m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast,
          "gather_points_grad_wrapper_fast");
    m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper,
          "furthest_point_sampling_wrapper");
    m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast");
    m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast,
          "three_interpolate_wrapper_fast");
    m.def("three_interpolate_grad_wrapper",
          &three_interpolate_grad_wrapper_fast,
          "three_interpolate_grad_wrapper_fast");
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/sampling.cpp | C++ | /*
batch version of point sampling and gathering, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2018.
*/
#include <ATen/cuda/CUDAContext.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "sampling_gpu.h"
// Python-facing forward entry point: gathers points (B, C, N) into
// (B, C, npoints) using idx (B, npoints). Returns 1 on success.
int gather_points_wrapper_fast(int b,
                               int c,
                               int n,
                               int npoints,
                               at::Tensor points_tensor,
                               at::Tensor idx_tensor,
                               at::Tensor out_tensor) {
    const float *points = points_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    float *out = out_tensor.data<float>();
    gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out);
    return 1;
}
// Python-facing backward entry point: scatter-adds grad_out (B, C, npoints)
// back into grad_points (B, C, N) via idx. Returns 1 on success.
int gather_points_grad_wrapper_fast(int b,
                                    int c,
                                    int n,
                                    int npoints,
                                    at::Tensor grad_out_tensor,
                                    at::Tensor idx_tensor,
                                    at::Tensor grad_points_tensor) {
    const float *grad_out = grad_out_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    float *grad_points = grad_points_tensor.data<float>();
    gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx,
                                            grad_points);
    return 1;
}
// Python-facing entry point for furthest point sampling. temp_tensor is a
// (B, N) scratch buffer of running min-distances that the caller must
// pre-fill with a large value (the Python wrapper uses 1e10).
// Returns 1 on success.
int furthest_point_sampling_wrapper(int b,
                                    int n,
                                    int m,
                                    at::Tensor points_tensor,
                                    at::Tensor temp_tensor,
                                    at::Tensor idx_tensor) {
    const float *points = points_tensor.data<float>();
    float *temp = temp_tensor.data<float>();
    int *idx = idx_tensor.data<int>();
    furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx);
    return 1;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.cu | CUDA | /*
batch version of point sampling and gathering, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2018.
*/
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
// One thread per (batch, channel, sampled-point) output element; copies a
// single feature value selected by idx.
__global__ void gather_points_kernel_fast(int b,
                                          int c,
                                          int n,
                                          int m,
                                          const float *__restrict__ points,
                                          const int *__restrict__ idx,
                                          float *__restrict__ out) {
    // points: (B, C, N)
    // idx: (B, M)
    // output:
    //      out: (B, C, M)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
    out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    points += bs_idx * c * n + c_idx * n;
    out[0] = points[idx[0]];
}
// Host-side launcher: grid.x covers the npoints outputs, grid.y the
// channels, grid.z the batch.
void gather_points_kernel_launcher_fast(int b,
                                        int c,
                                        int n,
                                        int npoints,
                                        const float *points,
                                        const int *idx,
                                        float *out) {
    // points: (B, C, N)
    // idx: (B, npoints)
    // output:
    //      out: (B, C, npoints)
    cudaError_t err;
    dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c,
                b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    gather_points_kernel_fast<<<blocks, threads>>>(b, c, n, npoints, points,
                                                   idx, out);
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// One thread per (batch, channel, sampled-point) gradient element; each
// thread atomically adds its gradient to the source position, since the
// same point may be gathered more than once.
__global__ void gather_points_grad_kernel_fast(
    int b,
    int c,
    int n,
    int m,
    const float *__restrict__ grad_out,
    const int *__restrict__ idx,
    float *__restrict__ grad_points) {
    // grad_out: (B, C, M)
    // idx: (B, M)
    // output:
    //      grad_points: (B, C, N)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
    grad_out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    grad_points += bs_idx * c * n + c_idx * n;
    atomicAdd(grad_points + idx[0], grad_out[0]);
}
// Host-side launcher for gather_points_grad_kernel_fast; mirrors the
// forward launcher's grid layout (one thread per B x C x npoints element).
// grad_points is expected to be zero-initialized by the caller since the
// kernel accumulates with atomicAdd.
void gather_points_grad_kernel_launcher_fast(int b,
                                             int c,
                                             int n,
                                             int npoints,
                                             const float *grad_out,
                                             const int *idx,
                                             float *grad_points) {
    // grad_out: (B, C, npoints)
    // idx: (B, npoints)
    // output:
    //      grad_points: (B, C, N)
    cudaError_t err;
    dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c,
                b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    gather_points_grad_kernel_fast<<<blocks, threads>>>(
        b, c, n, npoints, grad_out, idx, grad_points);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// One step of the shared-memory argmax reduction used by farthest point
// sampling: keeps the larger of dists[idx1]/dists[idx2] (and the point
// index that produced it) in slot idx1.
__device__ void __update(float *__restrict__ dists,
                         int *__restrict__ dists_i,
                         int idx1,
                         int idx2) {
    const float v1 = dists[idx1], v2 = dists[idx2];
    const int i1 = dists_i[idx1], i2 = dists_i[idx2];
    dists[idx1] = max(v1, v2);
    // Ties keep i1, matching max(v1, v2) returning v1 when equal.
    dists_i[idx1] = v2 > v1 ? i2 : i1;
}
// Iterative farthest point sampling; one CUDA block per batch element.
// Each of the m-1 iterations (point 0 seeds the set) computes, for every
// candidate point, its distance to the newest selected point, folds it
// into the running min-distance buffer `temp`, and then picks the point
// with the largest such distance via an unrolled shared-memory argmax
// reduction (the __update tree below). block_size must be a power of two.
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
    int b,
    int n,
    int m,
    const float *__restrict__ dataset,
    float *__restrict__ temp,
    int *__restrict__ idxs) {
    // dataset: (B, N, 3)
    // tmp: (B, N)  running min distance of each point to the selected set;
    //              caller initializes it to a large value (e.g. 1e10)
    // output:
    //      idx: (B, M)
    if (m <= 0) return;
    __shared__ float dists[block_size];
    __shared__ int dists_i[block_size];

    // Rebase all pointers to this block's batch element.
    int batch_index = blockIdx.x;
    dataset += batch_index * n * 3;
    temp += batch_index * n;
    idxs += batch_index * m;

    int tid = threadIdx.x;
    const int stride = block_size;

    // The first sampled point is always index 0.
    int old = 0;
    if (threadIdx.x == 0) idxs[0] = old;

    __syncthreads();
    for (int j = 1; j < m; j++) {
        int besti = 0;  // per-thread argmax candidate
        float best = -1;
        // Coordinates of the most recently selected point.
        float x1 = dataset[old * 3 + 0];
        float y1 = dataset[old * 3 + 1];
        float z1 = dataset[old * 3 + 2];
        // Grid-stride style scan over candidates: update each point's
        // min distance to the selected set and track the farthest one.
        for (int k = tid; k < n; k += stride) {
            float x2, y2, z2;
            x2 = dataset[k * 3 + 0];
            y2 = dataset[k * 3 + 1];
            z2 = dataset[k * 3 + 2];
            // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
            // if (mag <= 1e-3)
            //     continue;

            float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) +
                      (z2 - z1) * (z2 - z1);
            float d2 = min(d, temp[k]);
            temp[k] = d2;
            besti = d2 > best ? k : besti;
            best = d2 > best ? d2 : best;
        }
        dists[tid] = best;
        dists_i[tid] = besti;
        __syncthreads();

        // Unrolled binary-tree argmax over the block's partial results;
        // dead branches are compiled out for each block_size instantiation.
        if (block_size >= 1024) {
            if (tid < 512) {
                __update(dists, dists_i, tid, tid + 512);
            }
            __syncthreads();
        }

        if (block_size >= 512) {
            if (tid < 256) {
                __update(dists, dists_i, tid, tid + 256);
            }
            __syncthreads();
        }
        if (block_size >= 256) {
            if (tid < 128) {
                __update(dists, dists_i, tid, tid + 128);
            }
            __syncthreads();
        }
        if (block_size >= 128) {
            if (tid < 64) {
                __update(dists, dists_i, tid, tid + 64);
            }
            __syncthreads();
        }
        if (block_size >= 64) {
            if (tid < 32) {
                __update(dists, dists_i, tid, tid + 32);
            }
            __syncthreads();
        }
        if (block_size >= 32) {
            if (tid < 16) {
                __update(dists, dists_i, tid, tid + 16);
            }
            __syncthreads();
        }
        if (block_size >= 16) {
            if (tid < 8) {
                __update(dists, dists_i, tid, tid + 8);
            }
            __syncthreads();
        }
        if (block_size >= 8) {
            if (tid < 4) {
                __update(dists, dists_i, tid, tid + 4);
            }
            __syncthreads();
        }
        if (block_size >= 4) {
            if (tid < 2) {
                __update(dists, dists_i, tid, tid + 2);
            }
            __syncthreads();
        }
        if (block_size >= 2) {
            if (tid < 1) {
                __update(dists, dists_i, tid, tid + 1);
            }
            __syncthreads();
        }

        // All threads adopt the winning index for the next iteration.
        // NOTE(review): dists_i[0] is read here and may be overwritten by
        // the next iteration's "dists_i[tid] = besti" with no intervening
        // barrier; the inner scan over n appears to provide the separation
        // in practice — confirm if n can be very small.
        old = dists_i[0];
        if (tid == 0) idxs[j] = old;
    }
}
// Host-side launcher for furthest_point_sampling_kernel.
// Dispatches to the power-of-two template instantiation matching
// opt_n_threads(n) so the in-kernel reduction can be fully unrolled;
// one block per batch element.
void furthest_point_sampling_kernel_launcher(
    int b, int n, int m, const float *dataset, float *temp, int *idxs) {
    // dataset: (B, N, 3)
    // tmp: (B, N) must be pre-filled with a large value by the caller
    // output:
    //      idx: (B, M)
    cudaError_t err;
    unsigned int n_threads = opt_n_threads(n);

    switch (n_threads) {
        case 1024:
            furthest_point_sampling_kernel<1024>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 512:
            furthest_point_sampling_kernel<512>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 256:
            furthest_point_sampling_kernel<256>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 128:
            furthest_point_sampling_kernel<128>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 64:
            furthest_point_sampling_kernel<64>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 32:
            furthest_point_sampling_kernel<32>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 16:
            furthest_point_sampling_kernel<16>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 8:
            furthest_point_sampling_kernel<8>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 4:
            furthest_point_sampling_kernel<4>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 2:
            furthest_point_sampling_kernel<2>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        case 1:
            furthest_point_sampling_kernel<1>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
            break;
        default:
            // Fallback for an unexpected thread count; presumably
            // unreachable if opt_n_threads only returns powers of two —
            // TODO confirm against cuda_utils.h.
            furthest_point_sampling_kernel<512>
                <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
    }

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.h | C/C++ Header | #ifndef _SAMPLING_GPU_H
#define _SAMPLING_GPU_H
#include <ATen/cuda/CUDAContext.h>
#include <torch/serialize/tensor.h>
#include <vector>
int gather_points_wrapper_fast(int b,
int c,
int n,
int npoints,
at::Tensor points_tensor,
at::Tensor idx_tensor,
at::Tensor out_tensor);
void gather_points_kernel_launcher_fast(int b,
int c,
int n,
int npoints,
const float *points,
const int *idx,
float *out);
int gather_points_grad_wrapper_fast(int b,
int c,
int n,
int npoints,
at::Tensor grad_out_tensor,
at::Tensor idx_tensor,
at::Tensor grad_points_tensor);
void gather_points_grad_kernel_launcher_fast(int b,
int c,
int n,
int npoints,
const float *grad_out,
const int *idx,
float *grad_points);
int furthest_point_sampling_wrapper(int b,
int n,
int m,
at::Tensor points_tensor,
at::Tensor temp_tensor,
at::Tensor idx_tensor);
void furthest_point_sampling_kernel_launcher(
int b, int n, int m, const float *dataset, float *temp, int *idxs);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py | Python | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.ops.pointnet2.pointnet2_stack import pointnet2_utils
class StackSAModuleMSG(nn.Module):
    """Stacked-batch multi-scale-grouping set-abstraction module.

    For each scale (one radius/nsample pair), ball-queries the neighborhood
    of every new point, runs a shared 1x1-conv MLP over the grouped
    features, pools over the neighborhood, and finally concatenates the
    per-scale pooled features along the channel dimension.
    """

    def __init__(
        self,
        *,
        radii: List[float],
        nsamples: List[int],
        mlps: List[List[int]],
        use_xyz: bool = True,
        pool_method="max_pool"
    ):
        """
        Args:
            radii: list of float, list of radii to group with
            nsamples: list of int, number of samples in each ball query
            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
            use_xyz: if True, relative xyz offsets are appended to the grouped
                features (the first MLP width is widened by 3 accordingly)
            pool_method: max_pool / avg_pool
        """
        super().__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
            )
            # Copy the per-scale spec: the original code aliased mlps[i] and
            # the "+= 3" below mutated the caller's list in place.
            mlp_spec = list(mlps[i])
            if use_xyz:
                mlp_spec[0] += 3

            shared_mlps = []
            for k in range(len(mlp_spec) - 1):
                shared_mlps.extend(
                    [
                        nn.Conv2d(
                            mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False
                        ),
                        nn.BatchNorm2d(mlp_spec[k + 1]),
                        nn.ReLU(),
                    ]
                )
            self.mlps.append(nn.Sequential(*shared_mlps))
        self.pool_method = pool_method

        self.init_weights()

    def init_weights(self):
        """Kaiming-init conv weights; unit/zero-init batch-norm affine params."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)

    def forward(
        self,
        xyz,
        xyz_batch_cnt,
        new_xyz,
        new_xyz_batch_cnt,
        features=None,
        empty_voxel_set_zeros=True,
    ):
        """
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3)
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :return:
            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []
        for k in range(len(self.groupers)):
            new_features, ball_idxs = self.groupers[k](
                xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
            )  # (M1 + M2, C, nsample)
            # Add a leading batch dim so Conv2d can treat (point, neighbor)
            # as the spatial axes.
            new_features = new_features.permute(1, 0, 2).unsqueeze(
                dim=0
            )  # (1, C, M1 + M2 ..., nsample)
            new_features = self.mlps[k](new_features)  # (1, C, M1 + M2 ..., nsample)

            # Pool across the nsample neighbors of each point.
            if self.pool_method == "max_pool":
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(
                    dim=-1
                )  # (1, C, M1 + M2 ...)
            elif self.pool_method == "avg_pool":
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(
                    dim=-1
                )  # (1, C, M1 + M2 ...)
            else:
                raise NotImplementedError
            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
            new_features_list.append(new_features)

        new_features = torch.cat(new_features_list, dim=1)  # (M1 + M2 ..., C)

        return new_xyz, new_features
class StackPointnetFPModule(nn.Module):
    """Stacked-batch feature-propagation (upsampling) module.

    Interpolates features from a sparse point set onto a denser one using
    inverse-distance weighting over the three nearest neighbors, then
    refines the result with a shared 1x1-conv MLP.
    """

    def __init__(self, *, mlp: List[int]):
        """
        Args:
            mlp: list of int, channel widths of the shared MLP
        """
        super().__init__()
        layers = []
        for cin, cout in zip(mlp[:-1], mlp[1:]):
            layers.append(nn.Conv2d(cin, cout, kernel_size=1, bias=False))
            layers.append(nn.BatchNorm2d(cout))
            layers.append(nn.ReLU())
        self.mlp = nn.Sequential(*layers)

    def forward(
        self,
        unknown,
        unknown_batch_cnt,
        known,
        known_batch_cnt,
        unknown_feats=None,
        known_feats=None,
    ):
        """
        Args:
            unknown: (N1 + N2 ..., 3) target points to interpolate onto
            known: (M1 + M2 ..., 3) source points carrying features
            unknow_feats: (N1 + N2 ..., C1) optional features already on the targets
            known_feats: (M1 + M2 ..., C2) features to propagate

        Returns:
            new_features: (N1 + N2 ..., C_out)
        """
        dist, idx = pointnet2_utils.three_nn(
            unknown, unknown_batch_cnt, known, known_batch_cnt
        )
        # Inverse-distance weights over the 3 nearest known points,
        # normalized to sum to 1 per target point.
        inv_dist = 1.0 / (dist + 1e-8)
        weight = inv_dist / torch.sum(inv_dist, dim=-1, keepdim=True)

        interpolated = pointnet2_utils.three_interpolate(known_feats, idx, weight)

        if unknown_feats is None:
            fused = interpolated
        else:
            # (N1 + N2 ..., C2 + C1)
            fused = torch.cat([interpolated, unknown_feats], dim=1)

        # (N, C) -> (1, C, N, 1) so the 1x1 Conv2d stack can process it.
        out = self.mlp(fused.permute(1, 0)[None, :, :, None])
        return out.squeeze(dim=0).squeeze(dim=-1).permute(1, 0)  # (N1 + N2 ..., C)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py | Python | import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from pcdet.ops.pointnet2.pointnet2_stack import pointnet2_stack_cuda as pointnet2
class BallQuery(Function):
    """Autograd wrapper around the stacked-batch CUDA ball-query kernel."""

    @staticmethod
    def forward(
        ctx,
        radius: float,
        nsample: int,
        xyz: torch.Tensor,
        xyz_batch_cnt: torch.Tensor,
        new_xyz: torch.Tensor,
        new_xyz_batch_cnt,
    ):
        """
        Args:
            ctx:
            radius: float, radius of the balls
            nsample: int, maximum number of features in the balls
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]

        Returns:
            idx: (M1 + M2, nsample) tensor with the indicies of the features that form the query balls
        """
        assert new_xyz.is_contiguous()
        assert new_xyz_batch_cnt.is_contiguous()
        assert xyz.is_contiguous()
        assert xyz_batch_cnt.is_contiguous()

        B = xyz_batch_cnt.shape[0]
        M = new_xyz.shape[0]
        idx = torch.cuda.IntTensor(M, nsample).zero_()

        pointnet2.ball_query_wrapper(
            B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx
        )
        # The kernel writes -1 in slot 0 when a center has no neighbor
        # within the radius; expose that as a mask and clamp to a valid index.
        empty_ball_mask = idx[:, 0] == -1
        idx[empty_ball_mask] = 0
        return idx, empty_ball_mask

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Non-differentiable op: one None per forward input
        # (radius, nsample, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt).
        # The original returned only 4 Nones, which autograd would reject
        # if backward were ever invoked.
        return None, None, None, None, None, None


ball_query = BallQuery.apply
class GroupingOperation(Function):
    # Autograd wrapper around the stacked-batch CUDA grouping kernels:
    # forward gathers per-neighborhood feature columns, backward
    # scatter-adds the gradients back onto the source features.

    @staticmethod
    def forward(
        ctx,
        features: torch.Tensor,
        features_batch_cnt: torch.Tensor,
        idx: torch.Tensor,
        idx_batch_cnt: torch.Tensor,
    ):
        """
        Args:
            ctx:
            features: (N1 + N2 ..., C) tensor of features to group
            features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with
            idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with
            idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with

        Returns:
            output: (M1 + M2, C, nsample) tensor
        """
        assert features.is_contiguous()
        assert features_batch_cnt.is_contiguous()
        assert idx.is_contiguous()
        assert idx_batch_cnt.is_contiguous()

        # Sanity-check that the flat tensors match their per-batch counts.
        assert (
            features.shape[0] == features_batch_cnt.sum()
        ), "features: %s, features_batch_cnt: %s" % (
            str(features.shape),
            str(features_batch_cnt),
        )
        assert idx.shape[0] == idx_batch_cnt.sum(), "idx: %s, idx_batch_cnt: %s" % (
            str(idx.shape),
            str(idx_batch_cnt),
        )

        M, nsample = idx.size()
        N, C = features.size()
        B = idx_batch_cnt.shape[0]
        # Uninitialized output buffer; every element is written by the kernel.
        output = torch.cuda.FloatTensor(M, C, nsample)

        pointnet2.group_points_wrapper(
            B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, output
        )

        # Save what backward needs to size and route the gradient.
        ctx.for_backwards = (B, N, idx, features_batch_cnt, idx_batch_cnt)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):
        """
        Args:
            ctx:
            grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward

        Returns:
            grad_features: (N1 + N2 ..., C) gradient of the features
        """
        B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards

        M, C, nsample = grad_out.size()
        # Zero-filled accumulator: the CUDA kernel scatter-adds into it.
        grad_features = Variable(torch.cuda.FloatTensor(N, C).zero_())

        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(
            B,
            M,
            C,
            N,
            nsample,
            grad_out_data,
            idx,
            idx_batch_cnt,
            features_batch_cnt,
            grad_features.data,
        )
        # One gradient per forward input; only `features` is differentiable.
        return grad_features, None, None, None


grouping_operation = GroupingOperation.apply
class QueryAndGroup(nn.Module):
    """Ball-query + grouping: for each query center, gathers up to nsample
    neighbor features (optionally prefixed with relative xyz offsets)."""

    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        Args:
            radius: float, radius of ball
            nsample: int, maximum number of features to gather in the ball
            use_xyz: if True, prepend the centered xyz offsets to the grouped features
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz

    def forward(
        self,
        xyz: torch.Tensor,
        xyz_batch_cnt: torch.Tensor,
        new_xyz: torch.Tensor,
        new_xyz_batch_cnt: torch.Tensor,
        features: torch.Tensor = None,
    ):
        """
        Args:
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            features: (N1 + N2 ..., C) tensor of features to group

        Returns:
            new_features: (M1 + M2, C, nsample) tensor
        """
        # Fixed: the original message interpolated new_xyz_batch_cnt here,
        # so a failure reported the wrong tensor.
        assert xyz.shape[0] == xyz_batch_cnt.sum(), "xyz: %s, xyz_batch_cnt: %s" % (
            str(xyz.shape),
            str(xyz_batch_cnt),
        )
        assert (
            new_xyz.shape[0] == new_xyz_batch_cnt.sum()
        ), "new_xyz: %s, new_xyz_batch_cnt: %s" % (
            str(new_xyz.shape),
            str(new_xyz_batch_cnt),
        )

        # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
        idx, empty_ball_mask = ball_query(
            self.radius, self.nsample, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt
        )
        grouped_xyz = grouping_operation(
            xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt
        )  # (M1 + M2, 3, nsample)
        # Center the neighbors on their query point; zero out centers
        # whose ball found no neighbors.
        grouped_xyz -= new_xyz.unsqueeze(-1)
        grouped_xyz[empty_ball_mask] = 0

        if features is not None:
            grouped_features = grouping_operation(
                features, xyz_batch_cnt, idx, new_xyz_batch_cnt
            )  # (M1 + M2, C, nsample)
            grouped_features[empty_ball_mask] = 0
            if self.use_xyz:
                new_features = torch.cat(
                    [grouped_xyz, grouped_features], dim=1
                )  # (M1 + M2 ..., C + 3, nsample)
            else:
                new_features = grouped_features
        else:
            assert (
                self.use_xyz
            ), "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz

        return new_features, idx
class FurthestPointSampling(Function):
    """Autograd wrapper around the batched CUDA farthest-point-sampling kernel."""

    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int):
        """
        Args:
            ctx:
            xyz: (B, N, 3) where N > npoint
            npoint: int, number of features in the sampled set

        Returns:
            output: (B, npoint) tensor containing the set
        """
        assert xyz.is_contiguous()

        B, N, _ = xyz.size()
        # Uninitialized index output; `temp` is the per-point running
        # min-distance buffer the kernel expects pre-filled with a large value.
        output = torch.cuda.IntTensor(B, npoint)
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)

        pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output

    @staticmethod
    def backward(ctx, a=None):
        # Fixed: the first parameter received the autograd ctx but was
        # misleadingly named `xyz`. Sampling indices are non-differentiable,
        # so both forward inputs get None gradients.
        return None, None


furthest_point_sample = FurthestPointSampling.apply
class ThreeNN(Function):
    """Autograd wrapper around the stacked-batch three-nearest-neighbor kernel."""

    @staticmethod
    def forward(ctx, unknown, unknown_batch_cnt, known, known_batch_cnt):
        """
        Args:
            ctx:
            unknown: (N1 + N2..., 3)
            unknown_batch_cnt: (batch_size), [N1, N2, ...]
            known: (M1 + M2..., 3)
            known_batch_cnt: (batch_size), [M1, M2, ...]

        Returns:
            dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors
            idx: (N1 + N2 ..., 3) index of the three nearest neighbors, range [0, M1+M2+...]
        """
        assert unknown.shape.__len__() == 2 and unknown.shape[1] == 3
        assert known.shape.__len__() == 2 and known.shape[1] == 3
        assert unknown_batch_cnt.__len__() == known_batch_cnt.__len__()

        # Kernel fills dist2 with squared distances; idx inherits the int
        # dtype/device of the batch-count tensor.
        dist2 = unknown.new_zeros(unknown.shape)
        idx = unknown_batch_cnt.new_zeros(unknown.shape).int()

        pointnet2.three_nn_wrapper(
            unknown.contiguous(),
            unknown_batch_cnt.contiguous(),
            known.contiguous(),
            known_batch_cnt.contiguous(),
            dist2,
            idx,
        )
        return torch.sqrt(dist2), idx

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Non-differentiable op: one None per forward input
        # (unknown, unknown_batch_cnt, known, known_batch_cnt).
        # The original returned only 2 Nones, which autograd would reject
        # if backward were ever invoked.
        return None, None, None, None


three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
    # Autograd wrapper around the stacked-batch weighted three-neighbor
    # interpolation kernel; backward distributes gradients back to the
    # source features with the same indices and weights.

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor):
        """
        Args:
            ctx:
            features: (M1 + M2 ..., C)
            idx: [N1 + N2 ..., 3]
            weight: [N1 + N2 ..., 3]

        Returns:
            out_tensor: (N1 + N2 ..., C)
        """
        assert idx.shape[0] == weight.shape[0] and idx.shape[1] == weight.shape[1] == 3

        # Stash indices, weights and the source row count for backward.
        ctx.three_interpolate_for_backward = (idx, weight, features.shape[0])
        output = features.new_zeros((idx.shape[0], features.shape[1]))
        pointnet2.three_interpolate_wrapper(
            features.contiguous(), idx.contiguous(), weight.contiguous(), output
        )
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):
        """
        Args:
            ctx:
            grad_out: (N1 + N2 ..., C)

        Returns:
            grad_features: (M1 + M2 ..., C)
        """
        idx, weight, M = ctx.three_interpolate_for_backward
        # Zero-filled accumulator; the kernel scatter-adds weighted grads.
        grad_features = grad_out.new_zeros((M, grad_out.shape[1]))
        pointnet2.three_interpolate_grad_wrapper(
            grad_out.contiguous(), idx.contiguous(), weight.contiguous(), grad_features
        )
        # Only `features` is differentiable; idx and weight get None.
        return grad_features, None, None


three_interpolate = ThreeInterpolate.apply
if __name__ == "__main__":
    # No standalone behavior; this module only provides autograd wrappers
    # around the compiled pointnet2 CUDA extension.
    pass
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/ball_query.cpp | C++ | /*
Stacked-batch-data version of ball query, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "ball_query_gpu.h"
#define CHECK_CUDA(x) \
do { \
if (!x.type().is_cuda()) { \
fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, \
__LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
if (!x.is_contiguous()) { \
fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, \
__FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
// Python-facing entry point for the stacked-batch ball query: validates
// tensors, unpacks raw pointers, and dispatches to the CUDA launcher.
// Returns 1 on success (failures abort inside the launcher/macros).
int ball_query_wrapper_stack(int B,
                             int M,
                             float radius,
                             int nsample,
                             at::Tensor new_xyz_tensor,
                             at::Tensor new_xyz_batch_cnt_tensor,
                             at::Tensor xyz_tensor,
                             at::Tensor xyz_batch_cnt_tensor,
                             at::Tensor idx_tensor) {
    CHECK_INPUT(new_xyz_tensor);
    CHECK_INPUT(xyz_tensor);
    CHECK_INPUT(new_xyz_batch_cnt_tensor);
    CHECK_INPUT(xyz_batch_cnt_tensor);
    const float *new_xyz = new_xyz_tensor.data<float>();
    const float *xyz = xyz_tensor.data<float>();
    const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data<int>();
    const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data<int>();
    int *idx = idx_tensor.data<int>();

    ball_query_kernel_launcher_stack(B, M, radius, nsample, new_xyz,
                                     new_xyz_batch_cnt, xyz, xyz_batch_cnt,
                                     idx);
    return 1;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.cu | CUDA | /*
Stacked-batch-data version of ball query, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ball_query_gpu.h"
#include "cuda_utils.h"
// Stacked-batch ball query: one thread per query center. Each thread
// first locates its batch element from the per-batch counts, then scans
// that batch's points for up to nsample neighbors within `radius`.
__global__ void ball_query_kernel_stack(int B,
                                        int M,
                                        float radius,
                                        int nsample,
                                        const float *new_xyz,
                                        const int *new_xyz_batch_cnt,
                                        const float *xyz,
                                        const int *xyz_batch_cnt,
                                        int *idx) {
    // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features
    // :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
    // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query
    // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
    // output:
    //      idx: (M, nsample)
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pt_idx >= M) return;

    // Walk the batch counts to find which batch this query center is in.
    int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
    for (int k = 1; k < B; k++) {
        if (pt_idx < pt_cnt) break;
        pt_cnt += new_xyz_batch_cnt[k];
        bs_idx = k;
    }

    // Offset into the flat xyz array where this batch's points start.
    int xyz_batch_start_idx = 0;
    for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
    // for (int k = 0; k < bs_idx; k++) new_xyz_batch_start_idx +=
    // new_xyz_batch_cnt[k];

    new_xyz += pt_idx * 3;
    xyz += xyz_batch_start_idx * 3;
    idx += pt_idx * nsample;

    float radius2 = radius * radius;
    float new_x = new_xyz[0];
    float new_y = new_xyz[1];
    float new_z = new_xyz[2];
    int n = xyz_batch_cnt[bs_idx];

    int cnt = 0;
    for (int k = 0; k < n; ++k) {
        float x = xyz[k * 3 + 0];
        float y = xyz[k * 3 + 1];
        float z = xyz[k * 3 + 2];
        float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
                   (new_z - z) * (new_z - z);
        if (d2 < radius2) {
            if (cnt == 0) {
                // Pre-fill every slot with the first neighbor so balls with
                // fewer than nsample members still hold valid indices.
                for (int l = 0; l < nsample; ++l) {
                    idx[l] = k;
                }
            }
            idx[cnt] = k;
            ++cnt;
            if (cnt >= nsample) break;
        }
    }
    // Sentinel consumed by the Python side (BallQuery.forward) to build
    // the empty-ball mask.
    if (cnt == 0) idx[0] = -1;
}
// Host-side launcher for ball_query_kernel_stack: one thread per query
// center, aborting the process if the kernel launch reports an error.
void ball_query_kernel_launcher_stack(int B,
                                      int M,
                                      float radius,
                                      int nsample,
                                      const float *new_xyz,
                                      const int *new_xyz_batch_cnt,
                                      const float *xyz,
                                      const int *xyz_batch_cnt,
                                      int *idx) {
    // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features
    // :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
    // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query
    // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
    // output:
    //      idx: (M, nsample)
    cudaError_t err;

    dim3 blocks(
        DIVUP(M, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    ball_query_kernel_stack<<<blocks, threads>>>(B, M, radius, nsample, new_xyz,
                                                 new_xyz_batch_cnt, xyz,
                                                 xyz_batch_cnt, idx);
    // cudaDeviceSynchronize();  // for using printf in kernel function
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.h | C/C++ Header | /*
Stacked-batch-data version of ball query, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#ifndef _STACK_BALL_QUERY_GPU_H
#define _STACK_BALL_QUERY_GPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
int ball_query_wrapper_stack(int B,
int M,
float radius,
int nsample,
at::Tensor new_xyz_tensor,
at::Tensor new_xyz_batch_cnt_tensor,
at::Tensor xyz_tensor,
at::Tensor xyz_batch_cnt_tensor,
at::Tensor idx_tensor);
void ball_query_kernel_launcher_stack(int B,
int M,
float radius,
int nsample,
const float *new_xyz,
const int *new_xyz_batch_cnt,
const float *xyz,
const int *xyz_batch_cnt,
int *idx);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/cuda_utils.h | C/C++ Header | #ifndef _STACK_CUDA_UTILS_H
#define _STACK_CUDA_UTILS_H
#include <cmath>
// Thread-block size shared by every launcher in this extension.
#define THREADS_PER_BLOCK 256
// Integer ceiling division: number of blocks needed to cover m items.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/group_points.cpp | C++ | /*
Stacked-batch-data version of point grouping, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "group_points_gpu.h"
#define CHECK_CUDA(x) \
do { \
if (!x.type().is_cuda()) { \
fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, \
__LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
if (!x.is_contiguous()) { \
fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, \
__FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
// Python-facing entry point for the grouping backward pass: validates
// tensors, unpacks raw pointers, and dispatches to the CUDA launcher.
// grad_features is expected zero-initialized (the kernel scatter-adds).
// Returns 1 on success (failures abort inside the launcher/macros).
int group_points_grad_wrapper_stack(int B,
                                    int M,
                                    int C,
                                    int N,
                                    int nsample,
                                    at::Tensor grad_out_tensor,
                                    at::Tensor idx_tensor,
                                    at::Tensor idx_batch_cnt_tensor,
                                    at::Tensor features_batch_cnt_tensor,
                                    at::Tensor grad_features_tensor) {
    CHECK_INPUT(grad_out_tensor);
    CHECK_INPUT(idx_tensor);
    CHECK_INPUT(idx_batch_cnt_tensor);
    CHECK_INPUT(features_batch_cnt_tensor);
    CHECK_INPUT(grad_features_tensor);

    const float *grad_out = grad_out_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    const int *idx_batch_cnt = idx_batch_cnt_tensor.data<int>();
    const int *features_batch_cnt = features_batch_cnt_tensor.data<int>();
    float *grad_features = grad_features_tensor.data<float>();

    group_points_grad_kernel_launcher_stack(B, M, C, N, nsample, grad_out, idx,
                                            idx_batch_cnt, features_batch_cnt,
                                            grad_features);
    return 1;
}
// Python-facing entry point for the grouping forward pass: validates
// device/layout, unpacks raw pointers, and dispatches to the CUDA
// launcher. Returns 1 on success (failures abort inside the macros).
int group_points_wrapper_stack(int B,
                               int M,
                               int C,
                               int nsample,
                               at::Tensor features_tensor,
                               at::Tensor features_batch_cnt_tensor,
                               at::Tensor idx_tensor,
                               at::Tensor idx_batch_cnt_tensor,
                               at::Tensor out_tensor) {
    // Every tensor must be CUDA + contiguous before touching raw memory.
    CHECK_INPUT(features_tensor);
    CHECK_INPUT(features_batch_cnt_tensor);
    CHECK_INPUT(idx_tensor);
    CHECK_INPUT(idx_batch_cnt_tensor);
    CHECK_INPUT(out_tensor);

    float *out_ptr = out_tensor.data<float>();
    const int *idx_ptr = idx_tensor.data<int>();
    const int *idx_cnt_ptr = idx_batch_cnt_tensor.data<int>();
    const float *feat_ptr = features_tensor.data<float>();
    const int *feat_cnt_ptr = features_batch_cnt_tensor.data<int>();

    group_points_kernel_launcher_stack(B, M, C, nsample, feat_ptr,
                                       feat_cnt_ptr, idx_ptr, idx_cnt_ptr,
                                       out_ptr);
    return 1;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.cu | CUDA | /*
Stacked-batch-data version of point grouping, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "group_points_gpu.h"
// Backward of the stacked grouping: each thread owns one
// (point, channel, sample) element of grad_out and scatter-adds it onto
// the source feature it was gathered from. atomicAdd is required because
// a source feature can appear in many neighborhoods.
__global__ void group_points_grad_kernel_stack(int B,
                                               int M,
                                               int C,
                                               int N,
                                               int nsample,
                                               const float *grad_out,
                                               const int *idx,
                                               const int *idx_batch_cnt,
                                               const int *features_batch_cnt,
                                               float *grad_features) {
    // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the
    // output from forward :param idx: (M1 + M2 ..., nsample) tensor containing
    // the indicies of features to group with :param idx_batch_cnt: (batch_size)
    // [M1 + M2 ...] tensor containing the indicies of features to group with
    // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing
    // the indicies of features to group with :return:
    //     grad_features: (N1 + N2 ..., C) gradient of the features
    // Decompose the flat thread index into (point, channel, sample).
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int sample_idx = index % nsample;
    int C_idx = (index / nsample) % C;
    int pt_idx = (index / nsample / C);

    if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return;

    // Locate this point's batch element from the per-batch counts.
    int bs_idx = 0, pt_cnt = idx_batch_cnt[0];
    for (int k = 1; k < B; k++) {
        if (pt_idx < pt_cnt) break;
        pt_cnt += idx_batch_cnt[k];
        bs_idx = k;
    }

    // Offset into the flat feature array where this batch's rows start
    // (idx values are batch-local).
    int features_batch_start_idx = 0;
    for (int k = 0; k < bs_idx; k++)
        features_batch_start_idx += features_batch_cnt[k];

    grad_out += pt_idx * C * nsample + C_idx * nsample + sample_idx;
    idx += pt_idx * nsample + sample_idx;
    grad_features += (features_batch_start_idx + idx[0]) * C + C_idx;

    atomicAdd(grad_features, grad_out[0]);
}
// Host-side launcher for group_points_grad_kernel_stack: one thread per
// grad_out element (M x C x nsample). grad_features is expected
// zero-initialized by the caller since the kernel uses atomicAdd.
void group_points_grad_kernel_launcher_stack(int B,
                                             int M,
                                             int C,
                                             int N,
                                             int nsample,
                                             const float *grad_out,
                                             const int *idx,
                                             const int *idx_batch_cnt,
                                             const int *features_batch_cnt,
                                             float *grad_features) {
    // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the
    // output from forward :param idx: (M1 + M2 ..., nsample) tensor containing
    // the indicies of features to group with :param idx_batch_cnt: (batch_size)
    // [M1 + M2 ...] tensor containing the indicies of features to group with
    // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing
    // the indicies of features to group with :return:
    //     grad_features: (N1 + N2 ..., C) gradient of the features
    cudaError_t err;
    // dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);  //
    // blockIdx.x(col), blockIdx.y(row)
    dim3 blocks(DIVUP(M * C * nsample,
                      THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    group_points_grad_kernel_stack<<<blocks, threads>>>(
        B, M, C, N, nsample, grad_out, idx, idx_batch_cnt,
        features_batch_cnt, grad_features);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
// Forward of the stacked grouping: each thread copies one
// (point, channel, sample) element from the (batch-local) source feature
// selected by idx into the grouped output tensor.
__global__ void group_points_kernel_stack(int B,
                                          int M,
                                          int C,
                                          int nsample,
                                          const float *features,
                                          const int *features_batch_cnt,
                                          const int *idx,
                                          const int *idx_batch_cnt,
                                          float *out) {
    // :param features: (N1 + N2 ..., C) tensor of features to group
    // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing
    // the indicies of features to group with :param idx: (M1 + M2 ..., nsample)
    // tensor containing the indicies of features to group with :param
    // idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies
    // of features to group with :return:
    //     output: (M1 + M2, C, nsample) tensor
    // Decompose the flat thread index into (point, channel, sample).
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int sample_idx = index % nsample;
    int C_idx = (index / nsample) % C;
    int pt_idx = (index / nsample / C);

    if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return;

    // Locate this point's batch element from the per-batch counts.
    int bs_idx = 0, pt_cnt = idx_batch_cnt[0];
    for (int k = 1; k < B; k++) {
        if (pt_idx < pt_cnt) break;
        pt_cnt += idx_batch_cnt[k];
        bs_idx = k;
    }

    // Offset into the flat feature array where this batch's rows start
    // (idx values are batch-local).
    int features_batch_start_idx = 0;
    for (int k = 0; k < bs_idx; k++)
        features_batch_start_idx += features_batch_cnt[k];
    features += features_batch_start_idx * C;

    idx += pt_idx * nsample + sample_idx;
    int in_idx = idx[0] * C + C_idx;
    int out_idx = pt_idx * C * nsample + C_idx * nsample + sample_idx;

    out[out_idx] = features[in_idx];
}
// Host-side launcher for group_points_kernel_stack: one thread per output
// element (M x C x nsample); aborts the process on launch errors.
void group_points_kernel_launcher_stack(int B,
                                        int M,
                                        int C,
                                        int nsample,
                                        const float *features,
                                        const int *features_batch_cnt,
                                        const int *idx,
                                        const int *idx_batch_cnt,
                                        float *out) {
    // :param features: (N1 + N2 ..., C) tensor of features to group
    // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing
    // the indicies of features to group with :param idx: (M1 + M2 ..., nsample)
    // tensor containing the indicies of features to group with :param
    // idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies
    // of features to group with :return:
    //     output: (M1 + M2, C, nsample) tensor
    cudaError_t err;
    dim3 blocks(DIVUP(M * C * nsample,
                      THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    group_points_kernel_stack<<<blocks, threads>>>(B, M, C, nsample, features,
                                                   features_batch_cnt, idx,
                                                   idx_batch_cnt, out);
    // cudaDeviceSynchronize();  // for using printf in kernel function
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.h | C/C++ Header | /*
Stacked-batch-data version of point grouping, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#ifndef _STACK_GROUP_POINTS_GPU_H
#define _STACK_GROUP_POINTS_GPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
int group_points_wrapper_stack(int B,
int M,
int C,
int nsample,
at::Tensor features_tensor,
at::Tensor features_batch_cnt_tensor,
at::Tensor idx_tensor,
at::Tensor idx_batch_cnt_tensor,
at::Tensor out_tensor);
void group_points_kernel_launcher_stack(int B,
int M,
int C,
int nsample,
const float *features,
const int *features_batch_cnt,
const int *idx,
const int *idx_batch_cnt,
float *out);
int group_points_grad_wrapper_stack(int B,
int M,
int C,
int N,
int nsample,
at::Tensor grad_out_tensor,
at::Tensor idx_tensor,
at::Tensor idx_batch_cnt_tensor,
at::Tensor features_batch_cnt_tensor,
at::Tensor grad_features_tensor);
void group_points_grad_kernel_launcher_stack(int B,
int M,
int C,
int N,
int nsample,
const float *grad_out,
const int *idx,
const int *idx_batch_cnt,
const int *features_batch_cnt,
float *grad_features);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/interpolate.cpp | C++ | /*
Stacked-batch-data version of point interpolation, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "interpolate_gpu.h"
#define CHECK_CUDA(x) \
do { \
if (!x.type().is_cuda()) { \
fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, \
__LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
if (!x.is_contiguous()) { \
fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, \
__FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
void three_nn_wrapper_stack(at::Tensor unknown_tensor,
                            at::Tensor unknown_batch_cnt_tensor,
                            at::Tensor known_tensor,
                            at::Tensor known_batch_cnt_tensor,
                            at::Tensor dist2_tensor,
                            at::Tensor idx_tensor) {
    // Host wrapper: validate the stacked-batch tensors and hand raw pointers
    // to the CUDA launcher.
    //   unknown: (N1 + N2 ..., 3); unknown_batch_cnt: (batch_size) [N1, N2, ...]
    //   known:   (M1 + M2 ..., 3); known_batch_cnt:   (batch_size) [M1, M2, ...]
    // Written in place:
    //   dist2: (N1 + N2 ..., 3) squared distances to the 3 nearest neighbors
    //   idx:   (N1 + N2 ..., 3) indices of those neighbors
    CHECK_INPUT(unknown_tensor);
    CHECK_INPUT(unknown_batch_cnt_tensor);
    CHECK_INPUT(known_tensor);
    CHECK_INPUT(known_batch_cnt_tensor);
    CHECK_INPUT(dist2_tensor);
    CHECK_INPUT(idx_tensor);

    const int num_batches = unknown_batch_cnt_tensor.size(0);
    const int num_unknown = unknown_tensor.size(0);
    const int num_known = known_tensor.size(0);

    three_nn_kernel_launcher_stack(
        num_batches, num_unknown, num_known, unknown_tensor.data<float>(),
        unknown_batch_cnt_tensor.data<int>(), known_tensor.data<float>(),
        known_batch_cnt_tensor.data<int>(), dist2_tensor.data<float>(),
        idx_tensor.data<int>());
}
void three_interpolate_wrapper_stack(at::Tensor features_tensor,
                                     at::Tensor idx_tensor,
                                     at::Tensor weight_tensor,
                                     at::Tensor out_tensor) {
    // Weighted 3-neighbor feature interpolation (stacked-batch layout).
    //   features: (M1 + M2 ..., C)
    //   idx / weight: (N1 + N2 ..., 3)
    // Writes out: (N1 + N2 ..., C).
    CHECK_INPUT(features_tensor);
    CHECK_INPUT(idx_tensor);
    CHECK_INPUT(weight_tensor);
    CHECK_INPUT(out_tensor);

    const int num_out_pts = out_tensor.size(0);
    const int num_channels = features_tensor.size(1);

    three_interpolate_kernel_launcher_stack(
        num_out_pts, num_channels, features_tensor.data<float>(),
        idx_tensor.data<int>(), weight_tensor.data<float>(),
        out_tensor.data<float>());
}
void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor,
                                          at::Tensor idx_tensor,
                                          at::Tensor weight_tensor,
                                          at::Tensor grad_features_tensor) {
    // Backward of the 3-neighbor interpolation: scatter grad_out back onto
    // the known points.
    //   grad_out: (N1 + N2 ..., C); idx / weight: (N1 + N2 ..., 3)
    // Accumulates into grad_features: (M1 + M2 ..., C).
    CHECK_INPUT(grad_out_tensor);
    CHECK_INPUT(idx_tensor);
    CHECK_INPUT(weight_tensor);
    CHECK_INPUT(grad_features_tensor);

    const int num_pts = grad_out_tensor.size(0);
    const int num_channels = grad_out_tensor.size(1);

    three_interpolate_grad_kernel_launcher_stack(
        num_pts, num_channels, grad_out_tensor.data<float>(),
        idx_tensor.data<int>(), weight_tensor.data<float>(),
        grad_features_tensor.data<float>());
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.cu | CUDA | /*
Stacked-batch-data version of point interpolation, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "interpolate_gpu.h"
__global__ void three_nn_kernel_stack(int batch_size,
                                      int N,
                                      int M,
                                      const float *unknown,
                                      const int *unknown_batch_cnt,
                                      const float *known,
                                      const int *known_batch_cnt,
                                      float *dist2,
                                      int *idx) {
    // For every "unknown" point, find its three nearest "known" points within
    // the same batch (stacked-batch layout). One thread per unknown point.
    //   unknown: (N1 + N2 ..., 3); unknown_batch_cnt: (batch_size) [N1, N2, ...]
    //   known:   (M1 + M2 ..., 3); known_batch_cnt:   (batch_size) [M1, M2, ...]
    // Outputs:
    //   dist2: (N1 + N2 ..., 3) squared L2 distances to the three neighbors
    //   idx:   (N1 + N2 ..., 3) neighbor indices into the stacked `known` array
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pt_idx >= N) return;

    // Locate this point's batch by walking the per-batch counts.
    int bs_idx = 0, pt_cnt = unknown_batch_cnt[0];
    for (int k = 1; k < batch_size; k++) {
        if (pt_idx < pt_cnt) break;
        pt_cnt += unknown_batch_cnt[k];
        bs_idx = k;
    }

    int cur_num_known_points = known_batch_cnt[bs_idx];

    // Start of this batch's slice inside the stacked `known` array.
    int known_batch_start_idx = 0;
    for (int k = 0; k < bs_idx; k++)
        known_batch_start_idx += known_batch_cnt[k];

    known += known_batch_start_idx * 3;
    unknown += pt_idx * 3;
    dist2 += pt_idx * 3;
    idx += pt_idx * 3;

    float ux = unknown[0];
    float uy = unknown[1];
    float uz = unknown[2];

    // Track the three smallest squared distances (best1 <= best2 <= best3).
    double best1 = 1e40, best2 = 1e40, best3 = 1e40;
    int besti1 = 0, besti2 = 0, besti3 = 0;
    for (int k = 0; k < cur_num_known_points; ++k) {
        float x = known[k * 3 + 0];
        float y = known[k * 3 + 1];
        float z = known[k * 3 + 2];
        float d =
            (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
        if (d < best1) {
            best3 = best2;
            besti3 = besti2;
            best2 = best1;
            besti2 = besti1;
            best1 = d;
            besti1 = k;
        } else if (d < best2) {
            best3 = best2;
            besti3 = besti2;
            best2 = d;
            besti2 = k;
        } else if (d < best3) {
            best3 = d;
            besti3 = k;
        }
    }
    dist2[0] = best1;
    dist2[1] = best2;
    dist2[2] = best3;
    // Shift batch-local indices to global positions in the stacked array.
    idx[0] = besti1 + known_batch_start_idx;
    idx[1] = besti2 + known_batch_start_idx;
    idx[2] = besti3 + known_batch_start_idx;
}
void three_nn_kernel_launcher_stack(int batch_size,
                                    int N,
                                    int M,
                                    const float *unknown,
                                    const int *unknown_batch_cnt,
                                    const float *known,
                                    const int *known_batch_cnt,
                                    float *dist2,
                                    int *idx) {
    // One thread per unknown point; see three_nn_kernel_stack for the
    // stacked-batch tensor layouts and outputs.
    dim3 grid(DIVUP(N, THREADS_PER_BLOCK));
    dim3 block(THREADS_PER_BLOCK);
    three_nn_kernel_stack<<<grid, block>>>(batch_size, N, M, unknown,
                                           unknown_batch_cnt, known,
                                           known_batch_cnt, dist2, idx);

    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
        exit(-1);
    }
}
__global__ void three_interpolate_kernel_stack(int N,
                                               int channels,
                                               const float *features,
                                               const int *idx,
                                               const float *weight,
                                               float *out) {
    // Forward 3-NN interpolation: each output point is the weighted sum of
    // the features of its three nearest known points.
    //   features: (M1 + M2 ..., C)
    //   idx / weight: (N1 + N2 ..., 3); idx values are global offsets into
    //       `features` (see three_nn_kernel_stack)
    //   out: (N1 + N2 ..., C)
    // Grid: x covers points, y covers channels; one thread per (point, channel).
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pt_idx >= N || c_idx >= channels) return;

    weight += pt_idx * 3;
    idx += pt_idx * 3;
    out += pt_idx * channels + c_idx;

    out[0] = weight[0] * features[idx[0] * channels + c_idx] +
             weight[1] * features[idx[1] * channels + c_idx] +
             weight[2] * features[idx[2] * channels + c_idx];
}
void three_interpolate_kernel_launcher_stack(int N,
                                             int channels,
                                             const float *features,
                                             const int *idx,
                                             const float *weight,
                                             float *out) {
    // Grid x covers the N output points, grid y covers the channels; see
    // three_interpolate_kernel_stack for the tensor layouts.
    dim3 grid(DIVUP(N, THREADS_PER_BLOCK), channels);
    dim3 block(THREADS_PER_BLOCK);
    three_interpolate_kernel_stack<<<grid, block>>>(N, channels, features,
                                                    idx, weight, out);

    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
        exit(-1);
    }
}
__global__ void three_interpolate_grad_kernel_stack(int N,
                                                    int channels,
                                                    const float *grad_out,
                                                    const int *idx,
                                                    const float *weight,
                                                    float *grad_features) {
    // Backward of 3-NN interpolation: scatter each output gradient to its
    // three source points, weighted by the interpolation weights.
    //   grad_out: (N1 + N2 ..., C)
    //   idx / weight: (N1 + N2 ..., 3)
    //   grad_features: (M1 + M2 ..., C), accumulated in place
    // One thread per (point, channel); atomicAdd because several output
    // points may share a source point.
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pt_idx >= N || c_idx >= channels) return;

    grad_out += pt_idx * channels + c_idx;
    weight += pt_idx * 3;
    idx += pt_idx * 3;

    atomicAdd(grad_features + idx[0] * channels + c_idx,
              grad_out[0] * weight[0]);
    atomicAdd(grad_features + idx[1] * channels + c_idx,
              grad_out[0] * weight[1]);
    atomicAdd(grad_features + idx[2] * channels + c_idx,
              grad_out[0] * weight[2]);
}
void three_interpolate_grad_kernel_launcher_stack(int N,
                                                  int channels,
                                                  const float *grad_out,
                                                  const int *idx,
                                                  const float *weight,
                                                  float *grad_features) {
    // Grid x covers the N points, grid y the channels; see
    // three_interpolate_grad_kernel_stack for the tensor layouts.
    dim3 grid(DIVUP(N, THREADS_PER_BLOCK), channels);
    dim3 block(THREADS_PER_BLOCK);
    three_interpolate_grad_kernel_stack<<<grid, block>>>(
        N, channels, grad_out, idx, weight, grad_features);

    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
        exit(-1);
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.h | C/C++ Header | #ifndef _INTERPOLATE_GPU_H
#define _INTERPOLATE_GPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <torch/serialize/tensor.h>
#include <vector>
void three_nn_wrapper_stack(at::Tensor unknown_tensor,
at::Tensor unknown_batch_cnt_tensor,
at::Tensor known_tensor,
at::Tensor known_batch_cnt_tensor,
at::Tensor dist2_tensor,
at::Tensor idx_tensor);
void three_interpolate_wrapper_stack(at::Tensor features_tensor,
at::Tensor idx_tensor,
at::Tensor weight_tensor,
at::Tensor out_tensor);
void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor,
at::Tensor idx_tensor,
at::Tensor weight_tensor,
at::Tensor grad_features_tensor);
void three_nn_kernel_launcher_stack(int batch_size,
int N,
int M,
const float *unknown,
const int *unknown_batch_cnt,
const float *known,
const int *known_batch_cnt,
float *dist2,
int *idx);
void three_interpolate_kernel_launcher_stack(int N,
int channels,
const float *features,
const int *idx,
const float *weight,
float *out);
void three_interpolate_grad_kernel_launcher_stack(int N,
int channels,
const float *grad_out,
const int *idx,
const float *weight,
float *grad_features);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/pointnet2_api.cpp | C++ | #include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include "ball_query_gpu.h"
#include "group_points_gpu.h"
#include "interpolate_gpu.h"
#include "sampling_gpu.h"
// Python bindings for the stack-batched PointNet++ CUDA ops. The exported
// names (without the "_stack" suffix) are what the Python-side autograd
// wrappers call.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("ball_query_wrapper", &ball_query_wrapper_stack,
          "ball_query_wrapper_stack");
    m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper,
          "furthest_point_sampling_wrapper");
    m.def("group_points_wrapper", &group_points_wrapper_stack,
          "group_points_wrapper_stack");
    m.def("group_points_grad_wrapper", &group_points_grad_wrapper_stack,
          "group_points_grad_wrapper_stack");
    m.def("three_nn_wrapper", &three_nn_wrapper_stack,
          "three_nn_wrapper_stack");
    m.def("three_interpolate_wrapper", &three_interpolate_wrapper_stack,
          "three_interpolate_wrapper_stack");
    m.def("three_interpolate_grad_wrapper",
          &three_interpolate_grad_wrapper_stack,
          "three_interpolate_grad_wrapper_stack");
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/sampling.cpp | C++ | #include <ATen/cuda/CUDAContext.h>
#include <torch/serialize/tensor.h>
#include <vector>
#include "sampling_gpu.h"
#define CHECK_CUDA(x) \
do { \
if (!x.type().is_cuda()) { \
fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, \
__LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
if (!x.is_contiguous()) { \
fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, \
__FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
int furthest_point_sampling_wrapper(int b,
                                    int n,
                                    int m,
                                    at::Tensor points_tensor,
                                    at::Tensor temp_tensor,
                                    at::Tensor idx_tensor) {
    // Farthest-point sampling of m out of n points for each of b batches.
    //   points: (B, N, 3); temp: (B, N) scratch distances; idx: (B, M) output.
    CHECK_INPUT(points_tensor);
    CHECK_INPUT(temp_tensor);
    CHECK_INPUT(idx_tensor);

    furthest_point_sampling_kernel_launcher(
        b, n, m, points_tensor.data<float>(), temp_tensor.data<float>(),
        idx_tensor.data<int>());
    return 1;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.cu | CUDA | #include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
#define TOTAL_THREADS 1024

// Largest power of two that is <= work_size, clamped to [1, TOTAL_THREADS].
// Computed with integer arithmetic: the previous floating-point
// log(work_size)/log(2.0) could round log2 of an exact power of two (e.g.
// 1024) down to 9.999..., truncating to 9 and halving the launched thread
// count. Integer doubling is exact and also well-defined for work_size <= 0
// (returns 1, where the log of a non-positive number was undefined).
inline int opt_n_threads(int work_size) {
    int n_threads = 1;
    while (n_threads * 2 <= work_size && n_threads * 2 <= TOTAL_THREADS) {
        n_threads *= 2;
    }
    return n_threads;
}
// Reduction step for the FPS argmax: fold shared-memory slot idx2 into slot
// idx1, keeping the larger distance and the index of the point that owns it.
__device__ void __update(float *__restrict__ dists,
                         int *__restrict__ dists_i,
                         int idx1,
                         int idx2) {
    const float v1 = dists[idx1], v2 = dists[idx2];
    const int i1 = dists_i[idx1], i2 = dists_i[idx2];
    dists[idx1] = max(v1, v2);
    dists_i[idx1] = v2 > v1 ? i2 : i1;
}
// Iterative farthest-point sampling; one thread block per batch element.
//   dataset: (B, N, 3) input xyz
//   temp:    (B, N) scratch holding each point's min squared distance to the
//            already-selected set (updated in place each round)
//   idxs:    (B, M) output sample indices
// block_size is the compile-time thread count used to size the shared-memory
// reduction arrays.
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
    int b,
    int n,
    int m,
    const float *__restrict__ dataset,
    float *__restrict__ temp,
    int *__restrict__ idxs) {
    if (m <= 0) return;
    __shared__ float dists[block_size];
    __shared__ int dists_i[block_size];

    // Advance all pointers to this block's batch element.
    int batch_index = blockIdx.x;
    dataset += batch_index * n * 3;
    temp += batch_index * n;
    idxs += batch_index * m;

    int tid = threadIdx.x;
    const int stride = block_size;

    // The first sample is always point 0.
    int old = 0;
    if (threadIdx.x == 0) idxs[0] = old;

    __syncthreads();
    for (int j = 1; j < m; j++) {
        // Each thread scans a strided slice of the points, updating temp[]
        // with the distance to the newest sample and tracking its local
        // farthest candidate (best / besti).
        int besti = 0;
        float best = -1;
        float x1 = dataset[old * 3 + 0];
        float y1 = dataset[old * 3 + 1];
        float z1 = dataset[old * 3 + 2];
        for (int k = tid; k < n; k += stride) {
            float x2, y2, z2;
            x2 = dataset[k * 3 + 0];
            y2 = dataset[k * 3 + 1];
            z2 = dataset[k * 3 + 2];

            float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) +
                      (z2 - z1) * (z2 - z1);
            float d2 = min(d, temp[k]);
            temp[k] = d2;
            besti = d2 > best ? k : besti;
            best = d2 > best ? d2 : best;
        }
        dists[tid] = best;
        dists_i[tid] = besti;
        __syncthreads();

        // Shared-memory tree reduction: fold the per-thread candidates down
        // to slot 0, which then holds the block-wide farthest point.
        if (block_size >= 1024) {
            if (tid < 512) {
                __update(dists, dists_i, tid, tid + 512);
            }
            __syncthreads();
        }

        if (block_size >= 512) {
            if (tid < 256) {
                __update(dists, dists_i, tid, tid + 256);
            }
            __syncthreads();
        }
        if (block_size >= 256) {
            if (tid < 128) {
                __update(dists, dists_i, tid, tid + 128);
            }
            __syncthreads();
        }
        if (block_size >= 128) {
            if (tid < 64) {
                __update(dists, dists_i, tid, tid + 64);
            }
            __syncthreads();
        }
        if (block_size >= 64) {
            if (tid < 32) {
                __update(dists, dists_i, tid, tid + 32);
            }
            __syncthreads();
        }
        if (block_size >= 32) {
            if (tid < 16) {
                __update(dists, dists_i, tid, tid + 16);
            }
            __syncthreads();
        }
        if (block_size >= 16) {
            if (tid < 8) {
                __update(dists, dists_i, tid, tid + 8);
            }
            __syncthreads();
        }
        if (block_size >= 8) {
            if (tid < 4) {
                __update(dists, dists_i, tid, tid + 4);
            }
            __syncthreads();
        }
        if (block_size >= 4) {
            if (tid < 2) {
                __update(dists, dists_i, tid, tid + 2);
            }
            __syncthreads();
        }
        if (block_size >= 2) {
            if (tid < 1) {
                __update(dists, dists_i, tid, tid + 1);
            }
            __syncthreads();
        }

        // Every thread reads the winner; thread 0 records it.
        old = dists_i[0];
        if (tid == 0) idxs[j] = old;
    }
}
void furthest_point_sampling_kernel_launcher(
    int b, int n, int m, const float *dataset, float *temp, int *idxs) {
    // Launch one block per batch element with a power-of-two thread count
    // matched to n. The kernel's block_size template argument must equal the
    // launched thread count, hence the explicit dispatch below.
    //   dataset: (B, N, 3); temp: (B, N) scratch; idxs: (B, M) output.
    unsigned int n_threads = opt_n_threads(n);

#define LAUNCH_FPS_KERNEL(BS) \
    furthest_point_sampling_kernel<BS><<<b, n_threads>>>(b, n, m, dataset, temp, idxs)

    switch (n_threads) {
        case 1024:
            LAUNCH_FPS_KERNEL(1024);
            break;
        case 512:
            LAUNCH_FPS_KERNEL(512);
            break;
        case 256:
            LAUNCH_FPS_KERNEL(256);
            break;
        case 128:
            LAUNCH_FPS_KERNEL(128);
            break;
        case 64:
            LAUNCH_FPS_KERNEL(64);
            break;
        case 32:
            LAUNCH_FPS_KERNEL(32);
            break;
        case 16:
            LAUNCH_FPS_KERNEL(16);
            break;
        case 8:
            LAUNCH_FPS_KERNEL(8);
            break;
        case 4:
            LAUNCH_FPS_KERNEL(4);
            break;
        case 2:
            LAUNCH_FPS_KERNEL(2);
            break;
        case 1:
            LAUNCH_FPS_KERNEL(1);
            break;
        default:
            LAUNCH_FPS_KERNEL(512);
    }
#undef LAUNCH_FPS_KERNEL

    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
        exit(-1);
    }
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.h | C/C++ Header | #ifndef _SAMPLING_GPU_H
#define _SAMPLING_GPU_H
#include <ATen/cuda/CUDAContext.h>
#include <torch/serialize/tensor.h>
#include <vector>
int furthest_point_sampling_wrapper(int b,
int n,
int m,
at::Tensor points_tensor,
at::Tensor temp_tensor,
at::Tensor idx_tensor);
void furthest_point_sampling_kernel_launcher(
int b, int n, int m, const float *dataset, float *temp, int *idxs);
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py | Python | import torch
import torch.nn as nn
from torch.autograd import Function
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_cuda
from pcdet.utils import common_utils
def points_in_boxes_cpu(points, boxes):
    """Compute a per-box 0/1 membership flag for every point (CPU op).

    Args:
        points: (num_points, 3) xyz coordinates, numpy array or torch tensor.
        boxes: (N, 7) [x, y, z, dx, dy, dz, heading]; (x, y, z) is the box
            center, and the boxes must not overlap each other.

    Returns:
        point_indices: (N, num_points) int tensor (ndarray if `points` was
            numpy) where entry (i, j) is 1 iff point j lies inside box i.
    """
    assert boxes.shape[1] == 7
    assert points.shape[1] == 3
    points, is_numpy = common_utils.check_numpy_to_torch(points)
    boxes, _ = common_utils.check_numpy_to_torch(boxes)

    flags = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
    # The C++ op fills `flags` in place.
    roiaware_pool3d_cuda.points_in_boxes_cpu(
        boxes.float().contiguous(), points.float().contiguous(), flags
    )
    if is_numpy:
        return flags.numpy()
    return flags
def points_in_boxes_gpu(points, boxes):
    """Find, for every point, the index of the box containing it (GPU op).

    Args:
        points: (B, M, 3) xyz coordinates.
        boxes: (B, T, 7) [x, y, z, dx, dy, dz, heading]; num_valid_boxes <= T.

    Returns:
        box_idxs_of_pts: (B, M) int tensor; -1 marks background points.
    """
    assert boxes.shape[0] == points.shape[0]
    assert boxes.shape[2] == 7 and points.shape[2] == 3

    batch_size, num_points = points.shape[0], points.shape[1]
    box_idxs_of_pts = points.new_full((batch_size, num_points), -1, dtype=torch.int)
    # The C++ op fills box_idxs_of_pts in place.
    roiaware_pool3d_cuda.points_in_boxes_gpu(
        boxes.contiguous(), points.contiguous(), box_idxs_of_pts
    )
    return box_idxs_of_pts
class RoIAwarePool3d(nn.Module):
    """RoI-aware point feature pooling module (see arXiv:1907.03670)."""

    def __init__(self, out_size, max_pts_each_voxel=128):
        """
        Args:
            out_size: int or 3-tuple giving the pooled voxel grid resolution.
            max_pts_each_voxel: cap on the number of points kept per voxel.
        """
        super().__init__()
        self.out_size = out_size
        self.max_pts_each_voxel = max_pts_each_voxel

    def forward(self, rois, pts, pts_feature, pool_method="max"):
        """Pool pts_feature into a per-RoI voxel grid.

        See RoIAwarePool3dFunction.forward for tensor shapes.
        """
        assert pool_method in ["max", "avg"]
        return RoIAwarePool3dFunction.apply(
            rois, pts, pts_feature, self.out_size, self.max_pts_each_voxel, pool_method
        )
class RoIAwarePool3dFunction(Function):
    """Autograd wrapper around the roiaware_pool3d CUDA kernels."""

    @staticmethod
    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):
        """
        Args:
            ctx:
            rois: (N, 7) [x, y, z, dx, dy, dz, heading]; (x, y, z) is the box center.
            pts: (npoints, 3)
            pts_feature: (npoints, C)
            out_size: int or 3-tuple of ints, e.g. 7 or (7, 7, 7).
            max_pts_each_voxel: cap on points gathered per output voxel.
            pool_method: 'max' or 'avg'.

        Returns:
            pooled_features: (N, out_x, out_y, out_z, C)
        """
        assert rois.shape[1] == 7 and pts.shape[1] == 3
        if isinstance(out_size, int):
            out_x = out_y = out_z = out_size
        else:
            assert len(out_size) == 3
            for dim in out_size:
                assert isinstance(dim, int)
            out_x, out_y, out_z = out_size

        num_rois = rois.shape[0]
        num_pts = pts.shape[0]
        num_channels = pts_feature.shape[-1]

        grid_shape = (num_rois, out_x, out_y, out_z)
        pooled_features = pts_feature.new_zeros(grid_shape + (num_channels,))
        argmax = pts_feature.new_zeros(grid_shape + (num_channels,), dtype=torch.int)
        pts_idx_of_voxels = pts_feature.new_zeros(
            grid_shape + (max_pts_each_voxel,), dtype=torch.int
        )

        # The CUDA op encodes the pool method as an int: 0 = max, 1 = avg.
        pool_method_code = {"max": 0, "avg": 1}[pool_method]
        roiaware_pool3d_cuda.forward(
            rois,
            pts,
            pts_feature,
            argmax,
            pts_idx_of_voxels,
            pooled_features,
            pool_method_code,
        )

        # Stash everything backward() needs; only pts_feature gets a gradient.
        ctx.roiaware_pool3d_for_backward = (
            pts_idx_of_voxels,
            argmax,
            pool_method_code,
            num_pts,
            num_channels,
        )
        return pooled_features

    @staticmethod
    def backward(ctx, grad_out):
        """
        Args:
            grad_out: (N, out_x, out_y, out_z, C) gradient w.r.t. pooled_features.

        Returns:
            Gradients for forward's inputs; only pts_feature receives one:
            grad_in with shape (npoints, C).
        """
        (
            pts_idx_of_voxels,
            argmax,
            pool_method,
            num_pts,
            num_channels,
        ) = ctx.roiaware_pool3d_for_backward

        grad_in = grad_out.new_zeros((num_pts, num_channels))
        roiaware_pool3d_cuda.backward(
            pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method
        )
        return None, None, grad_in, None, None, None
if __name__ == "__main__":
    # This module only provides ops; there is no standalone entry point.
    pass
pcdet/ops/roiaware_pool3d/src/roiaware_pool3d.cpp | C++ | /*
RoI-aware point cloud feature pooling
Reference paper: https://arxiv.org/abs/1907.03670
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <assert.h>
#include <torch/extension.h>
#include <torch/serialize/tensor.h>
//#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor
//") #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be
// contiguous ") #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
void roiaware_pool3d_launcher(int boxes_num,
int pts_num,
int channels,
int max_pts_each_voxel,
int out_x,
int out_y,
int out_z,
const float *rois,
const float *pts,
const float *pts_feature,
int *argmax,
int *pts_idx_of_voxels,
float *pooled_features,
int pool_method);
void roiaware_pool3d_backward_launcher(int boxes_num,
int out_x,
int out_y,
int out_z,
int channels,
int max_pts_each_voxel,
const int *pts_idx_of_voxels,
const int *argmax,
const float *grad_out,
float *grad_in,
int pool_method);
void points_in_boxes_launcher(int batch_size,
int boxes_num,
int pts_num,
const float *boxes,
const float *pts,
int *box_idx_of_points);
int roiaware_pool3d_gpu(at::Tensor rois,
                        at::Tensor pts,
                        at::Tensor pts_feature,
                        at::Tensor argmax,
                        at::Tensor pts_idx_of_voxels,
                        at::Tensor pooled_features,
                        int pool_method) {
    // Forward pass of RoI-aware pooling.
    //   rois: (N, 7) [x, y, z, dx, dy, dz, heading]; (x, y, z) is the box center
    //   pts: (npoints, 3); pts_feature: (npoints, C)
    //   argmax / pooled_features: (N, out_x, out_y, out_z, C), filled in place
    //   pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
    //   pool_method: 0 = max, 1 = avg
    const int boxes_num = rois.size(0);
    const int pts_num = pts.size(0);
    const int channels = pts_feature.size(1);
    const int out_x = pts_idx_of_voxels.size(1);
    const int out_y = pts_idx_of_voxels.size(2);
    const int out_z = pts_idx_of_voxels.size(3);
    const int max_pts_each_voxel = pts_idx_of_voxels.size(4);  // slot 0 holds the count

    // Voxel coordinates are packed into 8-bit fields downstream, so every
    // output dimension must stay below 256.
    assert((out_x < 256) && (out_y < 256) && (out_z < 256));

    roiaware_pool3d_launcher(
        boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z,
        rois.data<float>(), pts.data<float>(), pts_feature.data<float>(),
        argmax.data<int>(), pts_idx_of_voxels.data<int>(),
        pooled_features.data<float>(), pool_method);
    return 1;
}
int roiaware_pool3d_gpu_backward(at::Tensor pts_idx_of_voxels,
                                 at::Tensor argmax,
                                 at::Tensor grad_out,
                                 at::Tensor grad_in,
                                 int pool_method) {
    // Backward pass of RoI-aware pooling.
    //   pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
    //   argmax / grad_out: (N, out_x, out_y, out_z, C)
    //   grad_in: (npoints, C), filled in place
    //   pool_method: 0 = max, 1 = avg
    const int boxes_num = pts_idx_of_voxels.size(0);
    const int out_x = pts_idx_of_voxels.size(1);
    const int out_y = pts_idx_of_voxels.size(2);
    const int out_z = pts_idx_of_voxels.size(3);
    const int max_pts_each_voxel = pts_idx_of_voxels.size(4);  // slot 0 holds the count
    const int channels = grad_out.size(4);

    roiaware_pool3d_backward_launcher(
        boxes_num, out_x, out_y, out_z, channels, max_pts_each_voxel,
        pts_idx_of_voxels.data<int>(), argmax.data<int>(),
        grad_out.data<float>(), grad_in.data<float>(), pool_method);
    return 1;
}
int points_in_boxes_gpu(at::Tensor boxes_tensor,
                        at::Tensor pts_tensor,
                        at::Tensor box_idx_of_points_tensor) {
    // boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading]; (x, y, z) is the box
    // center. pts: (B, npoints, 3).
    // box_idx_of_points: (B, npoints), filled in place; -1 = background.
    const int batch_size = boxes_tensor.size(0);
    const int boxes_num = boxes_tensor.size(1);
    const int pts_num = pts_tensor.size(1);

    points_in_boxes_launcher(batch_size, boxes_num, pts_num,
                             boxes_tensor.data<float>(),
                             pts_tensor.data<float>(),
                             box_idx_of_points_tensor.data<int>());
    return 1;
}
inline void lidar_to_local_coords_cpu(float shift_x,
                                      float shift_y,
                                      float rot_angle,
                                      float &local_x,
                                      float &local_y) {
    // Rotate the XY offset (shift_x, shift_y) by -rot_angle, expressing a
    // LiDAR-frame displacement in the box's heading-aligned local frame.
    const float c = cos(-rot_angle);
    const float s = sin(-rot_angle);
    local_x = c * shift_x - s * shift_y;
    local_y = s * shift_x + c * shift_y;
}
inline int check_pt_in_box3d_cpu(const float *pt,
                                 const float *box3d,
                                 float &local_x,
                                 float &local_y) {
    // Return 1 if pt = (x, y, z) lies inside the rotated box
    // box3d = [cx, cy, cz, dx, dy, dz, heading]; (cx, cy, cz) is the center.
    // local_x / local_y receive the point's box-frame XY offset.
    const float MARGIN = 1e-2;  // XY tolerance at the box border

    // Height test first: the z axis needs no rotation.
    if (fabsf(pt[2] - box3d[2]) > box3d[5] / 2.0) return 0;

    lidar_to_local_coords_cpu(pt[0] - box3d[0], pt[1] - box3d[1], box3d[6],
                              local_x, local_y);
    const bool in_x = fabs(local_x) < box3d[3] / 2.0 + MARGIN;
    const bool in_y = fabs(local_y) < box3d[4] / 2.0 + MARGIN;
    return (in_x && in_y) ? 1 : 0;
}
int points_in_boxes_cpu(at::Tensor boxes_tensor,
                        at::Tensor pts_tensor,
                        at::Tensor pts_indices_tensor) {
    // boxes: (N, 7) [x, y, z, dx, dy, dz, heading]; (x, y, z) is the box
    // center and the boxes must not overlap. pts: (num_points, 3).
    // pts_indices: (N, num_points), filled in place with 0/1 membership flags.
    const int boxes_num = boxes_tensor.size(0);
    const int pts_num = pts_tensor.size(0);
    const float *boxes = boxes_tensor.data<float>();
    const float *pts = pts_tensor.data<float>();
    int *pts_indices = pts_indices_tensor.data<int>();

    float local_x = 0, local_y = 0;
    for (int box_i = 0; box_i < boxes_num; box_i++) {
        const float *cur_box = boxes + box_i * 7;
        int *cur_row = pts_indices + box_i * pts_num;
        for (int pt_j = 0; pt_j < pts_num; pt_j++) {
            cur_row[pt_j] =
                check_pt_in_box3d_cpu(pts + pt_j * 3, cur_box, local_x, local_y);
        }
    }
    return 1;
}
// Python bindings for the RoI-aware pooling ops; these names are what
// roiaware_pool3d_utils.py calls on the compiled extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &roiaware_pool3d_gpu, "roiaware pool3d forward (CUDA)");
    m.def("backward", &roiaware_pool3d_gpu_backward,
          "roiaware pool3d backward (CUDA)");
    m.def("points_in_boxes_gpu", &points_in_boxes_gpu,
          "points_in_boxes_gpu forward (CUDA)");
    m.def("points_in_boxes_cpu", &points_in_boxes_cpu,
          "points_in_boxes_cpu forward (CUDA)");
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu | CUDA | /*
RoI-aware point cloud feature pooling
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Rotate the XY offset (shift_x, shift_y) by -rot_angle, expressing a
// LiDAR-frame displacement in the box's heading-aligned local frame.
__device__ inline void lidar_to_local_coords(float shift_x,
                                             float shift_y,
                                             float rot_angle,
                                             float &local_x,
                                             float &local_y) {
    float cosa = cos(-rot_angle), sina = sin(-rot_angle);
    local_x = shift_x * cosa + shift_y * (-sina);
    local_y = shift_x * sina + shift_y * cosa;
}
__device__ inline int check_pt_in_box3d(const float *pt,
                                        const float *box3d,
                                        float &local_x,
                                        float &local_y) {
    // Return 1 if pt = (x, y, z) lies inside the rotated box
    // box3d = [cx, cy, cz, dx, dy, dz, heading]; (cx, cy, cz) is the center.
    // local_x / local_y receive the point's box-frame XY offset.
    const float MARGIN = 1e-5;  // small tolerance on the XY extent test
    float x = pt[0], y = pt[1], z = pt[2];
    float cx = box3d[0], cy = box3d[1], cz = box3d[2];
    float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6];

    // Height test first: the z axis needs no rotation.
    if (fabsf(z - cz) > dz / 2.0) return 0;
    lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y);
    float in_flag = (fabs(local_x) < dx / 2.0 + MARGIN) &
                    (fabs(local_y) < dy / 2.0 + MARGIN);
    return in_flag;
}
// One thread per (point, box) pair. For every point inside the box, compute
// which (out_x, out_y, out_z) pooling voxel it falls into and pack the three
// voxel indices into a single int; points outside the box get -1.
// params rois: (boxes_num, 7) [x, y, z, dx, dy, dz, heading], box center
// params pts: (npoints, 3) [x, y, z]
// params pts_mask: (boxes_num, npoints): -1 means the point is not in this
//   box, otherwise (x_idx << 16) + (y_idx << 8) + z_idx -- the 8-bit y/z
//   fields assume out_y, out_z <= 256.
__global__ void generate_pts_mask_for_box3d(int boxes_num,
                                            int pts_num,
                                            int out_x,
                                            int out_y,
                                            int out_z,
                                            const float *rois,
                                            const float *pts,
                                            int *pts_mask) {
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int box_idx = blockIdx.y;
    if (pt_idx >= pts_num || box_idx >= boxes_num) return;
    // Advance base pointers to this thread's point / box / output slot.
    pts += pt_idx * 3;
    rois += box_idx * 7;
    pts_mask += box_idx * pts_num + pt_idx;
    float local_x = 0, local_y = 0;
    int cur_in_flag = check_pt_in_box3d(pts, rois, local_x, local_y);
    pts_mask[0] = -1;  // default: point not inside this box
    if (cur_in_flag > 0) {
        float local_z = pts[2] - rois[2];
        float dx = rois[3], dy = rois[4], dz = rois[5];
        // Per-axis voxel sizes of this box's pooling grid.
        float x_res = dx / out_x;
        float y_res = dy / out_y;
        float z_res = dz / out_z;
        // Shift local coords from [-d/2, d/2] to [0, d] before binning.
        unsigned int x_idx = int((local_x + dx / 2) / x_res);
        unsigned int y_idx = int((local_y + dy / 2) / y_res);
        unsigned int z_idx = int((local_z + dz / 2) / z_res);
        // Clamp indices produced by the MARGIN tolerance back onto the grid.
        x_idx = min(max(x_idx, 0), out_x - 1);
        y_idx = min(max(y_idx, 0), out_y - 1);
        z_idx = min(max(z_idx, 0), out_z - 1);
        unsigned int idx_encoding = (x_idx << 16) + (y_idx << 8) + z_idx;
        pts_mask[0] = idx_encoding;
    }
}
// One thread per box: scan all points' packed voxel codes and bucket point
// indices into the box's per-voxel lists (serial per box, so no atomics).
// params pts_mask: (boxes_num, npoints) packed voxel code or -1 (see
//   generate_pts_mask_for_box3d)
// params pts_idx_of_voxels: (boxes_num, out_x, out_y, out_z,
//   max_pts_each_voxel); slot 0 of each voxel holds the point count,
//   slots 1..count hold point indices.
__global__ void collect_inside_pts_for_box3d(int boxes_num,
                                             int pts_num,
                                             int max_pts_each_voxel,
                                             int out_x,
                                             int out_y,
                                             int out_z,
                                             const int *pts_mask,
                                             int *pts_idx_of_voxels) {
    int box_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (box_idx >= boxes_num) return;
    int max_num_pts = max_pts_each_voxel - 1;  // index 0 is the counter
    pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel;
    for (int k = 0; k < pts_num; k++) {
        if (pts_mask[box_idx * pts_num + k] != -1) {
            // Unpack the voxel indices encoded by generate_pts_mask_for_box3d.
            unsigned int idx_encoding = pts_mask[box_idx * pts_num + k];
            unsigned int x_idx = (idx_encoding >> 16) & 0xFF;
            unsigned int y_idx = (idx_encoding >> 8) & 0xFF;
            unsigned int z_idx = idx_encoding & 0xFF;
            unsigned int base_offset =
                x_idx * out_y * out_z * max_pts_each_voxel +
                y_idx * out_z * max_pts_each_voxel +
                z_idx * max_pts_each_voxel;
            unsigned int cnt = pts_idx_of_voxels[base_offset];
            // Keep at most max_num_pts indices per voxel; extras are dropped.
            if (cnt < max_num_pts) {
                pts_idx_of_voxels[base_offset + cnt + 1] = k;
                pts_idx_of_voxels[base_offset]++;
            }
#ifdef DEBUG
            printf("collect: pts_%d, idx(%d, %d, %d), idx_encoding=%x\n", k,
                   x_idx, y_idx, z_idx, idx_encoding);
#endif
        }
    }
}
// Max-pool point features into every voxel of every box.
// Thread layout: blockIdx.z = box, blockIdx.y = channel, blockIdx.x/threads
// span the flattened (out_x * out_y * out_z) voxel grid.
// params pts_feature: (npoints, C)
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel),
//   index 0 of each voxel holds the point count
// params pooled_features: (N, out_x, out_y, out_z, C); empty voxels are left
//   untouched (assumed zero-initialized by the caller)
// params argmax: (N, out_x, out_y, out_z, C); -1 marks empty voxels
__global__ void roiaware_maxpool3d(int boxes_num,
                                   int pts_num,
                                   int channels,
                                   int max_pts_each_voxel,
                                   int out_x,
                                   int out_y,
                                   int out_z,
                                   const float *pts_feature,
                                   const int *pts_idx_of_voxels,
                                   float *pooled_features,
                                   int *argmax) {
    int box_idx = blockIdx.z;
    int channel_idx = blockIdx.y;
    int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
    // Decode the flat voxel index back into (x, y, z) grid coordinates.
    int x_idx = voxel_idx_flat / (out_y * out_z);
    int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
    int z_idx = voxel_idx_flat % out_z;
    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x ||
        y_idx >= out_y || z_idx >= out_z)
        return;
#ifdef DEBUG
    printf("src pts_idx_of_voxels: (%p, ), argmax: %p\n", pts_idx_of_voxels,
           argmax);
#endif
    int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
    // Advance pointers to this thread's (box, voxel, channel) slots.
    pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel +
                         offset_base * max_pts_each_voxel;
    pooled_features += box_idx * out_x * out_y * out_z * channels +
                       offset_base * channels + channel_idx;
    argmax += box_idx * out_x * out_y * out_z * channels +
              offset_base * channels + channel_idx;
    int argmax_idx = -1;
    // -1e50 is far below float range and acts as a -infinity sentinel.
    float max_val = -1e50;
    int total_pts = pts_idx_of_voxels[0];
    // Slots 1..total_pts hold the point indices collected for this voxel.
    for (int k = 1; k <= total_pts; k++) {
        if (pts_feature[pts_idx_of_voxels[k] * channels + channel_idx] >
            max_val) {
            max_val =
                pts_feature[pts_idx_of_voxels[k] * channels + channel_idx];
            argmax_idx = pts_idx_of_voxels[k];
        }
    }
    if (argmax_idx != -1) {
        pooled_features[0] = max_val;
    }
    argmax[0] = argmax_idx;
#ifdef DEBUG
    printf("channel_%d idx(%d, %d, %d), argmax_idx=(%d, %.3f), total=%d, after "
           "pts_idx: %p, argmax: (%p, %d)\n",
           channel_idx, x_idx, y_idx, z_idx, argmax_idx, max_val, total_pts,
           pts_idx_of_voxels, argmax, argmax_idx);
#endif
}
// Average-pool point features into every voxel of every box. Same thread
// layout as roiaware_maxpool3d (box x channel x flattened voxel).
// params pts_feature: (npoints, C)
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel),
//   index 0 of each voxel holds the point count
// params pooled_features: (N, out_x, out_y, out_z, C); empty voxels are left
//   untouched (assumed zero-initialized by the caller)
__global__ void roiaware_avgpool3d(int boxes_num,
                                   int pts_num,
                                   int channels,
                                   int max_pts_each_voxel,
                                   int out_x,
                                   int out_y,
                                   int out_z,
                                   const float *pts_feature,
                                   const int *pts_idx_of_voxels,
                                   float *pooled_features) {
    int box_idx = blockIdx.z;
    int channel_idx = blockIdx.y;
    int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
    // Decode the flat voxel index back into (x, y, z) grid coordinates.
    int x_idx = voxel_idx_flat / (out_y * out_z);
    int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
    int z_idx = voxel_idx_flat % out_z;
    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x ||
        y_idx >= out_y || z_idx >= out_z)
        return;
    int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
    pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel +
                         offset_base * max_pts_each_voxel;
    pooled_features += box_idx * out_x * out_y * out_z * channels +
                       offset_base * channels + channel_idx;
    float sum_val = 0;
    int total_pts = pts_idx_of_voxels[0];
    // Slots 1..total_pts hold the point indices collected for this voxel.
    for (int k = 1; k <= total_pts; k++) {
        sum_val += pts_feature[pts_idx_of_voxels[k] * channels + channel_idx];
    }
    if (total_pts > 0) {
        pooled_features[0] = sum_val / total_pts;
    }
}
// Host-side orchestration of the RoI-aware pooling forward pass:
// (1) compute a packed voxel code per (box, point) pair, (2) bucket point
// indices into per-voxel lists, (3) max- or avg-pool the features.
// params rois: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) box center
// params pts: (npoints, 3) [x, y, z]
// params pts_feature: (npoints, C)
// params argmax: (N, out_x, out_y, out_z, C)
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// params pooled_features: (N, out_x, out_y, out_z, C)
// params pool_method: 0: max_pool, 1: avg_pool
void roiaware_pool3d_launcher(int boxes_num,
                              int pts_num,
                              int channels,
                              int max_pts_each_voxel,
                              int out_x,
                              int out_y,
                              int out_z,
                              const float *rois,
                              const float *pts,
                              const float *pts_feature,
                              int *argmax,
                              int *pts_idx_of_voxels,
                              float *pooled_features,
                              int pool_method) {
    // Scratch buffer: packed voxel code (or -1) per (box, point) pair.
    int *pts_mask = NULL;
    cudaMalloc(&pts_mask, boxes_num * pts_num * sizeof(int));  // (N, M)
    cudaMemset(pts_mask, -1, boxes_num * pts_num * sizeof(int));
    dim3 blocks_mask(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num);
    dim3 threads(THREADS_PER_BLOCK);
    generate_pts_mask_for_box3d<<<blocks_mask, threads>>>(
        boxes_num, pts_num, out_x, out_y, out_z, rois, pts, pts_mask);
    // TODO: Merge the collect and pool functions, SS
    dim3 blocks_collect(DIVUP(boxes_num, THREADS_PER_BLOCK));
    collect_inside_pts_for_box3d<<<blocks_collect, threads>>>(
        boxes_num, pts_num, max_pts_each_voxel, out_x, out_y, out_z,
        pts_mask, pts_idx_of_voxels);
    dim3 blocks_pool(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels,
                     boxes_num);
    if (pool_method == 0) {
        roiaware_maxpool3d<<<blocks_pool, threads>>>(
            boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y,
            out_z, pts_feature, pts_idx_of_voxels, pooled_features, argmax);
    } else if (pool_method == 1) {
        roiaware_avgpool3d<<<blocks_pool, threads>>>(
            boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y,
            out_z, pts_feature, pts_idx_of_voxels, pooled_features);
    }
    cudaFree(pts_mask);
    // Added: surface kernel-launch failures instead of continuing silently,
    // matching the error handling already used in points_in_boxes_launcher.
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
#ifdef DEBUG
    cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Route each voxel's output gradient to the single point that produced the
// max in the forward pass. Same thread layout as roiaware_maxpool3d.
// params argmax: (N, out_x, out_y, out_z, C); -1 marks empty voxels
// params grad_out: (N, out_x, out_y, out_z, C)
// params grad_in: (npoints, C), accumulated return value
__global__ void roiaware_maxpool3d_backward(int boxes_num,
                                            int channels,
                                            int out_x,
                                            int out_y,
                                            int out_z,
                                            const int *argmax,
                                            const float *grad_out,
                                            float *grad_in) {
    int box_idx = blockIdx.z;
    int channel_idx = blockIdx.y;
    int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
    // Decode the flat voxel index back into (x, y, z) grid coordinates.
    int x_idx = voxel_idx_flat / (out_y * out_z);
    int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
    int z_idx = voxel_idx_flat % out_z;
    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x ||
        y_idx >= out_y || z_idx >= out_z)
        return;
    int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
    argmax += box_idx * out_x * out_y * out_z * channels +
              offset_base * channels + channel_idx;
    grad_out += box_idx * out_x * out_y * out_z * channels +
                offset_base * channels + channel_idx;
    if (argmax[0] == -1) return;  // empty voxel: nothing to propagate
    // Atomic: the same point can be the argmax for voxels of several boxes.
    atomicAdd(grad_in + argmax[0] * channels + channel_idx, grad_out[0] * 1);
}
// Distribute each voxel's output gradient evenly over the points that were
// averaged in the forward pass. Same thread layout as roiaware_avgpool3d.
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// params grad_out: (N, out_x, out_y, out_z, C)
// params grad_in: (npoints, C), accumulated return value
__global__ void roiaware_avgpool3d_backward(int boxes_num,
                                            int channels,
                                            int out_x,
                                            int out_y,
                                            int out_z,
                                            int max_pts_each_voxel,
                                            const int *pts_idx_of_voxels,
                                            const float *grad_out,
                                            float *grad_in) {
    int box_idx = blockIdx.z;
    int channel_idx = blockIdx.y;
    int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x;
    // Decode the flat voxel index back into (x, y, z) grid coordinates.
    int x_idx = voxel_idx_flat / (out_y * out_z);
    int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z;
    int z_idx = voxel_idx_flat % out_z;
    if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x ||
        y_idx >= out_y || z_idx >= out_z)
        return;
    int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx;
    pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel +
                         offset_base * max_pts_each_voxel;
    grad_out += box_idx * out_x * out_y * out_z * channels +
                offset_base * channels + channel_idx;
    int total_pts = pts_idx_of_voxels[0];
    // Each contributing point receives 1/total_pts of the gradient; the max
    // with 1.0 guards the empty-voxel case (loop body never runs then).
    float cur_grad = 1 / fmaxf(float(total_pts), 1.0);
    for (int k = 1; k <= total_pts; k++) {
        // Atomic: the same point may belong to voxels of several boxes.
        atomicAdd(grad_in + pts_idx_of_voxels[k] * channels + channel_idx,
                  grad_out[0] * cur_grad);
    }
}
// Host launcher for the pooling backward kernels.
// params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
// params argmax: (N, out_x, out_y, out_z, C)
// params grad_out: (N, out_x, out_y, out_z, C)
// params grad_in: (npoints, C), return value
// params pool_method: 0: max_pool, 1: avg_pool
void roiaware_pool3d_backward_launcher(int boxes_num,
                                       int out_x,
                                       int out_y,
                                       int out_z,
                                       int channels,
                                       int max_pts_each_voxel,
                                       const int *pts_idx_of_voxels,
                                       const int *argmax,
                                       const float *grad_out,
                                       float *grad_in,
                                       int pool_method) {
    // One thread per (voxel, channel, box), mirroring the forward layout.
    dim3 blocks(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels,
                boxes_num);
    dim3 threads(THREADS_PER_BLOCK);
    if (pool_method == 0) {
        roiaware_maxpool3d_backward<<<blocks, threads>>>(
            boxes_num, channels, out_x, out_y, out_z, argmax, grad_out,
            grad_in);
    } else if (pool_method == 1) {
        roiaware_avgpool3d_backward<<<blocks, threads>>>(
            boxes_num, channels, out_x, out_y, out_z, max_pts_each_voxel,
            pts_idx_of_voxels, grad_out, grad_in);
    }
}
// One thread per (batch, point): record the index of the first box that
// contains the point.
// params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the
//   box center
// params pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate
// params box_idx_of_points: (B, npoints), default -1 (caller-initialized);
//   the FIRST matching box wins, so boxes are assumed not to overlap.
__global__ void points_in_boxes_kernel(int batch_size,
                                       int boxes_num,
                                       int pts_num,
                                       const float *boxes,
                                       const float *pts,
                                       int *box_idx_of_points) {
    int bs_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= batch_size || pt_idx >= pts_num) return;
    // Advance pointers to this thread's batch item / point / output slot.
    boxes += bs_idx * boxes_num * 7;
    pts += bs_idx * pts_num * 3 + pt_idx * 3;
    box_idx_of_points += bs_idx * pts_num + pt_idx;
    float local_x = 0, local_y = 0;
    int cur_in_flag = 0;
    for (int k = 0; k < boxes_num; k++) {
        cur_in_flag = check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y);
        if (cur_in_flag) {
            box_idx_of_points[0] = k;
            break;  // first hit wins
        }
    }
}
// Host launcher for points_in_boxes_kernel; aborts the process on a failed
// kernel launch.
// params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the
//   box center
// params pts: (B, npoints, 3) [x, y, z]
// params box_idx_of_points: (B, npoints), default -1
void points_in_boxes_launcher(int batch_size,
                              int boxes_num,
                              int pts_num,
                              const float *boxes,
                              const float *pts,
                              int *box_idx_of_points) {
    cudaError_t err;
    // One thread per point; the grid's y dimension spans the batch.
    dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size);
    dim3 threads(THREADS_PER_BLOCK);
    points_in_boxes_kernel<<<blocks, threads>>>(batch_size, boxes_num, pts_num,
                                                boxes, pts, box_idx_of_points);
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
#ifdef DEBUG
    cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py | Python | import torch
import torch.nn as nn
from torch.autograd import Function
from pcdet.ops.roipoint_pool3d import roipoint_pool3d_cuda
from pcdet.utils import box_utils
class RoIPointPool3d(nn.Module):
    """Pools raw points and their features inside (enlarged) 3D boxes.

    Thin nn.Module wrapper around RoIPointPool3dFunction.
    """

    def __init__(self, num_sampled_points=512, pool_extra_width=1.0):
        super().__init__()
        # Number of points kept per box; sparser boxes duplicate their points.
        self.num_sampled_points = num_sampled_points
        # Extra width used to enlarge every box before pooling.
        self.pool_extra_width = pool_extra_width

    def forward(self, points, point_features, boxes3d):
        """
        Args:
            points: (B, N, 3)
            point_features: (B, N, C)
            boxes3d: (B, M, 7), [x, y, z, dx, dy, dz, heading]

        Returns:
            pooled_features: (B, M, num_sampled_points, 3 + C)
            pooled_empty_flag: (B, M)
        """
        pooled = RoIPointPool3dFunction.apply(
            points,
            point_features,
            boxes3d,
            self.pool_extra_width,
            self.num_sampled_points,
        )
        return pooled
class RoIPointPool3dFunction(Function):
    """Autograd wrapper around the CUDA RoI point-pooling op (forward only)."""

    @staticmethod
    def forward(
        ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512
    ):
        """
        Args:
            ctx: autograd context (unused; the op is non-differentiable)
            points: (B, N, 3)
            point_features: (B, N, C)
            boxes3d: (B, num_boxes, 7), [x, y, z, dx, dy, dz, heading]
            pool_extra_width: extra width used to enlarge each box before pooling
            num_sampled_points: number of points kept per box
        Returns:
            pooled_features: (B, num_boxes, num_sampled_points, 3 + C)
            pooled_empty_flag: (B, num_boxes); 1 where a box contains no points
        """
        assert points.shape.__len__() == 3 and points.shape[2] == 3
        batch_size, boxes_num, feature_len = (
            points.shape[0],
            boxes3d.shape[1],
            point_features.shape[2],
        )
        # Enlarge boxes so border points are pooled too, then restore (B, M, 7).
        pooled_boxes3d = box_utils.enlarge_box3d(
            boxes3d.view(-1, 7), pool_extra_width
        ).view(batch_size, -1, 7)
        # Outputs are pre-allocated here and filled in-place by the CUDA op.
        pooled_features = point_features.new_zeros(
            (batch_size, boxes_num, num_sampled_points, 3 + feature_len)
        )
        pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int()
        roipoint_pool3d_cuda.forward(
            points.contiguous(),
            pooled_boxes3d.contiguous(),
            point_features.contiguous(),
            pooled_features,
            pooled_empty_flag,
        )
        return pooled_features, pooled_empty_flag

    @staticmethod
    def backward(ctx, grad_out):
        # The pooling op is treated as non-differentiable.
        raise NotImplementedError
# Import-only module; no standalone entry point is provided.
if __name__ == "__main__":
    pass
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp | C++ | #include <torch/extension.h>
#include <torch/serialize/tensor.h>
#define CHECK_CUDA(x) \
do { \
if (!x.type().is_cuda()) { \
fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, \
__LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
if (!x.is_contiguous()) { \
fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, \
__FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
void roipool3dLauncher(int batch_size,
int pts_num,
int boxes_num,
int feature_in_len,
int sampled_pts_num,
const float *xyz,
const float *boxes3d,
const float *pts_feature,
float *pooled_features,
int *pooled_empty_flag);
// Validate inputs and dispatch the RoI point pooling CUDA launcher.
// params xyz: (B, N, 3)
// params boxes3d: (B, M, 7)
// params pts_feature: (B, N, C)
// params pooled_features: (B, M, num_sampled_points, 3+C), filled in-place
// params pooled_empty_flag: (B, M), filled in-place
// Returns 1 on success (the CHECK_* macros abort the process on bad input).
int roipool3d_gpu(at::Tensor xyz,
                  at::Tensor boxes3d,
                  at::Tensor pts_feature,
                  at::Tensor pooled_features,
                  at::Tensor pooled_empty_flag) {
    CHECK_INPUT(xyz);
    CHECK_INPUT(boxes3d);
    CHECK_INPUT(pts_feature);
    CHECK_INPUT(pooled_features);
    CHECK_INPUT(pooled_empty_flag);
    int batch_size = xyz.size(0);
    int pts_num = xyz.size(1);
    int boxes_num = boxes3d.size(1);
    int feature_in_len = pts_feature.size(2);
    // The sampled point count is inferred from the pre-allocated output shape.
    int sampled_pts_num = pooled_features.size(2);
    // NOTE(review): Tensor::data<T>() is deprecated in newer PyTorch in favor
    // of data_ptr<T>(); kept as-is to match the toolchain this file targets.
    const float *xyz_data = xyz.data<float>();
    const float *boxes3d_data = boxes3d.data<float>();
    const float *pts_feature_data = pts_feature.data<float>();
    float *pooled_features_data = pooled_features.data<float>();
    int *pooled_empty_flag_data = pooled_empty_flag.data<int>();
    roipool3dLauncher(batch_size, pts_num, boxes_num, feature_in_len,
                      sampled_pts_num, xyz_data, boxes3d_data, pts_feature_data,
                      pooled_features_data, pooled_empty_flag_data);
    return 1;
}
// Python binding: only the forward pooling op is exposed; the backward pass
// raises NotImplementedError on the Python side.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &roipool3d_gpu, "roipool3d forward (CUDA)");
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/ops/roipoint_pool3d/src/roipoint_pool3d_kernel.cu | CUDA | /*
Point cloud feature pooling
Written by Shaoshuai Shi
All Rights Reserved 2018.
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Rotate the LiDAR-frame offset (shift_x, shift_y) by -rot_angle, expressing
// it in the box's heading-aligned local frame. Results are written through
// the local_x / local_y references.
__device__ inline void lidar_to_local_coords(float shift_x,
                                             float shift_y,
                                             float rot_angle,
                                             float &local_x,
                                             float &local_y) {
    float cosa = cos(-rot_angle), sina = sin(-rot_angle);
    local_x = shift_x * cosa + shift_y * (-sina);
    local_y = shift_x * sina + shift_y * cosa;
}
// Test whether point pt lies inside the (rotated) 3D box.
// param pt: (x, y, z)
// param box3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
// On a hit, local_x / local_y hold the point's in-plane coordinates in the
// box frame.
// Returns 1 when inside (with a small MARGIN of tolerance), else 0.
__device__ inline int check_pt_in_box3d(const float *pt,
                                        const float *box3d,
                                        float &local_x,
                                        float &local_y) {
    const float MARGIN = 1e-5;
    float x = pt[0], y = pt[1], z = pt[2];
    float cx = box3d[0], cy = box3d[1], cz = box3d[2];
    float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6];
    // Cheap vertical rejection before paying for the 2D rotation.
    if (fabsf(z - cz) > dz / 2.0) return 0;
    lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y);
    // Fixed idiom: the original combined the two comparisons with bitwise &
    // and stored the result in a float. Use logical && and an int flag; the
    // returned value is identical. fabsf is used consistently for floats.
    int in_flag = (fabsf(local_x) < dx / 2.0 + MARGIN) &&
                  (fabsf(local_y) < dy / 2.0 + MARGIN);
    return in_flag;
}
// One thread per (batch, box, point): flag whether the point lies in the box.
// params xyz: (B, N, 3)
// params boxes3d: (B, M, 7)
// params pts_assign: (B, N, M) 0/1 point-in-box flags (this kernel writes
//   flags, not box indices; every in-range element is written exactly once)
__global__ void assign_pts_to_box3d(int batch_size,
                                    int pts_num,
                                    int boxes_num,
                                    const float *xyz,
                                    const float *boxes3d,
                                    int *pts_assign) {
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int box_idx = blockIdx.y;
    int bs_idx = blockIdx.z;
    if (pt_idx >= pts_num || box_idx >= boxes_num || bs_idx >= batch_size) {
        return;
    }
    int assign_idx =
        bs_idx * pts_num * boxes_num + pt_idx * boxes_num + box_idx;
    int box_offset = bs_idx * boxes_num * 7 + box_idx * 7;
    int pt_offset = bs_idx * pts_num * 3 + pt_idx * 3;
    float local_x = 0, local_y = 0;
    int cur_in_flag = check_pt_in_box3d(xyz + pt_offset, boxes3d + box_offset,
                                        local_x, local_y);
    // Removed the dead store "pts_assign[assign_idx] = 0;" that was
    // unconditionally overwritten by the assignment below.
    pts_assign[assign_idx] = cur_in_flag;
    // printf("bs=%d, pt=%d, in=%d\n", bs_idx, pt_idx, pts_assign[bs_idx *
    // pts_num + pt_idx]);
}
// One thread per (batch, box): gather up to sampled_pts_num indices of the
// points assigned to the box. Boxes with fewer points wrap around and
// duplicate existing indices so downstream kernels always read a full set.
// params pts_assign: (B, N, M) 0/1 point-in-box flags
// params pts_idx: (B, M, sampled_pts_num) gathered point indices
// params pooled_empty_flag: (B, M); set to 1 for boxes with no points
//   (assumed zero-initialized by the caller)
__global__ void get_pooled_idx(int batch_size,
                               int pts_num,
                               int boxes_num,
                               int sampled_pts_num,
                               const int *pts_assign,
                               int *pts_idx,
                               int *pooled_empty_flag) {
    int boxes_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (boxes_idx >= boxes_num) {
        return;
    }
    int bs_idx = blockIdx.y;
    int cnt = 0;
    for (int k = 0; k < pts_num; k++) {
        if (pts_assign[bs_idx * pts_num * boxes_num + k * boxes_num +
                       boxes_idx]) {
            if (cnt < sampled_pts_num) {
                pts_idx[bs_idx * boxes_num * sampled_pts_num +
                        boxes_idx * sampled_pts_num + cnt] = k;
                cnt++;
            } else
                break;  // the box is already full: stop scanning points
        }
    }
    if (cnt == 0) {
        // Empty box: flag it; its pts_idx entries are left untouched and the
        // forward kernel skips the box entirely.
        pooled_empty_flag[bs_idx * boxes_num + boxes_idx] = 1;
    } else if (cnt < sampled_pts_num) {
        // duplicate same points for sampling
        for (int k = cnt; k < sampled_pts_num; k++) {
            int duplicate_idx = k % cnt;
            int base_offset = bs_idx * boxes_num * sampled_pts_num +
                              boxes_idx * sampled_pts_num;
            pts_idx[base_offset + k] = pts_idx[base_offset + duplicate_idx];
        }
    }
}
// One thread per (batch, box, sampled slot): copy the selected point's xyz
// and feature vector into the pooled output. Boxes flagged empty are skipped.
// params xyz: (B, N, 3)
// params pts_idx: (B, M, sampled_pts_num) indices chosen by get_pooled_idx
// params pts_feature: (B, N, C)
// params pooled_features: (B, M, sampled_pts_num, 3+C)
// params pooled_empty_flag: (B, M)
__global__ void roipool3d_forward(int batch_size,
                                  int pts_num,
                                  int boxes_num,
                                  int feature_in_len,
                                  int sampled_pts_num,
                                  const float *xyz,
                                  const int *pts_idx,
                                  const float *pts_feature,
                                  float *pooled_features,
                                  int *pooled_empty_flag) {
    int sample_pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int box_idx = blockIdx.y;
    int bs_idx = blockIdx.z;
    if (sample_pt_idx >= sampled_pts_num || box_idx >= boxes_num ||
        bs_idx >= batch_size) {
        return;
    }
    if (pooled_empty_flag[bs_idx * boxes_num + box_idx]) {
        return;  // the box has no points: its output slots are left as-is
    }
    int temp_idx = bs_idx * boxes_num * sampled_pts_num +
                   box_idx * sampled_pts_num + sample_pt_idx;
    int src_pt_idx = pts_idx[temp_idx];
    int dst_feature_offset = temp_idx * (3 + feature_in_len);
    // First 3 output channels: the point's coordinates.
    for (int j = 0; j < 3; j++)
        pooled_features[dst_feature_offset + j] =
            xyz[bs_idx * pts_num * 3 + src_pt_idx * 3 + j];
    int src_feature_offset =
        bs_idx * pts_num * feature_in_len + src_pt_idx * feature_in_len;
    // Remaining channels: the point's feature vector.
    for (int j = 0; j < feature_in_len; j++)
        pooled_features[dst_feature_offset + 3 + j] =
            pts_feature[src_feature_offset + j];
}
// Host-side pipeline for RoI point pooling: (1) flag which points fall in
// which boxes, (2) gather up to sampled_pts_num indices per box, (3) copy
// coordinates + features into the pooled output. Scratch buffers are
// device-allocated per call.
void roipool3dLauncher(int batch_size,
                       int pts_num,
                       int boxes_num,
                       int feature_in_len,
                       int sampled_pts_num,
                       const float *xyz,
                       const float *boxes3d,
                       const float *pts_feature,
                       float *pooled_features,
                       int *pooled_empty_flag) {
    // printf("batch_size=%d, pts_num=%d, boxes_num=%d\n", batch_size, pts_num,
    // boxes_num);
    int *pts_assign = NULL;
    cudaMalloc(&pts_assign, batch_size * pts_num * boxes_num *
                                sizeof(int));  // (batch_size, N, M)
    // No memset needed: assign_pts_to_box3d writes every in-range element.
    // cudaMemset(&pts_assign, -1, batch_size * pts_num * boxes_num *
    // sizeof(int));
    dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num,
                batch_size);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    assign_pts_to_box3d<<<blocks, threads>>>(batch_size, pts_num, boxes_num,
                                             xyz, boxes3d, pts_assign);
    int *pts_idx = NULL;
    cudaMalloc(&pts_idx,
               batch_size * boxes_num * sampled_pts_num *
                   sizeof(int));  // (batch_size, M, sampled_pts_num)
    dim3 blocks2(DIVUP(boxes_num, THREADS_PER_BLOCK),
                 batch_size);  // blockIdx.x(col), blockIdx.y(row)
    get_pooled_idx<<<blocks2, threads>>>(batch_size, pts_num, boxes_num,
                                         sampled_pts_num, pts_assign, pts_idx,
                                         pooled_empty_flag);
    dim3 blocks_pool(DIVUP(sampled_pts_num, THREADS_PER_BLOCK), boxes_num,
                     batch_size);
    roipool3d_forward<<<blocks_pool, threads>>>(
        batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num,
        xyz, pts_idx, pts_feature, pooled_features, pooled_empty_flag);
    cudaFree(pts_assign);
    cudaFree(pts_idx);
#ifdef DEBUG
    cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/box_coder_utils.py | Python | import numpy as np
import torch
class ResidualCoder(object):
    """Standard 7-DoF residual box coder used by anchor-based heads.

    Encodes box targets as residuals relative to anchors; optionally encodes
    the heading angle as a (cos, sin) pair, which grows code_size by one.
    """

    def __init__(self, code_size=7, encode_angle_by_sincos=False, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.encode_angle_by_sincos = encode_angle_by_sincos
        if self.encode_angle_by_sincos:
            # cos/sin replace the single heading channel, adding one slot.
            self.code_size += 1

    def encode_torch(self, boxes, anchors):
        """
        Args:
            boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]

        Returns:
            (N, code_size + C) encoded residuals

        Note:
            Clamps the size channels of *boxes* and *anchors* in place, so the
            caller's tensors are modified.
        """
        anchors[:, 3:6] = torch.clamp_min(anchors[:, 3:6], min=1e-5)
        boxes[:, 3:6] = torch.clamp_min(boxes[:, 3:6], min=1e-5)
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(boxes, 1, dim=-1)
        # Center offsets are normalized by the anchor's BEV diagonal / height.
        diagonal = torch.sqrt(dxa**2 + dya**2)
        xt = (xg - xa) / diagonal
        yt = (yg - ya) / diagonal
        zt = (zg - za) / dza
        dxt = torch.log(dxg / dxa)
        dyt = torch.log(dyg / dya)
        dzt = torch.log(dzg / dza)
        if self.encode_angle_by_sincos:
            rt_cos = torch.cos(rg) - torch.cos(ra)
            rt_sin = torch.sin(rg) - torch.sin(ra)
            rts = [rt_cos, rt_sin]
        else:
            rts = [rg - ra]
        # Extra trailing channels are encoded as plain differences.
        cts = [g - a for g, a in zip(cgs, cas)]
        return torch.cat([xt, yt, zt, dxt, dyt, dzt, *rts, *cts], dim=-1)

    def decode_torch(self, box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

        Returns:
            decoded boxes in the same layout as *anchors*
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        if not self.encode_angle_by_sincos:
            xt, yt, zt, dxt, dyt, dzt, rt, *cts = torch.split(box_encodings, 1, dim=-1)
        else:
            xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(
                box_encodings, 1, dim=-1
            )
        diagonal = torch.sqrt(dxa**2 + dya**2)
        xg = xt * diagonal + xa
        yg = yt * diagonal + ya
        zg = zt * dza + za
        dxg = torch.exp(dxt) * dxa
        dyg = torch.exp(dyt) * dya
        dzg = torch.exp(dzt) * dza
        if self.encode_angle_by_sincos:
            # Recover the absolute heading from the (cos, sin) residuals.
            rg_cos = cost + torch.cos(ra)
            rg_sin = sint + torch.sin(ra)
            rg = torch.atan2(rg_sin, rg_cos)
        else:
            rg = rt + ra
        cgs = [t + a for t, a in zip(cts, cas)]
        return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
class PreviousResidualDecoder(object):
    """Decoder for the legacy residual box encoding (x, y, z, w, l, h, r)."""

    def __init__(self, code_size=7, **kwargs):
        super().__init__()
        self.code_size = code_size

    @staticmethod
    def decode_torch(box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

        Returns:
            decoded boxes, custom trailing channels decoded as plain sums
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
        # Center offsets are de-normalized by the anchor's BEV diagonal.
        diag = (dxa**2 + dya**2).sqrt()
        center = [xt * diag + xa, yt * diag + ya, zt * dza + za]
        # Note the legacy (w, l, h) -> (dx, dy, dz) axis pairing.
        sizes = [lt.exp() * dxa, wt.exp() * dya, ht.exp() * dza]
        heading = rt + ra
        extras = [t + a for t, a in zip(cts, cas)]
        return torch.cat([*center, *sizes, heading, *extras], dim=-1)
class PreviousResidualRoIDecoder(object):
    """Legacy RoI-head decoder; identical to PreviousResidualDecoder except
    the heading residual is subtracted from the anchor heading."""

    def __init__(self, code_size=7, **kwargs):
        super().__init__()
        self.code_size = code_size

    @staticmethod
    def decode_torch(box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

        Returns:
            decoded boxes, custom trailing channels decoded as plain sums
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
        # Center offsets are de-normalized by the anchor's BEV diagonal.
        diag = (dxa**2 + dya**2).sqrt()
        center = [xt * diag + xa, yt * diag + ya, zt * dza + za]
        # Note the legacy (w, l, h) -> (dx, dy, dz) axis pairing.
        sizes = [lt.exp() * dxa, wt.exp() * dya, ht.exp() * dza]
        # Unlike PreviousResidualDecoder, the heading residual is subtracted.
        heading = ra - rt
        extras = [t + a for t, a in zip(cts, cas)]
        return torch.cat([*center, *sizes, heading, *extras], dim=-1)
class PointResidualCoder(object):
    """Box coder for point-based heads: residuals are relative to each point.

    Heading is always encoded as a (cos, sin) pair, hence code_size=8 by
    default. With use_mean_size=True, per-class mean box sizes act as anchors.
    """

    def __init__(self, code_size=8, use_mean_size=True, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.use_mean_size = use_mean_size
        if self.use_mean_size:
            # NOTE: eagerly moved to GPU, so constructing this coder with
            # use_mean_size=True requires an available CUDA device.
            self.mean_size = (
                torch.from_numpy(np.array(kwargs["mean_size"])).cuda().float()
            )
            assert self.mean_size.min() > 0

    def encode_torch(self, gt_boxes, points, gt_classes=None):
        """
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            points: (N, 3) [x, y, z]
            gt_classes: (N) [1, num_classes]; required when use_mean_size=True
        Returns:
            box_coding: (N, 8 + C)

        Note:
            Clamps gt_boxes' size channels in place (mutates the caller's tensor).
        """
        gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)
        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)
        if self.use_mean_size:
            assert gt_classes.max() <= self.mean_size.shape[0]
            # use aboslute value to handle ignore labels in st3d
            point_anchor_size = self.mean_size[torch.abs(gt_classes) - 1]
            dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
            # Offsets normalized by the class anchor's BEV diagonal / height.
            diagonal = torch.sqrt(dxa**2 + dya**2)
            xt = (xg - xa) / diagonal
            yt = (yg - ya) / diagonal
            zt = (zg - za) / dza
            dxt = torch.log(dxg / dxa)
            dyt = torch.log(dyg / dya)
            dzt = torch.log(dzg / dza)
        else:
            # Without class anchors: raw offsets and log-sizes.
            xt = xg - xa
            yt = yg - ya
            zt = zg - za
            dxt = torch.log(dxg)
            dyt = torch.log(dyg)
            dzt = torch.log(dzg)
        cts = [g for g in cgs]
        return torch.cat(
            [xt, yt, zt, dxt, dyt, dzt, torch.cos(rg), torch.sin(rg), *cts], dim=-1
        )

    def decode_torch(self, box_encodings, points, pred_classes=None):
        """
        Args:
            box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, cos, sin, ...]
            points: (N, 3) [x, y, z]
            pred_classes: (N) [1, num_classes]; required when use_mean_size=True
        Returns:
            decoded boxes (N, 7 + C)
        """
        xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(
            box_encodings, 1, dim=-1
        )
        xa, ya, za = torch.split(points, 1, dim=-1)
        if self.use_mean_size:
            assert pred_classes.max() <= self.mean_size.shape[0]
            point_anchor_size = self.mean_size[pred_classes - 1]
            dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
            diagonal = torch.sqrt(dxa**2 + dya**2)
            xg = xt * diagonal + xa
            yg = yt * diagonal + ya
            zg = zt * dza + za
            dxg = torch.exp(dxt) * dxa
            dyg = torch.exp(dyt) * dya
            dzg = torch.exp(dzt) * dza
        else:
            xg = xt + xa
            yg = yt + ya
            zg = zt + za
            dxg, dyg, dzg = torch.split(torch.exp(box_encodings[..., 3:6]), 1, dim=-1)
        # Recover the heading from its (cos, sin) encoding.
        rg = torch.atan2(sint, cost)
        cgs = [t for t in cts]
        return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/box_utils.py | Python | import copy
import numpy as np
import scipy
import torch
from scipy.spatial import Delaunay
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils
from pcdet.utils import common_utils
def in_hull(p, hull):
    """
    Test which points lie inside a convex hull.

    :param p: (N, K) test points
    :param hull: (M, K) M corners of a box, or a pre-built Delaunay triangulation
    :return (N) bool array, True where p[i] is inside the hull
    """
    try:
        if not isinstance(hull, Delaunay):
            hull = Delaunay(hull)
        flag = hull.find_simplex(p) >= 0
    # Fixed: the private scipy.spatial.qhull module was removed in modern
    # SciPy; QhullError is exposed at scipy.spatial directly.
    except scipy.spatial.QhullError:
        # Degenerate input (e.g. coplanar corners): treat all points as outside.
        print("Warning: not a hull %s" % str(hull))
        # Fixed: np.bool was removed in NumPy 1.24; the builtin bool is the
        # documented replacement.
        flag = np.zeros(p.shape[0], dtype=bool)
    return flag
def boxes_to_corners_3d(boxes3d):
    """
    Convert center-form boxes to their 8 corner points.

        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        boxes3d: (N, 7) or (N, 8)
            (N, 7): [x, y, z, dx, dy, dz, heading]
            (N, 8): [x, y, z, dx, dy, dz, heading, cls]
            where (x, y, z) is the box center

    Returns:
        corners: (N, 8, 3); numpy if the input was numpy, torch otherwise
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
    # Unit-cube corner signs (halved to half-extents), ordered as drawn above.
    template = (
        boxes3d.new_tensor(
            (
                [1, 1, -1],
                [1, -1, -1],
                [-1, -1, -1],
                [-1, 1, -1],
                [1, 1, 1],
                [1, -1, 1],
                [-1, -1, 1],
                [-1, 1, 1],
            )
        )
        / 2
    )
    # Scale the template by box sizes, rotate around z by heading, translate.
    corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
    corners3d = common_utils.rotate_points_along_z(
        corners3d.view(-1, 8, 3), boxes3d[:, 6]
    ).view(-1, 8, 3)
    corners3d += boxes3d[:, None, 0:3]
    return corners3d.numpy() if is_numpy else corners3d
def box_to_corners_3d(bbox):
    """Single-box convenience wrapper around boxes_to_corners_3d.

    Args:
        bbox: (7,) or (8,) one box

    Returns:
        corners: (8, 3)
    """
    return boxes_to_corners_3d(np.expand_dims(bbox, axis=0))[0]
def mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1):
    """
    Keep boxes that have at least min_num_corners corners inside limit_range.

    Args:
        boxes: (N, 7) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
        limit_range: [minx, miny, minz, maxx, maxy, maxz]
        min_num_corners: minimum in-range corners for a box to survive

    Returns:
        (N,) boolean mask
    """
    if boxes.shape[1] > 7:
        boxes = boxes[:, 0:7]
    corners = boxes_to_corners_3d(boxes)  # (N, 8, 3)
    lower, upper = limit_range[0:3], limit_range[3:6]
    corner_in_range = ((corners >= lower) & (corners <= upper)).all(axis=2)
    return corner_in_range.sum(axis=1) >= min_num_corners
def remove_points_in_boxes3d(points, boxes3d):
    """
    Drop every point that falls inside any of the given boxes.

    Args:
        points: (num_points, 3 + C)
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box
            center; boxes are assumed not to overlap each other

    Returns:
        the surviving points, numpy if the input was numpy, torch otherwise
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
    points, is_numpy = common_utils.check_numpy_to_torch(points)
    point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes3d)
    outside = point_masks.sum(dim=0) == 0
    kept = points[outside]
    return kept.numpy() if is_numpy else kept
def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib):
    """Convert boxes from KITTI rect-camera coordinates to lidar coordinates.

    Args:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
        calib: calibration object providing ``rect_to_lidar``

    Returns:
        boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], center-based
    """
    boxes = copy.deepcopy(boxes3d_camera)
    r = boxes[:, 6:7]
    l, h, w = boxes[:, 3:4], boxes[:, 4:5], boxes[:, 5:6]

    # Camera boxes are bottom-centered; lift z to the geometric center.
    xyz_lidar = calib.rect_to_lidar(boxes[:, 0:3])
    xyz_lidar[:, 2] += h[:, 0] / 2
    return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
    """Convert boxes from the legacy ("fake lidar") format to the unified one.

    Args:
        boxes3d_lidar: (N, 7) [x, y, z, w, l, h, r] in old lidar coords,
            with z at the bottom center

    Returns:
        (N, 7) [x, y, z, dx, dy, dz, heading] with (x, y, z) at the box center
    """
    boxes = copy.deepcopy(boxes3d_lidar)
    w, l, h = boxes[:, 3:4], boxes[:, 4:5], boxes[:, 5:6]
    r = boxes[:, 6:7]
    # Lift z from the bottom face to the geometric center.
    boxes[:, 2] += h[:, 0] / 2
    return np.concatenate([boxes[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_lidar_to_fakelidar(boxes3d_lidar):
    """Inverse of ``boxes3d_kitti_fakelidar_to_lidar``.

    Args:
        boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], center-based

    Returns:
        (N, 7) [x, y, z, w, l, h, r] in old lidar coords, z at bottom center
    """
    boxes = copy.deepcopy(boxes3d_lidar)
    dx, dy, dz = boxes[:, 3:4], boxes[:, 4:5], boxes[:, 5:6]
    heading = boxes[:, 6:7]
    # Drop z from the geometric center down to the bottom face.
    boxes[:, 2] -= dz[:, 0] / 2
    return np.concatenate([boxes[:, 0:3], dy, dx, dz, -heading - np.pi / 2], axis=-1)
def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)):
    """Grow each box's size by a fixed per-axis margin.

    Args:
        boxes3d: (N, 7+) [x, y, z, dx, dy, dz, heading], center-based
        extra_width: (extra_x, extra_y, extra_z) added to (dx, dy, dz)

    Returns:
        enlarged boxes as a torch tensor (same layout as the input)
    """
    boxes3d, _ = common_utils.check_numpy_to_torch(boxes3d)
    enlarged = boxes3d.clone()
    enlarged[:, 3:6] = enlarged[:, 3:6] + enlarged.new_tensor(extra_width)[None, :]
    return enlarged
def boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib):
    """Convert boxes from lidar coordinates to KITTI rect-camera coordinates.

    Args:
        boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], center-based
        calib: calibration object providing ``lidar_to_rect``

    Returns:
        (N, 7) [x, y, z, l, h, w, r] in rect camera coords
    """
    boxes = copy.deepcopy(boxes3d_lidar)
    l, w, h = boxes[:, 3:4], boxes[:, 4:5], boxes[:, 5:6]
    r = boxes[:, 6:7]

    # Camera boxes are bottom-centered: drop z before changing frames.
    xyz_lidar = boxes[:, 0:3]
    xyz_lidar[:, 2] -= h.reshape(-1) / 2
    xyz_cam = calib.lidar_to_rect(xyz_lidar)
    return np.concatenate([xyz_cam, l, h, w, -r - np.pi / 2], axis=-1)
def boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True):
    """
    :param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset
    :param bottom_center: whether y is on the bottom center of object
    :return: corners3d: (N, 8, 3)
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    """
    boxes_num = boxes3d.shape[0]
    l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
    # Corner offsets along x (length) and z (width) in the box frame;
    # the transposes turn the (8, N) stacks into (N, 8).
    x_corners = np.array(
        [l / 2.0, l / 2.0, -l / 2.0, -l / 2.0, l / 2.0, l / 2.0, -l / 2.0, -l / 2],
        dtype=np.float32,
    ).T
    z_corners = np.array(
        [w / 2.0, -w / 2.0, -w / 2.0, w / 2.0, w / 2.0, -w / 2.0, -w / 2.0, w / 2.0],
        dtype=np.float32,
    ).T
    if bottom_center:
        # Camera y points downward: bottom face at y = 0, top face at y = -h.
        y_corners = np.zeros((boxes_num, 8), dtype=np.float32)
        y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis=1)  # (N, 8)
    else:
        # Vertically centered box: faces at y = +h/2 (bottom) and y = -h/2 (top).
        y_corners = np.array(
            [
                h / 2.0,
                h / 2.0,
                h / 2.0,
                h / 2.0,
                -h / 2.0,
                -h / 2.0,
                -h / 2.0,
                -h / 2.0,
            ],
            dtype=np.float32,
        ).T
    ry = boxes3d[:, 6]
    zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(
        ry.size, dtype=np.float32
    )
    # Per-box rotation about the camera y axis, stacked as (3, 3, N) then
    # transposed to a batch of (N, 3, 3) matrices.
    rot_list = np.array(
        [
            [np.cos(ry), zeros, -np.sin(ry)],
            [zeros, ones, zeros],
            [np.sin(ry), zeros, np.cos(ry)],
        ]
    )  # (3, 3, N)
    R_list = np.transpose(rot_list, (2, 0, 1))  # (N, 3, 3)
    temp_corners = np.concatenate(
        (
            x_corners.reshape(-1, 8, 1),
            y_corners.reshape(-1, 8, 1),
            z_corners.reshape(-1, 8, 1),
        ),
        axis=2,
    )  # (N, 8, 3)
    # Corners are row vectors, so each box's corners are rotated by
    # right-multiplying with its rotation matrix.
    rotated_corners = np.matmul(temp_corners, R_list)  # (N, 8, 3)
    x_corners, y_corners, z_corners = (
        rotated_corners[:, :, 0],
        rotated_corners[:, :, 1],
        rotated_corners[:, :, 2],
    )
    # Translate the rotated offsets to each box's location.
    x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]
    x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)
    y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)
    z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)
    corners = np.concatenate(
        (x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2
    )
    return corners.astype(np.float32)
def boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None):
    """Project camera-frame 3D boxes to axis-aligned 2D image boxes.

    Args:
        boxes3d: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
        calib: calibration object providing ``rect_to_img``
        image_shape: optional (H, W); when given, boxes are clipped to it

    Returns:
        boxes2d_image: (N, 4) [x1, y1, x2, y2]
    """
    corners3d = boxes3d_to_corners3d_kitti_camera(boxes3d)
    pts_img, _ = calib.rect_to_img(corners3d.reshape(-1, 3))
    corners_uv = pts_img.reshape(-1, 8, 2)

    # Tight axis-aligned bound over the 8 projected corners.
    boxes2d_image = np.concatenate(
        [np.min(corners_uv, axis=1), np.max(corners_uv, axis=1)], axis=1
    )
    if image_shape is not None:
        max_x, max_y = image_shape[1] - 1, image_shape[0] - 1
        boxes2d_image[:, [0, 2]] = np.clip(boxes2d_image[:, [0, 2]], a_min=0, a_max=max_x)
        boxes2d_image[:, [1, 3]] = np.clip(boxes2d_image[:, [1, 3]], a_min=0, a_max=max_y)
    return boxes2d_image
def boxes_iou_normal(boxes_a, boxes_b):
    """Pairwise IoU between two sets of axis-aligned 2D boxes.

    Args:
        boxes_a: (N, 4) [x1, y1, x2, y2]
        boxes_b: (M, 4) [x1, y1, x2, y2]

    Returns:
        iou: (N, M)
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 4

    # Intersection extents, clamped at zero for disjoint pairs.
    inter_w = torch.clamp_min(
        torch.min(boxes_a[:, 2, None], boxes_b[None, :, 2])
        - torch.max(boxes_a[:, 0, None], boxes_b[None, :, 0]),
        min=0,
    )
    inter_h = torch.clamp_min(
        torch.min(boxes_a[:, 3, None], boxes_b[None, :, 3])
        - torch.max(boxes_a[:, 1, None], boxes_b[None, :, 1]),
        min=0,
    )
    intersection = inter_w * inter_h

    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    # Clamp the union to avoid division by zero on degenerate boxes.
    union = torch.clamp_min(area_a[:, None] + area_b[None, :] - intersection, min=1e-6)
    return intersection / union
def boxes3d_lidar_to_aligned_bev_boxes(boxes3d):
    """Approximate rotated 3D boxes with axis-aligned BEV rectangles.

    Args:
        boxes3d: (N, 7 + C) [x, y, z, dx, dy, dz, heading] in lidar coords

    Returns:
        aligned_bev_boxes: (N, 4) [x1, y1, x2, y2] in the same lidar coords
    """
    # Boxes rotated by less than 45 degrees keep (dx, dy); beyond that the
    # nearest axis-aligned approximation swaps the two extents.
    rot = common_utils.limit_period(boxes3d[:, 6], offset=0.5, period=np.pi).abs()
    bev_dims = torch.where(
        rot[:, None] < np.pi / 4, boxes3d[:, [3, 4]], boxes3d[:, [4, 3]]
    )
    centers = boxes3d[:, 0:2]
    return torch.cat((centers - bev_dims / 2, centers + bev_dims / 2), dim=1)
def boxes3d_nearest_bev_iou(boxes_a, boxes_b):
    """IoU between the axis-aligned BEV approximations of two 3D box sets.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        iou: (N, M)
    """
    bev_a = boxes3d_lidar_to_aligned_bev_boxes(boxes_a)
    bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
    return boxes_iou_normal(bev_a, bev_b)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/cal_quality_utils.py | Python | import copy
import os
import torch
import argparse
import pickle
import glob
from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval
import numpy as np
from pcdet.utils import common_utils, box_utils
from pcdet.ops.iou3d_nms import iou3d_nms_utils
class QualityMetric(object):
    """Accumulates pseudo-label quality statistics over frames.

    Tracks tp / fp / fn / gt counts and, for true positives, the summed
    translation, scale and orientation errors. Call ``update`` once per frame
    and ``statistics_result`` once at the end.

    Args:
        infos: optional dataset infos used by ``check`` to validate that the
            provided gt boxes match the dataset annotations.
    """

    def __init__(self, infos=None):
        # Running sums; *_err entries are normalized by tp in statistics_result().
        self.quality_metric = {
            "tp": 0,
            "fp": 0,
            "fn": 0,
            "gt": 0,
            "trans_err": 0,
            "scale_err": 0,
            "orient_err": 0,
        }
        self.infos = infos

    def check(self, gt_boxes, frame_id, idx):
        """Assert that gt_boxes match the 'Car' annotations of frame ``idx``."""
        assert self.infos[idx]["point_cloud"]["lidar_idx"] == frame_id
        assert (self.infos[idx]["annos"]["name"] == "Car").sum() == gt_boxes.shape[0]

    def update(
        self,
        pred_boxes,
        gt_boxes,
        iou_thresh=0.7,
        points=None,
        frame_id=None,
        idx=None,
        batch_dict=None,
    ):
        """Accumulate statistics for one frame of predictions and gt boxes."""
        # Zero-padded gt rows (x == 0) are placeholders, not real boxes.
        remain_mask = gt_boxes[:, 0] != 0
        gt_boxes = gt_boxes[remain_mask]
        self.check(gt_boxes, frame_id, idx)
        tp_boxes, tp_gt_boxes = self.count_tp_fp_fn_gt(
            pred_boxes, gt_boxes, iou_thresh=iou_thresh, points=points
        )
        if tp_boxes is not None and tp_boxes.shape[0] > 0:
            self.cal_tp_metric(tp_boxes, tp_gt_boxes, points=points)

    def count_tp_fp_fn_gt(self, pred_boxes, gt_boxes, iou_thresh=0.7, points=None):
        """Count tp/fp/fn/gt and return tp boxes with their matched gt boxes.

        Returns (None, None) when either side is empty.
        """
        assert gt_boxes.shape[1] == 7 and pred_boxes.shape[1] == 7
        self.quality_metric["gt"] += gt_boxes.shape[0]
        if gt_boxes.shape[0] == 0:
            # No gt: every prediction is a false positive.
            self.quality_metric["fp"] += pred_boxes.shape[0]
            return None, None
        elif pred_boxes.shape[0] == 0:
            # No predictions: every gt is a false negative.
            self.quality_metric["fn"] += gt_boxes.shape[0]
            return None, None

        pred_boxes, _ = common_utils.check_numpy_to_torch(pred_boxes)
        gt_boxes, _ = common_utils.check_numpy_to_torch(gt_boxes)
        if not (pred_boxes.is_cuda and gt_boxes.is_cuda):
            pred_boxes, gt_boxes = pred_boxes.cuda(), gt_boxes.cuda()
        iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(pred_boxes[:, :7], gt_boxes[:, :7])
        max_ious, match_idx = torch.max(iou_matrix, dim=1)
        assert max_ious.shape[0] == pred_boxes.shape[0]

        # Predictions whose best IoU clears the threshold are true positives.
        tp_mask = max_ious >= iou_thresh
        ntps = tp_mask.sum().item()
        self.quality_metric["tp"] += ntps
        self.quality_metric["fp"] += max_ious.shape[0] - ntps
        # gt boxes that are missed by every tp box are false negatives.
        self.quality_metric["fn"] += gt_boxes.shape[0] - ntps

        tp_boxes = pred_boxes[tp_mask]
        tp_gt_boxes = gt_boxes[match_idx[tp_mask]]
        if ntps > 0:
            scale_diff, debug_boxes = self.cal_scale_diff(tp_boxes, tp_gt_boxes)
            self.quality_metric["scale_err"] += scale_diff
        return tp_boxes.cpu().numpy(), tp_gt_boxes.cpu().numpy()

    @staticmethod
    def cal_scale_diff(tp_boxes, gt_boxes):
        """Summed (1 - IoU) after aligning each tp box's center and heading to
        its gt box, which isolates the size (scale) component of the error."""
        assert tp_boxes.shape[0] == gt_boxes.shape[0]
        aligned_tp_boxes = tp_boxes.detach().clone()
        # Shift the centers together.
        aligned_tp_boxes[:, 0:3] = gt_boxes[:, 0:3]
        # Align the headings.
        aligned_tp_boxes[:, 6] = gt_boxes[:, 6]
        iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(
            aligned_tp_boxes[:, 0:7], gt_boxes[:, 0:7]
        )
        max_ious, _ = torch.max(iou_matrix, dim=1)
        scale_diff = (1 - max_ious).sum().item()
        return scale_diff, aligned_tp_boxes.cpu().numpy()

    @staticmethod
    def cor_angle_range(angle):
        """Wrap angles (in place) into [-pi, pi].

        Args:
            angle: array of angles in radians

        Returns:
            the wrapped angle array
        """
        gt_pi_mask = angle > np.pi
        lt_minus_pi_mask = angle < -np.pi
        angle[gt_pi_mask] = angle[gt_pi_mask] - 2 * np.pi
        angle[lt_minus_pi_mask] = angle[lt_minus_pi_mask] + 2 * np.pi
        return angle

    def cal_angle_diff(self, angle1, angle2):
        """Smallest absolute difference between two angle arrays.

        Angles are measured from x to y, anti-clockwise.
        """
        angle1 = self.cor_angle_range(angle1)
        angle2 = self.cor_angle_range(angle2)
        diff = np.abs(angle1 - angle2)
        # Go the short way around the circle.
        gt_pi_mask = diff > np.pi
        diff[gt_pi_mask] = 2 * np.pi - diff[gt_pi_mask]
        return diff

    def cal_tp_metric(self, tp_boxes, gt_boxes, points=None):
        """Accumulate translation (BEV L2) and orientation errors of tp boxes."""
        assert tp_boxes.shape[0] == gt_boxes.shape[0]
        # L2 distance, xy only.
        center_distance = np.linalg.norm(tp_boxes[:, :2] - gt_boxes[:, :2], axis=1)
        self.quality_metric["trans_err"] += center_distance.sum()
        # Orientation difference.
        angle_diff = self.cal_angle_diff(tp_boxes[:, 6], gt_boxes[:, 6])
        assert angle_diff.sum() >= 0
        self.quality_metric["orient_err"] += angle_diff.sum()
        return

    def statistics_result(self, logger=None):
        """Normalize the tp errors and format all metrics as a report string.

        Note: mutates the accumulated sums, so call this only once.
        """
        self.quality_metric["trans_err"] /= self.quality_metric["tp"]
        self.quality_metric["scale_err"] /= self.quality_metric["tp"]
        self.quality_metric["orient_err"] /= self.quality_metric["tp"]
        # Fixed typo in the report header ("Metrif" -> "Metric").
        result = "=============Quality Metric of Pseudo labels=============\n"
        for key, value in self.quality_metric.items():
            result += "{} : {:.3f}\n".format(key, value)
        if logger is not None:
            logger.info(result)
            return
        else:
            return result
class QualityMetricPkl(QualityMetric):
    """QualityMetric variant for offline pkl evaluation.

    The gt boxes arrive pre-filtered, so neither the zero-padding removal nor
    the dataset cross-check of the parent ``update`` is performed.
    """

    def update(
        self,
        pred_boxes,
        gt_boxes,
        iou_thresh=0.7,
        points=None,
        frame_id=None,
        idx=None,
        batch_dict=None,
    ):
        """Accumulate statistics for one frame; gt boxes are used as given."""
        tp_boxes, tp_gt_boxes = self.count_tp_fp_fn_gt(
            pred_boxes, gt_boxes, iou_thresh=iou_thresh, points=points
        )
        has_tps = tp_boxes is not None and tp_boxes.shape[0] > 0
        if has_tps:
            self.cal_tp_metric(tp_boxes, tp_gt_boxes, points=points)
def get_quality_of_single_info(pred_infos, gt_infos, class_name):
    """Evaluate a single prediction pkl against the gt info pkl.

    Prints the official KITTI AP result and the pseudo-label quality metrics
    (tp/fp/fn counts and tp errors) for ``class_name``.

    Args:
        pred_infos: path to a pkl holding per-frame prediction dicts
        gt_infos: path to the dataset gt info pkl
        class_name: class to evaluate, e.g. "Car"
    """
    # Fix: use context managers so the pickle file handles are closed
    # deterministically instead of leaking until GC.
    with open(pred_infos, "rb") as f:
        pred_infos = pickle.load(f)
    with open(gt_infos, "rb") as f:
        gt_infos = pickle.load(f)

    gt_annos = [info["annos"] for info in gt_infos]
    ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
        gt_annos, pred_infos, current_classes=["Car"]
    )
    print(ap_result_str)

    assert len(pred_infos) == len(gt_annos)
    quality_metric = QualityMetricPkl()
    for pred_info, gt_anno in zip(pred_infos, gt_annos):
        # Predictions store bottom-centered z; shift to the box center.
        pred_boxes = pred_info["boxes_lidar"]
        pred_boxes[:, 2] += pred_boxes[:, 5] / 2

        gt_mask = gt_anno["name"] == class_name
        valid_num = gt_anno["gt_boxes_lidar"].shape[0]
        gt_boxes = gt_anno["gt_boxes_lidar"][gt_mask[:valid_num]]
        assert gt_boxes.shape[0] == gt_mask.sum()
        # Entries beyond the valid boxes must all be DontCare.
        assert (gt_anno["name"][valid_num:] == "DontCare").all()
        gt_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes)

        quality_metric.update(pred_boxes, gt_boxes)

    result = quality_metric.statistics_result()
    print(result)
def get_error_of_multiple_infos(
    info_path_list, gt_info_path, class_name, iou_thresh=0.7
):
    """Estimate tp errors over the gt boxes detected by ALL prediction pkls.

    Only gt boxes matched (IoU >= ``iou_thresh``) by every prediction file are
    kept, so the per-file error statistics are computed on a common object set.

    Args:
        info_path_list: list of prediction pkl paths
        gt_info_path: gt info pkl path
        class_name: class to evaluate, e.g. "Car"
        iou_thresh: IoU threshold for a gt box to count as detected
    """
    # Fix: close the pickle file handles deterministically.
    pred_info_list = []
    for cur_path in info_path_list:
        with open(cur_path, "rb") as f:
            pred_info_list.append(pickle.load(f))
    with open(gt_info_path, "rb") as f:
        gt_infos = pickle.load(f)
    gt_annos = [info["annos"] for info in gt_infos]

    num_infos = len(pred_info_list)
    quality_metric_list = [QualityMetricPkl() for _ in range(num_infos)]
    print(
        f"------Start to estimate the errors by considering multiple infos (iou_thresh={iou_thresh})..------"
    )
    for k, gt_anno in enumerate(gt_annos):
        gt_mask = gt_anno["name"] == class_name
        valid_num = gt_anno["gt_boxes_lidar"].shape[0]
        gt_boxes = gt_anno["gt_boxes_lidar"][gt_mask[:valid_num]]
        assert gt_boxes.shape[0] == gt_mask.sum()
        assert (gt_anno["name"][valid_num:] == "DontCare").all()
        gt_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes)
        gt_boxes, _ = common_utils.check_numpy_to_torch(gt_boxes)
        if gt_boxes.shape[0] == 0:
            continue

        # Fix: ``np.int`` was removed in NumPy >= 1.24; use an explicit dtype.
        gt_of_tp_mask = np.ones(gt_boxes.shape[0], dtype=np.int64)
        for info_idx in range(num_infos):
            # Predictions store bottom-centered z; shift to the box center.
            pred_boxes = pred_info_list[info_idx][k]["boxes_lidar"]
            pred_boxes[:, 2] += pred_boxes[:, 5] / 2
            if len(pred_boxes) == 0:
                # One detector missed everything: no common gt in this frame.
                gt_of_tp_mask[:] = 0
                break
            pred_boxes, _ = common_utils.check_numpy_to_torch(pred_boxes)
            iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(
                pred_boxes[:, :7].cuda(), gt_boxes[:, :7].cuda()
            )
            max_iou_of_gt, _ = torch.max(iou_matrix, dim=0)
            max_iou_of_gt = max_iou_of_gt.cpu().numpy()
            # A gt box stays "commonly detected" only if every file matches it.
            gt_of_tp_mask[max_iou_of_gt < iou_thresh] = 0

        intersect_gt_boxes = gt_boxes[gt_of_tp_mask > 0]
        for info_idx in range(num_infos):
            pred_boxes = pred_info_list[info_idx][k]["boxes_lidar"]
            quality_metric_list[info_idx].update(pred_boxes, intersect_gt_boxes)

    for info_idx in range(num_infos):
        result = quality_metric_list[info_idx].statistics_result()
        print(f"{result} for file: {info_path_list[info_idx]}")
def main():
    """CLI entry: evaluate one prediction pkl, or every pkl in a directory."""
    parser = argparse.ArgumentParser(description="arg parser")
    parser.add_argument("--pred_infos", type=str, default=None, help="pickle file")
    parser.add_argument("--gt_infos", type=str, default=None, help="pickle file")
    parser.add_argument("--class_name", type=str, nargs="+", default="Car", help="")
    parser.add_argument("--iou_thresh", type=float, default=0.7, help="")
    args = parser.parse_args()

    if not os.path.isdir(args.pred_infos):
        # Single pkl: standard AP + quality metrics.
        get_quality_of_single_info(args.pred_infos, args.gt_infos, args.class_name)
        return

    # Directory of pkls: cross-file common-gt error estimation.
    pkl_paths = glob.glob(os.path.join(args.pred_infos, "*.pkl"))
    get_error_of_multiple_infos(
        pkl_paths, args.gt_infos, args.class_name, iou_thresh=args.iou_thresh
    )


if __name__ == "__main__":
    main()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/calibration_kitti.py | Python | import numpy as np
def get_calib_from_file(calib_file):
    """Parse a KITTI calibration file into its projection/rotation matrices.

    Args:
        calib_file: path to a KITTI-style calibration text file

    Returns:
        dict with 'P2' (3, 4), 'P3' (3, 4), 'R0' (3, 3), 'Tr_velo2cam' (3, 4)
    """
    with open(calib_file) as f:
        lines = f.readlines()

    def parse_line(line_idx):
        # Each line looks like "<key>: v0 v1 ..."; drop the key token.
        return np.array(lines[line_idx].strip().split(" ")[1:], dtype=np.float32)

    return {
        "P2": parse_line(2).reshape(3, 4),
        "P3": parse_line(3).reshape(3, 4),
        "R0": parse_line(4).reshape(3, 3),
        "Tr_velo2cam": parse_line(5).reshape(3, 4),
    }
class Calibration(object):
    """KITTI camera/lidar calibration wrapper.

    Holds P2/P3 (camera projection matrices), R0 (rectification rotation) and
    Tr_velo2cam (lidar -> camera extrinsics), and converts points between
    lidar, rectified-camera and image coordinates.
    """

    def __init__(self, calib_file):
        # Accept either a file path or an already-parsed calibration dict.
        if not isinstance(calib_file, dict):
            calib = get_calib_from_file(calib_file)
        else:
            calib = calib_file

        self.P2 = calib["P2"]  # 3 x 4
        self.R0 = calib["R0"]  # 3 x 3
        self.V2C = calib["Tr_velo2cam"]  # 3 x 4

        # Camera intrinsics and extrinsics derived from P2.
        self.cu = self.P2[0, 2]
        self.cv = self.P2[1, 2]
        self.fu = self.P2[0, 0]
        self.fv = self.P2[1, 1]
        self.tx = self.P2[0, 3] / (-self.fu)
        self.ty = self.P2[1, 3] / (-self.fv)

    def cart_to_hom(self, pts):
        """Append a homogeneous 1-column to Cartesian points.

        :param pts: (N, 3 or 2)
        :return pts_hom: (N, 4 or 3)
        """
        pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
        return pts_hom

    def rect_to_lidar(self, pts_rect):
        """Transform points from rectified camera coords to lidar coords.

        :param pts_rect: (N, 3) points in rectified camera coords
        :return pts_lidar: (N, 3) points in lidar coords
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)  # (N, 4)
        # Extend R0 and V2C to homogeneous 4x4 matrices so the combined
        # forward transform (R0 @ V2C) can be inverted in one step.
        R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32)))  # (3, 4)
        R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        R0_ext[3, 3] = 1
        V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        V2C_ext[3, 3] = 1

        pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
        return pts_lidar[:, 0:3]

    def lidar_to_rect(self, pts_lidar):
        """Transform points from lidar coords to rectified camera coords.

        :param pts_lidar: (N, 3)
        :return pts_rect: (N, 3)
        """
        pts_lidar_hom = self.cart_to_hom(pts_lidar)
        pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
        # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
        return pts_rect

    def rect_to_img(self, pts_rect):
        """Project rectified-camera points onto the image plane.

        :param pts_rect: (N, 3)
        :return pts_img: (N, 2), plus per-point depth in rect camera coords
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)
        pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
        # Perspective division by the homogeneous depth.
        pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T  # (N, 2)
        pts_rect_depth = (
            pts_2d_hom[:, 2] - self.P2.T[3, 2]
        )  # depth in rect camera coord
        return pts_img, pts_rect_depth

    def lidar_to_img(self, pts_lidar):
        """Project lidar points onto the image plane (lidar -> rect -> image).

        :param pts_lidar: (N, 3)
        :return pts_img: (N, 2), plus per-point rect-camera depth
        """
        pts_rect = self.lidar_to_rect(pts_lidar)
        pts_img, pts_depth = self.rect_to_img(pts_rect)
        return pts_img, pts_depth

    def img_to_rect(self, u, v, depth_rect):
        """Back-project image pixels with known depth to rect camera coords.

        :param u: (N) pixel column coordinates
        :param v: (N) pixel row coordinates
        :param depth_rect: (N) depths in rect camera coords
        :return: (N, 3) points in rect camera coords
        """
        # Inverse pinhole model, including the baseline offsets tx/ty.
        x = ((u - self.cu) * depth_rect) / self.fu + self.tx
        y = ((v - self.cv) * depth_rect) / self.fv + self.ty
        pts_rect = np.concatenate(
            (x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1
        )
        return pts_rect

    def corners3d_to_img_boxes(self, corners3d):
        """Project 3D box corners to 2D image boxes.

        :param corners3d: (N, 8, 3) corners in rect coordinate
        :return: boxes: (N, 4) [x1, y1, x2, y2] in image coordinates
        :return: boxes_corner: (N, 8, 2) projected [xi, yi] per corner
        """
        sample_num = corners3d.shape[0]
        corners3d_hom = np.concatenate(
            (corners3d, np.ones((sample_num, 8, 1))), axis=2
        )  # (N, 8, 4)

        img_pts = np.matmul(corners3d_hom, self.P2.T)  # (N, 8, 3)

        # Perspective division, then a tight axis-aligned bound per box.
        x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
        x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
        x2, y2 = np.max(x, axis=1), np.max(y, axis=1)

        boxes = np.concatenate(
            (
                x1.reshape(-1, 1),
                y1.reshape(-1, 1),
                x2.reshape(-1, 1),
                y2.reshape(-1, 1),
            ),
            axis=1,
        )
        boxes_corner = np.concatenate(
            (x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2
        )
        return boxes, boxes_corner
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/common_utils.py | Python | import copy
import logging
import os
import pickle
import random
import shutil
import subprocess
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def seed_everything(seed=0):
    """Seed python, numpy and torch (all CUDA devices) RNGs for reproducibility.

    Args:
        seed: seed applied to every RNG. Defaults to 0, matching the previous
            fixed-seed behavior, so existing callers are unaffected.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def check_numpy_to_torch(x):
    """Promote numpy arrays/scalars to float32 torch tensors.

    Returns:
        (value, was_numpy): the (possibly converted) value plus a flag telling
        the caller whether the result should be converted back to numpy.
    """
    if isinstance(x, np.ndarray):
        return torch.from_numpy(x).float(), True
    if isinstance(x, (np.float64, np.float32)):
        # Scalars become 1-element tensors.
        return torch.tensor([x]).float(), True
    return x, False
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap values into [-offset * period, (1 - offset) * period)."""
    val, is_numpy = check_numpy_to_torch(val)
    wrapped = val - period * torch.floor(val / period + offset)
    return wrapped.numpy() if is_numpy else wrapped
def drop_info_with_name(info, name):
    """Return a copy of ``info`` with entries whose 'name' equals ``name`` removed.

    All values of ``info`` are assumed to be index-parallel arrays.
    """
    keep = [idx for idx, entry in enumerate(info["name"]) if entry != name]
    return {key: value[keep] for key, value in info.items()}
def rotate_points_along_z(points, angle):
    """
    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        rotated points, same container type (numpy/torch) as the input
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)
    cosa = torch.cos(angle)
    sina = torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # Per-batch rotation matrices about z, built row-major; points are row
    # vectors, so they are multiplied on the left (p' = p @ R).
    rot_matrix = (
        torch.stack((cosa, sina, zeros, -sina, cosa, zeros, zeros, zeros, ones), dim=1)
        .view(-1, 3, 3)
        .float()
    )
    # Rotate xyz only; extra feature channels are passed through unchanged.
    points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
    points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
    return points_rot.numpy() if is_numpy else points_rot
def mask_points_by_range(points, limit_range):
    """Boolean mask of points inside the BEV (x/y only) extent of ``limit_range``.

    Note: the z entries of ``limit_range`` are not checked here.
    """
    in_x = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3])
    in_y = (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])
    return in_x & in_y
def get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):
    """Convert integer voxel coordinates to metric voxel-center positions.

    Args:
        voxel_coords: (N, 3) integer coords ordered (z, y, x)
        downsample_times: stride of the feature map relative to raw voxels
        voxel_size: raw voxel size per axis (x, y, z)
        point_cloud_range: [minx, miny, minz, maxx, maxy, maxz]

    Returns:
        (N, 3) voxel centers in (x, y, z) order
    """
    assert voxel_coords.shape[1] == 3
    xyz_coords = voxel_coords[:, [2, 1, 0]].float()  # (z, y, x) -> (x, y, z)
    scaled_size = (
        torch.tensor(voxel_size, device=xyz_coords.device).float() * downsample_times
    )
    origin = torch.tensor(point_cloud_range[0:3], device=xyz_coords.device).float()
    # +0.5 moves from the voxel's min corner to its center.
    return (xyz_coords + 0.5) * scaled_size + origin
def create_logger(log_file=None, rank=0, log_level=logging.INFO):
    """Build a console (and optional file) logger.

    Non-rank-0 processes are restricted to ERROR so only the main process
    emits regular logs.
    """
    effective_level = log_level if rank == 0 else "ERROR"
    logger = logging.getLogger(__name__)
    logger.setLevel(effective_level)
    formatter = logging.Formatter(
        "[%(asctime)s %(filename)s %(lineno)d " "%(levelname)5s] %(message)s"
    )

    console = logging.StreamHandler()
    console.setLevel(effective_level)
    console.setFormatter(formatter)
    logger.addHandler(console)

    if log_file is not None:
        file_handler = logging.FileHandler(filename=log_file)
        file_handler.setLevel(effective_level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    return logger
def set_random_seed(seed):
    """Seed all RNGs and force deterministic cuDNN behavior."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seed_fn in seeders:
        seed_fn(seed)
    # Trade cuDNN autotuning for run-to-run reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def keep_arrays_by_name(gt_names, used_classes):
    """Indices of entries in ``gt_names`` whose class is in ``used_classes``."""
    keep = [idx for idx, cls_name in enumerate(gt_names) if cls_name in used_classes]
    return np.array(keep, dtype=np.int64)
def init_dist_slurm(tcp_port, local_rank, backend="nccl"):
    """Initialize distributed training under SLURM.

    Derives the master address and this process's rank from SLURM environment
    variables, then initializes the default process group
    (modified from https://github.com/open-mmlab/mmdetection).

    Args:
        tcp_port: TCP port exported as MASTER_PORT
        local_rank: unused; kept for signature compatibility
        backend: torch.distributed backend name

    Returns:
        (total_gpus, rank) of the initialized process group
    """
    proc_id = int(os.environ["SLURM_PROCID"])
    ntasks = int(os.environ["SLURM_NTASKS"])
    node_list = os.environ["SLURM_NODELIST"]

    # Bind this task to a GPU, round-robin over the node's devices.
    torch.cuda.set_device(proc_id % torch.cuda.device_count())

    # The first hostname of the allocation acts as the rendezvous master.
    addr = subprocess.getoutput(
        "scontrol show hostname {} | head -n1".format(node_list)
    )
    os.environ["MASTER_PORT"] = str(tcp_port)
    os.environ["MASTER_ADDR"] = addr
    os.environ["WORLD_SIZE"] = str(ntasks)
    os.environ["RANK"] = str(proc_id)
    dist.init_process_group(backend=backend)

    return dist.get_world_size(), dist.get_rank()
def init_dist_pytorch(tcp_port, local_rank, backend="nccl"):
    """Initialize distributed training from torchrun-style environment variables.

    Expects RANK / WORLD_SIZE / LOCAL_RANK to be set (e.g. by ``torchrun``)
    and exits otherwise.

    Args:
        tcp_port: unused; kept for signature compatibility
        local_rank: unused; kept for signature compatibility
        backend: unused; NCCL is always used here

    Returns:
        (world_size, rank) of the initialized process group
    """
    # Fix: ``sys`` is not imported at module level, so the error path below
    # raised NameError instead of exiting cleanly.
    import sys

    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        rank = int(os.environ["RANK"])
        world_size = int(os.environ["WORLD_SIZE"])
        gpu = int(os.environ["LOCAL_RANK"])
    else:
        print("Does not support training without GPU.")
        sys.exit(1)

    dist.init_process_group(
        backend="nccl",
        init_method="env://",
        world_size=world_size,
        rank=rank,
    )
    torch.cuda.set_device(gpu)
    return world_size, rank
def get_dist_info():
    """Return (rank, world_size), falling back to (0, 1) when
    torch.distributed is unavailable or not yet initialized."""
    if torch.__version__ < "1.0":
        # Very old torch exposed the initialization state as a private flag.
        initialized = dist._initialized
    else:
        initialized = dist.is_available() and dist.is_initialized()

    if not initialized:
        return 0, 1
    return dist.get_rank(), dist.get_world_size()
def merge_results_dist(result_part, size, tmpdir):
    """Gather per-rank result lists to rank 0 via pickle files in ``tmpdir``.

    Every rank dumps its partial results; after a barrier, rank 0 reads all
    parts back, interleaves them sample-wise, truncates to ``size`` and
    removes ``tmpdir``.

    Args:
        result_part: this rank's list of results
        size: total number of samples to keep after merging
        tmpdir: shared directory reachable by all ranks

    Returns:
        merged list of results on rank 0, ``None`` on every other rank
    """
    rank, world_size = get_dist_info()
    os.makedirs(tmpdir, exist_ok=True)

    dist.barrier()
    # Fix: close the file handle explicitly instead of relying on GC.
    with open(os.path.join(tmpdir, "result_part_{}.pkl".format(rank)), "wb") as f:
        pickle.dump(result_part, f)
    dist.barrier()

    if rank != 0:
        return None

    part_list = []
    for i in range(world_size):
        part_file = os.path.join(tmpdir, "result_part_{}.pkl".format(i))
        with open(part_file, "rb") as f:
            part_list.append(pickle.load(f))

    # Interleave the parts sample-wise, then drop any trailing padding.
    ordered_results = []
    for res in zip(*part_list):
        ordered_results.extend(list(res))
    ordered_results = ordered_results[:size]

    shutil.rmtree(tmpdir)
    return ordered_results
def add_prefix_to_dict(dict, prefix):
    """Prepend ``prefix`` to every key of ``dict`` in place and return it.

    Note: the parameter shadows the ``dict`` builtin; the name is kept for
    compatibility with existing callers.
    """
    original_keys = list(dict.keys())
    for key in original_keys:
        dict[prefix + key] = dict.pop(key)
    return dict
class DataReader(object):
    """Endlessly yields batches from a dataloader, restarting it (and advancing
    the distributed sampler's epoch) whenever it is exhausted."""

    def __init__(self, dataloader, sampler):
        self.dataloader = dataloader
        self.sampler = sampler  # may be None for non-distributed training

    def construct_iter(self):
        """(Re)create the underlying iterator; call before ``read_data``."""
        self.dataloader_iter = iter(self.dataloader)

    def set_cur_epoch(self, cur_epoch):
        # Tracked so a restarted sampler reshuffles for the right epoch.
        self.cur_epoch = cur_epoch

    def read_data(self):
        """Return the next batch, transparently restarting on exhaustion."""
        try:
            # Fix: iterators have no ``.next()`` in Python 3 / recent PyTorch;
            # use the builtin ``next()`` instead.
            return next(self.dataloader_iter)
        except StopIteration:
            # Fix: catch StopIteration only, so real errors are not
            # silently swallowed by a bare ``except``.
            if self.sampler is not None:
                self.sampler.set_epoch(self.cur_epoch)
            self.construct_iter()
            return next(self.dataloader_iter)
class AverageMeter(object):
    """Tracks the most recent value plus a running sum/count/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # weighted sum of all values
        self.count = 0  # total weight seen so far

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` (e.g. the batch size)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def set_bn_train(m):
    """Switch a module to train mode if it is any BatchNorm variant.

    Intended to be used with ``nn.Module.apply``.
    """
    if "BatchNorm" in m.__class__.__name__:
        m.train()
class NAverageMeter(object):
    """A fixed-size collection of AverageMeters that can be updated one at a
    time (by index) or all at once (from a list or another NAverageMeter)."""

    def __init__(self, n):
        self.n = n
        self.meters = [AverageMeter() for _ in range(n)]

    def update(self, val, index=None, attribute="avg"):
        """Update the meters.

        Accepted combinations:
        - list of length n, no index: element-wise update
        - NAverageMeter, no index: update from ``attribute`` of each sub-meter
        - scalar plus index: update that single meter
        """
        if index is not None and not isinstance(val, list):
            self.meters[index].update(val)
        elif isinstance(val, list) and index is None:
            assert len(val) == self.n
            for meter, item in zip(self.meters, val):
                meter.update(item)
        elif isinstance(val, NAverageMeter) and index is None:
            assert val.n == self.n
            for meter, other in zip(self.meters, val.meters):
                meter.update(getattr(other, attribute))
        else:
            raise ValueError

    def aggregate_result(self):
        """Format all averages as '(a,b,...,)'."""
        parts = ["{:.3f},".format(meter.avg) for meter in self.meters]
        return "(" + "".join(parts) + ")"
def calculate_gradient_norm(model):
    """L2 norm over all parameter gradients of ``model``.

    Every parameter is assumed to already have a ``.grad``.
    """
    squared_sum = 0
    for param in model.parameters():
        squared_sum += param.grad.data.norm(2).item() ** 2
    return squared_sum ** 0.5
def mask_dict(result_dict, mask):
    """Apply ``mask`` to every value of a deep copy of ``result_dict``.

    The input dict is left untouched; all values must support ``value[mask]``.
    """
    masked = copy.deepcopy(result_dict)
    for key in masked:
        masked[key] = masked[key][mask]
    return masked
def concatenate_array_inside_dict(merged_dict, result_dict):
    """Concatenate each array of ``result_dict`` onto ``merged_dict``.

    Missing keys are initialized with a deep copy; existing keys are extended
    via ``np.concatenate``. Returns the (mutated) ``merged_dict``.
    """
    for key, val in result_dict.items():
        if key in merged_dict:
            merged_dict[key] = np.concatenate([merged_dict[key], copy.deepcopy(val)])
        else:
            merged_dict[key] = copy.deepcopy(val)
    return merged_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/commu_utils.py | Python | """
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
deeply borrow from maskrcnn-benchmark
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
    """Number of distributed processes; 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """Rank of this process; 0 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """True only on rank 0 (the process that should log / write files)."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """Barrier across all processes.

    A no-op unless distributed training is initialized with more than one
    process.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        # Single process: nothing to gather.
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # Strip each rank's padding using its true size before unpickling.
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # Stack into one tensor so a single reduce covers every entry.
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/loss_utils.py | Python | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.utils import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
    """Sigmoid focal cross-entropy loss (Lin et al., "Focal Loss for Dense
    Object Detection")."""

    def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
        """
        Args:
            gamma: focusing parameter that down-weights easy examples.
            alpha: balancing parameter between positive and negative examples.
        """
        super(SigmoidFocalClassificationLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    @staticmethod
    def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
        """Numerically stable element-wise sigmoid cross entropy.

        Mirrors tf.nn.sigmoid_cross_entropy_with_logits:
        max(x, 0) - x * z + log(1 + exp(-|x|)).

        Args:
            input: (B, #anchors, #classes) predicted logits
            target: (B, #anchors, #classes) one-hot classification targets

        Returns:
            (B, #anchors, #classes) loss without reduction
        """
        stable_log_term = torch.log1p(torch.exp(-torch.abs(input)))
        return torch.clamp(input, min=0) - input * target + stable_log_term

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) predicted logits
            target: (B, #anchors, #classes) one-hot classification targets
            weights: (B, #anchors) anchor-wise weights

        Returns:
            (B, #anchors, #classes) weighted focal loss
        """
        probs = torch.sigmoid(input)
        # Balance positives (alpha) against negatives (1 - alpha).
        alpha_term = target * self.alpha + (1.0 - target) * (1.0 - self.alpha)
        # p_t here is the probability mass on the wrong side, so easy
        # examples (small p_t) are down-weighted by p_t ** gamma.
        p_t = target * (1.0 - probs) + (1.0 - target) * probs
        modulating_factor = alpha_term * torch.pow(p_t, self.gamma)

        loss = modulating_factor * self.sigmoid_cross_entropy_with_logits(
            input, target
        )

        # Broadcast anchor-wise weights over the class dimension when needed.
        if weights.dim() == 2 or (weights.dim() == 1 and target.dim() == 2):
            weights = weights.unsqueeze(-1)
        assert weights.dim() == loss.dim()
        return loss * weights
class WeightedSmoothL1Loss(nn.Module):
    """
    Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
    https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
                  | 0.5 * x ** 2 / beta   if abs(x) < beta
     smoothl1(x) = |
                  | abs(x) - 0.5 * beta   otherwise,
    where x = input - target.
    """

    def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
        """
        Args:
            beta: Scalar float.
                L1 to L2 change point.
                For beta values < 1e-5, L1 loss is computed.
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedSmoothL1Loss, self).__init__()
        self.beta = beta
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            # Bug fix: forward() checks `self.code_weights is not None`, so the
            # attribute must exist even without code weights; previously the
            # default constructor left it unset and forward() raised
            # AttributeError.
            self.code_weights = None

    @staticmethod
    def smooth_l1_loss(diff, beta):
        """Element-wise smooth-L1 of `diff`; degenerates to plain L1 when
        beta < 1e-5 (avoids division by a near-zero beta)."""
        if beta < 1e-5:
            loss = torch.abs(diff)
        else:
            n = torch.abs(diff)
            loss = torch.where(n < beta, 0.5 * n**2 / beta, n - 0.5 * beta)

        return loss

    def forward(
        self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None
    ):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.

        Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted smooth l1 loss without reduction.
        """
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets

        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)

        loss = self.smooth_l1_loss(diff, self.beta)

        # anchor-wise weighting
        if weights is not None:
            assert (
                weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            )
            loss = loss * weights.unsqueeze(-1)

        return loss
class WeightedL1Loss(nn.Module):
    """Code-wise weighted L1 loss without reduction."""

    def __init__(self, code_weights: list = None):
        """
        Args:
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedL1Loss, self).__init__()
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            # Bug fix: forward() checks `self.code_weights is not None`, so the
            # attribute must exist even without code weights; previously the
            # default constructor left it unset and forward() raised
            # AttributeError.
            self.code_weights = None

    def forward(
        self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None
    ):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.

        Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted L1 loss without reduction.
        """
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets

        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)

        loss = torch.abs(diff)

        # anchor-wise weighting
        if weights is not None:
            assert (
                weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            )
            loss = loss * weights.unsqueeze(-1)

        return loss
class WeightedCrossEntropyLoss(nn.Module):
    """
    Transform input to fit the formation of PyTorch official cross entropy loss
    with anchor-wise weighting.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
            target: (B, #anchors, #classes) float tensor.
                One-hot classification targets.
            weights: (B, #anchors) float tensor.
                Anchor-wise weights.

        Returns:
            loss: (B, #anchors) float tensor.
                Weighted cross entropy loss without reduction
        """
        # F.cross_entropy wants (B, #classes, #anchors) logits and
        # class-index targets, so permute and argmax the one-hot encoding.
        logits = input.permute(0, 2, 1)
        class_indices = target.argmax(dim=-1)
        per_anchor_loss = F.cross_entropy(logits, class_indices, reduction="none")
        return per_anchor_loss * weights
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
    """
    Corner-alignment loss between predicted and GT boxes, scored against the
    better of the GT heading and its pi-flipped twin.

    Args:
        pred_bbox3d: (N, 7) float Tensor.
        gt_bbox3d: (N, 7) float Tensor.

    Returns:
        corner_loss: (N) float Tensor.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]

    pred_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
    gt_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)

    # A box rotated by pi describes the same cuboid with opposite heading;
    # take whichever orientation matches the prediction better.
    flipped_gt = gt_bbox3d.clone()
    flipped_gt[:, 6] += np.pi
    gt_corners_flip = box_utils.boxes_to_corners_3d(flipped_gt)

    # (N, 8): per-corner distance under the better of the two headings.
    corner_dist = torch.min(
        torch.norm(pred_corners - gt_corners, dim=2),
        torch.norm(pred_corners - gt_corners_flip, dim=2),
    )

    # Smooth-L1 on the distances, averaged over the 8 corners -> (N,).
    return WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0).mean(dim=1)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/memory_ensemble_utils.py | Python | import torch
import numpy as np
from scipy.optimize import linear_sum_assignment
from pcdet.utils import common_utils
from pcdet.ops.iou3d_nms import iou3d_nms_utils
from pcdet.models.model_utils.model_nms_utils import class_agnostic_nms
def consistency_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
    """
    Merge previous (a) and current (b) pseudo boxes by greedy IoU matching:
    matched pairs keep the higher-confidence box (or a score-weighted blend),
    unmatched previous boxes are aged by memory voting, and unmatched current
    boxes are appended as new detections.

    Args:
        gt_infos_a:
            gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
            cls_scores: (N)
            iou_scores: (N)
            memory_counter: (N)

        gt_infos_b:
            gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
            cls_scores: (M)
            iou_scores: (M)
            memory_counter: (M)

        memory_ensemble_cfg: config node with IOU_THRESH, optional WEIGHTED and
            MEMORY_VOTING (IGNORE_THRESH / RM_THRESH) settings.

    Returns:
        gt_infos:
            gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
            cls_scores: (K)
            iou_scores: (K)
            memory_counter: (K)
    """
    gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a["gt_boxes"])
    gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b["gt_boxes"])
    # IoU computation runs on GPU (boxes_iou3d_gpu requires CUDA tensors).
    gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()

    # The merged result is built by mutating copies of the "previous" infos.
    new_gt_box = gt_infos_a["gt_boxes"]
    new_cls_scores = gt_infos_a["cls_scores"]
    new_iou_scores = gt_infos_a["iou_scores"]
    new_memory_counter = gt_infos_a["memory_counter"]

    # if gt_box_b or gt_box_a don't have any predictions
    if gt_box_b.shape[0] == 0:
        # No current predictions: every previous box "disappeared" once more.
        gt_infos_a["memory_counter"] += 1
        return gt_infos_a
    elif gt_box_a.shape[0] == 0:
        return gt_infos_b

    # get ious
    iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7]).cpu()

    # Greedy matching: for each previous box, its best-overlapping current box.
    ious, match_idx = torch.max(iou_matrix, dim=1)
    ious, match_idx = ious.numpy(), match_idx.numpy()
    gt_box_a, gt_box_b = gt_box_a.cpu().numpy(), gt_box_b.cpu().numpy()

    # (N, 2) rows of [index into a, best-match index into b].
    match_pairs_idx = np.concatenate(
        (
            np.array(list(range(gt_box_a.shape[0]))).reshape(-1, 1),
            match_idx.reshape(-1, 1),
        ),
        axis=1,
    )

    #########################################################
    # filter matched pair boxes by IoU
    # if matching succeeded, use boxes with higher confidence
    #########################################################
    iou_mask = ious >= memory_ensemble_cfg.IOU_THRESH

    matching_selected = match_pairs_idx[iou_mask]
    gt_box_selected_a = gt_box_a[matching_selected[:, 0]]
    gt_box_selected_b = gt_box_b[matching_selected[:, 1]]

    # assign boxes with higher confidence
    # Column 8 holds the box confidence score.
    score_mask = gt_box_selected_a[:, 8] < gt_box_selected_b[:, 8]

    if memory_ensemble_cfg.get("WEIGHTED", None):
        # Blend the matched pair: geometry is a convex combination weighted by
        # a's relative score; the merged score interpolates between min & max.
        weight = gt_box_selected_a[:, 8] / (
            gt_box_selected_a[:, 8] + gt_box_selected_b[:, 8]
        )
        min_scores = np.minimum(gt_box_selected_a[:, 8], gt_box_selected_b[:, 8])
        max_scores = np.maximum(gt_box_selected_a[:, 8], gt_box_selected_b[:, 8])
        weighted_score = weight * (max_scores - min_scores) + min_scores
        new_gt_box[matching_selected[:, 0], :7] = (
            weight.reshape(-1, 1) * gt_box_selected_a[:, :7]
            + (1 - weight.reshape(-1, 1)) * gt_box_selected_b[:, :7]
        )
        new_gt_box[matching_selected[:, 0], 8] = weighted_score
    else:
        # Hard selection: replace a's box with b's wherever b scored higher.
        new_gt_box[matching_selected[score_mask, 0], :] = gt_box_selected_b[
            score_mask, :
        ]

    if gt_infos_a["cls_scores"] is not None:
        new_cls_scores[matching_selected[score_mask, 0]] = gt_infos_b["cls_scores"][
            matching_selected[score_mask, 1]
        ]
    if gt_infos_a["iou_scores"] is not None:
        new_iou_scores[matching_selected[score_mask, 0]] = gt_infos_b["iou_scores"][
            matching_selected[score_mask, 1]
        ]

    # for matching pairs, clear the ignore counter
    new_memory_counter[matching_selected[:, 0]] = 0

    #######################################################
    # If previous bboxes disappeared: ious <= 0.1
    #######################################################
    disappear_idx = (ious < memory_ensemble_cfg.IOU_THRESH).nonzero()[0]

    if (
        memory_ensemble_cfg.get("MEMORY_VOTING", None)
        and memory_ensemble_cfg.MEMORY_VOTING.ENABLED
    ):
        # Memory voting: age unmatched boxes, mark long-unmatched ones as
        # ignored (label -1), and drop those unmatched for RM_THRESH rounds.
        new_memory_counter[disappear_idx] += 1
        # ignore gt_boxes that ignore_count == IGNORE_THRESH
        ignore_mask = (
            new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
        )
        new_gt_box[ignore_mask, 7] = -1

        # remove gt_boxes that ignore_count >= RM_THRESH
        remain_mask = new_memory_counter < memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
        new_gt_box = new_gt_box[remain_mask]
        new_memory_counter = new_memory_counter[remain_mask]
        if gt_infos_a["cls_scores"] is not None:
            new_cls_scores = new_cls_scores[remain_mask]
        if gt_infos_a["iou_scores"] is not None:
            new_iou_scores = new_iou_scores[remain_mask]

    # Add new appear boxes
    # Reverse direction: current boxes with no good match in the previous set
    # are treated as newly appeared objects and appended.
    ious_b2a, match_idx_b2a = torch.max(iou_matrix, dim=0)
    ious_b2a, match_idx_b2a = ious_b2a.numpy(), match_idx_b2a.numpy()
    newboxes_idx = (ious_b2a < memory_ensemble_cfg.IOU_THRESH).nonzero()[0]
    if newboxes_idx.shape[0] != 0:
        new_gt_box = np.concatenate(
            (new_gt_box, gt_infos_b["gt_boxes"][newboxes_idx, :]), axis=0
        )
        if gt_infos_a["cls_scores"] is not None:
            new_cls_scores = np.concatenate(
                (new_cls_scores, gt_infos_b["cls_scores"][newboxes_idx]), axis=0
            )
        if gt_infos_a["iou_scores"] is not None:
            new_iou_scores = np.concatenate(
                (new_iou_scores, gt_infos_b["iou_scores"][newboxes_idx]), axis=0
            )
        new_memory_counter = np.concatenate(
            (new_memory_counter, gt_infos_b["memory_counter"][newboxes_idx]), axis=0
        )

    new_gt_infos = {
        "gt_boxes": new_gt_box,
        "cls_scores": new_cls_scores if gt_infos_a["cls_scores"] is not None else None,
        "iou_scores": new_iou_scores if gt_infos_a["iou_scores"] is not None else None,
        "memory_counter": new_memory_counter,
    }

    return new_gt_infos
def nms_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
    """
    Merge previous (a) and current (b) pseudo boxes by pooling both sets and
    running class-agnostic NMS, optionally followed by memory voting on the
    previous boxes.

    Args:
        gt_infos_a:
            gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
            cls_scores: (N)
            iou_scores: (N)
            memory_counter: (N)

        gt_infos_b:
            gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
            cls_scores: (M)
            iou_scores: (M)
            memory_counter: (M)

        memory_ensemble_cfg: config node with NMS_CONFIG and optional
            MEMORY_VOTING (IGNORE_THRESH / RM_THRESH) settings.

    Returns:
        gt_infos:
            gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
            cls_scores: (K)
            iou_scores: (K)
            memory_counter: (K)
    """
    gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a["gt_boxes"])
    gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b["gt_boxes"])

    # Degenerate cases: one of the two sets is empty.
    if gt_box_b.shape[0] == 0:
        if (
            memory_ensemble_cfg.get("MEMORY_VOTING", None)
            and memory_ensemble_cfg.MEMORY_VOTING.ENABLED
        ):
            gt_infos_a["memory_counter"] += 1
        return gt_infos_a
    elif gt_box_a.shape[0] == 0:
        return gt_infos_b

    gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()

    # Pool both sets; indices < len(a) refer to previous boxes.
    gt_boxes = torch.cat((gt_box_a, gt_box_b), dim=0)

    if gt_infos_a["cls_scores"] is not None:
        new_cls_scores = np.concatenate(
            (gt_infos_a["cls_scores"], gt_infos_b["cls_scores"]), axis=0
        )
    if gt_infos_a["iou_scores"] is not None:
        new_iou_scores = np.concatenate(
            (gt_infos_a["iou_scores"], gt_infos_b["iou_scores"]), axis=0
        )

    new_memory_counter = np.concatenate(
        (gt_infos_a["memory_counter"], gt_infos_b["memory_counter"]), axis=0
    )

    # NMS over the pooled set, scored by the last column (confidence).
    selected, selected_scores = class_agnostic_nms(
        box_scores=gt_boxes[:, -1],
        box_preds=gt_boxes[:, :7],
        nms_config=memory_ensemble_cfg.NMS_CONFIG,
    )

    gt_boxes = gt_boxes.cpu().numpy()
    if isinstance(selected, list):
        selected = np.array(selected)
    else:
        selected = selected.cpu().numpy()

    if (
        memory_ensemble_cfg.get("MEMORY_VOTING", None)
        and memory_ensemble_cfg.MEMORY_VOTING.ENABLED
    ):
        # IoU of each previous box against its best current match, used to
        # decide which previous boxes were confirmed vs. disappeared.
        iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7])
        ious, _ = torch.max(iou_matrix, dim=1)
        ious = ious.cpu().numpy()

        gt_box_a_size = gt_box_a.shape[0]
        # Surviving previous boxes with a strong current match: reset counter.
        selected_a = selected[selected < gt_box_a_size]
        matched_mask = ious[selected_a] > memory_ensemble_cfg.NMS_CONFIG.NMS_THRESH
        match_idx = selected_a[matched_mask]
        new_memory_counter[match_idx] = 0

        # for previous bboxes disappeared
        disappear_idx = (ious < memory_ensemble_cfg.NMS_CONFIG.NMS_THRESH).nonzero()[0]
        new_memory_counter[disappear_idx] += 1
        # ignore gt_boxes that ignore_count == IGNORE_THRESH
        ignore_mask = (
            new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
        )
        # Label column (7) set to -1 marks the box as ignored during training.
        gt_boxes[ignore_mask, 7] = -1

        # remove gt_boxes that ignore_count >= RM_THRESH
        rm_idx = (
            new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
        ).nonzero()[0]
        selected = np.setdiff1d(selected, rm_idx)

    selected_gt_boxes = gt_boxes[selected]
    new_gt_infos = {
        "gt_boxes": selected_gt_boxes,
        "cls_scores": (
            new_cls_scores[selected] if gt_infos_a["cls_scores"] is not None else None
        ),
        "iou_scores": (
            new_iou_scores[selected] if gt_infos_a["iou_scores"] is not None else None
        ),
        "memory_counter": new_memory_counter[selected],
    }

    return new_gt_infos
def bipartite_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
    """
    Merge previous (a) and current (b) pseudo boxes via optimal bipartite
    (Hungarian) matching on the IoU matrix: matched pairs keep the
    higher-confidence box, unmatched previous boxes are aged by memory voting,
    and unmatched current boxes are appended as new detections.

    Args:
        gt_infos_a:
            gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
            cls_scores: (N)
            iou_scores: (N)
            memory_counter: (N)

        gt_infos_b:
            gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
            cls_scores: (M)
            iou_scores: (M)
            memory_counter: (M)

        memory_ensemble_cfg: config node with IOU_THRESH and optional
            MEMORY_VOTING (IGNORE_THRESH / RM_THRESH) settings.

    Returns:
        gt_infos:
            gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
            cls_scores: (K)
            iou_scores: (K)
            memory_counter: (K)
    """
    gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a["gt_boxes"])
    gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b["gt_boxes"])
    gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()

    # The merged result is built by mutating copies of the "previous" infos.
    new_gt_box = gt_infos_a["gt_boxes"]
    new_cls_scores = gt_infos_a["cls_scores"]
    new_iou_scores = gt_infos_a["iou_scores"]
    new_memory_counter = gt_infos_a["memory_counter"]

    # if gt_box_b or gt_box_a don't have any predictions
    if gt_box_b.shape[0] == 0:
        gt_infos_a["memory_counter"] += 1
        return gt_infos_a
    elif gt_box_a.shape[0] == 0:
        return gt_infos_b

    # bipartite matching
    iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7])
    iou_matrix = iou_matrix.cpu().numpy()
    # linear_sum_assignment minimizes cost, so negate IoU to maximize overlap.
    a_idx, b_idx = linear_sum_assignment(-iou_matrix)

    gt_box_a, gt_box_b = gt_box_a.cpu().numpy(), gt_box_b.cpu().numpy()

    # (K, 2) rows of [index into a, assigned index into b].
    matching_paris_idx = np.concatenate(
        (a_idx.reshape(-1, 1), b_idx.reshape(-1, 1)), axis=1
    )
    ious = iou_matrix[matching_paris_idx[:, 0], matching_paris_idx[:, 1]]

    # matched a boxes.
    # Keep only assignments with sufficient overlap.
    matched_mask = ious > memory_ensemble_cfg.IOU_THRESH
    matching_selected = matching_paris_idx[matched_mask]
    gt_box_selected_a = gt_box_a[matching_selected[:, 0]]
    gt_box_selected_b = gt_box_b[matching_selected[:, 1]]

    # assign boxes with higher confidence
    # Column 8 holds the box confidence score.
    score_mask = gt_box_selected_a[:, 8] < gt_box_selected_b[:, 8]
    new_gt_box[matching_selected[score_mask, 0], :] = gt_box_selected_b[score_mask, :]
    if gt_infos_a["cls_scores"] is not None:
        new_cls_scores[matching_selected[score_mask, 0]] = gt_infos_b["cls_scores"][
            matching_selected[score_mask, 1]
        ]
    if gt_infos_a["iou_scores"] is not None:
        new_iou_scores[matching_selected[score_mask, 0]] = gt_infos_b["iou_scores"][
            matching_selected[score_mask, 1]
        ]

    # for matched pairs, clear the ignore counter
    new_memory_counter[matching_selected[:, 0]] = 0

    ##############################################
    # disppeared boxes for previous pseudo boxes
    ##############################################
    gt_box_a_idx = np.array(list(range(gt_box_a.shape[0])))
    disappear_idx = np.setdiff1d(gt_box_a_idx, matching_selected[:, 0])

    if (
        memory_ensemble_cfg.get("MEMORY_VOTING", None)
        and memory_ensemble_cfg.MEMORY_VOTING.ENABLED
    ):
        # Memory voting: age unmatched boxes, mark long-unmatched ones as
        # ignored (label -1), and drop those unmatched for RM_THRESH rounds.
        new_memory_counter[disappear_idx] += 1
        # ignore gt_boxes that ignore_count == IGNORE_THRESH
        ignore_mask = (
            new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
        )
        new_gt_box[ignore_mask, 7] = -1

        # remove gt_boxes that ignore_count >= RM_THRESH
        remain_mask = new_memory_counter < memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
        new_gt_box = new_gt_box[remain_mask]
        new_memory_counter = new_memory_counter[remain_mask]
        if gt_infos_a["cls_scores"] is not None:
            new_cls_scores = new_cls_scores[remain_mask]
        if gt_infos_a["iou_scores"] is not None:
            new_iou_scores = new_iou_scores[remain_mask]

    ##############################################
    # new appear boxes for current pseudo boxes
    ##############################################
    gt_box_b_idx = np.array(list(range(gt_box_b.shape[0])))
    newboxes_idx = np.setdiff1d(gt_box_b_idx, matching_selected[:, 1])
    if newboxes_idx.shape[0] != 0:
        new_gt_box = np.concatenate(
            (new_gt_box, gt_infos_b["gt_boxes"][newboxes_idx, :]), axis=0
        )
        if gt_infos_a["cls_scores"] is not None:
            new_cls_scores = np.concatenate(
                (new_cls_scores, gt_infos_b["cls_scores"][newboxes_idx]), axis=0
            )
        if gt_infos_a["iou_scores"] is not None:
            new_iou_scores = np.concatenate(
                (new_iou_scores, gt_infos_b["iou_scores"][newboxes_idx]), axis=0
            )
        new_memory_counter = np.concatenate(
            (new_memory_counter, gt_infos_b["memory_counter"][newboxes_idx]), axis=0
        )

    new_gt_infos = {
        "gt_boxes": new_gt_box,
        "cls_scores": new_cls_scores if gt_infos_a["cls_scores"] is not None else None,
        "iou_scores": new_iou_scores if gt_infos_a["iou_scores"] is not None else None,
        "memory_counter": new_memory_counter,
    }

    return new_gt_infos
def memory_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg, ensemble_func):
    """Dispatch the given ensemble function per object class.

    Single-class inputs are ensembled in one call; multi-class inputs are
    split by (absolute) class label, ensembled per class, and concatenated
    back together.
    """
    # Class labels live in column -2; ignored boxes carry negated labels,
    # hence the abs() before collecting the distinct classes.
    labels_a = np.unique(np.abs(gt_infos_a["gt_boxes"][:, -2]))
    labels_b = np.unique(np.abs(gt_infos_b["gt_boxes"][:, -2]))
    num_classes = max(labels_a.shape[0], labels_b.shape[0])

    # Neither side has any boxes: nothing to merge.
    if num_classes == 0:
        return gt_infos_a

    # Single category: ensemble everything at once.
    if num_classes == 1:
        return ensemble_func(gt_infos_a, gt_infos_b, memory_ensemble_cfg)

    # Multi-class: process each class independently and concatenate results.
    merged_infos = {}
    for cls_label in np.union1d(labels_a, labels_b):
        infos_a_cls = common_utils.mask_dict(
            gt_infos_a, np.abs(gt_infos_a["gt_boxes"][:, -2]) == cls_label
        )
        infos_b_cls = common_utils.mask_dict(
            gt_infos_b, np.abs(gt_infos_b["gt_boxes"][:, -2]) == cls_label
        )

        cls_infos = ensemble_func(infos_a_cls, infos_b_cls, memory_ensemble_cfg)
        merged_infos = common_utils.concatenate_array_inside_dict(
            merged_infos, cls_infos
        )

    return merged_infos
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/object3d_kitti.py | Python | import numpy as np
def get_objects_from_label(label_file):
    """Parse a KITTI label file into a list of Object3d, one per line."""
    with open(label_file, "r") as f:
        return [Object3d(line) for line in f.readlines()]
def cls_type_to_id(cls_type):
    """Map a KITTI class name to its integer id; unknown classes map to -1."""
    type_to_id = {"Car": 1, "Pedestrian": 2, "Cyclist": 3, "Van": 4}
    return type_to_id.get(cls_type, -1)
class Object3d(object):
    """A single 3D object parsed from one KITTI-format label line."""

    def __init__(self, line):
        fields = line.strip().split(" ")
        self.src = line
        self.cls_type = fields[0]
        self.cls_id = cls_type_to_id(self.cls_type)
        self.truncation = float(fields[1])
        # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
        self.occlusion = float(fields[2])
        self.alpha = float(fields[3])
        # 2D image-plane box: (x1, y1, x2, y2)
        self.box2d = np.array(
            (float(fields[4]), float(fields[5]), float(fields[6]), float(fields[7])),
            dtype=np.float32,
        )
        self.h = float(fields[8])
        self.w = float(fields[9])
        self.l = float(fields[10])
        # Object center in camera coordinates.
        self.loc = np.array(
            (float(fields[11]), float(fields[12]), float(fields[13])), dtype=np.float32
        )
        self.dis_to_cam = np.linalg.norm(self.loc)
        self.ry = float(fields[14])
        # An optional 16th field carries the detection score.
        self.score = float(fields[15]) if len(fields) == 16 else -1.0
        self.level_str = None
        self.level = self.get_kitti_obj_level()

    def get_kitti_obj_level(self):
        """Assign the KITTI difficulty level (also sets `level_str`) from
        2D box height, truncation, and occlusion."""
        bbox_height = float(self.box2d[3]) - float(self.box2d[1]) + 1

        if bbox_height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
            self.level_str = "Easy"
            return 0  # Easy
        if bbox_height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
            self.level_str = "Moderate"
            return 1  # Moderate
        if bbox_height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
            self.level_str = "Hard"
            return 2  # Hard
        self.level_str = "UnKnown"
        return -1

    def generate_corners3d(self):
        """
        generate corners3d representation for this object
        :return corners_3d: (8, 3) corners of box3d in camera coord
        """
        dx, dy, dz = self.l, self.h, self.w
        x_corners = [dx / 2, dx / 2, -dx / 2, -dx / 2, dx / 2, dx / 2, -dx / 2, -dx / 2]
        y_corners = [0, 0, 0, 0, -dy, -dy, -dy, -dy]
        z_corners = [dz / 2, -dz / 2, -dz / 2, dz / 2, dz / 2, -dz / 2, -dz / 2, dz / 2]

        cos_ry, sin_ry = np.cos(self.ry), np.sin(self.ry)
        # Rotation about the camera y-axis by heading angle ry.
        R = np.array(
            [
                [cos_ry, 0, sin_ry],
                [0, 1, 0],
                [-sin_ry, 0, cos_ry],
            ]
        )
        corners3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners])).T
        return corners3d + self.loc

    def to_str(self):
        return (
            "%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f"
            % (
                self.cls_type,
                self.truncation,
                self.occlusion,
                self.alpha,
                self.box2d,
                self.h,
                self.w,
                self.l,
                self.loc,
                self.ry,
            )
        )

    def to_kitti_format(self):
        return (
            "%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f"
            % (
                self.cls_type,
                self.truncation,
                int(self.occlusion),
                self.alpha,
                self.box2d[0],
                self.box2d[1],
                self.box2d[2],
                self.box2d[3],
                self.h,
                self.w,
                self.l,
                self.loc[0],
                self.loc[1],
                self.loc[2],
                self.ry,
            )
        )
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
pcdet/utils/self_training_utils.py | Python | import torch
import os
import glob
import tqdm
import numpy as np
import torch.distributed as dist
from pcdet.config import cfg
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils, commu_utils, memory_ensemble_utils
import pickle as pkl
import re
from pcdet.models.model_utils.dsnorm import set_ds_target
# Pseudo labels committed from earlier epochs, keyed by frame_id.
PSEUDO_LABELS = {}
# Scratch buffer for labels produced in the current epoch; merged into
# PSEUDO_LABELS after gathering across ranks.
NEW_PSEUDO_LABELS = {}
def check_already_exsit_pseudo_label(ps_label_dir, start_epoch):
    """
    When resuming training, load existing pseudo labels directly from a
    previously-saved result pkl instead of regenerating them.

    Loads the latest suitable pkl (epoch <= start_epoch) into PSEUDO_LABELS;
    at epoch 0 an initial pkl from cfg.SELF_TRAIN.INIT_PS takes precedence.

    Args:
        ps_label_dir: dir to save pseudo label results pkls.
        start_epoch: start epoch of the (resumed) training.

    Returns:
        Path of the pkl that was loaded, or None if no suitable pkl exists.
    """
    # support init ps_label given by cfg
    if start_epoch == 0 and cfg.SELF_TRAIN.get("INIT_PS", None):
        if os.path.exists(cfg.SELF_TRAIN.INIT_PS):
            init_ps_label = pkl.load(open(cfg.SELF_TRAIN.INIT_PS, "rb"))
            PSEUDO_LABELS.update(init_ps_label)

            # Only rank 0 persists the epoch-0 snapshot to disk.
            if cfg.LOCAL_RANK == 0:
                ps_path = os.path.join(ps_label_dir, "ps_label_e0.pkl")
                with open(ps_path, "wb") as f:
                    pkl.dump(PSEUDO_LABELS, f)

            return cfg.SELF_TRAIN.INIT_PS

    ps_label_list = glob.glob(os.path.join(ps_label_dir, "ps_label_e*.pkl"))
    if len(ps_label_list) == 0:
        return

    # Newest file first, so the most recent eligible epoch wins.
    ps_label_list.sort(key=os.path.getmtime, reverse=True)
    for cur_pkl in ps_label_list:
        num_epoch = re.findall("ps_label_e(.*).pkl", cur_pkl)
        assert len(num_epoch) == 1

        # load pseudo label and return
        # Skip pkls from epochs beyond the resume point.
        if int(num_epoch[0]) <= start_epoch:
            latest_ps_label = pkl.load(open(cur_pkl, "rb"))
            PSEUDO_LABELS.update(latest_ps_label)
            return cur_pkl

    return None
def save_pseudo_label_epoch(
    model, val_loader, rank, leave_pbar, ps_label_dir, cur_epoch
):
    """
    Generate pseudo labels for the whole target set with the given model and
    dump them to ps_label_dir.

    Args:
        model: model to predict result for pseudo label
        val_loader: data_loader to predict pseudo label
        rank: process rank (only rank 0 drives the progress bar)
        leave_pbar: tqdm bar controller
        ps_label_dir: dir to save pseudo label
        cur_epoch: current epoch index, used to name the result pkl
    """
    val_dataloader_iter = iter(val_loader)
    total_it_each_epoch = len(val_loader)

    if rank == 0:
        pbar = tqdm.tqdm(
            total=total_it_each_epoch,
            leave=leave_pbar,
            desc="generate_ps_e%d" % cur_epoch,
            dynamic_ncols=True,
        )

    # Per-class meters counting positive / ignored pseudo boxes.
    pos_ps_nmeter = common_utils.NAverageMeter(len(cfg.CLASS_NAMES))
    ign_ps_nmeter = common_utils.NAverageMeter(len(cfg.CLASS_NAMES))

    if cfg.SELF_TRAIN.get("DSNORM", None):
        model.apply(set_ds_target)

    model.eval()

    for cur_it in range(total_it_each_epoch):
        try:
            target_batch = next(val_dataloader_iter)
        except StopIteration:
            # Bug fix: rebind the SAME iterator variable used by the `try`
            # above. Previously the fresh iterator was stored in a different
            # name (`target_dataloader_iter`), so every subsequent step hit
            # StopIteration again and always returned the first batch.
            val_dataloader_iter = iter(val_loader)
            target_batch = next(val_dataloader_iter)

        # generate gt_boxes for target_batch and update model weights
        with torch.no_grad():
            load_data_to_gpu(target_batch)
            pred_dicts, ret_dict = model(target_batch)

        pos_ps_batch_nmeters, ign_ps_batch_nmeters = save_pseudo_label_batch(
            target_batch,
            pred_dicts=pred_dicts,
            # Memory ensemble only makes sense once a previous epoch's labels
            # exist (cur_epoch > 0).
            need_update=(
                cfg.SELF_TRAIN.get("MEMORY_ENSEMBLE", None)
                and cfg.SELF_TRAIN.MEMORY_ENSEMBLE.ENABLED
                and cur_epoch > 0
            ),
        )

        # log to console and tensorboard
        pos_ps_nmeter.update(pos_ps_batch_nmeters)
        ign_ps_nmeter.update(ign_ps_batch_nmeters)
        pos_ps_result = pos_ps_nmeter.aggregate_result()
        ign_ps_result = ign_ps_nmeter.aggregate_result()

        disp_dict = {"pos_ps_box": pos_ps_result, "ign_ps_box": ign_ps_result}

        if rank == 0:
            pbar.update()
            pbar.set_postfix(disp_dict)
            pbar.refresh()

    if rank == 0:
        pbar.close()

    gather_and_dump_pseudo_label_result(rank, ps_label_dir, cur_epoch)
def gather_and_dump_pseudo_label_result(rank, ps_label_dir, cur_epoch):
    """Gather this epoch's pseudo labels from all ranks, dump them to disk
    (rank 0 only), and promote them to PSEUDO_LABELS for the next epoch."""
    commu_utils.synchronize()

    if dist.is_initialized():
        # Collect each rank's partial label dict and merge them locally.
        part_pseudo_labels_list = commu_utils.all_gather(NEW_PSEUDO_LABELS)

        new_pseudo_label_dict = {}
        for pseudo_labels in part_pseudo_labels_list:
            new_pseudo_label_dict.update(pseudo_labels)

        NEW_PSEUDO_LABELS.update(new_pseudo_label_dict)

    # dump new pseudo label to given dir
    if rank == 0:
        ps_path = os.path.join(ps_label_dir, "ps_label_e{}.pkl".format(cur_epoch))
        with open(ps_path, "wb") as f:
            pkl.dump(NEW_PSEUDO_LABELS, f)

    commu_utils.synchronize()
    # Replace the committed labels with this epoch's result and reset the
    # scratch buffer for the next epoch.
    PSEUDO_LABELS.clear()
    PSEUDO_LABELS.update(NEW_PSEUDO_LABELS)
    NEW_PSEUDO_LABELS.clear()
def save_pseudo_label_batch(input_dict, pred_dicts=None, need_update=True):
    """
    Save pseudo label for give batch.
    If model is given, use model to inference pred_dicts,
    otherwise, directly use given pred_dicts.

    Args:
        input_dict: batch data read from dataloader
        pred_dicts: Dict if not given model.
            predict results to be generated pseudo label and saved
        need_update: Bool.
            If set to true, use consistency matching to update pseudo label

    Returns:
        (pos_ps_nmeter, ign_ps_nmeter): per-class counts of positive and
        ignored pseudo boxes for this batch.
    """
    pos_ps_nmeter = common_utils.NAverageMeter(len(cfg.CLASS_NAMES))
    ign_ps_nmeter = common_utils.NAverageMeter(len(cfg.CLASS_NAMES))

    batch_size = len(pred_dicts)
    for b_idx in range(batch_size):
        pred_cls_scores = pred_iou_scores = None
        if "pred_boxes" in pred_dicts[b_idx]:
            # Exist predicted boxes passing self-training score threshold
            pred_boxes = pred_dicts[b_idx]["pred_boxes"].detach().cpu().numpy()
            pred_labels = pred_dicts[b_idx]["pred_labels"].detach().cpu().numpy()
            pred_scores = pred_dicts[b_idx]["pred_scores"].detach().cpu().numpy()
            if "pred_cls_scores" in pred_dicts[b_idx]:
                pred_cls_scores = (
                    pred_dicts[b_idx]["pred_cls_scores"].detach().cpu().numpy()
                )
            if "pred_iou_scores" in pred_dicts[b_idx]:
                pred_iou_scores = (
                    pred_dicts[b_idx]["pred_iou_scores"].detach().cpu().numpy()
                )

            # remove boxes under negative threshold
            # NEG_THRESH is indexed per class via (label - 1).
            if cfg.SELF_TRAIN.get("NEG_THRESH", None):
                labels_remove_scores = np.array(cfg.SELF_TRAIN.NEG_THRESH)[
                    pred_labels - 1
                ]
                remain_mask = pred_scores >= labels_remove_scores
                pred_labels = pred_labels[remain_mask]
                pred_scores = pred_scores[remain_mask]
                pred_boxes = pred_boxes[remain_mask]
                if "pred_cls_scores" in pred_dicts[b_idx]:
                    pred_cls_scores = pred_cls_scores[remain_mask]
                if "pred_iou_scores" in pred_dicts[b_idx]:
                    pred_iou_scores = pred_iou_scores[remain_mask]

            # Boxes between NEG_THRESH and SCORE_THRESH are kept but marked
            # as "ignored" by negating their class label.
            labels_ignore_scores = np.array(cfg.SELF_TRAIN.SCORE_THRESH)[
                pred_labels - 1
            ]
            ignore_mask = pred_scores < labels_ignore_scores
            pred_labels[ignore_mask] = -pred_labels[ignore_mask]

            # (K, 9): [x, y, z, dx, dy, dz, heading, label, score]
            gt_box = np.concatenate(
                (pred_boxes, pred_labels.reshape(-1, 1), pred_scores.reshape(-1, 1)),
                axis=1,
            )

        else:
            # no predicted boxes passes self-training score threshold
            gt_box = np.zeros((0, 9), dtype=np.float32)

        gt_infos = {
            "gt_boxes": gt_box,
            "cls_scores": pred_cls_scores,
            "iou_scores": pred_iou_scores,
            "memory_counter": np.zeros(gt_box.shape[0]),
        }

        # record pseudo label to pseudo label dict
        if need_update:
            # Fuse with the previous epoch's labels for this frame using the
            # configured memory-ensemble strategy.
            ensemble_func = getattr(
                memory_ensemble_utils, cfg.SELF_TRAIN.MEMORY_ENSEMBLE.NAME
            )
            gt_infos = memory_ensemble_utils.memory_ensemble(
                PSEUDO_LABELS[input_dict["frame_id"][b_idx]],
                gt_infos,
                cfg.SELF_TRAIN.MEMORY_ENSEMBLE,
                ensemble_func,
            )

        # counter the number of ignore boxes for each class
        # Column 7 holds the (possibly negated) class label.
        for i in range(ign_ps_nmeter.n):
            num_total_boxes = (np.abs(gt_infos["gt_boxes"][:, 7]) == (i + 1)).sum()
            ign_ps_nmeter.update(
                (gt_infos["gt_boxes"][:, 7] == -(i + 1)).sum(), index=i
            )
            pos_ps_nmeter.update(num_total_boxes - ign_ps_nmeter.meters[i].val, index=i)

        NEW_PSEUDO_LABELS[input_dict["frame_id"][b_idx]] = gt_infos

    return pos_ps_nmeter, ign_ps_nmeter
def load_ps_label(frame_id):
    """
    :param frame_id: file name of pseudo label
    :return gt_box: loaded gt boxes (N, 9) [x, y, z, w, l, h, ry, label, scores]
    """
    # Missing frames are a hard error: labels must have been generated first.
    if frame_id not in PSEUDO_LABELS:
        raise ValueError("Cannot find pseudo label for frame: %s" % frame_id)
    return PSEUDO_LABELS[frame_id]["gt_boxes"]
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
setup.py | Python | import os
import subprocess
from pathlib import Path
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
def make_cuda_ext(name, module, sources):
    """Build a CUDAExtension named `module.name` whose source paths are
    resolved relative to the module's package directory."""
    print(f"Making CUDA extension: {name}...")
    pkg_dir = os.path.join(*module.split("."))
    ext_sources = [os.path.join(pkg_dir, src) for src in sources]
    return CUDAExtension(name="%s.%s" % (module, name), sources=ext_sources)
def main():
    """Configure and run setuptools for the `lit` package, registering all
    CUDA extension modules used by pcdet ops."""
    version = "0.3.0"

    # (extension name, parent module, source files relative to the module dir)
    cuda_ext_specs = [
        (
            "iou3d_nms_cuda",
            "pcdet.ops.iou3d_nms",
            [
                "src/iou3d_cpu.cpp",
                "src/iou3d_nms_api.cpp",
                "src/iou3d_nms.cpp",
                "src/iou3d_nms_kernel.cu",
            ],
        ),
        (
            "roiaware_pool3d_cuda",
            "pcdet.ops.roiaware_pool3d",
            [
                "src/roiaware_pool3d.cpp",
                "src/roiaware_pool3d_kernel.cu",
            ],
        ),
        (
            "roipoint_pool3d_cuda",
            "pcdet.ops.roipoint_pool3d",
            [
                "src/roipoint_pool3d.cpp",
                "src/roipoint_pool3d_kernel.cu",
            ],
        ),
        (
            "pointnet2_stack_cuda",
            "pcdet.ops.pointnet2.pointnet2_stack",
            [
                "src/pointnet2_api.cpp",
                "src/ball_query.cpp",
                "src/ball_query_gpu.cu",
                "src/group_points.cpp",
                "src/group_points_gpu.cu",
                "src/sampling.cpp",
                "src/sampling_gpu.cu",
                "src/interpolate.cpp",
                "src/interpolate_gpu.cu",
            ],
        ),
        (
            "pointnet2_batch_cuda",
            "pcdet.ops.pointnet2.pointnet2_batch",
            [
                "src/pointnet2_api.cpp",
                "src/ball_query.cpp",
                "src/ball_query_gpu.cu",
                "src/group_points.cpp",
                "src/group_points_gpu.cu",
                "src/interpolate.cpp",
                "src/interpolate_gpu.cu",
                "src/sampling.cpp",
                "src/sampling_gpu.cu",
            ],
        ),
    ]

    setup(
        name="lit",
        version=version,
        description="LiT: LiDAR Translator",
        install_requires=[
            "numpy",
            "torch>=1.1",
            "spconv",
            "numba",
            "tensorboardX",
            "easydict",
            "pyyaml",
        ],
        license="MIT",
        packages=["pcdet", "lit"],
        cmdclass={"build_ext": BuildExtension},
        ext_modules=[
            make_cuda_ext(name=ext_name, module=ext_module, sources=ext_sources)
            for ext_name, ext_module, ext_sources in cuda_ext_specs
        ],
    )
# Script entry point: build/install the package with its CUDA extensions.
if __name__ == "__main__":
    main()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
tools/eval/summerize_json.py | Python | import json
import re
from pathlib import Path
# Resolve the repo layout relative to this script: <root>/tools/eval/ -> <root>.
_pwd = Path(__file__).parent.absolute()
_project_root = _pwd.parent.parent
# Directory holding the per-config evaluation result JSON files.
_eval_root = _project_root / "eval"
def _sweep_json_names(split, translator_suffix, sweeps=range(1, 11)):
    """Build the per-``max_sweeps`` eval JSON file names for one config.

    Args:
        split: Dataset split tag in the file name ("mini" or "trainval").
        translator_suffix: Everything after "translator_" in the file name.
        sweeps: Iterable of max_sweeps values (default 1..10).

    Returns:
        List of file names like
        "da_waymo_nuscenes_<split>_max_sweeps_<s>_translator_<suffix>.json".
    """
    return [
        f"da_waymo_nuscenes_{split}_max_sweeps_{s}_translator_{translator_suffix}.json"
        for s in sweeps
    ]


# Map from a human-readable config name to the ordered list of result JSON
# files (one per max_sweeps value) to summarize. Generated programmatically
# instead of hand-written literals so the naming stays consistent.
config_to_json_names = {
    "mini_translator_none": _sweep_json_names("mini", "None"),
    "mini_translator_nksr_names": _sweep_json_names("mini", "nksr"),
    "mini_translator_nksr_recompute_voxels_force_nearest_sweeps_names": _sweep_json_names(
        "mini", "nksr_recompute_voxels_force_nearest_sweeps"
    ),
    "full_translator_none_names": _sweep_json_names("trainval", "None"),
    "full_translator_none_force_nearest_sweeps_names": _sweep_json_names(
        "trainval", "None_force_nearest_sweeps"
    ),
    "mini_translator_pointersect_force_nearest_sweeps_k_50_names": _sweep_json_names(
        "mini", "pointersect_force_nearest_sweeps_k_50"
    ),
    "mini_translator_pointersect_force_nearest_sweeps_k_1_names": _sweep_json_names(
        "mini", "pointersect_force_nearest_sweeps_k_1"
    ),
    # Fixed max_sweeps=10, varying k instead (1..10, then 20..60 by 10s).
    "mini_translator_pointersect_force_nearest_sweeps_sweeps_10_ks_names": [
        f"da_waymo_nuscenes_mini_max_sweeps_10_translator_pointersect_force_nearest_sweeps_k_{k}.json"
        for k in list(range(1, 11)) + [20, 30, 40, 50, 60]
    ],
    "mini_translator_pointersect_recompute_voxels_force_nearest_sweeps_k_1_names": _sweep_json_names(
        "mini", "pointersect_recompute_voxels_force_nearest_sweeps_k_1"
    ),
    "translator_pointersect_force_nearest_sweeps_k_1_names": _sweep_json_names(
        "trainval", "pointersect_force_nearest_sweeps_k_1"
    ),
    "translator_pointersect_recompute_voxels_force_nearest_sweeps_k_1_names": _sweep_json_names(
        "trainval", "pointersect_recompute_voxels_force_nearest_sweeps_k_1"
    ),
}
def main():
    """Print a CSV table (max_sweeps, AP-BEV, AP-3D, avg points) per config."""
    for config_name, json_names in config_to_json_names.items():
        print(f"# {config_name}")
        print("max_sweeps,ap_bev,ap_3d,avg_num_points")
        for json_name in json_names:
            json_path = _eval_root / json_name
            data = json.loads(json_path.read_text(encoding="utf-8"))
            # max_sweeps is encoded in the file name, e.g. "..._max_sweeps_5_...".
            max_sweeps = int(re.search(r"max_sweeps_(\d+)", json_name).group(1))
            ap_bev = data["Car_bev/moderate_R40"]
            ap_3d = data["Car_3d/moderate_R40"]
            avg_num_points = data["avg_num_points"]
            print(f"{max_sweeps},{ap_bev},{ap_3d},{avg_num_points / 1000:.02f}K")
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
tools/eval_utils/eval_utils.py | Python | import pickle
import time
import numpy as np
import torch
import tqdm
from pcdet.models import load_data_to_gpu
from pcdet.models.model_utils.dsnorm import set_ds_target
from pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """Accumulate one batch's recall counters from `ret_dict` into `metric`.

    Also writes a human-readable "(roi, rcnn) / gt" summary for the smallest
    recall threshold into `disp_dict` (shown on the eval progress bar).
    """
    thresholds = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST
    for thresh in thresholds:
        key = str(thresh)
        # Missing keys in ret_dict count as zero hits for this batch.
        metric["recall_roi_%s" % key] += ret_dict.get("roi_%s" % key, 0)
        metric["recall_rcnn_%s" % key] += ret_dict.get("rcnn_%s" % key, 0)
    metric["gt_num"] += ret_dict.get("gt", 0)
    min_key = str(thresholds[0])
    disp_dict["recall_%s" % min_key] = "(%d, %d) / %d" % (
        metric["recall_roi_%s" % min_key],
        metric["recall_rcnn_%s" % min_key],
        metric["gt_num"],
    )
def eval_one_epoch(
    cfg,
    model,
    dataloader,
    epoch_id,
    logger,
    dist_test=False,
    save_to_file=False,
    result_dir=None,
    args=None,
):
    """Run detection inference over `dataloader` and evaluate the results.

    Accumulates recall counters per batch, merges per-rank results when
    `dist_test` is set, dumps all detection annotations to
    `result_dir/result.pkl`, and runs the dataset's own evaluation.

    Args:
        cfg: Config with MODEL.POST_PROCESSING settings and LOCAL_RANK.
        model: Detector; wrapped in DistributedDataParallel when dist_test.
        dataloader: Eval dataloader; its dataset provides class_names,
            generate_prediction_dicts() and evaluation().
        epoch_id: Identifier used only for log messages.
        logger: Logger for progress/result messages.
        dist_test: Whether this is a multi-process distributed eval.
        save_to_file: Also write per-frame predictions under
            result_dir/final_result/data.
        result_dir: Output directory (created if missing).
        args: Unused here; kept for interface parity with other eval utils.

    Return:
        If LOCAL_RANK == 0: returns (ret_dict, result_str)
        If LOCAL_RANK != 0: returns (None, None)
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / "final_result" / "data"
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    # Per-threshold recall counters, filled in by statistics_info().
    metric = {
        "gt_num": 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric["recall_roi_%s" % str(cur_thresh)] = 0
        metric["recall_rcnn_%s" % str(cur_thresh)] = 0
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info("*************** EPOCH %s EVALUATION *****************" % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], broadcast_buffers=False
        )
    model.eval()
    # Optional domain-adaptation trick: switch DSNorm layers to target stats.
    if cfg.get("SELF_TRAIN", None) and cfg.SELF_TRAIN.get("DSNORM", None):
        model.apply(set_ds_target)
    # Progress bar only on the primary rank.
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(
            total=len(dataloader), leave=True, desc="eval", dynamic_ncols=True
        )
    start_time = time.time()
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict,
            pred_dicts,
            class_names,
            output_path=final_output_dir if save_to_file else None,
        )
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    # Gather annotations and metrics from all ranks via a shared tmpdir.
    if dist_test:
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(
            det_annos, len(dataset), tmpdir=result_dir / "tmpdir"
        )
        metric = common_utils.merge_results_dist(
            [metric], world_size, tmpdir=result_dir / "tmpdir"
        )
    logger.info("*************** Performance of EPOCH %s *****************" % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info(
        "Generate label finished(sec_per_example: %.4f second)." % sec_per_example
    )
    # Only rank 0 computes and reports final metrics.
    if cfg.LOCAL_RANK != 0:
        return None, None
    ret_dict = {}
    if dist_test:
        # After merge_results_dist, `metric` is a list of per-rank dicts;
        # sum them into the rank-0 dict.
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric["gt_num"]
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        # max(gt_num_cnt, 1) guards against division by zero when no GTs.
        cur_roi_recall = metric["recall_roi_%s" % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric["recall_rcnn_%s" % str(cur_thresh)] / max(
            gt_num_cnt, 1
        )
        logger.info("recall_roi_%s: %f" % (cur_thresh, cur_roi_recall))
        logger.info("recall_rcnn_%s: %f" % (cur_thresh, cur_rcnn_recall))
        ret_dict["recall/roi_%s" % str(cur_thresh)] = cur_roi_recall
        ret_dict["recall/rcnn_%s" % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno["name"].__len__()
    logger.info(
        "Average predicted number of objects(%d samples): %.3f"
        % (len(det_annos), total_pred_objects / max(1, len(det_annos)))
    )
    with open(result_dir / "result.pkl", "wb") as f:
        pickle.dump(det_annos, f)
    # Dataset-specific AP evaluation (metric chosen by cfg).
    result_str, result_dict = dataset.evaluation(
        det_annos,
        class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir,
    )
    logger.info(result_str)
    ret_dict.update(result_dict)
    # add avg predicted number of objects to tensorboard log
    ret_dict["eval_avg_pred_bboxes"] = total_pred_objects / max(1, len(det_annos))
    logger.info("Result is save to %s" % result_dir)
    logger.info("****************Evaluation done.*****************")
    return ret_dict, result_str
# Module is import-only; running it directly is a no-op.
if __name__ == "__main__":
    pass
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
tools/eval_utils/eval_utils_with_copypaste.py | Python | import copy
import pickle
import time
from pathlib import Path
import camtools as ct
import numpy as np
import open3d as o3d
import torch
import tqdm
from lit.copy_paste_utils import copy_paste_nuscenes_to_kitti, recompute_voxels
from lit.recon_utils import bboxes_to_lineset
from pcdet.datasets.kitti.kitti_dataset import KittiDataset
from pcdet.models import load_data_to_gpu
from pcdet.models.model_utils.dsnorm import set_ds_target
from pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """Fold one batch's recall counts from `ret_dict` into `metric`.

    `disp_dict` receives a "(roi, rcnn) / gt" progress string for the
    lowest recall threshold.
    """
    recall_threshs = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST
    for t in recall_threshs:
        # Batches may omit a key; treat missing counts as zero.
        metric[f"recall_roi_{t}"] += ret_dict.get(f"roi_{t}", 0)
        metric[f"recall_rcnn_{t}"] += ret_dict.get(f"rcnn_{t}", 0)
    metric["gt_num"] += ret_dict.get("gt", 0)
    lowest = recall_threshs[0]
    roi_hits = metric[f"recall_roi_{lowest}"]
    rcnn_hits = metric[f"recall_rcnn_{lowest}"]
    disp_dict[f"recall_{lowest}"] = "(%d, %d) / %d" % (
        roi_hits,
        rcnn_hits,
        metric["gt_num"],
    )
def eval_one_epoch(
    cfg,
    model,
    dataloader,
    epoch_id,
    logger,
    dist_test=False,
    save_to_file=False,
    result_dir=None,
    copy_paste_with=None,
    args=None,
):
    """Evaluate one epoch with optional copy-paste of foreground points.

    Like the plain eval loop, but can (a) replace foreground points in each
    batch with points from another source/style before inference
    (`copy_paste_with`), and (b) for KITTI, additionally evaluate against the
    (possibly modified) ground-truth boxes captured from the batch.

    Args:
        copy_paste_with: Replace foreground points with points from another
            dataset or config. When this option is enabled, the batch_size must
            be 1. One of the "kitti_pcd_*" / "nuscenes_mesh_*" option strings
            handled below, or None to disable.

    Returns:
        ret_dict with recall and evaluation metrics on rank 0, {} otherwise.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = result_dir / "final_result" / "data"
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    # Per-threshold recall counters, filled in by statistics_info().
    metric = {
        "gt_num": 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric["recall_roi_%s" % str(cur_thresh)] = 0
        metric["recall_rcnn_%s" % str(cur_thresh)] = 0
    dataset = dataloader.dataset
    # dataset.kitti_infos = dataset.kitti_infos[:50]
    class_names = dataset.class_names
    logger.info("*************** EPOCH %s EVALUATION *****************" % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], broadcast_buffers=False
        )
    model.eval()
    # Optional domain-adaptation trick: switch DSNorm layers to target stats.
    if cfg.get("SELF_TRAIN", None) and cfg.SELF_TRAIN.get("DSNORM", None):
        model.apply(set_ds_target)
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(
            total=len(dataloader), leave=True, desc="eval", dynamic_ncols=True
        )
    # The KITTI custom-GT path below assumes one frame per batch.
    if isinstance(dataset, KittiDataset) and dataloader.batch_size != 1:
        raise ValueError("Only support batch size 1 for customized eval.")
    # Debug switch: when flipped to True, only collect point stats and exit.
    enable_kitti_stats_and_exit = False
    kitti_stats = []
    det_annos = []
    gt_annos = []
    start_time = time.time()
    for i, batch_dict in enumerate(dataloader):
        if enable_kitti_stats_and_exit:
            # Collect raw points (shifted back by SHIFT_COOR) for offline stats.
            points = batch_dict["points"][:, 1:4]
            points = points - dataset.dataset_cfg["SHIFT_COOR"]
            kitti_stats.append({"points": points})
            progress_bar.update()
            continue
        if copy_paste_with is not None:
            if dataloader.batch_size != 1:
                raise ValueError("Only support batch size 1 for copy_paste_with")
            print(f"Using copy_paste_with {copy_paste_with}")
            # Option string encodes: <source domain>_<lidar style>_<bbox size>.
            if copy_paste_with == "kitti_pcd_kitti_lidar_kitti_size":
                # Just to validate recompute_voxels.
                batch_dict = recompute_voxels(dataloader, batch_dict)
            elif copy_paste_with == "kitti_pcd_kitti_lidar_nuscenes_size":
                batch_dict = copy_paste_nuscenes_to_kitti(
                    dataloader=dataloader,
                    batch_dict=batch_dict,
                    src_domain="kitti_pcd",
                    dst_style="kitti",
                    dst_bbox_size="nuscenes",
                )
            elif copy_paste_with == "nuscenes_mesh_nuscenes_lidar_kitti_size":
                batch_dict = copy_paste_nuscenes_to_kitti(
                    dataloader=dataloader,
                    batch_dict=batch_dict,
                    src_domain="nuscenes_mesh",
                    dst_style="nuscenes",
                    dst_bbox_size="kitti",
                )
            elif copy_paste_with == "nuscenes_mesh_nuscenes_lidar_nuscenes_size":
                batch_dict = copy_paste_nuscenes_to_kitti(
                    dataloader=dataloader,
                    batch_dict=batch_dict,
                    src_domain="nuscenes_mesh",
                    dst_style="nuscenes",
                    dst_bbox_size="nuscenes",
                )
            elif copy_paste_with == "nuscenes_mesh_kitti_lidar_kitti_size":
                batch_dict = copy_paste_nuscenes_to_kitti(
                    dataloader=dataloader,
                    batch_dict=batch_dict,
                    src_domain="nuscenes_mesh",
                    dst_style="kitti",
                    dst_bbox_size="kitti",
                )
            elif copy_paste_with == "nuscenes_mesh_kitti_lidar_nuscenes_size":
                batch_dict = copy_paste_nuscenes_to_kitti(
                    dataloader=dataloader,
                    batch_dict=batch_dict,
                    src_domain="nuscenes_mesh",
                    dst_style="kitti",
                    dst_bbox_size="nuscenes",
                )
            else:
                raise ValueError(f"Unknown copy_paste_with {copy_paste_with}")
        # Backup the contents of batch_dict on CPU.
        # (copy-paste may have rewritten gt_boxes; keep them for the fake
        # GT prediction dict built after inference.)
        cpu_batch_dict = dict()
        cpu_batch_dict["gt_boxes"] = copy.deepcopy(batch_dict["gt_boxes"])
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}
        # Debug switch: interactive Open3D view of points, GT (blue) and
        # predictions (red), plus optional per-frame AP if a cached
        # kitti_eval_info.pkl is present.
        visualize = False
        if visualize:
            points = batch_dict["points"][:, 1:4].cpu().numpy()
            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(points)
            gt_boxes = batch_dict["gt_boxes"][0].cpu().numpy()
            gt_ls = bboxes_to_lineset(gt_boxes, frame_pose=np.eye(4))
            gt_ls.paint_uniform_color([0, 0, 1])
            pd_boxes = pred_dicts[0]["pred_boxes"].cpu().numpy()
            pd_ls = bboxes_to_lineset(pd_boxes, frame_pose=np.eye(4))
            pd_ls.paint_uniform_color([1, 0, 0])
            if Path("kitti_eval_info.pkl").is_file():
                with open("kitti_eval_info.pkl", "rb") as f:
                    kitti_eval_info = pickle.load(f)
                eval_det_annos = kitti_eval_info["eval_det_annos"]
                eval_gt_annos = kitti_eval_info["eval_gt_annos"]
                assert len(eval_det_annos) == len(eval_gt_annos)
                # Per-frame AP only makes sense if the cache matches this run
                # one-to-one (batch_size == 1, so len(dataloader) == #frames).
                if len(eval_det_annos) != len(dataloader):
                    print(f"len(eval_det_annos)!= len(dataloader), skip stats")
                else:
                    # Ref: code from KittiDataset.evaluation.
                    eval_det_annos = copy.deepcopy(eval_det_annos)
                    eval_gt_annos = copy.deepcopy(eval_gt_annos)
                    eval_det_annos = [eval_det_annos[i]]
                    eval_gt_annos = [eval_gt_annos[i]]
                    from pcdet.datasets.kitti.kitti_object_eval_python import (
                        eval as kitti_eval,
                    )
                    ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
                        eval_gt_annos, eval_det_annos, class_names
                    )
                    print(f"# Frame {i} results ####################")
                    print(ap_result_str)
                    print(f"#######################################")
            axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
            o3d.visualization.draw_geometries([pcd, gt_ls, pd_ls, axes])
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict,
            pred_dicts,
            class_names,
            output_path=final_output_dir if save_to_file else None,
        )
        if isinstance(dataset, KittiDataset):
            # Make a fake gt_dict with gt_boxes.
            # We already assume batch_size = 1.
            # Scores are dummies (-1 / 0); labels are all 1 — presumably the
            # "Car" class index. TODO(review): confirm against class_names.
            pred_dict = pred_dicts[0]
            device = pred_dict["pred_boxes"].device
            gt_dict = {}
            gt_boxes = cpu_batch_dict["gt_boxes"][0]
            num_gt_boxes = len(gt_boxes)
            gt_dict["pred_boxes"] = torch.tensor(gt_boxes).to(device)
            gt_dict["pred_scores"] = -torch.ones(num_gt_boxes).to(device)
            gt_dict["pred_labels"] = torch.ones(num_gt_boxes).to(device).long()
            gt_dict["pred_cls_scores"] = torch.zeros(num_gt_boxes).to(device)
            gt_dict["pred_iou_scores"] = torch.zeros(num_gt_boxes).to(device)
            # force_no_filter keeps all GT boxes in the generated annos.
            gt_batch_annos = dataset.generate_prediction_dicts(
                batch_dict,
                [gt_dict],
                class_names,
                output_path=None,
                force_no_filter=True,
            )
            gt_annos += gt_batch_annos
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if enable_kitti_stats_and_exit:
        # Dump the collected point stats under <repo>/data/test_data and stop.
        script_dir = Path(__file__).parent.absolute()
        lit_root = script_dir.parent.parent
        kitti_stats_path = lit_root / "data" / "test_data" / "kitti_stats.pkl"
        with open(kitti_stats_path, "wb") as f:
            pickle.dump(kitti_stats, f)
        print(f"Saved kitti stats to {kitti_stats_path}")
        exit(0)
    if cfg.LOCAL_RANK == 0:
        progress_bar.close()
    # Gather annotations and metrics from all ranks via a shared tmpdir.
    if dist_test:
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(
            det_annos, len(dataset), tmpdir=result_dir / "tmpdir"
        )
        metric = common_utils.merge_results_dist(
            [metric], world_size, tmpdir=result_dir / "tmpdir"
        )
    logger.info("*************** Performance of EPOCH %s *****************" % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info(
        "Generate label finished(sec_per_example: %.4f second)." % sec_per_example
    )
    # Only rank 0 computes and reports final metrics.
    if cfg.LOCAL_RANK != 0:
        return {}
    ret_dict = {}
    if dist_test:
        # After merge_results_dist, `metric` is a list of per-rank dicts;
        # sum them into the rank-0 dict.
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric["gt_num"]
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        # max(gt_num_cnt, 1) guards against division by zero when no GTs.
        cur_roi_recall = metric["recall_roi_%s" % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric["recall_rcnn_%s" % str(cur_thresh)] / max(
            gt_num_cnt, 1
        )
        logger.info("recall_roi_%s: %f" % (cur_thresh, cur_roi_recall))
        logger.info("recall_rcnn_%s: %f" % (cur_thresh, cur_rcnn_recall))
        ret_dict["recall/roi_%s" % str(cur_thresh)] = cur_roi_recall
        ret_dict["recall/rcnn_%s" % str(cur_thresh)] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno["name"].__len__()
    logger.info(
        "Average predicted number of objects(%d samples): %.3f"
        % (len(det_annos), total_pred_objects / max(1, len(det_annos)))
    )
    with open(result_dir / "result.pkl", "wb") as f:
        pickle.dump(det_annos, f)
    # Original non-modified GT.
    result_str, result_dict = dataset.evaluation(
        det_annos,
        class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir,
    )
    print("Eval with original annos ################################")
    logger.info(result_str)
    print("##########################################################")
    # Copy-pasted GT evaluation.
    # NOTE: for KittiDataset this overwrites result_str/result_dict, so
    # ret_dict below carries the custom-GT numbers, not the original-GT ones.
    if isinstance(dataset, KittiDataset):
        result_str, result_dict = dataset.evaluation_with_custom_annos(
            det_annos,
            gt_annos,
            class_names,
            eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
            output_path=final_output_dir,
        )
        print("Eval with custom gt annos ################################")
        logger.info(result_str)
        print("##########################################################")
    ret_dict.update(result_dict)
    # add avg predicted number of objects to tensorboard log
    ret_dict["eval_avg_pred_bboxes"] = total_pred_objects / max(1, len(det_annos))
    logger.info("Result is save to %s" % result_dir)
    logger.info("****************Evaluation done.*****************")
    return ret_dict
# Module is import-only; running it directly is a no-op.
if __name__ == "__main__":
    pass
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
tools/eval_utils/eval_utils_with_translation.py | Python | import pickle
import time
import numpy as np
import torch
import tqdm
from visual_utils import open3d_vis_utils as V
from lidartranslator.api import nuscenes_to_waymo
from pcdet.models import load_data_to_gpu
from pcdet.models.model_utils.dsnorm import set_ds_target
from pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """Add one batch's recall counts to the running `metric` dict.

    Updates `disp_dict` with a "(roi, rcnn) / gt" string for the first
    (lowest) recall threshold, for display on the progress bar.
    """
    for cur in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        # Both the roi and rcnn counters fall back to 0 when absent.
        for dst_prefix, src_prefix in (("recall_roi_", "roi_"), ("recall_rcnn_", "rcnn_")):
            metric[dst_prefix + str(cur)] += ret_dict.get(src_prefix + str(cur), 0)
    metric["gt_num"] += ret_dict.get("gt", 0)
    first = str(cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST[0])
    disp_dict["recall_" + first] = "(%d, %d) / %d" % (
        metric["recall_roi_" + first],
        metric["recall_rcnn_" + first],
        metric["gt_num"],
    )
def _recompute_voxels(batch_dict, dataloader, new_points=None):
    """Re-run point preprocessing and voxelization for a batch of size 1.

    Incoming GPU tensors carry a leading batch-index column (all zeros, since
    batch_size == 1). This converts everything to numpy, strips that column,
    drops the stale voxel entries, optionally substitutes translated points,
    reruns the dataset's point-feature encoder and data processor, and moves
    the results back to GPU with the batch-index column restored.

    Args:
        batch_dict: batch dictionary of GPU tensors (batch_size must be 1).
        dataloader: dataloader whose dataset exposes ``point_feature_encoder``
            and ``data_processor``.
        new_points: optional (N, C) numpy array of translated points used in
            place of the original points before voxelization.

    Returns:
        The updated batch dictionary with freshly computed voxels on GPU.
    """
    # 1. Convert all tensors to numpy arrays.
    for key in batch_dict.keys():
        if isinstance(batch_dict[key], torch.Tensor):
            batch_dict[key] = batch_dict[key].cpu().numpy()
    # 2. Strip the batch-index column from points and drop the stale voxel
    # entries so the data processor recomputes them from scratch.
    # (Fixes the original nksr/pointersect branch, which tested a leftover
    # loop variable with `if key in [...]` instead of looping over the keys.)
    batch_dict["points"] = batch_dict["points"][:, 1:]
    for key in ["voxels", "voxel_coords", "voxel_num_points"]:
        batch_dict.pop(key)
    # 3. Optionally replace the raw points with the translated ones.
    if new_points is not None:
        batch_dict["points"] = new_points
    # 4. Re-run the dataset preprocessing pipeline.
    batch_dict = dataloader.dataset.point_feature_encoder.forward(batch_dict)
    batch_dict = dataloader.dataset.data_processor.forward(batch_dict)
    # 5. Convert all numeric numpy arrays back to GPU tensors.
    for key in batch_dict.keys():
        if isinstance(batch_dict[key], np.ndarray) and np.issubdtype(
            batch_dict[key].dtype, np.number
        ):
            batch_dict[key] = torch.tensor(batch_dict[key]).cuda()
    # 6. Restore the (all-zero) batch-index column on points and voxel coords.
    points = batch_dict["points"].cpu().numpy()
    points = np.concatenate(
        (np.zeros((points.shape[0], 1), dtype=np.float32), points),
        axis=1,
    )
    batch_dict["points"] = torch.tensor(points).cuda()
    voxel_coords = batch_dict["voxel_coords"].cpu().numpy()
    voxel_coords = np.concatenate(
        (np.zeros((voxel_coords.shape[0], 1), dtype=np.int32), voxel_coords),
        axis=1,
    )
    batch_dict["voxel_coords"] = torch.tensor(voxel_coords).cuda()
    return batch_dict


def eval_one_epoch(
    cfg,
    model,
    dataloader,
    epoch_id,
    logger,
    dist_test=False,
    save_to_file=False,
    result_dir=None,
    args=None,
):
    """Evaluate one epoch, translating each LiDAR frame before detection.

    For every batch (batch_size must be 1), the source point cloud is
    optionally translated to the target sensor configuration via the selected
    translator ("nksr" or "pointersect"), voxels are optionally recomputed
    from the translated points, and the model's detections are accumulated,
    saved, and evaluated against the dataset annotations.

    Args:
        cfg: global config (POST_PROCESSING thresholds, LOCAL_RANK, ...).
        model: detector to evaluate; switched to eval mode here.
        dataloader: evaluation dataloader (batch_size must be 1).
        epoch_id: identifier used only for logging.
        logger: logger instance.
        dist_test: whether evaluation runs under DistributedDataParallel.
        save_to_file: whether to dump per-frame predictions to disk.
        result_dir: directory where results and result.pkl are written.
        args: CLI namespace providing ``translator``, ``pointersect_k``,
            ``visualize`` and ``recompute_voxels``.

    Returns:
        Dict of recall metrics, dataset evaluation results, and the average
        number of points per frame. Empty dict on non-zero ranks.
    """
    result_dir.mkdir(parents=True, exist_ok=True)

    final_output_dir = result_dir / "final_result" / "data"
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)

    metric = {
        "gt_num": 0,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric["recall_roi_%s" % str(cur_thresh)] = 0
        metric["recall_rcnn_%s" % str(cur_thresh)] = 0

    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []

    logger.info("*************** EPOCH %s EVALUATION *****************" % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], broadcast_buffers=False
        )
    model.eval()

    if cfg.get("SELF_TRAIN", None) and cfg.SELF_TRAIN.get("DSNORM", None):
        model.apply(set_ds_target)

    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(
            total=len(dataloader), leave=True, desc="eval", dynamic_ncols=True
        )
    start_time = time.time()

    # TODO: only support batch_size=1 for now. This is just for convenience
    # for debugging. We should support batch_size > 1 in the future.
    if dataloader.batch_size != 1:
        raise NotImplementedError("batch_size > 1 is not supported for now.")

    all_num_points = []
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            print('src["points"]          :', batch_dict["points"].shape)
            print('src["frame_id"]        :', batch_dict["frame_id"])
            print('src["metadata"]        :', batch_dict["metadata"])
            print('src["gt_boxes"]        :', batch_dict["gt_boxes"].shape)
            print('src["use_lead_xyz"]    :', batch_dict["use_lead_xyz"])
            print('src["voxels"]          :', batch_dict["voxels"].shape)
            print('src["voxel_coords"]    :', batch_dict["voxel_coords"].shape)
            print('src["voxel_num_points"]:', batch_dict["voxel_num_points"].shape)
            print('src["batch_size"]      :', batch_dict["batch_size"])

            if args.translator == "nksr" or args.translator == "pointersect":
                # Translate the frame to the target sensor configuration.
                src_points = batch_dict["points"][:, 1:].cpu().numpy()
                data_dict = nuscenes_to_waymo(
                    src_points,
                    translator=args.translator,
                    pointersect_k=args.pointersect_k,
                    visualize=args.visualize,
                )
                dst_points = data_dict["points"]

                if args.recompute_voxels:
                    print("Recompute voxels!!!!!!!!!!!!!!!!")
                    batch_dict = _recompute_voxels(
                        batch_dict, dataloader, new_points=dst_points
                    )
                else:
                    # Keep the existing voxels; only swap in the translated
                    # points with a zero batch-index column prepended.
                    dst_points = np.concatenate(
                        (
                            np.zeros((dst_points.shape[0], 1), dtype=np.float32),
                            dst_points,
                        ),
                        axis=1,
                    )
                    batch_dict["points"] = torch.tensor(dst_points).cuda()
            else:
                # No translation requested; optionally re-voxelize as-is.
                if args.recompute_voxels:
                    print("Recompute voxels!!!!!!!!!!!!!!!!")
                    batch_dict = _recompute_voxels(batch_dict, dataloader)

            all_num_points.append(len(batch_dict["points"]))

            print('dst["points"]          :', batch_dict["points"].shape)
            print('dst["frame_id"]        :', batch_dict["frame_id"])
            print('dst["metadata"]        :', batch_dict["metadata"])
            print('dst["gt_boxes"]        :', batch_dict["gt_boxes"].shape)
            print('dst["use_lead_xyz"]    :', batch_dict["use_lead_xyz"])
            print('dst["voxels"]          :', batch_dict["voxels"].shape)
            print('dst["voxel_coords"]    :', batch_dict["voxel_coords"].shape)
            print('dst["voxel_num_points"]:', batch_dict["voxel_num_points"].shape)
            print('dst["batch_size"]      :', batch_dict["batch_size"])

            pred_dicts, ret_dict = model(batch_dict)

        # Visualize
        if args.visualize:
            if args.translator == "nksr":
                # Plot after translation (with the reconstructed mesh).
                V.draw_scenes(
                    points=batch_dict["points"][:, 1:],
                    gt_boxes=batch_dict["gt_boxes"][0],
                    ref_boxes=pred_dicts[0]["pred_boxes"],
                    ref_labels=pred_dicts[0]["pred_labels"],
                    ref_scores=pred_dicts[0]["pred_scores"],
                    src_points=None,
                    vertices=data_dict["vertices"],
                    triangles=data_dict["triangles"],
                    rays_o=None,
                    rays_d=None,
                    rays_length=10,
                    rays_subsample=0.01,
                )
                # Plot before translation.
                V.draw_scenes(
                    points=src_points,
                    gt_boxes=None,
                    ref_boxes=None,
                    ref_labels=None,
                    ref_scores=None,
                )
            elif args.translator == "pointersect":
                # Fixed: original compared against the typo "pointerset",
                # leaving this branch unreachable.
                # Plot after translation.
                V.draw_scenes(
                    points=batch_dict["points"][:, 1:],
                    gt_boxes=batch_dict["gt_boxes"][0],
                    ref_boxes=pred_dicts[0]["pred_boxes"],
                    ref_labels=pred_dicts[0]["pred_labels"],
                    ref_scores=pred_dicts[0]["pred_scores"],
                    src_points=None,
                    rays_o=None,
                    rays_d=None,
                    rays_length=10,
                    rays_subsample=0.01,
                )
            else:
                V.draw_scenes(
                    points=batch_dict["points"][:, 1:],
                    gt_boxes=batch_dict["gt_boxes"][0],
                    ref_boxes=pred_dicts[0]["pred_boxes"],
                    ref_labels=pred_dicts[0]["pred_labels"],
                    ref_scores=pred_dicts[0]["pred_scores"],
                )

        disp_dict = {}
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(
            batch_dict,
            pred_dicts,
            class_names,
            output_path=final_output_dir if save_to_file else None,
        )
        det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()

    if cfg.LOCAL_RANK == 0:
        progress_bar.close()

    if dist_test:
        rank, world_size = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(
            det_annos, len(dataset), tmpdir=result_dir / "tmpdir"
        )
        metric = common_utils.merge_results_dist(
            [metric], world_size, tmpdir=result_dir / "tmpdir"
        )

    logger.info("*************** Performance of EPOCH %s *****************" % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info(
        "Generate label finished(sec_per_example: %.4f second)." % sec_per_example
    )

    # Only rank 0 aggregates and reports results.
    if cfg.LOCAL_RANK != 0:
        return {}

    ret_dict = {}
    if dist_test:
        # Sum the per-rank metric dicts into metric[0].
        for key in metric[0]:
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]

    gt_num_cnt = metric["gt_num"]
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        cur_roi_recall = metric["recall_roi_%s" % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric["recall_rcnn_%s" % str(cur_thresh)] / max(
            gt_num_cnt, 1
        )
        logger.info("recall_roi_%s: %f" % (cur_thresh, cur_roi_recall))
        logger.info("recall_rcnn_%s: %f" % (cur_thresh, cur_rcnn_recall))
        ret_dict["recall/roi_%s" % str(cur_thresh)] = cur_roi_recall
        ret_dict["recall/rcnn_%s" % str(cur_thresh)] = cur_rcnn_recall

    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += len(anno["name"])
    logger.info(
        "Average predicted number of objects(%d samples): %.3f"
        % (len(det_annos), total_pred_objects / max(1, len(det_annos)))
    )

    with open(result_dir / "result.pkl", "wb") as f:
        pickle.dump(det_annos, f)

    result_str, result_dict = dataset.evaluation(
        det_annos,
        class_names,
        eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC,
        output_path=final_output_dir,
    )

    logger.info(result_str)
    ret_dict.update(result_dict)

    # add avg predicted number of objects to tensorboard log
    ret_dict["eval_avg_pred_bboxes"] = total_pred_objects / max(1, len(det_annos))

    logger.info("Result is save to %s" % result_dir)
    logger.info("****************Evaluation done.*****************")

    # avg num points per frame
    avg_num_points = np.mean(all_num_points)
    logger.info(f"Average number of points per frame: {avg_num_points:.03f}")
    ret_dict["avg_num_points"] = avg_num_points

    return ret_dict
if __name__ == "__main__":
    # No standalone behavior: eval_one_epoch is invoked by the test driver.
    pass
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
tools/eval_utils/model_key_mapping.py | Python | import os, sys
import torch
# NOTE(review): this runs at import time and requires sys.argv[1] to exist;
# it duplicates the load performed inside second_key_mapping() and its result
# is shadowed there — presumably leftover debugging, confirm before removing.
src = torch.load(sys.argv[1])  # source
src = src["model_state"]
def second_key_mapping():
    """Rename legacy SECOND/PartA2 checkpoint keys to the current layout.

    Loads the checkpoint given as the first CLI argument, rewrites every
    parameter key containing one of the legacy prefixes below to the new
    module path, and saves the result as ``new_mapped_model.pth`` next to
    the input file. Keys without a matching prefix are kept unchanged.
    """
    src = torch.load(sys.argv[1])
    src = src["model_state"]

    # Legacy prefix -> current module path. PartA2-specific entries included.
    key_mapping = {
        "rpn_net.conv": "backbone_3d.conv",
        "rpn_head.deblocks.": "backbone_2d.deblocks.",
        "rpn_head.conv_cls.": "dense_head.conv_cls.",
        "rpn_head.conv_box.": "dense_head.conv_box.",
        "rpn_head.conv_dir_cls.": "dense_head.conv_dir_cls.",
        "rpn_head.blocks.": "backbone_2d.blocks.",
        # For PartA2
        "rpn_net.inv": "backbone_3d.inv",
        "rpn_net.seg_cls_layer": "point_head.cls_layers.0",
        "rpn_net.seg_reg_layer": "point_head.part_reg_layers.0",
        "rcnn_net.shared_fc_layer.0.conv": "roi_head.shared_fc_layer.0",
        "rcnn_net.shared_fc_layer.0.bn.bn": "roi_head.shared_fc_layer.1",
        "rcnn_net.shared_fc_layer.2.conv": "roi_head.shared_fc_layer.4",
        "rcnn_net.shared_fc_layer.2.bn.bn": "roi_head.shared_fc_layer.5",
        "rcnn_net.shared_fc_layer.4.conv": "roi_head.shared_fc_layer.8",
        "rcnn_net.shared_fc_layer.4.bn.bn": "roi_head.shared_fc_layer.9",
        "rcnn_net.cls_layer.0.conv": "roi_head.cls_layers.0",
        "rcnn_net.cls_layer.0.bn.bn": "roi_head.cls_layers.1",
        "rcnn_net.cls_layer.2.conv": "roi_head.cls_layers.4",
        "rcnn_net.cls_layer.2.bn.bn": "roi_head.cls_layers.5",
        "rcnn_net.cls_layer.3.conv": "roi_head.cls_layers.7",
        "rcnn_net.reg_layer.0.conv": "roi_head.reg_layers.0",
        "rcnn_net.reg_layer.0.bn.bn": "roi_head.reg_layers.1",
        "rcnn_net.reg_layer.2.conv": "roi_head.reg_layers.4",
        "rcnn_net.reg_layer.2.bn.bn": "roi_head.reg_layers.5",
        "rcnn_net.reg_layer.3.conv": "roi_head.reg_layers.7",
        "rcnn_net.conv_": "roi_head.conv_",
    }

    src_key_list = list(src.keys())
    num_replaced = 0
    for src_key in src_key_list:
        found = False
        for key, val in key_mapping.items():
            if key in src_key:
                new_key = src_key.replace(key, val)
                src[new_key] = src.pop(src_key)
                num_replaced += 1
                print("Replace: %s => %s" % (src_key, new_key))
                found = True
                # `src_key` has been popped from `src`; a second matching
                # prefix would pop it again and raise KeyError, so stop here.
                break
        if not found:
            print("Skip: %s" % src_key)

    print("Total replaced keys: %d / %d" % (num_replaced, len(src)))
    ans = {"model_state": src}
    new_path = os.path.join(os.path.dirname(sys.argv[1]), "new_mapped_model.pth")
    torch.save(ans, new_path)
if __name__ == "__main__":
    # Script entry point: remap the checkpoint given as sys.argv[1].
    second_key_mapping()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
tools/scripts/dist_test.sh | Shell | #!/usr/bin/env bash
set -x
NGPUS=$1
PY_ARGS=${@:2}
torchrun --nproc_per_node=${NGPUS} test.py --launcher pytorch ${PY_ARGS}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.