# ============================================================================
# repo: 3DTrans
# file: 3DTrans-master/pcdet/models/roi_heads/roi_head_template.py
# ============================================================================
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...utils import box_coder_utils, common_utils, loss_utils
from ..model_utils.model_nms_utils import class_agnostic_nms
from .target_assigner.proposal_target_layer import ProposalTargetLayer
class RoIHeadTemplate(nn.Module):
def __init__(self, num_class, model_cfg, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.box_coder = getattr(box_coder_utils, self.model_cfg.TARGET_CONFIG.BOX_CODER)(
**self.model_cfg.TARGET_CONFIG.get('BOX_CODER_CONFIG', {})
)
self.proposal_target_layer = ProposalTargetLayer(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG)
self.build_losses(self.model_cfg.LOSS_CONFIG)
self.forward_ret_dict = None
def build_losses(self, losses_cfg):
self.add_module(
'reg_loss_func',
loss_utils.WeightedSmoothL1Loss(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights'])
)
def make_fc_layers(self, input_channels, output_channels, fc_list):
fc_layers = []
pre_channel = input_channels
        for k in range(len(fc_list)):
fc_layers.extend([
nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False),
nn.BatchNorm1d(fc_list[k]),
nn.ReLU()
])
pre_channel = fc_list[k]
if self.model_cfg.DP_RATIO >= 0 and k == 0:
fc_layers.append(nn.Dropout(self.model_cfg.DP_RATIO))
fc_layers.append(nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True))
fc_layers = nn.Sequential(*fc_layers)
return fc_layers
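    # Usage sketch (hypothetical channel sizes, not taken from a shipped config):
    #   make_fc_layers(input_channels=256, output_channels=7, fc_list=[256, 256])
    # builds Conv1d(256->256, k=1) + BN + ReLU (plus Dropout after the first block
    # when DP_RATIO >= 0), Conv1d(256->256, k=1) + BN + ReLU, and a final
    # bias-enabled Conv1d(256->7, k=1) head; the 1x1 Conv1d layers act as a
    # shared MLP applied per position on (B, C, N) tensors.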
@torch.no_grad()
def proposal_layer(self, batch_dict, nms_config):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicates whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
nms_config:
Returns:
batch_dict:
rois: (B, num_rois, 7+C)
roi_scores: (B, num_rois)
roi_labels: (B, num_rois)
"""
if batch_dict.get('rois', None) is not None:
return batch_dict
batch_size = batch_dict['batch_size']
batch_box_preds = batch_dict['batch_box_preds']
batch_cls_preds = batch_dict['batch_cls_preds']
rois = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE, batch_box_preds.shape[-1]))
roi_scores = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE))
roi_labels = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE), dtype=torch.long)
for index in range(batch_size):
if batch_dict.get('batch_index', None) is not None:
                assert batch_cls_preds.dim() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
                assert batch_cls_preds.dim() == 3
batch_mask = index
box_preds = batch_box_preds[batch_mask]
cls_preds = batch_cls_preds[batch_mask]
cur_roi_scores, cur_roi_labels = torch.max(cls_preds, dim=1)
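            # torch.max over the class dim gives a class-agnostic confidence per box
            # and its 0-based argmax label; +1 is added after NMS so labels start at 1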
if nms_config.MULTI_CLASSES_NMS:
raise NotImplementedError
else:
selected, selected_scores = class_agnostic_nms(
box_scores=cur_roi_scores, box_preds=box_preds, nms_config=nms_config
)
rois[index, :len(selected), :] = box_preds[selected]
roi_scores[index, :len(selected)] = cur_roi_scores[selected]
roi_labels[index, :len(selected)] = cur_roi_labels[selected]
batch_dict['rois'] = rois
batch_dict['roi_scores'] = roi_scores
batch_dict['roi_labels'] = roi_labels + 1
        batch_dict['has_class_labels'] = batch_cls_preds.shape[-1] > 1
batch_dict.pop('batch_index', None)
return batch_dict
def assign_targets(self, batch_dict):
batch_size = batch_dict['batch_size']
with torch.no_grad():
targets_dict = self.proposal_target_layer.forward(batch_dict)
rois = targets_dict['rois'] # (B, N, 7 + C)
gt_of_rois = targets_dict['gt_of_rois'] # (B, N, 7 + C + 1)
targets_dict['gt_of_rois_src'] = gt_of_rois.clone().detach()
# canonical transformation
roi_center = rois[:, :, 0:3]
roi_ry = rois[:, :, 6] % (2 * np.pi)
gt_of_rois[:, :, 0:3] = gt_of_rois[:, :, 0:3] - roi_center
gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry
        # transform from the global LiDAR frame to the RoI-local (canonical) frame
gt_of_rois = common_utils.rotate_points_along_z(
points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1)
).view(batch_size, -1, gt_of_rois.shape[-1])
# flip orientation if rois have opposite orientation
heading_label = gt_of_rois[:, :, 6] % (2 * np.pi) # 0 ~ 2pi
opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5)
heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi)
flag = heading_label > np.pi
heading_label[flag] = heading_label[flag] - np.pi * 2 # (-pi/2, pi/2)
heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2)
gt_of_rois[:, :, 6] = heading_label
targets_dict['gt_of_rois'] = gt_of_rois
return targets_dict
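    # Heading-target example: a GT heading 170 deg away from its RoI falls inside
    # the "opposite" band (pi/2, 3pi/2), is flipped by pi to -10 deg, and is finally
    # clamped into [-pi/2, pi/2], so the regression head only ever has to predict
    # small relative rotations.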
def get_box_reg_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
code_size = self.box_coder.code_size
reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size]
gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:code_size].view(-1, code_size)
rcnn_reg = forward_ret_dict['rcnn_reg'] # (rcnn_batch_size, C)
roi_boxes3d = forward_ret_dict['rois']
rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]
fg_mask = (reg_valid_mask > 0)
fg_sum = fg_mask.long().sum().item()
tb_dict = {}
if loss_cfgs.REG_LOSS == 'smooth-l1':
rois_anchor = roi_boxes3d.clone().detach().view(-1, code_size)
rois_anchor[:, 0:3] = 0
rois_anchor[:, 6] = 0
reg_targets = self.box_coder.encode_torch(
gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor
)
rcnn_loss_reg = self.reg_loss_func(
rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),
reg_targets.unsqueeze(dim=0),
) # [B, M, 7]
rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1)
rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item()
if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0:
                # TODO: needs to be checked
fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask]
fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask]
fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size)
batch_anchors = fg_roi_boxes3d.clone().detach()
roi_ry = fg_roi_boxes3d[:, :, 6].view(-1)
roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3)
batch_anchors[:, :, 0:3] = 0
rcnn_boxes3d = self.box_coder.decode_torch(
fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors
).view(-1, code_size)
rcnn_boxes3d = common_utils.rotate_points_along_z(
rcnn_boxes3d.unsqueeze(dim=1), roi_ry
).squeeze(dim=1)
rcnn_boxes3d[:, 0:3] += roi_xyz
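                # the residuals were regressed in the RoI-canonical frame, so the
                # predictions are decoded there first, then rotated by roi_ry and
                # translated by roi_xyz to obtain global LiDAR boxes before the
                # corner comparison against gt_of_rois_src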
loss_corner = loss_utils.get_corner_loss_lidar(
rcnn_boxes3d[:, 0:7],
gt_of_rois_src[fg_mask][:, 0:7]
)
loss_corner = loss_corner.mean()
loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS['rcnn_corner_weight']
rcnn_loss_reg += loss_corner
tb_dict['rcnn_loss_corner'] = loss_corner.item()
else:
raise NotImplementedError
return rcnn_loss_reg, tb_dict
def get_box_cls_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_cls = forward_ret_dict['rcnn_cls']
rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy':
rcnn_cls_flat = rcnn_cls.view(-1)
batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none')
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
elif loss_cfgs.CLS_LOSS == 'CrossEntropy':
batch_loss_cls = F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
else:
raise NotImplementedError
rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.item()}
return rcnn_loss_cls, tb_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_reg
tb_dict.update(reg_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, tb_dict
def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds):
"""
Args:
batch_size:
rois: (B, N, 7)
cls_preds: (BN, num_class)
box_preds: (BN, code_size)
Returns:
"""
code_size = self.box_coder.code_size
# batch_cls_preds: (B, N, num_class or 1)
batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1])
batch_box_preds = box_preds.view(batch_size, -1, code_size)
roi_ry = rois[:, :, 6].view(-1)
roi_xyz = rois[:, :, 0:3].view(-1, 3)
local_rois = rois.clone().detach()
local_rois[:, :, 0:3] = 0
batch_box_preds = self.box_coder.decode_torch(batch_box_preds, local_rois).view(-1, code_size)
batch_box_preds = common_utils.rotate_points_along_z(
batch_box_preds.unsqueeze(dim=1), roi_ry
).squeeze(dim=1)
batch_box_preds[:, 0:3] += roi_xyz
batch_box_preds = batch_box_preds.view(batch_size, -1, code_size)
return batch_cls_preds, batch_box_preds
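# Shape sketch (hypothetical sizes): with batch_size=2, 100 RoIs per sample and a
# 7-dof box coder, generate_predicted_boxes(2, rois=(2, 100, 7),
# cls_preds=(200, num_class), box_preds=(200, 7)) returns batch_cls_preds of shape
# (2, 100, num_class) and batch_box_preds of shape (2, 100, 7); the residuals are
# decoded in the canonical RoI frame and mapped back to LiDAR coordinates,
# mirroring the corner-loss path above.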
# ============================================================================
# repo: 3DTrans
# file: 3DTrans-master/pcdet/models/roi_heads/voxelrcnn_head.py
# ============================================================================
import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_stack import voxel_pool_modules as voxelpool_stack_modules
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class VoxelRCNNHead(RoIHeadTemplate):
def __init__(self, backbone_channels, model_cfg, point_cloud_range, voxel_size, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.pool_cfg = model_cfg.ROI_GRID_POOL
LAYER_cfg = self.pool_cfg.POOL_LAYERS
self.point_cloud_range = point_cloud_range
self.voxel_size = voxel_size
c_out = 0
self.roi_grid_pool_layers = nn.ModuleList()
for src_name in self.pool_cfg.FEATURES_SOURCE:
mlps = LAYER_cfg[src_name].MLPS
for k in range(len(mlps)):
mlps[k] = [backbone_channels[src_name]] + mlps[k]
pool_layer = voxelpool_stack_modules.NeighborVoxelSAModuleMSG(
query_ranges=LAYER_cfg[src_name].QUERY_RANGES,
nsamples=LAYER_cfg[src_name].NSAMPLE,
radii=LAYER_cfg[src_name].POOL_RADIUS,
mlps=mlps,
pool_method=LAYER_cfg[src_name].POOL_METHOD,
)
self.roi_grid_pool_layers.append(pool_layer)
c_out += sum([x[-1] for x in mlps])
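            # c_out accumulates the output channels of every pooling layer so the
            # per-source features can be concatenated along the channel dim
            # (torch.cat(..., dim=-1)) in roi_grid_pool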
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# c_out = sum([x[-1] for x in mlps])
pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out
shared_fc_list = []
        for k in range(len(self.model_cfg.SHARED_FC)):
shared_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.SHARED_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU(inplace=True)
])
pre_channel = self.model_cfg.SHARED_FC[k]
            if k != len(self.model_cfg.SHARED_FC) - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
cls_fc_list = []
        for k in range(len(self.model_cfg.CLS_FC)):
cls_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.CLS_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.CLS_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.CLS_FC[k]
            if k != len(self.model_cfg.CLS_FC) - 1 and self.model_cfg.DP_RATIO > 0:
cls_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.cls_fc_layers = nn.Sequential(*cls_fc_list)
self.cls_pred_layer = nn.Linear(pre_channel, self.num_class, bias=True)
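        # NOTE: pre_channel now equals CLS_FC[-1], yet the reg branch below also
        # consumes shared_features of dim SHARED_FC[-1]; the construction therefore
        # implicitly assumes CLS_FC[-1] == SHARED_FC[-1], which holds for the usual
        # VoxelRCNN configs.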
reg_fc_list = []
        for k in range(len(self.model_cfg.REG_FC)):
reg_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.REG_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.REG_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.REG_FC[k]
            if k != len(self.model_cfg.REG_FC) - 1 and self.model_cfg.DP_RATIO > 0:
reg_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.reg_fc_layers = nn.Sequential(*reg_fc_list)
self.reg_pred_layer = nn.Linear(pre_channel, self.box_coder.code_size * self.num_class, bias=True)
self.init_weights()
def init_weights(self):
init_func = nn.init.xavier_normal_
for module_list in [self.shared_fc_layer, self.cls_fc_layers, self.reg_fc_layers]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.cls_pred_layer.weight, 0, 0.01)
nn.init.constant_(self.cls_pred_layer.bias, 0)
nn.init.normal_(self.reg_pred_layer.weight, mean=0, std=0.001)
nn.init.constant_(self.reg_pred_layer.bias, 0)
# def _init_weights(self):
# init_func = nn.init.xavier_normal_
# for m in self.modules():
# if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
# init_func(m.weight)
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
rois = batch_dict['rois']
batch_size = batch_dict['batch_size']
with_vf_transform = batch_dict.get('with_voxel_feature_transform', False)
roi_grid_xyz, _ = self.get_global_grid_points_of_roi(
rois, grid_size=self.pool_cfg.GRID_SIZE
) # (BxN, 6x6x6, 3)
# roi_grid_xyz: (B, Nx6x6x6, 3)
roi_grid_xyz = roi_grid_xyz.view(batch_size, -1, 3)
# compute the voxel coordinates of grid points
roi_grid_coords_x = (roi_grid_xyz[:, :, 0:1] - self.point_cloud_range[0]) // self.voxel_size[0]
roi_grid_coords_y = (roi_grid_xyz[:, :, 1:2] - self.point_cloud_range[1]) // self.voxel_size[1]
roi_grid_coords_z = (roi_grid_xyz[:, :, 2:3] - self.point_cloud_range[2]) // self.voxel_size[2]
# roi_grid_coords: (B, Nx6x6x6, 3)
roi_grid_coords = torch.cat([roi_grid_coords_x, roi_grid_coords_y, roi_grid_coords_z], dim=-1)
batch_idx = rois.new_zeros(batch_size, roi_grid_coords.shape[1], 1)
for bs_idx in range(batch_size):
batch_idx[bs_idx, :, 0] = bs_idx
# roi_grid_coords: (B, Nx6x6x6, 4)
# roi_grid_coords = torch.cat([batch_idx, roi_grid_coords], dim=-1)
# roi_grid_coords = roi_grid_coords.int()
roi_grid_batch_cnt = rois.new_zeros(batch_size).int().fill_(roi_grid_coords.shape[1])
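        # every sample contributes the same number of query points
        # (num_rois * GRID_SIZE ** 3), so the per-sample counts are constant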
pooled_features_list = []
for k, src_name in enumerate(self.pool_cfg.FEATURES_SOURCE):
pool_layer = self.roi_grid_pool_layers[k]
cur_stride = batch_dict['multi_scale_3d_strides'][src_name]
if with_vf_transform:
cur_sp_tensors = batch_dict['multi_scale_3d_features_post'][src_name]
else:
cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name]
# compute voxel center xyz and batch_cnt
cur_coords = cur_sp_tensors.indices
cur_voxel_xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4],
downsample_times=cur_stride,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
cur_voxel_xyz_batch_cnt = cur_voxel_xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
cur_voxel_xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
# get voxel2point tensor
v2p_ind_tensor = common_utils.generate_voxel2pinds(cur_sp_tensors)
# compute the grid coordinates in this scale, in [batch_idx, x y z] order
cur_roi_grid_coords = roi_grid_coords // cur_stride
cur_roi_grid_coords = torch.cat([batch_idx, cur_roi_grid_coords], dim=-1)
cur_roi_grid_coords = cur_roi_grid_coords.int()
# voxel neighbor aggregation
pooled_features = pool_layer(
xyz=cur_voxel_xyz.contiguous(),
xyz_batch_cnt=cur_voxel_xyz_batch_cnt,
new_xyz=roi_grid_xyz.contiguous().view(-1, 3),
new_xyz_batch_cnt=roi_grid_batch_cnt,
new_coords=cur_roi_grid_coords.contiguous().view(-1, 4),
features=cur_sp_tensors.features.contiguous(),
voxel2point_indices=v2p_ind_tensor
)
pooled_features = pooled_features.view(
-1, self.pool_cfg.GRID_SIZE ** 3,
pooled_features.shape[-1]
) # (BxN, 6x6x6, C)
pooled_features_list.append(pooled_features)
ms_pooled_features = torch.cat(pooled_features_list, dim=-1)
return ms_pooled_features
def get_global_grid_points_of_roi(self, rois, grid_size):
rois = rois.view(-1, rois.shape[-1])
batch_size_rcnn = rois.shape[0]
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
global_roi_grid_points = common_utils.rotate_points_along_z(
local_roi_grid_points.clone(), rois[:, 6]
).squeeze(dim=1)
global_center = rois[:, 0:3].clone()
global_roi_grid_points += global_center.unsqueeze(dim=1)
return global_roi_grid_points, local_roi_grid_points
@staticmethod
def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
faked_features = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
- (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
return roi_grid_points
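    # Example (GRID_SIZE = 6): each RoI is sampled with 6 ** 3 = 216 grid points;
    # (dense_idx + 0.5) / grid_size maps integer voxel indices to cell centers in
    # (0, 1), which are then scaled by the RoI size (dx, dy, dz) and shifted so the
    # grid is centered at the RoI-local origin.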
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
# Box Refinement
pooled_features = pooled_features.view(pooled_features.size(0), -1)
shared_features = self.shared_fc_layer(pooled_features)
rcnn_cls = self.cls_pred_layer(self.cls_fc_layers(shared_features))
rcnn_reg = self.reg_pred_layer(self.reg_fc_layers(shared_features))
# grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# batch_size_rcnn = pooled_features.shape[0]
# pooled_features = pooled_features.permute(0, 2, 1).\
# contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
# shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
# rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
# rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
class ActiveVoxelRCNNHead(RoIHeadTemplate):
def __init__(self, backbone_channels, model_cfg, point_cloud_range, voxel_size, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.pool_cfg = model_cfg.ROI_GRID_POOL
LAYER_cfg = self.pool_cfg.POOL_LAYERS
self.point_cloud_range = point_cloud_range
self.voxel_size = voxel_size
c_out = 0
self.roi_grid_pool_layers = nn.ModuleList()
for src_name in self.pool_cfg.FEATURES_SOURCE:
mlps = LAYER_cfg[src_name].MLPS
for k in range(len(mlps)):
mlps[k] = [backbone_channels[src_name]] + mlps[k]
pool_layer = voxelpool_stack_modules.NeighborVoxelSAModuleMSG(
query_ranges=LAYER_cfg[src_name].QUERY_RANGES,
nsamples=LAYER_cfg[src_name].NSAMPLE,
radii=LAYER_cfg[src_name].POOL_RADIUS,
mlps=mlps,
pool_method=LAYER_cfg[src_name].POOL_METHOD,
)
self.roi_grid_pool_layers.append(pool_layer)
c_out += sum([x[-1] for x in mlps])
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# c_out = sum([x[-1] for x in mlps])
pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out
shared_fc_list = []
        for k in range(len(self.model_cfg.SHARED_FC)):
shared_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.SHARED_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU(inplace=True)
])
pre_channel = self.model_cfg.SHARED_FC[k]
            if k != len(self.model_cfg.SHARED_FC) - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
cls_fc_list = []
        for k in range(len(self.model_cfg.CLS_FC)):
cls_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.CLS_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.CLS_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.CLS_FC[k]
            if k != len(self.model_cfg.CLS_FC) - 1 and self.model_cfg.DP_RATIO > 0:
cls_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.cls_fc_layers = nn.Sequential(*cls_fc_list)
self.cls_pred_layer = nn.Linear(pre_channel, self.num_class, bias=True)
reg_fc_list = []
        for k in range(len(self.model_cfg.REG_FC)):
reg_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.REG_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.REG_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.REG_FC[k]
            if k != len(self.model_cfg.REG_FC) - 1 and self.model_cfg.DP_RATIO > 0:
reg_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.reg_fc_layers = nn.Sequential(*reg_fc_list)
self.reg_pred_layer = nn.Linear(pre_channel, self.box_coder.code_size * self.num_class, bias=True)
self.init_weights()
def init_weights(self):
init_func = nn.init.xavier_normal_
for module_list in [self.shared_fc_layer, self.cls_fc_layers, self.reg_fc_layers]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.cls_pred_layer.weight, 0, 0.01)
nn.init.constant_(self.cls_pred_layer.bias, 0)
nn.init.normal_(self.reg_pred_layer.weight, mean=0, std=0.001)
nn.init.constant_(self.reg_pred_layer.bias, 0)
# def _init_weights(self):
# init_func = nn.init.xavier_normal_
# for m in self.modules():
# if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
# init_func(m.weight)
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
rois = batch_dict['rois']
batch_size = batch_dict['batch_size']
with_vf_transform = batch_dict.get('with_voxel_feature_transform', False)
roi_grid_xyz, _ = self.get_global_grid_points_of_roi(
rois, grid_size=self.pool_cfg.GRID_SIZE
) # (BxN, 6x6x6, 3)
# roi_grid_xyz: (B, Nx6x6x6, 3)
roi_grid_xyz = roi_grid_xyz.view(batch_size, -1, 3)
# compute the voxel coordinates of grid points
roi_grid_coords_x = (roi_grid_xyz[:, :, 0:1] - self.point_cloud_range[0]) // self.voxel_size[0]
roi_grid_coords_y = (roi_grid_xyz[:, :, 1:2] - self.point_cloud_range[1]) // self.voxel_size[1]
roi_grid_coords_z = (roi_grid_xyz[:, :, 2:3] - self.point_cloud_range[2]) // self.voxel_size[2]
# roi_grid_coords: (B, Nx6x6x6, 3)
roi_grid_coords = torch.cat([roi_grid_coords_x, roi_grid_coords_y, roi_grid_coords_z], dim=-1)
batch_idx = rois.new_zeros(batch_size, roi_grid_coords.shape[1], 1)
for bs_idx in range(batch_size):
batch_idx[bs_idx, :, 0] = bs_idx
# roi_grid_coords: (B, Nx6x6x6, 4)
# roi_grid_coords = torch.cat([batch_idx, roi_grid_coords], dim=-1)
# roi_grid_coords = roi_grid_coords.int()
roi_grid_batch_cnt = rois.new_zeros(batch_size).int().fill_(roi_grid_coords.shape[1])
pooled_features_list = []
for k, src_name in enumerate(self.pool_cfg.FEATURES_SOURCE):
pool_layer = self.roi_grid_pool_layers[k]
cur_stride = batch_dict['multi_scale_3d_strides'][src_name]
if with_vf_transform:
cur_sp_tensors = batch_dict['multi_scale_3d_features_post'][src_name]
else:
cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name]
# compute voxel center xyz and batch_cnt
cur_coords = cur_sp_tensors.indices
cur_voxel_xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4],
downsample_times=cur_stride,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
cur_voxel_xyz_batch_cnt = cur_voxel_xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
cur_voxel_xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
# get voxel2point tensor
v2p_ind_tensor = common_utils.generate_voxel2pinds(cur_sp_tensors)
# compute the grid coordinates in this scale, in [batch_idx, x y z] order
cur_roi_grid_coords = roi_grid_coords // cur_stride
cur_roi_grid_coords = torch.cat([batch_idx, cur_roi_grid_coords], dim=-1)
cur_roi_grid_coords = cur_roi_grid_coords.int()
# voxel neighbor aggregation
pooled_features = pool_layer(
xyz=cur_voxel_xyz.contiguous(),
xyz_batch_cnt=cur_voxel_xyz_batch_cnt,
new_xyz=roi_grid_xyz.contiguous().view(-1, 3),
new_xyz_batch_cnt=roi_grid_batch_cnt,
new_coords=cur_roi_grid_coords.contiguous().view(-1, 4),
features=cur_sp_tensors.features.contiguous(),
voxel2point_indices=v2p_ind_tensor
)
pooled_features = pooled_features.view(
-1, self.pool_cfg.GRID_SIZE ** 3,
pooled_features.shape[-1]
) # (BxN, 6x6x6, C)
pooled_features_list.append(pooled_features)
ms_pooled_features = torch.cat(pooled_features_list, dim=-1)
return ms_pooled_features
def get_global_grid_points_of_roi(self, rois, grid_size):
rois = rois.view(-1, rois.shape[-1])
batch_size_rcnn = rois.shape[0]
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
global_roi_grid_points = common_utils.rotate_points_along_z(
local_roi_grid_points.clone(), rois[:, 6]
).squeeze(dim=1)
global_center = rois[:, 0:3].clone()
global_roi_grid_points += global_center.unsqueeze(dim=1)
return global_roi_grid_points, local_roi_grid_points
@staticmethod
def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
faked_features = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
- (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
return roi_grid_points
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
# Box Refinement
pooled_features = pooled_features.view(pooled_features.size(0), -1)
shared_features = self.shared_fc_layer(pooled_features)
if batch_dict['mode'] == 'active_evaluate':
batch_size = batch_dict['batch_size']
roi_num = pooled_features.size(0) // batch_size
batch_dict['roi_shared_feature'] = shared_features.view(batch_size, roi_num, -1)
rcnn_cls = self.cls_pred_layer(self.cls_fc_layers(shared_features))
rcnn_reg = self.reg_pred_layer(self.reg_fc_layers(shared_features))
# grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# batch_size_rcnn = pooled_features.shape[0]
# pooled_features = pooled_features.permute(0, 2, 1).\
# contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
# shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
# rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
# rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
class VoxelRCNNHead_ABL(RoIHeadTemplate):
def __init__(self, backbone_channels, model_cfg, point_cloud_range, voxel_size, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.pool_cfg = model_cfg.ROI_GRID_POOL
LAYER_cfg = self.pool_cfg.POOL_LAYERS
self.point_cloud_range = point_cloud_range
self.voxel_size = voxel_size
c_out = 0
self.roi_grid_pool_layers = nn.ModuleList()
for src_name in self.pool_cfg.FEATURES_SOURCE:
mlps = LAYER_cfg[src_name].MLPS
for k in range(len(mlps)):
mlps[k] = [backbone_channels[src_name]] + mlps[k]
pool_layer = voxelpool_stack_modules.NeighborVoxelSAModuleMSG(
query_ranges=LAYER_cfg[src_name].QUERY_RANGES,
nsamples=LAYER_cfg[src_name].NSAMPLE,
radii=LAYER_cfg[src_name].POOL_RADIUS,
mlps=mlps,
pool_method=LAYER_cfg[src_name].POOL_METHOD,
)
self.roi_grid_pool_layers.append(pool_layer)
c_out += sum([x[-1] for x in mlps])
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# c_out = sum([x[-1] for x in mlps])
pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out
shared_fc_list = []
        for k in range(len(self.model_cfg.SHARED_FC)):
shared_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.SHARED_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU(inplace=True)
])
pre_channel = self.model_cfg.SHARED_FC[k]
            if k != len(self.model_cfg.SHARED_FC) - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
cls_pre_channel = pre_channel
cls_fc_list = []
        for k in range(len(self.model_cfg.CLS_FC)):
cls_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.CLS_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.CLS_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.CLS_FC[k]
            if k != len(self.model_cfg.CLS_FC) - 1 and self.model_cfg.DP_RATIO > 0:
cls_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.cls_fc_layers = nn.Sequential(*cls_fc_list)
self.cls_pred_layer = nn.Linear(pre_channel, self.num_class, bias=True)
cls_pre_channel_1 = cls_pre_channel
cls_fc_list_1 = []
        for k in range(len(self.model_cfg.CLS_FC)):
cls_fc_list_1.extend([
nn.Linear(cls_pre_channel_1, self.model_cfg.CLS_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.CLS_FC[k]),
nn.ReLU()
])
cls_pre_channel_1 = self.model_cfg.CLS_FC[k]
            if k != len(self.model_cfg.CLS_FC) - 1 and self.model_cfg.DP_RATIO > 0:
cls_fc_list_1.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.cls_fc_layers_1 = nn.Sequential(*cls_fc_list_1)
        self.cls_pred_layer_1 = nn.Linear(cls_pre_channel_1, self.num_class, bias=True)  # match cls_fc_layers_1 output dim
cls_pre_channel_2 = cls_pre_channel
cls_fc_list_2 = []
        for k in range(len(self.model_cfg.CLS_FC)):
cls_fc_list_2.extend([
nn.Linear(cls_pre_channel_2, self.model_cfg.CLS_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.CLS_FC[k]),
nn.ReLU()
])
cls_pre_channel_2 = self.model_cfg.CLS_FC[k]
            if k != len(self.model_cfg.CLS_FC) - 1 and self.model_cfg.DP_RATIO > 0:
cls_fc_list_2.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.cls_fc_layers_2 = nn.Sequential(*cls_fc_list_2)
        self.cls_pred_layer_2 = nn.Linear(cls_pre_channel_2, self.num_class, bias=True)  # match cls_fc_layers_2 output dim
reg_fc_list = []
        for k in range(len(self.model_cfg.REG_FC)):
reg_fc_list.extend([
nn.Linear(pre_channel, self.model_cfg.REG_FC[k], bias=False),
nn.BatchNorm1d(self.model_cfg.REG_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.REG_FC[k]
            if k != len(self.model_cfg.REG_FC) - 1 and self.model_cfg.DP_RATIO > 0:
reg_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.reg_fc_layers = nn.Sequential(*reg_fc_list)
self.reg_pred_layer = nn.Linear(pre_channel, self.box_coder.code_size * self.num_class, bias=True)
self.init_weights()
def init_weights(self):
init_func = nn.init.xavier_normal_
for module_list in [self.shared_fc_layer, self.cls_fc_layers, self.reg_fc_layers]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.cls_pred_layer.weight, 0, 0.01)
nn.init.constant_(self.cls_pred_layer.bias, 0)
nn.init.normal_(self.reg_pred_layer.weight, mean=0, std=0.001)
nn.init.constant_(self.reg_pred_layer.bias, 0)
nn.init.kaiming_normal_(self.cls_pred_layer_1.weight)
nn.init.xavier_normal_(self.cls_pred_layer_2.weight)
# def _init_weights(self):
# init_func = nn.init.xavier_normal_
# for m in self.modules():
# if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
# init_func(m.weight)
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
rois = batch_dict['rois']
batch_size = batch_dict['batch_size']
with_vf_transform = batch_dict.get('with_voxel_feature_transform', False)
roi_grid_xyz, _ = self.get_global_grid_points_of_roi(
rois, grid_size=self.pool_cfg.GRID_SIZE
) # (BxN, 6x6x6, 3)
# roi_grid_xyz: (B, Nx6x6x6, 3)
roi_grid_xyz = roi_grid_xyz.view(batch_size, -1, 3)
# compute the voxel coordinates of grid points
roi_grid_coords_x = (roi_grid_xyz[:, :, 0:1] - self.point_cloud_range[0]) // self.voxel_size[0]
roi_grid_coords_y = (roi_grid_xyz[:, :, 1:2] - self.point_cloud_range[1]) // self.voxel_size[1]
roi_grid_coords_z = (roi_grid_xyz[:, :, 2:3] - self.point_cloud_range[2]) // self.voxel_size[2]
# roi_grid_coords: (B, Nx6x6x6, 3)
roi_grid_coords = torch.cat([roi_grid_coords_x, roi_grid_coords_y, roi_grid_coords_z], dim=-1)
batch_idx = rois.new_zeros(batch_size, roi_grid_coords.shape[1], 1)
for bs_idx in range(batch_size):
batch_idx[bs_idx, :, 0] = bs_idx
# roi_grid_coords: (B, Nx6x6x6, 4)
# roi_grid_coords = torch.cat([batch_idx, roi_grid_coords], dim=-1)
# roi_grid_coords = roi_grid_coords.int()
roi_grid_batch_cnt = rois.new_zeros(batch_size).int().fill_(roi_grid_coords.shape[1])
pooled_features_list = []
for k, src_name in enumerate(self.pool_cfg.FEATURES_SOURCE):
pool_layer = self.roi_grid_pool_layers[k]
cur_stride = batch_dict['multi_scale_3d_strides'][src_name]
if with_vf_transform:
cur_sp_tensors = batch_dict['multi_scale_3d_features_post'][src_name]
else:
cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name]
# compute voxel center xyz and batch_cnt
cur_coords = cur_sp_tensors.indices
cur_voxel_xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4],
downsample_times=cur_stride,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
cur_voxel_xyz_batch_cnt = cur_voxel_xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
cur_voxel_xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
# get voxel2point tensor
v2p_ind_tensor = common_utils.generate_voxel2pinds(cur_sp_tensors)
# compute the grid coordinates in this scale, in [batch_idx, x y z] order
cur_roi_grid_coords = roi_grid_coords // cur_stride
cur_roi_grid_coords = torch.cat([batch_idx, cur_roi_grid_coords], dim=-1)
cur_roi_grid_coords = cur_roi_grid_coords.int()
# voxel neighbor aggregation
pooled_features = pool_layer(
xyz=cur_voxel_xyz.contiguous(),
xyz_batch_cnt=cur_voxel_xyz_batch_cnt,
new_xyz=roi_grid_xyz.contiguous().view(-1, 3),
new_xyz_batch_cnt=roi_grid_batch_cnt,
new_coords=cur_roi_grid_coords.contiguous().view(-1, 4),
features=cur_sp_tensors.features.contiguous(),
voxel2point_indices=v2p_ind_tensor
)
pooled_features = pooled_features.view(
-1, self.pool_cfg.GRID_SIZE ** 3,
pooled_features.shape[-1]
) # (BxN, 6x6x6, C)
pooled_features_list.append(pooled_features)
ms_pooled_features = torch.cat(pooled_features_list, dim=-1)
return ms_pooled_features
def get_global_grid_points_of_roi(self, rois, grid_size):
rois = rois.view(-1, rois.shape[-1])
batch_size_rcnn = rois.shape[0]
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
global_roi_grid_points = common_utils.rotate_points_along_z(
local_roi_grid_points.clone(), rois[:, 6]
).squeeze(dim=1)
global_center = rois[:, 0:3].clone()
global_roi_grid_points += global_center.unsqueeze(dim=1)
return global_roi_grid_points, local_roi_grid_points
@staticmethod
def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
faked_features = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
- (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
return roi_grid_points
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
# Box Refinement
pooled_features = pooled_features.view(pooled_features.size(0), -1)
shared_features = self.shared_fc_layer(pooled_features)
if batch_dict['mode'] == 'active_evaluate':
batch_size = batch_dict['batch_size']
roi_num = pooled_features.size(0) // batch_size
batch_dict['roi_shared_feature'] = shared_features.view(batch_size, roi_num, -1)
rcnn_cls = self.cls_pred_layer(self.cls_fc_layers(shared_features))
rcnn_cls_1 = self.cls_pred_layer_1(self.cls_fc_layers_1(shared_features))
rcnn_cls_2 = self.cls_pred_layer_2(self.cls_fc_layers_2(shared_features))
rcnn_reg = self.reg_pred_layer(self.reg_fc_layers(shared_features))
batch_dict['rcnn_cls'] = rcnn_cls
batch_dict['rcnn_cls_1'] = rcnn_cls_1
batch_dict['rcnn_cls_2'] = rcnn_cls_2
# grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
# batch_size_rcnn = pooled_features.shape[0]
# pooled_features = pooled_features.permute(0, 2, 1).\
# contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
# shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
# rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
# rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_cls_1'] = rcnn_cls_1
targets_dict['rcnn_cls_2'] = rcnn_cls_2
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
def get_box_mul_cls_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_cls = forward_ret_dict['rcnn_cls']
rcnn_cls_1 = forward_ret_dict['rcnn_cls_1']
rcnn_cls_2 = forward_ret_dict['rcnn_cls_2']
rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy':
rcnn_cls_flat = rcnn_cls.view(-1)
rcnn_cls_flat_1 = rcnn_cls_1.view(-1)
rcnn_cls_flat_2 = rcnn_cls_2.view(-1)
batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none')
batch_loss_cls_1 = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat_1), rcnn_cls_labels.float(), reduction='none')
batch_loss_cls_2 = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat_2), rcnn_cls_labels.float(), reduction='none')
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
rcnn_loss_cls_1 = (batch_loss_cls_1 * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
rcnn_loss_cls_2 = (batch_loss_cls_2 * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
elif loss_cfgs.CLS_LOSS == 'CrossEntropy':
batch_loss_cls = F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
batch_loss_cls_1 = F.cross_entropy(rcnn_cls_1, rcnn_cls_labels, reduction='none', ignore_index=-1)
batch_loss_cls_2 = F.cross_entropy(rcnn_cls_2, rcnn_cls_labels, reduction='none', ignore_index=-1)
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
rcnn_loss_cls_1 = (batch_loss_cls_1 * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
rcnn_loss_cls_2 = (batch_loss_cls_2 * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
else:
raise NotImplementedError
rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
rcnn_mul_cls_loss = rcnn_loss_cls_1 + rcnn_loss_cls_2
tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.item()}
return rcnn_loss_cls, rcnn_mul_cls_loss, tb_dict
def get_mul_cls_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, rcnn_mul_cls_loss, cls_tb_dict = self.get_box_mul_cls_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_reg
tb_dict.update(reg_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, rcnn_mul_cls_loss, tb_dict
def committee_evaluate(self, batch_dict):
batch_size = batch_dict['batch_size']
cls_1 = batch_dict['rcnn_cls_1'].view(batch_size, -1)
cls_2 = batch_dict['rcnn_cls_2'].view(batch_size, -1)
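        # query-by-committee style score: mean absolute disagreement between the two
        # auxiliary classification heads, one value per sample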
committee_score = torch.mean(torch.abs(cls_1 - cls_2), dim=-1)
batch_dict['committee_score'] = committee_score
return batch_dict
def uncertainty_evaluate(self, batch_dict):
batch_size = batch_dict['batch_size']
cls = batch_dict['rcnn_cls']
cls = cls.view(batch_size, -1)
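        # NOTE: |cls - (1 - cls)| = |2 * cls - 1| is the margin from 0.5, which only
        # reads as a confidence measure if `cls` already holds probabilities in
        # [0, 1] (i.e. sigmoid applied upstream); with raw logits it is unbounded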
uncertainty = torch.mean(torch.abs(cls - (1-cls)), dim=-1)
batch_dict['uncertainty'] = uncertainty
return batch_dict
# ============================================================================
# repo: 3DTrans
# file: 3DTrans-master/pcdet/models/roi_heads/pvrcnn_head_semi.py
# ============================================================================
import torch.nn as nn
import torch
import torch.nn.functional as F
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class PVRCNNHeadSemi(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.roi_grid_pool_layer, num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=input_channels, config=self.model_cfg.ROI_GRID_POOL
)
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * num_c_out
shared_fc_list = []
        for k in range(len(self.model_cfg.SHARED_FC)):
shared_fc_list.extend([
nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
            if k != len(self.model_cfg.SHARED_FC) - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
self.cls_layers = self.make_fc_layers(
input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
)
self.reg_layers = self.make_fc_layers(
input_channels=pre_channel,
output_channels=self.box_coder.code_size * self.num_class,
fc_list=self.model_cfg.REG_FC
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
point_coords = batch_dict['point_coords']
point_features = batch_dict['point_features']
point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)
global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
) # (BxN, 6x6x6, 3)
global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3) # (B, Nx6x6x6, 3)
xyz = point_coords[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
batch_idx = point_coords[:, 0]
for k in range(batch_size):
xyz_batch_cnt[k] = (batch_idx == k).sum()
new_xyz = global_roi_grid_points.view(-1, 3)
new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
pooled_points, pooled_features = self.roi_grid_pool_layer(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features.contiguous(),
) # (M1 + M2 ..., C)
pooled_features = pooled_features.view(
-1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3,
pooled_features.shape[-1]
) # (BxN, 6x6x6, C)
return pooled_features
def get_global_grid_points_of_roi(self, rois, grid_size):
rois = rois.view(-1, rois.shape[-1])
batch_size_rcnn = rois.shape[0]
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
global_roi_grid_points = common_utils.rotate_points_along_z(
local_roi_grid_points.clone(), rois[:, 6]
).squeeze(dim=1)
global_center = rois[:, 0:3].clone()
global_roi_grid_points += global_center.unsqueeze(dim=1)
return global_roi_grid_points, local_roi_grid_points
@staticmethod
def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
faked_features = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
- (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
return roi_grid_points
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
if self.model_type == 'origin':
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = batch_dict.get('roi_targets_dict', None)
if targets_dict is None:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
batch_size_rcnn = pooled_features.shape[0]
pooled_features = pooled_features.permute(0, 2, 1).\
contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
elif self.model_type == 'teacher':
targets_dict = self.proposal_layer(
                batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TEST']  # TODO: check whether NMS should be enabled during SSL
)
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
batch_size_rcnn = pooled_features.shape[0]
pooled_features = pooled_features.permute(0, 2, 1).\
contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
elif self.model_type == 'student':
if 'gt_boxes' in batch_dict:
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
else:
targets_dict = self.proposal_layer(
                    batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TEST']  # TODO: check whether NMS should be enabled during SSL
)
if self.training:
if 'gt_boxes' in batch_dict:
targets_dict = batch_dict.get('roi_targets_dict', None)
if targets_dict is None:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
batch_size_rcnn = pooled_features.shape[0]
pooled_features = pooled_features.permute(0, 2, 1).\
contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
if 'gt_boxes' in batch_dict:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
else:
            raise Exception('Unsupported model type: %s' % self.model_type)
return batch_dict
# ============================================================================
# repo: 3DTrans
# file: 3DTrans-master/pcdet/models/roi_heads/partA2_head.py
# ============================================================================
import numpy as np
import torch
import torch.nn as nn
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils.spconv_utils import spconv
from .roi_head_template import RoIHeadTemplate
class PartA2FCHead(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.SA_modules = nn.ModuleList()
block = self.post_act_block
c0 = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES // 2
self.conv_part = spconv.SparseSequential(
block(4, 64, 3, padding=1, indice_key='rcnn_subm1'),
block(64, c0, 3, padding=1, indice_key='rcnn_subm1_1'),
)
self.conv_rpn = spconv.SparseSequential(
block(input_channels, 64, 3, padding=1, indice_key='rcnn_subm2'),
block(64, c0, 3, padding=1, indice_key='rcnn_subm1_2'),
)
shared_fc_list = []
pool_size = self.model_cfg.ROI_AWARE_POOL.POOL_SIZE
pre_channel = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES * pool_size * pool_size * pool_size
        for k in range(len(self.model_cfg.SHARED_FC)):
shared_fc_list.extend([
nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
            if k != len(self.model_cfg.SHARED_FC) - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
self.cls_layers = self.make_fc_layers(
input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
)
self.reg_layers = self.make_fc_layers(
input_channels=pre_channel,
output_channels=self.box_coder.code_size * self.num_class,
fc_list=self.model_cfg.REG_FC
)
self.roiaware_pool3d_layer = roiaware_pool3d_utils.RoIAwarePool3d(
out_size=self.model_cfg.ROI_AWARE_POOL.POOL_SIZE,
max_pts_each_voxel=self.model_cfg.ROI_AWARE_POOL.MAX_POINTS_PER_VOXEL
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def post_act_block(self, in_channels, out_channels, kernel_size, indice_key, stride=1, padding=0, conv_type='subm'):
if conv_type == 'subm':
m = spconv.SparseSequential(
spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key),
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
nn.ReLU(),
)
elif conv_type == 'spconv':
m = spconv.SparseSequential(
spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
bias=False, indice_key=indice_key),
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
nn.ReLU(),
)
elif conv_type == 'inverseconv':
m = spconv.SparseSequential(
spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size,
indice_key=indice_key, bias=False),
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01),
nn.ReLU(),
)
else:
raise NotImplementedError
return m
def roiaware_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
batch_size = batch_dict['batch_size']
batch_idx = batch_dict['point_coords'][:, 0]
point_coords = batch_dict['point_coords'][:, 1:4]
point_features = batch_dict['point_features']
part_features = torch.cat((
batch_dict['point_part_offset'] if not self.model_cfg.get('DISABLE_PART', False) else point_coords,
batch_dict['point_cls_scores'].view(-1, 1).detach()
), dim=1)
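        # points whose foreground score falls below SEG_MASK_SCORE_THRESH get their
        # predicted intra-object part location zeroed out before pooling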
part_features[part_features[:, -1] < self.model_cfg.SEG_MASK_SCORE_THRESH, 0:3] = 0
rois = batch_dict['rois']
pooled_part_features_list, pooled_rpn_features_list = [], []
for bs_idx in range(batch_size):
bs_mask = (batch_idx == bs_idx)
cur_point_coords = point_coords[bs_mask]
cur_part_features = part_features[bs_mask]
cur_rpn_features = point_features[bs_mask]
cur_roi = rois[bs_idx][:, 0:7].contiguous() # (N, 7)
pooled_part_features = self.roiaware_pool3d_layer.forward(
cur_roi, cur_point_coords, cur_part_features, pool_method='avg'
) # (N, out_x, out_y, out_z, 4)
pooled_rpn_features = self.roiaware_pool3d_layer.forward(
cur_roi, cur_point_coords, cur_rpn_features, pool_method='max'
) # (N, out_x, out_y, out_z, C)
pooled_part_features_list.append(pooled_part_features)
pooled_rpn_features_list.append(pooled_rpn_features)
pooled_part_features = torch.cat(pooled_part_features_list, dim=0) # (B * N, out_x, out_y, out_z, 4)
pooled_rpn_features = torch.cat(pooled_rpn_features_list, dim=0) # (B * N, out_x, out_y, out_z, C)
return pooled_part_features, pooled_rpn_features
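    # Note (added, interpretive): the part branch is average-pooled while the RPN
    # feature branch is max-pooled (see pool_method above); both are gathered on the
    # same (out_x, out_y, out_z) grid set by ROI_AWARE_POOL.POOL_SIZE.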
@staticmethod
def fake_sparse_idx(sparse_idx, batch_size_rcnn):
print('Warning: Sparse_Idx_Shape(%s) \r' % (str(sparse_idx.shape)), end='', flush=True)
        # At most one sample is non-empty, so fake the first voxel of each sample as
        # non-empty (BatchNorm needs at least two values per channel) for the
        # calculation below.
sparse_idx = sparse_idx.new_zeros((batch_size_rcnn, 3))
bs_idxs = torch.arange(batch_size_rcnn).type_as(sparse_idx).view(-1, 1)
sparse_idx = torch.cat((bs_idxs, sparse_idx), dim=1)
return sparse_idx
def forward(self, batch_dict):
"""
Args:
batch_dict:
Returns:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_part_features, pooled_rpn_features = self.roiaware_pool(batch_dict)
batch_size_rcnn = pooled_part_features.shape[0] # (B * N, out_x, out_y, out_z, 4)
# transform to sparse tensors
sparse_shape = np.array(pooled_part_features.shape[1:4], dtype=np.int32)
sparse_idx = pooled_part_features.sum(dim=-1).nonzero() # (non_empty_num, 4) ==> [bs_idx, x_idx, y_idx, z_idx]
if sparse_idx.shape[0] < 3:
sparse_idx = self.fake_sparse_idx(sparse_idx, batch_size_rcnn)
if self.training:
# these are invalid samples
targets_dict['rcnn_cls_labels'].fill_(-1)
targets_dict['reg_valid_mask'].fill_(-1)
part_features = pooled_part_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]]
rpn_features = pooled_rpn_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]]
coords = sparse_idx.int().contiguous()
part_features = spconv.SparseConvTensor(part_features, coords, sparse_shape, batch_size_rcnn)
rpn_features = spconv.SparseConvTensor(rpn_features, coords, sparse_shape, batch_size_rcnn)
# forward rcnn network
x_part = self.conv_part(part_features)
x_rpn = self.conv_rpn(rpn_features)
merged_feature = torch.cat((x_rpn.features, x_part.features), dim=1) # (N, C)
shared_feature = spconv.SparseConvTensor(merged_feature, coords, sparse_shape, batch_size_rcnn)
shared_feature = shared_feature.dense().view(batch_size_rcnn, -1, 1)
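        # Note (added): .dense() yields (B*N, C, out_x, out_y, out_z); flattening it to
        # (B*N, C*out_x*out_y*out_z, 1) lets the shared FC stack run as 1x1 Conv1d
        # layers, one feature vector per RoI.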
shared_feature = self.shared_fc_layer(shared_feature)
rcnn_cls = self.cls_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
| 10,089
| 43.844444
| 120
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/roi_heads/__init__.py
|
from .roi_head_template import RoIHeadTemplate
from .partA2_head import PartA2FCHead
from .pointrcnn_head import PointRCNNHead
from .pvrcnn_head import PVRCNNHead
from .pvrcnn_head_MoE import PVRCNNHeadMoE
from .pvrcnn_head import ActivePVRCNNHead
from .second_head import SECONDHead
from .second_head import ActiveSECONDHead
from .voxelrcnn_head import VoxelRCNNHead
from .voxelrcnn_head import ActiveVoxelRCNNHead
from .voxelrcnn_head import VoxelRCNNHead_ABL
from .pvrcnn_head_semi import PVRCNNHeadSemi
__all__ = {
'RoIHeadTemplate': RoIHeadTemplate,
'PartA2FCHead': PartA2FCHead,
'PointRCNNHead': PointRCNNHead,
'PVRCNNHead': PVRCNNHead,
'PVRCNNHeadMoE': PVRCNNHeadMoE,
'ActivePVRCNNHead': ActivePVRCNNHead,
'SECONDHead': SECONDHead,
'ActiveSECONDHead': ActiveSECONDHead,
'VoxelRCNNHead': VoxelRCNNHead,
'ActiveVoxelRCNNHead': ActiveVoxelRCNNHead,
'VoxelRCNNHead_ABL': VoxelRCNNHead_ABL,
'PVRCNNHeadSemi':PVRCNNHeadSemi,
}
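# Hedged sketch (added; not in the original file): detector builders in pcdet typically
# resolve the head class from this registry by its config name, roughly:
#   roi_head = __all__[model_cfg.ROI_HEAD.NAME](
#       input_channels=..., model_cfg=model_cfg.ROI_HEAD, num_class=num_class
#   )
# the exact config path 'model_cfg.ROI_HEAD' is an assumption for illustration.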
| 977
| 33.928571
| 47
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/roi_heads/pvrcnn_head_MoE.py
|
import torch.nn as nn
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class PVRCNNHeadMoE(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.roi_grid_pool_layer, num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=input_channels, config=self.model_cfg.ROI_GRID_POOL
)
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * num_c_out
shared_fc_list = []
for k in range(0, self.model_cfg.SHARED_FC.__len__()):
shared_fc_list.extend([
nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
## MoE Learning
self.moe_fc_gate_s1 = nn.Sequential(
nn.Conv1d(self.model_cfg.SHARED_FC[k], self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
)
self.moe_fc_gate_s2 = nn.Sequential(
nn.Conv1d(self.model_cfg.SHARED_FC[k], self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
)
self.cls_layers = self.make_fc_layers(
input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
)
self.reg_layers = self.make_fc_layers(
input_channels=pre_channel,
output_channels=self.box_coder.code_size * self.num_class,
fc_list=self.model_cfg.REG_FC
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
point_coords = batch_dict['point_coords']
point_features = batch_dict['point_features']
point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)
global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
) # (BxN, 6x6x6, 3)
global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3) # (B, Nx6x6x6, 3)
xyz = point_coords[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
batch_idx = point_coords[:, 0]
for k in range(batch_size):
xyz_batch_cnt[k] = (batch_idx == k).sum()
new_xyz = global_roi_grid_points.view(-1, 3)
new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
pooled_points, pooled_features = self.roi_grid_pool_layer(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features.contiguous(),
) # (M1 + M2 ..., C)
pooled_features = pooled_features.view(
-1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3,
pooled_features.shape[-1]
) # (BxN, 6x6x6, C)
return pooled_features
def get_global_grid_points_of_roi(self, rois, grid_size):
rois = rois.view(-1, rois.shape[-1])
batch_size_rcnn = rois.shape[0]
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
global_roi_grid_points = common_utils.rotate_points_along_z(
local_roi_grid_points.clone(), rois[:, 6]
).squeeze(dim=1)
global_center = rois[:, 0:3].clone()
global_roi_grid_points += global_center.unsqueeze(dim=1)
return global_roi_grid_points, local_roi_grid_points
@staticmethod
def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
faked_features = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
- (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
return roi_grid_points
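    # Worked example (added): with grid_size=6 and an RoI extent of 3.6 m along one
    # axis, (dense_idx + 0.5) / 6 * 3.6 - 1.8 places the cell centers at
    # -1.5, -0.9, ..., +1.5, i.e. a uniform RoI-local grid centered on the box.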
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = batch_dict.get('roi_targets_dict', None)
if targets_dict is None:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
batch_size_rcnn = pooled_features.shape[0]
pooled_features = pooled_features.permute(0, 2, 1).\
contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
# MoE to obtain the gate weight
if batch_dict['source_tag'] == 1:
gate_weights_s1 = self.moe_fc_gate_s1(shared_features)
gate_weights_s1 = gate_weights_s1 * shared_features
shared_features = shared_features + gate_weights_s1
elif batch_dict['source_tag'] == 2:
gate_weights_s2 = self.moe_fc_gate_s2(shared_features)
gate_weights_s2 = gate_weights_s2 * shared_features
shared_features = shared_features + gate_weights_s2
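        # Note (added): each source-specific gate applies a residual re-weighting,
        # shared_features * (1 + gate(shared_features)), selected by
        # batch_dict['source_tag'], so the two dataset sources share the head while
        # keeping per-source experts.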
rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
| 8,480
| 42.050761
| 116
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/roi_heads/pvrcnn_head.py
|
import torch.nn as nn
import torch
import torch.nn.functional as F
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class PVRCNNHead(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.roi_grid_pool_layer, num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=input_channels, config=self.model_cfg.ROI_GRID_POOL
)
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * num_c_out
shared_fc_list = []
for k in range(0, self.model_cfg.SHARED_FC.__len__()):
shared_fc_list.extend([
nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
self.cls_layers = self.make_fc_layers(
input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
)
self.reg_layers = self.make_fc_layers(
input_channels=pre_channel,
output_channels=self.box_coder.code_size * self.num_class,
fc_list=self.model_cfg.REG_FC
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
point_coords = batch_dict['point_coords']
point_features = batch_dict['point_features']
point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)
global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
) # (BxN, 6x6x6, 3)
global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3) # (B, Nx6x6x6, 3)
xyz = point_coords[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
batch_idx = point_coords[:, 0]
for k in range(batch_size):
xyz_batch_cnt[k] = (batch_idx == k).sum()
new_xyz = global_roi_grid_points.view(-1, 3)
new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
pooled_points, pooled_features = self.roi_grid_pool_layer(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features.contiguous(),
) # (M1 + M2 ..., C)
pooled_features = pooled_features.view(
-1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3,
pooled_features.shape[-1]
) # (BxN, 6x6x6, C)
return pooled_features
def get_global_grid_points_of_roi(self, rois, grid_size):
rois = rois.view(-1, rois.shape[-1])
batch_size_rcnn = rois.shape[0]
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
global_roi_grid_points = common_utils.rotate_points_along_z(
local_roi_grid_points.clone(), rois[:, 6]
).squeeze(dim=1)
global_center = rois[:, 0:3].clone()
global_roi_grid_points += global_center.unsqueeze(dim=1)
return global_roi_grid_points, local_roi_grid_points
@staticmethod
def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
faked_features = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
- (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
return roi_grid_points
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = batch_dict.get('roi_targets_dict', None)
if targets_dict is None:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
batch_size_rcnn = pooled_features.shape[0]
pooled_features = pooled_features.permute(0, 2, 1).\
contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
class ActivePVRCNNHead(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
self.roi_grid_pool_layer, num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
input_channels=input_channels, config=self.model_cfg.ROI_GRID_POOL
)
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * num_c_out
shared_fc_list = []
for k in range(0, self.model_cfg.SHARED_FC.__len__()):
shared_fc_list.extend([
nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
self.cls_layers = self.make_fc_layers(
input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
)
self.reg_layers = self.make_fc_layers(
input_channels=pre_channel,
output_channels=self.box_coder.code_size * self.num_class,
fc_list=self.model_cfg.REG_FC
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
point_coords = batch_dict['point_coords']
point_features = batch_dict['point_features']
point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)
global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
) # (BxN, 6x6x6, 3)
global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3) # (B, Nx6x6x6, 3)
xyz = point_coords[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
batch_idx = point_coords[:, 0]
for k in range(batch_size):
xyz_batch_cnt[k] = (batch_idx == k).sum()
new_xyz = global_roi_grid_points.view(-1, 3)
new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
pooled_points, pooled_features = self.roi_grid_pool_layer(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features.contiguous(),
) # (M1 + M2 ..., C)
pooled_features = pooled_features.view(
-1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3,
pooled_features.shape[-1]
) # (BxN, 6x6x6, C)
return pooled_features
def get_global_grid_points_of_roi(self, rois, grid_size):
rois = rois.view(-1, rois.shape[-1])
batch_size_rcnn = rois.shape[0]
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3)
global_roi_grid_points = common_utils.rotate_points_along_z(
local_roi_grid_points.clone(), rois[:, 6]
).squeeze(dim=1)
global_center = rois[:, 0:3].clone()
global_roi_grid_points += global_center.unsqueeze(dim=1)
return global_roi_grid_points, local_roi_grid_points
@staticmethod
def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
faked_features = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx]
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3)
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
- (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3)
return roi_grid_points
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = batch_dict.get('roi_targets_dict', None)
if targets_dict is None:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C)
grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
batch_size_rcnn = pooled_features.shape[0]
pooled_features = pooled_features.permute(0, 2, 1).\
contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6)
shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
if batch_dict['mode'] == 'active_evaluate':
batch_size = batch_dict['batch_size']
roi_num = batch_size_rcnn // batch_size
batch_dict['roi_shared_feature'] = shared_features.view(batch_size, roi_num, -1)
rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
| 15,017
| 41.543909
| 116
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/roi_heads/second_head.py
|
import torch
import torch.nn as nn
from .roi_head_template import RoIHeadTemplate
from ...utils import common_utils, loss_utils
class SECONDHead(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
pre_channel = self.model_cfg.ROI_GRID_POOL.IN_CHANNEL * GRID_SIZE * GRID_SIZE
shared_fc_list = []
for k in range(0, self.model_cfg.SHARED_FC.__len__()):
shared_fc_list.extend([
nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
self.iou_layers = self.make_fc_layers(
input_channels=pre_channel, output_channels=1, fc_list=self.model_cfg.IOU_FC
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
spatial_features_2d: (B, C, H, W)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois'].detach()
spatial_features_2d = batch_dict['spatial_features_2d'].detach()
height, width = spatial_features_2d.size(2), spatial_features_2d.size(3)
dataset_cfg = batch_dict['dataset_cfg']
min_x = dataset_cfg.POINT_CLOUD_RANGE[0]
min_y = dataset_cfg.POINT_CLOUD_RANGE[1]
voxel_size_x = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[0]
voxel_size_y = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[1]
down_sample_ratio = self.model_cfg.ROI_GRID_POOL.DOWNSAMPLE_RATIO
pooled_features_list = []
torch.backends.cudnn.enabled = False
for b_id in range(batch_size):
# Map global boxes coordinates to feature map coordinates
x1 = (rois[b_id, :, 0] - rois[b_id, :, 3] / 2 - min_x) / (voxel_size_x * down_sample_ratio)
x2 = (rois[b_id, :, 0] + rois[b_id, :, 3] / 2 - min_x) / (voxel_size_x * down_sample_ratio)
y1 = (rois[b_id, :, 1] - rois[b_id, :, 4] / 2 - min_y) / (voxel_size_y * down_sample_ratio)
y2 = (rois[b_id, :, 1] + rois[b_id, :, 4] / 2 - min_y) / (voxel_size_y * down_sample_ratio)
angle, _ = common_utils.check_numpy_to_torch(rois[b_id, :, 6])
cosa = torch.cos(angle)
sina = torch.sin(angle)
theta = torch.stack((
(x2 - x1) / (width - 1) * cosa, (x2 - x1) / (width - 1) * (-sina), (x1 + x2 - width + 1) / (width - 1),
(y2 - y1) / (height - 1) * sina, (y2 - y1) / (height - 1) * cosa, (y1 + y2 - height + 1) / (height - 1)
), dim=1).view(-1, 2, 3).float()
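            # Note (added): theta is the 2x3 affine matrix consumed by affine_grid
            # below; it maps the normalized [-1, 1] sampling grid of each
            # grid_size x grid_size output onto the rotated RoI footprint in the BEV
            # map, with (x1, y1)-(x2, y2) giving the axis-aligned extent and
            # (cosa, sina) the yaw rotation.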
grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
grid = nn.functional.affine_grid(
theta,
torch.Size((rois.size(1), spatial_features_2d.size(1), grid_size, grid_size))
)
pooled_features = nn.functional.grid_sample(
spatial_features_2d[b_id].unsqueeze(0).expand(rois.size(1), spatial_features_2d.size(1), height, width),
grid
)
pooled_features_list.append(pooled_features)
torch.backends.cudnn.enabled = True
pooled_features = torch.cat(pooled_features_list, dim=0)
return pooled_features
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, C, 7, 7)
batch_size_rcnn = pooled_features.shape[0]
shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
rcnn_iou = self.iou_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B*N, 1)
if not self.training:
batch_dict['batch_cls_preds'] = rcnn_iou.view(batch_dict['batch_size'], -1, rcnn_iou.shape[-1])
batch_dict['batch_box_preds'] = batch_dict['rois']
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_iou'] = rcnn_iou
self.forward_ret_dict = targets_dict
return batch_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, cls_tb_dict = self.get_box_iou_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, tb_dict
def get_box_iou_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_iou = forward_ret_dict['rcnn_iou']
rcnn_iou_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
rcnn_iou_flat = rcnn_iou.view(-1)
if loss_cfgs.IOU_LOSS == 'BinaryCrossEntropy':
batch_loss_iou = nn.functional.binary_cross_entropy_with_logits(
rcnn_iou_flat,
rcnn_iou_labels.float(), reduction='none'
)
elif loss_cfgs.IOU_LOSS == 'L2':
batch_loss_iou = nn.functional.mse_loss(rcnn_iou_flat, rcnn_iou_labels, reduction='none')
elif loss_cfgs.IOU_LOSS == 'smoothL1':
diff = rcnn_iou_flat - rcnn_iou_labels
batch_loss_iou = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(diff, 1.0 / 9.0)
elif loss_cfgs.IOU_LOSS == 'focalbce':
batch_loss_iou = loss_utils.sigmoid_focal_cls_loss(rcnn_iou_flat, rcnn_iou_labels)
else:
raise NotImplementedError
iou_valid_mask = (rcnn_iou_labels >= 0).float()
rcnn_loss_iou = (batch_loss_iou * iou_valid_mask).sum() / torch.clamp(iou_valid_mask.sum(), min=1.0)
rcnn_loss_iou = rcnn_loss_iou * loss_cfgs.LOSS_WEIGHTS['rcnn_iou_weight']
tb_dict = {'rcnn_loss_iou': rcnn_loss_iou.item()}
return rcnn_loss_iou, tb_dict
class ActiveSECONDHead(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
pre_channel = self.model_cfg.ROI_GRID_POOL.IN_CHANNEL * GRID_SIZE * GRID_SIZE
shared_fc_list = []
for k in range(0, self.model_cfg.SHARED_FC.__len__()):
shared_fc_list.extend([
nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_fc_layer = nn.Sequential(*shared_fc_list)
self.iou_layers = self.make_fc_layers(
input_channels=pre_channel, output_channels=1, fc_list=self.model_cfg.IOU_FC
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def roi_grid_pool(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
spatial_features_2d: (B, C, H, W)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois'].detach()
spatial_features_2d = batch_dict['spatial_features_2d'].detach()
height, width = spatial_features_2d.size(2), spatial_features_2d.size(3)
dataset_cfg = batch_dict['dataset_cfg']
min_x = dataset_cfg.POINT_CLOUD_RANGE[0]
min_y = dataset_cfg.POINT_CLOUD_RANGE[1]
voxel_size_x = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[0]
voxel_size_y = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[1]
down_sample_ratio = self.model_cfg.ROI_GRID_POOL.DOWNSAMPLE_RATIO
pooled_features_list = []
torch.backends.cudnn.enabled = False
for b_id in range(batch_size):
# Map global boxes coordinates to feature map coordinates
x1 = (rois[b_id, :, 0] - rois[b_id, :, 3] / 2 - min_x) / (voxel_size_x * down_sample_ratio)
x2 = (rois[b_id, :, 0] + rois[b_id, :, 3] / 2 - min_x) / (voxel_size_x * down_sample_ratio)
y1 = (rois[b_id, :, 1] - rois[b_id, :, 4] / 2 - min_y) / (voxel_size_y * down_sample_ratio)
y2 = (rois[b_id, :, 1] + rois[b_id, :, 4] / 2 - min_y) / (voxel_size_y * down_sample_ratio)
angle, _ = common_utils.check_numpy_to_torch(rois[b_id, :, 6])
cosa = torch.cos(angle)
sina = torch.sin(angle)
theta = torch.stack((
(x2 - x1) / (width - 1) * cosa, (x2 - x1) / (width - 1) * (-sina), (x1 + x2 - width + 1) / (width - 1),
(y2 - y1) / (height - 1) * sina, (y2 - y1) / (height - 1) * cosa, (y1 + y2 - height + 1) / (height - 1)
), dim=1).view(-1, 2, 3).float()
grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
grid = nn.functional.affine_grid(
theta,
torch.Size((rois.size(1), spatial_features_2d.size(1), grid_size, grid_size))
)
pooled_features = nn.functional.grid_sample(
spatial_features_2d[b_id].unsqueeze(0).expand(rois.size(1), spatial_features_2d.size(1), height, width),
grid
)
pooled_features_list.append(pooled_features)
torch.backends.cudnn.enabled = True
pooled_features = torch.cat(pooled_features_list, dim=0)
return pooled_features
def forward(self, batch_dict):
"""
        :param batch_dict: input dict
:return:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
# RoI aware pooling
pooled_features = self.roi_grid_pool(batch_dict) # (BxN, C, 7, 7)
batch_size_rcnn = pooled_features.shape[0]
shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
if batch_dict['mode'] == 'active_evaluate':
batch_size = batch_dict['batch_size']
roi_num = batch_size_rcnn // batch_size
batch_dict['roi_shared_feature'] = shared_features.view(batch_size, roi_num, -1)
rcnn_iou = self.iou_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B*N, 1)
if not self.training:
batch_dict['batch_cls_preds'] = rcnn_iou.view(batch_dict['batch_size'], -1, rcnn_iou.shape[-1])
batch_dict['batch_box_preds'] = batch_dict['rois']
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_iou'] = rcnn_iou
self.forward_ret_dict = targets_dict
return batch_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, cls_tb_dict = self.get_box_iou_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, tb_dict
def get_box_iou_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_iou = forward_ret_dict['rcnn_iou']
rcnn_iou_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
rcnn_iou_flat = rcnn_iou.view(-1)
if loss_cfgs.IOU_LOSS == 'BinaryCrossEntropy':
batch_loss_iou = nn.functional.binary_cross_entropy_with_logits(
rcnn_iou_flat,
rcnn_iou_labels.float(), reduction='none'
)
elif loss_cfgs.IOU_LOSS == 'L2':
batch_loss_iou = nn.functional.mse_loss(rcnn_iou_flat, rcnn_iou_labels, reduction='none')
elif loss_cfgs.IOU_LOSS == 'smoothL1':
diff = rcnn_iou_flat - rcnn_iou_labels
batch_loss_iou = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(diff, 1.0 / 9.0)
elif loss_cfgs.IOU_LOSS == 'focalbce':
batch_loss_iou = loss_utils.sigmoid_focal_cls_loss(rcnn_iou_flat, rcnn_iou_labels)
else:
raise NotImplementedError
iou_valid_mask = (rcnn_iou_labels >= 0).float()
rcnn_loss_iou = (batch_loss_iou * iou_valid_mask).sum() / torch.clamp(iou_valid_mask.sum(), min=1.0)
rcnn_loss_iou = rcnn_loss_iou * loss_cfgs.LOSS_WEIGHTS['rcnn_iou_weight']
tb_dict = {'rcnn_loss_iou': rcnn_loss_iou.item()}
return rcnn_loss_iou, tb_dict
| 15,196
| 41.449721
| 120
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/roi_heads/pointrcnn_head.py
|
import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.roipoint_pool3d import roipoint_pool3d_utils
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class PointRCNNHead(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
use_bn = self.model_cfg.USE_BN
self.SA_modules = nn.ModuleList()
channel_in = input_channels
self.num_prefix_channels = 3 + 2 # xyz + point_scores + point_depth
xyz_mlps = [self.num_prefix_channels] + self.model_cfg.XYZ_UP_LAYER
shared_mlps = []
for k in range(len(xyz_mlps) - 1):
shared_mlps.append(nn.Conv2d(xyz_mlps[k], xyz_mlps[k + 1], kernel_size=1, bias=not use_bn))
if use_bn:
shared_mlps.append(nn.BatchNorm2d(xyz_mlps[k + 1]))
shared_mlps.append(nn.ReLU())
self.xyz_up_layer = nn.Sequential(*shared_mlps)
c_out = self.model_cfg.XYZ_UP_LAYER[-1]
self.merge_down_layer = nn.Sequential(
nn.Conv2d(c_out * 2, c_out, kernel_size=1, bias=not use_bn),
*[nn.BatchNorm2d(c_out), nn.ReLU()] if use_bn else [nn.ReLU()]
)
for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
mlps = [channel_in] + self.model_cfg.SA_CONFIG.MLPS[k]
npoint = self.model_cfg.SA_CONFIG.NPOINTS[k] if self.model_cfg.SA_CONFIG.NPOINTS[k] != -1 else None
self.SA_modules.append(
pointnet2_modules.PointnetSAModule(
npoint=npoint,
radius=self.model_cfg.SA_CONFIG.RADIUS[k],
nsample=self.model_cfg.SA_CONFIG.NSAMPLE[k],
mlp=mlps,
use_xyz=True,
bn=use_bn
)
)
channel_in = mlps[-1]
self.cls_layers = self.make_fc_layers(
input_channels=channel_in, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
)
self.reg_layers = self.make_fc_layers(
input_channels=channel_in,
output_channels=self.box_coder.code_size * self.num_class,
fc_list=self.model_cfg.REG_FC
)
self.roipoint_pool3d_layer = roipoint_pool3d_utils.RoIPointPool3d(
num_sampled_points=self.model_cfg.ROI_POINT_POOL.NUM_SAMPLED_POINTS,
pool_extra_width=self.model_cfg.ROI_POINT_POOL.POOL_EXTRA_WIDTH
)
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roipool3d_gpu(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
batch_size = batch_dict['batch_size']
batch_idx = batch_dict['point_coords'][:, 0]
point_coords = batch_dict['point_coords'][:, 1:4]
point_features = batch_dict['point_features']
rois = batch_dict['rois'] # (B, num_rois, 7 + C)
batch_cnt = point_coords.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
assert batch_cnt.min() == batch_cnt.max()
point_scores = batch_dict['point_cls_scores'].detach()
point_depths = point_coords.norm(dim=1) / self.model_cfg.ROI_POINT_POOL.DEPTH_NORMALIZER - 0.5
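        # Note (added): normalizing by DEPTH_NORMALIZER and subtracting 0.5 maps point
        # depth into a roughly zero-centered range (exactly 0 at half the normalizer).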
point_features_list = [point_scores[:, None], point_depths[:, None], point_features]
point_features_all = torch.cat(point_features_list, dim=1)
batch_points = point_coords.view(batch_size, -1, 3)
batch_point_features = point_features_all.view(batch_size, -1, point_features_all.shape[-1])
with torch.no_grad():
pooled_features, pooled_empty_flag = self.roipoint_pool3d_layer(
batch_points, batch_point_features, rois
) # pooled_features: (B, num_rois, num_sampled_points, 3 + C), pooled_empty_flag: (B, num_rois)
# canonical transformation
roi_center = rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
pooled_features = pooled_features.view(-1, pooled_features.shape[-2], pooled_features.shape[-1])
pooled_features[:, :, 0:3] = common_utils.rotate_points_along_z(
pooled_features[:, :, 0:3], -rois.view(-1, rois.shape[-1])[:, 6]
)
pooled_features[pooled_empty_flag.view(-1) > 0] = 0
return pooled_features
def forward(self, batch_dict):
"""
Args:
batch_dict:
Returns:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = self.assign_targets(batch_dict)
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
pooled_features = self.roipool3d_gpu(batch_dict) # (total_rois, num_sampled_points, 3 + C)
xyz_input = pooled_features[..., 0:self.num_prefix_channels].transpose(1, 2).unsqueeze(dim=3).contiguous()
xyz_features = self.xyz_up_layer(xyz_input)
point_features = pooled_features[..., self.num_prefix_channels:].transpose(1, 2).unsqueeze(dim=3)
merged_features = torch.cat((xyz_features, point_features), dim=1)
merged_features = self.merge_down_layer(merged_features)
l_xyz, l_features = [pooled_features[..., 0:3].contiguous()], [merged_features.squeeze(dim=3).contiguous()]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
shared_features = l_features[-1] # (total_rois, num_features, 1)
rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
| 7,835
| 42.533333
| 116
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/roi_heads/target_assigner/proposal_target_layer.py
|
import numpy as np
import torch
import torch.nn as nn
from ....ops.iou3d_nms import iou3d_nms_utils
class ProposalTargetLayer(nn.Module):
def __init__(self, roi_sampler_cfg):
super().__init__()
self.roi_sampler_cfg = roi_sampler_cfg
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
batch_dict:
rois: (B, M, 7 + C)
gt_of_rois: (B, M, 7 + C)
gt_iou_of_rois: (B, M)
roi_scores: (B, M)
roi_labels: (B, M)
reg_valid_mask: (B, M)
rcnn_cls_labels: (B, M)
"""
batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
batch_dict=batch_dict
)
# regression valid mask
reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long()
# classification label
if self.roi_sampler_cfg.CLS_SCORE_TYPE == 'cls':
batch_cls_labels = (batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH).long()
ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & \
(batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH)
batch_cls_labels[ignore_mask > 0] = -1
elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_iou':
iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH
iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH
fg_mask = batch_roi_ious > iou_fg_thresh
bg_mask = batch_roi_ious < iou_bg_thresh
interval_mask = (fg_mask == 0) & (bg_mask == 0)
batch_cls_labels = (fg_mask > 0).float()
batch_cls_labels[interval_mask] = \
(batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
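            # Note (added): RoIs whose IoU falls between CLS_BG_THRESH and
            # CLS_FG_THRESH get a soft label that ramps linearly from 0 to 1; e.g. with
            # thresholds (0.25, 0.75), an IoU of 0.5 yields a 0.5 classification target.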
elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'raw_roi_iou':
batch_cls_labels = batch_roi_ious
else:
raise NotImplementedError
targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_iou_of_rois': batch_roi_ious,
'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels,
'reg_valid_mask': reg_valid_mask,
'rcnn_cls_labels': batch_cls_labels}
return targets_dict
def sample_rois_for_rcnn(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
roi_scores: (B, num_rois)
gt_boxes: (B, N, 7 + C + 1)
roi_labels: (B, num_rois)
Returns:
"""
batch_size = batch_dict['batch_size']
rois = batch_dict['rois']
roi_scores = batch_dict['roi_scores']
roi_labels = batch_dict['roi_labels']
gt_boxes = batch_dict['gt_boxes']
code_size = rois.shape[-1]
batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size)
batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1)
batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long)
for index in range(batch_size):
cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
k = cur_gt.__len__() - 1
while k >= 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt
if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False):
max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
rois=cur_roi, roi_labels=cur_roi_labels,
gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long()
)
else:
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7]) # (M, N)
#iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi[:, 0:7], cur_gt[:, 0:7]) #modified: cur_roi->cur_roi[:, 0:7]
max_overlaps, gt_assignment = torch.max(iou3d, dim=1)
sampled_inds = self.subsample_rois(max_overlaps=max_overlaps)
batch_rois[index] = cur_roi[sampled_inds]
batch_roi_labels[index] = cur_roi_labels[sampled_inds]
batch_roi_ious[index] = max_overlaps[sampled_inds]
batch_roi_scores[index] = cur_roi_scores[sampled_inds]
batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
return batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels
def subsample_rois(self, max_overlaps):
# sample fg, easy_bg, hard_bg
fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE))
fg_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH)
fg_inds = ((max_overlaps >= fg_thresh)).nonzero().view(-1)
easy_bg_inds = ((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)
hard_bg_inds = ((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) &
(max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)
fg_num_rois = fg_inds.numel()
bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long()
fg_inds = fg_inds[rand_num]
bg_inds = fg_inds[fg_inds < 0] # yield empty tensor
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
bg_inds = self.sample_bg_inds(
hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
)
else:
print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min().item(), max_overlaps.max().item()))
print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
raise NotImplementedError
sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
return sampled_inds
@staticmethod
def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio):
if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
hard_bg_inds = hard_bg_inds[rand_idx]
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
easy_bg_inds = easy_bg_inds[rand_idx]
bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
hard_bg_rois_num = bg_rois_per_this_image
# sampling hard bg
rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
bg_inds = hard_bg_inds[rand_idx]
elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
easy_bg_rois_num = bg_rois_per_this_image
# sampling easy bg
rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
bg_inds = easy_bg_inds[rand_idx]
else:
raise NotImplementedError
return bg_inds
@staticmethod
def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
"""
Args:
rois: (N, 7)
roi_labels: (N)
gt_boxes: (N, )
gt_labels:
Returns:
"""
"""
:param rois: (N, 7)
:param roi_labels: (N)
:param gt_boxes: (N, 8)
:return:
"""
max_overlaps = rois.new_zeros(rois.shape[0])
gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])
for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
roi_mask = (roi_labels == k)
gt_mask = (gt_labels == k)
if roi_mask.sum() > 0 and gt_mask.sum() > 0:
cur_roi = rois[roi_mask]
cur_gt = gt_boxes[gt_mask]
original_gt_assignment = gt_mask.nonzero().view(-1)
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt) # (M, N)
#iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi[:, 0:7], cur_gt) #modified: cur_roi->cur_roi[:, 0:7]
cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1)
max_overlaps[roi_mask] = cur_max_overlaps
gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
return max_overlaps, gt_assignment
| 10,343
| 43.779221
| 126
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/roi_heads/target_assigner/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/models/model_utils/centernet_utils.py
|
# This file is modified from https://github.com/tianweiy/CenterPoint
import torch
import torch.nn.functional as F
import numpy as np
import numba
def gaussian_radius(height, width, min_overlap=0.5):
"""
Args:
height: (N)
width: (N)
min_overlap:
Returns:
"""
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = (b1 ** 2 - 4 * a1 * c1).sqrt()
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = (b2 ** 2 - 4 * a2 * c2).sqrt()
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = (b3 ** 2 - 4 * a3 * c3).sqrt()
r3 = (b3 + sq3) / 2
ret = torch.min(torch.min(r1, r2), r3)
return ret
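# Note (added, following the CornerNet-style derivation): r1, r2 and r3 solve the three
# quadratic cases where a shifted box overlaps, contains, or is contained by the
# ground-truth box while keeping IoU >= min_overlap; the minimum of the three is the
# safe Gaussian radius.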
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def draw_gaussian_to_heatmap(heatmap, center, radius, k=1, valid_mask=None):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = torch.from_numpy(
gaussian[radius - top:radius + bottom, radius - left:radius + right]
).to(heatmap.device).float()
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
if valid_mask is not None:
cur_valid_mask = valid_mask[y - top:y + bottom, x - left:x + right]
masked_gaussian = masked_gaussian * cur_valid_mask.float()
torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
@numba.jit(nopython=True)
def circle_nms(dets, thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
scores = dets[:, 2]
order = scores.argsort()[::-1].astype(np.int32) # highest->lowest
ndets = dets.shape[0]
suppressed = np.zeros((ndets), dtype=np.int32)
keep = []
for _i in range(ndets):
i = order[_i] # start with highest score box
if suppressed[i] == 1: # if any box have enough iou with this, remove it
continue
keep.append(i)
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
# calculate center distance between i and j box
dist = (x1[i] - x1[j]) ** 2 + (y1[i] - y1[j]) ** 2
# ovr = inter / areas[j]
if dist <= thresh:
suppressed[j] = 1
return keep
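# Note (added): circle_nms is a numba-compiled O(N^2) pass over score-sorted detections;
# a detection is suppressed when its *squared* center distance to a higher-scoring kept
# detection is <= thresh.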
def _circle_nms(boxes, min_radius, post_max_size=83):
"""
NMS according to center distance
"""
keep = np.array(circle_nms(boxes.cpu().numpy(), thresh=min_radius))[:post_max_size]
keep = torch.from_numpy(keep).long().to(boxes.device)
return keep
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
def _topk(scores, K=40):
batch, num_class, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.flatten(2, 3), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds // width).float()
topk_xs = (topk_inds % width).int().float()
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
topk_classes = (topk_ind // K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_classes, topk_ys, topk_xs
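# Note (added): _topk selects in two stages: a per-class top-K over each flattened
# heatmap channel, then a global top-K over the num_class * K candidates; topk_ind // K
# recovers the class id and _gather_feat pulls back the matching indices and pixel
# coordinates.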
def decode_bbox_from_heatmap(heatmap, rot_cos, rot_sin, center, center_z, dim,
point_cloud_range=None, voxel_size=None, feature_map_stride=None, vel=None, K=100,
circle_nms=False, score_thresh=None, post_center_limit_range=None):
batch_size, num_class, _, _ = heatmap.size()
if circle_nms:
# TODO: not checked yet
assert False, 'not checked yet'
heatmap = _nms(heatmap)
scores, inds, class_ids, ys, xs = _topk(heatmap, K=K)
center = _transpose_and_gather_feat(center, inds).view(batch_size, K, 2)
rot_sin = _transpose_and_gather_feat(rot_sin, inds).view(batch_size, K, 1)
rot_cos = _transpose_and_gather_feat(rot_cos, inds).view(batch_size, K, 1)
center_z = _transpose_and_gather_feat(center_z, inds).view(batch_size, K, 1)
dim = _transpose_and_gather_feat(dim, inds).view(batch_size, K, 3)
angle = torch.atan2(rot_sin, rot_cos)
xs = xs.view(batch_size, K, 1) + center[:, :, 0:1]
ys = ys.view(batch_size, K, 1) + center[:, :, 1:2]
xs = xs * feature_map_stride * voxel_size[0] + point_cloud_range[0]
ys = ys * feature_map_stride * voxel_size[1] + point_cloud_range[1]
box_part_list = [xs, ys, center_z, dim, angle]
if vel is not None:
vel = _transpose_and_gather_feat(vel, inds).view(batch_size, K, 2)
box_part_list.append(vel)
final_box_preds = torch.cat((box_part_list), dim=-1)
final_scores = scores.view(batch_size, K)
final_class_ids = class_ids.view(batch_size, K)
assert post_center_limit_range is not None
mask = (final_box_preds[..., :3] >= post_center_limit_range[:3]).all(2)
mask &= (final_box_preds[..., :3] <= post_center_limit_range[3:]).all(2)
if score_thresh is not None:
mask &= (final_scores > score_thresh)
ret_pred_dicts = []
for k in range(batch_size):
cur_mask = mask[k]
cur_boxes = final_box_preds[k, cur_mask]
cur_scores = final_scores[k, cur_mask]
cur_labels = final_class_ids[k, cur_mask]
        if circle_nms:
            # disabled branch: min_radius and nms_post_max_size are expected to
            # come from the model config when this path is enabled
            assert False, 'not checked yet'
            centers = cur_boxes[:, [0, 1]]
            boxes = torch.cat((centers, cur_scores.view(-1, 1)), dim=1)
            keep = _circle_nms(boxes, min_radius=min_radius, post_max_size=nms_post_max_size)
            cur_boxes = cur_boxes[keep]
            cur_scores = cur_scores[keep]
            cur_labels = cur_labels[keep]
ret_pred_dicts.append({
'pred_boxes': cur_boxes,
'pred_scores': cur_scores,
'pred_labels': cur_labels
})
return ret_pred_dicts
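# Hedged usage sketch (illustrative shapes and ranges only; the real ranges,
# voxel size and stride come from the model config): all prediction maps share
# the same (B, C, H, W) spatial layout and the decoder returns one dict per
# sample with 7-dim boxes [x, y, z, dx, dy, dz, heading].
def _example_decode_bbox_from_heatmap():
    import torch
    B, H, W = 1, 32, 32
    heatmap = torch.rand(B, 3, H, W)
    one_ch = torch.rand(B, 1, H, W)
    preds = decode_bbox_from_heatmap(
        heatmap, rot_cos=one_ch, rot_sin=one_ch, center=torch.rand(B, 2, H, W),
        center_z=one_ch, dim=torch.rand(B, 3, H, W),
        point_cloud_range=torch.tensor([-75.2, -75.2, -2.0, 75.2, 75.2, 4.0]),
        voxel_size=[0.1, 0.1], feature_map_stride=8, K=10,
        post_center_limit_range=torch.tensor([-80.0, -80.0, -10.0, 80.0, 80.0, 10.0])
    )
    assert len(preds) == B and preds[0]['pred_boxes'].shape[-1] == 7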
| 7,932
| 33.04721
| 111
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/model_utils/model_nms_utils.py
|
import torch
from ...ops.iou3d_nms import iou3d_nms_utils
def class_agnostic_nms(box_scores, box_preds, nms_config, score_thresh=None):
src_box_scores = box_scores
if score_thresh is not None:
scores_mask = (box_scores >= score_thresh)
box_scores = box_scores[scores_mask]
box_preds = box_preds[scores_mask]
selected = []
if box_scores.shape[0] > 0:
box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0]))
boxes_for_nms = box_preds[indices]
keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
)
selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
if score_thresh is not None:
original_idxs = scores_mask.nonzero().view(-1)
selected = original_idxs[selected]
return selected, src_box_scores[selected]
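# Hedged usage sketch for class_agnostic_nms (requires a CUDA build of the
# iou3d ops; the EasyDict below mirrors the NMS_CONFIG blocks used by this
# repo's configs, so the exact values are illustrative):
#
#   from easydict import EasyDict
#   nms_config = EasyDict(NMS_TYPE='nms_gpu', NMS_THRESH=0.7,
#                         NMS_PRE_MAXSIZE=4096, NMS_POST_MAXSIZE=500)
#   selected, selected_scores = class_agnostic_nms(
#       box_scores=scores.cuda(), box_preds=boxes_7d.cuda(),
#       nms_config=nms_config, score_thresh=0.1
#   )
#   # selected indexes into the original, unfiltered box_scores / box_preds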
def class_agnostic_nms_with_roi(box_scores, box_preds, roi_feature, nms_config, score_thresh=None):
src_box_scores = box_scores
if score_thresh is not None:
scores_mask = (box_scores >= score_thresh)
box_scores = box_scores[scores_mask]
box_preds = box_preds[scores_mask]
selected = []
if box_scores.shape[0] > 0:
box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0]))
boxes_for_nms = box_preds[indices]
keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
)
selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
if score_thresh is not None:
original_idxs = scores_mask.nonzero().view(-1)
selected = original_idxs[selected]
return selected, src_box_scores[selected], roi_feature[selected]
def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None):
"""
Args:
cls_scores: (N, num_class)
box_preds: (N, 7 + C)
nms_config:
score_thresh:
Returns:
"""
pred_scores, pred_labels, pred_boxes = [], [], []
for k in range(cls_scores.shape[1]):
if score_thresh is not None:
scores_mask = (cls_scores[:, k] >= score_thresh)
box_scores = cls_scores[scores_mask, k]
cur_box_preds = box_preds[scores_mask]
else:
box_scores = cls_scores[:, k]
cur_box_preds = box_preds
selected = []
if box_scores.shape[0] > 0:
box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0]))
boxes_for_nms = cur_box_preds[indices]
keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
)
selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]
pred_scores.append(box_scores[selected])
pred_labels.append(box_scores.new_ones(len(selected)).long() * k)
pred_boxes.append(cur_box_preds[selected])
pred_scores = torch.cat(pred_scores, dim=0)
pred_labels = torch.cat(pred_labels, dim=0)
pred_boxes = torch.cat(pred_boxes, dim=0)
return pred_scores, pred_labels, pred_boxes
| 3,422
| 37.460674
| 116
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/model_utils/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/models/model_utils/basic_block_2d.py
|
import torch.nn as nn
class BasicBlock2D(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
"""
Initializes convolutional block
Args:
in_channels: int, Number of input channels
out_channels: int, Number of output channels
**kwargs: Dict, Extra arguments for nn.Conv2d
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
**kwargs)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, features):
"""
Applies convolutional block
Args:
features: (B, C_in, H, W), Input features
Returns:
x: (B, C_out, H, W), Output features
"""
x = self.conv(features)
x = self.bn(x)
x = self.relu(x)
return x
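# Hedged shape sketch: with kernel_size=3 and padding=1 (stride 1) the block
# preserves spatial size and only changes the channel count.
def _example_basic_block_2d():
    import torch
    block = BasicBlock2D(in_channels=64, out_channels=128, kernel_size=3, padding=1)
    x = torch.rand(2, 64, 100, 100)
    assert block(x).shape == (2, 128, 100, 100)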
| 1,038
| 28.685714
| 60
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/model_utils/ensemble.py
|
"""
This file is to match with previous version of CenterPoint model
"""
import torch
import numpy as np
from .wbf_3d import weighted_boxes_fusion_3d
def wbf_online(boxes, scores, labels):
device = boxes.device
dtype = boxes.dtype
boxes_list = boxes.cpu().numpy()
scores_list = scores.cpu().numpy()
labels_list = labels.cpu().numpy()
    iou_thresh = [0.8, 0.6, 0.7]      # per-class IoU match thresholds, indexed by label - 1
    skip_box_thr = [0.1, 0.01, 0.01]  # per-class score thresholds, indexed by label - 1
boxes, scores, labels = weighted_boxes_fusion_3d(
boxes_list=boxes_list,
scores_list=scores_list,
labels_list=labels_list,
weights=None,
iou_thr=iou_thresh,
skip_box_thr=skip_box_thr,
conf_type='avg',
iou_type='3d',
allows_overflow=False
)
boxes = torch.from_numpy(boxes).to(device)
scores = torch.from_numpy(scores).to(device)
labels = torch.from_numpy(labels).to(device)
return boxes, scores, labels
def wbf_offline(boxes, scores, labels):
raise NotImplementedError
| 1,000
| 24.025
| 64
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/model_utils/wbf_3d.py
|
"""
This file is to match with previous version of CenterPoint model
"""
import copy
import numpy as np
import torch
from ...ops.iou3d_nms import iou3d_nms_utils
def prefilter_boxes(boxes, scores, labels, weights, thresh):
# Create dict with boxes stored by its label
new_boxes = dict()
for i in range(len(boxes)):
if len(boxes[i]) != len(scores[i]):
raise ValueError("Length of boxes not equal to length of scores.")
if len(boxes[i]) != len(labels[i]):
raise ValueError("Length of boxes not equal to length of labels.")
for j in range(len(boxes[i])):
            score = scores[i][j][0]
            # score thresholding is applied per class after sorting (see below)
            label = int(labels[i][j][0])
            if label == 0:
                continue
box = boxes[i][j]
x = float(box[0])
y = float(box[1])
z = float(box[2])
dx = float(box[3])
dy = float(box[4])
dz = float(box[5])
yaw = float(box[6])
new_box = [int(label), float(score) * weights[i], x, y, z, dx, dy, dz, yaw]
if label not in new_boxes:
new_boxes[label] = []
new_boxes[label].append(new_box)
    # Sort each class list by score (descending), convert to numpy array and
    # apply the per-class score threshold
    for k in new_boxes:
        current_boxes = np.array(new_boxes[k])
        current_boxes = current_boxes[current_boxes[:, 1].argsort()[::-1]]
        new_boxes[k] = current_boxes[current_boxes[:, 1] >= thresh[k - 1]]
return new_boxes
def get_weighted_box(boxes, conf_type='avg'):
"""
Create weighted box for set of boxes
Param:
boxes: set of boxes to fuse
conf_type: type of confidence, one of 'avg' or 'max'
Return:
weighted box
"""
weighted_box = np.zeros(9, dtype=np.float32)
conf = 0
conf_list = []
for box in boxes:
weighted_box[2:] += (box[1] * box[2:])
conf += box[1]
conf_list.append(box[1])
# assign label
weighted_box[0] = boxes[0][0]
# assign new score
if conf_type == 'avg':
weighted_box[1] = conf / len(boxes)
elif conf_type == 'max':
weighted_box[1] = np.array(conf_list).max()
weighted_box[2:] /= conf
weighted_box[-1] = boxes[conf_list.index(max(conf_list))][-1]
return weighted_box
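# Hedged numeric sketch: two boxes of the same label fused with conf_type='avg'.
# Coordinates are confidence-weighted, while the yaw is copied from the
# highest-confidence member (averaging yaw across the wrap-around would be wrong).
def _example_get_weighted_box():
    import numpy as np
    boxes = [
        np.array([1, 0.8, 0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.1], dtype=np.float32),
        np.array([1, 0.4, 1.2, 0.0, 0.0, 4.0, 2.0, 1.5, 0.3], dtype=np.float32),
    ]
    fused = get_weighted_box(boxes, conf_type='avg')
    assert fused[0] == 1                # label of the cluster
    assert np.isclose(fused[1], 0.6)    # (0.8 + 0.4) / 2
    assert np.isclose(fused[2], 0.4)    # (0.8 * 0.0 + 0.4 * 1.2) / 1.2
    assert np.isclose(fused[-1], 0.1)   # yaw of the highest-score box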
def find_matching_box(boxes_list, new_box, iou_thresh, iou_type):
if len(boxes_list) == 0:
return -1, iou_thresh
boxes_list = np.array(boxes_list)
boxes_gpu = copy.deepcopy(torch.from_numpy(boxes_list[:, 2:]).float().cuda())
new_box = torch.from_numpy(new_box[2:]).unsqueeze(0).float().cuda()
if iou_type == '3d':
ious = iou3d_nms_utils.boxes_iou3d_gpu(new_box, boxes_gpu)
elif iou_type == 'bev':
ious = iou3d_nms_utils.boxes_iou_bev(new_box, boxes_gpu)
best_idx = ious.argmax().item()
best_iou = ious[0][best_idx].item()
if best_iou <= iou_thresh:
best_iou = iou_thresh
best_idx = -1
return best_idx, best_iou
def weighted_boxes_fusion_3d(boxes_list, scores_list, labels_list,
weights=None, iou_thr=None, skip_box_thr=None,
conf_type='avg', iou_type='3d',
allows_overflow=False):
    '''
    Param:
        boxes_list: list of box predictions from each model; it has 3 dimensions
            (models_number, model_preds, 7)
            Box order: x, y, z, dx, dy, dz, yaw in lidar coordinates
        scores_list: list of scores of each box from each model
        labels_list: list of labels of each box from each model
        weights: list of weights for each model.
            Default: None, which means weight == 1 for each model
        iou_thr: per-class IoU thresholds (indexed by label - 1) for boxes to be a match
        skip_box_thr: per-class score thresholds (indexed by label - 1); lower-scoring boxes are excluded
        conf_type: confidence calculation type
            'avg': average value, 'max': maximum value
        iou_type: '3d' for 3D IoU, 'bev' for bird's-eye-view IoU
        allows_overflow: False if we want confidence scores not to exceed 1.0
    Return:
        boxes: fused box coordinates, (N, 7): x, y, z, dx, dy, dz, yaw
        scores: new confidence scores
        labels: box labels
    '''
if weights is None:
weights = np.ones(len(boxes_list))
if len(weights) != len(boxes_list):
print('Warning: incorrect number of weights {}. Must be: {}. Set weights equal to 1.'.format(len(weights),
len(boxes_list)))
weights = np.ones(len(boxes_list))
weights = np.array(weights)
if conf_type not in ['avg', 'max']:
        print('Error. Unknown conf_type: {}. Must be "avg" or "max". Using "avg".'.format(conf_type))
conf_type = 'avg'
filtered_boxes = prefilter_boxes(boxes_list, scores_list, labels_list, weights, skip_box_thr)
if len(filtered_boxes) == 0:
return np.zeros((0, 7)), np.zeros((0,)), np.zeros((0,))
overall_boxes = []
for label in filtered_boxes:
boxes = filtered_boxes[label]
new_boxes = []
weighted_boxes = []
# clusterize boxes
for j in range(0, len(boxes)):
index, best_iou = find_matching_box(weighted_boxes, boxes[j], iou_thr[label - 1], iou_type)
if index != -1:
new_boxes[index].append(boxes[j])
weighted_boxes[index] = get_weighted_box(new_boxes[index], conf_type)
else:
new_boxes.append([boxes[j].copy()])
weighted_boxes.append(boxes[j].copy())
# rescale confidence based on number of models and boxes
for i in range(len(new_boxes)):
if not allows_overflow:
weighted_boxes[i][1] = weighted_boxes[i][1] * min(weights.sum(), len(new_boxes[i])) / weights.sum()
else:
weighted_boxes[i][1] = weighted_boxes[i][1] * len(new_boxes[i]) / weights.sum()
if len(weighted_boxes) != 0:
overall_boxes.append(np.array(weighted_boxes))
if len(overall_boxes) == 0:
return np.zeros((0, 7)), np.zeros((0,)), np.zeros((0,))
overall_boxes = np.concatenate(overall_boxes, axis=0)
overall_boxes = overall_boxes[overall_boxes[:, 1].argsort()[::-1]]
boxes = overall_boxes[:, 2:]
scores = overall_boxes[:, 1]
labels = overall_boxes[:, 0].astype(int)
return boxes, scores, labels
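# Hedged usage sketch (the 3D-IoU matching runs on GPU, so a CUDA build of the
# iou3d ops is required; shapes follow the docstring above):
#
#   boxes_list = [boxes_model_a, boxes_model_b]    # each an (N_i, 7) numpy array
#   scores_list = [scores_a[:, None], scores_b[:, None]]
#   labels_list = [labels_a[:, None], labels_b[:, None]]
#   boxes, scores, labels = weighted_boxes_fusion_3d(
#       boxes_list, scores_list, labels_list,
#       iou_thr=[0.7], skip_box_thr=[0.1],         # one entry per class
#       conf_type='avg', iou_type='3d'
#   )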
| 6,649
| 35.141304
| 118
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_2d/base_bev_backbone.py
|
import numpy as np
import torch
import torch.nn as nn
# from ...utils import uni3d_norm
from ...utils import uni3d_norm as uni3d_norm_used
# from ...utils import uni3d_norm_parallel as uni3d_norm_used
class BaseBEVBackbone(nn.Module):
def __init__(self, model_cfg, input_channels):
super().__init__()
self.model_cfg = model_cfg
if self.model_cfg.get('DUAL_NORM', None):
self.db_source = int(self.model_cfg.db_source)
if self.model_cfg.get('LAYER_NUMS', None) is not None:
assert len(self.model_cfg.LAYER_NUMS) == len(self.model_cfg.LAYER_STRIDES) == len(self.model_cfg.NUM_FILTERS)
layer_nums = self.model_cfg.LAYER_NUMS
layer_strides = self.model_cfg.LAYER_STRIDES
num_filters = self.model_cfg.NUM_FILTERS
else:
layer_nums = layer_strides = num_filters = []
if self.model_cfg.get('UPSAMPLE_STRIDES', None) is not None:
assert len(self.model_cfg.UPSAMPLE_STRIDES) == len(self.model_cfg.NUM_UPSAMPLE_FILTERS)
num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS
upsample_strides = self.model_cfg.UPSAMPLE_STRIDES
else:
upsample_strides = num_upsample_filters = []
num_levels = len(layer_nums)
c_in_list = [input_channels, *num_filters[:-1]]
self.blocks = nn.ModuleList()
self.deblocks = nn.ModuleList()
# using the Dual-Norm:
if self.model_cfg.get('DUAL_NORM', None):
for idx in range(num_levels):
cur_layers = [
nn.ZeroPad2d(1),
nn.Conv2d(
c_in_list[idx], num_filters[idx], kernel_size=3,
stride=layer_strides[idx], padding=0, bias=False
),
uni3d_norm_used.UniNorm2d(num_filters[idx], dataset_from_flag=self.db_source, eps=1e-3, momentum=0.01), # using the dataset-specific norm
nn.ReLU()
]
for k in range(layer_nums[idx]):
cur_layers.extend([
nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False),
uni3d_norm_used.UniNorm2d(num_filters[idx], dataset_from_flag=self.db_source, eps=1e-3, momentum=0.01),
nn.ReLU()
])
self.blocks.append(nn.Sequential(*cur_layers))
if len(upsample_strides) > 0:
stride = upsample_strides[idx]
if stride >= 1:
self.deblocks.append(nn.Sequential(
nn.ConvTranspose2d(
num_filters[idx], num_upsample_filters[idx],
upsample_strides[idx],
stride=upsample_strides[idx], bias=False
),
uni3d_norm_used.UniNorm2d(num_upsample_filters[idx], dataset_from_flag=self.db_source, eps=1e-3, momentum=0.01),
nn.ReLU()
))
else:
                        stride = np.round(1 / stride).astype(int)  # np.int was removed in NumPy >= 1.24
self.deblocks.append(nn.Sequential(
nn.Conv2d(
num_filters[idx], num_upsample_filters[idx],
stride,
stride=stride, bias=False
),
uni3d_norm_used.UniNorm2d(num_upsample_filters[idx], dataset_from_flag=self.db_source, eps=1e-3, momentum=0.01),
nn.ReLU()
))
c_in = sum(num_upsample_filters)
if len(upsample_strides) > num_levels:
self.deblocks.append(nn.Sequential(
nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False),
uni3d_norm_used.UniNorm2d(c_in, dataset_from_flag=self.db_source, eps=1e-3, momentum=0.01),
nn.ReLU(),
))
self.num_bev_features = c_in
else:
for idx in range(num_levels):
cur_layers = [
nn.ZeroPad2d(1),
nn.Conv2d(
c_in_list[idx], num_filters[idx], kernel_size=3,
stride=layer_strides[idx], padding=0, bias=False
),
nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
]
for k in range(layer_nums[idx]):
cur_layers.extend([
nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
])
self.blocks.append(nn.Sequential(*cur_layers))
if len(upsample_strides) > 0:
stride = upsample_strides[idx]
if stride >= 1:
self.deblocks.append(nn.Sequential(
nn.ConvTranspose2d(
num_filters[idx], num_upsample_filters[idx],
upsample_strides[idx],
stride=upsample_strides[idx], bias=False
),
nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
))
else:
                        stride = np.round(1 / stride).astype(int)  # np.int was removed in NumPy >= 1.24
self.deblocks.append(nn.Sequential(
nn.Conv2d(
num_filters[idx], num_upsample_filters[idx],
stride,
stride=stride, bias=False
),
nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
))
c_in = sum(num_upsample_filters)
if len(upsample_strides) > num_levels:
self.deblocks.append(nn.Sequential(
nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False),
nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01),
nn.ReLU(),
))
self.num_bev_features = c_in
def forward(self, data_dict):
"""
Args:
data_dict:
spatial_features
Returns:
"""
spatial_features = data_dict['spatial_features']
ups = []
ret_dict = {}
x = spatial_features
for i in range(len(self.blocks)):
x = self.blocks[i](x)
stride = int(spatial_features.shape[2] / x.shape[2])
ret_dict['spatial_features_%dx' % stride] = x
if len(self.deblocks) > 0:
ups.append(self.deblocks[i](x))
else:
ups.append(x)
if len(ups) > 1:
x = torch.cat(ups, dim=1)
elif len(ups) == 1:
x = ups[0]
if len(self.deblocks) > len(self.blocks):
x = self.deblocks[-1](x)
data_dict['spatial_features_2d'] = x
return data_dict
| 7,589
| 43.127907
| 157
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_2d/__init__.py
|
from .base_bev_backbone import BaseBEVBackbone
__all__ = {
'BaseBEVBackbone': BaseBEVBackbone
}
| 101
| 16
| 46
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_2d/map_to_bev/conv2d_collapse.py
|
import torch
import torch.nn as nn
from pcdet.models.model_utils.basic_block_2d import BasicBlock2D
class Conv2DCollapse(nn.Module):
def __init__(self, model_cfg, grid_size):
"""
Initializes 2D convolution collapse module
Args:
model_cfg: EasyDict, Model configuration
grid_size: (X, Y, Z) Voxel grid size
"""
super().__init__()
self.model_cfg = model_cfg
self.num_heights = grid_size[-1]
self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
self.block = BasicBlock2D(in_channels=self.num_bev_features * self.num_heights,
out_channels=self.num_bev_features,
**self.model_cfg.ARGS)
def forward(self, batch_dict):
"""
Collapses voxel features to BEV via concatenation and channel reduction
Args:
batch_dict:
voxel_features: (B, C, Z, Y, X), Voxel feature representation
Returns:
batch_dict:
spatial_features: (B, C, Y, X), BEV feature representation
"""
voxel_features = batch_dict["voxel_features"]
bev_features = voxel_features.flatten(start_dim=1, end_dim=2) # (B, C, Z, Y, X) -> (B, C*Z, Y, X)
bev_features = self.block(bev_features) # (B, C*Z, Y, X) -> (B, C, Y, X)
batch_dict["spatial_features"] = bev_features
return batch_dict
| 1,451
| 36.230769
| 106
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py
|
import torch
import torch.nn as nn
class PointPillarScatter(nn.Module):
def __init__(self, model_cfg, grid_size, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
self.nx, self.ny, self.nz = grid_size
assert self.nz == 1
def forward(self, batch_dict, **kwargs):
pillar_features, coords = batch_dict['pillar_features'], batch_dict['voxel_coords']
batch_spatial_features = []
batch_size = coords[:, 0].max().int().item() + 1
for batch_idx in range(batch_size):
spatial_feature = torch.zeros(
self.num_bev_features,
self.nz * self.nx * self.ny,
dtype=pillar_features.dtype,
device=pillar_features.device)
batch_mask = coords[:, 0] == batch_idx
this_coords = coords[batch_mask, :]
indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3]
indices = indices.type(torch.long)
pillars = pillar_features[batch_mask, :]
pillars = pillars.t()
spatial_feature[:, indices] = pillars
batch_spatial_features.append(spatial_feature)
batch_spatial_features = torch.stack(batch_spatial_features, 0)
batch_spatial_features = batch_spatial_features.view(batch_size, self.num_bev_features * self.nz, self.ny, self.nx)
batch_dict['spatial_features'] = batch_spatial_features
return batch_dict
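# Hedged sanity sketch (EasyDict is a dependency of this repo's config system):
# voxel_coords rows are (batch_idx, z, y, x) with z == 0 for pillars, so a
# pillar at (y=2, x=3) must land at spatial_features[b, :, 2, 3].
def _example_pointpillar_scatter():
    import torch
    from easydict import EasyDict
    scatter = PointPillarScatter(EasyDict(NUM_BEV_FEATURES=4), grid_size=[8, 6, 1])
    batch_dict = {
        'pillar_features': torch.arange(4, dtype=torch.float32).view(1, 4),
        'voxel_coords': torch.tensor([[0, 0, 2, 3]]),
    }
    out = scatter(batch_dict)['spatial_features']
    assert out.shape == (1, 4, 6, 8)
    assert torch.equal(out[0, :, 2, 3], torch.arange(4, dtype=torch.float32))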
| 1,545
| 39.684211
| 123
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_2d/map_to_bev/__init__.py
|
from .height_compression import HeightCompression
from .pointpillar_scatter import PointPillarScatter
from .conv2d_collapse import Conv2DCollapse
__all__ = {
'HeightCompression': HeightCompression,
'PointPillarScatter': PointPillarScatter,
'Conv2DCollapse': Conv2DCollapse
}
| 288
| 27.9
| 51
|
py
|
3DTrans
|
3DTrans-master/pcdet/models/backbones_2d/map_to_bev/height_compression.py
|
import torch.nn as nn
class HeightCompression(nn.Module):
def __init__(self, model_cfg, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
def forward(self, batch_dict):
"""
Args:
batch_dict:
encoded_spconv_tensor: sparse tensor
Returns:
batch_dict:
spatial_features:
"""
encoded_spconv_tensor = batch_dict['encoded_spconv_tensor']
spatial_features = encoded_spconv_tensor.dense()
N, C, D, H, W = spatial_features.shape
spatial_features = spatial_features.view(N, C * D, H, W)
batch_dict['spatial_features'] = spatial_features
batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride']
return batch_dict
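# Hedged note: encoded_spconv_tensor.dense() materializes the sparse volume as
# (N, C, D, H, W); folding the depth axis into channels turns it into a BEV map
# with C * D channels, e.g. a 128-channel volume with D == 2 becomes a
# 256-channel BEV feature map.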
| 870
| 31.259259
| 90
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/dataset.py
|
import torch
import copy
from pathlib import Path
from collections import defaultdict
import numpy as np
import torch.utils.data as torch_data
from .augmentor.data_augmentor import DataAugmentor
from .processor.data_processor import DataProcessor
from .processor.point_feature_encoder import PointFeatureEncoder
from ..utils import common_utils, box_utils, self_training_utils
from ..ops.roiaware_pool3d import roiaware_pool3d_utils
class DatasetTemplate(torch_data.Dataset):
def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None):
super().__init__()
self.dataset_cfg = dataset_cfg
self.training = training
self.class_names = class_names
self.logger = logger
self.root_path = root_path if root_path is not None else Path(self.dataset_cfg.DATA_PATH)
self.oss_path = self.dataset_cfg.OSS_PATH if 'OSS_PATH' in self.dataset_cfg else None
self.logger = logger
if self.dataset_cfg is None or class_names is None:
return
self.point_cloud_range = np.array(self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32)
self.point_feature_encoder = PointFeatureEncoder(
self.dataset_cfg.POINT_FEATURE_ENCODING,
point_cloud_range=self.point_cloud_range
)
if self.oss_path is not None:
self.data_augmentor = DataAugmentor(
self.oss_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=True
) if self.training else None
else:
self.data_augmentor = DataAugmentor(
self.root_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=False
) if self.training else None
self.data_processor = DataProcessor(
self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range,
training=self.training, num_point_features=self.point_feature_encoder.num_point_features
)
self.grid_size = self.data_processor.grid_size
self.voxel_size = self.data_processor.voxel_size
self.total_epochs = 0
self._merge_all_iters_to_one_epoch = False
if hasattr(self.data_processor, "depth_downsample_factor"):
self.depth_downsample_factor = self.data_processor.depth_downsample_factor
else:
self.depth_downsample_factor = None
@property
def mode(self):
return 'train' if self.training else 'test'
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
To support a custom dataset, implement this function to receive the predicted results from the model, and then
transform the unified normative coordinate to your required coordinate, and optionally save them to disk.
Args:
batch_dict: dict of original data from the dataloader
pred_dicts: dict of predicted results from the model
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path: if it is not None, save the results to this path
Returns:
"""
raise NotImplementedError
@staticmethod
def __vis__(points, gt_boxes, ref_boxes=None, scores=None, use_fakelidar=False):
import visual_utils.visualize_utils as vis
import mayavi.mlab as mlab
gt_boxes = copy.deepcopy(gt_boxes)
if use_fakelidar:
gt_boxes = box_utils.boxes3d_kitti_lidar_to_fakelidar(gt_boxes)
if ref_boxes is not None:
ref_boxes = copy.deepcopy(ref_boxes)
if use_fakelidar:
ref_boxes = box_utils.boxes3d_kitti_lidar_to_fakelidar(ref_boxes)
vis.draw_scenes(points, gt_boxes, ref_boxes=ref_boxes, ref_scores=scores)
mlab.show(stop=True)
@staticmethod
def __vis_fake__(points, gt_boxes, ref_boxes=None, scores=None, use_fakelidar=True):
import visual_utils.visualize_utils as vis
import mayavi.mlab as mlab
gt_boxes = copy.deepcopy(gt_boxes)
if use_fakelidar:
gt_boxes = box_utils.boxes3d_kitti_lidar_to_fakelidar(gt_boxes)
if ref_boxes is not None:
ref_boxes = copy.deepcopy(ref_boxes)
if use_fakelidar:
ref_boxes = box_utils.boxes3d_kitti_lidar_to_fakelidar(ref_boxes)
vis.draw_scenes(points, gt_boxes, ref_boxes=ref_boxes, ref_scores=scores)
mlab.show(stop=True)
@staticmethod
def extract_fov_data(points, fov_degree, heading_angle):
"""
Args:
points: (N, 3 + C)
fov_degree: [0~180]
heading_angle: [0~360] in lidar coords, 0 is the x-axis, increase clockwise
Returns:
"""
half_fov_degree = fov_degree / 180 * np.pi / 2
heading_angle = -heading_angle / 180 * np.pi
points_new = common_utils.rotate_points_along_z(
points.copy()[np.newaxis, :, :], np.array([heading_angle])
)[0]
angle = np.arctan2(points_new[:, 1], points_new[:, 0])
fov_mask = ((np.abs(angle) < half_fov_degree) & (points_new[:, 0] > 0))
points = points_new[fov_mask]
return points
@staticmethod
def extract_fov_gt(gt_boxes, fov_degree, heading_angle):
"""
Args:
anno_dict:
fov_degree: [0~180]
heading_angle: [0~360] in lidar coords, 0 is the x-axis, increase clockwise
Returns:
"""
half_fov_degree = fov_degree / 180 * np.pi / 2
heading_angle = -heading_angle / 180 * np.pi
gt_boxes_lidar = copy.deepcopy(gt_boxes)
gt_boxes_lidar = common_utils.rotate_points_along_z(
gt_boxes_lidar[np.newaxis, :, :], np.array([heading_angle])
)[0]
gt_boxes_lidar[:, 6] += heading_angle
gt_angle = np.arctan2(gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0])
fov_gt_mask = ((np.abs(gt_angle) < half_fov_degree) & (gt_boxes_lidar[:, 0] > 0))
return fov_gt_mask
def fill_pseudo_labels(self, input_dict):
gt_boxes = self_training_utils.load_ps_label(input_dict['frame_id'])
gt_scores = gt_boxes[:, 8]
gt_classes = gt_boxes[:, 7]
gt_boxes = gt_boxes[:, :7]
        # only suitable for a single class; generate gt_names for prepare_data
gt_names = np.array([self.class_names[0] for n in gt_boxes])
input_dict['gt_boxes'] = gt_boxes
input_dict['gt_names'] = gt_names
input_dict['gt_classes'] = gt_classes
input_dict['gt_scores'] = gt_scores
input_dict['pos_ps_bbox'] = (gt_classes > 0).sum()
input_dict['ign_ps_bbox'] = gt_boxes.shape[0] - input_dict['pos_ps_bbox']
input_dict.pop('num_points_in_gt', None)
def merge_all_iters_to_one_epoch(self, merge=True, epochs=None):
if merge:
self._merge_all_iters_to_one_epoch = True
self.total_epochs = epochs
else:
self._merge_all_iters_to_one_epoch = False
def __len__(self):
raise NotImplementedError
def __getitem__(self, index):
"""
To support a custom dataset, implement this function to load the raw data (and labels), then transform them to
the unified normative coordinate and call the function self.prepare_data() to process the data and send them
to the model.
Args:
index:
Returns:
"""
raise NotImplementedError
def prepare_data(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
...
Returns:
data_dict:
frame_id: string
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
use_lead_xyz: bool
voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
voxel_coords: optional (num_voxels, 3)
voxel_num_points: optional (num_voxels)
...
"""
if self.training:
# filter gt_boxes without points
num_points_in_gt = data_dict.get('num_points_in_gt', None)
if num_points_in_gt is None:
num_points_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(data_dict['points'][:, :3]),
torch.from_numpy(data_dict['gt_boxes'][:, :7])).numpy().sum(axis=1)
mask = (num_points_in_gt >= self.dataset_cfg.get('MIN_POINTS_OF_GT', 1))
data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]
data_dict['gt_names'] = data_dict['gt_names'][mask]
if 'gt_classes' in data_dict:
data_dict['gt_classes'] = data_dict['gt_classes'][mask]
data_dict['gt_scores'] = data_dict['gt_scores'][mask]
assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training'
gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
data_dict = self.data_augmentor.forward(
data_dict={
**data_dict,
'gt_boxes_mask': gt_boxes_mask
}
)
if data_dict.get('gt_boxes', None) is not None:
selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
data_dict['gt_names'] = data_dict['gt_names'][selected]
            # pseudo labels may carry ignored classes, in which case gt_classes is provided directly
            if 'gt_classes' not in data_dict:
gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
else:
gt_classes = data_dict['gt_classes'][selected]
data_dict['gt_scores'] = data_dict['gt_scores'][selected]
gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
data_dict['gt_boxes'] = gt_boxes
if data_dict.get('gt_boxes2d', None) is not None:
data_dict['gt_boxes2d'] = data_dict['gt_boxes2d'][selected]
if data_dict.get('points', None) is not None:
data_dict = self.point_feature_encoder.forward(data_dict)
data_dict = self.data_processor.forward(
data_dict=data_dict
)
if self.training and len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
data_dict.pop('gt_names', None)
data_dict.pop('gt_classes', None)
return data_dict
@staticmethod
def collate_batch(batch_list, _unused=False):
data_dict = defaultdict(list)
for cur_sample in batch_list:
for key, val in cur_sample.items():
data_dict[key].append(val)
batch_size = len(batch_list)
ret = {}
for key, val in data_dict.items():
try:
if key in ['voxels', 'voxel_num_points']:
ret[key] = np.concatenate(val, axis=0)
elif key in ['points', 'voxel_coords']:
coors = []
for i, coor in enumerate(val):
coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
coors.append(coor_pad)
ret[key] = np.concatenate(coors, axis=0)
elif key in ['gt_boxes']:
max_gt = max([len(x) for x in val])
batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
for k in range(batch_size):
batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
ret[key] = batch_gt_boxes3d
elif key in ['gt_scores']:
max_gt = max([len(x) for x in val])
batch_scores = np.zeros((batch_size, max_gt), dtype=np.float32)
for k in range(batch_size):
batch_scores[k, :val[k].__len__()] = val[k]
ret[key] = batch_scores
elif key in ['gt_boxes2d']:
                    max_boxes = max([len(x) for x in val])
batch_boxes2d = np.zeros((batch_size, max_boxes, val[0].shape[-1]), dtype=np.float32)
for k in range(batch_size):
if val[k].size > 0:
batch_boxes2d[k, :val[k].__len__(), :] = val[k]
ret[key] = batch_boxes2d
elif key in ["images", "depth_maps"]:
# Get largest image size (H, W)
max_h = 0
max_w = 0
for image in val:
max_h = max(max_h, image.shape[0])
max_w = max(max_w, image.shape[1])
# Change size of images
images = []
for image in val:
pad_h = common_utils.get_pad_params(desired_size=max_h, cur_size=image.shape[0])
pad_w = common_utils.get_pad_params(desired_size=max_w, cur_size=image.shape[1])
pad_width = (pad_h, pad_w)
# Pad with nan, to be replaced later in the pipeline.
pad_value = np.nan
if key == "images":
pad_width = (pad_h, pad_w, (0, 0))
elif key == "depth_maps":
pad_width = (pad_h, pad_w)
image_pad = np.pad(image,
pad_width=pad_width,
mode='constant',
constant_values=pad_value)
images.append(image_pad)
ret[key] = np.stack(images, axis=0)
else:
ret[key] = np.stack(val, axis=0)
            except Exception:
                print('Error in collate_batch: key=%s' % key)
                raise TypeError
ret['batch_size'] = batch_size
return ret
def eval(self):
self.training = False
self.data_processor.eval()
def train(self):
self.training = True
self.data_processor.train()
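# Hedged collate sketch: samples with different box counts are zero-padded to
# the largest count in the batch, so downstream consumers must mask all-zero rows.
def _example_collate_gt_boxes():
    import numpy as np
    s1 = {'gt_boxes': np.ones((2, 8), dtype=np.float32)}
    s2 = {'gt_boxes': np.ones((5, 8), dtype=np.float32)}
    batch = DatasetTemplate.collate_batch([s1, s2])
    assert batch['gt_boxes'].shape == (2, 5, 8)
    assert (batch['gt_boxes'][0, 2:] == 0).all()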
| 15,032
| 40.527624
| 118
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/semi_dataset.py
|
from collections import defaultdict
from pathlib import Path
import copy
import numpy as np
import torch.utils.data as torch_data
from ..utils import common_utils
from .augmentor.data_augmentor import DataAugmentor
from .augmentor.ssl_data_augmentor import SSLDataAugmentor
from .processor.data_processor import DataProcessor, PairDataProcessor
from .processor.point_feature_encoder import PointFeatureEncoder
class SemiDatasetTemplate(torch_data.Dataset):
def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None):
super().__init__()
self.dataset_cfg = dataset_cfg
self.training = training
self.class_names = class_names
self.logger = logger
self.root_path = Path(root_path) if root_path is not None else Path(self.dataset_cfg.DATA_PATH)
self.oss_path = self.dataset_cfg.OSS_PATH if 'OSS_PATH' in self.dataset_cfg else None
self.logger = logger
if self.dataset_cfg is None or class_names is None:
return
self.point_cloud_range = np.array(self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32)
self.point_feature_encoder = PointFeatureEncoder(
self.dataset_cfg.POINT_FEATURE_ENCODING,
point_cloud_range=self.point_cloud_range
)
if self.oss_path is not None:
self.data_augmentor = DataAugmentor(
self.oss_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=True
) if self.training else None
else:
self.data_augmentor = DataAugmentor(
self.root_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=False
) if self.training else None
if self.dataset_cfg.get('USE_PAIR_PROCESSOR', False):
self.data_processor = PairDataProcessor(
self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range, training=self.training, num_point_features=self.point_feature_encoder.num_point_features
)
else:
self.data_processor = DataProcessor(
self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range, training=self.training, num_point_features=self.point_feature_encoder.num_point_features
)
if self.dataset_cfg.get('USE_SHARED_AUGMENTOR', False):
if self.oss_path is not None:
self.share_augmentor = SSLDataAugmentor(
self.oss_path, self.dataset_cfg.SHARED_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=True
) if self.training else None
else:
self.share_augmentor = SSLDataAugmentor(
self.root_path, self.dataset_cfg.SHARED_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=False
) if self.training else None
else:
self.share_augmentor = None
if self.oss_path is not None:
self.teacher_augmentor = SSLDataAugmentor(
self.oss_path, self.dataset_cfg.TEACHER_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=True
) if self.training else None
self.student_augmentor = SSLDataAugmentor(
self.oss_path, self.dataset_cfg.STUDENT_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=True
) if self.training else None
else:
self.teacher_augmentor = SSLDataAugmentor(
self.root_path, self.dataset_cfg.TEACHER_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=False
) if self.training else None
self.student_augmentor = SSLDataAugmentor(
self.root_path, self.dataset_cfg.STUDENT_AUGMENTOR, self.class_names, logger=self.logger, oss_flag=False
) if self.training else None
self.grid_size = self.data_processor.grid_size
self.voxel_size = self.data_processor.voxel_size
self.total_epochs = 0
self._merge_all_iters_to_one_epoch = False
if hasattr(self.data_processor, "depth_downsample_factor"):
self.depth_downsample_factor = self.data_processor.depth_downsample_factor
else:
self.depth_downsample_factor = None
@property
def mode(self):
return 'train' if self.training else 'test'
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
To support a custom dataset, implement this function to receive the predicted results from the model, and then
transform the unified normative coordinate to your required coordinate, and optionally save them to disk.
Args:
batch_dict: dict of original data from the dataloader
pred_dicts: dict of predicted results from the model
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path: if it is not None, save the results to this path
Returns:
"""
def merge_all_iters_to_one_epoch(self, merge=True, epochs=None):
if merge:
self._merge_all_iters_to_one_epoch = True
self.total_epochs = epochs
else:
self._merge_all_iters_to_one_epoch = False
def __len__(self):
raise NotImplementedError
def __getitem__(self, index):
"""
To support a custom dataset, implement this function to load the raw data (and labels), then transform them to
the unified normative coordinate and call the function self.prepare_data() to process the data and send them
to the model.
Args:
index:
Returns:
"""
raise NotImplementedError
def prepare_data(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
...
Returns:
data_dict:
frame_id: string
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
use_lead_xyz: bool
voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
voxel_coords: optional (num_voxels, 3)
voxel_num_points: optional (num_voxels)
...
"""
if self.training:
assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training'
gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
data_dict = self.data_augmentor.forward(
data_dict={
**data_dict,
'gt_boxes_mask': gt_boxes_mask
}
)
if len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
if data_dict.get('gt_boxes', None) is not None:
selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
data_dict['gt_names'] = data_dict['gt_names'][selected]
gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
data_dict['gt_boxes'] = gt_boxes
data_dict = self.point_feature_encoder.forward(data_dict)
data_dict = self.data_processor.forward(
data_dict=data_dict
)
data_dict.pop('gt_names', None)
return data_dict
def prepare_data_ssl(self, data_dict, output_dicts):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
...
Returns:
data_dict:
frame_id: string
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
use_lead_xyz: bool
voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
voxel_coords: optional (num_voxels, 3)
voxel_num_points: optional (num_voxels)
...
"""
if 'gt_boxes' in data_dict:
gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
data_dict={
**data_dict,
'gt_boxes_mask': gt_boxes_mask
}
if self.share_augmentor is not None:
data_dict = self.share_augmentor.forward(data_dict)
if 'teacher' in output_dicts:
teacher_data_dict = self.teacher_augmentor.forward(copy.deepcopy(data_dict))
else:
teacher_data_dict = None
if 'student' in output_dicts:
student_data_dict = self.student_augmentor.forward(copy.deepcopy(data_dict))
else:
student_data_dict = None
        if data_dict is not None and student_data_dict is None and teacher_data_dict is None:
if 'gt_boxes' in data_dict:
if len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
data_dict['gt_names'] = data_dict['gt_names'][selected]
gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
data_dict['gt_boxes'] = gt_boxes
data_dict = self.point_feature_encoder.forward(data_dict)
data_dict = self.data_processor.forward(
data_dict=data_dict
)
data_dict.pop('gt_names', None)
return data_dict
for data_dict in [teacher_data_dict, student_data_dict]:
if data_dict is None:
continue
if 'gt_boxes' in data_dict:
if len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
data_dict['gt_names'] = data_dict['gt_names'][selected]
gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
data_dict['gt_boxes'] = gt_boxes
data_dict = self.point_feature_encoder.forward(data_dict)
data_dict = self.data_processor.forward(
data_dict=data_dict
)
data_dict.pop('gt_names', None)
return teacher_data_dict, student_data_dict
def prepare_data_ssl_pair(self, data_dict, output_dicts):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
...
Returns:
data_dict:
frame_id: string
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
use_lead_xyz: bool
voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
voxel_coords: optional (num_voxels, 3)
voxel_num_points: optional (num_voxels)
...
"""
if 'gt_boxes' in data_dict:
gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
data_dict={
**data_dict,
'gt_boxes_mask': gt_boxes_mask
}
if self.share_augmentor is not None:
data_dict = self.share_augmentor.forward(data_dict)
if 'teacher' in output_dicts:
teacher_data_dict = self.teacher_augmentor.forward(copy.deepcopy(data_dict))
else:
teacher_data_dict = None
if 'student' in output_dicts:
student_data_dict = self.student_augmentor.forward(copy.deepcopy(data_dict))
else:
student_data_dict = None
        if data_dict is not None and student_data_dict is None and teacher_data_dict is None:
if 'gt_boxes' in data_dict:
if len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
data_dict['gt_names'] = data_dict['gt_names'][selected]
gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
data_dict['gt_boxes'] = gt_boxes
data_dict = self.point_feature_encoder.forward(data_dict)
data_dict = self.data_processor.forward(
data_dict=data_dict
)
data_dict.pop('gt_names', None)
return data_dict
for data_dict in [teacher_data_dict, student_data_dict]:
if data_dict is None:
continue
if 'gt_boxes' in data_dict:
if len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
data_dict['gt_names'] = data_dict['gt_names'][selected]
gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
data_dict['gt_boxes'] = gt_boxes
data_dict = self.point_feature_encoder.forward(data_dict)
teacher_data_dict, student_data_dict = self.data_processor.forward(
data_dict_1=teacher_data_dict, data_dict_2=student_data_dict
)
data_dict.pop('gt_names', None)
return teacher_data_dict, student_data_dict
@staticmethod
def collate_batch(batch_list, _unused=False):
def collate_single_batch(batch_list):
data_dict = defaultdict(list)
for cur_sample in batch_list:
if isinstance(cur_sample, dict):
for key, val in cur_sample.items():
data_dict[key].append(val)
else:
raise Exception('batch samples must be dict')
batch_size = len(batch_list)
ret = {}
for key, val in data_dict.items():
try:
if key in ['voxels', 'voxel_num_points']:
ret[key] = np.concatenate(val, axis=0)
elif key in ['points', 'voxel_coords']:
coors = []
for i, coor in enumerate(val):
coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
coors.append(coor_pad)
ret[key] = np.concatenate(coors, axis=0)
elif key in ['gt_boxes']:
max_gt = max([len(x) for x in val])
batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
for k in range(batch_size):
batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
ret[key] = batch_gt_boxes3d
elif key in ['augmentation_list', 'augmentation_params']:
ret[key] = val
else:
ret[key] = np.stack(val, axis=0)
                except Exception:
                    print('Error in collate_batch: key=%s' % key)
                    raise TypeError
ret['batch_size'] = batch_size
return ret
if isinstance(batch_list[0], dict):
return collate_single_batch(batch_list)
elif isinstance(batch_list[0], tuple):
if batch_list[0][0] is None:
teacher_batch = None
else:
teacher_batch_list = [sample[0] for sample in batch_list]
teacher_batch = collate_single_batch(teacher_batch_list)
if batch_list[0][1] is None:
student_batch = None
else:
student_batch_list = [sample[1] for sample in batch_list]
student_batch = collate_single_batch(student_batch_list)
return teacher_batch, student_batch
else:
raise Exception('batch samples must be dict or tuple')
| 18,381
| 41.848485
| 179
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/__init__.py
|
import torch
from torch.utils.data import DataLoader
from torch.utils.data import DistributedSampler as _DistributedSampler
from pcdet.utils import common_utils
from .dataset import DatasetTemplate
from .kitti.kitti_dataset import KittiDataset
from .kitti.kitti_dataset_ada import ActiveKittiDataset
from .nuscenes.nuscenes_dataset import NuScenesDataset
from .nuscenes.nuscenes_dataset_ada import ActiveNuScenesDataset
from .waymo.waymo_dataset import WaymoDataset
from .waymo.waymo_dataset_ada import ActiveWaymoDataset
from .pandaset.pandaset_dataset import PandasetDataset
from .lyft.lyft_dataset import LyftDataset
from .lyft.lyft_dataset_ada import ActiveLyftDataset
from .once.once_dataset import ONCEDataset
from .once.once_dataset_ada import ActiveONCEDataset
from .once.once_semi_dataset import ONCEPretrainDataset, ONCELabeledDataset, ONCEUnlabeledDataset, ONCETestDataset, ONCEUnlabeledPairDataset, split_once_semi_data
from .nuscenes.nuscenes_semi_dataset import NuScenesPretrainDataset, NuScenesLabeledDataset, NuScenesUnlabeledDataset, NuScenesTestDataset, split_nuscenes_semi_data
from .kitti.kitti_semi_dataset import KittiPretrainDataset, KittiLabeledDataset, KittiUnlabeledDataset, KittiTestDataset, split_kitti_semi_data
__all__ = {
'DatasetTemplate': DatasetTemplate,
'KittiDataset': KittiDataset,
'ActiveKittiDataset': ActiveKittiDataset,
'NuScenesDataset': NuScenesDataset,
'ActiveNuScenesDataset': ActiveNuScenesDataset,
'WaymoDataset': WaymoDataset,
'ActiveWaymoDataset': ActiveWaymoDataset,
'PandasetDataset': PandasetDataset,
'LyftDataset': LyftDataset,
'ONCEDataset': ONCEDataset,
'ActiveLyftDataset': ActiveLyftDataset,
'ActiveONCEDataset': ActiveONCEDataset,
}
_semi_dataset_dict = {
'ONCEDataset': {
'PARTITION_FUNC': split_once_semi_data,
'PRETRAIN': ONCEPretrainDataset,
'LABELED': ONCELabeledDataset,
'UNLABELED': ONCEUnlabeledDataset,
'UNLABELED_PAIR': ONCEUnlabeledPairDataset,
'TEST': ONCETestDataset
},
'NuScenesDataset': {
'PARTITION_FUNC': split_nuscenes_semi_data,
'PRETRAIN': NuScenesPretrainDataset,
'LABELED': NuScenesLabeledDataset,
'UNLABELED': NuScenesUnlabeledDataset,
'TEST': NuScenesTestDataset
},
'KittiDataset': {
'PARTITION_FUNC': split_kitti_semi_data,
'PRETRAIN': KittiPretrainDataset,
'LABELED': KittiLabeledDataset,
'UNLABELED': KittiUnlabeledDataset,
'TEST': KittiTestDataset
}
}
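# Hedged note on the sampler below: it subclasses torch's DistributedSampler to
# expose an explicit shuffle flag (older torch versions lacked one; it is used
# here for deterministic evaluation). Indices are padded up to total_size and
# then strided by rank, so every replica draws exactly num_samples indices.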
class DistributedSampler(_DistributedSampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
def __iter__(self):
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0):
dataset = __all__[dataset_cfg.DATASET](
dataset_cfg=dataset_cfg,
class_names=class_names,
root_path=root_path,
training=training,
logger=logger,
)
if merge_all_iters_to_one_epoch:
assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
if dist:
if training:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
rank, world_size = common_utils.get_dist_info()
sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
else:
sampler = None
dataloader = DataLoader(
dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,
shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
drop_last=False, sampler=sampler, timeout=0
)
return dataset, dataloader, sampler
def build_dataloader_ada(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
logger=None, training=True, info_path=None, merge_all_iters_to_one_epoch=False, total_epochs=0):
dataset = __all__[dataset_cfg.DATASET](
dataset_cfg=dataset_cfg,
class_names=class_names,
root_path=root_path,
training=training,
logger=logger,
sample_info_path=info_path,
)
if merge_all_iters_to_one_epoch:
assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
if dist:
if training:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
rank, world_size = common_utils.get_dist_info()
sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
else:
sampler = None
dataloader = DataLoader(
dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,
shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
drop_last=False, sampler=sampler, timeout=0
)
return dataset, dataloader, sampler
def build_dataloader_mdf(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4, drop_last=True,
logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0):
dataset = __all__[dataset_cfg.DATASET](
dataset_cfg=dataset_cfg,
class_names=class_names,
root_path=root_path,
training=training,
logger=logger,
)
if merge_all_iters_to_one_epoch:
assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
if dist:
if training:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
rank, world_size = common_utils.get_dist_info()
sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
else:
sampler = None
dataloader = DataLoader(
dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,
shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
drop_last=drop_last, sampler=sampler, timeout=0
)
return dataset, dataloader, sampler
def build_semi_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
logger=None, merge_all_iters_to_one_epoch=False):
assert merge_all_iters_to_one_epoch is False
train_infos, test_infos, labeled_infos, unlabeled_infos = _semi_dataset_dict[dataset_cfg.DATASET]['PARTITION_FUNC'](
dataset_cfg = dataset_cfg,
info_paths = dataset_cfg.INFO_PATH,
data_splits = dataset_cfg.DATA_SPLIT,
root_path = root_path,
labeled_ratio = dataset_cfg.LABELED_RATIO,
logger = logger,
)
pretrain_dataset = _semi_dataset_dict[dataset_cfg.DATASET]['PRETRAIN'](
dataset_cfg=dataset_cfg,
class_names=class_names,
infos = train_infos,
root_path=root_path,
logger=logger,
)
if dist:
pretrain_sampler = torch.utils.data.distributed.DistributedSampler(pretrain_dataset)
else:
pretrain_sampler = None
pretrain_dataloader = DataLoader(
pretrain_dataset, batch_size=batch_size['pretrain'], pin_memory=True, num_workers=workers,
shuffle=(pretrain_sampler is None) and True, collate_fn=pretrain_dataset.collate_batch,
drop_last=False, sampler=pretrain_sampler, timeout=0
)
labeled_dataset = _semi_dataset_dict[dataset_cfg.DATASET]['LABELED'](
dataset_cfg=dataset_cfg,
class_names=class_names,
infos = labeled_infos,
root_path=root_path,
logger=logger,
)
if dist:
labeled_sampler = torch.utils.data.distributed.DistributedSampler(labeled_dataset)
else:
labeled_sampler = None
labeled_dataloader = DataLoader(
labeled_dataset, batch_size=batch_size['labeled'], pin_memory=True, num_workers=workers,
shuffle=(labeled_sampler is None) and True, collate_fn=labeled_dataset.collate_batch,
drop_last=False, sampler=labeled_sampler, timeout=0
)
unlabeled_dataset = _semi_dataset_dict[dataset_cfg.DATASET]['UNLABELED'](
dataset_cfg=dataset_cfg,
class_names=class_names,
infos = unlabeled_infos,
root_path=root_path,
logger=logger,
)
if dist:
unlabeled_sampler = torch.utils.data.distributed.DistributedSampler(unlabeled_dataset)
else:
unlabeled_sampler = None
unlabeled_dataloader = DataLoader(
unlabeled_dataset, batch_size=batch_size['unlabeled'], pin_memory=True, num_workers=workers,
shuffle=(unlabeled_sampler is None) and True, collate_fn=unlabeled_dataset.collate_batch,
drop_last=False, sampler=unlabeled_sampler, timeout=0
)
test_dataset = _semi_dataset_dict[dataset_cfg.DATASET]['TEST'](
dataset_cfg=dataset_cfg,
class_names=class_names,
infos = test_infos,
root_path=root_path,
logger=logger,
)
if dist:
rank, world_size = common_utils.get_dist_info()
test_sampler = DistributedSampler(test_dataset, world_size, rank, shuffle=False)
else:
test_sampler = None
test_dataloader = DataLoader(
test_dataset, batch_size=batch_size['test'], pin_memory=True, num_workers=workers,
shuffle=(test_sampler is None) and False, collate_fn=test_dataset.collate_batch,
drop_last=False, sampler=test_sampler, timeout=0
)
datasets = {
'pretrain': pretrain_dataset,
'labeled': labeled_dataset,
'unlabeled': unlabeled_dataset,
'test': test_dataset
}
dataloaders = {
'pretrain': pretrain_dataloader,
'labeled': labeled_dataloader,
'unlabeled': unlabeled_dataloader,
'test': test_dataloader
}
samplers = {
'pretrain': pretrain_sampler,
'labeled': labeled_sampler,
'unlabeled': unlabeled_sampler,
'test': test_sampler
}
return datasets, dataloaders, samplers
def build_unsupervised_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
logger=None, merge_all_iters_to_one_epoch=False):
assert merge_all_iters_to_one_epoch is False
train_infos, test_infos, labeled_infos, unlabeled_infos = _semi_dataset_dict[dataset_cfg.DATASET]['PARTITION_FUNC'](
dataset_cfg = dataset_cfg,
info_paths = dataset_cfg.INFO_PATH,
data_splits = dataset_cfg.DATA_SPLIT,
root_path = root_path,
labeled_ratio = dataset_cfg.LABELED_RATIO,
logger = logger,
)
unlabeled_dataset = _semi_dataset_dict[dataset_cfg.DATASET]['UNLABELED_PAIR'](
dataset_cfg=dataset_cfg,
class_names=class_names,
infos = unlabeled_infos,
root_path=root_path,
logger=logger,
)
if dist:
unlabeled_sampler = torch.utils.data.distributed.DistributedSampler(unlabeled_dataset)
else:
unlabeled_sampler = None
unlabeled_dataloader = DataLoader(
unlabeled_dataset, batch_size=batch_size['unlabeled'], pin_memory=True, num_workers=workers,
shuffle=(unlabeled_sampler is None) and True, collate_fn=unlabeled_dataset.collate_batch,
drop_last=False, sampler=unlabeled_sampler, timeout=0
)
test_dataset = _semi_dataset_dict[dataset_cfg.DATASET]['TEST'](
dataset_cfg=dataset_cfg,
class_names=class_names,
infos = test_infos,
root_path=root_path,
logger=logger,
)
if dist:
rank, world_size = common_utils.get_dist_info()
test_sampler = DistributedSampler(test_dataset, world_size, rank, shuffle=False)
else:
test_sampler = None
test_dataloader = DataLoader(
test_dataset, batch_size=batch_size['test'], pin_memory=True, num_workers=workers,
shuffle=(test_sampler is None) and False, collate_fn=test_dataset.collate_batch,
drop_last=False, sampler=test_sampler, timeout=0
)
datasets = {
'unlabeled': unlabeled_dataset,
'test': test_dataset
}
dataloaders = {
'unlabeled': unlabeled_dataloader,
'test': test_dataloader
}
samplers = {
'unlabeled': unlabeled_sampler,
'test': test_sampler
}
return datasets, dataloaders, samplers
| 13,075
| 36.36
| 164
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/waymo/waymo_utils.py
|
import os
import pickle
import numpy as np
from ...utils import common_utils
import tensorflow as tf
from waymo_open_dataset.utils import frame_utils, transform_utils, range_image_utils
from waymo_open_dataset import dataset_pb2
try:
    tf.enable_eager_execution()
except Exception:
    # TF 2.x runs eagerly by default and no longer exposes enable_eager_execution
    pass
WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Sign', 'Cyclist']
def generate_labels(frame):
obj_name, difficulty, dimensions, locations, heading_angles = [], [], [], [], []
tracking_difficulty, speeds, accelerations, obj_ids = [], [], [], []
num_points_in_gt = []
laser_labels = frame.laser_labels
for i in range(len(laser_labels)):
box = laser_labels[i].box
class_ind = laser_labels[i].type
loc = [box.center_x, box.center_y, box.center_z]
heading_angles.append(box.heading)
obj_name.append(WAYMO_CLASSES[class_ind])
difficulty.append(laser_labels[i].detection_difficulty_level)
tracking_difficulty.append(laser_labels[i].tracking_difficulty_level)
dimensions.append([box.length, box.width, box.height]) # lwh in unified coordinate of 3DTrans
locations.append(loc)
obj_ids.append(laser_labels[i].id)
num_points_in_gt.append(laser_labels[i].num_lidar_points_in_box)
annotations = {}
annotations['name'] = np.array(obj_name)
annotations['difficulty'] = np.array(difficulty)
annotations['dimensions'] = np.array(dimensions)
annotations['location'] = np.array(locations)
annotations['heading_angles'] = np.array(heading_angles)
annotations['obj_ids'] = np.array(obj_ids)
annotations['tracking_difficulty'] = np.array(tracking_difficulty)
annotations['num_points_in_gt'] = np.array(num_points_in_gt)
annotations = common_utils.drop_info_with_name(annotations, name='unknown')
if annotations['name'].__len__() > 0:
gt_boxes_lidar = np.concatenate([
annotations['location'], annotations['dimensions'], annotations['heading_angles'][..., np.newaxis]],
axis=1
)
else:
gt_boxes_lidar = np.zeros((0, 7))
annotations['gt_boxes_lidar'] = gt_boxes_lidar
return annotations
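# Shape sketch (illustrative): for a frame with K labeled objects left after dropping
# 'unknown', generate_labels() returns
#   annotations['name']             -> (K,)   e.g. 'Vehicle'
#   annotations['gt_boxes_lidar']   -> (K, 7) [x, y, z, l, w, h, heading], unified frame
#   annotations['num_points_in_gt'] -> (K,)   lidar points inside each box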
def convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose, ri_index=(0, 1), target_beam=64):
    """
    Modified from the codes of Waymo Open Dataset.
    Convert range images to point cloud.
    Args:
        frame: open dataset frame
        range_images: A dict of {laser_name, [range_image_first_return, range_image_second_return]}.
        camera_projections: A dict of {laser_name,
            [camera_projection_from_first_return, camera_projection_from_second_return]}.
        range_image_top_pose: range image pixel pose for top lidar.
        ri_index: tuple of lidar return indices to extract: (0,) for the first return only,
            (0, 1) for both the first and second returns.
        target_beam: number of beams to keep for the 64-beam lidar; values below 64
            uniformly subsample the range-image rows (e.g. 32 keeps every 2nd beam).
    Returns:
        points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
        cp_points: {[N, 6]} list of camera projections of length 5 (number of lidars).
    """
calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
points = []
cp_points = []
points_NLZ = []
points_intensity = []
points_elongation = []
frame_pose = tf.convert_to_tensor(np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(range_image_top_pose.data), range_image_top_pose.shape.dims
)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[..., 0], range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
if target_beam != 64:
step = int(64 / target_beam)
range_image_top_pose_tensor = range_image_top_pose_tensor[0:64:step, :, :, :]
for c in calibrations:
points_single, cp_points_single, points_NLZ_single, points_intensity_single, points_elongation_single \
= [], [], [], [], []
for cur_ri_index in ri_index:
range_image = range_images[c.name][cur_ri_index]
if len(c.beam_inclinations) == 0: # pylint: disable=g-explicit-length-test
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_image.shape.dims[0])
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(range_image.data), range_image.shape.dims)
            # optionally subsample the 64-beam lidar to 32 or 16 beams by keeping every step-th row
if target_beam != 64:
step = int(64 / target_beam)
range_image_tensor = range_image_tensor[0:64:step, :, :]
beam_inclinations = beam_inclinations[0:64:step]
pixel_pose_local = None
frame_pose_local = None
if c.name == dataset_pb2.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_NLZ = range_image_tensor[..., 3]
range_image_intensity = range_image_tensor[..., 1]
range_image_elongation = range_image_tensor[..., 2]
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
points_tensor = tf.gather_nd(range_image_cartesian,
tf.where(range_image_mask))
points_NLZ_tensor = tf.gather_nd(range_image_NLZ, tf.compat.v1.where(range_image_mask))
points_intensity_tensor = tf.gather_nd(range_image_intensity, tf.compat.v1.where(range_image_mask))
points_elongation_tensor = tf.gather_nd(range_image_elongation, tf.compat.v1.where(range_image_mask))
cp = camera_projections[c.name][0]
cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
points_single.append(points_tensor.numpy())
cp_points_single.append(cp_points_tensor.numpy())
points_NLZ_single.append(points_NLZ_tensor.numpy())
points_intensity_single.append(points_intensity_tensor.numpy())
points_elongation_single.append(points_elongation_tensor.numpy())
points.append(np.concatenate(points_single, axis=0))
cp_points.append(np.concatenate(cp_points_single, axis=0))
points_NLZ.append(np.concatenate(points_NLZ_single, axis=0))
points_intensity.append(np.concatenate(points_intensity_single, axis=0))
points_elongation.append(np.concatenate(points_elongation_single, axis=0))
return points, cp_points, points_NLZ, points_intensity, points_elongation
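# Usage sketch (hypothetical; inputs come from parse_range_image_and_camera_projection).
# Each returned entry is a per-lidar array, ordered by laser name (TOP first):
#   points, cp_points, nlz, intensity, elongation = convert_range_image_to_point_cloud(
#       frame, range_images, camera_projections, range_image_top_pose,
#       ri_index=(0, 1),   # keep both lidar returns
#       target_beam=32)    # subsample the 64-beam TOP lidar to 32 beams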
def save_lidar_points(frame, cur_save_path, use_two_returns=True):
range_images, camera_projections, range_image_top_pose = \
frame_utils.parse_range_image_and_camera_projection(frame)
points, cp_points, points_in_NLZ_flag, points_intensity, points_elongation = convert_range_image_to_point_cloud(
frame, range_images, camera_projections, range_image_top_pose, ri_index=(0, 1) if use_two_returns else (0,), target_beam=64
)
# 3d points in vehicle frame.
points_all = np.concatenate(points, axis=0)
points_in_NLZ_flag = np.concatenate(points_in_NLZ_flag, axis=0).reshape(-1, 1)
points_intensity = np.concatenate(points_intensity, axis=0).reshape(-1, 1)
points_elongation = np.concatenate(points_elongation, axis=0).reshape(-1, 1)
num_points_of_each_lidar = [point.shape[0] for point in points]
save_points = np.concatenate([
points_all, points_intensity, points_elongation, points_in_NLZ_flag
], axis=-1).astype(np.float32)
np.save(cur_save_path, save_points)
# print('saving to ', cur_save_path)
return num_points_of_each_lidar
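# The saved array is (N, 6) float32: [x, y, z, intensity, elongation, NLZ_flag].
# Reading it back (sketch):
#   pts = np.load(cur_save_path)           # (N, 6)
#   xyz, nlz_flag = pts[:, :3], pts[:, 5]  # NLZ_flag == -1 means outside any no-label zone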
def process_single_sequence(sequence_file, save_path, sampled_interval, has_label=True, use_two_returns=True):
sequence_name = os.path.splitext(os.path.basename(sequence_file))[0]
# print('Load record (sampled_interval=%d): %s' % (sampled_interval, sequence_name))
if not sequence_file.exists():
print('NotFoundError: %s' % sequence_file)
return []
dataset = tf.data.TFRecordDataset(str(sequence_file), compression_type='')
cur_save_dir = save_path / sequence_name
cur_save_dir.mkdir(parents=True, exist_ok=True)
pkl_file = cur_save_dir / ('%s.pkl' % sequence_name)
sequence_infos = []
    if pkl_file.exists():
        with open(pkl_file, 'rb') as f:
            sequence_infos = pickle.load(f)
        print('Skip sequence since it has been processed before: %s' % pkl_file)
        return sequence_infos
for cnt, data in enumerate(dataset):
if cnt % sampled_interval != 0:
continue
# print(sequence_name, cnt)
frame = dataset_pb2.Frame()
frame.ParseFromString(bytearray(data.numpy()))
info = {}
pc_info = {'num_features': 5, 'lidar_sequence': sequence_name, 'sample_idx': cnt}
info['point_cloud'] = pc_info
info['frame_id'] = sequence_name + ('_%03d' % cnt)
info['metadata'] = {
'context_name': frame.context.name,
'timestamp_micros': frame.timestamp_micros
}
image_info = {}
for j in range(5):
width = frame.context.camera_calibrations[j].width
height = frame.context.camera_calibrations[j].height
image_info.update({'image_shape_%d' % j: (height, width)})
info['image'] = image_info
pose = np.array(frame.pose.transform, dtype=np.float32).reshape(4, 4)
info['pose'] = pose
if has_label:
annotations = generate_labels(frame)
info['annos'] = annotations
num_points_of_each_lidar = save_lidar_points(
frame, cur_save_dir / ('%04d.npy' % cnt), use_two_returns=use_two_returns
)
info['num_points_of_each_lidar'] = num_points_of_each_lidar
sequence_infos.append(info)
with open(pkl_file, 'wb') as f:
pickle.dump(sequence_infos, f)
print('Infos are saved to (sampled_interval=%d): %s' % (sampled_interval, pkl_file))
return sequence_infos
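# Usage sketch (hypothetical paths): convert one .tfrecord into per-frame .npy files
# plus a <sequence_name>.pkl info list:
#   infos = process_single_sequence(
#       sequence_file=Path('raw_data/segment-xxx_with_camera_labels.tfrecord'),
#       save_path=Path('waymo_processed_data'),
#       sampled_interval=1, has_label=True, use_two_returns=True)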
| 11,279
| 44.301205
| 135
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/waymo/waymo_dataset.py
|
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset
# Reference https://github.com/open-mmlab/OpenPCDet
# Written by Shaoshuai Shi, Chaoxu Guo
# All Rights Reserved 2019-2020.
import os
import io
import pickle
import copy
import numpy as np
import torch
import multiprocessing
import SharedArray
import torch.distributed as dist
from tqdm import tqdm
from pathlib import Path
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, common_utils
from ..dataset import DatasetTemplate
class WaymoDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/waymo'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
if self.oss_path is not None:
self.data_path = os.path.join(self.oss_path, self.dataset_cfg.PROCESSED_DATA_TAG)
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
logger.info(f'self.data_path: {self.data_path}')
oss_data_list_manifest = self.oss_path + '/manifest.lst'
if not self.client.contains(oss_data_list_manifest):
logger.info(f'listing files in {self.data_path}')
self.oss_data_list = self.list_oss_dir(self.data_path, with_info=False)
self.client.put(oss_data_list_manifest, '\n'.join(self.oss_data_list).encode())
logger.info(f'Listing finished and cache the oss_data_list to {oss_data_list_manifest}')
else:
logger.info(f'loading the self.oss_data_list from {oss_data_list_manifest}')
self.oss_data_list = self.client.get(oss_data_list_manifest).decode().splitlines()
else:
self.data_path = self.root_path / self.dataset_cfg.PROCESSED_DATA_TAG
self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()]
self.infos = []
self.include_waymo_data(self.mode)
self.use_shared_memory = self.dataset_cfg.get('USE_SHARED_MEMORY', False) and self.training
if self.use_shared_memory:
self.shared_memory_file_limit = self.dataset_cfg.get('SHARED_MEMORY_FILE_LIMIT', 0x7FFFFFFF)
self.load_data_to_shared_memory()
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training,
root_path=self.root_path, logger=self.logger
)
self.split = split
split_dir = os.path.join(self.root_path, 'ImageSets', self.split+'.txt')
self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()]
self.infos = []
self.include_waymo_data(self.mode)
def include_waymo_data(self, mode):
self.logger.info('Loading Waymo dataset')
waymo_infos = []
num_skipped_infos = 0
self.logger.info('start to include waymo data')
for k in tqdm(range(len(self.sample_sequence_list))):
sequence_name = os.path.splitext(self.sample_sequence_list[k])[0]
if self.oss_path is None:
info_path = self.data_path / sequence_name / ('%s.pkl' % sequence_name)
info_path = self.check_sequence_name_with_all_version(info_path)
if not info_path.exists():
num_skipped_infos += 1
continue
else:
info_path = os.path.join(self.data_path, sequence_name, ('%s.pkl' % sequence_name))
info_path = self.check_sequence_name_with_all_version(info_path)
if not self.oss_exist(info_path):
num_skipped_infos += 1
continue
if self.oss_path is None:
with open(info_path, 'rb') as f:
infos = pickle.load(f)
waymo_infos.extend(infos)
else:
#pkl_bytes = self.client.get(info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
waymo_infos.extend(infos)
self.infos.extend(waymo_infos[:])
self.logger.info('Total skipped info %s' % num_skipped_infos)
self.logger.info('Total samples for Waymo dataset: %d' % (len(waymo_infos)))
if self.dataset_cfg.SAMPLED_INTERVAL[mode] > 1:
sampled_waymo_infos = []
for k in range(0, len(self.infos), self.dataset_cfg.SAMPLED_INTERVAL[mode]):
sampled_waymo_infos.append(self.infos[k])
self.infos = sampled_waymo_infos
self.logger.info('Total sampled samples for Waymo dataset: %d' % len(self.infos))
def load_data_to_shared_memory(self):
self.logger.info(f'Loading training data to shared memory (file limit={self.shared_memory_file_limit})')
cur_rank, num_gpus = common_utils.get_dist_info()
all_infos = self.infos[:self.shared_memory_file_limit] \
if self.shared_memory_file_limit < len(self.infos) else self.infos
cur_infos = all_infos[cur_rank::num_gpus]
for info in cur_infos:
pc_info = info['point_cloud']
sequence_name = pc_info['lidar_sequence']
sample_idx = pc_info['sample_idx']
sa_key = f'{sequence_name}___{sample_idx}'
if os.path.exists(f"/dev/shm/{sa_key}"):
continue
points = self.get_lidar(sequence_name, sample_idx)
common_utils.sa_create(f"shm://{sa_key}", points)
dist.barrier()
self.logger.info('Training data has been saved to shared memory')
def clean_shared_memory(self):
self.logger.info(f'Clean training data from shared memory (file limit={self.shared_memory_file_limit})')
cur_rank, num_gpus = common_utils.get_dist_info()
all_infos = self.infos[:self.shared_memory_file_limit] \
if self.shared_memory_file_limit < len(self.infos) else self.infos
cur_infos = all_infos[cur_rank::num_gpus]
for info in cur_infos:
pc_info = info['point_cloud']
sequence_name = pc_info['lidar_sequence']
sample_idx = pc_info['sample_idx']
sa_key = f'{sequence_name}___{sample_idx}'
if not os.path.exists(f"/dev/shm/{sa_key}"):
continue
SharedArray.delete(f"shm://{sa_key}")
if num_gpus > 1:
dist.barrier()
self.logger.info('Training data has been deleted from shared memory')
# @staticmethod
# def check_sequence_name_with_all_version(sequence_file):
# if not sequence_file.exists():
# found_sequence_file = sequence_file
# for pre_text in ['training', 'validation', 'testing']:
# if not sequence_file.exists():
# temp_sequence_file = Path(str(sequence_file).replace('segment', pre_text + '_segment'))
# if temp_sequence_file.exists():
# found_sequence_file = temp_sequence_file
# break
# if not found_sequence_file.exists():
# found_sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
# if found_sequence_file.exists():
# sequence_file = found_sequence_file
# return sequence_file
def check_sequence_name_with_all_version(self, sequence_file):
if self.oss_path is None:
if not sequence_file.exists():
found_sequence_file = sequence_file
for pre_text in ['training', 'validation', 'testing']:
if not sequence_file.exists():
temp_sequence_file = Path(str(sequence_file).replace('segment', pre_text + '_segment'))
if temp_sequence_file.exists():
found_sequence_file = temp_sequence_file
break
if not found_sequence_file.exists():
found_sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
if found_sequence_file.exists():
sequence_file = found_sequence_file
else:
if not self.oss_exist(sequence_file):
found_sequence_file = sequence_file
for pre_text in ['training', 'validation', 'testing']:
if not self.oss_exist(sequence_file):
#temp_sequence_file = Path(str(sequence_file).replace('segment', pre_text + '_segment'))
temp_sequence_file = sequence_file.replace('segment', pre_text + '_segment')
if self.oss_exist(temp_sequence_file):
found_sequence_file = temp_sequence_file
break
if not self.oss_exist(found_sequence_file):
#found_sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
found_sequence_file = sequence_file.replace('_with_camera_labels', '')
if self.oss_exist(found_sequence_file):
sequence_file = found_sequence_file
return sequence_file
# def check_sequence_name_with_all_version(self, sequence_file):
# if self.oss_path is not None:
# if '_with_camera_labels' not in sequence_file and not self.oss_exist(sequence_file):
# sequence_file = sequence_file[:-9] + '_with_camera_labels.tfrecord'
# if '_with_camera_labels' in sequence_file and not self.oss_exist(sequence_file):
# sequence_file = sequence_file.replace('_with_camera_labels', '')
# else:
# if '_with_camera_labels' not in str(sequence_file) and not os.path.exists(sequence_file):
# sequence_file = Path(str(sequence_file[:-9]) + '_with_camera_labels.tfrecord')
# if '_with_camera_labels' in str(sequence_file) and not os.path.exists(sequence_file):
# sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
# return sequence_file
"""
For loading files from OSS
"""
def list_oss_dir(self, oss_path, with_info=False):
s3_dir = self.fix_path(oss_path)
files_iter = self.client.get_file_iterator(s3_dir)
if with_info:
file_list = {p: k for p, k in files_iter}
else:
file_list = [p for p, k in files_iter]
return file_list
    @staticmethod
    def fix_path(path_str):
        try:
            st_ = str(path_str)
            if "s3://" in st_:
                return st_
            if "s3:/" in st_:
                # repair a single-slash prefix, e.g. 's3:/bucket/x' -> 's3://bucket/x';
                # note str.strip('s3:/') would also eat unrelated leading/trailing characters
                return st_.replace("s3:/", "s3://", 1)
            return "s3://" + st_
        except Exception:
            raise TypeError('fix_path expects a string-like path, got: %r' % (path_str,))
    def oss_exist(self, file_path, refresh=False):
        if self.data_path is None:
            raise IndexError("No initialized path set!")
        if refresh:
            self.oss_data_list = self.list_oss_dir(self.data_path, with_info=False)
        # slice off the exact 's3://' prefix; strip("s3://") would also remove other chars
        pure_name = self.fix_path(file_path)[len("s3://"):]
        return pure_name in self.oss_data_list
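    # Normalization sketch (illustrative inputs):
    #   fix_path('s3://bucket/waymo') -> 's3://bucket/waymo'  (already well-formed)
    #   fix_path('s3:/bucket/waymo')  -> 's3://bucket/waymo'  (single slash repaired)
    #   fix_path('bucket/waymo')      -> 's3://bucket/waymo'  (prefix added)
    # oss_exist() then matches the prefix-free key against the cached manifest list.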
def get_infos(self, raw_data_path, save_path, num_workers=multiprocessing.cpu_count(), has_label=True, sampled_interval=1):
from functools import partial
from . import waymo_utils
        print('---------------The waymo sample interval is %d, total number of sequences is %d-----------------'
              % (sampled_interval, len(self.sample_sequence_list)))
process_single_sequence = partial(
waymo_utils.process_single_sequence,
save_path=save_path, sampled_interval=sampled_interval, has_label=has_label
)
sample_sequence_file_list = [
self.check_sequence_name_with_all_version(raw_data_path / sequence_file)
for sequence_file in self.sample_sequence_list
]
with multiprocessing.Pool(num_workers) as p:
sequence_infos = list(tqdm(p.imap(process_single_sequence, sample_sequence_file_list),
total=len(sample_sequence_file_list)))
all_sequences_infos = [item for infos in sequence_infos for item in infos]
return all_sequences_infos
def get_lidar(self, sequence_name, sample_idx):
if self.oss_path is None:
#lidar_file = self.data_path / sequence_name / ('%04d.npy' % sample_idx) #
lidar_file = os.path.join(self.data_path, sequence_name, ('%04d.npy' % sample_idx))
            point_features = np.load(lidar_file)  # (N, 6): [x, y, z, intensity, elongation, NLZ_flag]
else:
lidar_file = os.path.join(self.data_path, sequence_name, ('%04d.npy' % sample_idx))
#npy_bytes = self.client.get(lidar_file)
npy_bytes = self.client.get(lidar_file, update_cache=True)
point_features = np.load(io.BytesIO(npy_bytes))
points_all, NLZ_flag = point_features[:, 0:5], point_features[:, 5]
if not self.dataset_cfg.get('DISABLE_NLZ_FLAG_ON_POINTS', False):
points_all = points_all[NLZ_flag == -1]
points_all[:, 3] = np.tanh(points_all[:, 3])
return points_all
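    # Shape sketch: the raw .npy holds (N, 6) features; after dropping the NLZ column
    # (and, unless DISABLE_NLZ_FLAG_ON_POINTS is set, the points inside no-label zones)
    # this returns (N', 5): [x, y, z, tanh(intensity), elongation], e.g.
    #   points = self.get_lidar(sequence_name, sample_idx)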
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
pc_info = info['point_cloud']
sequence_name = pc_info['lidar_sequence']
sample_idx = pc_info['sample_idx']
if self.use_shared_memory and index < self.shared_memory_file_limit:
sa_key = f'{sequence_name}___{sample_idx}'
points = SharedArray.attach(f"shm://{sa_key}").copy()
else:
points = self.get_lidar(sequence_name, sample_idx)
lidar_z = points[:, 2]
input_dict = {
'db_flag': "waymo",
'points': points,
'frame_id': info['frame_id'],
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='unknown')
if self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False):
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(annos['gt_boxes_lidar'])
else:
gt_boxes_lidar = annos['gt_boxes_lidar']
lidar_z = gt_boxes_lidar[:, 2]
if self.training and self.dataset_cfg.get('FILTER_EMPTY_BOXES_FOR_TRAIN', False):
mask = (annos['num_points_in_gt'] > 0) # filter empty boxes
annos['name'] = annos['name'][mask]
gt_boxes_lidar = gt_boxes_lidar[mask]
annos['num_points_in_gt'] = annos['num_points_in_gt'][mask]
input_dict.update({
'gt_names': annos['name'],
'gt_boxes': gt_boxes_lidar,
'num_points_in_gt': annos.get('num_points_in_gt', None)
})
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
if self.dataset_cfg.get('FOV_POINTS_ONLY', None):
input_dict['points'] = self.extract_fov_data(
input_dict['points'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
if input_dict['gt_boxes'] is not None:
fov_gt_flag = self.extract_fov_gt(
input_dict['gt_boxes'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
input_dict.update({
'gt_names': input_dict['gt_names'][fov_gt_flag],
'gt_boxes': input_dict['gt_boxes'][fov_gt_flag],
'num_points_in_gt': input_dict['num_points_in_gt'][fov_gt_flag] if input_dict['num_points_in_gt'] is not None else None
})
# load saved pseudo label for unlabeled data
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data(data_dict=input_dict)
data_dict['metadata'] = info.get('metadata', info['frame_id'])
data_dict.pop('num_points_in_gt', None)
return data_dict
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_lidar': np.zeros([num_samples, 7])
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
single_pred_dict['metadata'] = batch_dict['metadata'][index]
annos.append(single_pred_dict)
return annos
def evaluation(self, det_annos, class_names, **kwargs):
if 'annos' not in self.infos[0].keys():
return 'No ground-truth boxes for evaluation', {}
def kitti_eval(eval_det_annos, eval_gt_annos):
from ..kitti.kitti_object_eval_python import eval as kitti_eval
from ..kitti import kitti_utils
map_name_to_kitti = {
'Vehicle': 'Car',
'Pedestrian': 'Pedestrian',
'Cyclist': 'Cyclist',
'Sign': 'Sign',
'Car': 'Car'
}
kitti_utils.transform_annotations_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti)
kitti_utils.transform_annotations_to_kitti_format(
eval_gt_annos, map_name_to_kitti=map_name_to_kitti,
info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
)
kitti_class_names = [map_name_to_kitti[x] for x in class_names]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
)
return ap_result_str, ap_dict
def waymo_eval(eval_det_annos, eval_gt_annos):
from .waymo_eval import OpenPCDetWaymoDetectionMetricsEstimator
eval = OpenPCDetWaymoDetectionMetricsEstimator()
ap_dict = eval.waymo_evaluation(
eval_det_annos, eval_gt_annos, class_name=class_names,
distance_thresh=1000, fake_gt_infos=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
)
ap_result_str = '\n'
for key in ap_dict:
ap_dict[key] = ap_dict[key][0]
ap_result_str += '%s: %.4f \n' % (key, ap_dict[key])
return ap_result_str, ap_dict
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.infos]
if kwargs['eval_metric'] == 'kitti':
ap_result_str, ap_dict = kitti_eval(eval_det_annos, eval_gt_annos)
elif kwargs['eval_metric'] == 'waymo':
ap_result_str, ap_dict = waymo_eval(eval_det_annos, eval_gt_annos)
else:
raise NotImplementedError
return ap_result_str, ap_dict
def create_groundtruth_database(self, info_path, save_path, used_classes=None, split='train', sampled_interval=10,
processed_data_tag=None):
database_save_path = save_path / ('%s_gt_database_%s_sampled_%d' % (processed_data_tag, split, sampled_interval))
db_info_save_path = save_path / ('%s_waymo_dbinfos_%s_sampled_%d.pkl' % (processed_data_tag, split, sampled_interval))
db_data_save_path = save_path / ('%s_gt_database_%s_sampled_%d_global.npy' % (processed_data_tag, split, sampled_interval))
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
point_offset_cnt = 0
stacked_gt_points = []
for k in range(0, len(infos), sampled_interval):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
pc_info = info['point_cloud']
sequence_name = pc_info['lidar_sequence']
sample_idx = pc_info['sample_idx']
points = self.get_lidar(sequence_name, sample_idx)
annos = info['annos']
names = annos['name']
difficulty = annos['difficulty']
gt_boxes = annos['gt_boxes_lidar']
if k % 4 != 0 and len(names) > 0:
mask = (names == 'Vehicle')
names = names[~mask]
difficulty = difficulty[~mask]
gt_boxes = gt_boxes[~mask]
if k % 2 != 0 and len(names) > 0:
mask = (names == 'Pedestrian')
names = names[~mask]
difficulty = difficulty[~mask]
gt_boxes = gt_boxes[~mask]
num_obj = gt_boxes.shape[0]
if num_obj == 0:
continue
box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
).long().squeeze(dim=0).cpu().numpy()
for i in range(num_obj):
filename = '%s_%04d_%s_%d.bin' % (sequence_name, sample_idx, names[i], i)
filepath = database_save_path / filename
gt_points = points[box_idxs_of_pts == i]
gt_points[:, :3] -= gt_boxes[i, :3]
if (used_classes is None) or names[i] in used_classes:
                    with open(filepath, 'wb') as f:  # binary mode; tofile writes raw bytes
gt_points.tofile(f)
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'sequence_name': sequence_name,
'sample_idx': sample_idx, 'gt_idx': i, 'box3d_lidar': gt_boxes[i],
'num_points_in_gt': gt_points.shape[0], 'difficulty': difficulty[i]}
# it will be used if you choose to use shared memory for gt sampling
stacked_gt_points.append(gt_points)
db_info['global_data_offset'] = [point_offset_cnt, point_offset_cnt + gt_points.shape[0]]
point_offset_cnt += gt_points.shape[0]
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
# it will be used if you choose to use shared memory for gt sampling
stacked_gt_points = np.concatenate(stacked_gt_points, axis=0)
np.save(db_data_save_path, stacked_gt_points)
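    # Usage sketch (hypothetical paths; mirrors create_waymo_infos() below):
    #   dataset.create_groundtruth_database(
    #       info_path=Path('data/waymo/waymo_processed_data_infos_train.pkl'),
    #       save_path=Path('data/waymo'), split='train', sampled_interval=1,
    #       used_classes=['Vehicle', 'Pedestrian', 'Cyclist'],
    #       processed_data_tag='waymo_processed_data')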
def create_waymo_infos(dataset_cfg, class_names, data_path, save_path,
raw_data_tag='raw_data', processed_data_tag='waymo_processed_data',
workers=min(16, multiprocessing.cpu_count())):
dataset = WaymoDataset(
dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path,
training=False, logger=common_utils.create_logger()
)
train_split, val_split = 'train', 'val'
train_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, train_split))
val_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, val_split))
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
print('---------------Start to generate data infos---------------')
dataset.set_split(train_split)
waymo_infos_train = dataset.get_infos(
raw_data_path=data_path / raw_data_tag,
save_path=save_path / processed_data_tag, num_workers=workers, has_label=True,
sampled_interval=1
)
with open(train_filename, 'wb') as f:
pickle.dump(waymo_infos_train, f)
print('----------------Waymo info train file is saved to %s----------------' % train_filename)
dataset.set_split(val_split)
waymo_infos_val = dataset.get_infos(
raw_data_path=data_path / raw_data_tag,
save_path=save_path / processed_data_tag, num_workers=workers, has_label=True,
sampled_interval=1
)
with open(val_filename, 'wb') as f:
pickle.dump(waymo_infos_val, f)
print('----------------Waymo info val file is saved to %s----------------' % val_filename)
print('---------------Start create groundtruth database for data augmentation---------------')
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
dataset.set_split(train_split)
dataset.create_groundtruth_database(
info_path=train_filename, save_path=save_path, split='train', sampled_interval=1,
used_classes=['Vehicle', 'Pedestrian', 'Cyclist'], processed_data_tag=processed_data_tag
)
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
parser.add_argument('--func', type=str, default='create_waymo_infos', help='')
parser.add_argument('--processed_data_tag', type=str, default='waymo_processed_data_v0_5_0', help='')
args = parser.parse_args()
if args.func == 'create_waymo_infos':
import yaml
from easydict import EasyDict
        try:
            yaml_config = yaml.load(open(args.cfg_file), Loader=yaml.FullLoader)
        except AttributeError:
            # older PyYAML without FullLoader; safe_load takes no Loader argument
            yaml_config = yaml.safe_load(open(args.cfg_file))
dataset_cfg = EasyDict(yaml_config)
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
dataset_cfg.PROCESSED_DATA_TAG = args.processed_data_tag
create_waymo_infos(
dataset_cfg=dataset_cfg,
class_names=['Vehicle', 'Pedestrian', 'Cyclist'],
data_path=ROOT_DIR / 'data' / 'waymo',
save_path=ROOT_DIR / 'data' / 'waymo',
raw_data_tag='raw_data',
processed_data_tag=args.processed_data_tag
)
| 28,281
| 44.035032
| 139
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/waymo/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/waymo/waymo_dataset_ada.py
|
import os
import io
import pickle
import copy
import numpy as np
import torch
import multiprocessing
import SharedArray
import torch.distributed as dist
from tqdm import tqdm
from pathlib import Path
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, common_utils
from ..dataset import DatasetTemplate
class ActiveWaymoDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/waymo'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, sample_info_path=None):
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
if self.oss_path is not None:
self.data_path = os.path.join(self.oss_path, self.dataset_cfg.PROCESSED_DATA_TAG)
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
logger.info(f'self.data_path: {self.data_path}')
oss_data_list_manifest = self.oss_path + '/manifest.lst'
if not self.client.contains(oss_data_list_manifest):
logger.info(f'listing files in {self.data_path}')
self.oss_data_list = self.list_oss_dir(self.data_path, with_info=False)
self.client.put(oss_data_list_manifest, '\n'.join(self.oss_data_list).encode())
logger.info(f'Listing finished and cache the oss_data_list to {oss_data_list_manifest}')
else:
logger.info(f'loading the self.oss_data_list from {oss_data_list_manifest}')
self.oss_data_list = self.client.get(oss_data_list_manifest).decode().splitlines()
else:
self.data_path = self.root_path / self.dataset_cfg.PROCESSED_DATA_TAG
self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()]
self.infos = []
self.include_waymo_data(self.mode, sample_info_path)
self.use_shared_memory = self.dataset_cfg.get('USE_SHARED_MEMORY', False) and self.training
if self.use_shared_memory:
self.shared_memory_file_limit = self.dataset_cfg.get('SHARED_MEMORY_FILE_LIMIT', 0x7FFFFFFF)
self.load_data_to_shared_memory()
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training,
root_path=self.root_path, logger=self.logger
)
self.split = split
split_dir = os.path.join(self.root_path, 'ImageSets', self.split+'.txt')
self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()]
self.infos = []
self.include_waymo_data(self.mode)
def include_waymo_data(self, mode, sample_info_path=None):
self.logger.info('Loading Waymo dataset')
waymo_infos = []
num_skipped_infos = 0
self.logger.info('start to include waymo data')
        if sample_info_path is not None:
with open(sample_info_path, 'rb') as f:
infos = pickle.load(f)
waymo_infos.extend(infos)
else:
for k in tqdm(range(len(self.sample_sequence_list))):
sequence_name = os.path.splitext(self.sample_sequence_list[k])[0]
if self.oss_path is None:
info_path = self.data_path / sequence_name / ('%s.pkl' % sequence_name)
info_path = self.check_sequence_name_with_all_version(info_path)
if not info_path.exists():
num_skipped_infos += 1
continue
else:
info_path = os.path.join(self.data_path, sequence_name, ('%s.pkl' % sequence_name))
info_path = self.check_sequence_name_with_all_version(info_path)
if not self.oss_exist(info_path):
num_skipped_infos += 1
continue
if self.oss_path is None:
with open(info_path, 'rb') as f:
infos = pickle.load(f)
waymo_infos.extend(infos)
else:
#pkl_bytes = self.client.get(info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
waymo_infos.extend(infos)
self.infos.extend(waymo_infos[:])
self.logger.info('Total skipped info %s' % num_skipped_infos)
self.logger.info('Total samples for Waymo dataset: %d' % (len(waymo_infos)))
        if self.dataset_cfg.get('SOURCE_SAMPLE', False):
sampled_waymo_infos = []
for k in range(0, len(self.infos), self.dataset_cfg.SOURCE_SAMPLED_INTERVAL):
sampled_waymo_infos.append(self.infos[k])
self.infos = sampled_waymo_infos
self.logger.info('Total sampled samples for Waymo dataset: %d' % len(self.infos))
        elif self.dataset_cfg.SAMPLED_INTERVAL[mode] > 1 and sample_info_path is None:
sampled_waymo_infos = []
for k in range(0, len(self.infos), self.dataset_cfg.SAMPLED_INTERVAL[mode]):
sampled_waymo_infos.append(self.infos[k])
self.infos = sampled_waymo_infos
self.logger.info('Total sampled samples for Waymo dataset: %d' % len(self.infos))
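    # Flow sketch (hypothetical active-learning round): pass back a pre-selected frame list
    # so that interval subsampling is skipped:
    #   selected = [all_infos[i] for i in chosen_indices]   # chosen by a query strategy
    #   with open('active_round1_infos.pkl', 'wb') as f:
    #       pickle.dump(selected, f)
    #   dataset = ActiveWaymoDataset(dataset_cfg, class_names, training=True,
    #                                sample_info_path='active_round1_infos.pkl')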
def load_data_to_shared_memory(self):
self.logger.info(f'Loading training data to shared memory (file limit={self.shared_memory_file_limit})')
cur_rank, num_gpus = common_utils.get_dist_info()
all_infos = self.infos[:self.shared_memory_file_limit] \
if self.shared_memory_file_limit < len(self.infos) else self.infos
cur_infos = all_infos[cur_rank::num_gpus]
for info in cur_infos:
pc_info = info['point_cloud']
sequence_name = pc_info['lidar_sequence']
sample_idx = pc_info['sample_idx']
sa_key = f'{sequence_name}___{sample_idx}'
if os.path.exists(f"/dev/shm/{sa_key}"):
continue
points = self.get_lidar(sequence_name, sample_idx)
common_utils.sa_create(f"shm://{sa_key}", points)
dist.barrier()
self.logger.info('Training data has been saved to shared memory')
def clean_shared_memory(self):
self.logger.info(f'Clean training data from shared memory (file limit={self.shared_memory_file_limit})')
cur_rank, num_gpus = common_utils.get_dist_info()
all_infos = self.infos[:self.shared_memory_file_limit] \
if self.shared_memory_file_limit < len(self.infos) else self.infos
cur_infos = all_infos[cur_rank::num_gpus]
for info in cur_infos:
pc_info = info['point_cloud']
sequence_name = pc_info['lidar_sequence']
sample_idx = pc_info['sample_idx']
sa_key = f'{sequence_name}___{sample_idx}'
if not os.path.exists(f"/dev/shm/{sa_key}"):
continue
SharedArray.delete(f"shm://{sa_key}")
if num_gpus > 1:
dist.barrier()
self.logger.info('Training data has been deleted from shared memory')
# @staticmethod
# def check_sequence_name_with_all_version(sequence_file):
# if not sequence_file.exists():
# found_sequence_file = sequence_file
# for pre_text in ['training', 'validation', 'testing']:
# if not sequence_file.exists():
# temp_sequence_file = Path(str(sequence_file).replace('segment', pre_text + '_segment'))
# if temp_sequence_file.exists():
# found_sequence_file = temp_sequence_file
# break
# if not found_sequence_file.exists():
# found_sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
# if found_sequence_file.exists():
# sequence_file = found_sequence_file
# return sequence_file
def check_sequence_name_with_all_version(self, sequence_file):
if self.oss_path is None:
if not sequence_file.exists():
found_sequence_file = sequence_file
for pre_text in ['training', 'validation', 'testing']:
if not sequence_file.exists():
temp_sequence_file = Path(str(sequence_file).replace('segment', pre_text + '_segment'))
if temp_sequence_file.exists():
found_sequence_file = temp_sequence_file
break
if not found_sequence_file.exists():
found_sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
if found_sequence_file.exists():
sequence_file = found_sequence_file
else:
if not self.oss_exist(sequence_file):
found_sequence_file = sequence_file
for pre_text in ['training', 'validation', 'testing']:
if not self.oss_exist(sequence_file):
#temp_sequence_file = Path(str(sequence_file).replace('segment', pre_text + '_segment'))
temp_sequence_file = sequence_file.replace('segment', pre_text + '_segment')
if self.oss_exist(temp_sequence_file):
found_sequence_file = temp_sequence_file
break
if not self.oss_exist(found_sequence_file):
#found_sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
found_sequence_file = sequence_file.replace('_with_camera_labels', '')
if self.oss_exist(found_sequence_file):
sequence_file = found_sequence_file
return sequence_file
# def check_sequence_name_with_all_version(self, sequence_file):
# if self.oss_path is not None:
# if '_with_camera_labels' not in sequence_file and not self.oss_exist(sequence_file):
# sequence_file = sequence_file[:-9] + '_with_camera_labels.tfrecord'
# if '_with_camera_labels' in sequence_file and not self.oss_exist(sequence_file):
# sequence_file = sequence_file.replace('_with_camera_labels', '')
# else:
# if '_with_camera_labels' not in str(sequence_file) and not os.path.exists(sequence_file):
# sequence_file = Path(str(sequence_file[:-9]) + '_with_camera_labels.tfrecord')
# if '_with_camera_labels' in str(sequence_file) and not os.path.exists(sequence_file):
# sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
# return sequence_file
"""
For loading files from OSS
"""
def list_oss_dir(self, oss_path, with_info=False):
s3_dir = self.fix_path(oss_path)
files_iter = self.client.get_file_iterator(s3_dir)
if with_info:
file_list = {p: k for p, k in files_iter}
else:
file_list = [p for p, k in files_iter]
return file_list
    @staticmethod
    def fix_path(path_str):
        try:
            st_ = str(path_str)
            if "s3://" in st_:
                return st_
            if "s3:/" in st_:
                # repair a single-slash prefix, e.g. 's3:/bucket/x' -> 's3://bucket/x';
                # note str.strip('s3:/') would also eat unrelated leading/trailing characters
                return st_.replace("s3:/", "s3://", 1)
            return "s3://" + st_
        except Exception:
            raise TypeError('fix_path expects a string-like path, got: %r' % (path_str,))
    def oss_exist(self, file_path, refresh=False):
        if self.data_path is None:
            raise IndexError("No initialized path set!")
        if refresh:
            self.oss_data_list = self.list_oss_dir(self.data_path, with_info=False)
        # slice off the exact 's3://' prefix; strip("s3://") would also remove other chars
        pure_name = self.fix_path(file_path)[len("s3://"):]
        return pure_name in self.oss_data_list
def get_infos(self, raw_data_path, save_path, num_workers=multiprocessing.cpu_count(), has_label=True, sampled_interval=1):
from functools import partial
from . import waymo_utils
        print('---------------The waymo sample interval is %d, total number of sequences is %d-----------------'
              % (sampled_interval, len(self.sample_sequence_list)))
process_single_sequence = partial(
waymo_utils.process_single_sequence,
save_path=save_path, sampled_interval=sampled_interval, has_label=has_label
)
sample_sequence_file_list = [
self.check_sequence_name_with_all_version(raw_data_path / sequence_file)
for sequence_file in self.sample_sequence_list
]
with multiprocessing.Pool(num_workers) as p:
sequence_infos = list(tqdm(p.imap(process_single_sequence, sample_sequence_file_list),
total=len(sample_sequence_file_list)))
all_sequences_infos = [item for infos in sequence_infos for item in infos]
return all_sequences_infos
def get_lidar(self, sequence_name, sample_idx):
if self.oss_path is None:
#lidar_file = self.data_path / sequence_name / ('%04d.npy' % sample_idx) #
lidar_file = os.path.join(self.data_path, sequence_name, ('%04d.npy' % sample_idx))
            point_features = np.load(lidar_file)  # (N, 6): [x, y, z, intensity, elongation, NLZ_flag]
else:
lidar_file = os.path.join(self.data_path, sequence_name, ('%04d.npy' % sample_idx))
#npy_bytes = self.client.get(lidar_file)
npy_bytes = self.client.get(lidar_file, update_cache=True)
point_features = np.load(io.BytesIO(npy_bytes))
points_all, NLZ_flag = point_features[:, 0:5], point_features[:, 5]
if not self.dataset_cfg.get('DISABLE_NLZ_FLAG_ON_POINTS', False):
points_all = points_all[NLZ_flag == -1]
points_all[:, 3] = np.tanh(points_all[:, 3])
return points_all
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
pc_info = info['point_cloud']
sequence_name = pc_info['lidar_sequence']
sample_idx = pc_info['sample_idx']
if self.use_shared_memory and index < self.shared_memory_file_limit:
sa_key = f'{sequence_name}___{sample_idx}'
points = SharedArray.attach(f"shm://{sa_key}").copy()
else:
points = self.get_lidar(sequence_name, sample_idx)
lidar_z = points[:, 2]
input_dict = {
'db_flag': "waymo",
'points': points,
'frame_id': info['frame_id'],
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='unknown')
if self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False):
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(annos['gt_boxes_lidar'])
else:
gt_boxes_lidar = annos['gt_boxes_lidar']
lidar_z = gt_boxes_lidar[:, 2]
if self.training and self.dataset_cfg.get('FILTER_EMPTY_BOXES_FOR_TRAIN', False):
mask = (annos['num_points_in_gt'] > 0) # filter empty boxes
annos['name'] = annos['name'][mask]
gt_boxes_lidar = gt_boxes_lidar[mask]
annos['num_points_in_gt'] = annos['num_points_in_gt'][mask]
input_dict.update({
'gt_names': annos['name'],
'gt_boxes': gt_boxes_lidar,
'num_points_in_gt': annos.get('num_points_in_gt', None)
})
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
if self.dataset_cfg.get('FOV_POINTS_ONLY', None):
input_dict['points'] = self.extract_fov_data(
input_dict['points'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
if input_dict['gt_boxes'] is not None:
fov_gt_flag = self.extract_fov_gt(
input_dict['gt_boxes'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
input_dict.update({
'gt_names': input_dict['gt_names'][fov_gt_flag],
'gt_boxes': input_dict['gt_boxes'][fov_gt_flag],
'num_points_in_gt': input_dict['num_points_in_gt'][fov_gt_flag] if input_dict['num_points_in_gt'] is not None else None
})
# load saved pseudo label for unlabeled data
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data(data_dict=input_dict)
data_dict['metadata'] = info.get('metadata', info['frame_id'])
data_dict.pop('num_points_in_gt', None)
return data_dict
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_lidar': np.zeros([num_samples, 7])
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
single_pred_dict['metadata'] = batch_dict['metadata'][index]
annos.append(single_pred_dict)
return annos
def evaluation(self, det_annos, class_names, **kwargs):
if 'annos' not in self.infos[0].keys():
return 'No ground-truth boxes for evaluation', {}
def kitti_eval(eval_det_annos, eval_gt_annos):
from ..kitti.kitti_object_eval_python import eval as kitti_eval
from ..kitti import kitti_utils
map_name_to_kitti = {
'Vehicle': 'Car',
'Pedestrian': 'Pedestrian',
'Cyclist': 'Cyclist',
'Sign': 'Sign',
'Car': 'Car'
}
kitti_utils.transform_annotations_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti)
kitti_utils.transform_annotations_to_kitti_format(
eval_gt_annos, map_name_to_kitti=map_name_to_kitti,
info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
)
kitti_class_names = [map_name_to_kitti[x] for x in class_names]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
)
return ap_result_str, ap_dict
def waymo_eval(eval_det_annos, eval_gt_annos):
from .waymo_eval import OpenPCDetWaymoDetectionMetricsEstimator
eval = OpenPCDetWaymoDetectionMetricsEstimator()
ap_dict = eval.waymo_evaluation(
eval_det_annos, eval_gt_annos, class_name=class_names,
distance_thresh=1000, fake_gt_infos=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
)
ap_result_str = '\n'
for key in ap_dict:
ap_dict[key] = ap_dict[key][0]
ap_result_str += '%s: %.4f \n' % (key, ap_dict[key])
return ap_result_str, ap_dict
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.infos]
if kwargs['eval_metric'] == 'kitti':
ap_result_str, ap_dict = kitti_eval(eval_det_annos, eval_gt_annos)
elif kwargs['eval_metric'] == 'waymo':
ap_result_str, ap_dict = waymo_eval(eval_det_annos, eval_gt_annos)
else:
raise NotImplementedError
return ap_result_str, ap_dict
def create_groundtruth_database(self, info_path, save_path, used_classes=None, split='train', sampled_interval=10,
processed_data_tag=None):
database_save_path = save_path / ('%s_gt_database_%s_sampled_%d' % (processed_data_tag, split, sampled_interval))
db_info_save_path = save_path / ('%s_waymo_dbinfos_%s_sampled_%d.pkl' % (processed_data_tag, split, sampled_interval))
db_data_save_path = save_path / ('%s_gt_database_%s_sampled_%d_global.npy' % (processed_data_tag, split, sampled_interval))
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
point_offset_cnt = 0
stacked_gt_points = []
for k in range(0, len(infos), sampled_interval):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
pc_info = info['point_cloud']
sequence_name = pc_info['lidar_sequence']
sample_idx = pc_info['sample_idx']
points = self.get_lidar(sequence_name, sample_idx)
annos = info['annos']
names = annos['name']
difficulty = annos['difficulty']
gt_boxes = annos['gt_boxes_lidar']
if k % 4 != 0 and len(names) > 0:
mask = (names == 'Vehicle')
names = names[~mask]
difficulty = difficulty[~mask]
gt_boxes = gt_boxes[~mask]
if k % 2 != 0 and len(names) > 0:
mask = (names == 'Pedestrian')
names = names[~mask]
difficulty = difficulty[~mask]
gt_boxes = gt_boxes[~mask]
num_obj = gt_boxes.shape[0]
if num_obj == 0:
continue
box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
).long().squeeze(dim=0).cpu().numpy()
for i in range(num_obj):
filename = '%s_%04d_%s_%d.bin' % (sequence_name, sample_idx, names[i], i)
filepath = database_save_path / filename
gt_points = points[box_idxs_of_pts == i]
gt_points[:, :3] -= gt_boxes[i, :3]
if (used_classes is None) or names[i] in used_classes:
                    with open(filepath, 'wb') as f:  # binary mode; tofile writes raw bytes
gt_points.tofile(f)
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'sequence_name': sequence_name,
'sample_idx': sample_idx, 'gt_idx': i, 'box3d_lidar': gt_boxes[i],
'num_points_in_gt': gt_points.shape[0], 'difficulty': difficulty[i]}
# it will be used if you choose to use shared memory for gt sampling
stacked_gt_points.append(gt_points)
db_info['global_data_offset'] = [point_offset_cnt, point_offset_cnt + gt_points.shape[0]]
point_offset_cnt += gt_points.shape[0]
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
# it will be used if you choose to use shared memory for gt sampling
stacked_gt_points = np.concatenate(stacked_gt_points, axis=0)
np.save(db_data_save_path, stacked_gt_points)
def create_waymo_infos(dataset_cfg, class_names, data_path, save_path,
raw_data_tag='raw_data', processed_data_tag='waymo_processed_data',
workers=min(16, multiprocessing.cpu_count())):
    dataset = ActiveWaymoDataset(  # this module defines ActiveWaymoDataset, not WaymoDataset
dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path,
training=False, logger=common_utils.create_logger()
)
train_split, val_split = 'train', 'val'
train_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, train_split))
val_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, val_split))
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
print('---------------Start to generate data infos---------------')
dataset.set_split(train_split)
waymo_infos_train = dataset.get_infos(
raw_data_path=data_path / raw_data_tag,
save_path=save_path / processed_data_tag, num_workers=workers, has_label=True,
sampled_interval=1
)
with open(train_filename, 'wb') as f:
pickle.dump(waymo_infos_train, f)
print('----------------Waymo info train file is saved to %s----------------' % train_filename)
dataset.set_split(val_split)
waymo_infos_val = dataset.get_infos(
raw_data_path=data_path / raw_data_tag,
save_path=save_path / processed_data_tag, num_workers=workers, has_label=True,
sampled_interval=1
)
with open(val_filename, 'wb') as f:
pickle.dump(waymo_infos_val, f)
print('----------------Waymo info val file is saved to %s----------------' % val_filename)
print('---------------Start create groundtruth database for data augmentation---------------')
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
dataset.set_split(train_split)
dataset.create_groundtruth_database(
info_path=train_filename, save_path=save_path, split='train', sampled_interval=1,
used_classes=['Vehicle', 'Pedestrian', 'Cyclist'], processed_data_tag=processed_data_tag
)
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
parser.add_argument('--func', type=str, default='create_waymo_infos', help='')
parser.add_argument('--processed_data_tag', type=str, default='waymo_processed_data_v0_5_0', help='')
args = parser.parse_args()
if args.func == 'create_waymo_infos':
import yaml
from easydict import EasyDict
        try:
            yaml_config = yaml.load(open(args.cfg_file), Loader=yaml.FullLoader)
        except AttributeError:
            # older PyYAML without FullLoader; safe_load takes no Loader argument
            yaml_config = yaml.safe_load(open(args.cfg_file))
dataset_cfg = EasyDict(yaml_config)
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
dataset_cfg.PROCESSED_DATA_TAG = args.processed_data_tag
create_waymo_infos(
dataset_cfg=dataset_cfg,
class_names=['Vehicle', 'Pedestrian', 'Cyclist'],
data_path=ROOT_DIR / 'data' / 'waymo',
save_path=ROOT_DIR / 'data' / 'waymo',
raw_data_tag='raw_data',
processed_data_tag=args.processed_data_tag
)
| 28,867
| 44.247649
| 139
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/waymo/waymo_eval.py
|
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset
# Reference https://github.com/open-mmlab/OpenPCDet
# Written by Shaoshuai Shi, Chaoxu Guo
# All Rights Reserved 2019-2020.
import numpy as np
import pickle
import tensorflow as tf
from google.protobuf import text_format
from waymo_open_dataset.metrics.python import detection_metrics
from waymo_open_dataset.protos import metrics_pb2
import argparse
tf.get_logger().setLevel('INFO')
def limit_period(val, offset=0.5, period=np.pi):
return val - np.floor(val / period + offset) * period
class OpenPCDetWaymoDetectionMetricsEstimator(tf.test.TestCase):
WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Truck', 'Cyclist']
def generate_waymo_type_results(self, infos, class_names, is_gt=False, fake_gt_infos=True):
        def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
            """
            Args:
                boxes3d_lidar: (N, 7) [x, y, z, w, l, h, r] in the old (fake) LiDAR coordinates, z is the bottom center
            Returns:
                boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
            """
            w, l, h, r = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7]
            boxes3d_lidar[:, 2] += h[:, 0] / 2
            return np.concatenate([boxes3d_lidar[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1)
frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty = [], [], [], [], [], []
for frame_index, info in enumerate(infos):
if is_gt:
box_mask = np.array([n in class_names for n in info['name']], dtype=np.bool_)
if 'num_points_in_gt' in info:
zero_difficulty_mask = info['difficulty'] == 0
info['difficulty'][(info['num_points_in_gt'] > 5) & zero_difficulty_mask] = 1
info['difficulty'][(info['num_points_in_gt'] <= 5) & zero_difficulty_mask] = 2
nonzero_mask = info['num_points_in_gt'] > 0
box_mask = box_mask & nonzero_mask
else:
print('Please provide the num_points_in_gt for evaluating on Waymo Dataset '
'(If you create Waymo Infos before 20201126, please re-create the validation infos '
'with version 1.2 Waymo dataset to get this attribute). SSS of OpenPCDet')
raise NotImplementedError
num_boxes = box_mask.sum()
box_name = info['name'][box_mask]
difficulty.append(info['difficulty'][box_mask])
score.append(np.ones(num_boxes))
if fake_gt_infos:
info['gt_boxes_lidar'] = boxes3d_kitti_fakelidar_to_lidar(info['gt_boxes_lidar'])
boxes3d.append(info['gt_boxes_lidar'][box_mask])
else:
num_boxes = len(info['boxes_lidar'])
difficulty.append([0] * num_boxes)
score.append(info['score'])
boxes3d.append(np.array(info['boxes_lidar']))
box_name = info['name']
obj_type += [self.WAYMO_CLASSES.index(name) for name in box_name]
frame_id.append(np.array([frame_index] * num_boxes))
overlap_nlz.append(np.zeros(num_boxes)) # set zero currently
frame_id = np.concatenate(frame_id).reshape(-1).astype(np.int64)
boxes3d = np.concatenate(boxes3d, axis=0)
obj_type = np.array(obj_type).reshape(-1)
score = np.concatenate(score).reshape(-1)
overlap_nlz = np.concatenate(overlap_nlz).reshape(-1)
difficulty = np.concatenate(difficulty).reshape(-1).astype(np.int8)
boxes3d[:, -1] = limit_period(boxes3d[:, -1], offset=0.5, period=np.pi * 2)
return frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty
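# Shape sketch (hedged): all six returned arrays are flattened over every box in
# every frame, e.g. frame_id (M,), boxes3d (M, 7), and obj_type/score/
# overlap_nlz/difficulty each (M,).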
def build_config(self):
config = metrics_pb2.Config()
config_text = """
breakdown_generator_ids: OBJECT_TYPE
difficulties {
levels:1
levels:2
}
matcher_type: TYPE_HUNGARIAN
iou_thresholds: 0.0
iou_thresholds: 0.7
iou_thresholds: 0.5
iou_thresholds: 0.5
iou_thresholds: 0.5
box_type: TYPE_3D
"""
for x in range(0, 100):
config.score_cutoffs.append(x * 0.01)
config.score_cutoffs.append(1.0)
text_format.Merge(config_text, config)
return config
def build_graph(self, graph):
with graph.as_default():
self._pd_frame_id = tf.compat.v1.placeholder(dtype=tf.int64)
self._pd_bbox = tf.compat.v1.placeholder(dtype=tf.float32)
self._pd_type = tf.compat.v1.placeholder(dtype=tf.uint8)
self._pd_score = tf.compat.v1.placeholder(dtype=tf.float32)
self._pd_overlap_nlz = tf.compat.v1.placeholder(dtype=tf.bool)
self._gt_frame_id = tf.compat.v1.placeholder(dtype=tf.int64)
self._gt_bbox = tf.compat.v1.placeholder(dtype=tf.float32)
self._gt_type = tf.compat.v1.placeholder(dtype=tf.uint8)
self._gt_difficulty = tf.compat.v1.placeholder(dtype=tf.uint8)
metrics = detection_metrics.get_detection_metric_ops(
config=self.build_config(),
prediction_frame_id=self._pd_frame_id,
prediction_bbox=self._pd_bbox,
prediction_type=self._pd_type,
prediction_score=self._pd_score,
prediction_overlap_nlz=self._pd_overlap_nlz,
ground_truth_bbox=self._gt_bbox,
ground_truth_type=self._gt_type,
ground_truth_frame_id=self._gt_frame_id,
ground_truth_difficulty=self._gt_difficulty,
)
return metrics
def run_eval_ops(
self,
sess,
graph,
metrics,
prediction_frame_id,
prediction_bbox,
prediction_type,
prediction_score,
prediction_overlap_nlz,
ground_truth_frame_id,
ground_truth_bbox,
ground_truth_type,
ground_truth_difficulty,
):
sess.run(
[tf.group([value[1] for value in metrics.values()])],
feed_dict={
self._pd_bbox: prediction_bbox,
self._pd_frame_id: prediction_frame_id,
self._pd_type: prediction_type,
self._pd_score: prediction_score,
self._pd_overlap_nlz: prediction_overlap_nlz,
self._gt_bbox: ground_truth_bbox,
self._gt_type: ground_truth_type,
self._gt_frame_id: ground_truth_frame_id,
self._gt_difficulty: ground_truth_difficulty,
},
)
def eval_value_ops(self, sess, graph, metrics):
return {item[0]: sess.run([item[1][0]]) for item in metrics.items()}
def mask_by_distance(self, distance_thresh, boxes_3d, *args):
mask = np.linalg.norm(boxes_3d[:, 0:2], axis=1) < distance_thresh + 0.5
boxes_3d = boxes_3d[mask]
ret_ans = [boxes_3d]
for arg in args:
ret_ans.append(arg[mask])
return tuple(ret_ans)
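# Usage sketch (hedged): keep boxes whose BEV center lies within roughly
# distance_thresh meters and filter the aligned per-box arrays in lockstep, e.g.
# boxes, frame_ids, scores = self.mask_by_distance(75, boxes, frame_ids, scores)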
def waymo_evaluation(self, prediction_infos, gt_infos, class_name, distance_thresh=100, fake_gt_infos=True):
print('Start the waymo evaluation...')
assert len(prediction_infos) == len(gt_infos), '%d vs %d' % (len(prediction_infos), len(gt_infos))
tf.compat.v1.disable_eager_execution()
pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz, _ = self.generate_waymo_type_results(
prediction_infos, class_name, is_gt=False
)
gt_frameid, gt_boxes3d, gt_type, gt_score, gt_overlap_nlz, gt_difficulty = self.generate_waymo_type_results(
gt_infos, class_name, is_gt=True, fake_gt_infos=fake_gt_infos
)
pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz = self.mask_by_distance(
distance_thresh, pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz
)
gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty = self.mask_by_distance(
distance_thresh, gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty
)
print('Number: (pd, %d) VS. (gt, %d)' % (len(pd_boxes3d), len(gt_boxes3d)))
print('Level 1: %d, Level 2: %d' % ((gt_difficulty == 1).sum(), (gt_difficulty == 2).sum()))
if pd_score.max() > 1:
# Waymo evaluation expects normalized scores, so squash raw logits with a sigmoid
pd_score = 1 / (1 + np.exp(-pd_score))
print('Warning: prediction scores exceed 1.0; applied a sigmoid since Waymo evaluation only supports normalized scores')
graph = tf.Graph()
metrics = self.build_graph(graph)
with self.test_session(graph=graph) as sess:
sess.run(tf.compat.v1.initializers.local_variables())
self.run_eval_ops(
sess, graph, metrics, pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz,
gt_frameid, gt_boxes3d, gt_type, gt_difficulty,
)
with tf.compat.v1.variable_scope('detection_metrics', reuse=True):
aps = self.eval_value_ops(sess, graph, metrics)
return aps
def main():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--pred_infos', type=str, default=None, help='pickle file')
parser.add_argument('--gt_infos', type=str, default=None, help='pickle file')
parser.add_argument('--class_names', type=str, nargs='+', default=['Vehicle', 'Pedestrian', 'Cyclist'], help='')
parser.add_argument('--sampled_interval', type=int, default=5, help='sampled interval for GT sequences')
args = parser.parse_args()
pred_infos = pickle.load(open(args.pred_infos, 'rb'))
gt_infos = pickle.load(open(args.gt_infos, 'rb'))
print('Start to evaluate the waymo format results...')
evaluator = OpenPCDetWaymoDetectionMetricsEstimator()
gt_infos_dst = []
for idx in range(0, len(gt_infos), args.sampled_interval):
cur_info = gt_infos[idx]['annos']
cur_info['frame_id'] = gt_infos[idx]['frame_id']
gt_infos_dst.append(cur_info)
waymo_AP = evaluator.waymo_evaluation(
pred_infos, gt_infos_dst, class_name=args.class_names, distance_thresh=1000, fake_gt_infos=False
)
print(waymo_AP)
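# Usage sketch (hedged; the pickle names are illustrative):
# python waymo_eval.py --pred_infos result.pkl --gt_infos waymo_infos_val.pkl \
#     --class_names Vehicle Pedestrian Cyclist --sampled_interval 5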
if __name__ == '__main__':
main()
| 10,489
| 41.469636
| 116
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/once_dataset.py
|
import copy
import pickle
import os
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
from pathlib import Path
from ..dataset import DatasetTemplate
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils
from .once_toolkits import Octopus
# io is needed when reading pickled infos from the optional Petrel OSS (Ceph) backend
import io
class ONCEDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/ONCE'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = dataset_cfg.DATA_SPLIT['train'] if training else dataset_cfg.DATA_SPLIT['test']
assert self.split in ['train', 'val', 'test', 'raw_small', 'raw_medium', 'raw_large']
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_seq_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
self.cam_names = ['cam01', 'cam03', 'cam05', 'cam06', 'cam07', 'cam08', 'cam09']
self.cam_tags = ['top', 'top2', 'left_back', 'left_front', 'right_front', 'right_back', 'back']
if self.oss_path is None:
self.toolkits = Octopus(self.root_path)
else:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
self.toolkits = Octopus(self.root_path, self.oss_path, self.client)
self.once_infos = []
self.include_once_data(self.split)
def include_once_data(self, split):
if self.logger is not None:
self.logger.info('Loading ONCE dataset')
once_infos = []
for info_path in self.dataset_cfg.INFO_PATH[split]:
if self.oss_path is None:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
once_infos.extend(infos)
else:
info_path = os.path.join(self.oss_path, info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
once_infos.extend(infos)
def check_annos(info):
return 'annos' in info
if self.split.split('_')[0] != 'raw':
once_infos = list(filter(check_annos, once_infos))
self.once_infos.extend(once_infos)
if self.logger is not None:
self.logger.info('Total samples for ONCE dataset: %d' % (len(once_infos)))
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_seq_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, sequence_id, frame_id):
return self.toolkits.load_point_cloud(sequence_id, frame_id)
def get_image(self, sequence_id, frame_id, cam_name):
return self.toolkits.load_image(sequence_id, frame_id, cam_name)
def project_lidar_to_image(self, sequence_id, frame_id):
return self.toolkits.project_lidar_to_image(sequence_id, frame_id)
def point_painting(self, points, info):
semseg_dir = './' # add your own seg directory
used_classes = [0,1,2,3,4,5]
num_classes = len(used_classes)
frame_id = str(info['frame_id'])
seq_id = str(info['sequence_id'])
painted = np.zeros((points.shape[0], num_classes)) # classes + bg
for cam_name in self.cam_names:
img_path = Path(semseg_dir) / Path(seq_id) / Path(cam_name) / Path(frame_id+'_label.png')
calib_info = info['calib'][cam_name]
cam_2_velo = calib_info['cam_to_velo']
cam_intri = np.hstack([calib_info['cam_intrinsic'], np.zeros((3, 1), dtype=np.float32)])
point_xyz = points[:, :3]
points_homo = np.hstack(
[point_xyz, np.ones(point_xyz.shape[0], dtype=np.float32).reshape((-1, 1))])
points_lidar = np.dot(points_homo, np.linalg.inv(cam_2_velo).T)
mask = points_lidar[:, 2] > 0
points_lidar = points_lidar[mask]
points_img = np.dot(points_lidar, cam_intri.T)
points_img = points_img / points_img[:, [2]]
uv = points_img[:, [0,1]]
#depth = points_img[:, [2]]
seg_map = np.array(Image.open(img_path)) # (H, W)
H, W = seg_map.shape
seg_feats = np.zeros((H*W, num_classes))
seg_map = seg_map.reshape(-1)
for cls_i in used_classes:
seg_feats[seg_map==cls_i, cls_i] = 1
seg_feats = seg_feats.reshape(H, W, num_classes).transpose(2, 0, 1)
uv[:, 0] = (uv[:, 0] - W / 2) / (W / 2)
uv[:, 1] = (uv[:, 1] - H / 2) / (H / 2)
uv_tensor = torch.from_numpy(uv).unsqueeze(0).unsqueeze(0) # [1,1,N,2]
seg_feats = torch.from_numpy(seg_feats).unsqueeze(0) # [1,C,H,W]
proj_scores = F.grid_sample(seg_feats, uv_tensor, mode='bilinear', padding_mode='zeros') # [1, C, 1, N]
proj_scores = proj_scores.squeeze(0).squeeze(1).transpose(0, 1).contiguous() # [N, C]
painted[mask] = proj_scores.numpy()
return np.concatenate([points, painted], axis=1)
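# Usage sketch (hedged): with an (N, 4) [x, y, z, intensity] input and the 6
# classes above, the painted output is (N, 10); points that project into no
# camera image keep all-zero semantic scores:
# points = self.point_painting(points, info)  # info must provide info['calib']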
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.once_infos) * self.total_epochs
return len(self.once_infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.once_infos)
info = copy.deepcopy(self.once_infos[index])
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
if self.dataset_cfg.get('POINT_PAINTING', False):
points = self.point_painting(points, info)
input_dict = {
'db_flag': "once",
'points': points,
'frame_id': frame_id,
}
if 'annos' in info:
annos = info['annos']
input_dict.update({
'gt_names': annos['name'],
'gt_boxes': annos['boxes_3d'],
'num_points_in_gt': annos.get('num_points_in_gt', None)
})
data_dict = self.prepare_data(data_dict=input_dict)
data_dict.pop('num_points_in_gt', None)
return data_dict
def get_infos(self, num_workers=4, sample_seq_list=None):
import concurrent.futures as futures
import json
root_path = self.root_path
cam_names = self.cam_names
"""
# dataset json format
{
'meta_info':
'calib': {
'cam01': {
'cam_to_velo': list
'cam_intrinsic': list
'distortion': list
}
...
}
'frames': [
{
'frame_id': timestamp,
'annos': {
'names': list
'boxes_3d': list of list
'boxes_2d': {
'cam01': list of list
...
}
}
'pose': list
},
...
]
}
# open pcdet format
{
'meta_info':
'sequence_id': seq_idx
'frame_id': timestamp
'timestamp': timestamp
'lidar': path
'cam01': path
...
'calib': {
'cam01': {
'cam_to_velo': np.array
'cam_intrinsic': np.array
'distortion': np.array
}
...
}
'pose': np.array
'annos': {
'name': np.array
'boxes_3d': np.array
'boxes_2d': {
'cam01': np.array
....
}
}
}
"""
def process_single_sequence(seq_idx):
print('%s seq_idx: %s' % (self.split, seq_idx))
seq_infos = []
seq_path = Path(root_path) / 'data' / seq_idx
json_path = seq_path / ('%s.json' % seq_idx)
with open(json_path, 'r') as f:
info_this_seq = json.load(f)
meta_info = info_this_seq['meta_info']
calib = info_this_seq['calib']
for f_idx, frame in enumerate(info_this_seq['frames']):
frame_id = frame['frame_id']
if f_idx == 0:
prev_id = None
else:
prev_id = info_this_seq['frames'][f_idx-1]['frame_id']
if f_idx == len(info_this_seq['frames'])-1:
next_id = None
else:
next_id = info_this_seq['frames'][f_idx+1]['frame_id']
pc_path = str(seq_path / 'lidar_roof' / ('%s.bin' % frame_id))
pose = np.array(frame['pose'])
frame_dict = {
'sequence_id': seq_idx,
'frame_id': frame_id,
'timestamp': int(frame_id),
'prev_id': prev_id,
'next_id': next_id,
'meta_info': meta_info,
'lidar': pc_path,
'pose': pose
}
calib_dict = {}
for cam_name in cam_names:
cam_path = str(seq_path / cam_name / ('%s.jpg' % frame_id))
frame_dict.update({cam_name: cam_path})
calib_dict[cam_name] = {}
calib_dict[cam_name]['cam_to_velo'] = np.array(calib[cam_name]['cam_to_velo'])
calib_dict[cam_name]['cam_intrinsic'] = np.array(calib[cam_name]['cam_intrinsic'])
calib_dict[cam_name]['distortion'] = np.array(calib[cam_name]['distortion'])
frame_dict.update({'calib': calib_dict})
if 'annos' in frame:
annos = frame['annos']
boxes_3d = np.array(annos['boxes_3d'])
if boxes_3d.shape[0] == 0:
print(frame_id)
continue
boxes_2d_dict = {}
for cam_name in cam_names:
boxes_2d_dict[cam_name] = np.array(annos['boxes_2d'][cam_name])
annos_dict = {
'name': np.array(annos['names']),
'boxes_3d': boxes_3d,
'boxes_2d': boxes_2d_dict
}
points = self.get_lidar(seq_idx, frame_id)
corners_lidar = box_utils.boxes_to_corners_3d(np.array(annos['boxes_3d']))
num_gt = boxes_3d.shape[0]
num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
for k in range(num_gt):
flag = box_utils.in_hull(points[:, 0:3], corners_lidar[k])
num_points_in_gt[k] = flag.sum()
annos_dict['num_points_in_gt'] = num_points_in_gt
frame_dict.update({'annos': annos_dict})
seq_infos.append(frame_dict)
return seq_infos
sample_seq_list = sample_seq_list if sample_seq_list is not None else self.sample_seq_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_sequence, sample_seq_list)
all_infos = []
for info in infos:
all_infos.extend(info)
return all_infos
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
import torch
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('once_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
if 'annos' not in infos[k]:
continue
print('gt_database sample: %d' % (k + 1))
info = infos[k]
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
annos = info['annos']
names = annos['name']
gt_boxes = annos['boxes_3d']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
# TODO: use pseudo-labels with a score threshold > 0.9 to generate these .bin files
filename = '%s_%s_%d.bin' % (frame_id, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'wb') as f:
gt_points.tofile(f)
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_3d': np.zeros((num_samples, 7))
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_3d'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
raise NotImplementedError
return annos
def evaluation(self, det_annos, class_names, **kwargs):
from .once_eval.evaluation import get_evaluation_results
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.once_infos]
ap_result_str, ap_dict = get_evaluation_results(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def create_once_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
dataset = ONCEDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
splits = ['train', 'val', 'test', 'raw_small', 'raw_medium', 'raw_large']
# skip the test split and the raw_small/raw_medium/raw_large splits
ignore = ['test', 'raw_small', 'raw_medium', 'raw_large']
print('---------------Start to generate data infos---------------')
for split in splits:
if split in ignore:
continue
filename = 'once_infos_%s.pkl' % split
filename = save_path / Path(filename)
dataset.set_split(split)
once_infos = dataset.get_infos(num_workers=workers)
with open(filename, 'wb') as f:
pickle.dump(once_infos, f)
print('ONCE info %s file is saved to %s' % (split, filename))
train_filename = save_path / 'once_infos_train.pkl'
print('---------------Start create groundtruth database for data augmentation---------------')
dataset.set_split('train')
dataset.create_groundtruth_database(train_filename, split='train')
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
parser.add_argument('--func', type=str, default='create_once_infos', help='specify the function to run')
parser.add_argument('--runs_on', type=str, default='server', help='server or cloud')
args = parser.parse_args()
if args.func == 'create_once_infos':
import yaml
from pathlib import Path
from easydict import EasyDict
dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file)))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
once_data_path = ROOT_DIR / 'data' / 'once'
once_save_path = ROOT_DIR / 'data' / 'once'
if args.runs_on == 'cloud':
once_data_path = Path('/cache/once/')
once_save_path = Path('/cache/once/')
dataset_cfg.DATA_PATH = dataset_cfg.CLOUD_DATA_PATH
create_once_infos(
dataset_cfg=dataset_cfg,
class_names=['Car', 'Bus', 'Truck', 'Pedestrian', 'Bicycle'],
data_path=once_data_path,
save_path=once_save_path
)
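# Usage sketch (hedged; the cfg path is illustrative):
# python -m pcdet.datasets.once.once_dataset --func create_once_infos \
#     --cfg_file tools/cfgs/dataset_configs/once_dataset.yaml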
| 18,925
| 39.353945
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/once_semi_dataset.py
|
import copy
import pickle
import numpy as np
import os
from pathlib import Path
from ..semi_dataset import SemiDatasetTemplate
from .once_toolkits import Octopus
import io
def split_once_semi_data(dataset_cfg, info_paths, data_splits, root_path, labeled_ratio, logger):
oss_path = dataset_cfg.OSS_PATH if 'OSS_PATH' in dataset_cfg else None
if oss_path:
from petrel_client.client import Client
client = Client('~/.petreloss.conf')
once_pretrain_infos = []
once_test_infos = []
once_labeled_infos = []
once_unlabeled_infos = []
def check_annos(info):
return 'annos' in info
root_path = Path(root_path)
train_split = data_splits['train']
for info_path in info_paths[train_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
infos = list(filter(check_annos, infos))
once_pretrain_infos.extend(copy.deepcopy(infos))
once_labeled_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
infos = list(filter(check_annos, infos))
once_pretrain_infos.extend(copy.deepcopy(infos))
once_labeled_infos.extend(copy.deepcopy(infos))
test_split = data_splits['test']
for info_path in info_paths[test_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
infos = list(filter(check_annos, infos))
once_test_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
infos = list(filter(check_annos, infos))
once_test_infos.extend(copy.deepcopy(infos))
raw_split = data_splits['raw']
for info_path in info_paths[raw_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
once_unlabeled_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
once_unlabeled_infos.extend(copy.deepcopy(infos))
logger.info('Total samples for ONCE pre-training dataset: %d' % (len(once_pretrain_infos)))
logger.info('Total samples for ONCE testing dataset: %d' % (len(once_test_infos)))
logger.info('Total samples for ONCE labeled dataset: %d' % (len(once_labeled_infos)))
logger.info('Total samples for ONCE unlabeled dataset: %d' % (len(once_unlabeled_infos)))
return once_pretrain_infos, once_test_infos, once_labeled_infos, once_unlabeled_infos
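# Usage sketch (hedged): info_paths/data_splits typically come straight from the
# dataset config, e.g.
# pretrain, test, labeled, unlabeled = split_once_semi_data(
#     dataset_cfg, dataset_cfg.INFO_PATH, dataset_cfg.DATA_SPLIT,
#     root_path, labeled_ratio=1.0, logger=logger)
# Note: labeled_ratio is currently accepted but unused; every training info is
# treated as labeled.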
class ONCESemiDataset(SemiDatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/ONCE'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.cam_names = ['cam01', 'cam03', 'cam05', 'cam06', 'cam07', 'cam08', 'cam09']
self.cam_tags = ['top', 'top2', 'left_back', 'left_front', 'right_front', 'right_back', 'back']
if self.oss_path is None:
self.toolkits = Octopus(self.root_path)
else:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
self.toolkits = Octopus(self.root_path, self.oss_path, self.client)
self.once_infos = infos
def get_lidar(self, sequence_id, frame_id):
return self.toolkits.load_point_cloud(sequence_id, frame_id)
def get_image(self, sequence_id, frame_id, cam_name):
return self.toolkits.load_image(sequence_id, frame_id, cam_name)
def project_lidar_to_image(self, sequence_id, frame_id):
return self.toolkits.project_lidar_to_image(sequence_id, frame_id)
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.once_infos) * self.total_epochs
return len(self.once_infos)
def __getitem__(self, index):
raise NotImplementedError
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_3d': np.zeros((num_samples, 7))
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_3d'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
raise NotImplementedError
return annos
def evaluation(self, det_annos, class_names, **kwargs):
from .once_eval.evaluation import get_evaluation_results
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.once_infos]
ap_result_str, ap_dict = get_evaluation_results(eval_gt_annos, eval_det_annos, class_names)
"""
eval_det_annos = copy.deepcopy(eval_gt_annos)
for gt_anno in eval_det_annos:
gt_anno['score'] = np.random.uniform(low=0.1, high=1,size=gt_anno['name'].shape[0])
#gt_anno['score'] = np.ones(gt_anno['name'].shape[0])
gt_anno['boxes_3d'][:, 0] += 0#np.random.uniform(low=0.1, high=1,size=gt_anno['name'].shape[0]) * 0.001
gt_anno['boxes_3d'][:, 1] += 0#np.random.uniform(low=0.1, high=1,size=gt_anno['name'].shape[0]) * 0.001
ap_result_str, ap_dict = get_evaluation_results(eval_gt_annos, eval_det_annos, class_names)
"""
return ap_result_str, ap_dict
class ONCEPretrainDataset(ONCESemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.once_infos)
info = copy.deepcopy(self.once_infos[index])
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
input_dict = {
'points': points,
'frame_id': frame_id,
}
if 'annos' in info:
annos = info['annos']
input_dict.update({
'gt_names': annos['name'],
'gt_boxes': annos['boxes_3d'],
'num_points_in_gt': annos.get('num_points_in_gt', None)
})
data_dict = self.prepare_data(data_dict=input_dict)
data_dict.pop('num_points_in_gt', None)
return data_dict
class ONCELabeledDataset(ONCESemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
self.labeled_data_for = dataset_cfg.LABELED_DATA_FOR
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.once_infos)
info = copy.deepcopy(self.once_infos[index])
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
input_dict = {
'points': points,
'frame_id': frame_id,
}
assert 'annos' in info
annos = info['annos']
input_dict.update({
'gt_names': annos['name'],
'gt_boxes': annos['boxes_3d'],
'num_points_in_gt': annos.get('num_points_in_gt', None)
})
data_dict = self.prepare_data_ssl(input_dict, output_dicts=self.labeled_data_for)
if isinstance(data_dict, tuple):
teacher_dict, student_dict = data_dict[0], data_dict[1]
if teacher_dict is not None: teacher_dict.pop('num_points_in_gt', None)
if student_dict is not None: student_dict.pop('num_points_in_gt', None)
return tuple([teacher_dict, student_dict])
else:
return data_dict
class ONCEUnlabeledDataset(ONCESemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
self.unlabeled_data_for = dataset_cfg.UNLABELED_DATA_FOR
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.once_infos)
info = copy.deepcopy(self.once_infos[index])
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
input_dict = {
'points': points,
'frame_id': frame_id,
}
if self.dataset_cfg.get('USE_UNLABELED_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data_ssl(input_dict, output_dicts=self.unlabeled_data_for)
if isinstance(data_dict, tuple):
teacher_dict, student_dict = data_dict[0], data_dict[1]
return tuple([teacher_dict, student_dict])
else:
return data_dict
class ONCETestDataset(ONCESemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=False, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is False
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.once_infos)
info = copy.deepcopy(self.once_infos[index])
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
input_dict = {
'points': points,
'frame_id': frame_id,
}
if 'annos' in info:
annos = info['annos']
input_dict.update({
'gt_names': annos['name'],
'gt_boxes': annos['boxes_3d'],
'num_points_in_gt': annos.get('num_points_in_gt', None)
})
data_dict = self.prepare_data(data_dict=input_dict)
data_dict.pop('num_points_in_gt', None)
return data_dict
# Returns two batch_dicts that share consistent point indices
class ONCEUnlabeledPairDataset(ONCESemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
self.unlabeled_data_for = dataset_cfg.UNLABELED_DATA_FOR
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.once_infos)
info = copy.deepcopy(self.once_infos[index])
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
input_dict = {
'points': points,
'frame_id': frame_id,
}
if self.dataset_cfg.get('USE_UNLABELED_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data_ssl_pair(input_dict, output_dicts=self.unlabeled_data_for)
if isinstance(data_dict, tuple):
teacher_dict, student_dict = data_dict[0], data_dict[1]
return tuple([teacher_dict, student_dict])
else:
return data_dict
| 14,854
| 37.584416
| 128
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/once_toolkits.py
|
import io
import json
import os.path as osp
from collections import defaultdict
import cv2
import numpy as np
class Octopus(object):
"""
dataset structure:
- data_root
- train_split.txt
- val_split.txt
- test_split.txt
-
"""
camera_names = ['cam01', 'cam03', 'cam05', 'cam06', 'cam07', 'cam08', 'cam09']
camera_tags = ['top', 'top2', 'left_back', 'left_front', 'right_front', 'right_back', 'back']
def __init__(self, dataset_root, oss_path=None, client=None):
self.dataset_root = dataset_root
self.oss_path = oss_path
self.data_root = osp.join(self.dataset_root, 'data')
if self.oss_path is not None:
self.oss_root = osp.join(self.oss_path, 'data')
self.client = client
self._collect_basic_infos()
@property
def train_split_list(self):
return self._load_split_list('train_set.txt')
@property
def val_split_list(self):
return self._load_split_list('val_set.txt')
@property
def test_split_list(self):
return self._load_split_list('test_set.txt')
@property
def raw_split_list(self):
return self._load_split_list('raw_set.txt')
def _load_split_list(self, split_file):
# the original checked dataset_root/ImageSets but read from data_root; unify on ImageSets
split_path = osp.join(self.dataset_root, 'ImageSets', split_file)
if not osp.isfile(split_path):
return None
return set(map(lambda x: x.strip(), open(split_path).readlines()))
def _find_split_name(self, seq_id):
# split lists may be None when the corresponding split file is missing
for split_name in ['raw', 'train', 'test', 'val']:
split_list = getattr(self, '{}_split_list'.format(split_name))
if split_list is not None and seq_id in split_list:
return split_name
raise ValueError('sequence id {} corresponds to no split'.format(seq_id))
def _collect_basic_infos(self):
self.train_info = defaultdict(dict)
if self.train_split_list is not None:
for train_seq in self.train_split_list:
if self.oss_path is None:
anno_file_path = osp.join(self.data_root, train_seq, '{}.json'.format(train_seq))
if not osp.isfile(anno_file_path):
print("no annotation file for sequence {}".format(train_seq))
raise FileNotFoundError
anno_file = json.load(open(anno_file_path, 'r'))
else:
anno_file_path = osp.join(self.oss_root, train_seq, '{}.json'.format(train_seq))
text_bytes = self.client.get(anno_file_path, update_cache=True)
text_bytes = text_bytes.decode('utf-8')
anno_file = json.load(io.StringIO(text_bytes))
for frame_anno in anno_file['frames']:
self.train_info[train_seq][frame_anno['frame_id']] = {
'pose': frame_anno['pose'],
'calib': anno_file['calib'],
}
def get_frame_anno(self, seq_id, frame_id):
split_name = self._find_split_name(seq_id)
frame_info = getattr(self, '{}_info'.format(split_name))[seq_id][frame_id]
if 'anno' in frame_info:
return frame_info['anno']
return None
def load_point_cloud(self, seq_id, frame_id):
if self.oss_path is None:
bin_path = osp.join(self.data_root, seq_id, 'lidar_roof', '{}.bin'.format(frame_id))
points = np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4)
else:
bin_path = osp.join(self.oss_root, seq_id, 'lidar_roof', '{}.bin'.format(frame_id))
sdk_local_bytes = self.client.get(bin_path, update_cache=True)
points = np.frombuffer(sdk_local_bytes, dtype=np.float32).reshape(-1, 4).copy()
return points
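# Format note (hedged, inferred from the reshape above): each ONCE roof-LiDAR
# .bin stores float32 rows of [x, y, z, intensity], so the return shape is (N, 4).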
def load_image(self, seq_id, frame_id, cam_name):
if self.oss_path is None:
cam_path = osp.join(self.data_root, seq_id, cam_name, '{}.jpg'.format(frame_id))
img_buf = cv2.cvtColor(cv2.imread(cam_path), cv2.COLOR_BGR2RGB)
else:
cam_path = osp.join(self.oss_root, seq_id, cam_name, '{}.jpg'.format(frame_id))
sdk_local_bytes = self.client.get(cam_path, update_cache=True)
img_buf = cv2.cvtColor(np.frombuffer(sdk_local_bytes, dtype='uint8'), cv2.COLOR_BGR2RGB)
return img_buf
def project_lidar_to_image(self, seq_id, frame_id):
points = self.load_point_cloud(seq_id, frame_id)
split_name = self._find_split_name(seq_id)
frame_info = getattr(self, '{}_info'.format(split_name))[seq_id][frame_id]
points_img_dict = dict()
for cam_name in self.__class__.camera_names:
calib_info = frame_info['calib'][cam_name]
cam_2_velo = calib_info['cam_to_velo']
cam_intri = calib_info['cam_intrinsic']
point_xyz = points[:, :3]
points_homo = np.hstack(
[point_xyz, np.ones(point_xyz.shape[0], dtype=np.float32).reshape((-1, 1))])
points_lidar = np.dot(points_homo, np.linalg.inv(cam_2_velo).T)
mask = points_lidar[:, 2] > 0
points_lidar = points_lidar[mask]
points_img = np.dot(points_lidar, cam_intri.T)
points_img_dict[cam_name] = points_img
return points_img_dict
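# Note (hedged): each points_img entry holds homogeneous camera coordinates
# (u*z, v*z, z); divide by the third column to obtain pixel (u, v), as done in
# ONCEDataset.point_painting.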
def undistort_image(self, seq_id, frame_id):
pass
| 6,304
| 41.891156
| 101
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/once_target_dataset.py
|
import copy
import pickle
import numpy as np
from pathlib import Path
from ..dataset import DatasetTemplate
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils
from .once_toolkits import Octopus
class ONCEDataset(DatasetTemplate):
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = 'train' if training else 'val'
assert self.split in ['train', 'val', 'test', 'raw_small', 'raw_medium', 'raw_large']
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_seq_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
self.cam_names = ['cam01', 'cam03', 'cam05', 'cam06', 'cam07', 'cam08', 'cam09']
self.cam_tags = ['top', 'top2', 'left_back', 'left_front', 'right_front', 'right_back', 'back']
self.toolkits = Octopus(self.root_path)
self.once_infos = []
self.include_once_data(self.split)
def include_once_data(self, split):
if self.logger is not None:
self.logger.info('Loading ONCE dataset')
once_infos = []
for info_path in self.dataset_cfg.INFO_PATH[split]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
once_infos.extend(infos)
def check_annos(info):
return 'annos' in info
if self.split != 'raw':
once_infos = list(filter(check_annos, once_infos))
self.once_infos.extend(once_infos)
if self.logger is not None:
self.logger.info('Total samples for ONCE dataset: %d' % (len(once_infos)))
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_seq_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, sequence_id, frame_id):
return self.toolkits.load_point_cloud(sequence_id, frame_id)
def get_image(self, sequence_id, frame_id, cam_name):
return self.toolkits.load_image(sequence_id, frame_id, cam_name)
def project_lidar_to_image(self, sequence_id, frame_id):
# the toolkit projects onto all cameras at once, so no cam_name argument is taken
return self.toolkits.project_lidar_to_image(sequence_id, frame_id)
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.once_infos) * self.total_epochs
return len(self.once_infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.once_infos)
info = copy.deepcopy(self.once_infos[index])
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'points': points,
'frame_id': frame_id,
}
if 'annos' in info:
annos = info['annos']
gt_boxes_lidar = annos['boxes_3d']
if self.dataset_cfg.get('SHIFT_COOR', None):
gt_boxes_lidar[:, 0:3] += self.dataset_cfg.SHIFT_COOR
input_dict.update({
'gt_names': annos['name'],
'gt_boxes': gt_boxes_lidar,
'num_points_in_gt': annos.get('num_points_in_gt', None)
})
if self.dataset_cfg.get('REMOVE_ORIGIN_GTS', None) and self.training:
input_dict['points'] = box_utils.remove_points_in_boxes3d(input_dict['points'], input_dict['gt_boxes'])
mask = np.zeros(gt_boxes_lidar.shape[0], dtype=np.bool_)
input_dict['gt_boxes'] = input_dict['gt_boxes'][mask]
input_dict['gt_names'] = input_dict['gt_names'][mask]
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
# load the saved pseudo labels for unlabeled data
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data(data_dict=input_dict)
data_dict.pop('num_points_in_gt', None)
return data_dict
def get_infos(self, num_workers=4, sample_seq_list=None):
import concurrent.futures as futures
import json
root_path = self.root_path
cam_names = self.cam_names
"""
# dataset json format
{
'meta_info':
'calib': {
'cam01': {
'cam_to_velo': list
'cam_intrinsic': list
'distortion': list
}
...
}
'frames': [
{
'frame_id': timestamp,
'annos': {
'names': list
'boxes_3d': list of list
'boxes_2d': {
'cam01': list of list
...
}
}
'pose': list
},
...
]
}
# open pcdet format
{
'meta_info':
'sequence_id': seq_idx
'frame_id': timestamp
'timestamp': timestamp
'lidar': path
'cam01': path
...
'calib': {
'cam01': {
'cam_to_velo': np.array
'cam_intrinsic': np.array
'distortion': np.array
}
...
}
'pose': np.array
'annos': {
'name': np.array
'boxes_3d': np.array
'boxes_2d': {
'cam01': np.array
....
}
}
}
"""
def process_single_sequence(seq_idx):
print('%s seq_idx: %s' % (self.split, seq_idx))
seq_infos = []
seq_path = Path(root_path) / 'data' / seq_idx
json_path = seq_path / ('%s.json' % seq_idx)
with open(json_path, 'r') as f:
info_this_seq = json.load(f)
meta_info = info_this_seq['meta_info']
calib = info_this_seq['calib']
for f_idx, frame in enumerate(info_this_seq['frames']):
frame_id = frame['frame_id']
if f_idx == 0:
prev_id = None
else:
prev_id = info_this_seq['frames'][f_idx-1]['frame_id']
if f_idx == len(info_this_seq['frames'])-1:
next_id = None
else:
next_id = info_this_seq['frames'][f_idx+1]['frame_id']
pc_path = str(seq_path / 'lidar_roof' / ('%s.bin' % frame_id))
pose = np.array(frame['pose'])
frame_dict = {
'sequence_id': seq_idx,
'frame_id': frame_id,
'timestamp': int(frame_id),
'prev_id': prev_id,
'next_id': next_id,
'meta_info': meta_info,
'lidar': pc_path,
'pose': pose
}
calib_dict = {}
for cam_name in cam_names:
cam_path = str(seq_path / cam_name / ('%s.jpg' % frame_id))
frame_dict.update({cam_name: cam_path})
calib_dict[cam_name] = {}
calib_dict[cam_name]['cam_to_velo'] = np.array(calib[cam_name]['cam_to_velo'])
calib_dict[cam_name]['cam_intrinsic'] = np.array(calib[cam_name]['cam_intrinsic'])
calib_dict[cam_name]['distortion'] = np.array(calib[cam_name]['distortion'])
frame_dict.update({'calib': calib_dict})
if 'annos' in frame:
annos = frame['annos']
boxes_3d = np.array(annos['boxes_3d'])
if boxes_3d.shape[0] == 0:
print(frame_id)
continue
boxes_2d_dict = {}
for cam_name in cam_names:
boxes_2d_dict[cam_name] = np.array(annos['boxes_2d'][cam_name])
annos_dict = {
'name': np.array(annos['names']),
'boxes_3d': boxes_3d,
'boxes_2d': boxes_2d_dict
}
points = self.get_lidar(seq_idx, frame_id)
corners_lidar = box_utils.boxes_to_corners_3d(np.array(annos['boxes_3d']))
num_gt = boxes_3d.shape[0]
num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
for k in range(num_gt):
flag = box_utils.in_hull(points[:, 0:3], corners_lidar[k])
num_points_in_gt[k] = flag.sum()
annos_dict['num_points_in_gt'] = num_points_in_gt
frame_dict.update({'annos': annos_dict})
seq_infos.append(frame_dict)
return seq_infos
sample_seq_list = sample_seq_list if sample_seq_list is not None else self.sample_seq_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_sequence, sample_seq_list)
all_infos = []
for info in infos:
all_infos.extend(info)
return all_infos
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
import torch
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('once_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
if 'annos' not in infos[k]:
continue
print('gt_database sample: %d' % (k + 1))
info = infos[k]
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
annos = info['annos']
names = annos['name']
gt_boxes = annos['boxes_3d']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%d.bin' % (frame_id, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'wb') as f:
gt_points.tofile(f)
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_3d': np.zeros((num_samples, 7))
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_3d'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
raise NotImplementedError
return annos
def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names):
from ..kitti.kitti_object_eval_python import eval as kitti_eval
map_name_to_kitti = {
'Car': 'Car',
'Pedestrian': 'Pedestrian',
'Truck': 'Truck',
}
def transform_to_kitti_format(annos, info_with_fakelidar=False, is_gt=False):
for anno in annos:
if 'name' not in anno:
anno['name'] = anno['gt_names']
anno.pop('gt_names')
for k in range(anno['name'].shape[0]):
if anno['name'][k] in map_name_to_kitti:
anno['name'][k] = map_name_to_kitti[anno['name'][k]]
else:
anno['name'][k] = 'Person_sitting'
"""
if 'boxes_lidar' in anno:
gt_boxes_lidar = anno['boxes_lidar'].copy()
else:
gt_boxes_lidar = anno['gt_boxes'].copy()
"""
gt_boxes_lidar = anno['boxes_3d'].copy()
# filter by fov
if is_gt and self.dataset_cfg.get('GT_FILTER', None):
if self.dataset_cfg.GT_FILTER.get('FOV_FILTER', None):
fov_gt_flag = self.extract_fov_gt(
gt_boxes_lidar, self.dataset_cfg['FOV_DEGREE'], self.dataset_cfg['FOV_ANGLE']
)
gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag]
anno['name'] = anno['name'][fov_gt_flag]
anno['bbox'] = np.zeros((len(anno['name']), 4))
anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]
anno['truncated'] = np.zeros(len(anno['name']))
anno['occluded'] = np.zeros(len(anno['name']))
if len(gt_boxes_lidar) > 0:
if info_with_fakelidar:
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)
gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2
anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))
anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar
anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar
anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar
dxdydz = gt_boxes_lidar[:, 3:6]
anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw
anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0
anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']
else:
anno['location'] = anno['dimensions'] = np.zeros((0, 3))
anno['rotation_y'] = anno['alpha'] = np.zeros(0)
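# Sanity sketch (hedged) for the lidar -> KITTI-camera mapping above: a box
# centered at lidar (x=10, y=2, z=1) with dz=2 first drops to z=0 (its bottom
# face), then maps to camera location (-2, 0, 10) via x_cam=-y, y_cam=-z, z_cam=x.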
transform_to_kitti_format(eval_det_annos)
transform_to_kitti_format(eval_gt_annos, info_with_fakelidar=False, is_gt=True)
kitti_class_names = []
for x in class_names:
if x in map_name_to_kitti:
kitti_class_names.append(map_name_to_kitti[x])
else:
kitti_class_names.append('Person_sitting')
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
)
return ap_result_str, ap_dict
def evaluation(self, det_annos, class_names, **kwargs):
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.once_infos]
if kwargs['eval_metric'] == 'kitti':
return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names)
elif kwargs['eval_metric'] == 'once':
from .once_eval.evaluation import get_evaluation_results
ap_result_str, ap_dict = get_evaluation_results(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
else:
raise NotImplementedError
def create_once_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
dataset = ONCEDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
splits = ['train', 'val', 'test', 'raw_small', 'raw_medium', 'raw_large']
ignore = ['test']
print('---------------Start to generate data infos---------------')
for split in splits:
if split in ignore:
continue
filename = 'once_infos_%s.pkl' % split
filename = save_path / Path(filename)
dataset.set_split(split)
once_infos = dataset.get_infos(num_workers=workers)
with open(filename, 'wb') as f:
pickle.dump(once_infos, f)
print('ONCE info %s file is saved to %s' % (split, filename))
train_filename = save_path / 'once_infos_train.pkl'
print('---------------Start create groundtruth database for data augmentation---------------')
dataset.set_split('train')
dataset.create_groundtruth_database(train_filename, split='train')
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
parser.add_argument('--func', type=str, default='create_once_infos', help='specify the function to run')
parser.add_argument('--runs_on', type=str, default='server', help='server or cloud')
args = parser.parse_args()
if args.func == 'create_once_infos':
import yaml
from pathlib import Path
from easydict import EasyDict
dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file)))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
once_data_path = ROOT_DIR / 'data' / 'once'
once_save_path = ROOT_DIR / 'data' / 'once'
if args.runs_on == 'cloud':
once_data_path = Path('/cache/once/')
once_save_path = Path('/cache/once/')
dataset_cfg.DATA_PATH = dataset_cfg.CLOUD_DATA_PATH
create_once_infos(
dataset_cfg=dataset_cfg,
class_names=['Car', 'Bus', 'Truck', 'Pedestrian', 'Bicycle'],
data_path=once_data_path,
save_path=once_save_path
)
| 20,276
| 39.554
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/once_dataset_ada.py
|
import copy
import pickle
import os
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
from pathlib import Path
from ..dataset import DatasetTemplate
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils
from .once_toolkits import Octopus
# io is needed when reading pickled infos from the optional Petrel OSS (Ceph) backend
import io
class ActiveONCEDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/ONCE'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, sample_info_path=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = dataset_cfg.DATA_SPLIT['train'] if training else dataset_cfg.DATA_SPLIT['test']
assert self.split in ['train', 'val', 'test', 'raw_small', 'raw_medium', 'raw_large']
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_seq_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
self.cam_names = ['cam01', 'cam03', 'cam05', 'cam06', 'cam07', 'cam08', 'cam09']
self.cam_tags = ['top', 'top2', 'left_back', 'left_front', 'right_front', 'right_back', 'back']
if self.oss_path is None:
self.toolkits = Octopus(self.root_path)
else:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
self.toolkits = Octopus(self.root_path, self.oss_path, self.client)
self.once_infos = []
self.include_once_data(self.split, sample_info_path)
def include_once_data(self, split, sample_info_path=None):
if self.logger is not None:
self.logger.info('Loading ONCE dataset')
once_infos = []
for info_path in self.dataset_cfg.INFO_PATH[split]:
if sample_info_path is not None and str(sample_info_path).split(':')[0] != 's3':
info_path = sample_info_path
if not Path(info_path).exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
once_infos.extend(infos)
elif sample_info_path is not None and str(sample_info_path).split(':')[0] == 's3':
info_path = sample_info_path
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
once_infos.extend(infos)
elif self.oss_path is None:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
once_infos.extend(infos)
else:
info_path = os.path.join(self.oss_path, info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
once_infos.extend(infos)
def check_annos(info):
return 'annos' in info
if self.split.split('_')[0] != 'raw':
once_infos = list(filter(check_annos, once_infos))
self.once_infos.extend(once_infos)
if self.logger is not None:
self.logger.info('Total samples for ONCE dataset: %d' % (len(once_infos)))
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_seq_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, sequence_id, frame_id):
return self.toolkits.load_point_cloud(sequence_id, frame_id)
def get_image(self, sequence_id, frame_id, cam_name):
return self.toolkits.load_image(sequence_id, frame_id, cam_name)
def project_lidar_to_image(self, sequence_id, frame_id):
return self.toolkits.project_lidar_to_image(sequence_id, frame_id)
def point_painting(self, points, info):
semseg_dir = './' # add your own seg directory
used_classes = [0,1,2,3,4,5]
num_classes = len(used_classes)
frame_id = str(info['frame_id'])
seq_id = str(info['sequence_id'])
painted = np.zeros((points.shape[0], num_classes)) # classes + bg
for cam_name in self.cam_names:
img_path = Path(semseg_dir) / Path(seq_id) / Path(cam_name) / Path(frame_id+'_label.png')
calib_info = info['calib'][cam_name]
cam_2_velo = calib_info['cam_to_velo']
cam_intri = np.hstack([calib_info['cam_intrinsic'], np.zeros((3, 1), dtype=np.float32)])
point_xyz = points[:, :3]
points_homo = np.hstack(
[point_xyz, np.ones(point_xyz.shape[0], dtype=np.float32).reshape((-1, 1))])
points_lidar = np.dot(points_homo, np.linalg.inv(cam_2_velo).T)
mask = points_lidar[:, 2] > 0
points_lidar = points_lidar[mask]
points_img = np.dot(points_lidar, cam_intri.T)
points_img = points_img / points_img[:, [2]]
uv = points_img[:, [0,1]]
#depth = points_img[:, [2]]
seg_map = np.array(Image.open(img_path)) # (H, W)
H, W = seg_map.shape
seg_feats = np.zeros((H*W, num_classes))
seg_map = seg_map.reshape(-1)
for cls_i in used_classes:
seg_feats[seg_map==cls_i, cls_i] = 1
seg_feats = seg_feats.reshape(H, W, num_classes).transpose(2, 0, 1)
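            # F.grid_sample expects sampling locations normalized to [-1, 1]
            # over the feature map, hence u_norm = (u - W/2) / (W/2) and
            # v_norm = (v - H/2) / (H/2) below.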
uv[:, 0] = (uv[:, 0] - W / 2) / (W / 2)
uv[:, 1] = (uv[:, 1] - H / 2) / (H / 2)
uv_tensor = torch.from_numpy(uv).unsqueeze(0).unsqueeze(0) # [1,1,N,2]
seg_feats = torch.from_numpy(seg_feats).unsqueeze(0) # [1,C,H,W]
proj_scores = F.grid_sample(seg_feats, uv_tensor, mode='bilinear', padding_mode='zeros') # [1, C, 1, N]
proj_scores = proj_scores.squeeze(0).squeeze(1).transpose(0, 1).contiguous() # [N, C]
painted[mask] = proj_scores.numpy()
return np.concatenate([points, painted], axis=1)
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.once_infos) * self.total_epochs
return len(self.once_infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.once_infos)
info = copy.deepcopy(self.once_infos[index])
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
# add shift coordinate
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
if self.dataset_cfg.get('POINT_PAINTING', False):
points = self.point_painting(points, info)
input_dict = {
'db_flag': "once",
'points': points,
'frame_id': frame_id,
}
if 'annos' in info:
annos = info['annos']
input_dict.update({
'gt_names': annos['name'],
'gt_boxes': annos['boxes_3d'],
'num_points_in_gt': annos.get('num_points_in_gt', None)
})
# add shift coordinate
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
data_dict = self.prepare_data(data_dict=input_dict)
data_dict.pop('num_points_in_gt', None)
return data_dict
def get_infos(self, num_workers=4, sample_seq_list=None):
import concurrent.futures as futures
import json
root_path = self.root_path
cam_names = self.cam_names
"""
# dataset json format
{
'meta_info':
'calib': {
'cam01': {
'cam_to_velo': list
'cam_intrinsic': list
'distortion': list
}
...
}
'frames': [
{
'frame_id': timestamp,
'annos': {
'names': list
'boxes_3d': list of list
'boxes_2d': {
'cam01': list of list
...
}
}
'pose': list
},
...
]
}
# open pcdet format
{
'meta_info':
'sequence_id': seq_idx
'frame_id': timestamp
'timestamp': timestamp
'lidar': path
'cam01': path
...
'calib': {
'cam01': {
'cam_to_velo': np.array
'cam_intrinsic': np.array
'distortion': np.array
}
...
}
'pose': np.array
'annos': {
'name': np.array
'boxes_3d': np.array
'boxes_2d': {
'cam01': np.array
....
}
}
}
"""
def process_single_sequence(seq_idx):
print('%s seq_idx: %s' % (self.split, seq_idx))
seq_infos = []
seq_path = Path(root_path) / 'data' / seq_idx
json_path = seq_path / ('%s.json' % seq_idx)
with open(json_path, 'r') as f:
info_this_seq = json.load(f)
meta_info = info_this_seq['meta_info']
calib = info_this_seq['calib']
for f_idx, frame in enumerate(info_this_seq['frames']):
frame_id = frame['frame_id']
if f_idx == 0:
prev_id = None
else:
prev_id = info_this_seq['frames'][f_idx-1]['frame_id']
if f_idx == len(info_this_seq['frames'])-1:
next_id = None
else:
next_id = info_this_seq['frames'][f_idx+1]['frame_id']
pc_path = str(seq_path / 'lidar_roof' / ('%s.bin' % frame_id))
pose = np.array(frame['pose'])
frame_dict = {
'sequence_id': seq_idx,
'frame_id': frame_id,
'timestamp': int(frame_id),
'prev_id': prev_id,
'next_id': next_id,
'meta_info': meta_info,
'lidar': pc_path,
'pose': pose
}
calib_dict = {}
for cam_name in cam_names:
cam_path = str(seq_path / cam_name / ('%s.jpg' % frame_id))
frame_dict.update({cam_name: cam_path})
calib_dict[cam_name] = {}
calib_dict[cam_name]['cam_to_velo'] = np.array(calib[cam_name]['cam_to_velo'])
calib_dict[cam_name]['cam_intrinsic'] = np.array(calib[cam_name]['cam_intrinsic'])
calib_dict[cam_name]['distortion'] = np.array(calib[cam_name]['distortion'])
frame_dict.update({'calib': calib_dict})
if 'annos' in frame:
annos = frame['annos']
boxes_3d = np.array(annos['boxes_3d'])
if boxes_3d.shape[0] == 0:
print(frame_id)
continue
boxes_2d_dict = {}
for cam_name in cam_names:
boxes_2d_dict[cam_name] = np.array(annos['boxes_2d'][cam_name])
annos_dict = {
'name': np.array(annos['names']),
'boxes_3d': boxes_3d,
'boxes_2d': boxes_2d_dict
}
points = self.get_lidar(seq_idx, frame_id)
corners_lidar = box_utils.boxes_to_corners_3d(np.array(annos['boxes_3d']))
num_gt = boxes_3d.shape[0]
num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
for k in range(num_gt):
flag = box_utils.in_hull(points[:, 0:3], corners_lidar[k])
num_points_in_gt[k] = flag.sum()
annos_dict['num_points_in_gt'] = num_points_in_gt
frame_dict.update({'annos': annos_dict})
seq_infos.append(frame_dict)
return seq_infos
sample_seq_list = sample_seq_list if sample_seq_list is not None else self.sample_seq_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_sequence, sample_seq_list)
all_infos = []
for info in infos:
all_infos.extend(info)
return all_infos
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
import torch
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('once_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
if 'annos' not in infos[k]:
continue
print('gt_database sample: %d' % (k + 1))
info = infos[k]
frame_id = info['frame_id']
seq_id = info['sequence_id']
points = self.get_lidar(seq_id, frame_id)
annos = info['annos']
names = annos['name']
gt_boxes = annos['boxes_3d']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%d.bin' % (frame_id, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
                with open(filepath, 'wb') as f:  # binary mode for tofile
gt_points.tofile(f)
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_3d': np.zeros((num_samples, 7))
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
if self.dataset_cfg.get('SHIFT_COOR', None):
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_3d'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
raise NotImplementedError
return annos
def evaluation(self, det_annos, class_names, **kwargs):
from .once_eval.evaluation import get_evaluation_results
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.once_infos]
ap_result_str, ap_dict = get_evaluation_results(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def create_once_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
dataset = ONCEDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
splits = ['train', 'val', 'test', 'raw_small', 'raw_medium', 'raw_large']
    # skip the held-out test split and the unlabeled raw_small/raw_medium/raw_large splits
ignore = ['test', 'raw_small', 'raw_medium', 'raw_large']
print('---------------Start to generate data infos---------------')
for split in splits:
if split in ignore:
continue
filename = 'once_infos_%s.pkl' % split
filename = save_path / Path(filename)
dataset.set_split(split)
once_infos = dataset.get_infos(num_workers=workers)
with open(filename, 'wb') as f:
pickle.dump(once_infos, f)
print('ONCE info %s file is saved to %s' % (split, filename))
train_filename = save_path / 'once_infos_train.pkl'
print('---------------Start create groundtruth database for data augmentation---------------')
dataset.set_split('train')
dataset.create_groundtruth_database(train_filename, split='train')
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
    parser.add_argument('--func', type=str, default='create_once_infos', help='')
parser.add_argument('--runs_on', type=str, default='server', help='')
args = parser.parse_args()
if args.func == 'create_once_infos':
import yaml
from pathlib import Path
from easydict import EasyDict
        dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file)))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
once_data_path = ROOT_DIR / 'data' / 'once'
once_save_path = ROOT_DIR / 'data' / 'once'
if args.runs_on == 'cloud':
once_data_path = Path('/cache/once/')
once_save_path = Path('/cache/once/')
dataset_cfg.DATA_PATH = dataset_cfg.CLOUD_DATA_PATH
create_once_infos(
dataset_cfg=dataset_cfg,
class_names=['Car', 'Bus', 'Truck', 'Pedestrian', 'Bicycle'],
data_path=once_data_path,
save_path=once_save_path
)
| 20,079
| 39.89613
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/once_eval/iou_utils.py
|
"""
Rotate IoU computation is referred from https://github.com/hongzhenwang/RRPN-revise
"""
import math
import numba
import numpy as np
from numba import cuda
@numba.jit(nopython=True)
def div_up(m, n):
return m // n + (m % n > 0)
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def trangle_area(a, b, c):
return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) *
(b[0] - c[0])) / 2.0
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
area_val = 0.0
for i in range(num_of_inter - 2):
area_val += abs(
trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4],
int_pts[2 * i + 4:2 * i + 6]))
return area_val
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def sort_vertex_in_convex_polygon(int_pts, num_of_inter):
if num_of_inter > 0:
center = cuda.local.array((2,), dtype=numba.float32)
center[:] = 0.0
for i in range(num_of_inter):
center[0] += int_pts[2 * i]
center[1] += int_pts[2 * i + 1]
center[0] /= num_of_inter
center[1] /= num_of_inter
v = cuda.local.array((2,), dtype=numba.float32)
vs = cuda.local.array((16,), dtype=numba.float32)
for i in range(num_of_inter):
v[0] = int_pts[2 * i] - center[0]
v[1] = int_pts[2 * i + 1] - center[1]
d = math.sqrt(v[0] * v[0] + v[1] * v[1])
v[0] = v[0] / d
v[1] = v[1] / d
if v[1] < 0:
v[0] = -2 - v[0]
vs[i] = v[0]
j = 0
temp = 0
for i in range(1, num_of_inter):
if vs[i - 1] > vs[i]:
temp = vs[i]
tx = int_pts[2 * i]
ty = int_pts[2 * i + 1]
j = i
while j > 0 and vs[j - 1] > temp:
vs[j] = vs[j - 1]
int_pts[j * 2] = int_pts[j * 2 - 2]
int_pts[j * 2 + 1] = int_pts[j * 2 - 1]
j -= 1
vs[j] = temp
int_pts[j * 2] = tx
int_pts[j * 2 + 1] = ty
@cuda.jit(
'(float32[:], float32[:], int32, int32, float32[:])',
device=True,
inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
A = cuda.local.array((2,), dtype=numba.float32)
B = cuda.local.array((2,), dtype=numba.float32)
C = cuda.local.array((2,), dtype=numba.float32)
D = cuda.local.array((2,), dtype=numba.float32)
A[0] = pts1[2 * i]
A[1] = pts1[2 * i + 1]
B[0] = pts1[2 * ((i + 1) % 4)]
B[1] = pts1[2 * ((i + 1) % 4) + 1]
C[0] = pts2[2 * j]
C[1] = pts2[2 * j + 1]
D[0] = pts2[2 * ((j + 1) % 4)]
D[1] = pts2[2 * ((j + 1) % 4) + 1]
BA0 = B[0] - A[0]
BA1 = B[1] - A[1]
DA0 = D[0] - A[0]
CA0 = C[0] - A[0]
DA1 = D[1] - A[1]
CA1 = C[1] - A[1]
acd = DA1 * CA0 > CA1 * DA0
bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0])
if acd != bcd:
abc = CA1 * BA0 > BA1 * CA0
abd = DA1 * BA0 > BA1 * DA0
if abc != abd:
DC0 = D[0] - C[0]
DC1 = D[1] - C[1]
ABBA = A[0] * B[1] - B[0] * A[1]
CDDC = C[0] * D[1] - D[0] * C[1]
DH = BA1 * DC0 - BA0 * DC1
Dx = ABBA * DC0 - BA0 * CDDC
Dy = ABBA * DC1 - BA1 * CDDC
temp_pts[0] = Dx / DH
temp_pts[1] = Dy / DH
return True
return False
@cuda.jit(
'(float32[:], float32[:], int32, int32, float32[:])',
device=True,
inline=True)
def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts):
a = cuda.local.array((2,), dtype=numba.float32)
b = cuda.local.array((2,), dtype=numba.float32)
c = cuda.local.array((2,), dtype=numba.float32)
d = cuda.local.array((2,), dtype=numba.float32)
a[0] = pts1[2 * i]
a[1] = pts1[2 * i + 1]
b[0] = pts1[2 * ((i + 1) % 4)]
b[1] = pts1[2 * ((i + 1) % 4) + 1]
c[0] = pts2[2 * j]
c[1] = pts2[2 * j + 1]
d[0] = pts2[2 * ((j + 1) % 4)]
d[1] = pts2[2 * ((j + 1) % 4) + 1]
area_abc = trangle_area(a, b, c)
area_abd = trangle_area(a, b, d)
if area_abc * area_abd >= 0:
return False
area_cda = trangle_area(c, d, a)
area_cdb = area_cda + area_abc - area_abd
if area_cda * area_cdb >= 0:
return False
t = area_cda / (area_abd - area_abc)
dx = t * (b[0] - a[0])
dy = t * (b[1] - a[1])
temp_pts[0] = a[0] + dx
temp_pts[1] = a[1] + dy
return True
"""
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
ab0 = corners[2] - corners[0]
ab1 = corners[3] - corners[1]
ad0 = corners[6] - corners[0]
ad1 = corners[7] - corners[1]
ap0 = pt_x - corners[0]
ap1 = pt_y - corners[1]
abab = ab0 * ab0 + ab1 * ab1
abap = ab0 * ap0 + ab1 * ap1
adad = ad0 * ad0 + ad1 * ad1
adap = ad0 * ap0 + ad1 * ap1
return abab >= abap and abap >= 0 and adad >= adap and adap >= 0
"""
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
PA0 = corners[0] - pt_x
PA1 = corners[1] - pt_y
PB0 = corners[2] - pt_x
PB1 = corners[3] - pt_y
PC0 = corners[4] - pt_x
PC1 = corners[5] - pt_y
PD0 = corners[6] - pt_x
PD1 = corners[7] - pt_y
PAB = PA0 * PB1 - PB0 * PA1
PBC = PB0 * PC1 - PC0 * PB1
PCD = PC0 * PD1 - PD0 * PC1
PDA = PD0 * PA1 - PA0 * PD1
return PAB >= 0 and PBC >= 0 and PCD >= 0 and PDA >= 0 or \
PAB <= 0 and PBC <= 0 and PCD <= 0 and PDA <= 0
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def quadrilateral_intersection(pts1, pts2, int_pts):
num_of_inter = 0
for i in range(4):
if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2):
int_pts[num_of_inter * 2] = pts1[2 * i]
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]
num_of_inter += 1
if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1):
int_pts[num_of_inter * 2] = pts2[2 * i]
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]
num_of_inter += 1
temp_pts = cuda.local.array((2,), dtype=numba.float32)
for i in range(4):
for j in range(4):
has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)
if has_pts:
int_pts[num_of_inter * 2] = temp_pts[0]
int_pts[num_of_inter * 2 + 1] = temp_pts[1]
num_of_inter += 1
return num_of_inter
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
# generate clockwise corners and rotate it clockwise
angle = rbbox[4]
a_cos = math.cos(angle)
a_sin = math.sin(angle)
center_x = rbbox[0]
center_y = rbbox[1]
x_d = rbbox[2]
y_d = rbbox[3]
corners_x = cuda.local.array((4,), dtype=numba.float32)
corners_y = cuda.local.array((4,), dtype=numba.float32)
corners_x[0] = -x_d / 2
corners_x[1] = -x_d / 2
corners_x[2] = x_d / 2
corners_x[3] = x_d / 2
corners_y[0] = -y_d / 2
corners_y[1] = y_d / 2
corners_y[2] = y_d / 2
corners_y[3] = -y_d / 2
for i in range(4):
corners[2 *
i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x
corners[2 * i
+ 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def inter(rbbox1, rbbox2):
corners1 = cuda.local.array((8,), dtype=numba.float32)
corners2 = cuda.local.array((8,), dtype=numba.float32)
intersection_corners = cuda.local.array((16,), dtype=numba.float32)
rbbox_to_corners(corners1, rbbox1)
rbbox_to_corners(corners2, rbbox2)
num_intersection = quadrilateral_intersection(corners1, corners2,
intersection_corners)
sort_vertex_in_convex_polygon(intersection_corners, num_intersection)
# print(intersection_corners.reshape([-1, 2])[:num_intersection])
return area(intersection_corners, num_intersection)
@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True)
def devRotateIoUEval(rbox1, rbox2, criterion=-1):
area1 = rbox1[2] * rbox1[3]
area2 = rbox2[2] * rbox2[3]
area_inter = inter(rbox1, rbox2)
if criterion == -1:
return area_inter / (area1 + area2 - area_inter)
elif criterion == 0:
return area_inter / area1
elif criterion == 1:
return area_inter / area2
else:
return area_inter
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1):
threadsPerBlock = 8 * 8
row_start = cuda.blockIdx.x
col_start = cuda.blockIdx.y
tx = cuda.threadIdx.x
row_size = min(N - row_start * threadsPerBlock, threadsPerBlock)
col_size = min(K - col_start * threadsPerBlock, threadsPerBlock)
block_boxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32)
block_qboxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32)
dev_query_box_idx = threadsPerBlock * col_start + tx
dev_box_idx = threadsPerBlock * row_start + tx
if (tx < col_size):
block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0]
block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1]
block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2]
block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3]
block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4]
if (tx < row_size):
block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0]
block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1]
block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2]
block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3]
block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4]
cuda.syncthreads()
if tx < row_size:
for i in range(col_size):
offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i
dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5],
block_boxes[tx * 5:tx * 5 + 5], criterion)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
"""rotated box iou running in gpu. 500x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/pcdet/rotation).
Args:
boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
angles(clockwise when positive)
query_boxes (float tensor: [K, 5]): [description]
device_id (int, optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
box_dtype = boxes.dtype
boxes = boxes.astype(np.float32)
query_boxes = query_boxes.astype(np.float32)
N = boxes.shape[0]
K = query_boxes.shape[0]
iou = np.zeros((N, K), dtype=np.float32)
if N == 0 or K == 0:
return iou
threadsPerBlock = 8 * 8
cuda.select_device(device_id)
blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
iou_dev = cuda.to_device(iou.reshape([-1]), stream)
rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](
N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
    return iou.astype(box_dtype)  # restore the caller's original dtype
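# A minimal sanity-check sketch for rotate_iou_gpu_eval (hedged: requires a
# CUDA device; the box values are illustrative only).
def _rotate_iou_demo():
    boxes = np.array([[0.0, 0.0, 4.0, 2.0, 0.0]], dtype=np.float32)  # (cx, cy, dx, dy, angle)
    query_boxes = np.array([
        [0.0, 0.0, 4.0, 2.0, 0.0],   # identical box -> IoU ~ 1.0
        [10.0, 0.0, 4.0, 2.0, 0.0],  # disjoint box  -> IoU 0.0
    ], dtype=np.float32)
    return rotate_iou_gpu_eval(boxes, query_boxes)  # shape (1, 2)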
| 12,048
| 33.924638
| 95
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/once_eval/evaluation.py
|
"""
Evaluation Server
Written by Jiageng Mao
"""
import numpy as np
import numba
from .iou_utils import rotate_iou_gpu_eval
from .eval_utils import compute_split_parts, overall_filter, distance_filter, overall_distance_filter
iou_threshold_dict = {
'Car': 0.7,
'Bus': 0.7,
'Truck': 0.7,
'Pedestrian': 0.3,
'Cyclist': 0.5
}
superclass_iou_threshold_dict = {
'Vehicle': 0.7,
'Pedestrian': 0.3,
'Cyclist': 0.5
}
def get_evaluation_results(gt_annos, pred_annos, classes,
use_superclass=True,
iou_thresholds=None,
num_pr_points=50,
difficulty_mode='Overall&Distance',
ap_with_heading=True,
num_parts=100,
print_ok=False
):
if iou_thresholds is None:
if use_superclass:
iou_thresholds = superclass_iou_threshold_dict
else:
iou_thresholds = iou_threshold_dict
assert len(gt_annos) == len(pred_annos), "the number of GT must match predictions"
assert difficulty_mode in ['Overall&Distance', 'Overall', 'Distance'], "difficulty mode is not supported"
if use_superclass:
if ('Car' in classes) or ('Bus' in classes) or ('Truck' in classes):
assert ('Car' in classes) and ('Bus' in classes) and ('Truck' in classes), "Car/Bus/Truck must all exist for vehicle detection"
classes = [cls_name for cls_name in classes if cls_name not in ['Car', 'Bus', 'Truck']]
classes.insert(0, 'Vehicle')
num_samples = len(gt_annos)
split_parts = compute_split_parts(num_samples, num_parts)
ious = compute_iou3d(gt_annos, pred_annos, split_parts, with_heading=ap_with_heading)
num_classes = len(classes)
if difficulty_mode == 'Distance':
num_difficulties = 3
difficulty_types = ['0-30m', '30-50m', '50m-inf']
elif difficulty_mode == 'Overall':
num_difficulties = 1
difficulty_types = ['overall']
elif difficulty_mode == 'Overall&Distance':
num_difficulties = 4
difficulty_types = ['overall', '0-30m', '30-50m', '50m-inf']
else:
raise NotImplementedError
precision = np.zeros([num_classes, num_difficulties, num_pr_points+1])
recall = np.zeros([num_classes, num_difficulties, num_pr_points+1])
for cls_idx, cur_class in enumerate(classes):
iou_threshold = iou_thresholds[cur_class]
for diff_idx in range(num_difficulties):
### filter data & determine score thresholds on p-r curve ###
accum_all_scores, gt_flags, pred_flags = [], [], []
num_valid_gt = 0
for sample_idx in range(num_samples):
gt_anno = gt_annos[sample_idx]
pred_anno = pred_annos[sample_idx]
pred_score = pred_anno['score']
iou = ious[sample_idx]
gt_flag, pred_flag = filter_data(gt_anno, pred_anno, difficulty_mode,
difficulty_level=diff_idx, class_name=cur_class, use_superclass=use_superclass)
gt_flags.append(gt_flag)
pred_flags.append(pred_flag)
num_valid_gt += sum(gt_flag == 0)
accum_scores = accumulate_scores(iou, pred_score, gt_flag, pred_flag,
iou_threshold=iou_threshold)
accum_all_scores.append(accum_scores)
all_scores = np.concatenate(accum_all_scores, axis=0)
thresholds = get_thresholds(all_scores, num_valid_gt, num_pr_points=num_pr_points)
### compute tp/fp/fn ###
confusion_matrix = np.zeros([len(thresholds), 3]) # only record tp/fp/fn
for sample_idx in range(num_samples):
pred_score = pred_annos[sample_idx]['score']
iou = ious[sample_idx]
gt_flag, pred_flag = gt_flags[sample_idx], pred_flags[sample_idx]
for th_idx, score_th in enumerate(thresholds):
tp, fp, fn = compute_statistics(iou, pred_score, gt_flag, pred_flag,
score_threshold=score_th, iou_threshold=iou_threshold)
confusion_matrix[th_idx, 0] += tp
confusion_matrix[th_idx, 1] += fp
confusion_matrix[th_idx, 2] += fn
### draw p-r curve ###
for th_idx in range(len(thresholds)):
recall[cls_idx, diff_idx, th_idx] = confusion_matrix[th_idx, 0] / \
(confusion_matrix[th_idx, 0] + confusion_matrix[th_idx, 2])
precision[cls_idx, diff_idx, th_idx] = confusion_matrix[th_idx, 0] / \
(confusion_matrix[th_idx, 0] + confusion_matrix[th_idx, 1])
for th_idx in range(len(thresholds)):
precision[cls_idx, diff_idx, th_idx] = np.max(
precision[cls_idx, diff_idx, th_idx:], axis=-1)
recall[cls_idx, diff_idx, th_idx] = np.max(
recall[cls_idx, diff_idx, th_idx:], axis=-1)
AP = 0
for i in range(1, precision.shape[-1]):
AP += precision[..., i]
AP = AP / num_pr_points * 100
ret_dict = {}
ret_str = "\n|AP@%-9s|" % (str(num_pr_points))
for diff_type in difficulty_types:
ret_str += '%-12s|' % diff_type
ret_str += '\n'
for cls_idx, cur_class in enumerate(classes):
ret_str += "|%-12s|" % cur_class
for diff_idx in range(num_difficulties):
diff_type = difficulty_types[diff_idx]
key = 'AP_' + cur_class + '/' + diff_type
ap_score = AP[cls_idx,diff_idx]
ret_dict[key] = ap_score
ret_str += "%-12.2f|" % ap_score
ret_str += "\n"
mAP = np.mean(AP, axis=0)
ret_str += "|%-12s|" % 'mAP'
for diff_idx in range(num_difficulties):
diff_type = difficulty_types[diff_idx]
key = 'AP_mean' + '/' + diff_type
ap_score = mAP[diff_idx]
ret_dict[key] = ap_score
ret_str += "%-12.2f|" % ap_score
ret_str += "\n"
if print_ok:
print(ret_str)
return ret_str, ret_dict
@numba.jit(nopython=True)
def get_thresholds(scores, num_gt, num_pr_points):
eps = 1e-6
scores.sort()
scores = scores[::-1]
recall_level = 0
thresholds = []
for i, score in enumerate(scores):
l_recall = (i + 1) / num_gt
if i < (len(scores) - 1):
r_recall = (i + 2) / num_gt
else:
r_recall = l_recall
if (r_recall + l_recall < 2 * recall_level) and i < (len(scores) - 1):
continue
thresholds.append(score)
recall_level += 1 / num_pr_points
# avoid numerical errors
# while r_recall + l_recall >= 2 * recall_level:
while r_recall + l_recall + eps > 2 * recall_level:
thresholds.append(score)
recall_level += 1 / num_pr_points
return thresholds
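# A minimal sketch of the threshold sampling above (hedged: toy scores; the
# thresholds are the detection scores at roughly evenly spaced recall levels,
# e.g. [0.9, 0.8, 0.6] for this input):
def _get_thresholds_demo():
    scores = np.array([0.9, 0.8, 0.7, 0.6])
    return get_thresholds(scores.copy(), num_gt=4, num_pr_points=2)  # copy: sort() mutates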
@numba.jit(nopython=True)
def accumulate_scores(iou, pred_scores, gt_flag, pred_flag, iou_threshold):
num_gt = iou.shape[0]
num_pred = iou.shape[1]
assigned = np.full(num_pred, False)
accum_scores = np.zeros(num_gt)
accum_idx = 0
for i in range(num_gt):
if gt_flag[i] == -1: # not the same class
continue
det_idx = -1
detected_score = -1
for j in range(num_pred):
if pred_flag[j] == -1: # not the same class
continue
if assigned[j]:
continue
iou_ij = iou[i, j]
pred_score = pred_scores[j]
if (iou_ij > iou_threshold) and (pred_score > detected_score):
det_idx = j
detected_score = pred_score
if (detected_score == -1) and (gt_flag[i] == 0): # false negative
pass
elif (detected_score != -1) and (gt_flag[i] == 1 or pred_flag[det_idx] == 1): # ignore
assigned[det_idx] = True
elif detected_score != -1: # true positive
accum_scores[accum_idx] = pred_scores[det_idx]
accum_idx += 1
assigned[det_idx] = True
return accum_scores[:accum_idx]
@numba.jit(nopython=True)
def compute_statistics(iou, pred_scores, gt_flag, pred_flag, score_threshold, iou_threshold):
num_gt = iou.shape[0]
num_pred = iou.shape[1]
assigned = np.full(num_pred, False)
under_threshold = pred_scores < score_threshold
tp, fp, fn = 0, 0, 0
for i in range(num_gt):
if gt_flag[i] == -1: # different classes
continue
det_idx = -1
detected = False
best_matched_iou = 0
gt_assigned_to_ignore = False
for j in range(num_pred):
if pred_flag[j] == -1: # different classes
continue
if assigned[j]: # already assigned to other GT
continue
if under_threshold[j]: # compute only boxes above threshold
continue
iou_ij = iou[i, j]
if (iou_ij > iou_threshold) and (iou_ij > best_matched_iou or gt_assigned_to_ignore) and pred_flag[j] == 0:
best_matched_iou = iou_ij
det_idx = j
detected = True
gt_assigned_to_ignore = False
elif (iou_ij > iou_threshold) and (not detected) and pred_flag[j] == 1:
det_idx = j
detected = True
gt_assigned_to_ignore = True
if (not detected) and gt_flag[i] == 0: # false negative
fn += 1
elif detected and (gt_flag[i] == 1 or pred_flag[det_idx] == 1): # ignore
assigned[det_idx] = True
elif detected: # true positive
tp += 1
assigned[det_idx] = True
for j in range(num_pred):
if not (assigned[j] or pred_flag[j] == -1 or pred_flag[j] == 1 or under_threshold[j]):
fp += 1
return tp, fp, fn
def filter_data(gt_anno, pred_anno, difficulty_mode, difficulty_level, class_name, use_superclass):
"""
Filter data by class name and difficulty
Args:
gt_anno:
pred_anno:
difficulty_mode:
difficulty_level:
class_name:
Returns:
gt_flags/pred_flags:
1 : same class but ignored with different difficulty levels
0 : accepted
-1 : rejected with different classes
"""
num_gt = len(gt_anno['name'])
gt_flag = np.zeros(num_gt, dtype=np.int64)
if use_superclass:
if class_name == 'Vehicle':
reject = np.logical_or(gt_anno['name']=='Pedestrian', gt_anno['name']=='Cyclist')
else:
reject = gt_anno['name'] != class_name
else:
reject = gt_anno['name'] != class_name
gt_flag[reject] = -1
num_pred = len(pred_anno['name'])
pred_flag = np.zeros(num_pred, dtype=np.int64)
if use_superclass:
if class_name == 'Vehicle':
reject = np.logical_or(pred_anno['name']=='Pedestrian', pred_anno['name']=='Cyclist')
else:
reject = pred_anno['name'] != class_name
else:
reject = pred_anno['name'] != class_name
pred_flag[reject] = -1
if difficulty_mode == 'Overall':
ignore = overall_filter(gt_anno['boxes_3d'])
gt_flag[ignore] = 1
ignore = overall_filter(pred_anno['boxes_3d'])
pred_flag[ignore] = 1
elif difficulty_mode == 'Distance':
ignore = distance_filter(gt_anno['boxes_3d'], difficulty_level)
gt_flag[ignore] = 1
ignore = distance_filter(pred_anno['boxes_3d'], difficulty_level)
pred_flag[ignore] = 1
elif difficulty_mode == 'Overall&Distance':
ignore = overall_distance_filter(gt_anno['boxes_3d'], difficulty_level)
gt_flag[ignore] = 1
ignore = overall_distance_filter(pred_anno['boxes_3d'], difficulty_level)
pred_flag[ignore] = 1
else:
raise NotImplementedError
return gt_flag, pred_flag
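# A minimal sketch of the flag semantics (hedged: illustrative values; in
# 'Overall' mode nothing is ignored, so only the 0/-1 flags appear here):
def _filter_data_demo():
    anno = {'name': np.array(['Car', 'Pedestrian']),
            'boxes_3d': np.zeros((2, 7), dtype=np.float32)}
    gt_flag, pred_flag = filter_data(anno, anno, difficulty_mode='Overall',
                                     difficulty_level=0, class_name='Pedestrian',
                                     use_superclass=False)
    return gt_flag, pred_flag  # both array([-1, 0]): Car rejected, Pedestrian accepted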
def iou3d_kernel(gt_boxes, pred_boxes):
"""
Core iou3d computation (with cuda)
Args:
gt_boxes: [N, 7] (x, y, z, w, l, h, rot) in Lidar coordinates
pred_boxes: [M, 7]
Returns:
iou3d: [N, M]
"""
intersection_2d = rotate_iou_gpu_eval(gt_boxes[:, [0, 1, 3, 4, 6]], pred_boxes[:, [0, 1, 3, 4, 6]], criterion=2)
gt_max_h = gt_boxes[:, [2]] + gt_boxes[:, [5]] * 0.5
gt_min_h = gt_boxes[:, [2]] - gt_boxes[:, [5]] * 0.5
pred_max_h = pred_boxes[:, [2]] + pred_boxes[:, [5]] * 0.5
pred_min_h = pred_boxes[:, [2]] - pred_boxes[:, [5]] * 0.5
max_of_min = np.maximum(gt_min_h, pred_min_h.T)
min_of_max = np.minimum(gt_max_h, pred_max_h.T)
inter_h = min_of_max - max_of_min
inter_h[inter_h <= 0] = 0
#inter_h[intersection_2d <= 0] = 0
intersection_3d = intersection_2d * inter_h
gt_vol = gt_boxes[:, [3]] * gt_boxes[:, [4]] * gt_boxes[:, [5]]
pred_vol = pred_boxes[:, [3]] * pred_boxes[:, [4]] * pred_boxes[:, [5]]
union_3d = gt_vol + pred_vol.T - intersection_3d
#eps = 1e-6
#union_3d[union_3d<eps] = eps
iou3d = intersection_3d / union_3d
return iou3d
def iou3d_kernel_with_heading(gt_boxes, pred_boxes):
"""
Core iou3d computation (with cuda)
Args:
gt_boxes: [N, 7] (x, y, z, w, l, h, rot) in Lidar coordinates
pred_boxes: [M, 7]
Returns:
iou3d: [N, M]
"""
intersection_2d = rotate_iou_gpu_eval(gt_boxes[:, [0, 1, 3, 4, 6]], pred_boxes[:, [0, 1, 3, 4, 6]], criterion=2)
gt_max_h = gt_boxes[:, [2]] + gt_boxes[:, [5]] * 0.5
gt_min_h = gt_boxes[:, [2]] - gt_boxes[:, [5]] * 0.5
pred_max_h = pred_boxes[:, [2]] + pred_boxes[:, [5]] * 0.5
pred_min_h = pred_boxes[:, [2]] - pred_boxes[:, [5]] * 0.5
max_of_min = np.maximum(gt_min_h, pred_min_h.T)
min_of_max = np.minimum(gt_max_h, pred_max_h.T)
inter_h = min_of_max - max_of_min
inter_h[inter_h <= 0] = 0
#inter_h[intersection_2d <= 0] = 0
intersection_3d = intersection_2d * inter_h
gt_vol = gt_boxes[:, [3]] * gt_boxes[:, [4]] * gt_boxes[:, [5]]
pred_vol = pred_boxes[:, [3]] * pred_boxes[:, [4]] * pred_boxes[:, [5]]
union_3d = gt_vol + pred_vol.T - intersection_3d
#eps = 1e-6
#union_3d[union_3d<eps] = eps
iou3d = intersection_3d / union_3d
# rotation orientation filtering
diff_rot = gt_boxes[:, [6]] - pred_boxes[:, [6]].T
diff_rot = np.abs(diff_rot)
reverse_diff_rot = 2 * np.pi - diff_rot
diff_rot[diff_rot >= np.pi] = reverse_diff_rot[diff_rot >= np.pi] # constrain to [0-pi]
iou3d[diff_rot > np.pi/2] = 0 # unmatched if diff_rot > 90
return iou3d
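# A minimal sanity-check sketch for the heading filter (hedged: requires a
# CUDA device via rotate_iou_gpu_eval; box values are illustrative):
def _iou3d_heading_demo():
    box = np.array([[0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.0]], dtype=np.float32)
    flipped = box.copy()
    flipped[0, 6] = np.pi  # same extent, opposite heading
    same = iou3d_kernel_with_heading(box, box)          # ~ 1.0
    opposite = iou3d_kernel_with_heading(box, flipped)  # 0.0, zeroed by the heading filter
    return same, opposite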
def compute_iou3d(gt_annos, pred_annos, split_parts, with_heading):
"""
Compute iou3d of all samples by parts
Args:
with_heading: filter with heading
gt_annos: list of dicts for each sample
pred_annos:
split_parts: for part-based iou computation
Returns:
ious: list of iou arrays for each sample
"""
gt_num_per_sample = np.stack([len(anno["name"]) for anno in gt_annos], 0)
pred_num_per_sample = np.stack([len(anno["name"]) for anno in pred_annos], 0)
ious = []
sample_idx = 0
for num_part_samples in split_parts:
gt_annos_part = gt_annos[sample_idx:sample_idx + num_part_samples]
pred_annos_part = pred_annos[sample_idx:sample_idx + num_part_samples]
gt_boxes = np.concatenate([anno["boxes_3d"] for anno in gt_annos_part], 0)
pred_boxes = np.concatenate([anno["boxes_3d"] for anno in pred_annos_part], 0)
if with_heading:
iou3d_part = iou3d_kernel_with_heading(gt_boxes, pred_boxes)
else:
iou3d_part = iou3d_kernel(gt_boxes, pred_boxes)
gt_num_idx, pred_num_idx = 0, 0
for idx in range(num_part_samples):
gt_box_num = gt_num_per_sample[sample_idx + idx]
pred_box_num = pred_num_per_sample[sample_idx + idx]
ious.append(iou3d_part[gt_num_idx: gt_num_idx + gt_box_num, pred_num_idx: pred_num_idx+pred_box_num])
gt_num_idx += gt_box_num
pred_num_idx += pred_box_num
sample_idx += num_part_samples
return ious
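# A minimal end-to-end sketch for compute_iou3d (hedged: requires a CUDA
# device; two identical single-box samples with illustrative values):
def _compute_iou3d_demo():
    box = np.array([[0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.0]], dtype=np.float32)
    annos = [{'name': np.array(['Car']), 'boxes_3d': box}] * 2
    split_parts = compute_split_parts(num_samples=2, num_parts=1)
    return compute_iou3d(annos, annos, split_parts, with_heading=True)  # two (1, 1) arrays of ~1.0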
| 16,368
| 37.881235
| 139
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/once/once_eval/eval_utils.py
|
import numpy as np
def compute_split_parts(num_samples, num_parts):
part_samples = num_samples // num_parts
remain_samples = num_samples % num_parts
if part_samples == 0:
return [num_samples]
if remain_samples == 0:
return [part_samples] * num_parts
else:
return [part_samples] * num_parts + [remain_samples]
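# Example (illustrative): compute_split_parts(10, 3) -> [3, 3, 3, 1], i.e.
# num_parts full parts of num_samples // num_parts plus the remainder.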
def overall_filter(boxes):
    ignore = np.zeros(boxes.shape[0], dtype=bool)  # all false
return ignore
def distance_filter(boxes, level):
    ignore = np.ones(boxes.shape[0], dtype=bool)  # all true
dist = np.sqrt(np.sum(boxes[:, 0:3] * boxes[:, 0:3], axis=1))
if level == 0: # 0-30m
flag = dist < 30
elif level == 1: # 30-50m
flag = (dist >= 30) & (dist < 50)
elif level == 2: # 50m-inf
flag = dist >= 50
else:
assert False, 'level < 3 for distance metric, found level %s' % (str(level))
ignore[flag] = False
return ignore
def overall_distance_filter(boxes, level):
    ignore = np.ones(boxes.shape[0], dtype=bool)  # all true
dist = np.sqrt(np.sum(boxes[:, 0:3] * boxes[:, 0:3], axis=1))
if level == 0:
        flag = np.ones(boxes.shape[0], dtype=bool)
elif level == 1: # 0-30m
flag = dist < 30
elif level == 2: # 30-50m
flag = (dist >= 30) & (dist < 50)
elif level == 3: # 50m-inf
flag = dist >= 50
else:
assert False, 'level < 4 for overall & distance metric, found level %s' % (str(level))
ignore[flag] = False
return ignore
| 1,530
| 29.62
| 94
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/lyft/lyft_dataset_ada.py
|
import copy
import pickle
from pathlib import Path
import os
import io
import numpy as np
from tqdm import tqdm
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, box_utils
from ..dataset import DatasetTemplate
class ActiveLyftDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/Lyft'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, sample_info_path=None):
self.root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=self.root_path, logger=logger
)
if self.oss_path is not None:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
self.infos = []
self.include_lyft_data(self.mode, sample_info_path)
def include_lyft_data(self, mode, sample_info_path=None):
self.logger.info('Loading lyft dataset')
lyft_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
if sample_info_path is not None and str(sample_info_path).split(':')[0] != 's3':
info_path = sample_info_path
if not Path(info_path).exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
lyft_infos.extend(infos)
elif sample_info_path is not None and str(sample_info_path).split(':')[0] == 's3':
info_path = sample_info_path
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
lyft_infos.extend(infos)
elif self.oss_path is None:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
lyft_infos.extend(infos)
else:
info_path = os.path.join(self.oss_path, info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
lyft_infos.extend(infos)
self.infos.extend(lyft_infos)
self.logger.info('Total samples for lyft dataset: %d' % (len(lyft_infos)))
@staticmethod
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius*1.5) & (np.abs(points[:, 1]) < center_radius))
return points[mask]
def get_sweep(self, sweep_info):
if self.oss_path is None:
lidar_path = self.root_path / sweep_info['lidar_path']
points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1)
else:
lidar_path = os.path.join(self.oss_path, sweep_info['lidar_path'])
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points_sweep = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1)
if points_sweep.shape[0] % 5 != 0:
points_sweep = points_sweep[: points_sweep.shape[0] - (points_sweep.shape[0] % 5)]
points_sweep = points_sweep.reshape([-1, 5])[:, :4]
points_sweep = self.remove_ego_points(points_sweep).T
if sweep_info['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
return points_sweep.T, cur_times.T
def get_lidar_with_sweeps(self, index, max_sweeps=1):
info = self.infos[index]
if self.oss_path is None:
lidar_path = self.root_path / info['lidar_path']
points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1)
else:
lidar_path = os.path.join(self.oss_path, info['lidar_path'])
# print(lidar_path)
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1).copy()
if points.shape[0] % 5 != 0:
points = points[: points.shape[0] - (points.shape[0] % 5)]
points = points.reshape([-1, 5])[:, :4]
sweep_points_list = [points]
sweep_times_list = [np.zeros((points.shape[0], 1))]
for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
sweep_points_list.append(points_sweep)
sweep_times_list.append(times_sweep)
points = np.concatenate(sweep_points_list, axis=0)
times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
points = np.concatenate((points, times), axis=1)
return points
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "lyft",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']}
}
if 'gt_boxes' in info:
input_dict.update({
'gt_boxes': info['gt_boxes'],
'gt_names': info['gt_names']
})
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
if self.dataset_cfg.get('SETNAN_VELOCITY_TO_ZEROS', False) and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
gt_boxes = input_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
input_dict['gt_boxes'] = gt_boxes
if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in input_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
input_dict['gt_boxes'] = input_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6]]
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
if self.dataset_cfg.get('SHIFT_COOR', None):
#print ("*******WARNING FOR SHIFT_COOR:", self.dataset_cfg.SHIFT_COOR)
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
pred_dict['pred_labels'] = pred_labels
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
single_pred_dict['metadata'] = batch_dict['metadata'][index]
annos.append(single_pred_dict)
return annos
def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names):
from ..kitti.kitti_object_eval_python import eval as kitti_eval
from ..kitti import kitti_utils
map_name_to_kitti = {
'car': 'Car',
'pedestrian': 'Pedestrian',
'truck': 'Truck',
'bicycle': 'Cyclist',
'motorcycle': 'Cyclist'
}
def transform_to_kitti_format(annos, info_with_fakelidar=False, is_gt=False):
for anno in annos:
if 'name' not in anno:
anno['name'] = anno['gt_names']
anno.pop('gt_names')
for k in range(anno['name'].shape[0]):
if anno['name'][k] in map_name_to_kitti:
anno['name'][k] = map_name_to_kitti[anno['name'][k]]
else:
anno['name'][k] = 'Person_sitting'
if 'boxes_lidar' in anno:
gt_boxes_lidar = anno['boxes_lidar'].copy()
else:
gt_boxes_lidar = anno['gt_boxes'].copy()
# filter by range
if self.dataset_cfg.get('GT_FILTER', None) and \
self.dataset_cfg.GT_FILTER.RANGE_FILTER:
if self.dataset_cfg.GT_FILTER.get('RANGE', None):
point_cloud_range = self.dataset_cfg.GT_FILTER.RANGE
else:
point_cloud_range = self.point_cloud_range
point_cloud_range[2] = -10
point_cloud_range[5] = 10
mask = box_utils.mask_boxes_outside_range_numpy(gt_boxes_lidar,
point_cloud_range,
min_num_corners=1)
gt_boxes_lidar = gt_boxes_lidar[mask]
anno['name'] = anno['name'][mask]
if not is_gt:
anno['score'] = anno['score'][mask]
anno['pred_labels'] = anno['pred_labels'][mask]
# filter by fov
if is_gt and self.dataset_cfg.get('GT_FILTER', None):
if self.dataset_cfg.GT_FILTER.get('FOV_FILTER', None):
fov_gt_flag = self.extract_fov_gt(
gt_boxes_lidar, self.dataset_cfg['FOV_DEGREE'], self.dataset_cfg['FOV_ANGLE']
)
gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag]
anno['name'] = anno['name'][fov_gt_flag]
anno['bbox'] = np.zeros((len(anno['name']), 4))
anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]
anno['truncated'] = np.zeros(len(anno['name']))
anno['occluded'] = np.zeros(len(anno['name']))
if len(gt_boxes_lidar) > 0:
if info_with_fakelidar:
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)
gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2
anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))
anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar
anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar
anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar
dxdydz = gt_boxes_lidar[:, 3:6]
anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw
anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0
anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']
else:
anno['location'] = anno['dimensions'] = np.zeros((0, 3))
anno['rotation_y'] = anno['alpha'] = np.zeros(0)
transform_to_kitti_format(eval_det_annos)
transform_to_kitti_format(
eval_gt_annos,
info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False),
is_gt=True
)
kitti_class_names = [map_name_to_kitti[x] for x in class_names]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
)
return ap_result_str, ap_dict
def evaluation(self, det_annos, class_names, **kwargs):
if kwargs['eval_metric'] == 'kitti':
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = copy.deepcopy(self.infos)
return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names)
elif kwargs['eval_metric'] == 'lyft':
return self.lyft_eval(det_annos, class_names,
iou_thresholds=self.dataset_cfg.EVAL_LYFT_IOU_LIST)
else:
raise NotImplementedError
def lyft_eval(self, det_annos, class_names, iou_thresholds=[0.5]):
from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft
from . import lyft_utils
# from lyft_dataset_sdk.eval.detection.mAP_evaluation import get_average_precisions
from .lyft_mAP_eval.lyft_eval import get_average_precisions
lyft = Lyft(json_path=self.root_path / 'data', data_path=self.root_path, verbose=True)
det_lyft_boxes, sample_tokens = lyft_utils.convert_det_to_lyft_format(lyft, det_annos)
gt_lyft_boxes = lyft_utils.load_lyft_gt_by_tokens(lyft, sample_tokens)
average_precisions = get_average_precisions(gt_lyft_boxes, det_lyft_boxes, class_names, iou_thresholds)
ap_result_str, ap_dict = lyft_utils.format_lyft_results(average_precisions, class_names, iou_thresholds, version=self.dataset_cfg.VERSION)
return ap_result_str, ap_dict
def create_groundtruth_database(self, used_classes=None, max_sweeps=10):
import torch
        database_save_path = self.root_path / 'gt_database'
        db_info_save_path = self.root_path / f'lyft_dbinfos_{max_sweeps}sweeps.pkl'
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
for idx in tqdm(range(len(self.infos))):
sample_idx = idx
info = self.infos[idx]
points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps)
gt_boxes = info['gt_boxes']
gt_names = info['gt_names']
box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
).long().squeeze(dim=0).cpu().numpy()
for i in range(gt_boxes.shape[0]):
filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i)
filepath = database_save_path / filename
gt_points = points[box_idxs_of_pts == i]
gt_points[:, :3] -= gt_boxes[i, :3]
                with open(filepath, 'wb') as f:  # binary mode for tofile
gt_points.tofile(f)
if (used_classes is None) or gt_names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
if gt_names[i] in all_db_infos:
all_db_infos[gt_names[i]].append(db_info)
else:
all_db_infos[gt_names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
def create_lyft_info(version, data_path, save_path, split, max_sweeps=10):
from lyft_dataset_sdk.lyftdataset import LyftDataset
from . import lyft_utils
data_path = data_path / version
save_path = save_path / version
split_path = data_path.parent / 'ImageSets'
if split is not None:
save_path = save_path / split
split_path = split_path / split
save_path.mkdir(exist_ok=True)
assert version in ['trainval', 'one_scene', 'test']
if version == 'trainval':
train_split_path = split_path / 'train.txt'
val_split_path = split_path / 'val.txt'
elif version == 'test':
train_split_path = split_path / 'test.txt'
val_split_path = None
elif version == 'one_scene':
train_split_path = split_path / 'one_scene.txt'
val_split_path = split_path / 'one_scene.txt'
else:
raise NotImplementedError
train_scenes = [x.strip() for x in open(train_split_path).readlines()] if train_split_path.exists() else []
val_scenes = [x.strip() for x in open(val_split_path).readlines()] if val_split_path is not None and val_split_path.exists() else []
lyft = LyftDataset(json_path=data_path / 'data', data_path=data_path, verbose=True)
available_scenes = lyft_utils.get_available_scenes(lyft)
available_scene_names = [s['name'] for s in available_scenes]
train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes])
val_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes])
print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes)))
train_lyft_infos, val_lyft_infos = lyft_utils.fill_trainval_infos(
data_path=data_path, lyft=lyft, train_scenes=train_scenes, val_scenes=val_scenes,
test='test' in version, max_sweeps=max_sweeps
)
if version == 'test':
print('test sample: %d' % len(train_lyft_infos))
with open(save_path / f'lyft_infos_test.pkl', 'wb') as f:
pickle.dump(train_lyft_infos, f)
else:
print('train sample: %d, val sample: %d' % (len(train_lyft_infos), len(val_lyft_infos)))
with open(save_path / f'lyft_infos_train.pkl', 'wb') as f:
pickle.dump(train_lyft_infos, f)
with open(save_path / f'lyft_infos_val.pkl', 'wb') as f:
pickle.dump(val_lyft_infos, f)
if __name__ == '__main__':
import yaml
import argparse
from pathlib import Path
from easydict import EasyDict
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
parser.add_argument('--func', type=str, default='create_lyft_infos', help='')
parser.add_argument('--version', type=str, default='trainval', help='')
parser.add_argument('--split', type=str, default=None, help='')
parser.add_argument('--max_sweeps', type=int, default=10, help='')
args = parser.parse_args()
if args.func == 'create_lyft_infos':
        try:
            yaml_config = yaml.load(open(args.cfg_file), Loader=yaml.FullLoader)
        except AttributeError:  # older PyYAML without FullLoader
            yaml_config = yaml.safe_load(open(args.cfg_file))
dataset_cfg = EasyDict(yaml_config)
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
dataset_cfg.VERSION = args.version
dataset_cfg.MAX_SWEEPS = args.max_sweeps
create_lyft_info(
version=dataset_cfg.VERSION,
data_path=ROOT_DIR / 'data' / 'lyft',
save_path=ROOT_DIR / 'data' / 'lyft',
split=args.split,
max_sweeps=dataset_cfg.MAX_SWEEPS
)
lyft_dataset = ActiveLyftDataset(
dataset_cfg=dataset_cfg, class_names=None,
root_path=ROOT_DIR / 'data' / 'lyft',
logger=common_utils.create_logger(), training=True
)
if args.version != 'test':
lyft_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
| 22,729
| 44.009901
| 146
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/lyft/lyft_utils.py
|
"""
The Lyft data pre-processing and evaluation code is adapted from
https://github.com/poodarchu/Det3D
"""
import operator
from functools import reduce
from pathlib import Path
import numpy as np
import tqdm
from lyft_dataset_sdk.utils.data_classes import Box, Quaternion
from lyft_dataset_sdk.lyftdataset import LyftDataset
from lyft_dataset_sdk.utils.geometry_utils import transform_matrix
from lyft_dataset_sdk.eval.detection.mAP_evaluation import Box3D
def get_available_scenes(lyft):
available_scenes = []
print('total scene num:', len(lyft.scene))
for scene in lyft.scene:
scene_token = scene['token']
scene_rec = lyft.get('scene', scene_token)
sample_rec = lyft.get('sample', scene_rec['first_sample_token'])
sd_rec = lyft.get('sample_data', sample_rec['data']['LIDAR_TOP'])
has_more_frames = True
scene_not_exist = False
while has_more_frames:
lidar_path, boxes, _ = lyft.get_sample_data(sd_rec['token'])
if not Path(lidar_path).exists():
scene_not_exist = True
break
else:
break
# if not sd_rec['next'] == '':
# sd_rec = nusc.get('sample_data', sd_rec['next'])
# else:
# has_more_frames = False
if scene_not_exist:
continue
available_scenes.append(scene)
print('exist scene num:', len(available_scenes))
return available_scenes
def get_sample_data(lyft, sample_data_token):
sd_rec = lyft.get("sample_data", sample_data_token)
cs_rec = lyft.get("calibrated_sensor", sd_rec["calibrated_sensor_token"])
sensor_rec = lyft.get("sensor", cs_rec["sensor_token"])
pose_rec = lyft.get("ego_pose", sd_rec["ego_pose_token"])
boxes = lyft.get_boxes(sample_data_token)
box_list = []
for box in boxes:
box.translate(-np.array(pose_rec["translation"]))
box.rotate(Quaternion(pose_rec["rotation"]).inverse)
box.translate(-np.array(cs_rec["translation"]))
box.rotate(Quaternion(cs_rec["rotation"]).inverse)
box_list.append(box)
return box_list, pose_rec
def quaternion_yaw(q: Quaternion) -> float:
"""
Calculate the yaw angle from a quaternion.
Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
It does not work for a box in the camera frame.
:param q: Quaternion of interest.
:return: Yaw angle in radians.
"""
# Project into xy plane.
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
# Measure yaw using arctan.
yaw = np.arctan2(v[1], v[0])
return yaw
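# A minimal sanity-check sketch (hedged: illustrative value; Quaternion comes
# from the lyft_dataset_sdk import above):
def _quaternion_yaw_demo():
    q = Quaternion(axis=[0, 0, 1], angle=np.pi / 2)  # 90 degrees about +z
    return quaternion_yaw(q)  # ~= np.pi / 2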
def fill_trainval_infos(data_path, lyft, train_scenes, val_scenes, test=False, max_sweeps=10):
train_lyft_infos = []
val_lyft_infos = []
progress_bar = tqdm.tqdm(total=len(lyft.sample), desc='create_info', dynamic_ncols=True)
# ref_chans = ["LIDAR_TOP", "LIDAR_FRONT_LEFT", "LIDAR_FRONT_RIGHT"]
ref_chan = "LIDAR_TOP"
for index, sample in enumerate(lyft.sample):
progress_bar.update()
ref_info = {}
ref_sd_token = sample["data"][ref_chan]
ref_sd_rec = lyft.get("sample_data", ref_sd_token)
ref_cs_token = ref_sd_rec["calibrated_sensor_token"]
ref_cs_rec = lyft.get("calibrated_sensor", ref_cs_token)
ref_to_car = transform_matrix(
ref_cs_rec["translation"],
Quaternion(ref_cs_rec["rotation"]),
inverse=False,
)
ref_from_car = transform_matrix(
ref_cs_rec["translation"],
Quaternion(ref_cs_rec["rotation"]),
inverse=True,
)
ref_lidar_path = lyft.get_sample_data_path(ref_sd_token)
ref_boxes, ref_pose_rec = get_sample_data(lyft, ref_sd_token)
ref_time = 1e-6 * ref_sd_rec["timestamp"]
car_from_global = transform_matrix(
ref_pose_rec["translation"],
Quaternion(ref_pose_rec["rotation"]),
inverse=True,
)
car_to_global = transform_matrix(
ref_pose_rec["translation"],
Quaternion(ref_pose_rec["rotation"]),
inverse=False,
)
info = {
"lidar_path": Path(ref_lidar_path).relative_to(data_path).__str__(),
"ref_from_car": ref_from_car,
"ref_to_car": ref_to_car,
'token': sample['token'],
'car_from_global': car_from_global,
'car_to_global': car_to_global,
'timestamp': ref_time,
'sweeps': []
}
sample_data_token = sample['data'][ref_chan]
curr_sd_rec = lyft.get('sample_data', sample_data_token)
sweeps = []
while len(sweeps) < max_sweeps - 1:
if curr_sd_rec['prev'] == '':
if len(sweeps) == 0:
sweep = {
'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),
'sample_data_token': curr_sd_rec['token'],
'transform_matrix': None,
                        'time_lag': curr_sd_rec['timestamp'] * 0,  # zero lag: this entry is the reference frame itself
}
sweeps.append(sweep)
else:
sweeps.append(sweeps[-1])
else:
curr_sd_rec = lyft.get('sample_data', curr_sd_rec['prev'])
# Get past pose
current_pose_rec = lyft.get('ego_pose', curr_sd_rec['ego_pose_token'])
global_from_car = transform_matrix(
current_pose_rec['translation'], Quaternion(current_pose_rec['rotation']), inverse=False,
)
# Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
current_cs_rec = lyft.get(
'calibrated_sensor', curr_sd_rec['calibrated_sensor_token']
)
car_from_current = transform_matrix(
current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']), inverse=False,
)
tm = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
lidar_path = lyft.get_sample_data_path(curr_sd_rec['token'])
time_lag = ref_time - 1e-6 * curr_sd_rec['timestamp']
sweep = {
'lidar_path': Path(lidar_path).relative_to(data_path).__str__(),
'sample_data_token': curr_sd_rec['token'],
'transform_matrix': tm,
'global_from_car': global_from_car,
'car_from_current': car_from_current,
'time_lag': time_lag,
}
sweeps.append(sweep)
info['sweeps'] = sweeps
if not test:
annotations = [
lyft.get("sample_annotation", token) for token in sample["anns"]
]
locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3)
dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[:, [1, 0, 2]]
rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(
-1, 1
)
velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3)
names = np.array([b.name for b in ref_boxes])
tokens = np.array([b.token for b in ref_boxes]).reshape(-1, 1)
gt_boxes = np.concatenate([locs, dims, rots], axis=1)
assert len(annotations) == len(gt_boxes)
info["gt_boxes"] = gt_boxes
info["gt_boxes_velocity"] = velocity
info["gt_names"] = names
info["gt_boxes_token"] = tokens
if sample["scene_token"] in train_scenes:
train_lyft_infos.append(info)
else:
val_lyft_infos.append(info)
progress_bar.close()
return train_lyft_infos, val_lyft_infos
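# --- Added illustrative sketch (not part of the original utilities) ---
# The sweep aggregation above chains four homogeneous transforms:
# sweep sensor -> sweep ego -> global -> reference ego -> reference sensor.
# As a sketch of why the chain works, a transform composed with its inverse
# (both built by the same transform_matrix helper) recovers the 4x4 identity.
# Translation/rotation values below are arbitrary.
def _demo_transform_chain():
    translation = [1.0, 2.0, 0.5]
    rotation = Quaternion(axis=[0, 0, 1], radians=0.3)
    ref_to_car = transform_matrix(translation, rotation, inverse=False)
    ref_from_car = transform_matrix(translation, rotation, inverse=True)
    assert np.allclose(ref_from_car @ ref_to_car, np.eye(4), atol=1e-6)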
def boxes_lidar_to_lyft(boxes3d, scores=None, labels=None):
box_list = []
for k in range(boxes3d.shape[0]):
quat = Quaternion(axis=[0, 0, 1], radians=boxes3d[k, 6])
box = Box(
boxes3d[k, :3],
boxes3d[k, [4, 3, 5]], # wlh
quat, label=labels[k] if labels is not None else np.nan,
score=scores[k] if scores is not None else np.nan,
)
box_list.append(box)
return box_list
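# --- Added illustrative sketch (not part of the original utilities) ---
# boxes_lidar_to_lyft reorders the (dx, dy, dz) box extents into the SDK's
# (w, l, h) convention and turns the heading into a z-axis quaternion. With a
# box of length 4.5, width 1.8 and height 1.6, the resulting Box reports
# wlh == (1.8, 4.5, 1.6). All values are made up for illustration.
def _demo_boxes_lidar_to_lyft():
    boxes3d = np.array([[10.0, 5.0, -1.0, 4.5, 1.8, 1.6, 0.3]])
    boxes = boxes_lidar_to_lyft(boxes3d, scores=np.array([0.9]), labels=np.array([1]))
    assert np.allclose(boxes[0].wlh, [1.8, 4.5, 1.6])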
def lidar_lyft_box_to_global(lyft, boxes, sample_token):
s_record = lyft.get('sample', sample_token)
sample_data_token = s_record['data']['LIDAR_TOP']
sd_record = lyft.get('sample_data', sample_data_token)
cs_record = lyft.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
sensor_record = lyft.get('sensor', cs_record['sensor_token'])
pose_record = lyft.get('ego_pose', sd_record['ego_pose_token'])
box_list = []
for box in boxes:
# Move box to ego vehicle coord system
box.rotate(Quaternion(cs_record['rotation']))
box.translate(np.array(cs_record['translation']))
# Move box to global coord system
box.rotate(Quaternion(pose_record['rotation']))
box.translate(np.array(pose_record['translation']))
box_list.append(box)
return box_list
def convert_det_to_lyft_format(lyft, det_annos):
sample_tokens = []
det_lyft_box = []
for anno in det_annos:
sample_tokens.append(anno['metadata']['token'])
boxes_lyft_list = boxes_lidar_to_lyft(anno['boxes_lidar'], anno['score'], anno['pred_labels'])
boxes_list = lidar_lyft_box_to_global(lyft, boxes_lyft_list, anno['metadata']['token'])
for idx, box in enumerate(boxes_list):
name = anno['name'][idx]
box3d = {
'sample_token': anno['metadata']['token'],
'translation': box.center.tolist(),
'size': box.wlh.tolist(),
'rotation': box.orientation.elements.tolist(),
'name': name,
'score': box.score
}
det_lyft_box.append(box3d)
return det_lyft_box, sample_tokens
def load_lyft_gt_by_tokens(lyft, sample_tokens):
"""
    Modified from the Lyft tutorial.
"""
gt_box3ds = []
# Load annotations and filter predictions and annotations.
for sample_token in sample_tokens:
sample = lyft.get('sample', sample_token)
sample_annotation_tokens = sample['anns']
sample_lidar_token = sample["data"]["LIDAR_TOP"]
lidar_data = lyft.get("sample_data", sample_lidar_token)
ego_pose = lyft.get("ego_pose", lidar_data["ego_pose_token"])
ego_translation = np.array(ego_pose['translation'])
for sample_annotation_token in sample_annotation_tokens:
sample_annotation = lyft.get('sample_annotation', sample_annotation_token)
sample_annotation_translation = sample_annotation['translation']
class_name = sample_annotation['category_name']
box3d = {
'sample_token': sample_token,
'translation': sample_annotation_translation,
'size': sample_annotation['size'],
'rotation': sample_annotation['rotation'],
'name': class_name
}
gt_box3ds.append(box3d)
return gt_box3ds
def format_lyft_results(classwise_ap, class_names, iou_threshold_list, version='trainval'):
ret_dict = {}
result = '----------------Lyft %s results-----------------\n' % version
result += 'Average precision over IoUs: {}\n'.format(str(iou_threshold_list))
for c_idx, class_name in enumerate(class_names):
result += '{:<20}: \t {:.4f}\n'.format(class_name, classwise_ap[c_idx])
ret_dict[class_name] = classwise_ap[c_idx]
result += '--------------average performance-------------\n'
mAP = np.mean(classwise_ap)
result += 'mAP:\t {:.4f}\n'.format(mAP)
ret_dict['mAP'] = mAP
return result, ret_dict
| 12,061
| 35.222222
| 109
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/lyft/lyft_dataset.py
|
import copy
import pickle
from pathlib import Path
import os
import io
import numpy as np
from tqdm import tqdm
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, box_utils
from ..dataset import DatasetTemplate
class LyftDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/Lyft'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
self.root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=self.root_path, logger=logger
)
if self.oss_path is not None:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
self.infos = []
self.include_lyft_data(self.mode)
def include_lyft_data(self, mode):
self.logger.info('Loading lyft dataset')
lyft_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
if self.oss_path is None:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
lyft_infos.extend(infos)
else:
info_path = os.path.join(self.oss_path, info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
lyft_infos.extend(infos)
self.infos.extend(lyft_infos)
self.logger.info('Total samples for lyft dataset: %d' % (len(lyft_infos)))
@staticmethod
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius*1.5) & (np.abs(points[:, 1]) < center_radius))
return points[mask]
def get_sweep(self, sweep_info):
if self.oss_path is None:
lidar_path = self.root_path / sweep_info['lidar_path']
points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1)
else:
lidar_path = os.path.join(self.oss_path, sweep_info['lidar_path'])
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points_sweep = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1)
if points_sweep.shape[0] % 5 != 0:
points_sweep = points_sweep[: points_sweep.shape[0] - (points_sweep.shape[0] % 5)]
points_sweep = points_sweep.reshape([-1, 5])[:, :4]
points_sweep = self.remove_ego_points(points_sweep).T
if sweep_info['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
return points_sweep.T, cur_times.T
def get_lidar_with_sweeps(self, index, max_sweeps=1):
info = self.infos[index]
if self.oss_path is None:
lidar_path = self.root_path / info['lidar_path']
points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1)
else:
lidar_path = os.path.join(self.oss_path, info['lidar_path'])
# print(lidar_path)
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1).copy()
if points.shape[0] % 5 != 0:
points = points[: points.shape[0] - (points.shape[0] % 5)]
points = points.reshape([-1, 5])[:, :4]
sweep_points_list = [points]
sweep_times_list = [np.zeros((points.shape[0], 1))]
for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
sweep_points_list.append(points_sweep)
sweep_times_list.append(times_sweep)
points = np.concatenate(sweep_points_list, axis=0)
times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
points = np.concatenate((points, times), axis=1)
return points
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "lyft",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']}
}
if 'gt_boxes' in info:
input_dict.update({
'gt_boxes': info['gt_boxes'],
'gt_names': info['gt_names']
})
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
if self.dataset_cfg.get('SETNAN_VELOCITY_TO_ZEROS', False) and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
gt_boxes = input_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
input_dict['gt_boxes'] = gt_boxes
if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in input_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
input_dict['gt_boxes'] = input_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6]]
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
if self.dataset_cfg.get('SHIFT_COOR', None):
#print ("*******WARNING FOR SHIFT_COOR:", self.dataset_cfg.SHIFT_COOR)
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
pred_dict['pred_labels'] = pred_labels
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
single_pred_dict['metadata'] = batch_dict['metadata'][index]
annos.append(single_pred_dict)
return annos
def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names):
from ..kitti.kitti_object_eval_python import eval as kitti_eval
from ..kitti import kitti_utils
map_name_to_kitti = {
'car': 'Car',
'pedestrian': 'Pedestrian',
'truck': 'Truck',
'bicycle': 'Cyclist',
'motorcycle': 'Cyclist'
}
def transform_to_kitti_format(annos, info_with_fakelidar=False, is_gt=False):
for anno in annos:
if 'name' not in anno:
anno['name'] = anno['gt_names']
anno.pop('gt_names')
for k in range(anno['name'].shape[0]):
if anno['name'][k] in map_name_to_kitti:
anno['name'][k] = map_name_to_kitti[anno['name'][k]]
                else:
                    anno['name'][k] = 'Person_sitting'  # unmapped classes fall into a category ignored by the KITTI eval
if 'boxes_lidar' in anno:
gt_boxes_lidar = anno['boxes_lidar'].copy()
else:
gt_boxes_lidar = anno['gt_boxes'].copy()
# filter by range
if self.dataset_cfg.get('GT_FILTER', None) and \
self.dataset_cfg.GT_FILTER.RANGE_FILTER:
                    if self.dataset_cfg.GT_FILTER.get('RANGE', None):
                        point_cloud_range = self.dataset_cfg.GT_FILTER.RANGE
                    else:
                        # copy before widening the z-range so that self.point_cloud_range
                        # is not silently mutated in place
                        point_cloud_range = np.array(self.point_cloud_range, dtype=np.float32)
                        point_cloud_range[2] = -10
                        point_cloud_range[5] = 10
mask = box_utils.mask_boxes_outside_range_numpy(gt_boxes_lidar,
point_cloud_range,
min_num_corners=1)
gt_boxes_lidar = gt_boxes_lidar[mask]
anno['name'] = anno['name'][mask]
if not is_gt:
anno['score'] = anno['score'][mask]
anno['pred_labels'] = anno['pred_labels'][mask]
# filter by fov
if is_gt and self.dataset_cfg.get('GT_FILTER', None):
if self.dataset_cfg.GT_FILTER.get('FOV_FILTER', None):
fov_gt_flag = self.extract_fov_gt(
gt_boxes_lidar, self.dataset_cfg['FOV_DEGREE'], self.dataset_cfg['FOV_ANGLE']
)
gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag]
anno['name'] = anno['name'][fov_gt_flag]
anno['bbox'] = np.zeros((len(anno['name']), 4))
anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]
anno['truncated'] = np.zeros(len(anno['name']))
anno['occluded'] = np.zeros(len(anno['name']))
if len(gt_boxes_lidar) > 0:
if info_with_fakelidar:
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)
gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2
anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))
anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar
anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar
anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar
dxdydz = gt_boxes_lidar[:, 3:6]
anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw
anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0
anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']
else:
anno['location'] = anno['dimensions'] = np.zeros((0, 3))
anno['rotation_y'] = anno['alpha'] = np.zeros(0)
transform_to_kitti_format(eval_det_annos)
transform_to_kitti_format(
eval_gt_annos,
info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False),
is_gt=True
)
kitti_class_names = [map_name_to_kitti[x] for x in class_names]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
)
return ap_result_str, ap_dict
def evaluation(self, det_annos, class_names, **kwargs):
if kwargs['eval_metric'] == 'kitti':
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = copy.deepcopy(self.infos)
return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names)
elif kwargs['eval_metric'] == 'lyft':
return self.lyft_eval(det_annos, class_names,
iou_thresholds=self.dataset_cfg.EVAL_LYFT_IOU_LIST)
else:
raise NotImplementedError
def lyft_eval(self, det_annos, class_names, iou_thresholds=[0.5]):
from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft
from . import lyft_utils
# from lyft_dataset_sdk.eval.detection.mAP_evaluation import get_average_precisions
from .lyft_mAP_eval.lyft_eval import get_average_precisions
lyft = Lyft(json_path=self.root_path / 'data', data_path=self.root_path, verbose=True)
det_lyft_boxes, sample_tokens = lyft_utils.convert_det_to_lyft_format(lyft, det_annos)
gt_lyft_boxes = lyft_utils.load_lyft_gt_by_tokens(lyft, sample_tokens)
average_precisions = get_average_precisions(gt_lyft_boxes, det_lyft_boxes, class_names, iou_thresholds)
ap_result_str, ap_dict = lyft_utils.format_lyft_results(average_precisions, class_names, iou_thresholds, version=self.dataset_cfg.VERSION)
return ap_result_str, ap_dict
def create_groundtruth_database(self, used_classes=None, max_sweeps=10):
import torch
database_save_path = self.root_path / f'gt_database'
db_info_save_path = self.root_path / f'lyft_dbinfos_{max_sweeps}sweeps.pkl'
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
for idx in tqdm(range(len(self.infos))):
sample_idx = idx
info = self.infos[idx]
points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps)
gt_boxes = info['gt_boxes']
gt_names = info['gt_names']
box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
).long().squeeze(dim=0).cpu().numpy()
for i in range(gt_boxes.shape[0]):
filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i)
filepath = database_save_path / filename
gt_points = points[box_idxs_of_pts == i]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'w') as f:
gt_points.tofile(f)
if (used_classes is None) or gt_names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
if gt_names[i] in all_db_infos:
all_db_infos[gt_names[i]].append(db_info)
else:
all_db_infos[gt_names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
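# --- Added illustrative sketch (not part of the original dataset code) ---
# remove_ego_points drops points inside a small rectangle around the sensor
# (1.5 * center_radius along x, center_radius along y). Values are made up.
def _demo_remove_ego_points():
    points = np.array([[0.5, 0.2, 0.0, 0.1],    # inside the ego rectangle -> dropped
                       [10.0, 3.0, 0.0, 0.2]])  # well outside -> kept
    kept = LyftDataset.remove_ego_points(points, center_radius=1.0)
    assert kept.shape[0] == 1 and kept[0, 0] == 10.0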
def create_lyft_info(version, data_path, save_path, split, max_sweeps=10):
from lyft_dataset_sdk.lyftdataset import LyftDataset
from . import lyft_utils
data_path = data_path / version
save_path = save_path / version
split_path = data_path.parent / 'ImageSets'
if split is not None:
save_path = save_path / split
split_path = split_path / split
save_path.mkdir(exist_ok=True)
assert version in ['trainval', 'one_scene', 'test']
if version == 'trainval':
train_split_path = split_path / 'train.txt'
val_split_path = split_path / 'val.txt'
elif version == 'test':
train_split_path = split_path / 'test.txt'
val_split_path = None
elif version == 'one_scene':
train_split_path = split_path / 'one_scene.txt'
val_split_path = split_path / 'one_scene.txt'
else:
raise NotImplementedError
train_scenes = [x.strip() for x in open(train_split_path).readlines()] if train_split_path.exists() else []
val_scenes = [x.strip() for x in open(val_split_path).readlines()] if val_split_path is not None and val_split_path.exists() else []
lyft = LyftDataset(json_path=data_path / 'data', data_path=data_path, verbose=True)
available_scenes = lyft_utils.get_available_scenes(lyft)
available_scene_names = [s['name'] for s in available_scenes]
train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes])
val_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes])
print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes)))
train_lyft_infos, val_lyft_infos = lyft_utils.fill_trainval_infos(
data_path=data_path, lyft=lyft, train_scenes=train_scenes, val_scenes=val_scenes,
test='test' in version, max_sweeps=max_sweeps
)
if version == 'test':
print('test sample: %d' % len(train_lyft_infos))
with open(save_path / f'lyft_infos_test.pkl', 'wb') as f:
pickle.dump(train_lyft_infos, f)
else:
print('train sample: %d, val sample: %d' % (len(train_lyft_infos), len(val_lyft_infos)))
with open(save_path / f'lyft_infos_train.pkl', 'wb') as f:
pickle.dump(train_lyft_infos, f)
with open(save_path / f'lyft_infos_val.pkl', 'wb') as f:
pickle.dump(val_lyft_infos, f)
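# Example invocation (hypothetical config path; mirrors the __main__ block below):
#   python -m pcdet.datasets.lyft.lyft_dataset --func create_lyft_infos \
#       --cfg_file tools/cfgs/dataset_configs/lyft_dataset.yaml --version trainval --max_sweeps 10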
if __name__ == '__main__':
import yaml
import argparse
from pathlib import Path
from easydict import EasyDict
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
parser.add_argument('--func', type=str, default='create_lyft_infos', help='')
parser.add_argument('--version', type=str, default='trainval', help='')
parser.add_argument('--split', type=str, default=None, help='')
parser.add_argument('--max_sweeps', type=int, default=10, help='')
args = parser.parse_args()
if args.func == 'create_lyft_infos':
try:
            yaml_config = yaml.load(open(args.cfg_file), Loader=yaml.FullLoader)
except:
yaml_config = yaml.safe_load(open(args.cfg_file))
dataset_cfg = EasyDict(yaml_config)
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
dataset_cfg.VERSION = args.version
dataset_cfg.MAX_SWEEPS = args.max_sweeps
create_lyft_info(
version=dataset_cfg.VERSION,
data_path=ROOT_DIR / 'data' / 'lyft',
save_path=ROOT_DIR / 'data' / 'lyft',
split=args.split,
max_sweeps=dataset_cfg.MAX_SWEEPS
)
lyft_dataset = LyftDataset(
dataset_cfg=dataset_cfg, class_names=None,
root_path=ROOT_DIR / 'data' / 'lyft',
logger=common_utils.create_logger(), training=True
)
if args.version != 'test':
lyft_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
| 20,471
| 43.12069
| 146
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/lyft/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/lyft/lyft_mAP_eval/lyft_eval.py
|
"""
modified from lyft toolkit https://github.com/lyft/nuscenes-devkit.git
"""
"""
mAP 3D calculation for the data in nuScenes format.
The input files are expected to have the format:
Expected fields:
gt = [{
'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac',
'translation': [974.2811881299899, 1714.6815014457964, -23.689857123368846],
'size': [1.796, 4.488, 1.664],
'rotation': [0.14882026466054782, 0, 0, 0.9888642620837121],
'name': 'car'
}]
prediction_result = {
'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac',
'translation': [971.8343488872263, 1713.6816097857359, -25.82534357061308],
'size': [2.519726579986132, 7.810161372666739, 3.483438286096803],
'rotation': [0.10913582721095375, 0.04099572636992043, 0.01927712319721745, 1.029328402625659],
'name': 'car',
'score': 0.3077029437237213
}
input arguments:
--pred_file: file with predictions
--gt_file: ground truth file
--iou_threshold: IOU threshold
In general we are interested in the average mAP over thresholds [0.5, 0.55, 0.6, 0.65, ..., 0.95], similar to the
standard COCO metric => one needs to run this file N times, once for every IOU threshold independently.
"""
import argparse
import json
from collections import defaultdict
from pathlib import Path
import numpy as np
from pyquaternion import Quaternion
from shapely.geometry import Polygon
class Box3D:
"""Data class used during detection evaluation. Can be a prediction or ground truth."""
def __init__(self, **kwargs):
sample_token = kwargs["sample_token"]
translation = kwargs["translation"]
size = kwargs["size"]
rotation = kwargs["rotation"]
name = kwargs["name"]
score = kwargs.get("score", -1)
if not isinstance(sample_token, str):
raise TypeError("Sample_token must be a string!")
if not len(translation) == 3:
raise ValueError("Translation must have 3 elements!")
if np.any(np.isnan(translation)):
raise ValueError("Translation may not be NaN!")
if not len(size) == 3:
raise ValueError("Size must have 3 elements!")
if np.any(np.isnan(size)):
raise ValueError("Size may not be NaN!")
if not len(rotation) == 4:
raise ValueError("Rotation must have 4 elements!")
if np.any(np.isnan(rotation)):
raise ValueError("Rotation may not be NaN!")
if name is None:
raise ValueError("Name cannot be empty!")
# Assign.
self.sample_token = sample_token
self.translation = translation
self.size = size
self.volume = np.prod(self.size)
self.score = score
assert np.all([x > 0 for x in size])
self.rotation = rotation
self.name = name
self.quaternion = Quaternion(self.rotation)
self.width, self.length, self.height = size
self.center_x, self.center_y, self.center_z = self.translation
self.min_z = self.center_z - self.height / 2
self.max_z = self.center_z + self.height / 2
self.ground_bbox_coords = None
self.ground_bbox_coords = self.get_ground_bbox_coords()
@staticmethod
def check_orthogonal(a, b, c):
"""Check that vector (b - a) is orthogonal to the vector (c - a)."""
return np.isclose((b[0] - a[0]) * (c[0] - a[0]) + (b[1] - a[1]) * (c[1] - a[1]), 0)
def get_ground_bbox_coords(self):
if self.ground_bbox_coords is not None:
return self.ground_bbox_coords
return self.calculate_ground_bbox_coords()
def calculate_ground_bbox_coords(self):
"""We assume that the 3D box has lower plane parallel to the ground.
Returns: Polygon with 4 points describing the base.
"""
if self.ground_bbox_coords is not None:
return self.ground_bbox_coords
rotation_matrix = self.quaternion.rotation_matrix
cos_angle = rotation_matrix[0, 0]
sin_angle = rotation_matrix[1, 0]
point_0_x = self.center_x + self.length / 2 * cos_angle + self.width / 2 * sin_angle
point_0_y = self.center_y + self.length / 2 * sin_angle - self.width / 2 * cos_angle
point_1_x = self.center_x + self.length / 2 * cos_angle - self.width / 2 * sin_angle
point_1_y = self.center_y + self.length / 2 * sin_angle + self.width / 2 * cos_angle
point_2_x = self.center_x - self.length / 2 * cos_angle - self.width / 2 * sin_angle
point_2_y = self.center_y - self.length / 2 * sin_angle + self.width / 2 * cos_angle
point_3_x = self.center_x - self.length / 2 * cos_angle + self.width / 2 * sin_angle
point_3_y = self.center_y - self.length / 2 * sin_angle - self.width / 2 * cos_angle
point_0 = point_0_x, point_0_y
point_1 = point_1_x, point_1_y
point_2 = point_2_x, point_2_y
point_3 = point_3_x, point_3_y
assert self.check_orthogonal(point_0, point_1, point_3)
assert self.check_orthogonal(point_1, point_0, point_2)
assert self.check_orthogonal(point_2, point_1, point_3)
assert self.check_orthogonal(point_3, point_0, point_2)
self.ground_bbox_coords = Polygon(
[
(point_0_x, point_0_y),
(point_1_x, point_1_y),
(point_2_x, point_2_y),
(point_3_x, point_3_y),
(point_0_x, point_0_y),
]
)
return self.ground_bbox_coords
def get_height_intersection(self, other):
min_z = max(other.min_z, self.min_z)
max_z = min(other.max_z, self.max_z)
return max(0, max_z - min_z)
def get_area_intersection(self, other) -> float:
result = self.ground_bbox_coords.intersection(other.ground_bbox_coords).area
assert result <= self.width * self.length
return result
def get_intersection(self, other) -> float:
height_intersection = self.get_height_intersection(other)
area_intersection = self.ground_bbox_coords.intersection(other.ground_bbox_coords).area
return height_intersection * area_intersection
def get_iou(self, other):
intersection = self.get_intersection(other)
union = self.volume + other.volume - intersection
iou = np.clip(intersection / union, 0, 1)
return iou
def __repr__(self):
return str(self.serialize())
def serialize(self) -> dict:
"""Returns: Serialized instance as dict."""
return {
"sample_token": self.sample_token,
"translation": self.translation,
"size": self.size,
"rotation": self.rotation,
"name": self.name,
"volume": self.volume,
"score": self.score,
}
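# --- Added illustrative sketch (not part of the original toolkit) ---
# Two identical boxes have IoU 1; shifting one box upward by its full height
# removes the height overlap, so the IoU drops to 0. rotation [1, 0, 0, 0] is
# the identity quaternion; all values are made up for illustration.
def _demo_box3d_iou():
    base = dict(
        sample_token="token", translation=[0.0, 0.0, 0.0],
        size=[2.0, 4.0, 1.5], rotation=[1.0, 0.0, 0.0, 0.0], name="car",
    )
    a = Box3D(**base)
    b = Box3D(**{**base, "translation": [0.0, 0.0, 1.5]})
    assert np.isclose(a.get_iou(a), 1.0)
    assert np.isclose(a.get_iou(b), 0.0)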
def group_by_key(detections, key):
groups = defaultdict(list)
for detection in detections:
groups[detection[key]].append(detection)
return groups
def wrap_in_box(input):
result = {}
for key, value in input.items():
result[key] = [Box3D(**x) for x in value]
return result
def get_envelope(precisions):
"""Compute the precision envelope.
Args:
precisions:
Returns:
"""
for i in range(precisions.size - 1, 0, -1):
precisions[i - 1] = np.maximum(precisions[i - 1], precisions[i])
return precisions
def get_ap(recalls, precisions):
"""Calculate average precision.
Args:
recalls:
        precisions:
    Returns:
        float: average precision.
    """
# correct AP calculation
# first append sentinel values at the end
recalls = np.concatenate(([0.0], recalls, [1.0]))
precisions = np.concatenate(([0.0], precisions, [0.0]))
precisions = get_envelope(precisions)
# to calculate area under PR curve, look for points where X axis (recall) changes value
i = np.where(recalls[1:] != recalls[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((recalls[i + 1] - recalls[i]) * precisions[i + 1])
return ap
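# --- Added worked example (not part of the original toolkit) ---
# With recalls [0.5, 1.0] and precisions [1.0, 0.5], the envelope keeps
# precision 1.0 up to recall 0.5 and 0.5 afterwards, so
# AP = 0.5 * 1.0 + 0.5 * 0.5 = 0.75.
def _demo_get_ap():
    assert np.isclose(get_ap(np.array([0.5, 1.0]), np.array([1.0, 0.5])), 0.75)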
def get_ious(gt_boxes, predicted_box):
return [predicted_box.get_iou(x) for x in gt_boxes]
def recall_precision(gt, predictions, iou_threshold_list):
    num_gts = len(gt)
    if num_gts == 0:
        # no ground truth for this class; the caller treats -1 as "undefined"
        return -1, -1, -1
image_gts = group_by_key(gt, "sample_token")
image_gts = wrap_in_box(image_gts)
sample_gt_checked = {sample_token: np.zeros((len(boxes), len(iou_threshold_list))) for sample_token, boxes in image_gts.items()}
predictions = sorted(predictions, key=lambda x: x["score"], reverse=True)
# go down dets and mark TPs and FPs
num_predictions = len(predictions)
tp = np.zeros((num_predictions, len(iou_threshold_list)))
fp = np.zeros((num_predictions, len(iou_threshold_list)))
for prediction_index, prediction in enumerate(predictions):
predicted_box = Box3D(**prediction)
sample_token = prediction["sample_token"]
max_overlap = -np.inf
jmax = -1
try:
gt_boxes = image_gts[sample_token] # gt_boxes per sample
gt_checked = sample_gt_checked[sample_token] # gt flags per sample
except KeyError:
gt_boxes = []
gt_checked = None
if len(gt_boxes) > 0:
overlaps = get_ious(gt_boxes, predicted_box)
max_overlap = np.max(overlaps)
jmax = np.argmax(overlaps)
for i, iou_threshold in enumerate(iou_threshold_list):
if max_overlap > iou_threshold:
if gt_checked[jmax, i] == 0:
tp[prediction_index, i] = 1.0
gt_checked[jmax, i] = 1
else:
fp[prediction_index, i] = 1.0
else:
fp[prediction_index, i] = 1.0
# compute precision recall
fp = np.cumsum(fp, axis=0)
tp = np.cumsum(tp, axis=0)
recalls = tp / float(num_gts)
assert np.all(0 <= recalls) & np.all(recalls <= 1)
# avoid divide by zero in case the first detection matches a difficult ground truth
precisions = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
assert np.all(0 <= precisions) & np.all(precisions <= 1)
ap_list = []
for i in range(len(iou_threshold_list)):
recall = recalls[:, i]
precision = precisions[:, i]
ap = get_ap(recall, precision)
ap_list.append(ap)
return recalls, precisions, ap_list
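# --- Added illustrative sketch (not part of the original toolkit) ---
# One ground-truth box and one perfectly matching prediction give
# recall = precision = AP = 1 at an IoU threshold of 0.5.
def _demo_recall_precision():
    gt = [{
        "sample_token": "token", "translation": [0.0, 0.0, 0.0],
        "size": [2.0, 4.0, 1.5], "rotation": [1.0, 0.0, 0.0, 0.0], "name": "car",
    }]
    predictions = [dict(gt[0], score=0.9)]
    recalls, precisions, ap_list = recall_precision(gt, predictions, [0.5])
    assert np.isclose(ap_list[0], 1.0)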
def get_average_precisions(gt: list, predictions: list, class_names: list, iou_thresholds: list) -> np.ndarray:
"""Returns an array with an average precision per class.
Args:
gt: list of dictionaries in the format described below.
predictions: list of dictionaries in the format described below.
class_names: list of the class names.
        iou_thresholds: list of IOU thresholds used to calculate TP / FP.
Returns an array with an average precision per class.
Ground truth and predictions should have schema:
gt = [{
'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac',
'translation': [974.2811881299899, 1714.6815014457964, -23.689857123368846],
'size': [1.796, 4.488, 1.664],
'rotation': [0.14882026466054782, 0, 0, 0.9888642620837121],
'name': 'car'
}]
predictions = [{
'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac',
'translation': [971.8343488872263, 1713.6816097857359, -25.82534357061308],
'size': [2.519726579986132, 7.810161372666739, 3.483438286096803],
'rotation': [0.10913582721095375, 0.04099572636992043, 0.01927712319721745, 1.029328402625659],
'name': 'car',
'score': 0.3077029437237213
}]
"""
assert all([0 <= iou_th <= 1 for iou_th in iou_thresholds])
gt_by_class_name = group_by_key(gt, "name")
pred_by_class_name = group_by_key(predictions, "name")
average_precisions = np.zeros(len(class_names))
for class_id, class_name in enumerate(class_names):
if class_name in pred_by_class_name:
recalls, precisions, ap_list = recall_precision(
gt_by_class_name[class_name], pred_by_class_name[class_name], iou_thresholds
)
aps = np.mean(ap_list)
average_precisions[class_id] = aps
return average_precisions
def get_class_names(gt: dict) -> list:
"""Get sorted list of class names.
Args:
gt:
Returns: Sorted list of class names.
"""
return sorted(list(set([x["name"] for x in gt])))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg("-p", "--pred_file", type=str, help="Path to the predictions file.", required=True)
arg("-g", "--gt_file", type=str, help="Path to the ground truth file.", required=True)
arg("-t", "--iou_threshold", type=float, help="iou threshold", default=0.5)
args = parser.parse_args()
gt_path = Path(args.gt_file)
pred_path = Path(args.pred_file)
with open(args.pred_file) as f:
predictions = json.load(f)
with open(args.gt_file) as f:
gt = json.load(f)
class_names = get_class_names(gt)
print("Class_names = ", class_names)
    average_precisions = get_average_precisions(gt, predictions, class_names, [args.iou_threshold])
mAP = np.mean(average_precisions)
print("Average per class mean average precision = ", mAP)
for class_id in sorted(list(zip(class_names, average_precisions.flatten().tolist()))):
print(class_id)
| 13,634
| 30.272936
| 132
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/lyft/lyft_mAP_eval/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/processor/point_feature_encoder.py
|
import numpy as np
class PointFeatureEncoder(object):
def __init__(self, config, point_cloud_range=None):
super().__init__()
self.point_encoding_config = config
assert list(self.point_encoding_config.src_feature_list[0:3]) == ['x', 'y', 'z']
self.used_feature_list = self.point_encoding_config.used_feature_list
self.src_feature_list = self.point_encoding_config.src_feature_list
self.point_cloud_range = point_cloud_range
@property
def num_point_features(self):
return getattr(self, self.point_encoding_config.encoding_type)(points=None)
def forward(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
...
Returns:
data_dict:
points: (N, 3 + C_out),
use_lead_xyz: whether to use xyz as point-wise features
...
"""
data_dict['points'], use_lead_xyz = getattr(self, self.point_encoding_config.encoding_type)(
data_dict['points']
)
data_dict['use_lead_xyz'] = use_lead_xyz
if self.point_encoding_config.get('filter_sweeps', False) and 'timestamp' in self.src_feature_list:
max_sweeps = self.point_encoding_config.max_sweeps
idx = self.src_feature_list.index('timestamp')
dt = np.round(data_dict['points'][:, idx], 2)
max_dt = sorted(np.unique(dt))[min(len(np.unique(dt))-1, max_sweeps-1)]
data_dict['points'] = data_dict['points'][dt <= max_dt]
return data_dict
def absolute_coordinates_encoding(self, points=None):
if points is None:
num_output_features = len(self.used_feature_list)
return num_output_features
point_feature_list = [points[:, 0:3]]
for x in self.used_feature_list:
if x in ['x', 'y', 'z']:
continue
idx = self.src_feature_list.index(x)
point_feature_list.append(points[:, idx:idx+1])
point_features = np.concatenate(point_feature_list, axis=1)
return point_features, True
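# --- Added illustrative sketch (not part of the original file) ---
# With src features [x, y, z, intensity] and the same used features, the
# encoder passes xyz through and appends the intensity column unchanged.
# EasyDict (already a dependency of this repo) is used as a stand-in config.
def _demo_point_feature_encoder():
    from easydict import EasyDict
    cfg = EasyDict({
        'encoding_type': 'absolute_coordinates_encoding',
        'used_feature_list': ['x', 'y', 'z', 'intensity'],
        'src_feature_list': ['x', 'y', 'z', 'intensity'],
    })
    encoder = PointFeatureEncoder(cfg)
    encoded, use_lead_xyz = encoder.absolute_coordinates_encoding(np.array([[1.0, 2.0, 3.0, 0.5]]))
    assert encoded.shape == (1, 4) and use_lead_xyz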
| 2,169
| 37.070175
| 107
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/processor/data_processor.py
|
from functools import partial
import numpy as np
from skimage import transform
from ...utils import box_utils, common_utils
tv = None
try:
import cumm.tensorview as tv
except:
pass
class VoxelGeneratorWrapper():
def __init__(self, vsize_xyz, coors_range_xyz, num_point_features, max_num_points_per_voxel, max_num_voxels):
try:
from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
self.spconv_ver = 1
except:
try:
from spconv.utils import VoxelGenerator
self.spconv_ver = 1
except:
from spconv.utils import Point2VoxelCPU3d as VoxelGenerator
self.spconv_ver = 2
if self.spconv_ver == 1:
self._voxel_generator = VoxelGenerator(
voxel_size=vsize_xyz,
point_cloud_range=coors_range_xyz,
max_num_points=max_num_points_per_voxel,
max_voxels=max_num_voxels
)
else:
self._voxel_generator = VoxelGenerator(
vsize_xyz=vsize_xyz,
coors_range_xyz=coors_range_xyz,
num_point_features=num_point_features,
max_num_points_per_voxel=max_num_points_per_voxel,
max_num_voxels=max_num_voxels
)
def generate(self, points):
if self.spconv_ver == 1:
voxel_output = self._voxel_generator.generate(points)
if isinstance(voxel_output, dict):
voxels, coordinates, num_points = \
voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
else:
voxels, coordinates, num_points = voxel_output
else:
            assert tv is not None, "Unexpected error: the 'cumm' library was not imported properly."
voxel_output = self._voxel_generator.point_to_voxel(tv.from_numpy(points))
tv_voxels, tv_coordinates, tv_num_points = voxel_output
# make copy with numpy(), since numpy_view() will disappear as soon as the generator is deleted
voxels = tv_voxels.numpy()
coordinates = tv_coordinates.numpy()
num_points = tv_num_points.numpy()
return voxels, coordinates, num_points
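# --- Added usage sketch (not part of the original file; requires spconv) ---
# Voxelize a handful of random 4-feature points with 0.1 m voxels inside a
# small range. Returned coordinates are in (z, y, x) order.
def _demo_voxel_generator_wrapper():
    generator = VoxelGeneratorWrapper(
        vsize_xyz=[0.1, 0.1, 0.1],
        coors_range_xyz=[0, -1, -1, 2, 1, 1],
        num_point_features=4,
        max_num_points_per_voxel=5,
        max_num_voxels=1000,
    )
    points = np.random.uniform(low=0.0, high=1.0, size=(100, 4)).astype(np.float32)
    voxels, coordinates, num_points = generator.generate(points)
    assert voxels.shape[1:] == (5, 4) and coordinates.shape[1] == 3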
class DataProcessor(object):
def __init__(self, processor_configs, point_cloud_range, training, num_point_features):
self.point_cloud_range = point_cloud_range
self.training = training
self.num_point_features = num_point_features
self.mode = 'train' if training else 'test'
self.grid_size = self.voxel_size = None
self.data_processor_queue = []
self.voxel_generator = None
for cur_cfg in processor_configs:
cur_processor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_processor_queue.append(cur_processor)
    # copied from LiDAR Distillation
def mask_boxes_outside_length(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.mask_boxes_outside_length, config=config)
min_mask = data_dict['gt_boxes'][:, 3] >= config['LENGTH_RANGE'][0]
max_mask = data_dict['gt_boxes'][:, 3] <= config['LENGTH_RANGE'][1]
mask = min_mask & max_mask
data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]
return data_dict
def mask_points_and_boxes_outside_range(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.mask_points_and_boxes_outside_range, config=config)
if data_dict.get('points', None) is not None:
mask = common_utils.mask_points_by_range(data_dict['points'], self.point_cloud_range)
data_dict['points'] = data_dict['points'][mask]
if data_dict.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training:
mask = box_utils.mask_boxes_outside_range_numpy(
data_dict['gt_boxes'], self.point_cloud_range, min_num_corners=config.get('min_num_corners', 1)
)
data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]
return data_dict
def shuffle_points(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.shuffle_points, config=config)
if config.SHUFFLE_ENABLED[self.mode]:
points = data_dict['points']
shuffle_idx = np.random.permutation(points.shape[0])
points = points[shuffle_idx]
data_dict['points'] = points
return data_dict
    def points_coord_learnable_transform(self, data_dict=None, config=None):
        # We want to perform the alignment operation of point coordinates
        # before the network inference
        if data_dict is None:
            return partial(self.points_coord_learnable_transform, config=config)
        if config.POINTS_TRANSFER and self.training:
            # the learnable coordinate transform has not been implemented yet
            raise NotImplementedError
        return data_dict
def transform_points_to_voxels_placeholder(self, data_dict=None, config=None):
# just calculate grid size
if data_dict is None:
grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
self.grid_size = np.round(grid_size).astype(np.int64)
self.voxel_size = config.VOXEL_SIZE
return partial(self.transform_points_to_voxels_placeholder, config=config)
return data_dict
def transform_points_to_voxels(self, data_dict=None, config=None):
if data_dict is None:
grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
self.grid_size = np.round(grid_size).astype(np.int64)
self.voxel_size = config.VOXEL_SIZE
# just bind the config, we will create the VoxelGeneratorWrapper later,
# to avoid pickling issues in multiprocess spawn
return partial(self.transform_points_to_voxels, config=config)
if self.voxel_generator is None:
self.voxel_generator = VoxelGeneratorWrapper(
vsize_xyz=config.VOXEL_SIZE,
coors_range_xyz=self.point_cloud_range,
num_point_features=self.num_point_features,
max_num_points_per_voxel=config.MAX_POINTS_PER_VOXEL,
max_num_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode],
)
points = data_dict['points']
voxel_output = self.voxel_generator.generate(points)
voxels, coordinates, num_points = voxel_output
if not data_dict['use_lead_xyz']:
voxels = voxels[..., 3:] # remove xyz in voxels(N, 3)
data_dict['voxels'] = voxels
data_dict['voxel_coords'] = coordinates
data_dict['voxel_num_points'] = num_points
return data_dict
    def sample_points_by_voxels(self, data_dict=None, config=None):
        if data_dict is None:
            grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
            self.grid_size = np.round(grid_size).astype(np.int64)
            self.voxel_size = config.VOXEL_SIZE
            # just bind the config here; the voxel generator itself is created lazily
            # in transform_points_to_voxels to avoid pickling issues in multiprocess spawn
            return partial(self.sample_points_by_voxels, config=config)
num_points = config.NUM_POINTS[self.mode]
if num_points == -1: # dynamic voxelization !
return data_dict
# voxelization
data_dict = self.transform_points_to_voxels(data_dict, config)
if config.get('SAMPLE_TYPE', 'raw') == 'mean_vfe':
voxels = data_dict['voxels']
voxel_num_points = data_dict['voxel_num_points']
a = voxels.sum(axis=1)
b = np.expand_dims(voxel_num_points, axis=1).repeat(voxels.shape[-1], axis=-1)
points = a / b
        else:  # default: 'raw'
points = data_dict['voxels'][:,0] # remain only one point per voxel
data_dict['points'] = points
# sampling
data_dict = self.sample_points(data_dict, config)
data_dict.pop('voxels')
data_dict.pop('voxel_coords')
data_dict.pop('voxel_num_points')
return data_dict
def sample_points(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.sample_points, config=config)
num_points = config.NUM_POINTS[self.mode]
if num_points == -1:
return data_dict
points = data_dict['points']
if num_points < len(points):
pts_depth = np.linalg.norm(points[:, 0:3], axis=1)
pts_near_flag = pts_depth < 40.0
far_idxs_choice = np.where(pts_near_flag == 0)[0]
near_idxs = np.where(pts_near_flag == 1)[0]
choice = []
if num_points > len(far_idxs_choice):
near_idxs_choice = np.random.choice(near_idxs, num_points - len(far_idxs_choice), replace=False)
choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
if len(far_idxs_choice) > 0 else near_idxs_choice
else:
choice = np.arange(0, len(points), dtype=np.int32)
choice = np.random.choice(choice, num_points, replace=False)
np.random.shuffle(choice)
else:
choice = np.arange(0, len(points), dtype=np.int32)
if num_points > len(points):
#extra_choice = np.random.choice(choice, num_points - len(points), replace=False) #OpenPCD0.5.2 version
extra_choice = np.random.choice(choice, num_points - len(points)) #IA-SSD version
choice = np.concatenate((choice, extra_choice), axis=0)
np.random.shuffle(choice)
data_dict['points'] = points[choice]
return data_dict
def calculate_grid_size(self, data_dict=None, config=None):
if data_dict is None:
grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
self.grid_size = np.round(grid_size).astype(np.int64)
self.voxel_size = config.VOXEL_SIZE
return partial(self.calculate_grid_size, config=config)
return data_dict
def downsample_depth_map(self, data_dict=None, config=None):
if data_dict is None:
self.depth_downsample_factor = config.DOWNSAMPLE_FACTOR
return partial(self.downsample_depth_map, config=config)
data_dict['depth_maps'] = transform.downscale_local_mean(
image=data_dict['depth_maps'],
factors=(self.depth_downsample_factor, self.depth_downsample_factor)
)
return data_dict
def forward(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
...
Returns:
"""
for cur_processor in self.data_processor_queue:
data_dict = cur_processor(data_dict=data_dict)
return data_dict
def eval(self):
self.training = False
self.mode = 'test'
def train(self):
self.training = True
self.mode = 'train'
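# --- Added illustrative sketch (not part of the original file) ---
# sample_points keeps all points beyond 40 m and randomly subsamples the near
# ones when the budget is smaller than the cloud; either way exactly
# NUM_POINTS survive. EasyDict is used as a stand-in config.
def _demo_sample_points():
    from easydict import EasyDict
    processor = DataProcessor(
        processor_configs=[], point_cloud_range=np.array([0, -40, -3, 70.4, 40, 1]),
        training=True, num_point_features=4
    )
    config = EasyDict({'NUM_POINTS': {'train': 50, 'test': 50}})
    points = np.random.uniform(low=0.0, high=70.0, size=(200, 4)).astype(np.float32)
    data_dict = processor.sample_points(data_dict={'points': points}, config=config)
    assert data_dict['points'].shape == (50, 4)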
class PairDataProcessor(object):
def __init__(self, processor_configs, point_cloud_range, training, num_point_features):
self.point_cloud_range = point_cloud_range
self.training = training
self.num_point_features = num_point_features
self.mode = 'train' if training else 'test'
self.grid_size = self.voxel_size = None
self.data_processor_queue = []
self.voxel_generator = None
for cur_cfg in processor_configs:
cur_processor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_processor_queue.append(cur_processor)
def mask_points_and_boxes_outside_range(self, data_dict_1=None, data_dict_2=None, config=None):
if data_dict_1 is None and data_dict_2 is None:
return partial(self.mask_points_and_boxes_outside_range, config=config)
if data_dict_1.get('points', None) is not None:
mask = common_utils.mask_points_by_range(data_dict_1['points'], self.point_cloud_range)
data_dict_1['points'] = data_dict_1['points'][mask]
data_dict_2['points'] = data_dict_2['points'][mask]
if data_dict_2.get('points', None) is not None:
mask = common_utils.mask_points_by_range(data_dict_2['points'], self.point_cloud_range)
data_dict_2['points'] = data_dict_2['points'][mask]
data_dict_1['points'] = data_dict_1['points'][mask]
if data_dict_1.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training:
mask = box_utils.mask_boxes_outside_range_numpy(
data_dict_1['gt_boxes'], self.point_cloud_range, min_num_corners=config.get('min_num_corners', 1)
)
data_dict_1['gt_boxes'] = data_dict_1['gt_boxes'][mask]
data_dict_2['gt_boxes'] = data_dict_2['gt_boxes'][mask]
if data_dict_2.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training:
mask = box_utils.mask_boxes_outside_range_numpy(
data_dict_2['gt_boxes'], self.point_cloud_range, min_num_corners=config.get('min_num_corners', 1)
)
data_dict_2['gt_boxes'] = data_dict_2['gt_boxes'][mask]
data_dict_1['gt_boxes'] = data_dict_1['gt_boxes'][mask]
return data_dict_1, data_dict_2
def shuffle_points(self, data_dict_1=None, data_dict_2=None, config=None):
if data_dict_1 is None and data_dict_2 is None:
return partial(self.shuffle_points, config=config)
if config.SHUFFLE_ENABLED[self.mode]:
points_1 = data_dict_1['points']
points_2 = data_dict_2['points']
assert points_1.shape[0] == points_2.shape[0]
shuffle_idx = np.random.permutation(points_1.shape[0])
points_1 = points_1[shuffle_idx]
points_2 = points_2[shuffle_idx]
data_dict_1['points'] = points_1
data_dict_2['points'] = points_2
return data_dict_1, data_dict_2
def transform_points_to_voxels(self, data_dict_1=None, data_dict_2=None, config=None):
if data_dict_1 is None and data_dict_2 is None:
grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
self.grid_size = np.round(grid_size).astype(np.int64)
self.voxel_size = config.VOXEL_SIZE
# just bind the config, we will create the VoxelGeneratorWrapper later,
# to avoid pickling issues in multiprocess spawn
return partial(self.transform_points_to_voxels, config=config)
if self.voxel_generator is None:
self.voxel_generator = VoxelGeneratorWrapper(
vsize_xyz=config.VOXEL_SIZE,
coors_range_xyz=self.point_cloud_range,
num_point_features=self.num_point_features,
max_num_points_per_voxel=config.MAX_POINTS_PER_VOXEL,
max_num_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode],
)
points = data_dict_1['points']
voxel_output = self.voxel_generator.generate(points)
voxels, coordinates, num_points = voxel_output
if not data_dict_1['use_lead_xyz']:
voxels = voxels[..., 3:] # remove xyz in voxels(N, 3)
data_dict_1['voxels'] = voxels
data_dict_1['voxel_coords'] = coordinates
data_dict_1['voxel_num_points'] = num_points
points = data_dict_2['points']
voxel_output = self.voxel_generator.generate(points)
voxels, coordinates, num_points = voxel_output
        if not data_dict_2['use_lead_xyz']:
voxels = voxels[..., 3:] # remove xyz in voxels(N, 3)
data_dict_2['voxels'] = voxels
data_dict_2['voxel_coords'] = coordinates
data_dict_2['voxel_num_points'] = num_points
return data_dict_1, data_dict_2
def forward(self, data_dict_1, data_dict_2):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
...
Returns:
"""
for cur_processor in self.data_processor_queue:
data_dict_1, data_dict_2 = cur_processor(data_dict_1=data_dict_1, data_dict_2=data_dict_2)
return data_dict_1, data_dict_2
def eval(self):
self.training = False
self.mode = 'test'
def train(self):
self.training = True
self.mode = 'train'
| 17,420
| 41.07971
| 119
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/processor/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/augmentor/ssl_database_sampler.py
|
import numpy as np
import pickle
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils import box_utils
import os
import io
class SSLDataBaseSampler(object):
def __init__(self, root_path, sampler_cfg, class_names, logger=None, client=None, oss_flag=False):
self.root_path = root_path
self.class_names = class_names
self.sampler_cfg = sampler_cfg
self.logger = logger
self.oss_flag = oss_flag
self.db_infos = {}
for class_name in class_names:
self.db_infos[class_name] = []
self.logger.info(f"*************root_path***********: {root_path}")
if self.oss_flag:
from petrel_client.client import Client
# ~/.petreloss.conf: save the KEY/ACCESS_KEY of S3 Ceph
self.client = Client('~/.petreloss.conf')
for db_info_path in sampler_cfg.DB_INFO_PATH:
if not self.oss_flag:
db_info_path = self.root_path.resolve() / db_info_path
self.logger.info(f"*************Load LINUX db_info_path*************: {db_info_path}")
with open(str(db_info_path), 'rb') as f:
infos = pickle.load(f)
[self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names]
else:
db_info_path = os.path.join(self.root_path, db_info_path)
self.logger.info(f"*************Load OSS db_info_path*************: {db_info_path}")
pkl_bytes = self.client.get(db_info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
[self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names]
for func_name, val in sampler_cfg.PREPARE.items():
self.db_infos = getattr(self, func_name)(self.db_infos, val)
self.sample_groups = {}
self.sample_class_num = {}
self.limit_whole_scene = sampler_cfg.get('LIMIT_WHOLE_SCENE', False)
for x in sampler_cfg.SAMPLE_GROUPS:
class_name, sample_num = x.split(':')
if class_name not in class_names:
continue
self.sample_class_num[class_name] = sample_num
self.sample_groups[class_name] = {
'sample_num': sample_num,
'pointer': len(self.db_infos[class_name]),
'indices': np.arange(len(self.db_infos[class_name]))
}
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def filter_by_difficulty(self, db_infos, removed_difficulty):
new_db_infos = {}
for key, dinfos in db_infos.items():
pre_len = len(dinfos)
new_db_infos[key] = [
info for info in dinfos
if info['difficulty'] not in removed_difficulty
]
if self.logger is not None:
self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key])))
return new_db_infos
def filter_by_min_points(self, db_infos, min_gt_points_list):
for name_num in min_gt_points_list:
name, min_num = name_num.split(':')
min_num = int(min_num)
if min_num > 0 and name in db_infos.keys():
filtered_infos = []
for info in db_infos[name]:
if info['num_points_in_gt'] >= min_num:
filtered_infos.append(info)
if self.logger is not None:
self.logger.info('Database filter by min points %s: %d => %d' %
(name, len(db_infos[name]), len(filtered_infos)))
db_infos[name] = filtered_infos
return db_infos
def sample_with_fixed_number(self, class_name, sample_group):
"""
Args:
class_name:
sample_group:
Returns:
"""
sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices']
if pointer >= len(self.db_infos[class_name]):
indices = np.random.permutation(len(self.db_infos[class_name]))
pointer = 0
sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]]
pointer += sample_num
sample_group['pointer'] = pointer
sample_group['indices'] = indices
return sampled_dict
@staticmethod
def put_boxes_on_road_planes(gt_boxes, road_planes, calib):
"""
Only validate in KITTIDataset
Args:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
road_planes: [a, b, c, d]
calib:
Returns:
"""
a, b, c, d = road_planes
center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3])
cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b
center_cam[:, 1] = cur_height_cam
cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2]
mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height
gt_boxes[:, 2] -= mv_height # lidar view
return gt_boxes, mv_height
def add_sampled_boxes_to_scene(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict):
gt_boxes_mask = data_dict['gt_boxes_mask']
gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
gt_names = data_dict['gt_names'][gt_boxes_mask]
points = data_dict['points']
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
)
data_dict.pop('calib')
data_dict.pop('road_plane')
obj_points_list = []
for idx, info in enumerate(total_valid_sampled_dict):
file_path = os.path.join(self.root_path, info['path'])
if self.oss_flag:
sdk_local_bytes = self.client.get(file_path, update_cache=True)
obj_points = np.frombuffer(sdk_local_bytes, dtype=np.float32).reshape(
[-1, self.sampler_cfg.NUM_POINT_FEATURES]).copy()
else:
obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape(
[-1, self.sampler_cfg.NUM_POINT_FEATURES])
obj_points[:, :3] += info['box3d_lidar'][:3]
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
# mv height
obj_points[:, 2] -= mv_height[idx]
obj_points_list.append(obj_points)
obj_points = np.concatenate(obj_points_list, axis=0)
sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])
large_sampled_gt_boxes = box_utils.enlarge_box3d(
sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH
)
points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
points = np.concatenate([obj_points, points], axis=0)
gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
data_dict['gt_boxes'] = gt_boxes
data_dict['gt_names'] = gt_names
data_dict['points'] = points
return data_dict
def add_sampled_boxes_to_scene_wo_gt(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict):
points = data_dict['points']
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
)
data_dict.pop('calib')
data_dict.pop('road_plane')
obj_points_list = []
for idx, info in enumerate(total_valid_sampled_dict):
file_path = os.path.join(self.root_path, info['path'])
if self.oss_flag:
sdk_local_bytes = self.client.get(file_path, update_cache=True)
obj_points = np.frombuffer(sdk_local_bytes, dtype=np.float32).reshape(
[-1, self.sampler_cfg.NUM_POINT_FEATURES]).copy()
else:
obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape(
[-1, self.sampler_cfg.NUM_POINT_FEATURES])
obj_points[:, :3] += info['box3d_lidar'][:3]
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
# mv height
obj_points[:, 2] -= mv_height[idx]
obj_points_list.append(obj_points)
obj_points = np.concatenate(obj_points_list, axis=0)
large_sampled_gt_boxes = box_utils.enlarge_box3d(
sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH
)
points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
points = np.concatenate([obj_points, points], axis=0)
data_dict['points'] = points
return data_dict
def __call__(self, data_dict):
"""
Args:
data_dict:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
has_gt_boxes_label = 'gt_boxes' in data_dict
if has_gt_boxes_label:
gt_boxes = data_dict['gt_boxes']
gt_names = data_dict['gt_names'].astype(str)
existed_boxes = gt_boxes
total_valid_sampled_dict = []
for class_name, sample_group in self.sample_groups.items():
if self.limit_whole_scene:
num_gt = np.sum(class_name == gt_names)
sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
if int(sample_group['sample_num']) > 0:
sampled_dict = self.sample_with_fixed_number(class_name, sample_group)
sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):
sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
iou1 = iou1 if iou1.shape[1] > 0 else iou2
valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0]
valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
valid_sampled_boxes = sampled_boxes[valid_mask]
existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
total_valid_sampled_dict.extend(valid_sampled_dict)
sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]
if total_valid_sampled_dict.__len__() > 0:
data_dict = self.add_sampled_boxes_to_scene(data_dict, sampled_gt_boxes, total_valid_sampled_dict)
data_dict.pop('gt_boxes_mask')
else:
sampled_gt_boxes = []
total_valid_sampled_dict = []
for class_name, sample_group in self.sample_groups.items():
if self.limit_whole_scene:
num_gt = 0
sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
if int(sample_group['sample_num']) > 0:
sampled_dict = self.sample_with_fixed_number(class_name, sample_group)
sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):
sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
sampled_gt_boxes.append(sampled_boxes)
total_valid_sampled_dict.extend(sampled_dict)
sampled_gt_boxes = np.concatenate(sampled_gt_boxes, axis=0)
if total_valid_sampled_dict.__len__() > 0:
data_dict = self.add_sampled_boxes_to_scene_wo_gt(data_dict, sampled_gt_boxes, total_valid_sampled_dict)
return data_dict
| 12,576
| 44.404332
| 120
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/augmentor/augmentor_utils.py
|
import torch
import numpy as np
import numba
import math
import copy
from ...utils import common_utils
from ...utils import box_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...ops.iou3d_nms import iou3d_nms_utils
import warnings
try:
from numba.errors import NumbaPerformanceWarning
warnings.filterwarnings("ignore", category=NumbaPerformanceWarning)
except ImportError:
pass
def random_flip_along_x(gt_boxes, points):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C)
Returns:
"""
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
gt_boxes[:, 1] = -gt_boxes[:, 1]
gt_boxes[:, 6] = -gt_boxes[:, 6]
points[:, 1] = -points[:, 1]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 8] = -gt_boxes[:, 8]
return gt_boxes, points
def random_flip_along_y(gt_boxes, points):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C)
Returns:
"""
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
gt_boxes[:, 0] = -gt_boxes[:, 0]
gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
points[:, 0] = -points[:, 0]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7] = -gt_boxes[:, 7]
return gt_boxes, points
def global_rotation(gt_boxes, points, rot_range):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
rot_range: [min, max]
Returns:
"""
noise_rotation = np.random.uniform(rot_range[0], rot_range[1])
points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], np.array([noise_rotation]))[0]
gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]))[0]
gt_boxes[:, 6] += noise_rotation
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[np.newaxis, :, :],
np.array([noise_rotation])
)[0][:, 0:2]
return gt_boxes, points
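# Illustrative sanity-check sketch for the rotation convention used above
# (hedged: assumes pcdet's rotate_points_along_z turns +x toward +y for a
# positive angle): rotating (1, 0, 0) by pi/2 lands on (0, 1, 0), mirroring
# what global_rotation applies to both points and box centers.
def _demo_rotate_point_about_z():
    pts = np.array([[[1., 0., 0.]]])  # (B=1, N=1, 3)
    rotated = common_utils.rotate_points_along_z(pts, np.array([np.pi / 2]))[0]
    assert np.allclose(rotated, [[0., 1., 0.]], atol=1e-6)
    return rotated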
def global_scaling(gt_boxes, points, scale_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C),
scale_range: [min, max]
Returns:
"""
if scale_range[1] - scale_range[0] < 1e-3:
return gt_boxes, points
noise_scale = np.random.uniform(scale_range[0], scale_range[1])
points[:, :3] *= noise_scale
gt_boxes[:, :6] *= noise_scale
return gt_boxes, points
def random_image_flip_horizontal(image, depth_map, gt_boxes, calib):
"""
Performs random horizontal flip augmentation
Args:
image: (H_image, W_image, 3), Image
depth_map: (H_depth, W_depth), Depth map
gt_boxes: (N, 7), 3D box labels in LiDAR coordinates [x, y, z, w, l, h, ry]
calib: calibration.Calibration, Calibration object
Returns:
aug_image: (H_image, W_image, 3), Augmented image
aug_depth_map: (H_depth, W_depth), Augmented depth map
aug_gt_boxes: (N, 7), Augmented 3D box labels in LiDAR coordinates [x, y, z, w, l, h, ry]
"""
# Randomly augment with 50% chance
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
# Flip images
aug_image = np.fliplr(image)
aug_depth_map = np.fliplr(depth_map)
# Flip 3D gt_boxes by flipping the centroids in image space
aug_gt_boxes = copy.copy(gt_boxes)
locations = aug_gt_boxes[:, :3]
img_pts, img_depth = calib.lidar_to_img(locations)
W = image.shape[1]
img_pts[:, 0] = W - img_pts[:, 0]
pts_rect = calib.img_to_rect(u=img_pts[:, 0], v=img_pts[:, 1], depth_rect=img_depth)
pts_lidar = calib.rect_to_lidar(pts_rect)
aug_gt_boxes[:, :3] = pts_lidar
aug_gt_boxes[:, 6] = -1 * aug_gt_boxes[:, 6]
else:
aug_image = image
aug_depth_map = depth_map
aug_gt_boxes = gt_boxes
return aug_image, aug_depth_map, aug_gt_boxes
def random_translation_along_x(gt_boxes, points, offset_std):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
offset_std: float
Returns:
"""
offset = np.random.normal(0, offset_std, 1)
points[:, 0] += offset
gt_boxes[:, 0] += offset
# if gt_boxes.shape[1] > 7:
# gt_boxes[:, 7] += offset
return gt_boxes, points
def random_translation_along_y(gt_boxes, points, offset_std):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
offset_std: float
Returns:
"""
offset = np.random.normal(0, offset_std, 1)
points[:, 1] += offset
gt_boxes[:, 1] += offset
# if gt_boxes.shape[1] > 8:
# gt_boxes[:, 8] += offset
return gt_boxes, points
def random_translation_along_z(gt_boxes, points, offset_std):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
offset_std: float
Returns:
"""
offset = np.random.normal(0, offset_std, 1)
points[:, 2] += offset
gt_boxes[:, 2] += offset
return gt_boxes, points
def random_local_translation_along_x(gt_boxes, points, offset_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
        offset_range: [min, max]
Returns:
"""
# augs = {}
for idx, box in enumerate(gt_boxes):
offset = np.random.uniform(offset_range[0], offset_range[1])
# augs[f'object_{idx}'] = offset
points_in_box, mask = get_points_in_box(points, box)
points[mask, 0] += offset
gt_boxes[idx, 0] += offset
# if gt_boxes.shape[1] > 7:
# gt_boxes[idx, 7] += offset
return gt_boxes, points
def random_local_translation_along_y(gt_boxes, points, offset_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
        offset_range: [min, max]
Returns:
"""
# augs = {}
for idx, box in enumerate(gt_boxes):
offset = np.random.uniform(offset_range[0], offset_range[1])
# augs[f'object_{idx}'] = offset
points_in_box, mask = get_points_in_box(points, box)
points[mask, 1] += offset
gt_boxes[idx, 1] += offset
# if gt_boxes.shape[1] > 8:
# gt_boxes[idx, 8] += offset
return gt_boxes, points
def random_local_translation_along_z(gt_boxes, points, offset_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
        offset_range: [min, max]
Returns:
"""
# augs = {}
for idx, box in enumerate(gt_boxes):
offset = np.random.uniform(offset_range[0], offset_range[1])
# augs[f'object_{idx}'] = offset
points_in_box, mask = get_points_in_box(points, box)
points[mask, 2] += offset
gt_boxes[idx, 2] += offset
return gt_boxes, points
def global_frustum_dropout_top(gt_boxes, points, intensity_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]],
points: (M, 3 + C),
        intensity_range: [min, max]
Returns:
"""
intensity = np.random.uniform(intensity_range[0], intensity_range[1])
    # threshold = z_max - intensity * (z_max - z_min): points above it (the top `intensity` fraction of the height range) are dropped
threshold = np.max(points[:, 2]) - intensity * (np.max(points[:, 2]) - np.min(points[:, 2]))
points = points[points[:, 2] < threshold]
gt_boxes = gt_boxes[gt_boxes[:, 2] < threshold]
return gt_boxes, points
def global_frustum_dropout_bottom(gt_boxes, points, intensity_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]],
points: (M, 3 + C),
        intensity_range: [min, max]
Returns:
"""
intensity = np.random.uniform(intensity_range[0], intensity_range[1])
threshold = np.min(points[:, 2]) + intensity * (np.max(points[:, 2]) - np.min(points[:, 2]))
points = points[points[:, 2] > threshold]
gt_boxes = gt_boxes[gt_boxes[:, 2] > threshold]
return gt_boxes, points
def global_frustum_dropout_left(gt_boxes, points, intensity_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]],
points: (M, 3 + C),
        intensity_range: [min, max]
Returns:
"""
intensity = np.random.uniform(intensity_range[0], intensity_range[1])
threshold = np.max(points[:, 1]) - intensity * (np.max(points[:, 1]) - np.min(points[:, 1]))
points = points[points[:, 1] < threshold]
gt_boxes = gt_boxes[gt_boxes[:, 1] < threshold]
return gt_boxes, points
def global_frustum_dropout_right(gt_boxes, points, intensity_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]],
points: (M, 3 + C),
        intensity_range: [min, max]
Returns:
"""
intensity = np.random.uniform(intensity_range[0], intensity_range[1])
threshold = np.min(points[:, 1]) + intensity * (np.max(points[:, 1]) - np.min(points[:, 1]))
points = points[points[:, 1] > threshold]
gt_boxes = gt_boxes[gt_boxes[:, 1] > threshold]
return gt_boxes, points
def local_scaling(gt_boxes, points, scale_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C),
scale_range: [min, max]
Returns:
"""
if scale_range[1] - scale_range[0] < 1e-3:
return gt_boxes, points
# augs = {}
for idx, box in enumerate(gt_boxes):
noise_scale = np.random.uniform(scale_range[0], scale_range[1])
# augs[f'object_{idx}'] = noise_scale
points_in_box, mask = get_points_in_box(points, box)
        # translate to the box center
points[mask, 0] -= box[0]
points[mask, 1] -= box[1]
points[mask, 2] -= box[2]
# apply scaling
points[mask, :3] *= noise_scale
        # translate back to the original position
points[mask, 0] += box[0]
points[mask, 1] += box[1]
points[mask, 2] += box[2]
gt_boxes[idx, 3:6] *= noise_scale
return gt_boxes, points
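# Minimal worked example of the translate-scale-translate pattern used by
# local_scaling (hedged illustration with made-up numbers): scaling a point
# about the box center keeps the center fixed while stretching the offset.
def _demo_scale_about_center():
    center = np.array([10., 0., 0.])
    pt = np.array([11., 0., 0.])
    scaled = (pt - center) * 1.1 + center  # offset grows from 1.0 to 1.1
    assert np.allclose(scaled, [11.1, 0., 0.])
    return scaled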
def local_rotation(gt_boxes, points, rot_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
rot_range: [min, max]
Returns:
"""
# augs = {}
for idx, box in enumerate(gt_boxes):
noise_rotation = np.random.uniform(rot_range[0], rot_range[1])
# augs[f'object_{idx}'] = noise_rotation
points_in_box, mask = get_points_in_box(points, box)
centroid_x = box[0]
centroid_y = box[1]
centroid_z = box[2]
        # translate to the box center
points[mask, 0] -= centroid_x
points[mask, 1] -= centroid_y
points[mask, 2] -= centroid_z
box[0] -= centroid_x
box[1] -= centroid_y
box[2] -= centroid_z
# apply rotation
points[mask, :] = common_utils.rotate_points_along_z(points[np.newaxis, mask, :], np.array([noise_rotation]))[0]
box[0:3] = common_utils.rotate_points_along_z(box[np.newaxis, np.newaxis, 0:3], np.array([noise_rotation]))[0][0]
        # translate back to the original position
points[mask, 0] += centroid_x
points[mask, 1] += centroid_y
points[mask, 2] += centroid_z
box[0] += centroid_x
box[1] += centroid_y
box[2] += centroid_z
gt_boxes[idx, 6] += noise_rotation
        if gt_boxes.shape[1] > 8:
            # rotate this box's (vx, vy) velocity; pad a zero z-component so the
            # 1-D slice can go through the (B, N, 3) rotation utility
            vel = np.hstack((gt_boxes[idx, 7:9], np.zeros(1)))[np.newaxis, np.newaxis, :]
            gt_boxes[idx, 7:9] = common_utils.rotate_points_along_z(
                vel, np.array([noise_rotation])
            )[0][0, 0:2]
return gt_boxes, points
def local_frustum_dropout_top(gt_boxes, points, intensity_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]],
points: (M, 3 + C),
        intensity_range: [min, max]
Returns:
"""
for idx, box in enumerate(gt_boxes):
x, y, z, dx, dy, dz = box[0], box[1], box[2], box[3], box[4], box[5]
intensity = np.random.uniform(intensity_range[0], intensity_range[1])
points_in_box, mask = get_points_in_box(points, box)
threshold = (z + dz / 2) - intensity * dz
points = points[np.logical_not(np.logical_and(mask, points[:, 2] >= threshold))]
return gt_boxes, points
def local_frustum_dropout_bottom(gt_boxes, points, intensity_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]],
points: (M, 3 + C),
        intensity_range: [min, max]
Returns:
"""
for idx, box in enumerate(gt_boxes):
x, y, z, dx, dy, dz = box[0], box[1], box[2], box[3], box[4], box[5]
intensity = np.random.uniform(intensity_range[0], intensity_range[1])
points_in_box, mask = get_points_in_box(points, box)
threshold = (z - dz / 2) + intensity * dz
points = points[np.logical_not(np.logical_and(mask, points[:, 2] <= threshold))]
return gt_boxes, points
def local_frustum_dropout_left(gt_boxes, points, intensity_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]],
points: (M, 3 + C),
        intensity_range: [min, max]
Returns:
"""
for idx, box in enumerate(gt_boxes):
x, y, z, dx, dy, dz = box[0], box[1], box[2], box[3], box[4], box[5]
intensity = np.random.uniform(intensity_range[0], intensity_range[1])
points_in_box, mask = get_points_in_box(points, box)
threshold = (y + dy / 2) - intensity * dy
points = points[np.logical_not(np.logical_and(mask, points[:, 1] >= threshold))]
return gt_boxes, points
def local_frustum_dropout_right(gt_boxes, points, intensity_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]],
points: (M, 3 + C),
        intensity_range: [min, max]
Returns:
"""
for idx, box in enumerate(gt_boxes):
x, y, z, dx, dy, dz = box[0], box[1], box[2], box[3], box[4], box[5]
intensity = np.random.uniform(intensity_range[0], intensity_range[1])
points_in_box, mask = get_points_in_box(points, box)
threshold = (y - dy / 2) + intensity * dy
points = points[np.logical_not(np.logical_and(mask, points[:, 1] <= threshold))]
return gt_boxes, points
def get_points_in_box(points, gt_box):
x, y, z = points[:, 0], points[:, 1], points[:, 2]
cx, cy, cz = gt_box[0], gt_box[1], gt_box[2]
dx, dy, dz, rz = gt_box[3], gt_box[4], gt_box[5], gt_box[6]
shift_x, shift_y, shift_z = x - cx, y - cy, z - cz
MARGIN = 1e-1
cosa, sina = math.cos(-rz), math.sin(-rz)
local_x = shift_x * cosa + shift_y * (-sina)
local_y = shift_x * sina + shift_y * cosa
mask = np.logical_and(abs(shift_z) <= dz / 2.0,
np.logical_and(abs(local_x) <= dx / 2.0 + MARGIN,
abs(local_y) <= dy / 2.0 + MARGIN))
points = points[mask]
return points, mask
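# Hedged usage sketch for get_points_in_box with made-up inputs: for an
# axis-aligned 2 x 2 x 2 box at the origin (rz = 0), only the first point
# below lies inside; the 0.1 MARGIN slightly widens the xy test, matching
# the implementation above.
def _demo_get_points_in_box():
    box = np.array([0., 0., 0., 2., 2., 2., 0.])
    pts = np.array([[0.5, 0.5, 0.5, 1.0],   # inside
                    [5.0, 0.0, 0.0, 1.0]])  # outside
    _, mask = get_points_in_box(pts, box)
    assert mask.tolist() == [True, False]
    return mask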
def get_pyramids(boxes):
pyramid_orders = np.array([
[0, 1, 5, 4],
[4, 5, 6, 7],
[7, 6, 2, 3],
[3, 2, 1, 0],
[1, 2, 6, 5],
[0, 4, 7, 3]
])
boxes_corners = box_utils.boxes_to_corners_3d(boxes).reshape(-1, 24)
pyramid_list = []
for order in pyramid_orders:
# frustum polygon: 5 corners, 5 surfaces
pyramid = np.concatenate((
boxes[:, 0:3],
boxes_corners[:, 3 * order[0]: 3 * order[0] + 3],
boxes_corners[:, 3 * order[1]: 3 * order[1] + 3],
boxes_corners[:, 3 * order[2]: 3 * order[2] + 3],
boxes_corners[:, 3 * order[3]: 3 * order[3] + 3]), axis=1)
pyramid_list.append(pyramid[:, None, :])
pyramids = np.concatenate(pyramid_list, axis=1) # [N, 6, 15], 15=5*3
return pyramids
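# Hedged shape-check sketch with a made-up box: one box yields six frustum
# pyramids, each encoded as 15 floats (the box-center apex plus the four
# corners of one face).
def _demo_get_pyramids_shape():
    box = np.array([[0., 0., 0., 4., 2., 1.5, 0.]])
    pyramids = get_pyramids(box)
    assert pyramids.shape == (1, 6, 15)
    return pyramids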
def one_hot(x, num_class=1):
if num_class is None:
num_class = 1
ohx = np.zeros((len(x), num_class))
ohx[range(len(x)), x] = 1
return ohx
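# Hedged usage sketch: indices [0, 2] with num_class=3 become the one-hot
# rows [1, 0, 0] and [0, 0, 1].
def _demo_one_hot():
    ohx = one_hot(np.array([0, 2]), num_class=3)
    assert ohx.tolist() == [[1., 0., 0.], [0., 0., 1.]]
    return ohx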
def points_in_pyramids_mask(points, pyramids):
pyramids = pyramids.reshape(-1, 5, 3)
    flags = np.zeros((points.shape[0], pyramids.shape[0]), dtype=bool)
for i, pyramid in enumerate(pyramids):
flags[:, i] = np.logical_or(flags[:, i], box_utils.in_hull(points[:, 0:3], pyramid))
return flags
def local_pyramid_dropout(gt_boxes, points, dropout_prob, pyramids=None):
if pyramids is None:
pyramids = get_pyramids(gt_boxes).reshape([-1, 6, 5, 3]) # each six surface of boxes: [num_boxes, 6, 15=3*5]
drop_pyramid_indices = np.random.randint(0, 6, (pyramids.shape[0]))
drop_pyramid_one_hot = one_hot(drop_pyramid_indices, num_class=6)
drop_box_mask = np.random.uniform(0, 1, (pyramids.shape[0])) <= dropout_prob
if np.sum(drop_box_mask) != 0:
drop_pyramid_mask = (np.tile(drop_box_mask[:, None], [1, 6]) * drop_pyramid_one_hot) > 0
drop_pyramids = pyramids[drop_pyramid_mask]
point_masks = points_in_pyramids_mask(points, drop_pyramids)
points = points[np.logical_not(point_masks.any(-1))]
# print(drop_box_mask)
pyramids = pyramids[np.logical_not(drop_box_mask)]
return gt_boxes, points, pyramids
def local_pyramid_sparsify(gt_boxes, points, prob, max_num_pts, pyramids=None):
if pyramids is None:
pyramids = get_pyramids(gt_boxes).reshape([-1, 6, 5, 3]) # each six surface of boxes: [num_boxes, 6, 15=3*5]
if pyramids.shape[0] > 0:
sparsity_prob, sparsity_num = prob, max_num_pts
sparsify_pyramid_indices = np.random.randint(0, 6, (pyramids.shape[0]))
sparsify_pyramid_one_hot = one_hot(sparsify_pyramid_indices, num_class=6)
sparsify_box_mask = np.random.uniform(0, 1, (pyramids.shape[0])) <= sparsity_prob
sparsify_pyramid_mask = (np.tile(sparsify_box_mask[:, None], [1, 6]) * sparsify_pyramid_one_hot) > 0
# print(sparsify_box_mask)
pyramid_sampled = pyramids[sparsify_pyramid_mask] # (-1,6,5,3)[(num_sample,6)]
# print(pyramid_sampled.shape)
pyramid_sampled_point_masks = points_in_pyramids_mask(points, pyramid_sampled)
pyramid_sampled_points_num = pyramid_sampled_point_masks.sum(0) # the number of points in each surface pyramid
        valid_pyramid_sampled_mask = pyramid_sampled_points_num > sparsity_num  # only pyramids with more than sparsity_num points are sparsified
sparsify_pyramids = pyramid_sampled[valid_pyramid_sampled_mask]
if sparsify_pyramids.shape[0] > 0:
point_masks = pyramid_sampled_point_masks[:, valid_pyramid_sampled_mask]
            remain_points = points[np.logical_not(point_masks.any(-1))]  # points outside the sampled pyramids
to_sparsify_points = [points[point_masks[:, i]] for i in range(point_masks.shape[1])]
sparsified_points = []
for sample in to_sparsify_points:
sampled_indices = np.random.choice(sample.shape[0], size=sparsity_num, replace=False)
sparsified_points.append(sample[sampled_indices])
sparsified_points = np.concatenate(sparsified_points, axis=0)
points = np.concatenate([remain_points, sparsified_points], axis=0)
pyramids = pyramids[np.logical_not(sparsify_box_mask)]
return gt_boxes, points, pyramids
def local_pyramid_swap(gt_boxes, points, prob, max_num_pts, pyramids=None):
def get_points_ratio(points, pyramid):
surface_center = (pyramid[3:6] + pyramid[6:9] + pyramid[9:12] + pyramid[12:]) / 4.0
vector_0, vector_1, vector_2 = pyramid[6:9] - pyramid[3:6], pyramid[12:] - pyramid[3:6], pyramid[0:3] - surface_center
alphas = ((points[:, 0:3] - pyramid[3:6]) * vector_0).sum(-1) / np.power(vector_0, 2).sum()
betas = ((points[:, 0:3] - pyramid[3:6]) * vector_1).sum(-1) / np.power(vector_1, 2).sum()
gammas = ((points[:, 0:3] - surface_center) * vector_2).sum(-1) / np.power(vector_2, 2).sum()
return [alphas, betas, gammas]
def recover_points_by_ratio(points_ratio, pyramid):
alphas, betas, gammas = points_ratio
surface_center = (pyramid[3:6] + pyramid[6:9] + pyramid[9:12] + pyramid[12:]) / 4.0
vector_0, vector_1, vector_2 = pyramid[6:9] - pyramid[3:6], pyramid[12:] - pyramid[3:6], pyramid[0:3] - surface_center
points = (alphas[:, None] * vector_0 + betas[:, None] * vector_1) + pyramid[3:6] + gammas[:, None] * vector_2
return points
def recover_points_intensity_by_ratio(points_intensity_ratio, max_intensity, min_intensity):
return points_intensity_ratio * (max_intensity - min_intensity) + min_intensity
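    # Note (hedged reading of the helpers above): the ratios (alphas, betas,
    # gammas) express each point in the pyramid's own frame -- alphas/betas
    # along the two base edges, gammas toward the apex -- so
    # recover_points_by_ratio can re-instantiate one pyramid's point pattern
    # inside a differently shaped pyramid, which is what the swap below exploits.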
# swap partition
if pyramids is None:
pyramids = get_pyramids(gt_boxes).reshape([-1, 6, 5, 3]) # each six surface of boxes: [num_boxes, 6, 15=3*5]
swap_prob, num_thres = prob, max_num_pts
swap_pyramid_mask = np.random.uniform(0, 1, (pyramids.shape[0])) <= swap_prob
if swap_pyramid_mask.sum() > 0:
point_masks = points_in_pyramids_mask(points, pyramids)
point_nums = point_masks.sum(0).reshape(pyramids.shape[0], -1) # [N, 6]
        non_zero_pyramids_mask = point_nums > num_thres  # ignore dropped-out or highly occluded pyramids
        selected_pyramids = non_zero_pyramids_mask * swap_pyramid_mask[:, None]  # selected boxes and all their valid pyramids
# print(selected_pyramids)
if selected_pyramids.sum() > 0:
# get to_swap pyramids
index_i, index_j = np.nonzero(selected_pyramids)
            selected_pyramid_indices = [
                np.random.choice(index_j[index_i == i]) if e and (index_i == i).any() else 0
                for i, e in enumerate(swap_pyramid_mask)
            ]
selected_pyramids_mask = selected_pyramids * one_hot(selected_pyramid_indices, num_class=6) == 1
to_swap_pyramids = pyramids[selected_pyramids_mask]
# get swapped pyramids
index_i, index_j = np.nonzero(selected_pyramids_mask)
non_zero_pyramids_mask[selected_pyramids_mask] = False
            swapped_index_i = np.array([
                np.random.choice(np.where(non_zero_pyramids_mask[:, j])[0])
                if np.where(non_zero_pyramids_mask[:, j])[0].shape[0] > 0 else index_i[i]
                for i, j in enumerate(index_j.tolist())
            ])
            swapped_indices = np.concatenate([swapped_index_i[:, None], index_j[:, None]], axis=1)
            swapped_pyramids = pyramids[
                swapped_indices[:, 0].astype(np.int32), swapped_indices[:, 1].astype(np.int32)]
# concat to_swap&swapped pyramids
swap_pyramids = np.concatenate([to_swap_pyramids, swapped_pyramids], axis=0)
swap_point_masks = points_in_pyramids_mask(points, swap_pyramids)
remain_points = points[np.logical_not(swap_point_masks.any(-1))]
# swap pyramids
points_res = []
num_swapped_pyramids = swapped_pyramids.shape[0]
for i in range(num_swapped_pyramids):
to_swap_pyramid = to_swap_pyramids[i]
swapped_pyramid = swapped_pyramids[i]
to_swap_points = points[swap_point_masks[:, i]]
swapped_points = points[swap_point_masks[:, i + num_swapped_pyramids]]
# for intensity transform
                to_swap_points_intensity_ratio = (to_swap_points[:, -1:] - to_swap_points[:, -1:].min()) / \
                    np.clip(to_swap_points[:, -1:].max() - to_swap_points[:, -1:].min(), 1e-6, 1)
                swapped_points_intensity_ratio = (swapped_points[:, -1:] - swapped_points[:, -1:].min()) / \
                    np.clip(swapped_points[:, -1:].max() - swapped_points[:, -1:].min(), 1e-6, 1)
to_swap_points_ratio = get_points_ratio(to_swap_points, to_swap_pyramid.reshape(15))
swapped_points_ratio = get_points_ratio(swapped_points, swapped_pyramid.reshape(15))
new_to_swap_points = recover_points_by_ratio(swapped_points_ratio, to_swap_pyramid.reshape(15))
new_swapped_points = recover_points_by_ratio(to_swap_points_ratio, swapped_pyramid.reshape(15))
# for intensity transform
new_to_swap_points_intensity = recover_points_intensity_by_ratio(
swapped_points_intensity_ratio, to_swap_points[:, -1:].max(),
to_swap_points[:, -1:].min())
new_swapped_points_intensity = recover_points_intensity_by_ratio(
to_swap_points_intensity_ratio, swapped_points[:, -1:].max(),
swapped_points[:, -1:].min())
# new_to_swap_points = np.concatenate([new_to_swap_points, swapped_points[:, -1:]], axis=1)
# new_swapped_points = np.concatenate([new_swapped_points, to_swap_points[:, -1:]], axis=1)
new_to_swap_points = np.concatenate([new_to_swap_points, new_to_swap_points_intensity], axis=1)
new_swapped_points = np.concatenate([new_swapped_points, new_swapped_points_intensity], axis=1)
points_res.append(new_to_swap_points)
points_res.append(new_swapped_points)
points_res = np.concatenate(points_res, axis=0)
points = np.concatenate([remain_points, points_res], axis=0)
return gt_boxes, points
def global_sampling(gt_boxes, points, gt_boxes_mask, sample_ratio_range, prob):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C)
        gt_boxes_mask: (N), boolean mask for gt_boxes
        sample_ratio_range: [min, max], ratio of points to keep
        prob: probability of applying the sampling to this frame
Returns:
"""
if np.random.uniform(0, 1) > prob:
return gt_boxes, points, gt_boxes_mask
num_points = points.shape[0]
sample_ratio = np.random.uniform(sample_ratio_range[0], sample_ratio_range[1])
remain_points_num = int(num_points * sample_ratio)
# shuffle points
shuffle_idx = np.random.permutation(points.shape[0])
points = points[shuffle_idx]
# sample points
points = points[:remain_points_num]
# mask empty gt_boxes
num_points_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, :3]),
torch.from_numpy(gt_boxes[:, :7])
).numpy().sum(axis=1)
mask = (num_points_in_gt >= 1)
gt_boxes_mask = gt_boxes_mask & mask
return gt_boxes, points, gt_boxes_mask
def scale_pre_object(gt_boxes, points, scale_perturb, num_try=50):
"""
uniform sacle object with given range
Args:
gt_boxes: (N, 7) under unified coordinates
points: (M, 3 + C) points in lidar
gt_boxes_mask: (N), boolen mask for
scale_perturb:
num_try:
Returns:
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(scale_perturb, (list, tuple, np.ndarray)):
scale_perturb = [-scale_perturb, scale_perturb]
# boxes wise scale ratio
scale_noises = np.random.uniform(scale_perturb[0], scale_perturb[1], size=[num_boxes, num_try])
for k in range(num_boxes):
# if gt_boxes_mask[k] == 0:
# continue
scl_box = copy.deepcopy(gt_boxes[k])
scl_box = scl_box.reshape(1, -1).repeat([num_try], axis=0)
scl_box[:, 3:6] = scl_box[:, 3:6] * scale_noises[k].reshape(-1, 1).repeat([3], axis=1)
# detect conflict
# [num_try, N-1]
if num_boxes > 1:
self_mask = np.ones(num_boxes, dtype=np.bool_)
self_mask[k] = False
iou_matrix = iou3d_nms_utils.boxes_bev_iou_cpu(scl_box, gt_boxes[self_mask])
ious = np.max(iou_matrix, axis=1)
no_conflict_mask = (ious == 0)
            # all tries conflict with other GT boxes
if no_conflict_mask.sum() == 0:
continue
# scale points and assign new box
try_idx = no_conflict_mask.nonzero()[0][0]
else:
try_idx = 0
        point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(
            points[:, 0:3], np.expand_dims(gt_boxes[k], axis=0)).squeeze(0)
obj_points = points[point_masks > 0]
obj_center, lwh, ry = gt_boxes[k, 0:3], gt_boxes[k, 3:6], gt_boxes[k, 6]
# relative coordinates
obj_points[:, 0:3] -= obj_center
obj_points = common_utils.rotate_points_along_z(np.expand_dims(obj_points, axis=0), -ry).squeeze(0)
new_lwh = lwh * scale_noises[k][try_idx]
obj_points[:, 0:3] = obj_points[:, 0:3] * scale_noises[k][try_idx]
obj_points = common_utils.rotate_points_along_z(np.expand_dims(obj_points, axis=0), ry).squeeze(0)
        # lift the object center by half the height change so the box bottom stays on the ground
obj_center[2] += (new_lwh[2] - lwh[2]) / 2
obj_points[:, 0:3] += obj_center
points[point_masks > 0] = obj_points
gt_boxes[k, 3:6] = new_lwh
        # if the box was enlarged, remove background points that fall inside it
        if scale_noises[k][try_idx] > 1:
            points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu(
                points[:, 0:3], np.expand_dims(gt_boxes[k], axis=0)).squeeze(0)
keep_mask = ~np.logical_xor(point_masks, points_dst_mask)
points = points[keep_mask]
return points, gt_boxes
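# Hedged sketch of the BEV conflict test used above (assumes pcdet's compiled
# CPU IoU op is available): two identical boxes have BEV IoU ~1, so any scale
# try that overlaps another GT box is rejected.
def _demo_bev_conflict():
    box = np.array([[0., 0., 0., 4., 2., 1.5, 0.]], dtype=np.float32)
    iou = iou3d_nms_utils.boxes_bev_iou_cpu(box, box)
    assert np.isclose(iou[0, 0], 1.0, atol=1e-3)
    return iou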
def normalize_object_size(boxes, points, boxes_mask, size_res):
"""
    :param boxes: (N, 7) under unified coordinates
    :param points: (N, 3 + C)
    :param boxes_mask: (N), boolean mask of boxes to normalize
    :param size_res: (3), size residual [l, w, h] added to each box size
    :return:
"""
points = copy.deepcopy(points)
boxes = copy.deepcopy(boxes)
for k in range(boxes.shape[0]):
        # skip boxes that do not need normalization
if boxes_mask[k] == 0:
continue
masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes[k:k+1, :7]).squeeze(0)
obj_points = points[masks > 0]
obj_center, lwh, ry = boxes[k, 0:3], boxes[k, 3:6], boxes[k, 6]
obj_points[:, 0:3] -= obj_center
obj_points = common_utils.rotate_points_along_z(np.expand_dims(obj_points, axis=0), -ry).squeeze(0)
new_lwh = lwh + np.array(size_res)
        # skip boxes whose adjusted size would become negative
if (new_lwh < 0).any():
boxes_mask[k] = False
continue
scale_lwh = new_lwh / lwh
obj_points[:, 0:3] = obj_points[:, 0:3] * scale_lwh
obj_points = common_utils.rotate_points_along_z(np.expand_dims(obj_points, axis=0), ry).squeeze(0)
        # lift the object center by half the height residual so the box bottom stays on the ground
obj_center[2] += size_res[2] / 2
obj_points[:, 0:3] += obj_center
points[masks > 0] = obj_points
boxes[k, 3:6] = new_lwh
        # if the box was enlarged, remove background points that fall inside it
        if (np.array(size_res) > 0).any():
            points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu(
                points[:, 0:3], np.expand_dims(boxes[k], axis=0)).squeeze(0)
keep_mask = ~np.logical_xor(masks, points_dst_mask)
points = points[keep_mask]
return points, boxes
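# Hedged worked example for normalize_object_size with made-up numbers: with
# size_res = [0.5, 0.3, 0.2], a (4.0, 1.8, 1.5) box becomes (4.5, 2.1, 1.7);
# its points are scaled per axis by new_lwh / lwh about the box center, and
# the center z is lifted by 0.2 / 2 = 0.1 so the bottom face stays grounded.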
def rotate_objects(gt_boxes, points, gt_boxes_mask, rotation_perturb, prob, num_try=50):
"""
    Args:
        gt_boxes: [N, 7] (x, y, z, dx, dy, dz, heading) under unified coordinates
        points: (M, 3 + C)
        gt_boxes_mask: [N] bool
        rotation_perturb: rotation noise, scalar or [min, max]
        prob: probability of rotating each object
        num_try: number of rotation candidates to try per box
Returns:
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
    # decide per object, with probability prob, whether to rotate it
rot_mask = np.random.uniform(0, 1, size=[num_boxes]) < prob
    # generate random rotation noise for each box
rot_noise = np.random.uniform(rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
for idx in range(num_boxes):
# don't need to rotate this object
if (not rot_mask[idx]) or (not gt_boxes_mask[idx]):
continue
# generate rotated boxes num_try times
rot_box = copy.deepcopy(gt_boxes[idx])
# [num_try, 7]
rot_box = rot_box.reshape(1, -1).repeat([num_try], axis=0)
rot_box[:, 6] += rot_noise[idx]
# detect conflict
# [num_try, N-1]
if num_boxes > 1:
self_mask = np.ones(num_boxes, dtype=np.bool_)
self_mask[idx] = False
iou_matrix = iou3d_nms_utils.boxes_bev_iou_cpu(rot_box, gt_boxes[self_mask])
ious = np.max(iou_matrix, axis=1)
no_conflict_mask = (ious == 0)
            # all tries conflict with other GT boxes
if no_conflict_mask.sum() == 0:
continue
# rotate points and assign new box
try_idx = no_conflict_mask.nonzero()[0][0]
else:
try_idx = 0
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3],
np.expand_dims(gt_boxes[idx], axis=0)).squeeze(0)
object_points = points[point_masks > 0]
object_center = gt_boxes[idx][0:3]
object_points[:, 0:3] -= object_center
object_points = common_utils.rotate_points_along_z(object_points[np.newaxis, :, :],
np.array([rot_noise[idx][try_idx]]))[0]
object_points[:, 0:3] += object_center
points[point_masks > 0] = object_points
        # remove background points lying where the rotated box will be placed
points_dst_mask = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3],
np.expand_dims(rot_box[try_idx], axis=0)).squeeze(0)
keep_mask = ~np.logical_xor(point_masks, points_dst_mask)
points = points[keep_mask]
gt_boxes[idx] = rot_box[try_idx]
return gt_boxes, points
| 35,553
| 37.3125
| 126
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/augmentor/ssl_data_augmentor.py
|
from functools import partial
import numpy as np
import copy
from ...utils import common_utils
from .ssl_database_sampler import SSLDataBaseSampler
class SSLDataAugmentor(object):
def __init__(self, root_path, augmentor_configs, class_names, logger=None, oss_flag=False):
self.root_path = root_path
self.class_names = class_names
self.logger = logger
self.oss_flag = oss_flag
self.aug_list = []
self.augmentor_queue = []
aug_config_list = augmentor_configs.AUG_CONFIG_LIST
for cur_cfg in aug_config_list:
if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
continue
cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.augmentor_queue.append(cur_augmentor)
self.aug_list.append(cur_cfg.NAME)
def gt_sampling(self, config=None):
db_sampler = SSLDataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
class_names=self.class_names,
logger=self.logger,
oss_flag=self.oss_flag
)
return db_sampler
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def random_world_flip(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_flip, config=config)
params = []
points = data_dict['points']
gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None
for cur_axis in config['ALONG_AXIS_LIST']:
if cur_axis == 'x':
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
points[:, 1] = -points[:, 1]
if 'gt_boxes' in data_dict:
gt_boxes[:, 1] = -gt_boxes[:, 1]
gt_boxes[:, 6] = -gt_boxes[:, 6]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 8] = -gt_boxes[:, 8]
params.append('x')
elif cur_axis == 'y':
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
points[:, 0] = -points[:, 0]
if 'gt_boxes' in data_dict:
gt_boxes[:, 0] = -gt_boxes[:, 0]
gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7] = -gt_boxes[:, 7]
params.append('y')
else:
raise NotImplementedError
data_dict['augmentation_params']['random_world_flip'] = params
data_dict['points'] = points
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
return data_dict
def random_world_rotation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_rotation, config=config)
rot_range = config['WORLD_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
points = data_dict['points']
gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None
noise_rotation = np.random.uniform(rot_range[0], rot_range[1])
points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], np.array([noise_rotation]))[0]
if 'gt_boxes' in data_dict:
gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]))[0]
gt_boxes[:, 6] += noise_rotation
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[np.newaxis, :, :],
np.array([noise_rotation])
)[0][:, 0:2]
data_dict['augmentation_params']['random_world_rotation'] = noise_rotation
data_dict['points'] = points
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
return data_dict
def random_world_scaling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling, config=config)
scale_range = config['WORLD_SCALE_RANGE']
points = data_dict['points']
gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None
if scale_range[1] - scale_range[0] < 1e-3:
noise_scale = 1
else:
noise_scale = np.random.uniform(scale_range[0], scale_range[1])
points[:, :3] *= noise_scale
if 'gt_boxes' in data_dict:
gt_boxes[:, :6] *= noise_scale
data_dict['augmentation_params']['random_world_scaling'] = noise_scale
data_dict['points'] = points
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
return data_dict
def forward(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
gt_names: optional, (N), string
...
Returns:
"""
data_dict['augmentation_list'] = copy.deepcopy(self.aug_list)
data_dict['augmentation_params'] = {}
for cur_augmentor in self.augmentor_queue:
data_dict = cur_augmentor(data_dict)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
if 'calib' in data_dict:
data_dict.pop('calib')
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
return data_dict
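        # Note (hedged): 'augmentation_params' records the sampled flip axes,
        # rotation angle and scale factor so the same stochastic transform can
        # be reproduced or inverted downstream, presumably to align teacher and
        # student views in semi-supervised training; the exact consumer lives
        # outside this file.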
| 6,295
| 37.390244
| 126
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/augmentor/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/augmentor/data_augmentor.py
|
from functools import partial
import numpy as np
from ...utils import common_utils
from . import augmentor_utils, database_sampler
class DataAugmentor(object):
def __init__(self, root_path, augmentor_configs, class_names, logger=None, oss_flag=False):
self.root_path = root_path
self.class_names = class_names
self.logger = logger
self.oss_flag = oss_flag
self.augmentor_configs = augmentor_configs
self.data_augmentor_queue = []
aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
else augmentor_configs.AUG_CONFIG_LIST
for cur_cfg in aug_config_list:
if not isinstance(augmentor_configs, list):
if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
continue
cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_augmentor_queue.append(cur_augmentor)
def gt_sampling(self, config=None):
db_sampler = database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
class_names=self.class_names,
logger=self.logger,
oss_flag=self.oss_flag
)
return db_sampler
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def random_object_rotation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_object_rotation, config=config)
gt_boxes, points = augmentor_utils.rotate_objects(
data_dict['gt_boxes'],
data_dict['points'],
data_dict['gt_boxes_mask'],
rotation_perturb=config['ROT_UNIFORM_NOISE'],
prob=config['ROT_PROB'],
num_try=50
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_object_scaling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_object_scaling, config=config)
points, gt_boxes = augmentor_utils.scale_pre_object(
data_dict['gt_boxes'], data_dict['points'],
# gt_boxes_mask=data_dict['gt_boxes_mask'],
scale_perturb=config['SCALE_UNIFORM_NOISE']
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_sampling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_sampling, config=config)
gt_boxes, points, gt_boxes_mask = augmentor_utils.global_sampling(
data_dict['gt_boxes'], data_dict['points'],
gt_boxes_mask=data_dict['gt_boxes_mask'],
sample_ratio_range=config['WORLD_SAMPLE_RATIO'],
prob=config['PROB']
)
data_dict['gt_boxes'] = gt_boxes
data_dict['gt_boxes_mask'] = gt_boxes_mask
data_dict['points'] = points
return data_dict
def normalize_object_size(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.normalize_object_size, config=config)
points, gt_boxes = augmentor_utils.normalize_object_size(
data_dict['gt_boxes'], data_dict['points'], data_dict['gt_boxes_mask'], config['SIZE_RES']
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_flip(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_flip, config=config)
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['x', 'y']
gt_boxes, points = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
gt_boxes, points,
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_rotation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_rotation, config=config)
rot_range = config['WORLD_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
gt_boxes, points = augmentor_utils.global_rotation(
data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_scaling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling, config=config)
gt_boxes, points = augmentor_utils.global_scaling(
data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_image_flip(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_image_flip, config=config)
images = data_dict["images"]
depth_maps = data_dict["depth_maps"]
gt_boxes = data_dict['gt_boxes']
gt_boxes2d = data_dict["gt_boxes2d"]
calib = data_dict["calib"]
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['horizontal']
images, depth_maps, gt_boxes = getattr(augmentor_utils, 'random_image_flip_%s' % cur_axis)(
images, depth_maps, gt_boxes, calib,
)
data_dict['images'] = images
data_dict['depth_maps'] = depth_maps
data_dict['gt_boxes'] = gt_boxes
return data_dict
def random_world_translation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_translation, config=config)
noise_translate_std = config['NOISE_TRANSLATE_STD']
if noise_translate_std == 0:
return data_dict
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['x', 'y', 'z']
gt_boxes, points = getattr(augmentor_utils, 'random_translation_along_%s' % cur_axis)(
gt_boxes, points, noise_translate_std,
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_local_translation(self, data_dict=None, config=None):
"""
        Please verify the correctness of this augmentation before using it.
"""
if data_dict is None:
return partial(self.random_local_translation, config=config)
offset_range = config['LOCAL_TRANSLATION_RANGE']
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['x', 'y', 'z']
gt_boxes, points = getattr(augmentor_utils, 'random_local_translation_along_%s' % cur_axis)(
gt_boxes, points, offset_range,
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_local_rotation(self, data_dict=None, config=None):
"""
        Please verify the correctness of this augmentation before using it.
"""
if data_dict is None:
return partial(self.random_local_rotation, config=config)
rot_range = config['LOCAL_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
gt_boxes, points = augmentor_utils.local_rotation(
data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_local_scaling(self, data_dict=None, config=None):
"""
        Please verify the correctness of this augmentation before using it.
"""
if data_dict is None:
return partial(self.random_local_scaling, config=config)
gt_boxes, points = augmentor_utils.local_scaling(
data_dict['gt_boxes'], data_dict['points'], config['LOCAL_SCALE_RANGE']
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_frustum_dropout(self, data_dict=None, config=None):
"""
        Please verify the correctness of this augmentation before using it.
"""
if data_dict is None:
return partial(self.random_world_frustum_dropout, config=config)
intensity_range = config['INTENSITY_RANGE']
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
for direction in config['DIRECTION']:
assert direction in ['top', 'bottom', 'left', 'right']
gt_boxes, points = getattr(augmentor_utils, 'global_frustum_dropout_%s' % direction)(
gt_boxes, points, intensity_range,
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_local_frustum_dropout(self, data_dict=None, config=None):
"""
        Please verify the correctness of this augmentation before using it.
"""
if data_dict is None:
return partial(self.random_local_frustum_dropout, config=config)
intensity_range = config['INTENSITY_RANGE']
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
for direction in config['DIRECTION']:
assert direction in ['top', 'bottom', 'left', 'right']
gt_boxes, points = getattr(augmentor_utils, 'local_frustum_dropout_%s' % direction)(
gt_boxes, points, intensity_range,
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_local_pyramid_aug(self, data_dict=None, config=None):
"""
Refer to the paper:
SE-SSD: Self-Ensembling Single-Stage Object Detector From Point Cloud
"""
if data_dict is None:
return partial(self.random_local_pyramid_aug, config=config)
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
gt_boxes, points, pyramids = augmentor_utils.local_pyramid_dropout(gt_boxes, points, config['DROP_PROB'])
gt_boxes, points, pyramids = augmentor_utils.local_pyramid_sparsify(gt_boxes, points,
config['SPARSIFY_PROB'],
config['SPARSIFY_MAX_NUM'],
pyramids)
gt_boxes, points = augmentor_utils.local_pyramid_swap(gt_boxes, points,
config['SWAP_PROB'],
config['SWAP_MAX_NUM'],
pyramids)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
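        # Note: the three stages share the `pyramids` tensor -- dropout and
        # sparsify each remove the boxes they touched before passing it on --
        # so the swap stage only operates on pyramids left untouched by the
        # earlier stages.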
def forward(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
gt_names: optional, (N), string
...
Returns:
"""
for cur_augmentor in self.data_augmentor_queue:
data_dict = cur_augmentor(data_dict=data_dict)
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
if 'calib' in data_dict:
data_dict.pop('calib')
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
if 'gt_boxes2d' in data_dict:
data_dict['gt_boxes2d'] = data_dict['gt_boxes2d'][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
return data_dict
def re_prepare(self, augmentor_configs=None, intensity=None, aug_times=1):
self.data_augmentor_queue = []
if augmentor_configs is None:
augmentor_configs = self.augmentor_configs
aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
else augmentor_configs.AUG_CONFIG_LIST
for cur_cfg in aug_config_list:
if not isinstance(augmentor_configs, list):
if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
continue
# scale data augmentation intensity
if intensity is not None:
if cur_cfg.NAME == 'normalize_object_size':
#rate = np.power(0.5, aug_times)
cur_cfg = self.adjust_augment_intensity_SN(cur_cfg, 0.25)
print ("***********cur_cfg:", aug_times)
print ("***********cur_cfg:", cur_cfg)
else:
cur_cfg = self.adjust_augment_intensity(cur_cfg, intensity)
cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_augmentor_queue.append(cur_augmentor)
def adjust_augment_intensity_SN(self, config, rate):
adjust_map = {
'normalize_object_size': 'SIZE_RES',
}
def cal_new_intensity(config):
origin_intensity_list = config.get(adjust_map[config.NAME])
assert len(origin_intensity_list) == 3
new_intensity_list = [x*rate for x in origin_intensity_list]
return new_intensity_list
if config.NAME not in adjust_map:
return config
        # scale the SIZE_RES residual of normalize_object_size by the given rate
#print ("***********config.NAME**************", config.get(adjust_map[config.NAME]))
if config.NAME in ['normalize_object_size']:
new_intensity_list = cal_new_intensity(config)
setattr(config, adjust_map[config.NAME], new_intensity_list)
return config
else:
raise NotImplementedError
def adjust_augment_intensity(self, config, intensity):
adjust_map = {
#'normalize_object_size': 'SIZE_RES',
'random_object_scaling': 'SCALE_UNIFORM_NOISE',
'random_object_rotation': 'ROT_UNIFORM_NOISE',
'random_world_rotation': 'WORLD_ROT_ANGLE',
'random_world_scaling': 'WORLD_SCALE_RANGE',
}
def cal_new_intensity(config, flag):
origin_intensity_list = config.get(adjust_map[config.NAME])
assert len(origin_intensity_list) == 2
assert np.isclose(flag - origin_intensity_list[0], origin_intensity_list[1] - flag)
noise = origin_intensity_list[1] - flag
new_noise = noise * intensity
new_intensity_list = [flag - new_noise, new_noise + flag]
return new_intensity_list
if config.NAME not in adjust_map:
return config
# for data augmentations that init with 1
if config.NAME in ['random_object_scaling', 'random_world_scaling']:
new_intensity_list = cal_new_intensity(config, flag=1)
setattr(config, adjust_map[config.NAME], new_intensity_list)
return config
elif config.NAME in ['random_object_rotation', 'random_world_rotation']:
new_intensity_list = cal_new_intensity(config, flag=0)
setattr(config, adjust_map[config.NAME], new_intensity_list)
return config
# modified
# elif config.NAME in ['normalize_object_size']:
# new_intensity_list = cal_new_intensity(config, flag=0)
# setattr(config, adjust_map[config.NAME], new_intensity_list)
# return config
else:
raise NotImplementedError
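        # Hedged worked example: for random_world_scaling with
        # WORLD_SCALE_RANGE = [0.95, 1.05] (flag = 1, noise = 0.05), an
        # intensity of 0.5 halves the noise to give [0.975, 1.025]; for
        # random_world_rotation with WORLD_ROT_ANGLE = [-0.4, 0.4] (flag = 0),
        # the same intensity yields [-0.2, 0.2].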
| 16,561
| 39.692875
| 113
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/augmentor/database_sampler.py
|
import pickle
import os
import copy
import numpy as np
import SharedArray
import torch.distributed as dist
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils import box_utils, common_utils
import os
import io
class DataBaseSampler(object):
def __init__(self, root_path, sampler_cfg, class_names, logger=None, client=None, oss_flag=False):
self.root_path = root_path
self.class_names = class_names
self.sampler_cfg = sampler_cfg
self.logger = logger
self.client = client
self.oss_flag = oss_flag
self.db_infos = {}
for class_name in class_names:
self.db_infos[class_name] = []
self.use_shared_memory = sampler_cfg.get('USE_SHARED_MEMORY', False)
self.logger.info(f"*************root_path***********: {root_path}")
if self.oss_flag:
from petrel_client.client import Client
# ~/.petreloss.conf: save the KEY/ACCESS_KEY of S3 Ceph
self.client = Client('~/.petreloss.conf')
for db_info_path in sampler_cfg.DB_INFO_PATH:
if not self.oss_flag:
db_info_path = self.root_path.resolve() / db_info_path
self.logger.info(f"*************Load LINUX db_info_path*************: {db_info_path}")
with open(str(db_info_path), 'rb') as f:
infos = pickle.load(f)
[self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names]
else:
db_info_path = os.path.join(self.root_path, db_info_path)
self.logger.info(f"*************Load OSS db_info_path*************: {db_info_path}")
# pkl_bytes = self.client.get(db_info_path)
pkl_bytes = self.client.get(db_info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
[self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names]
for func_name, val in sampler_cfg.PREPARE.items():
self.db_infos = getattr(self, func_name)(self.db_infos, val)
self.gt_database_data_key = self.load_db_to_shared_memory() if self.use_shared_memory else None
self.sample_groups = {}
self.sample_class_num = {}
self.limit_whole_scene = sampler_cfg.get('LIMIT_WHOLE_SCENE', False)
for x in sampler_cfg.SAMPLE_GROUPS:
class_name, sample_num = x.split(':')
if class_name not in class_names:
continue
self.sample_class_num[class_name] = sample_num
self.sample_groups[class_name] = {
'sample_num': sample_num,
'pointer': len(self.db_infos[class_name]),
'indices': np.arange(len(self.db_infos[class_name]))
}
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __del__(self):
if self.use_shared_memory:
self.logger.info('Deleting GT database from shared memory')
cur_rank, num_gpus = common_utils.get_dist_info()
sa_key = self.sampler_cfg.DB_DATA_PATH[0]
if cur_rank % num_gpus == 0 and os.path.exists(f"/dev/shm/{sa_key}"):
SharedArray.delete(f"shm://{sa_key}")
if num_gpus > 1:
dist.barrier()
self.logger.info('GT database has been removed from shared memory')
def load_db_to_shared_memory(self):
self.logger.info('Loading GT database to shared memory')
cur_rank, world_size, num_gpus = common_utils.get_dist_info(return_gpu_per_machine=True)
        assert self.sampler_cfg.DB_DATA_PATH.__len__() == 1, 'Currently only a single DB_DATA file is supported'
db_data_path = self.root_path.resolve() / self.sampler_cfg.DB_DATA_PATH[0]
sa_key = self.sampler_cfg.DB_DATA_PATH[0]
if cur_rank % num_gpus == 0 and not os.path.exists(f"/dev/shm/{sa_key}"):
gt_database_data = np.load(db_data_path)
common_utils.sa_create(f"shm://{sa_key}", gt_database_data)
if num_gpus > 1:
dist.barrier()
self.logger.info('GT database has been saved to shared memory')
return sa_key
def filter_by_difficulty(self, db_infos, removed_difficulty):
new_db_infos = {}
for key, dinfos in db_infos.items():
pre_len = len(dinfos)
new_db_infos[key] = [
info for info in dinfos
if info['difficulty'] not in removed_difficulty
]
if self.logger is not None:
self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key])))
return new_db_infos
def filter_by_min_points(self, db_infos, min_gt_points_list):
for name_num in min_gt_points_list:
name, min_num = name_num.split(':')
min_num = int(min_num)
if min_num > 0 and name in db_infos.keys():
filtered_infos = []
for info in db_infos[name]:
if info['num_points_in_gt'] >= min_num:
filtered_infos.append(info)
if self.logger is not None:
self.logger.info('Database filter by min points %s: %d => %d' %
(name, len(db_infos[name]), len(filtered_infos)))
db_infos[name] = filtered_infos
return db_infos
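        # Usage note (hedged): entries in min_gt_points_list look like 'Car:5',
        # i.e. '<class_name>:<min points>'; database objects of that class with
        # fewer interior points are dropped before sampling.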
def sample_with_fixed_number(self, class_name, sample_group):
"""
Args:
class_name:
sample_group:
Returns:
"""
sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices']
if pointer >= len(self.db_infos[class_name]):
indices = np.random.permutation(len(self.db_infos[class_name]))
pointer = 0
sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]]
pointer += sample_num
sample_group['pointer'] = pointer
sample_group['indices'] = indices
return sampled_dict
@staticmethod
def put_boxes_on_road_planes(gt_boxes, road_planes, calib):
"""
        Only validated on KITTIDataset
Args:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
road_planes: [a, b, c, d]
calib:
Returns:
"""
a, b, c, d = road_planes
center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3])
        cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b  # solve the road plane a*x + b*y + c*z + d = 0 for the camera-frame height y
center_cam[:, 1] = cur_height_cam
cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2]
mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height
        gt_boxes[:, 2] -= mv_height  # shift boxes down so their bottoms rest on the road plane (lidar frame)
return gt_boxes, mv_height
def add_sampled_boxes_to_scene(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict):
gt_boxes_mask = data_dict['gt_boxes_mask']
gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
gt_names = data_dict['gt_names'][gt_boxes_mask]
points = data_dict['points']
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
)
data_dict.pop('calib')
data_dict.pop('road_plane')
obj_points_list = []
if self.use_shared_memory:
gt_database_data = SharedArray.attach(f"shm://{self.gt_database_data_key}")
gt_database_data.setflags(write=0)
else:
gt_database_data = None
for idx, info in enumerate(total_valid_sampled_dict):
if self.use_shared_memory:
start_offset, end_offset = info['global_data_offset']
obj_points = copy.deepcopy(gt_database_data[start_offset:end_offset])
else:
#file_path = self.root_path / info['path']
file_path = os.path.join(self.root_path, info['path'])
if self.oss_flag:
#print ("*************file_path*************:", file_path)
#sdk_local_bytes = self.client.get(file_path)
sdk_local_bytes = self.client.get(file_path, update_cache=True)
obj_points = np.frombuffer(sdk_local_bytes, dtype=np.float32).reshape(
[-1, self.sampler_cfg.NUM_POINT_FEATURES]).copy()
else:
obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape(
[-1, self.sampler_cfg.NUM_POINT_FEATURES])
obj_points[:, :3] += info['box3d_lidar'][:3]
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
                # shift the object points down by the same road-plane offset
obj_points[:, 2] -= mv_height[idx]
obj_points_list.append(obj_points)
obj_points = np.concatenate(obj_points_list, axis=0)
sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])
large_sampled_gt_boxes = box_utils.enlarge_box3d(
sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH
)
points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
points = np.concatenate([obj_points, points], axis=0)
gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
data_dict['gt_boxes'] = gt_boxes
data_dict['gt_names'] = gt_names
data_dict['points'] = points
return data_dict
def __call__(self, data_dict):
"""
Args:
data_dict:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
gt_boxes = data_dict['gt_boxes']
gt_names = data_dict['gt_names'].astype(str)
existed_boxes = gt_boxes
total_valid_sampled_dict = []
for class_name, sample_group in self.sample_groups.items():
if self.limit_whole_scene:
num_gt = np.sum(class_name == gt_names)
sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
if int(sample_group['sample_num']) > 0:
sampled_dict = self.sample_with_fixed_number(class_name, sample_group)
sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):
sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
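                # Collision test: iou1 is sampled-vs-existing boxes, iou2 is
                # sampled-vs-sampled with the diagonal zeroed so a box does not
                # count as colliding with itself; when the scene has no existing
                # boxes iou1 has zero columns, so iou2 stands in for it. Only
                # samples with zero max overlap in both are kept, so pasted
                # objects never intersect anything.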
iou1 = iou1 if iou1.shape[1] > 0 else iou2
valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0]
valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
valid_sampled_boxes = sampled_boxes[valid_mask]
existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
total_valid_sampled_dict.extend(valid_sampled_dict)
sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]
if total_valid_sampled_dict.__len__() > 0:
data_dict = self.add_sampled_boxes_to_scene(data_dict, sampled_gt_boxes, total_valid_sampled_dict)
data_dict.pop('gt_boxes_mask')
return data_dict
| 11,921
| 42.510949
| 120
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/nuscenes/nuscenes_utils.py
|
"""
The NuScenes data pre-processing and evaluation code is modified from
https://github.com/traveller59/second.pytorch and https://github.com/poodarchu/Det3D
"""
import operator
from functools import reduce
from pathlib import Path
import numpy as np
import tqdm
from nuscenes.utils.data_classes import Box
from nuscenes.utils.geometry_utils import transform_matrix
from pyquaternion import Quaternion
map_name_from_general_to_detection = {
'human.pedestrian.adult': 'pedestrian',
'human.pedestrian.child': 'pedestrian',
'human.pedestrian.wheelchair': 'ignore',
'human.pedestrian.stroller': 'ignore',
'human.pedestrian.personal_mobility': 'ignore',
'human.pedestrian.police_officer': 'pedestrian',
'human.pedestrian.construction_worker': 'pedestrian',
'animal': 'ignore',
'vehicle.car': 'car',
'vehicle.motorcycle': 'motorcycle',
'vehicle.bicycle': 'bicycle',
'vehicle.bus.bendy': 'bus',
'vehicle.bus.rigid': 'bus',
'vehicle.truck': 'truck',
'vehicle.construction': 'construction_vehicle',
'vehicle.emergency.ambulance': 'ignore',
'vehicle.emergency.police': 'ignore',
'vehicle.trailer': 'trailer',
'movable_object.barrier': 'barrier',
'movable_object.trafficcone': 'traffic_cone',
'movable_object.pushable_pullable': 'ignore',
'movable_object.debris': 'ignore',
'static_object.bicycle_rack': 'ignore',
}
map_name_from_general_to_detection_mdf = {
'human.pedestrian.adult': 'Pedestrian',
'human.pedestrian.child': 'Pedestrian',
'human.pedestrian.wheelchair': 'ignore',
'human.pedestrian.stroller': 'ignore',
'human.pedestrian.personal_mobility': 'ignore',
'human.pedestrian.police_officer': 'Pedestrian',
'human.pedestrian.construction_worker': 'Pedestrian',
'animal': 'ignore',
'vehicle.car': 'Vehicle',
'vehicle.motorcycle': 'Cyclist',
'vehicle.bicycle': 'Cyclist',
'vehicle.bus.bendy': 'Vehicle',
'vehicle.bus.rigid': 'Vehicle',
'vehicle.truck': 'Vehicle',
'vehicle.construction': 'Vehicle',
'vehicle.emergency.ambulance': 'ignore',
'vehicle.emergency.police': 'ignore',
'vehicle.trailer': 'Vehicle',
'movable_object.barrier': 'ignore',
'movable_object.trafficcone': 'ignore',
'movable_object.pushable_pullable': 'ignore',
'movable_object.debris': 'ignore',
'static_object.bicycle_rack': 'ignore',
}
cls_attr_dist = {
'barrier': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'bicycle': {
'cycle.with_rider': 2791,
'cycle.without_rider': 8946,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'bus': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 9092,
'vehicle.parked': 3294,
'vehicle.stopped': 3881,
},
'car': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 114304,
'vehicle.parked': 330133,
'vehicle.stopped': 46898,
},
'construction_vehicle': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 882,
'vehicle.parked': 11549,
'vehicle.stopped': 2102,
},
'ignore': {
'cycle.with_rider': 307,
'cycle.without_rider': 73,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 165,
'vehicle.parked': 400,
'vehicle.stopped': 102,
},
'motorcycle': {
'cycle.with_rider': 4233,
'cycle.without_rider': 8326,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'pedestrian': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 157444,
'pedestrian.sitting_lying_down': 13939,
'pedestrian.standing': 46530,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'traffic_cone': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 0,
'vehicle.parked': 0,
'vehicle.stopped': 0,
},
'trailer': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 3421,
'vehicle.parked': 19224,
'vehicle.stopped': 1895,
},
'truck': {
'cycle.with_rider': 0,
'cycle.without_rider': 0,
'pedestrian.moving': 0,
'pedestrian.sitting_lying_down': 0,
'pedestrian.standing': 0,
'vehicle.moving': 21339,
'vehicle.parked': 55626,
'vehicle.stopped': 11097,
},
}
def get_available_scenes(nusc):
available_scenes = []
print('total scene num:', len(nusc.scene))
for scene in nusc.scene:
scene_token = scene['token']
scene_rec = nusc.get('scene', scene_token)
sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
has_more_frames = True
scene_not_exist = False
while has_more_frames:
lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token'])
if not Path(lidar_path).exists():
scene_not_exist = True
break
else:
break
# if not sd_rec['next'] == '':
# sd_rec = nusc.get('sample_data', sd_rec['next'])
# else:
# has_more_frames = False
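        # NOTE: both branches above break immediately, so only the first lidar
        # frame of each scene is checked for existence; the commented-out lines
        # are the original traversal over all frames of the scene.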
if scene_not_exist:
continue
available_scenes.append(scene)
print('exist scene num:', len(available_scenes))
return available_scenes
def get_sample_data(nusc, sample_data_token, selected_anntokens=None):
"""
Returns the data path as well as all annotations related to that sample_data.
Note that the boxes are transformed into the current sensor's coordinate frame.
Args:
nusc:
sample_data_token: Sample_data token.
        selected_anntokens: If provided, only return the selected annotations.
Returns:
"""
# Retrieve sensor & pose records
sd_record = nusc.get('sample_data', sample_data_token)
cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
sensor_record = nusc.get('sensor', cs_record['sensor_token'])
pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
data_path = nusc.get_sample_data_path(sample_data_token)
if sensor_record['modality'] == 'camera':
cam_intrinsic = np.array(cs_record['camera_intrinsic'])
imsize = (sd_record['width'], sd_record['height'])
else:
cam_intrinsic = imsize = None
# Retrieve all sample annotations and map to sensor coordinate system.
if selected_anntokens is not None:
boxes = list(map(nusc.get_box, selected_anntokens))
else:
boxes = nusc.get_boxes(sample_data_token)
# Make list of Box objects including coord system transforms.
box_list = []
for box in boxes:
box.velocity = nusc.box_velocity(box.token)
# Move box to ego vehicle coord system
box.translate(-np.array(pose_record['translation']))
box.rotate(Quaternion(pose_record['rotation']).inverse)
# Move box to sensor coord system
box.translate(-np.array(cs_record['translation']))
box.rotate(Quaternion(cs_record['rotation']).inverse)
box_list.append(box)
return data_path, box_list, cam_intrinsic
def quaternion_yaw(q: Quaternion) -> float:
"""
Calculate the yaw angle from a quaternion.
Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
It does not work for a box in the camera frame.
:param q: Quaternion of interest.
:return: Yaw angle in radians.
"""
# Project into xy plane.
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
# Measure yaw using arctan.
yaw = np.arctan2(v[1], v[0])
return yaw
def fill_trainval_infos(data_path, nusc, train_scenes, val_scenes, test=False, max_sweeps=10):
train_nusc_infos = []
val_nusc_infos = []
progress_bar = tqdm.tqdm(total=len(nusc.sample), desc='create_info', dynamic_ncols=True)
    ref_chan = 'LIDAR_TOP'  # The lidar channel from which we track back n sweeps to aggregate the point cloud.
chan = 'LIDAR_TOP' # The reference channel of the current sample_rec that the point clouds are mapped to.
for index, sample in enumerate(nusc.sample):
progress_bar.update()
ref_sd_token = sample['data'][ref_chan]
ref_sd_rec = nusc.get('sample_data', ref_sd_token)
ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
ref_time = 1e-6 * ref_sd_rec['timestamp']
ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token)
ref_cam_front_token = sample['data']['CAM_FRONT']
ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token)
# Homogeneous transform from ego car frame to reference frame
ref_from_car = transform_matrix(
ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), inverse=True
)
# Homogeneous transformation matrix from global to _current_ ego car frame
car_from_global = transform_matrix(
ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']), inverse=True,
)
info = {
'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),
'cam_front_path': Path(ref_cam_path).relative_to(data_path).__str__(),
'cam_intrinsic': ref_cam_intrinsic,
'token': sample['token'],
'sweeps': [],
'ref_from_car': ref_from_car,
'car_from_global': car_from_global,
'timestamp': ref_time,
}
sample_data_token = sample['data'][chan]
curr_sd_rec = nusc.get('sample_data', sample_data_token)
sweeps = []
while len(sweeps) < max_sweeps - 1:
if curr_sd_rec['prev'] == '':
if len(sweeps) == 0:
sweep = {
'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),
'sample_data_token': curr_sd_rec['token'],
'transform_matrix': None,
'time_lag': curr_sd_rec['timestamp'] * 0,
}
sweeps.append(sweep)
else:
sweeps.append(sweeps[-1])
else:
curr_sd_rec = nusc.get('sample_data', curr_sd_rec['prev'])
# Get past pose
current_pose_rec = nusc.get('ego_pose', curr_sd_rec['ego_pose_token'])
global_from_car = transform_matrix(
current_pose_rec['translation'], Quaternion(current_pose_rec['rotation']), inverse=False,
)
# Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
current_cs_rec = nusc.get(
'calibrated_sensor', curr_sd_rec['calibrated_sensor_token']
)
car_from_current = transform_matrix(
current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']), inverse=False,
)
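                # tm below chains four 4x4 homogeneous transforms (rightmost
                # applied first): sweep lidar -> sweep ego (car_from_current),
                # sweep ego -> global (global_from_car), global -> ref ego
                # (car_from_global), ref ego -> ref lidar (ref_from_car).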
tm = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
lidar_path = nusc.get_sample_data_path(curr_sd_rec['token'])
time_lag = ref_time - 1e-6 * curr_sd_rec['timestamp']
sweep = {
'lidar_path': Path(lidar_path).relative_to(data_path).__str__(),
'sample_data_token': curr_sd_rec['token'],
'transform_matrix': tm,
'global_from_car': global_from_car,
'car_from_current': car_from_current,
'time_lag': time_lag,
}
sweeps.append(sweep)
info['sweeps'] = sweeps
assert len(info['sweeps']) == max_sweeps - 1, \
f"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} sweeps, " \
f"you should duplicate to sweep num {max_sweeps - 1}"
if not test:
annotations = [nusc.get('sample_annotation', token) for token in sample['anns']]
            # this filtering gives a 0.5~1 mAP improvement
num_lidar_pts = np.array([anno['num_lidar_pts'] for anno in annotations])
num_radar_pts = np.array([anno['num_radar_pts'] for anno in annotations])
mask = (num_lidar_pts + num_radar_pts > 0)
locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3)
            dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[:, [1, 0, 2]]  # wlh ==> dxdydz (lwh)
velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3)
rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(-1, 1)
names = np.array([b.name for b in ref_boxes])
tokens = np.array([b.token for b in ref_boxes])
gt_boxes = np.concatenate([locs, dims, rots, velocity[:, :2]], axis=1)
assert len(annotations) == len(gt_boxes) == len(velocity)
info['gt_boxes'] = gt_boxes[mask, :]
info['gt_boxes_velocity'] = velocity[mask, :]
info['gt_names'] = np.array([map_name_from_general_to_detection[name] for name in names])[mask]
info['gt_boxes_token'] = tokens[mask]
info['num_lidar_pts'] = num_lidar_pts[mask]
info['num_radar_pts'] = num_radar_pts[mask]
if sample['scene_token'] in train_scenes:
train_nusc_infos.append(info)
else:
val_nusc_infos.append(info)
progress_bar.close()
return train_nusc_infos, val_nusc_infos
def boxes_lidar_to_nusenes(det_info):
boxes3d = det_info['boxes_lidar']
scores = det_info['score']
labels = det_info['pred_labels']
box_list = []
for k in range(boxes3d.shape[0]):
quat = Quaternion(axis=[0, 0, 1], radians=boxes3d[k, 6])
velocity = (*boxes3d[k, 7:9], 0.0) if boxes3d.shape[1] == 9 else (0.0, 0.0, 0.0)
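        # pcdet boxes are (x, y, z, dx, dy, dz, heading[, vx, vy]); nuScenes Box
        # expects size as (w, l, h), hence the [4, 3, 5] reordering to (dy, dx, dz).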
box = Box(
boxes3d[k, :3],
boxes3d[k, [4, 3, 5]], # wlh
quat, label=labels[k], score=scores[k], velocity=velocity,
)
box_list.append(box)
return box_list
def lidar_nusc_box_to_global(nusc, boxes, sample_token):
s_record = nusc.get('sample', sample_token)
sample_data_token = s_record['data']['LIDAR_TOP']
sd_record = nusc.get('sample_data', sample_data_token)
cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
sensor_record = nusc.get('sensor', cs_record['sensor_token'])
pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
data_path = nusc.get_sample_data_path(sample_data_token)
box_list = []
for box in boxes:
# Move box to ego vehicle coord system
box.rotate(Quaternion(cs_record['rotation']))
box.translate(np.array(cs_record['translation']))
# Move box to global coord system
box.rotate(Quaternion(pose_record['rotation']))
box.translate(np.array(pose_record['translation']))
box_list.append(box)
return box_list
def transform_det_annos_to_nusc_annos(det_annos, nusc):
nusc_annos = {
'results': {},
'meta': None,
}
for det in det_annos:
annos = []
box_list = boxes_lidar_to_nusenes(det)
box_list = lidar_nusc_box_to_global(
nusc=nusc, boxes=box_list, sample_token=det['metadata']['token']
)
for k, box in enumerate(box_list):
name = det['name'][k]
if np.sqrt(box.velocity[0] ** 2 + box.velocity[1] ** 2) > 0.2:
if name in ['car', 'construction_vehicle', 'bus', 'truck', 'trailer']:
attr = 'vehicle.moving'
elif name in ['bicycle', 'motorcycle']:
attr = 'cycle.with_rider'
else:
attr = None
else:
if name in ['pedestrian']:
attr = 'pedestrian.standing'
elif name in ['bus']:
attr = 'vehicle.stopped'
else:
attr = None
attr = attr if attr is not None else max(
cls_attr_dist[name].items(), key=operator.itemgetter(1))[0]
nusc_anno = {
'sample_token': det['metadata']['token'],
'translation': box.center.tolist(),
'size': box.wlh.tolist(),
'rotation': box.orientation.elements.tolist(),
'velocity': box.velocity[:2].tolist(),
'detection_name': name,
'detection_score': box.score,
'attribute_name': attr
}
annos.append(nusc_anno)
nusc_annos['results'].update({det["metadata"]["token"]: annos})
return nusc_annos
def format_nuscene_results(metrics, class_names, version='default'):
result = '----------------Nuscene %s results-----------------\n' % version
for name in class_names:
threshs = ', '.join(list(metrics['label_aps'][name].keys()))
ap_list = list(metrics['label_aps'][name].values())
        err_name = ', '.join([x.split('_')[0] for x in list(metrics['label_tp_errors'][name].keys())])
error_list = list(metrics['label_tp_errors'][name].values())
result += f'***{name} error@{err_name} | AP@{threshs}\n'
result += ', '.join(['%.2f' % x for x in error_list]) + ' | '
result += ', '.join(['%.2f' % (x * 100) for x in ap_list])
result += f" | mean AP: {metrics['mean_dist_aps'][name]}"
result += '\n'
result += '--------------average performance-------------\n'
details = {}
for key, val in metrics['tp_errors'].items():
result += '%s:\t %.4f\n' % (key, val)
details[key] = val
result += 'mAP:\t %.4f\n' % metrics['mean_ap']
result += 'NDS:\t %.4f\n' % metrics['nd_score']
details.update({
'mAP': metrics['mean_ap'],
'NDS': metrics['nd_score'],
})
return result, details
| 19,464
| 36.005703
| 111
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/nuscenes/nuscenes_dataset.py
|
import copy
import pickle
from pathlib import Path
import os
import io
import numpy as np
from tqdm import tqdm
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils
from ..dataset import DatasetTemplate
class NuScenesDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
    3DTrans supports reading data from and writing data to Ceph.
    Usage:
        self.oss_path = 's3://path/of/nuScenes'
        '~/.petreloss.conf': a Ceph config file storing the KEY/ACCESS_KEY of the S3 Ceph storage
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
if self.oss_path is not None:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
# self.oss_data_list = self.list_oss_dir(self.oss_path, with_info=False)
            # zhangbo: for the OSS format, listing the nuScenes dataset causes a bug,
            # because OSS cannot load too many objects at once
self.infos = []
self.include_nuscenes_data(self.mode)
if self.training and self.dataset_cfg.get('BALANCED_RESAMPLING', False):
self.infos = self.balanced_infos_resampling(self.infos)
def include_nuscenes_data(self, mode):
self.logger.info('Loading NuScenes dataset')
nuscenes_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
if self.oss_path is None:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
nuscenes_infos.extend(infos)
else:
info_path = os.path.join(self.oss_path, info_path)
#pkl_bytes = self.client.get(info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
nuscenes_infos.extend(infos)
self.infos.extend(nuscenes_infos)
self.logger.info('Total samples for NuScenes dataset: %d' % (len(nuscenes_infos)))
def balanced_infos_resampling(self, infos):
"""
Class-balanced sampling of nuScenes dataset from https://arxiv.org/abs/1908.09492
"""
if self.class_names is None:
return infos
cls_infos = {name: [] for name in self.class_names}
for info in infos:
for name in set(info['gt_names']):
if name in self.class_names:
cls_infos[name].append(info)
duplicated_samples = sum([len(v) for _, v in cls_infos.items()])
cls_dist = {k: len(v) / duplicated_samples for k, v in cls_infos.items()}
sampled_infos = []
frac = 1.0 / len(self.class_names)
ratios = [frac / v for v in cls_dist.values()]
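        # Each class should contribute an equal share frac = 1 / num_classes of the
        # resampled set, so ratio = frac / current_fraction: under-represented
        # classes get duplicated (ratio > 1) and over-represented ones subsampled
        # (ratio < 1) by the np.random.choice calls below.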
for cur_cls_infos, ratio in zip(list(cls_infos.values()), ratios):
sampled_infos += np.random.choice(
cur_cls_infos, int(len(cur_cls_infos) * ratio)
).tolist()
self.logger.info('Total samples after balanced resampling: %s' % (len(sampled_infos)))
cls_infos_new = {name: [] for name in self.class_names}
for info in sampled_infos:
for name in set(info['gt_names']):
if name in self.class_names:
cls_infos_new[name].append(info)
cls_dist_new = {k: len(v) / len(sampled_infos) for k, v in cls_infos_new.items()}
return sampled_infos
def get_sweep(self, sweep_info):
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
return points[mask]
if self.oss_path is None:
lidar_path = self.root_path / sweep_info['lidar_path']
points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
else:
lidar_path = os.path.join(self.oss_path, sweep_info['lidar_path'])
#sdk_local_bytes = self.client.get(lidar_path)
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points_sweep = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
points_sweep = remove_ego_points(points_sweep).T
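        # Apply one 4x4 homogeneous transform to the whole sweep: points are stacked
        # as columns [x, y, z, 1]^T so a single dot product maps them from the sweep
        # lidar frame into the reference (key-frame) lidar frame.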
if sweep_info['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
return points_sweep.T, cur_times.T
def get_lidar_with_sweeps(self, index, max_sweeps=1):
info = self.infos[index]
if self.oss_path is None:
lidar_path = self.root_path / info['lidar_path']
points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
else:
lidar_path = os.path.join(self.oss_path, info['lidar_path'])
#sdk_local_bytes = self.client.get(lidar_path)
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points_pre = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1).reshape([-1, 5]).copy()
points = points_pre[:, :4]
sweep_points_list = [points]
sweep_times_list = [np.zeros((points.shape[0], 1))]
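        # Aggregate the key frame (time lag 0) with max_sweeps - 1 past sweeps drawn
        # without replacement; each point later carries its time lag as an extra
        # feature channel appended after (x, y, z, intensity).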
for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
sweep_points_list.append(points_sweep)
sweep_times_list.append(times_sweep)
points = np.concatenate(sweep_points_list, axis=0)
times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
points = np.concatenate((points, times), axis=1)
return points
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "nusc",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']}
}
if 'gt_boxes' in info:
if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
else:
mask = None
input_dict.update({
'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask],
'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask]
})
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
# for debug only
# gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_)
# debug_dict = {'gt_boxes': copy.deepcopy(input_dict['gt_boxes'][gt_boxes_mask])}
if self.dataset_cfg.get('FOV_POINTS_ONLY', None):
input_dict['points'] = self.extract_fov_data(
input_dict['points'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
if input_dict['gt_boxes'] is not None:
fov_gt_flag = self.extract_fov_gt(
input_dict['gt_boxes'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
input_dict.update({
'gt_names': input_dict['gt_names'][fov_gt_flag],
'gt_boxes': input_dict['gt_boxes'][fov_gt_flag],
})
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data(data_dict=input_dict)
if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False) and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
gt_boxes = data_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
data_dict['gt_boxes'] = gt_boxes
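        # When velocity prediction is disabled, drop the (vx, vy) columns 7:9 and
        # keep only the 7-DoF box plus the class index that prepare_data appends
        # as the last column.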
if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in data_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
data_dict['gt_boxes'] = data_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]]
return data_dict
#@staticmethod
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
if self.dataset_cfg.get('SHIFT_COOR', None):
#print ("*******WARNING FOR SHIFT_COOR:", self.dataset_cfg.SHIFT_COOR)
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
pred_dict['pred_labels'] = pred_labels
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
single_pred_dict['metadata'] = batch_dict['metadata'][index]
annos.append(single_pred_dict)
return annos
def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names):
from ..kitti.kitti_object_eval_python import eval as kitti_eval
map_name_to_kitti = {
'car': 'Car',
'pedestrian': 'Pedestrian',
'truck': 'Truck',
'bicycle': 'Cyclist',
}
def transform_to_kitti_format(annos, info_with_fakelidar=False, is_gt=False):
for anno in annos:
if 'name' not in anno:
anno['name'] = anno['gt_names']
anno.pop('gt_names')
for k in range(anno['name'].shape[0]):
if anno['name'][k] in map_name_to_kitti:
anno['name'][k] = map_name_to_kitti[anno['name'][k]]
else:
anno['name'][k] = 'Person_sitting'
if 'boxes_lidar' in anno:
gt_boxes_lidar = anno['boxes_lidar'].copy()
else:
gt_boxes_lidar = anno['gt_boxes'].copy()
# filter by fov
if is_gt and self.dataset_cfg.get('GT_FILTER', None):
if self.dataset_cfg.GT_FILTER.get('FOV_FILTER', None):
fov_gt_flag = self.extract_fov_gt(
gt_boxes_lidar, self.dataset_cfg['FOV_DEGREE'], self.dataset_cfg['FOV_ANGLE']
)
gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag]
anno['name'] = anno['name'][fov_gt_flag]
anno['bbox'] = np.zeros((len(anno['name']), 4))
anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]
anno['truncated'] = np.zeros(len(anno['name']))
anno['occluded'] = np.zeros(len(anno['name']))
if len(gt_boxes_lidar) > 0:
if info_with_fakelidar:
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)
gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2
anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))
anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar
anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar
anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar
dxdydz = gt_boxes_lidar[:, 3:6]
anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw
anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0
anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']
else:
anno['location'] = anno['dimensions'] = np.zeros((0, 3))
anno['rotation_y'] = anno['alpha'] = np.zeros(0)
transform_to_kitti_format(eval_det_annos)
transform_to_kitti_format(eval_gt_annos, info_with_fakelidar=False, is_gt=True)
kitti_class_names = []
for x in class_names:
if x in map_name_to_kitti:
kitti_class_names.append(map_name_to_kitti[x])
else:
kitti_class_names.append('Person_sitting')
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
)
return ap_result_str, ap_dict
def nuscene_eval(self, det_annos, class_names, **kwargs):
import json
from nuscenes.nuscenes import NuScenes
from . import nuscenes_utils
nusc = NuScenes(version=self.dataset_cfg.VERSION, dataroot=str(self.root_path), verbose=True)
nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(det_annos, nusc)
nusc_annos['meta'] = {
'use_camera': False,
'use_lidar': True,
'use_radar': False,
'use_map': False,
'use_external': False,
}
output_path = Path(kwargs['output_path'])
output_path.mkdir(exist_ok=True, parents=True)
res_path = str(output_path / 'results_nusc.json')
with open(res_path, 'w') as f:
json.dump(nusc_annos, f)
self.logger.info(f'The predictions of NuScenes have been saved to {res_path}')
if self.dataset_cfg.VERSION == 'v1.0-test':
return 'No ground-truth annotations for evaluation', {}
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval
eval_set_map = {
'v1.0-mini': 'mini_val',
'v1.0-trainval': 'val',
'v1.0-test': 'test'
}
try:
eval_version = 'detection_cvpr_2019'
eval_config = config_factory(eval_version)
        except Exception:
eval_version = 'cvpr_2019'
eval_config = config_factory(eval_version)
nusc_eval = NuScenesEval(
nusc,
config=eval_config,
result_path=res_path,
eval_set=eval_set_map[self.dataset_cfg.VERSION],
output_dir=str(output_path),
verbose=True,
)
metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)
with open(output_path / 'metrics_summary.json', 'r') as f:
metrics = json.load(f)
result_str, result_dict = nuscenes_utils.format_nuscene_results(metrics, self.class_names, version=eval_version)
return result_str, result_dict
def evaluation(self, det_annos, class_names, **kwargs):
if kwargs['eval_metric'] == 'kitti':
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = copy.deepcopy(self.infos)
return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names)
elif kwargs['eval_metric'] == 'nuscenes':
return self.nuscene_eval(det_annos, class_names, **kwargs)
else:
raise NotImplementedError
def create_groundtruth_database(self, used_classes=None, max_sweeps=10):
import torch
database_save_path = self.root_path / f'gt_database_{max_sweeps}sweeps_withvelo'
db_info_save_path = self.root_path / f'nuscenes_dbinfos_{max_sweeps}sweeps_withvelo.pkl'
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
for idx in tqdm(range(len(self.infos))):
sample_idx = idx
info = self.infos[idx]
points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps)
gt_boxes = info['gt_boxes']
gt_names = info['gt_names']
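            # points_in_boxes_gpu returns, for every point, the index of the box
            # containing it (-1 if none); points are then gathered per box and
            # shifted to a box-centered frame before being dumped to .bin files.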
box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
).long().squeeze(dim=0).cpu().numpy()
for i in range(gt_boxes.shape[0]):
filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i)
filepath = database_save_path / filename
gt_points = points[box_idxs_of_pts == i]
gt_points[:, :3] -= gt_boxes[i, :3]
                with open(filepath, 'wb') as f:  # binary mode: gt_points.tofile writes raw bytes
gt_points.tofile(f)
if (used_classes is None) or gt_names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
if gt_names[i] in all_db_infos:
all_db_infos[gt_names[i]].append(db_info)
else:
all_db_infos[gt_names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
def create_nuscenes_info(version, data_path, save_path, max_sweeps=10):
from nuscenes.nuscenes import NuScenes
from nuscenes.utils import splits
from . import nuscenes_utils
data_path = data_path / version
save_path = save_path / version
assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
if version == 'v1.0-trainval':
train_scenes = splits.train
val_scenes = splits.val
elif version == 'v1.0-test':
train_scenes = splits.test
val_scenes = []
elif version == 'v1.0-mini':
train_scenes = splits.mini_train
val_scenes = splits.mini_val
else:
raise NotImplementedError
nusc = NuScenes(version=version, dataroot=data_path, verbose=True)
available_scenes = nuscenes_utils.get_available_scenes(nusc)
available_scene_names = [s['name'] for s in available_scenes]
train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes])
val_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes])
print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes)))
train_nusc_infos, val_nusc_infos = nuscenes_utils.fill_trainval_infos(
data_path=data_path, nusc=nusc, train_scenes=train_scenes, val_scenes=val_scenes,
test='test' in version, max_sweeps=max_sweeps
)
if version == 'v1.0-test':
print('test sample: %d' % len(train_nusc_infos))
with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_test.pkl', 'wb') as f:
pickle.dump(train_nusc_infos, f)
else:
print('train sample: %d, val sample: %d' % (len(train_nusc_infos), len(val_nusc_infos)))
with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_train.pkl', 'wb') as f:
pickle.dump(train_nusc_infos, f)
with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_val.pkl', 'wb') as f:
pickle.dump(val_nusc_infos, f)
if __name__ == '__main__':
import yaml
import argparse
from pathlib import Path
from easydict import EasyDict
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
parser.add_argument('--func', type=str, default='create_nuscenes_infos', help='')
parser.add_argument('--version', type=str, default='v1.0-trainval', help='')
args = parser.parse_args()
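    # Typical invocation (the cfg path below is illustrative, not fixed):
    #   python -m pcdet.datasets.nuscenes.nuscenes_dataset \
    #       --func create_nuscenes_infos \
    #       --cfg_file tools/cfgs/dataset_configs/nuscenes_dataset.yaml \
    #       --version v1.0-trainval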
if args.func == 'create_nuscenes_infos':
dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file)))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
dataset_cfg.VERSION = args.version
create_nuscenes_info(
version=dataset_cfg.VERSION,
data_path=ROOT_DIR / 'data' / 'nuscenes',
save_path=ROOT_DIR / 'data' / 'nuscenes',
max_sweeps=dataset_cfg.MAX_SWEEPS,
)
nuscenes_dataset = NuScenesDataset(
dataset_cfg=dataset_cfg, class_names=None,
root_path=ROOT_DIR / 'data' / 'nuscenes',
logger=common_utils.create_logger(), training=True
)
nuscenes_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
| 22,903
| 42.461101
| 129
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/nuscenes/nuscenes_semi_dataset.py
|
import copy
import pickle
from pathlib import Path
import os
import io
import numpy as np
from tqdm import tqdm
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils
from ..semi_dataset import SemiDatasetTemplate
def split_nuscenes_semi_data(dataset_cfg, info_paths, data_splits, root_path, labeled_ratio, logger):
oss_path = dataset_cfg.OSS_PATH if 'OSS_PATH' in dataset_cfg else None
if oss_path:
from petrel_client.client import Client
client = Client('~/.petreloss.conf')
nuscenes_pretrain_infos = []
nuscenes_test_infos = []
nuscenes_labeled_infos = []
nuscenes_unlabeled_infos = []
def check_annos(info):
return 'annos' in info
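    # Two split modes follow: if RANDOM_SAMPLE_ID_PATH is set, labeled frames are
    # picked by a saved index array and the rest of the train infos become the
    # unlabeled pool; otherwise the pre-defined train/test/raw info files are used.
    # Note: infos built by fill_trainval_infos store 'gt_boxes' rather than an
    # 'annos' key, which is presumably why the check_annos filter is commented out
    # in the OSS branches below.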
if dataset_cfg.get('RANDOM_SAMPLE_ID_PATH', None):
root_path = Path(root_path)
logger.info('Loading NuScenes dataset')
nuscenes_infos = {"train":[], "test":[]}
for info_path in dataset_cfg.INFO_PATH["train"]:
if oss_path is None:
info_path = root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
nuscenes_infos["train"].extend(infos)
else:
info_path = os.path.join(oss_path, info_path)
#pkl_bytes = self.client.get(info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
nuscenes_infos["train"].extend(infos)
for info_path in dataset_cfg.INFO_PATH["test"]:
if oss_path is None:
info_path = root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
nuscenes_infos["test"].extend(infos)
else:
info_path = os.path.join(oss_path, info_path)
#pkl_bytes = self.client.get(info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
nuscenes_infos["test"].extend(infos)
sampled_id = np.load(dataset_cfg.RANDOM_SAMPLE_ID_PATH)
nuscenes_pretrain_infos = [nuscenes_infos["train"][i] for i in sampled_id]
nuscenes_labeled_infos = [nuscenes_infos["train"][i] for i in sampled_id]
if dataset_cfg.get('RANDOM_SAMPLE_ID_PATH_UNLABEL', None):
sampled_id_unlabel = np.load(dataset_cfg.RANDOM_SAMPLE_ID_PATH_UNLABEL)
nuscenes_unlabeled_infos = [nuscenes_infos["train"][i] for i in sampled_id_unlabel if i not in sampled_id]
else:
nuscenes_unlabeled_infos = [nuscenes_infos["train"][i] for i in range(len(nuscenes_infos["train"])) if i not in sampled_id]
nuscenes_test_infos = nuscenes_infos["test"]
else:
root_path = Path(root_path)
train_split = data_splits['train']
for info_path in info_paths[train_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
infos = list(filter(check_annos, infos))
nuscenes_pretrain_infos.extend(copy.deepcopy(infos))
nuscenes_labeled_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
# infos = list(filter(check_annos, infos))
nuscenes_pretrain_infos.extend(copy.deepcopy(infos))
nuscenes_labeled_infos.extend(copy.deepcopy(infos))
test_split = data_splits['test']
for info_path in info_paths[test_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
infos = list(filter(check_annos, infos))
nuscenes_test_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
# infos = list(filter(check_annos, infos))
nuscenes_test_infos.extend(copy.deepcopy(infos))
raw_split = data_splits['raw']
for info_path in info_paths[raw_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
nuscenes_unlabeled_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
nuscenes_unlabeled_infos.extend(copy.deepcopy(infos))
logger.info('Total samples for nuscenes pre-training dataset: %d' % (len(nuscenes_pretrain_infos)))
logger.info('Total samples for nuscenes testing dataset: %d' % (len(nuscenes_test_infos)))
logger.info('Total samples for nuscenes labeled dataset: %d' % (len(nuscenes_labeled_infos)))
logger.info('Total samples for nuscenes unlabeled dataset: %d' % (len(nuscenes_unlabeled_infos)))
return nuscenes_pretrain_infos, nuscenes_test_infos, nuscenes_labeled_infos, nuscenes_unlabeled_infos
class NuScenesSemiDataset(SemiDatasetTemplate):
"""Petrel Ceph storage backend.
    3DTrans supports reading data from and writing data to Ceph.
    Usage:
        self.oss_path = 's3://path/of/nuScenes'
        '~/.petreloss.conf': a Ceph config file storing the KEY/ACCESS_KEY of the S3 Ceph storage
"""
def __init__(self, dataset_cfg, class_names,infos=None, training=True, root_path=None, logger=None):
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
if self.oss_path is not None:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
# self.oss_data_list = self.list_oss_dir(self.oss_path, with_info=False)
            # zhangbo: for the OSS format, listing the nuScenes dataset causes a bug,
            # because OSS cannot load too many objects at once
self.nuscenes_infos = infos
def balanced_infos_resampling(self, infos):
"""
Class-balanced sampling of nuScenes dataset from https://arxiv.org/abs/1908.09492
"""
if self.class_names is None:
return infos
cls_infos = {name: [] for name in self.class_names}
for info in infos:
for name in set(info['gt_names']):
if name in self.class_names:
cls_infos[name].append(info)
duplicated_samples = sum([len(v) for _, v in cls_infos.items()])
cls_dist = {k: len(v) / duplicated_samples for k, v in cls_infos.items()}
sampled_infos = []
frac = 1.0 / len(self.class_names)
ratios = [frac / v for v in cls_dist.values()]
for cur_cls_infos, ratio in zip(list(cls_infos.values()), ratios):
sampled_infos += np.random.choice(
cur_cls_infos, int(len(cur_cls_infos) * ratio)
).tolist()
self.logger.info('Total samples after balanced resampling: %s' % (len(sampled_infos)))
cls_infos_new = {name: [] for name in self.class_names}
for info in sampled_infos:
for name in set(info['gt_names']):
if name in self.class_names:
cls_infos_new[name].append(info)
cls_dist_new = {k: len(v) / len(sampled_infos) for k, v in cls_infos_new.items()}
return sampled_infos
def get_sweep(self, sweep_info):
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
return points[mask]
if self.oss_path is None:
lidar_path = self.root_path / sweep_info['lidar_path']
points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
else:
lidar_path = os.path.join(self.oss_path, sweep_info['lidar_path'])
#sdk_local_bytes = self.client.get(lidar_path)
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points_sweep = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
points_sweep = remove_ego_points(points_sweep).T
if sweep_info['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
return points_sweep.T, cur_times.T
def get_lidar_with_sweeps(self, index, max_sweeps=1):
info = self.nuscenes_infos[index]
if self.oss_path is None:
lidar_path = self.root_path / info['lidar_path']
points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
else:
lidar_path = os.path.join(self.oss_path, info['lidar_path'])
#sdk_local_bytes = self.client.get(lidar_path)
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points_pre = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1).reshape([-1, 5]).copy()
points = points_pre[:, :4]
sweep_points_list = [points]
sweep_times_list = [np.zeros((points.shape[0], 1))]
for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
sweep_points_list.append(points_sweep)
sweep_times_list.append(times_sweep)
points = np.concatenate(sweep_points_list, axis=0)
times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
points = np.concatenate((points, times), axis=1)
return points
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.nuscenes_infos) * self.total_epochs
return len(self.nuscenes_infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.nuscenes_infos)
info = copy.deepcopy(self.nuscenes_infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "nusc",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']},
'index_id': index
}
if 'gt_boxes' in info:
if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
else:
mask = None
input_dict.update({
'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask],
'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask]
})
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
# for debug only
# gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_)
# debug_dict = {'gt_boxes': copy.deepcopy(input_dict['gt_boxes'][gt_boxes_mask])}
if self.dataset_cfg.get('FOV_POINTS_ONLY', None):
input_dict['points'] = self.extract_fov_data(
input_dict['points'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
if input_dict['gt_boxes'] is not None:
fov_gt_flag = self.extract_fov_gt(
input_dict['gt_boxes'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
input_dict.update({
'gt_names': input_dict['gt_names'][fov_gt_flag],
'gt_boxes': input_dict['gt_boxes'][fov_gt_flag],
})
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data(data_dict=input_dict)
if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False) and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
gt_boxes = data_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
data_dict['gt_boxes'] = gt_boxes
if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in data_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
data_dict['gt_boxes'] = data_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]]
return data_dict
#@staticmethod
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
if self.dataset_cfg.get('SHIFT_COOR', None):
#print ("*******WARNING FOR SHIFT_COOR:", self.dataset_cfg.SHIFT_COOR)
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
pred_dict['pred_labels'] = pred_labels
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
single_pred_dict['metadata'] = batch_dict['metadata'][index]
annos.append(single_pred_dict)
return annos
def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names):
from ..kitti.kitti_object_eval_python import eval as kitti_eval
map_name_to_kitti = {
'car': 'Car',
'pedestrian': 'Pedestrian',
'truck': 'Truck',
'bicycle': 'Cyclist',
}
def transform_to_kitti_format(annos, info_with_fakelidar=False, is_gt=False):
for anno in annos:
if 'name' not in anno:
anno['name'] = anno['gt_names']
anno.pop('gt_names')
for k in range(anno['name'].shape[0]):
if anno['name'][k] in map_name_to_kitti:
anno['name'][k] = map_name_to_kitti[anno['name'][k]]
else:
anno['name'][k] = 'Person_sitting'
if 'boxes_lidar' in anno:
gt_boxes_lidar = anno['boxes_lidar'].copy()
else:
gt_boxes_lidar = anno['gt_boxes'].copy()
# filter by fov
if is_gt and self.dataset_cfg.get('GT_FILTER', None):
if self.dataset_cfg.GT_FILTER.get('FOV_FILTER', None):
fov_gt_flag = self.extract_fov_gt(
gt_boxes_lidar, self.dataset_cfg['FOV_DEGREE'], self.dataset_cfg['FOV_ANGLE']
)
gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag]
anno['name'] = anno['name'][fov_gt_flag]
anno['bbox'] = np.zeros((len(anno['name']), 4))
anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]
anno['truncated'] = np.zeros(len(anno['name']))
anno['occluded'] = np.zeros(len(anno['name']))
if len(gt_boxes_lidar) > 0:
if info_with_fakelidar:
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)
gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2
anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))
anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar
anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar
anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar
dxdydz = gt_boxes_lidar[:, 3:6]
anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw
anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0
anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']
else:
anno['location'] = anno['dimensions'] = np.zeros((0, 3))
anno['rotation_y'] = anno['alpha'] = np.zeros(0)
transform_to_kitti_format(eval_det_annos)
transform_to_kitti_format(eval_gt_annos, info_with_fakelidar=False, is_gt=True)
kitti_class_names = []
for x in class_names:
if x in map_name_to_kitti:
kitti_class_names.append(map_name_to_kitti[x])
else:
kitti_class_names.append('Person_sitting')
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
)
return ap_result_str, ap_dict
def nuscene_eval(self, det_annos, class_names, **kwargs):
import json
from nuscenes.nuscenes import NuScenes
from . import nuscenes_utils
nusc = NuScenes(version=self.dataset_cfg.VERSION, dataroot=str(self.root_path), verbose=True)
nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(det_annos, nusc)
nusc_annos['meta'] = {
'use_camera': False,
'use_lidar': True,
'use_radar': False,
'use_map': False,
'use_external': False,
}
output_path = Path(kwargs['output_path'])
output_path.mkdir(exist_ok=True, parents=True)
res_path = str(output_path / 'results_nusc.json')
with open(res_path, 'w') as f:
json.dump(nusc_annos, f)
self.logger.info(f'The predictions of NuScenes have been saved to {res_path}')
if self.dataset_cfg.VERSION == 'v1.0-test':
return 'No ground-truth annotations for evaluation', {}
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval
eval_set_map = {
'v1.0-mini': 'mini_val',
'v1.0-trainval': 'val',
'v1.0-test': 'test'
}
try:
eval_version = 'detection_cvpr_2019'
eval_config = config_factory(eval_version)
        except Exception:
eval_version = 'cvpr_2019'
eval_config = config_factory(eval_version)
nusc_eval = NuScenesEval(
nusc,
config=eval_config,
result_path=res_path,
eval_set=eval_set_map[self.dataset_cfg.VERSION],
output_dir=str(output_path),
verbose=True,
)
metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)
with open(output_path / 'metrics_summary.json', 'r') as f:
metrics = json.load(f)
result_str, result_dict = nuscenes_utils.format_nuscene_results(metrics, self.class_names, version=eval_version)
return result_str, result_dict
def evaluation(self, det_annos, class_names, **kwargs):
if kwargs['eval_metric'] == 'kitti':
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = copy.deepcopy(self.nuscenes_infos)
return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names)
elif kwargs['eval_metric'] == 'nuscenes':
return self.nuscene_eval(det_annos, class_names, **kwargs)
else:
raise NotImplementedError
class NuScenesPretrainDataset(NuScenesSemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.nuscenes_infos)
info = copy.deepcopy(self.nuscenes_infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "nusc",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']},
'index_id': index
}
if 'gt_boxes' in info:
if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
else:
mask = None
input_dict.update({
'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask],
'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask]
})
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
data_dict = self.prepare_data(data_dict=input_dict)
if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False) and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
gt_boxes = data_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
data_dict['gt_boxes'] = gt_boxes
if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in data_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
data_dict['gt_boxes'] = data_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]]
return data_dict
class NuScenesLabeledDataset(NuScenesSemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
self.labeled_data_for = dataset_cfg.LABELED_DATA_FOR
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.nuscenes_infos)
info = copy.deepcopy(self.nuscenes_infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "nusc",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']},
'index_id': index
}
assert 'gt_boxes' in info
if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
else:
mask = None
input_dict.update({
'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask],
'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask]
})
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
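        # prepare_data_ssl builds two differently augmented views of the same frame;
        # LABELED_DATA_FOR selects which of the teacher/student dicts are populated
        # (judging by the None checks below, an unused teacher view comes back as None).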
teacher_dict, student_dict = self.prepare_data_ssl(input_dict, output_dicts=self.labeled_data_for)
if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False) and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
if teacher_dict is not None:
gt_boxes = teacher_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
teacher_dict['gt_boxes'] = gt_boxes
gt_boxes = student_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
student_dict['gt_boxes'] = gt_boxes
if teacher_dict is not None and not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in teacher_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
teacher_dict['gt_boxes'] = teacher_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]]
if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in student_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
student_dict['gt_boxes'] = student_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]]
return tuple([teacher_dict, student_dict])
class NuScenesUnlabeledDataset(NuScenesSemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
self.unlabeled_data_for = dataset_cfg.UNLABELED_DATA_FOR
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.nuscenes_infos)
info = copy.deepcopy(self.nuscenes_infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "nusc",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']},
'index_id': index
}
teacher_dict, student_dict = self.prepare_data_ssl(input_dict, output_dicts=self.unlabeled_data_for)
return tuple([teacher_dict, student_dict])
class NuScenesTestDataset(NuScenesSemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=False, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is False
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.nuscenes_infos)
info = copy.deepcopy(self.nuscenes_infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "nusc",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']},
'index_id': index
}
if 'gt_boxes' in info:
if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
else:
mask = None
input_dict.update({
'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask],
'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask]
})
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
data_dict = self.prepare_data(data_dict=input_dict)
if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False) and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
gt_boxes = data_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
data_dict['gt_boxes'] = gt_boxes
if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in data_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
data_dict['gt_boxes'] = data_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]]
return data_dict
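# --- Illustrative sketch (added for clarity, not part of the original file) ---
# SHIFT_COOR applies a constant xyz offset to both the points and the boxes so
# that datasets with different ego origins share a common coordinate frame.
# A self-contained numpy demo with a made-up offset:
def _demo_shift_coor():
    import numpy as np
    shift = np.array([0.0, 0.0, 1.8], dtype=np.float32)  # hypothetical offset
    points = np.zeros((4, 5), dtype=np.float32)
    gt_boxes = np.zeros((2, 9), dtype=np.float32)
    points[:, 0:3] += shift      # shift the point cloud
    gt_boxes[:, 0:3] += shift    # shift the box centers the same way
    return points, gt_boxes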
| 30,827
| 41.876217
| 161
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/nuscenes/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/nuscenes/nuscenes_dataset_ada.py
|
import copy
import pickle
from pathlib import Path
import os
import io
import numpy as np
from tqdm import tqdm
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, common_utils
from ..dataset import DatasetTemplate
class ActiveNuScenesDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
    3DTrans supports reading data from and writing data to Ceph.
    Usage:
        self.oss_path = 's3://path/of/nuScenes'
        '~/.petreloss.conf': a Ceph config file storing the KEY/ACCESS_KEY for S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, sample_info_path=None):
root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
if self.oss_path is not None:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
            # self.oss_data_list = self.list_oss_dir(self.oss_path, with_info=False)
            # Note (zhangbo): listing the whole nuScenes directory on OSS is buggy,
            # since OSS cannot enumerate that many objects at once
self.infos = []
self.include_nuscenes_data(self.mode, sample_info_path)
if self.training and self.dataset_cfg.get('BALANCED_RESAMPLING', False):
self.infos = self.balanced_infos_resampling(self.infos)
def include_nuscenes_data(self, mode, sample_info_path=None):
self.logger.info('Loading NuScenes dataset')
nuscenes_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
if sample_info_path is not None and str(sample_info_path).split(':')[0] != 's3':
info_path = sample_info_path
if not Path(info_path).exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
nuscenes_infos.extend(infos)
elif sample_info_path is not None and str(sample_info_path).split(':')[0] == 's3':
info_path = sample_info_path
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
nuscenes_infos.extend(infos)
elif self.oss_path is None:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
nuscenes_infos.extend(infos)
else:
info_path = os.path.join(self.oss_path, info_path)
#pkl_bytes = self.client.get(info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
nuscenes_infos.extend(infos)
self.infos.extend(nuscenes_infos)
self.logger.info('Total samples for NuScenes dataset: %d' % (len(nuscenes_infos)))
def balanced_infos_resampling(self, infos):
"""
Class-balanced sampling of nuScenes dataset from https://arxiv.org/abs/1908.09492
"""
if self.class_names is None:
return infos
cls_infos = {name: [] for name in self.class_names}
for info in infos:
for name in set(info['gt_names']):
if name in self.class_names:
cls_infos[name].append(info)
duplicated_samples = sum([len(v) for _, v in cls_infos.items()])
cls_dist = {k: len(v) / duplicated_samples for k, v in cls_infos.items()}
sampled_infos = []
frac = 1.0 / len(self.class_names)
ratios = [frac / v for v in cls_dist.values()]
for cur_cls_infos, ratio in zip(list(cls_infos.values()), ratios):
sampled_infos += np.random.choice(
cur_cls_infos, int(len(cur_cls_infos) * ratio)
).tolist()
self.logger.info('Total samples after balanced resampling: %s' % (len(sampled_infos)))
cls_infos_new = {name: [] for name in self.class_names}
for info in sampled_infos:
for name in set(info['gt_names']):
if name in self.class_names:
cls_infos_new[name].append(info)
        cls_dist_new = {k: len(v) / len(sampled_infos) for k, v in cls_infos_new.items()}  # recomputed distribution (currently unused; useful for debugging)
return sampled_infos
def get_sweep(self, sweep_info):
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
return points[mask]
if self.oss_path is None:
lidar_path = self.root_path / sweep_info['lidar_path']
points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
else:
lidar_path = os.path.join(self.oss_path, sweep_info['lidar_path'])
#sdk_local_bytes = self.client.get(lidar_path)
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points_sweep = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
points_sweep = remove_ego_points(points_sweep).T
if sweep_info['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
return points_sweep.T, cur_times.T
def get_lidar_with_sweeps(self, index, max_sweeps=1):
info = self.infos[index]
if self.oss_path is None:
lidar_path = self.root_path / info['lidar_path']
points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
else:
lidar_path = os.path.join(self.oss_path, info['lidar_path'])
#sdk_local_bytes = self.client.get(lidar_path)
sdk_local_bytes = self.client.get(lidar_path, update_cache=True)
points_pre = np.frombuffer(sdk_local_bytes, dtype=np.float32, count=-1).reshape([-1, 5]).copy()
points = points_pre[:, :4]
sweep_points_list = [points]
sweep_times_list = [np.zeros((points.shape[0], 1))]
        # guard against frames whose sweep list is shorter than max_sweeps - 1
        for k in np.random.choice(len(info['sweeps']), min(max_sweeps - 1, len(info['sweeps'])), replace=False):
points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
sweep_points_list.append(points_sweep)
sweep_times_list.append(times_sweep)
points = np.concatenate(sweep_points_list, axis=0)
times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
points = np.concatenate((points, times), axis=1)
return points
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict = {
'db_flag': "nusc",
'points': points,
'frame_id': Path(info['lidar_path']).stem,
'metadata': {'token': info['token']}
}
if 'gt_boxes' in info:
if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
else:
mask = None
input_dict.update({
'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask],
'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask]
})
if self.dataset_cfg.get('SHIFT_COOR', None):
input_dict['gt_boxes'][:, 0:3] += self.dataset_cfg.SHIFT_COOR
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
# for debug only
# gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_)
# debug_dict = {'gt_boxes': copy.deepcopy(input_dict['gt_boxes'][gt_boxes_mask])}
if self.dataset_cfg.get('FOV_POINTS_ONLY', None):
input_dict['points'] = self.extract_fov_data(
input_dict['points'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
if input_dict['gt_boxes'] is not None:
fov_gt_flag = self.extract_fov_gt(
input_dict['gt_boxes'], self.dataset_cfg.FOV_DEGREE, self.dataset_cfg.FOV_ANGLE
)
input_dict.update({
'gt_names': input_dict['gt_names'][fov_gt_flag],
'gt_boxes': input_dict['gt_boxes'][fov_gt_flag],
})
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
        if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False) and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
gt_boxes = input_dict['gt_boxes']
gt_boxes[np.isnan(gt_boxes)] = 0
input_dict['gt_boxes'] = gt_boxes
if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in input_dict and not self.dataset_cfg.get('USE_PSEUDO_LABEL', None):
input_dict['gt_boxes'] = input_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6]]
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
#@staticmethod
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
if self.dataset_cfg.get('SHIFT_COOR', None):
#print ("*******WARNING FOR SHIFT_COOR:", self.dataset_cfg.SHIFT_COOR)
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
pred_dict['pred_labels'] = pred_labels
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
single_pred_dict['metadata'] = batch_dict['metadata'][index]
annos.append(single_pred_dict)
return annos
def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names):
from ..kitti.kitti_object_eval_python import eval as kitti_eval
map_name_to_kitti = {
'car': 'Car',
'pedestrian': 'Pedestrian',
'truck': 'Truck',
}
def transform_to_kitti_format(annos, info_with_fakelidar=False, is_gt=False):
for anno in annos:
if 'name' not in anno:
anno['name'] = anno['gt_names']
anno.pop('gt_names')
for k in range(anno['name'].shape[0]):
if anno['name'][k] in map_name_to_kitti:
anno['name'][k] = map_name_to_kitti[anno['name'][k]]
                    else:
                        anno['name'][k] = 'Person_sitting'  # map unmatched classes to a KITTI class ignored during evaluation
if 'boxes_lidar' in anno:
gt_boxes_lidar = anno['boxes_lidar'].copy()
else:
gt_boxes_lidar = anno['gt_boxes'].copy()
# filter by fov
if is_gt and self.dataset_cfg.get('GT_FILTER', None):
if self.dataset_cfg.GT_FILTER.get('FOV_FILTER', None):
fov_gt_flag = self.extract_fov_gt(
gt_boxes_lidar, self.dataset_cfg['FOV_DEGREE'], self.dataset_cfg['FOV_ANGLE']
)
gt_boxes_lidar = gt_boxes_lidar[fov_gt_flag]
anno['name'] = anno['name'][fov_gt_flag]
anno['bbox'] = np.zeros((len(anno['name']), 4))
anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]
anno['truncated'] = np.zeros(len(anno['name']))
anno['occluded'] = np.zeros(len(anno['name']))
if len(gt_boxes_lidar) > 0:
if info_with_fakelidar:
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)
gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2
anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))
anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar
anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar
anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar
dxdydz = gt_boxes_lidar[:, 3:6]
anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw
anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0
anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']
else:
anno['location'] = anno['dimensions'] = np.zeros((0, 3))
anno['rotation_y'] = anno['alpha'] = np.zeros(0)
transform_to_kitti_format(eval_det_annos)
transform_to_kitti_format(eval_gt_annos, info_with_fakelidar=False, is_gt=True)
kitti_class_names = []
for x in class_names:
if x in map_name_to_kitti:
kitti_class_names.append(map_name_to_kitti[x])
else:
kitti_class_names.append('Person_sitting')
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
)
return ap_result_str, ap_dict
def nuscene_eval(self, det_annos, class_names, **kwargs):
import json
from nuscenes.nuscenes import NuScenes
from . import nuscenes_utils
nusc = NuScenes(version=self.dataset_cfg.VERSION, dataroot=str(self.root_path), verbose=True)
nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(det_annos, nusc)
nusc_annos['meta'] = {
'use_camera': False,
'use_lidar': True,
'use_radar': False,
'use_map': False,
'use_external': False,
}
output_path = Path(kwargs['output_path'])
output_path.mkdir(exist_ok=True, parents=True)
res_path = str(output_path / 'results_nusc.json')
with open(res_path, 'w') as f:
json.dump(nusc_annos, f)
self.logger.info(f'The predictions of NuScenes have been saved to {res_path}')
if self.dataset_cfg.VERSION == 'v1.0-test':
return 'No ground-truth annotations for evaluation', {}
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.evaluate import NuScenesEval
eval_set_map = {
'v1.0-mini': 'mini_val',
'v1.0-trainval': 'val',
'v1.0-test': 'test'
}
        try:
            eval_version = 'detection_cvpr_2019'
            eval_config = config_factory(eval_version)
        except Exception:
            # older versions of the nuscenes devkit name this config 'cvpr_2019'
            eval_version = 'cvpr_2019'
            eval_config = config_factory(eval_version)
nusc_eval = NuScenesEval(
nusc,
config=eval_config,
result_path=res_path,
eval_set=eval_set_map[self.dataset_cfg.VERSION],
output_dir=str(output_path),
verbose=True,
)
metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)
with open(output_path / 'metrics_summary.json', 'r') as f:
metrics = json.load(f)
result_str, result_dict = nuscenes_utils.format_nuscene_results(metrics, self.class_names, version=eval_version)
return result_str, result_dict
def evaluation(self, det_annos, class_names, **kwargs):
if kwargs['eval_metric'] == 'kitti':
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = copy.deepcopy(self.infos)
return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names)
elif kwargs['eval_metric'] == 'nuscenes':
return self.nuscene_eval(det_annos, class_names, **kwargs)
else:
raise NotImplementedError
def create_groundtruth_database(self, used_classes=None, max_sweeps=10):
import torch
database_save_path = self.root_path / f'gt_database_{max_sweeps}sweeps_withvelo'
db_info_save_path = self.root_path / f'nuscenes_dbinfos_{max_sweeps}sweeps_withvelo.pkl'
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
for idx in tqdm(range(len(self.infos))):
sample_idx = idx
info = self.infos[idx]
points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps)
gt_boxes = info['gt_boxes']
gt_names = info['gt_names']
box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
).long().squeeze(dim=0).cpu().numpy()
for i in range(gt_boxes.shape[0]):
filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i)
filepath = database_save_path / filename
gt_points = points[box_idxs_of_pts == i]
gt_points[:, :3] -= gt_boxes[i, :3]
                with open(filepath, 'wb') as f:  # binary mode for ndarray.tofile
gt_points.tofile(f)
if (used_classes is None) or gt_names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
if gt_names[i] in all_db_infos:
all_db_infos[gt_names[i]].append(db_info)
else:
all_db_infos[gt_names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
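# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The duplication ratios used by balanced_infos_resampling above follow the
# class-balanced sampling (CBGS) recipe: each class should contribute an equal
# fraction 1/num_classes, so classes below that fraction get ratio > 1 and are
# duplicated. A self-contained demo with made-up class counts:
def _demo_cbgs_ratios():
    counts = {'car': 800, 'pedestrian': 150, 'truck': 50}
    total = sum(counts.values())
    cls_dist = {k: v / total for k, v in counts.items()}
    frac = 1.0 / len(counts)
    ratios = {k: frac / v for k, v in cls_dist.items()}  # rare 'truck' gets the largest ratio
    return ratios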
def create_nuscenes_info(version, data_path, save_path, max_sweeps=10):
from nuscenes.nuscenes import NuScenes
from nuscenes.utils import splits
from . import nuscenes_utils
data_path = data_path / version
save_path = save_path / version
assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
if version == 'v1.0-trainval':
train_scenes = splits.train
val_scenes = splits.val
elif version == 'v1.0-test':
train_scenes = splits.test
val_scenes = []
elif version == 'v1.0-mini':
train_scenes = splits.mini_train
val_scenes = splits.mini_val
else:
raise NotImplementedError
nusc = NuScenes(version=version, dataroot=data_path, verbose=True)
available_scenes = nuscenes_utils.get_available_scenes(nusc)
available_scene_names = [s['name'] for s in available_scenes]
train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
train_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes])
val_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes])
print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes)))
train_nusc_infos, val_nusc_infos = nuscenes_utils.fill_trainval_infos(
data_path=data_path, nusc=nusc, train_scenes=train_scenes, val_scenes=val_scenes,
test='test' in version, max_sweeps=max_sweeps
)
if version == 'v1.0-test':
print('test sample: %d' % len(train_nusc_infos))
with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_test.pkl', 'wb') as f:
pickle.dump(train_nusc_infos, f)
else:
print('train sample: %d, val sample: %d' % (len(train_nusc_infos), len(val_nusc_infos)))
with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_train.pkl', 'wb') as f:
pickle.dump(train_nusc_infos, f)
with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_val.pkl', 'wb') as f:
pickle.dump(val_nusc_infos, f)
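# --- Illustrative sketch (added for clarity, not part of the original file) ---
# get_sweep above maps past-sweep points into the keyframe with a 4x4 homogeneous
# transform: stack a row of ones under the (3, N) points, left-multiply, and keep
# the first three rows. A self-contained numpy demo with an identity-plus-shift
# transform (values made up):
def _demo_sweep_transform():
    import numpy as np
    transform = np.eye(4)
    transform[:3, 3] = [1.0, 0.0, 0.0]  # hypothetical 1 m forward shift
    points = np.zeros((3, 5))           # (3, N) points in the sweep frame
    num_points = points.shape[1]
    mapped = transform.dot(np.vstack((points, np.ones(num_points))))[:3, :]
    assert mapped.shape == points.shape
    return mapped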
if __name__ == '__main__':
import yaml
import argparse
from pathlib import Path
from easydict import EasyDict
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
parser.add_argument('--func', type=str, default='create_nuscenes_infos', help='')
parser.add_argument('--version', type=str, default='v1.0-trainval', help='')
args = parser.parse_args()
if args.func == 'create_nuscenes_infos':
dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file)))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
dataset_cfg.VERSION = args.version
create_nuscenes_info(
version=dataset_cfg.VERSION,
data_path=ROOT_DIR / 'data' / 'nuscenes',
save_path=ROOT_DIR / 'data' / 'nuscenes',
max_sweeps=dataset_cfg.MAX_SWEEPS,
)
        nuscenes_dataset = ActiveNuScenesDataset(
dataset_cfg=dataset_cfg, class_names=None,
root_path=ROOT_DIR / 'data' / 'nuscenes',
logger=common_utils.create_logger(), training=True
)
nuscenes_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
| 23,617
| 42.899628
| 130
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/pandaset/pandaset_dataset.py
|
"""
Dataset from Pandaset (Hesai)
"""
import pickle
import os
try:
    import pandas as pd
    import pandaset as ps
except ImportError:
    # pandaset is an optional dependency; only needed when this dataset is used
    pass
import numpy as np
from ..dataset import DatasetTemplate
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
import torch
def pose_dict_to_numpy(pose):
"""
    Convert a pandaset pose dict to a numpy vector so it can be passed through the network
"""
pose_np = [pose["position"]["x"],
pose["position"]["y"],
pose["position"]["z"],
pose["heading"]["w"],
pose["heading"]["x"],
pose["heading"]["y"],
pose["heading"]["z"]]
return pose_np
def pose_numpy_to_dict(pose):
"""
    Convert a numpy pose vector back to a pandaset pose dict
"""
pose_dict = {'position':
{'x': pose[0],
'y': pose[1],
'z': pose[2]},
'heading':
{'w': pose[3],
'x': pose[4],
'y': pose[5],
'z': pose[6]}}
return pose_dict
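# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The two helpers above are exact inverses of each other, so a pose survives a
# round trip through the network-friendly vector form. Values are made up.
def _demo_pose_round_trip():
    pose = {'position': {'x': 1.0, 'y': 2.0, 'z': 0.5},
            'heading': {'w': 1.0, 'x': 0.0, 'y': 0.0, 'z': 0.0}}
    assert pose_numpy_to_dict(pose_dict_to_numpy(pose)) == pose
    return pose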
class PandasetDataset(DatasetTemplate):
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
if root_path is None:
root_path = self.dataset_cfg.DATA_PATH
self.dataset = ps.DataSet(os.path.join(root_path, 'dataset'))
self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
self.pandaset_infos = []
self.include_pandaset_infos(self.mode)
def include_pandaset_infos(self, mode):
if self.logger is not None:
self.logger.info('Loading PandaSet dataset')
pandaset_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = os.path.join(self.root_path, info_path)
if not os.path.exists(info_path):
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
pandaset_infos.extend(infos)
self.pandaset_infos.extend(pandaset_infos)
if self.logger is not None:
self.logger.info('Total samples for PandaSet dataset ({}): {}'.format(self.mode, len(pandaset_infos)))
def set_split(self, split):
self.sequences = self.dataset_cfg.SEQUENCES[split]
self.split = split
def __len__(self):
return len(self.pandaset_infos)
def __getitem__(self, index):
"""
        To support a custom dataset, implement this function to load the raw data (and labels), transform them to
        the unified normative coordinate system (x pointing forward, z pointing upwards), and call self.prepare_data()
        to process the data and send it to the model.
Args:
index:
Returns:
"""
info = self.pandaset_infos[index]
seq_idx = info['sequence']
pose = self._get_pose(info)
points = self._get_lidar_points(info, pose)
boxes, labels, zrot_world_to_ego = self._get_annotations(info, pose)
pose_np = pose_dict_to_numpy(pose)
input_dict = {'points': points,
'gt_boxes': boxes,
'gt_names': labels,
'sequence': int(seq_idx),
'frame_idx': info['frame_idx'],
'zrot_world_to_ego': zrot_world_to_ego,
'pose': pose_dict_to_numpy(pose)
}
# seq_idx is converted to int because strings can't be passed to
# the gpu in pytorch
# zrot_world_to_ego is propagated in order to be able to transform the
# predicted yaws back to world coordinates
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
def _get_pose(self, info):
seq_idx = info['sequence']
# get pose for world to ego frame transformation
if self.dataset[seq_idx].lidar.poses is None:
self.dataset[seq_idx].lidar._load_poses()
pose = self.dataset[seq_idx].lidar.poses[info['frame_idx']]
return pose
def _get_lidar_points(self, info, pose):
"""
        Get the lidar points in the unified normative coordinate system for a given frame.
        The intensity is normalized to the [0, 1] range (pandaset intensity is in [0, 255])
"""
# get lidar points
lidar_frame = pd.read_pickle(info['lidar_path'])
# get points for the required lidar(s) only
device = self.dataset_cfg.get('LIDAR_DEVICE', 0)
if device != -1:
lidar_frame = lidar_frame[lidar_frame.d == device]
world_points = lidar_frame.to_numpy()
        # There seem to be issues with the automatic deletion of pandas dataframes sometimes
del lidar_frame
points_loc = world_points[:, :3]
points_int = world_points[:, 3]
        # normalize intensity
points_int = points_int / 255
ego_points = ps.geometry.lidar_points_to_ego(points_loc, pose)
# Pandaset ego coordinates are:
# - x pointing to the right
# - y pointing to the front
# - z pointing up
# Normative coordinates are:
        # - x pointing forward
        # - y pointing to the left
        # - z pointing to the top
        # So a transformation is required to match the normative coordinates
ego_points = ego_points[:, [1, 0, 2]] # switch x and y
ego_points[:, 1] = - ego_points[:, 1] # revert y axis
return np.append(ego_points, np.expand_dims(points_int, axis=1), axis=1).astype(np.float32)
def _get_annotations(self,info, pose):
"""
Get box informations in the unified normative coordinate system for a given frame
"""
# get boxes
cuboids = pd.read_pickle(info["cuboids_path"])
device = self.dataset_cfg.get('LIDAR_DEVICE', 0)
if device != -1:
# keep cuboids that are seen by a given device
cuboids = cuboids[cuboids["cuboids.sensor_id"] != 1 - device]
xs = cuboids['position.x'].to_numpy()
ys = cuboids['position.y'].to_numpy()
zs = cuboids['position.z'].to_numpy()
dxs = cuboids['dimensions.x'].to_numpy()
dys = cuboids['dimensions.y'].to_numpy()
dzs = cuboids['dimensions.z'].to_numpy()
yaws = cuboids['yaw'].to_numpy()
labels = cuboids['label'].to_numpy()
del cuboids # There seem to be issues with the automatic deletion of pandas datasets sometimes
labels = np.array([self.dataset_cfg.TRAINING_CATEGORIES.get(lab, lab)
for lab in labels] )
# Compute the center points coordinates in ego coordinates
centers = np.vstack([xs, ys, zs]).T
ego_centers = ps.geometry.lidar_points_to_ego(centers, pose)
# Compute the yaw in ego coordinates
# The following implementation supposes that the pitch of the car is
# negligible compared to its yaw, in order to be able to express the
# bbox coordinates in the ego coordinate system with an {axis aligned
# box + yaw} only representation
yaxis_points_from_pose = ps.geometry.lidar_points_to_ego(np.array([[0, 0, 0], [0, 1., 0]]), pose)
yaxis_from_pose = yaxis_points_from_pose[1, :] - yaxis_points_from_pose[0, :]
if yaxis_from_pose[-1] >= 10**-1:
if self.logger is not None:
self.logger.warning("The car's pitch is supposed to be negligible " +
"sin(pitch) is >= 10**-1 ({})".format(yaxis_from_pose[-1]))
# rotation angle in rads of the y axis around thz z axis
zrot_world_to_ego = np.arctan2(-yaxis_from_pose[0], yaxis_from_pose[1])
ego_yaws = yaws + zrot_world_to_ego
# Pandaset ego coordinates are:
# - x pointing to the right
# - y pointing to the front
# - z pointing up
# Normative coordinates are:
        # - x pointing forward
        # - y pointing to the left
        # - z pointing to the top
        # So a transformation is required to match the normative coordinates
ego_xs = ego_centers[:, 1]
ego_ys = -ego_centers[:, 0]
ego_zs = ego_centers[:, 2]
ego_dxs = dys
ego_dys = dxs # stays >= 0
ego_dzs = dzs
ego_boxes = np.vstack([ego_xs, ego_ys, ego_zs, ego_dxs, ego_dys, ego_dzs, ego_yaws]).T
return ego_boxes.astype(np.float32), labels, zrot_world_to_ego
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
To support a custom dataset, implement this function to receive the predicted results from the model, and then
transform the unified normative coordinate to your required coordinate, and optionally save them to disk.
Args:
batch_dict: dict of original data from the dataloader
pred_dicts: dict of predicted results from the model
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path: if it is not None, save the results to this path
Returns:
"""
def generate_single_sample_dataframe(batch_index, box_dict, zrot_world_to_ego, pose):
pred_boxes = box_dict["pred_boxes"].cpu().numpy()
pred_scores = box_dict["pred_scores"].cpu().numpy()
pred_labels = box_dict["pred_labels"].cpu().numpy()
zrot = zrot_world_to_ego.cpu().numpy()
pose_dict = pose_numpy_to_dict(pose.cpu().numpy())
xs = pred_boxes[:, 0]
ys = pred_boxes[:, 1]
zs = pred_boxes[:, 2]
dxs = pred_boxes[:, 3]
dys = pred_boxes[:, 4]
dzs = pred_boxes[:, 5]
yaws = pred_boxes[:, 6]
            names = np.array(class_names)[pred_labels - 1]  # predicted labels start at 1
# convert from normative coordinates to pandaset ego coordinates
ego_xs = - ys
ego_ys = xs
ego_zs = zs
ego_dxs = dys
ego_dys = dxs
ego_dzs = dzs
ego_yaws = yaws
# convert from pandaset ego coordinates to world coordinates
            # for the moment, a simplified estimate of the ego yaw is computed in __getitem__,
            # which sets ego_yaw = world_yaw + zrot_world_to_ego
world_yaws = ego_yaws - zrot
ego_centers = np.vstack([ego_xs, ego_ys, ego_zs]).T
world_centers = ps.geometry.ego_to_lidar_points(ego_centers, pose_dict)
world_xs = world_centers[:, 0]
world_ys = world_centers[:, 1]
world_zs = world_centers[:, 2]
# dx, dy, dz remain unchanged as the bbox orientation is handled by
# the yaw information
data_dict = {'position.x': world_xs,
'position.y': world_ys,
'position.z': world_zs,
'dimensions.x': ego_dxs,
'dimensions.y': ego_dys,
'dimensions.z': ego_dzs,
'yaw': world_yaws % (2 * np.pi),
'label': names,
'score': pred_scores
}
return pd.DataFrame(data_dict)
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_idx = batch_dict['frame_idx'][index]
seq_idx = batch_dict['sequence'][index]
zrot = batch_dict['zrot_world_to_ego'][index]
pose = batch_dict['pose'][index]
single_pred_df = generate_single_sample_dataframe(index, box_dict, zrot, pose)
single_pred_dict = {'preds' : single_pred_df,
                                # 'name' ensures the number of detections can be checked in a KITTI-compatible format
'name' : single_pred_df['label'].tolist(),
'frame_idx': frame_idx,
'sequence': str(seq_idx).zfill(3)}
            # seq_idx was converted to int in self.__getitem__ because strings
# can't be passed to the gpu in pytorch.
# To convert it back to a string, we assume that the sequence is
# provided in pandaset format with 3 digits
if output_path is not None:
frame_id = str(int(frame_idx)).zfill(2)
seq_id = str(int(seq_idx)).zfill(3)
cur_det_file = os.path.join(output_path, seq_id, 'predictions',
'cuboids', ("{}.pkl.gz".format(frame_id)))
os.makedirs(os.path.dirname(cur_det_file), exist_ok=True)
single_pred_df.to_pickle(cur_det_file)
annos.append(single_pred_dict)
return annos
def get_infos(self):
"""
Generate the dataset infos dict for each sample of the dataset.
For each sample, this dict contains:
- the sequence index
- the frame index
- the path to the lidar data
- the path to the bounding box annotations
"""
infos = []
for seq in self.sequences:
s = self.dataset[seq]
s.load_lidar()
if len(s.lidar.data) > 100:
raise ValueError("The implementation for this dataset assumes that each sequence is " +
"no longer than 100 frames. The current sequence has {}".format(len(s.lidar.data)))
info = [{'sequence': seq,
'frame_idx': ii,
'lidar_path': os.path.join(self.root_path, 'dataset', seq, 'lidar', ("{:02d}.pkl.gz".format(ii))),
'cuboids_path': os.path.join(self.root_path, 'dataset', seq,
'annotations', 'cuboids', ("{:02d}.pkl.gz".format(ii)))
} for ii in range(len(s.lidar.data))]
infos.extend(info)
del self.dataset._sequences[seq]
return infos
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
database_save_path = os.path.join(self.root_path,
'gt_database' if split == 'train' else 'gt_database_{}'.format(split))
db_info_save_path = os.path.join(self.root_path,
'pandaset_dbinfos_{}.pkl'.format(split))
os.makedirs(database_save_path, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['frame_idx']
pose = self._get_pose(info)
points = self._get_lidar_points(info, pose)
gt_boxes, names, _ = self._get_annotations(info, pose)
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
tmp_name = names[i].replace("/", "").replace(" ", "")
filename = '%s_%s_%d.bin' % (sample_idx, tmp_name, i)
filepath = os.path.join(database_save_path, filename)
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'wb') as f:
gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = os.path.relpath(filepath, self.root_path) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
'difficulty': -1}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
def evaluation(self, det_annos, class_names, **kwargs):
self.logger.warning('Evaluation is not implemented for Pandaset as there is no official one. ' +
'Returning an empty evaluation result.')
ap_result_str = ''
ap_dict = {}
return ap_result_str, ap_dict
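# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The Pandaset-ego -> normative transform used in _get_lidar_points and
# _get_annotations above is just "swap x/y, then negate the new y": ego y
# (front) becomes normative x, and ego x (right) becomes normative -y.
# A self-contained numpy demo:
def _demo_ego_to_normative():
    import numpy as np
    ego_points = np.array([[1.0, 2.0, 0.5]])   # x right, y front, z up
    pts = ego_points[:, [1, 0, 2]].copy()      # swap x and y
    pts[:, 1] = -pts[:, 1]                     # flip the new y axis
    assert np.allclose(pts, [[2.0, -1.0, 0.5]])  # x front, y left, z up
    return pts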
def create_pandaset_infos(dataset_cfg, class_names, data_path, save_path):
"""
    Create the dataset info files so that the per-sample info is stored in a
    preprocessed pickle file and does not have to be recomputed at each use.
See PandasetDataset.get_infos for further details.
"""
dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
for split in ["train", "val", "test"]:
print("---------------- Start to generate {} data infos ---------------".format(split))
dataset.set_split(split)
infos = dataset.get_infos()
file_path = os.path.join(save_path, 'pandaset_infos_{}.pkl'.format(split))
with open(file_path, 'wb') as f:
pickle.dump(infos, f)
print("Pandaset info {} file is saved to {}".format(split, file_path))
print('------------Start create groundtruth database for data augmentation-----------')
dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
dataset.set_split("train")
dataset.create_groundtruth_database(
os.path.join(save_path, 'pandaset_infos_train.pkl'),
split="train"
)
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
import sys
if sys.argv.__len__() > 1 and sys.argv[1] == 'create_pandaset_infos':
import yaml
from pathlib import Path
from easydict import EasyDict
dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2])))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
create_pandaset_infos(
dataset_cfg=dataset_cfg,
class_names=['Car', 'Pedestrian', 'Cyclist'],
data_path=ROOT_DIR / 'data' / 'pandaset',
save_path=ROOT_DIR / 'data' / 'pandaset'
)
| 19,065
| 37.910204
| 157
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/pandaset/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_dataset_ada.py
|
import copy
import pickle
import os
from random import sample
import numpy as np
from pathlib import Path
from . import kitti_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti
from ..dataset import DatasetTemplate
# `from skimage import io` is used when preprocessing KITTI images
# (see get_image_shape); the stdlib io module is imported here because
# the Petrel OSS client returns byte streams
import io
class ActiveKittiDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
    3DTrans supports reading data from and writing data to Ceph.
    Usage:
        self.oss_path = 's3://path/of/KITTI'
        '~/.petreloss.conf': a Ceph config file storing the KEY/ACCESS_KEY for S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, sample_info_path=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
if self.oss_path is not None:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
if self.split != 'test':
self.root_split_path = os.path.join(self.oss_path, 'training')
else:
self.root_split_path = os.path.join(self.oss_path, 'testing')
else:
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
self.kitti_infos = []
self.include_kitti_data(self.mode, sample_info_path)
def include_kitti_data(self, mode, sample_info_path=None):
if self.logger is not None:
self.logger.info('Loading KITTI dataset')
kitti_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
if sample_info_path is not None and str(sample_info_path).split(':')[0] != 's3':
info_path = sample_info_path
if not Path(info_path).exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos.extend(infos)
elif sample_info_path is not None and str(sample_info_path).split(':')[0] == 's3':
info_path = sample_info_path
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
kitti_infos.extend(infos)
elif self.oss_path is None:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos.extend(infos)
else:
info_path = os.path.join(self.oss_path, info_path)
#pkl_bytes = self.client.get(info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
kitti_infos.extend(infos)
self.kitti_infos.extend(kitti_infos)
if self.logger is not None:
self.logger.info('Total samples for KITTI dataset: %d' % (len(kitti_infos)))
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
if self.oss_path is None:
lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
assert lidar_file.exists()
points = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
else:
lidar_file = os.path.join(self.root_split_path, 'velodyne', ('%s.bin' % idx))
sdk_local_bytes = self.client.get(lidar_file, update_cache=True)
points = np.frombuffer(sdk_local_bytes, dtype=np.float32).reshape(-1, 4).copy()
return points
def get_image(self, idx):
"""
Loads image for a sample
Args:
idx: int, Sample index
Returns:
image: (H, W, 3), RGB Image
"""
        from skimage import io  # shadow the stdlib io module locally for image reading
        img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
        assert img_file.exists()
        image = io.imread(img_file)
image = image.astype(np.float32)
image /= 255.0
return image
def get_image_shape(self, idx):
from skimage import io
img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
assert img_file.exists()
return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
def get_label(self, idx):
label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
assert label_file.exists()
return object3d_kitti.get_objects_from_label(label_file)
def get_depth_map(self, idx):
"""
Loads depth map for a sample
Args:
idx: str, Sample index
Returns:
depth: (H, W), Depth map
"""
        from skimage import io  # shadow the stdlib io module locally for image reading
        depth_file = self.root_split_path / 'depth_2' / ('%s.png' % idx)
        assert depth_file.exists()
        depth = io.imread(depth_file)
depth = depth.astype(np.float32)
depth /= 256.0
return depth
def get_calib(self, idx):
if self.oss_path is None:
calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx)
assert calib_file.exists()
calibrated_res = calibration_kitti.Calibration(calib_file, False)
else:
calib_file = os.path.join(self.root_split_path, 'calib', ('%s.txt' % idx))
text_bytes = self.client.get(calib_file, update_cache=True)
text_bytes = text_bytes.decode('utf-8')
calibrated_res = calibration_kitti.Calibration(io.StringIO(text_bytes), True)
return calibrated_res
def get_road_plane(self, idx):
if self.oss_path is None:
plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
if not plane_file.exists():
return None
with open(plane_file, 'r') as f:
lines = f.readlines()
else:
plane_file = os.path.join(self.root_split_path, 'planes', ('%s.txt' % idx))
text_bytes = self.client.get(plane_file, update_cache=True)
text_bytes = text_bytes.decode('utf-8')
lines = io.StringIO(text_bytes).readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
# Ensure normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib, margin=0):
"""
Args:
pts_rect:
img_shape:
calib:
margin:
Returns:
"""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0 - margin, pts_img[:, 0] < img_shape[1] + margin)
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0 - margin, pts_img[:, 1] < img_shape[0] + margin)
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
import concurrent.futures as futures
def process_single_scene(sample_idx):
print('%s sample_idx: %s' % (self.split, sample_idx))
info = {}
pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)}
info['image'] = image_info
calib = self.get_calib(sample_idx)
P2 = np.concatenate([calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
R0_4x4[3, 3] = 1.
R0_4x4[:3, :3] = calib.R0
V2C_4x4 = np.concatenate([calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
calib_info = {'P2': P2, 'R0_rect': R0_4x4, 'Tr_velo_to_cam': V2C_4x4}
info['calib'] = calib_info
if has_label:
obj_list = self.get_label(sample_idx)
annotations = {}
annotations['name'] = np.array([obj.cls_type for obj in obj_list])
annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list]) # lhw(camera) format
annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
annotations['score'] = np.array([obj.score for obj in obj_list])
annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32)
num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
num_gt = len(annotations['name'])
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
loc = annotations['location'][:num_objects]
dims = annotations['dimensions'][:num_objects]
rots = annotations['rotation_y'][:num_objects]
loc_lidar = calib.rect_to_lidar(loc)
l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
loc_lidar[:, 2] += h[:, 0] / 2
gt_boxes_lidar = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
annotations['gt_boxes_lidar'] = gt_boxes_lidar
info['annos'] = annotations
if count_inside_pts:
points = self.get_lidar(sample_idx)
calib = self.get_calib(sample_idx)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, info['image']['image_shape'], calib)
pts_fov = points[fov_flag]
corners_lidar = box_utils.boxes_to_corners_3d(gt_boxes_lidar)
num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
for k in range(num_objects):
flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k])
num_points_in_gt[k] = flag.sum()
annotations['num_points_in_gt'] = num_points_in_gt
return info
sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_scene, sample_id_list)
return list(infos)
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
import torch
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('kitti_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
annos = info['annos']
names = annos['name']
difficulty = annos['difficulty']
bbox = annos['bbox']
gt_boxes = annos['gt_boxes_lidar']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
            with open(filepath, 'wb') as f:  # binary mode for ndarray.tofile
gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
#@staticmethod
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
}
return ret_dict
def generate_single_sample_dict(batch_index, box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
calib = batch_dict['calib'][batch_index]
image_shape = batch_dict['image_shape'][batch_index].cpu().numpy()
if self.dataset_cfg.get('SHIFT_COOR', None):
#print ("*******WARNING FOR SHIFT_COOR:", self.dataset_cfg.SHIFT_COOR)
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
# BOX FILTER
if self.dataset_cfg.get('TEST', None) and self.dataset_cfg.TEST.BOX_FILTER['FOV_FILTER']:
box_preds_lidar_center = pred_boxes[:, 0:3]
pts_rect = calib.lidar_to_rect(box_preds_lidar_center)
fov_flag = self.get_fov_flag(pts_rect, image_shape, calib, margin=5)
pred_boxes = pred_boxes[fov_flag]
pred_labels = pred_labels[fov_flag]
pred_scores = pred_scores[fov_flag]
pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
pred_boxes_camera, calib, image_shape=image_shape
)
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
pred_dict['bbox'] = pred_boxes_img
pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
pred_dict['location'] = pred_boxes_camera[:, 0:3]
pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(index, box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
cur_det_file = output_path / ('%s.txt' % frame_id)
with open(cur_det_file, 'w') as f:
bbox = single_pred_dict['bbox']
loc = single_pred_dict['location']
dims = single_pred_dict['dimensions'] # lhw -> hwl
for idx in range(len(bbox)):
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
% (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
single_pred_dict['score'][idx]), file=f)
return annos
def evaluation(self, det_annos, class_names, **kwargs):
if 'annos' not in self.kitti_infos[0].keys():
return None, {}
from .kitti_object_eval_python import eval as kitti_eval
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.kitti_infos) * self.total_epochs
return len(self.kitti_infos)
def __getitem__(self, index):
# index = 4
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
calib = self.get_calib(sample_idx)
get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])
input_dict = {
'db_flag': "kitti",
'frame_id': sample_idx,
'calib': calib,
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='DontCare')
loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
gt_names = annos['name']
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
if self.dataset_cfg.get('SHIFT_COOR', None):
gt_boxes_lidar[:, 0:3] += self.dataset_cfg.SHIFT_COOR
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
if "gt_boxes2d" in get_item_list:
input_dict['gt_boxes2d'] = annos["bbox"]
            if self.dataset_cfg.get('REMOVE_ORIGIN_GTS', None) and self.training:
                # NOTE: assumes input_dict['points'] is already populated; with
                # GET_ITEM_LIST the points are only loaded further below
                input_dict['points'] = box_utils.remove_points_in_boxes3d(input_dict['points'], input_dict['gt_boxes'])
mask = np.zeros(gt_boxes_lidar.shape[0], dtype=np.bool_)
input_dict['gt_boxes'] = input_dict['gt_boxes'][mask]
input_dict['gt_names'] = input_dict['gt_names'][mask]
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
road_plane = self.get_road_plane(sample_idx)
if road_plane is not None:
input_dict['road_plane'] = road_plane
# for debug only
# gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_)
# debug_dict = {'gt_boxes': copy.deepcopy(gt_boxes_lidar[gt_boxes_mask])}
if "points" in get_item_list:
points = self.get_lidar(sample_idx)
img_shape = info['image']['image_shape']
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict['points'] = points
if "images" in get_item_list:
input_dict['images'] = self.get_image(sample_idx)
if "depth_maps" in get_item_list:
input_dict['depth_maps'] = self.get_depth_map(sample_idx)
if "calib_matricies" in get_item_list:
input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)
# load saved pseudo label for unlabel data
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data(data_dict=input_dict)
        data_dict['image_shape'] = info['image']['image_shape']  # always defined, unlike the local img_shape above
return data_dict
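# --- Illustrative sketch (added for clarity, not part of the original file) ---
# get_road_plane above normalizes the KITTI plane [a, b, c, d]: the normal is
# flipped so it faces up in the rectified camera frame (y points down, hence
# b must be negative) and then scaled to unit length. A self-contained demo
# with a made-up plane:
def _demo_plane_normalization():
    import numpy as np
    plane = np.array([0.0, 1.0, 0.0, -1.65])    # hypothetical [a, b, c, d]
    if plane[1] > 0:
        plane = -plane                          # make the normal face up
    plane = plane / np.linalg.norm(plane[0:3])  # unit-length normal
    return plane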
def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
    dataset = ActiveKittiDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
train_split, val_split = 'train', 'val'
train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
trainval_filename = save_path / 'kitti_infos_trainval.pkl'
test_filename = save_path / 'kitti_infos_test.pkl'
print('---------------Start to generate data infos---------------')
dataset.set_split(train_split)
kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
with open(train_filename, 'wb') as f:
pickle.dump(kitti_infos_train, f)
print('Kitti info train file is saved to %s' % train_filename)
dataset.set_split(val_split)
kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
with open(val_filename, 'wb') as f:
pickle.dump(kitti_infos_val, f)
print('Kitti info val file is saved to %s' % val_filename)
with open(trainval_filename, 'wb') as f:
pickle.dump(kitti_infos_train + kitti_infos_val, f)
print('Kitti info trainval file is saved to %s' % trainval_filename)
dataset.set_split('test')
kitti_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False)
with open(test_filename, 'wb') as f:
pickle.dump(kitti_infos_test, f)
print('Kitti info test file is saved to %s' % test_filename)
print('---------------Start create groundtruth database for data augmentation---------------')
dataset.set_split(train_split)
dataset.create_groundtruth_database(train_filename, split=train_split)
print('---------------Data preparation Done---------------')
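# --- Illustrative sketch (added for clarity, not part of the original file) ---
# get_infos above builds lidar boxes from camera-frame labels by converting the
# bottom-center location to lidar coordinates, lifting it by h/2 to the box
# center, reordering (l, h, w) -> (l, w, h), and remapping the camera rotation_y
# to a lidar heading via -(pi/2 + ry). A self-contained demo of the composition
# (the lidar location here is made up instead of using a real calibration):
def _demo_camera_label_to_lidar_box():
    import numpy as np
    loc_lidar = np.array([[10.0, 1.0, -1.6]])  # hypothetical rect_to_lidar output
    dims = np.array([[4.0, 1.5, 1.8]])         # (l, h, w), camera convention
    rots = np.array([0.1])                     # camera rotation_y
    l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
    loc_lidar[:, 2] += h[:, 0] / 2             # bottom center -> box center
    boxes = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
    assert boxes.shape == (1, 7)
    return boxes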
if __name__ == '__main__':
import sys
if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos':
import yaml
from pathlib import Path
from easydict import EasyDict
dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2])))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
create_kitti_infos(
dataset_cfg=dataset_cfg,
class_names=['Car', 'Pedestrian', 'Cyclist'],
data_path=ROOT_DIR / 'data' / 'kitti',
save_path=ROOT_DIR / 'data' / 'kitti'
)
| 25,584
| 43.036145
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_dataset.py
|
import copy
import pickle
import os
from pathlib import Path
import numpy as np
from . import kitti_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti
from ..dataset import DatasetTemplate
# Use `from skimage import io` when preprocessing KITTI from local disk; the
# standard-library io is imported here because samples may also be read from
# Petrel OSS (Ceph) as in-memory byte streams.
import io
class KittiDataset(DatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/KITTI'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
if self.oss_path is not None:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
if self.split != 'test':
self.root_split_path = os.path.join(self.oss_path, 'training')
else:
self.root_split_path = os.path.join(self.oss_path, 'testing')
else:
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
self.kitti_infos = []
self.include_kitti_data(self.mode)
def include_kitti_data(self, mode):
if self.logger is not None:
self.logger.info('Loading KITTI dataset')
kitti_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
if self.oss_path is None:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos.extend(infos)
else:
info_path = os.path.join(self.oss_path, info_path)
#pkl_bytes = self.client.get(info_path)
pkl_bytes = self.client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
kitti_infos.extend(infos)
self.kitti_infos.extend(kitti_infos)
if self.logger is not None:
self.logger.info('Total samples for KITTI dataset: %d' % (len(kitti_infos)))
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
if self.oss_path is None:
lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
assert lidar_file.exists()
points = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
else:
lidar_file = os.path.join(self.root_split_path, 'velodyne', ('%s.bin' % idx))
sdk_local_bytes = self.client.get(lidar_file, update_cache=True)
points = np.frombuffer(sdk_local_bytes, dtype=np.float32).reshape(-1, 4).copy()
return points
def get_image(self, idx):
"""
Loads image for a sample
Args:
            idx: str, Sample index
Returns:
image: (H, W, 3), RGB Image
"""
        from skimage import io  # shadow the std-lib io imported at module level
        img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
        assert img_file.exists()
        image = io.imread(img_file)
image = image.astype(np.float32)
image /= 255.0
return image
def get_image_shape(self, idx):
from skimage import io
img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
assert img_file.exists()
return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
def get_label(self, idx):
label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
assert label_file.exists()
return object3d_kitti.get_objects_from_label(label_file)
def get_depth_map(self, idx):
"""
Loads depth map for a sample
Args:
idx: str, Sample index
Returns:
depth: (H, W), Depth map
"""
        from skimage import io  # shadow the std-lib io imported at module level
        depth_file = self.root_split_path / 'depth_2' / ('%s.png' % idx)
        assert depth_file.exists()
        depth = io.imread(depth_file)
depth = depth.astype(np.float32)
depth /= 256.0
return depth
def get_calib(self, idx):
if self.oss_path is None:
calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx)
assert calib_file.exists()
calibrated_res = calibration_kitti.Calibration(calib_file, False)
else:
calib_file = os.path.join(self.root_split_path, 'calib', ('%s.txt' % idx))
text_bytes = self.client.get(calib_file, update_cache=True)
text_bytes = text_bytes.decode('utf-8')
calibrated_res = calibration_kitti.Calibration(io.StringIO(text_bytes), True)
return calibrated_res
def get_road_plane(self, idx):
if self.oss_path is None:
plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
if not plane_file.exists():
return None
with open(plane_file, 'r') as f:
lines = f.readlines()
else:
plane_file = os.path.join(self.root_split_path, 'planes', ('%s.txt' % idx))
text_bytes = self.client.get(plane_file, update_cache=True)
text_bytes = text_bytes.decode('utf-8')
lines = io.StringIO(text_bytes).readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
        # Ensure the normal always faces up; the plane is in rectified camera coordinates
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
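    # Note (added, illustrative): the road plane is (a, b, c, d) with
    # a*x + b*y + c*z + d = 0 in the rectified camera frame. Flipping the sign
    # when b > 0 keeps the normal pointing up (towards -y, since camera y points
    # down), and the 4-vector is then normalized by the norm of (a, b, c).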
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib, margin=0):
"""
Args:
pts_rect:
img_shape:
calib:
margin:
Returns:
"""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0 - margin, pts_img[:, 0] < img_shape[1] + margin)
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0 - margin, pts_img[:, 1] < img_shape[0] + margin)
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
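    # Illustrative usage (added, not in the original file): get_fov_flag keeps
    # points whose image projection lies inside [0, W) x [0, H), optionally
    # padded by `margin`, and whose rectified depth is non-negative, e.g.:
    #     pts_rect = calib.lidar_to_rect(points[:, 0:3])
    #     fov_flag = KittiDataset.get_fov_flag(pts_rect, img_shape, calib)
    #     points = points[fov_flag]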
def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
import concurrent.futures as futures
def process_single_scene(sample_idx):
print('%s sample_idx: %s' % (self.split, sample_idx))
info = {}
pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)}
info['image'] = image_info
calib = self.get_calib(sample_idx)
P2 = np.concatenate([calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
R0_4x4[3, 3] = 1.
R0_4x4[:3, :3] = calib.R0
V2C_4x4 = np.concatenate([calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
calib_info = {'P2': P2, 'R0_rect': R0_4x4, 'Tr_velo_to_cam': V2C_4x4}
info['calib'] = calib_info
if has_label:
obj_list = self.get_label(sample_idx)
annotations = {}
annotations['name'] = np.array([obj.cls_type for obj in obj_list])
annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list]) # lhw(camera) format
annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
annotations['score'] = np.array([obj.score for obj in obj_list])
annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32)
num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
num_gt = len(annotations['name'])
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
loc = annotations['location'][:num_objects]
dims = annotations['dimensions'][:num_objects]
rots = annotations['rotation_y'][:num_objects]
loc_lidar = calib.rect_to_lidar(loc)
l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
loc_lidar[:, 2] += h[:, 0] / 2
gt_boxes_lidar = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
annotations['gt_boxes_lidar'] = gt_boxes_lidar
info['annos'] = annotations
if count_inside_pts:
points = self.get_lidar(sample_idx)
calib = self.get_calib(sample_idx)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, info['image']['image_shape'], calib)
pts_fov = points[fov_flag]
corners_lidar = box_utils.boxes_to_corners_3d(gt_boxes_lidar)
num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
for k in range(num_objects):
flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k])
num_points_in_gt[k] = flag.sum()
annotations['num_points_in_gt'] = num_points_in_gt
return info
sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_scene, sample_id_list)
return list(infos)
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
import torch
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('kitti_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
annos = info['annos']
names = annos['name']
difficulty = annos['difficulty']
bbox = annos['bbox']
gt_boxes = annos['gt_boxes_lidar']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'w') as f:
gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
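    # Note (added): each saved .bin stores the object's points translated into
    # the box-local frame (gt_points[:, :3] -= gt_boxes[i, :3]), so the
    # gt-sampling augmentor can later paste them at an arbitrary box center.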
#@staticmethod
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
}
return ret_dict
def generate_single_sample_dict(batch_index, box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
calib = batch_dict['calib'][batch_index]
image_shape = batch_dict['image_shape'][batch_index].cpu().numpy()
if self.dataset_cfg.get('SHIFT_COOR', None):
#print ("*******WARNING FOR SHIFT_COOR:", self.dataset_cfg.SHIFT_COOR)
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
# BOX FILTER
if self.dataset_cfg.get('TEST', None) and self.dataset_cfg.TEST.BOX_FILTER['FOV_FILTER']:
box_preds_lidar_center = pred_boxes[:, 0:3]
pts_rect = calib.lidar_to_rect(box_preds_lidar_center)
fov_flag = self.get_fov_flag(pts_rect, image_shape, calib, margin=5)
pred_boxes = pred_boxes[fov_flag]
pred_labels = pred_labels[fov_flag]
pred_scores = pred_scores[fov_flag]
pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
pred_boxes_camera, calib, image_shape=image_shape
)
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
pred_dict['bbox'] = pred_boxes_img
pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
pred_dict['location'] = pred_boxes_camera[:, 0:3]
pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(index, box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
cur_det_file = output_path / ('%s.txt' % frame_id)
with open(cur_det_file, 'w') as f:
bbox = single_pred_dict['bbox']
loc = single_pred_dict['location']
dims = single_pred_dict['dimensions'] # lhw -> hwl
for idx in range(len(bbox)):
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
% (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
single_pred_dict['score'][idx]), file=f)
return annos
def evaluation(self, det_annos, class_names, **kwargs):
if 'annos' not in self.kitti_infos[0].keys():
return None, {}
from .kitti_object_eval_python import eval as kitti_eval
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.kitti_infos) * self.total_epochs
return len(self.kitti_infos)
def __getitem__(self, index):
# index = 4
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
calib = self.get_calib(sample_idx)
get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])
input_dict = {
'db_flag': "kitti",
'frame_id': sample_idx,
'calib': calib,
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='DontCare')
loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
gt_names = annos['name']
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
if self.dataset_cfg.get('SHIFT_COOR', None):
gt_boxes_lidar[:, 0:3] += self.dataset_cfg.SHIFT_COOR
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
if "gt_boxes2d" in get_item_list:
input_dict['gt_boxes2d'] = annos["bbox"]
if self.dataset_cfg.get('REMOVE_ORIGIN_GTS', None) and self.training:
input_dict['points'] = box_utils.remove_points_in_boxes3d(input_dict['points'], input_dict['gt_boxes'])
mask = np.zeros(gt_boxes_lidar.shape[0], dtype=np.bool_)
input_dict['gt_boxes'] = input_dict['gt_boxes'][mask]
input_dict['gt_names'] = input_dict['gt_names'][mask]
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
        # Sim-KITTI data may be used, which does not provide road-plane files
if not self.dataset_cfg.get('USE_SIM_DATA', None):
road_plane = self.get_road_plane(sample_idx)
if road_plane is not None:
input_dict['road_plane'] = road_plane
# for debug only
# gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_)
# debug_dict = {'gt_boxes': copy.deepcopy(gt_boxes_lidar[gt_boxes_mask])}
if "points" in get_item_list:
points = self.get_lidar(sample_idx)
img_shape = info['image']['image_shape']
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict['points'] = points
if "images" in get_item_list:
input_dict['images'] = self.get_image(sample_idx)
if "depth_maps" in get_item_list:
input_dict['depth_maps'] = self.get_depth_map(sample_idx)
if "calib_matricies" in get_item_list:
input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)
        # load saved pseudo labels for unlabeled data
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data(data_dict=input_dict)
data_dict['image_shape'] = img_shape
return data_dict
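# Illustrative usage sketch (added, not in the original file). `dataset_cfg` is
# assumed to be an EasyDict loaded from a KITTI dataset yaml, as in the
# __main__ block at the bottom of this file.
def _kitti_dataset_usage_example(dataset_cfg, root_path):
    dataset = KittiDataset(
        dataset_cfg=dataset_cfg, class_names=['Car', 'Pedestrian', 'Cyclist'],
        root_path=root_path, training=False
    )
    data_dict = dataset[0]  # one sample after prepare_data (points, gt_boxes, ...)
    return data_dict['points'].shape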
def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
dataset = KittiDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
train_split, val_split = 'train', 'val'
train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
trainval_filename = save_path / 'kitti_infos_trainval.pkl'
test_filename = save_path / 'kitti_infos_test.pkl'
print('---------------Start to generate data infos---------------')
dataset.set_split(train_split)
kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
with open(train_filename, 'wb') as f:
pickle.dump(kitti_infos_train, f)
print('Kitti info train file is saved to %s' % train_filename)
dataset.set_split(val_split)
kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
with open(val_filename, 'wb') as f:
pickle.dump(kitti_infos_val, f)
print('Kitti info val file is saved to %s' % val_filename)
with open(trainval_filename, 'wb') as f:
pickle.dump(kitti_infos_train + kitti_infos_val, f)
print('Kitti info trainval file is saved to %s' % trainval_filename)
dataset.set_split('test')
kitti_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False)
with open(test_filename, 'wb') as f:
pickle.dump(kitti_infos_test, f)
print('Kitti info test file is saved to %s' % test_filename)
print('---------------Start create groundtruth database for data augmentation---------------')
dataset.set_split(train_split)
dataset.create_groundtruth_database(train_filename, split=train_split)
print('---------------Data preparation Done---------------')
if __name__ == '__main__':
import sys
if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos':
import yaml
from pathlib import Path
from easydict import EasyDict
dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2])))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
create_kitti_infos(
dataset_cfg=dataset_cfg,
class_names=['Car', 'Pedestrian', 'Cyclist'],
data_path=ROOT_DIR / 'data' / 'kitti',
save_path=ROOT_DIR / 'data' / 'kitti'
)
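# Example invocation (added; adjust the yaml path to your setup):
#   python -m pcdet.datasets.kitti.kitti_dataset create_kitti_infos \
#       tools/cfgs/dataset_configs/kitti_dataset.yaml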
| 24,946
| 42.766667
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_eval.py
|
import pickle
import argparse
from .kitti_object_eval_python import eval as kitti_eval
import copy
import numpy as np
from . import kitti_utils
def filter_by_range(infos, gt_key, range_min=0, range_max=80, is_pred=False, dataset='kitti'):
infos = copy.deepcopy(infos)
total_objs = 0
for i, info in enumerate(infos):
if is_pred:
info.pop('truncated', None)
info.pop('occluded', None)
location = info['location']
range_distance = np.linalg.norm(location[:, [0, 2]], axis=-1)
mask = (range_distance >= range_min) & (range_distance <= range_max)
total_objs += mask.sum()
for key, val in info.items():
if isinstance(val, np.ndarray):
if key == gt_key:
info[key] = val[mask[:val.shape[0]]] # ignore the Don't Care mask
elif key in ['car_from_global', 'fov_gt_flag', 'gt_boxes_velocity', 'gt_boxes_token', 'cam_intrinsic',
'ref_from_car', 'gt_boxes', 'num_lidar_pts', 'num_radar_pts']:
continue
else:
try:
info[key] = val[mask]
except:
import ipdb; ipdb.set_trace(context=20)
return infos, total_objs
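# Minimal self-contained sketch (added, not in the original file) of what
# filter_by_range does to a single annotation dict; all values are invented.
def _filter_by_range_example():
    annos = [{
        'name': np.array(['Car', 'Car']),
        'location': np.array([[1.0, 1.5, 10.0], [2.0, 1.5, 60.0]]),  # camera coords, depth along z
        'gt_boxes_lidar': np.array([[10.0, -1.0, -0.8, 3.9, 1.6, 1.5, 0.0],
                                    [60.0, -2.0, -0.8, 3.9, 1.6, 1.5, 0.0]]),
    }]
    filtered, total = filter_by_range(annos, 'gt_boxes_lidar', range_min=0, range_max=30)
    assert total == 1 and len(filtered[0]['name']) == 1  # only the near box survives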
def transform_to_kitti_format(pred_infos, gt_annos, dataset, fakelidar):
if dataset == 'waymo':
map_name_to_kitti = {
'Vehicle': 'Car',
'Pedestrian': 'Pedestrian',
'Cyclist': 'Cyclist',
'Sign': 'Sign',
'Car': 'Car'
}
elif dataset in ['lyft', 'nuscenes']:
map_name_to_kitti = {
'car': 'Car',
'pedestrian': 'Pedestrian',
'truck': 'Truck',
}
else:
raise NotImplementedError
kwargs = {
'is_gt': True,
'GT_FILTER': True,
'FOV_FILTER': True,
'FOV_DEGREE': 90,
'FOV_ANGLE': 0,
'RANGE_FILTER': [0, -40, -10, 70.4, 40, 10]
}
kitti_utils.transform_annotations_to_kitti_format(pred_infos, map_name_to_kitti=map_name_to_kitti)
kitti_utils.transform_annotations_to_kitti_format(
gt_annos, map_name_to_kitti=map_name_to_kitti,
info_with_fakelidar=fakelidar, **kwargs
)
def main():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--pred_infos', type=str, default=None, help='pickle file')
parser.add_argument('--gt_infos', type=str, default=None, help='pickle file')
parser.add_argument('--class_names', type=str, nargs='+', default=['Car'], help='')
parser.add_argument('--dataset', type=str, default='kitti', help='')
parser.add_argument('--fakelidar', type=bool, default=False, help='')
args = parser.parse_args()
pred_infos = pickle.load(open(args.pred_infos, 'rb'))
gt_infos = pickle.load(open(args.gt_infos, 'rb'))
if args.dataset in ['kitti']:
gt_annos = [info['annos'] for info in gt_infos]
else:
gt_annos = gt_infos
    gt_keys = {
        'kitti': 'gt_boxes_lidar',  # plain string so the `key == gt_key` check in filter_by_range matches
        'lyft': 'gt_boxes_lidar',
        'nuscenes': 'gt_boxes_lidar'
    }
# For other datasets
if args.dataset != 'kitti':
transform_to_kitti_format(pred_infos, gt_annos, args.dataset, args.fakelidar)
print('------------------Start to eval------------------------')
range_list = [[0, 1000], [0, 30], [30, 50], [50, 80]]
for cur_range in range_list:
cur_pred_info, num_pred_objs = filter_by_range(
pred_infos, gt_keys[args.dataset], range_min=cur_range[0], range_max=cur_range[1],
is_pred=True, dataset=args.dataset
)
cur_gt_annos, num_gt_objs = filter_by_range(
gt_annos, gt_keys[args.dataset], range_min=cur_range[0], range_max=cur_range[1], dataset=args.dataset
)
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
cur_gt_annos, cur_pred_info, current_classes=['Car']
)
print(f'----------Range={cur_range}, avg_pred_objs={num_pred_objs / len(pred_infos)}, '
f'avg_gt_objs={num_gt_objs / len(gt_infos)}-------------')
print(ap_result_str)
if __name__ == '__main__':
main()
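# Example invocation (added; hypothetical file names):
#   python -m pcdet.datasets.kitti.kitti_eval --pred_infos result.pkl \
#       --gt_infos kitti_infos_val.pkl --class_names Car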
| 4,252
| 34.441667
| 118
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_utils.py
|
import numpy as np
from ...utils import box_utils
def transform_annotations_to_kitti_format(annos, map_name_to_kitti=None, info_with_fakelidar=False):
"""
Args:
annos:
map_name_to_kitti: dict, map name to KITTI names (Car, Pedestrian, Cyclist)
info_with_fakelidar:
Returns:
"""
for anno in annos:
        # lyft and nuscenes store object names under a different key in the info dict
if 'name' not in anno:
anno['name'] = anno['gt_names']
anno.pop('gt_names')
for k in range(anno['name'].shape[0]):
anno['name'][k] = map_name_to_kitti[anno['name'][k]]
anno['bbox'] = np.zeros((len(anno['name']), 4))
anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50]
anno['truncated'] = np.zeros(len(anno['name']))
anno['occluded'] = np.zeros(len(anno['name']))
if 'boxes_lidar' in anno:
gt_boxes_lidar = anno['boxes_lidar'].copy()
else:
gt_boxes_lidar = anno['gt_boxes_lidar'].copy()
if len(gt_boxes_lidar) > 0:
if info_with_fakelidar:
gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar)
gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2
anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3))
anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar
anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar
anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar
dxdydz = gt_boxes_lidar[:, 3:6]
anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw
anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0
anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y']
else:
anno['location'] = anno['dimensions'] = np.zeros((0, 3))
anno['rotation_y'] = anno['alpha'] = np.zeros(0)
return annos
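# Illustrative sketch (added, not in the original file): run the conversion on
# one toy annotation to see the lidar -> KITTI camera axis remapping; the box
# values below are invented.
def _transform_to_kitti_example():
    anno = {
        'name': np.array(['Vehicle']),
        'gt_boxes_lidar': np.array([[10.0, 2.0, -0.5, 4.0, 1.8, 1.6, 0.3]]),  # x y z dx dy dz heading
    }
    transform_annotations_to_kitti_format([anno], map_name_to_kitti={'Vehicle': 'Car'})
    # bottom-center z: -0.5 - 1.6 / 2 = -1.3; then x_cam = -y, y_cam = -z, z_cam = x
    assert np.allclose(anno['location'][0], [-2.0, 1.3, 10.0])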
def calib_to_matricies(calib):
"""
Converts calibration object to transformation matricies
Args:
calib: calibration.Calibration, Calibration object
Returns
V2R: (4, 4), Lidar to rectified camera transformation matrix
P2: (3, 4), Camera projection matrix
"""
V2C = np.vstack((calib.V2C, np.array([0, 0, 0, 1], dtype=np.float32))) # (4, 4)
R0 = np.hstack((calib.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
R0 = np.vstack((R0, np.array([0, 0, 0, 1], dtype=np.float32))) # (4, 4)
V2R = R0 @ V2C
P2 = calib.P2
return V2R, P2
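# Hedged sketch (added, not in the original file): V2R maps homogeneous lidar
# points into the rectified camera frame, and P2 projects them to pixels.
def _project_lidar_point_example(calib):
    V2R, P2 = calib_to_matricies(calib)
    pt_lidar = np.array([10.0, 0.0, -1.0, 1.0], dtype=np.float32)  # homogeneous lidar point
    pt_rect = V2R @ pt_lidar   # (4,) point in rectified camera coordinates
    uvw = P2 @ pt_rect         # (3,) homogeneous pixel coordinates
    return uvw[:2] / uvw[2]    # (u, v) image coordinates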
| 2,576
| 38.045455
| 105
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_semi_dataset.py
|
import copy
import pickle
from pathlib import Path
from . import kitti_utils
import io
import os
import numpy as np
from tqdm import tqdm
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti
from ..semi_dataset import SemiDatasetTemplate
def split_kitti_semi_data(dataset_cfg, info_paths, data_splits, root_path, labeled_ratio, logger):
oss_path = dataset_cfg.OSS_PATH if 'OSS_PATH' in dataset_cfg else None
if oss_path:
from petrel_client.client import Client
client = Client('~/.petreloss.conf')
kitti_pretrain_infos = []
kitti_test_infos = []
kitti_labeled_infos = []
kitti_unlabeled_infos = []
def check_annos(info):
return 'annos' in info
if dataset_cfg.get('RANDOM_SAMPLE_ID_PATH', None):
root_path = Path(root_path)
logger.info('Loading kitti dataset')
kitti_infos = {"train":[], "test":[]}
for info_path in dataset_cfg.INFO_PATH["train"]:
if oss_path is None:
info_path = root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos["train"].extend(infos)
else:
info_path = os.path.join(oss_path, info_path)
#pkl_bytes = self.client.get(info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
kitti_infos["train"].extend(infos)
for info_path in dataset_cfg.INFO_PATH["test"]:
if oss_path is None:
info_path = root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos["test"].extend(infos)
else:
info_path = os.path.join(oss_path, info_path)
#pkl_bytes = self.client.get(info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
kitti_infos["test"].extend(infos)
sampled_id = np.load(dataset_cfg.RANDOM_SAMPLE_ID_PATH)
kitti_pretrain_infos = [kitti_infos["train"][i] for i in sampled_id]
kitti_labeled_infos = [kitti_infos["train"][i] for i in sampled_id]
if dataset_cfg.get('RANDOM_SAMPLE_ID_PATH_UNLABEL', None):
sampled_id_unlabel = np.load(dataset_cfg.RANDOM_SAMPLE_ID_PATH_UNLABEL)
kitti_unlabeled_infos = [kitti_infos["train"][i] for i in sampled_id_unlabel if i not in sampled_id]
else:
kitti_unlabeled_infos = [kitti_infos["train"][i] for i in range(len(kitti_infos["train"])) if i not in sampled_id]
kitti_test_infos = kitti_infos["test"]
else:
root_path = Path(root_path)
train_split = data_splits['train']
for info_path in info_paths[train_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
infos = list(filter(check_annos, infos))
kitti_pretrain_infos.extend(copy.deepcopy(infos))
kitti_labeled_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
# infos = list(filter(check_annos, infos))
kitti_pretrain_infos.extend(copy.deepcopy(infos))
kitti_labeled_infos.extend(copy.deepcopy(infos))
test_split = data_splits['test']
for info_path in info_paths[test_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
infos = list(filter(check_annos, infos))
kitti_test_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
# infos = list(filter(check_annos, infos))
kitti_test_infos.extend(copy.deepcopy(infos))
raw_split = data_splits['raw']
for info_path in info_paths[raw_split]:
if oss_path is None:
info_path = root_path / info_path
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_unlabeled_infos.extend(copy.deepcopy(infos))
else:
info_path = os.path.join(oss_path, info_path)
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
kitti_unlabeled_infos.extend(copy.deepcopy(infos))
logger.info('Total samples for kitti pre-training dataset: %d' % (len(kitti_pretrain_infos)))
logger.info('Total samples for kitti testing dataset: %d' % (len(kitti_test_infos)))
logger.info('Total samples for kitti labeled dataset: %d' % (len(kitti_labeled_infos)))
logger.info('Total samples for kitti unlabeled dataset: %d' % (len(kitti_unlabeled_infos)))
return kitti_pretrain_infos, kitti_test_infos, kitti_labeled_infos, kitti_unlabeled_infos
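# Minimal sketch (added, not in the original file) of the index-based split used
# when RANDOM_SAMPLE_ID_PATH is given: labeled frames come from the sampled ids
# and every remaining train frame becomes unlabeled.
def _semi_split_example():
    train_infos = [{'idx': i} for i in range(10)]
    sampled_id = np.array([0, 3, 7])
    labeled = [train_infos[i] for i in sampled_id]
    unlabeled = [train_infos[i] for i in range(len(train_infos)) if i not in sampled_id]
    assert len(labeled) == 3 and len(unlabeled) == 7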
class KittiSemiDataset(SemiDatasetTemplate):
"""Petrel Ceph storage backend.
3DTrans supports the reading and writing data from Ceph
Usage:
self.oss_path = 's3://path/of/KITTI'
'~/.petreloss.conf': A config file of Ceph, saving the KEY/ACCESS_KEY of S3 Ceph
"""
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
if self.oss_path is not None:
from petrel_client.client import Client
self.client = Client('~/.petreloss.conf')
if self.split != 'test':
self.root_split_path = os.path.join(self.oss_path, 'training')
else:
self.root_split_path = os.path.join(self.oss_path, 'testing')
else:
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
self.kitti_infos = infos
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
if self.oss_path is None:
lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
assert lidar_file.exists()
points = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
else:
lidar_file = os.path.join(self.root_split_path, 'velodyne', ('%s.bin' % idx))
sdk_local_bytes = self.client.get(lidar_file, update_cache=True)
points = np.frombuffer(sdk_local_bytes, dtype=np.float32).reshape(-1, 4).copy()
return points
def get_image(self, idx):
"""
Loads image for a sample
Args:
            idx: str, Sample index
Returns:
image: (H, W, 3), RGB Image
"""
        from skimage import io  # shadow the std-lib io imported at module level
        img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
        assert img_file.exists()
        image = io.imread(img_file)
image = image.astype(np.float32)
image /= 255.0
return image
def get_image_shape(self, idx):
from skimage import io
img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
assert img_file.exists()
return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
def get_label(self, idx):
label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
assert label_file.exists()
return object3d_kitti.get_objects_from_label(label_file)
def get_depth_map(self, idx):
"""
Loads depth map for a sample
Args:
idx: str, Sample index
Returns:
depth: (H, W), Depth map
"""
        from skimage import io  # shadow the std-lib io imported at module level
        depth_file = self.root_split_path / 'depth_2' / ('%s.png' % idx)
        assert depth_file.exists()
        depth = io.imread(depth_file)
depth = depth.astype(np.float32)
depth /= 256.0
return depth
def get_calib(self, idx):
if self.oss_path is None:
calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx)
assert calib_file.exists()
calibrated_res = calibration_kitti.Calibration(calib_file, False)
else:
calib_file = os.path.join(self.root_split_path, 'calib', ('%s.txt' % idx))
text_bytes = self.client.get(calib_file, update_cache=True)
text_bytes = text_bytes.decode('utf-8')
calibrated_res = calibration_kitti.Calibration(io.StringIO(text_bytes), True)
return calibrated_res
def get_road_plane(self, idx):
if self.oss_path is None:
plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
if not plane_file.exists():
return None
with open(plane_file, 'r') as f:
lines = f.readlines()
else:
plane_file = os.path.join(self.root_split_path, 'planes', ('%s.txt' % idx))
text_bytes = self.client.get(plane_file, update_cache=True)
text_bytes = text_bytes.decode('utf-8')
lines = io.StringIO(text_bytes).readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
        # Ensure the normal always faces up; the plane is in rectified camera coordinates
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib, margin=0):
"""
Args:
pts_rect:
img_shape:
calib:
margin:
Returns:
"""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0 - margin, pts_img[:, 0] < img_shape[1] + margin)
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0 - margin, pts_img[:, 1] < img_shape[0] + margin)
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
#@staticmethod
def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
}
return ret_dict
def generate_single_sample_dict(batch_index, box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
calib = batch_dict['calib'][batch_index]
image_shape = batch_dict['image_shape'][batch_index].cpu().numpy()
if self.dataset_cfg.get('SHIFT_COOR', None):
#print ("*******WARNING FOR SHIFT_COOR:", self.dataset_cfg.SHIFT_COOR)
pred_boxes[:, 0:3] -= self.dataset_cfg.SHIFT_COOR
# BOX FILTER
if self.dataset_cfg.get('TEST', None) and self.dataset_cfg.TEST.BOX_FILTER['FOV_FILTER']:
box_preds_lidar_center = pred_boxes[:, 0:3]
pts_rect = calib.lidar_to_rect(box_preds_lidar_center)
fov_flag = self.get_fov_flag(pts_rect, image_shape, calib, margin=5)
pred_boxes = pred_boxes[fov_flag]
pred_labels = pred_labels[fov_flag]
pred_scores = pred_scores[fov_flag]
pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
pred_boxes_camera, calib, image_shape=image_shape
)
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
pred_dict['bbox'] = pred_boxes_img
pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
pred_dict['location'] = pred_boxes_camera[:, 0:3]
pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(index, box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
cur_det_file = output_path / ('%s.txt' % frame_id)
with open(cur_det_file, 'w') as f:
bbox = single_pred_dict['bbox']
loc = single_pred_dict['location']
dims = single_pred_dict['dimensions'] # lhw -> hwl
for idx in range(len(bbox)):
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
% (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
single_pred_dict['score'][idx]), file=f)
return annos
def evaluation(self, det_annos, class_names, **kwargs):
if 'annos' not in self.kitti_infos[0].keys():
return None, {}
from .kitti_object_eval_python import eval as kitti_eval
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.kitti_infos) * self.total_epochs
return len(self.kitti_infos)
def __getitem__(self, index):
# index = 4
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
calib = self.get_calib(sample_idx)
get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])
input_dict = {
'db_flag': "kitti",
'frame_id': sample_idx,
'calib': calib,
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='DontCare')
loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
gt_names = annos['name']
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
if self.dataset_cfg.get('SHIFT_COOR', None):
gt_boxes_lidar[:, 0:3] += self.dataset_cfg.SHIFT_COOR
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
if "gt_boxes2d" in get_item_list:
input_dict['gt_boxes2d'] = annos["bbox"]
if self.dataset_cfg.get('REMOVE_ORIGIN_GTS', None) and self.training:
input_dict['points'] = box_utils.remove_points_in_boxes3d(input_dict['points'], input_dict['gt_boxes'])
mask = np.zeros(gt_boxes_lidar.shape[0], dtype=np.bool_)
input_dict['gt_boxes'] = input_dict['gt_boxes'][mask]
input_dict['gt_names'] = input_dict['gt_names'][mask]
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
input_dict['gt_boxes'] = None
road_plane = self.get_road_plane(sample_idx)
if road_plane is not None:
input_dict['road_plane'] = road_plane
# for debug only
# gt_boxes_mask = np.array([n in self.class_names for n in input_dict['gt_names']], dtype=np.bool_)
# debug_dict = {'gt_boxes': copy.deepcopy(gt_boxes_lidar[gt_boxes_mask])}
if "points" in get_item_list:
points = self.get_lidar(sample_idx)
img_shape = info['image']['image_shape']
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict['points'] = points
if "images" in get_item_list:
input_dict['images'] = self.get_image(sample_idx)
if "depth_maps" in get_item_list:
input_dict['depth_maps'] = self.get_depth_map(sample_idx)
if "calib_matricies" in get_item_list:
input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)
        # load saved pseudo labels for unlabeled data
if self.dataset_cfg.get('USE_PSEUDO_LABEL', None) and self.training:
self.fill_pseudo_labels(input_dict)
data_dict = self.prepare_data(data_dict=input_dict)
data_dict['image_shape'] = img_shape
return data_dict
class KittiPretrainDataset(KittiSemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
calib = self.get_calib(sample_idx)
get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])
input_dict = {
'db_flag': "kitti",
'frame_id': sample_idx,
'calib': calib,
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='DontCare')
loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
gt_names = annos['name']
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
if self.dataset_cfg.get('SHIFT_COOR', None):
gt_boxes_lidar[:, 0:3] += self.dataset_cfg.SHIFT_COOR
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
if "gt_boxes2d" in get_item_list:
input_dict['gt_boxes2d'] = annos["bbox"]
if self.dataset_cfg.get('REMOVE_ORIGIN_GTS', None) and self.training:
input_dict['points'] = box_utils.remove_points_in_boxes3d(input_dict['points'], input_dict['gt_boxes'])
mask = np.zeros(gt_boxes_lidar.shape[0], dtype=np.bool_)
input_dict['gt_boxes'] = input_dict['gt_boxes'][mask]
input_dict['gt_names'] = input_dict['gt_names'][mask]
road_plane = self.get_road_plane(sample_idx)
if road_plane is not None:
input_dict['road_plane'] = road_plane
if "points" in get_item_list:
points = self.get_lidar(sample_idx)
img_shape = info['image']['image_shape']
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict['points'] = points
if "images" in get_item_list:
input_dict['images'] = self.get_image(sample_idx)
if "depth_maps" in get_item_list:
input_dict['depth_maps'] = self.get_depth_map(sample_idx)
if "calib_matricies" in get_item_list:
input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)
data_dict = self.prepare_data(data_dict=input_dict)
data_dict['image_shape'] = img_shape
return data_dict
class KittiLabeledDataset(KittiSemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
self.labeled_data_for = dataset_cfg.LABELED_DATA_FOR
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
calib = self.get_calib(sample_idx)
get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])
input_dict = {
'db_flag': "kitti",
'frame_id': sample_idx,
'calib': calib,
}
assert 'annos' in info
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='DontCare')
loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
gt_names = annos['name']
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
if self.dataset_cfg.get('SHIFT_COOR', None):
gt_boxes_lidar[:, 0:3] += self.dataset_cfg.SHIFT_COOR
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
if "gt_boxes2d" in get_item_list:
input_dict['gt_boxes2d'] = annos["bbox"]
if self.dataset_cfg.get('REMOVE_ORIGIN_GTS', None) and self.training:
input_dict['points'] = box_utils.remove_points_in_boxes3d(input_dict['points'], input_dict['gt_boxes'])
mask = np.zeros(gt_boxes_lidar.shape[0], dtype=np.bool_)
input_dict['gt_boxes'] = input_dict['gt_boxes'][mask]
input_dict['gt_names'] = input_dict['gt_names'][mask]
road_plane = self.get_road_plane(sample_idx)
if road_plane is not None:
input_dict['road_plane'] = road_plane
if "points" in get_item_list:
points = self.get_lidar(sample_idx)
img_shape = info['image']['image_shape']
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict['points'] = points
if "images" in get_item_list:
input_dict['images'] = self.get_image(sample_idx)
if "depth_maps" in get_item_list:
input_dict['depth_maps'] = self.get_depth_map(sample_idx)
if "calib_matricies" in get_item_list:
input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)
teacher_dict, student_dict = self.prepare_data_ssl(input_dict, output_dicts=self.labeled_data_for)
if teacher_dict is not None :
teacher_dict['image_shape'] = img_shape
if student_dict is not None:
student_dict['image_shape'] = img_shape
return tuple([teacher_dict, student_dict])
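# Note (added, not in the original file): the labeled/unlabeled datasets return
# a (teacher_dict, student_dict) tuple from prepare_data_ssl; either entry may
# be None depending on LABELED_DATA_FOR / UNLABELED_DATA_FOR, so a consumer
# should check both, e.g.:
#     teacher_dict, student_dict = dataset[idx]
#     if teacher_dict is not None:
#         ...  # feed the teacher branch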
class KittiUnlabeledDataset(KittiSemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is True
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
self.unlabeled_data_for = dataset_cfg.UNLABELED_DATA_FOR
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
calib = self.get_calib(sample_idx)
get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])
input_dict = {
'db_flag': "kitti",
'frame_id': sample_idx,
'calib': calib,
}
if "points" in get_item_list:
points = self.get_lidar(sample_idx)
img_shape = info['image']['image_shape']
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict['points'] = points
if "images" in get_item_list:
input_dict['images'] = self.get_image(sample_idx)
if "depth_maps" in get_item_list:
input_dict['depth_maps'] = self.get_depth_map(sample_idx)
if "calib_matricies" in get_item_list:
input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)
teacher_dict, student_dict = self.prepare_data_ssl(input_dict, output_dicts=self.unlabeled_data_for)
if teacher_dict is not None :
teacher_dict['image_shape'] = img_shape
if student_dict is not None:
student_dict['image_shape'] = img_shape
return tuple([teacher_dict, student_dict])
class KittiTestDataset(KittiSemiDataset):
def __init__(self, dataset_cfg, class_names, infos=None, training=False, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
assert training is False
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, infos=infos, training=training, root_path=root_path, logger=logger
)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
calib = self.get_calib(sample_idx)
get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])
input_dict = {
'db_flag': "kitti",
'frame_id': sample_idx,
'calib': calib,
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='DontCare')
loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
gt_names = annos['name']
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
if self.dataset_cfg.get('SHIFT_COOR', None):
gt_boxes_lidar[:, 0:3] += self.dataset_cfg.SHIFT_COOR
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
if "gt_boxes2d" in get_item_list:
input_dict['gt_boxes2d'] = annos["bbox"]
if self.dataset_cfg.get('REMOVE_ORIGIN_GTS', None) and self.training:
input_dict['points'] = box_utils.remove_points_in_boxes3d(input_dict['points'], input_dict['gt_boxes'])
mask = np.zeros(gt_boxes_lidar.shape[0], dtype=np.bool_)
input_dict['gt_boxes'] = input_dict['gt_boxes'][mask]
input_dict['gt_names'] = input_dict['gt_names'][mask]
road_plane = self.get_road_plane(sample_idx)
if road_plane is not None:
input_dict['road_plane'] = road_plane
if "points" in get_item_list:
points = self.get_lidar(sample_idx)
img_shape = info['image']['image_shape']
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
if self.dataset_cfg.get('SHIFT_COOR', None):
points[:, 0:3] += np.array(self.dataset_cfg.SHIFT_COOR, dtype=np.float32)
input_dict['points'] = points
if "images" in get_item_list:
input_dict['images'] = self.get_image(sample_idx)
if "depth_maps" in get_item_list:
input_dict['depth_maps'] = self.get_depth_map(sample_idx)
if "calib_matricies" in get_item_list:
input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib)
data_dict = self.prepare_data(data_dict=input_dict)
data_dict['image_shape'] = img_shape
return data_dict
| 33,193
| 41.124365
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_object_eval_python/rotate_iou.py
|
#####################
# Based on https://github.com/hongzhenwang/RRPN-revise
# Licensed under The MIT License
# Author: yanyan, scrin@foxmail.com
#####################
import math
import numba
import numpy as np
from numba import cuda
@numba.jit(nopython=True)
def div_up(m, n):
return m // n + (m % n > 0)
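# Quick sanity sketch (added, not in the original file): div_up is ceil
# division; it sizes the CUDA grid in rotate_iou_gpu_eval below.
def _div_up_example():
    assert div_up(65, 64) == 2 and div_up(64, 64) == 1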
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def trangle_area(a, b, c):
return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) *
(b[0] - c[0])) / 2.0
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
area_val = 0.0
for i in range(num_of_inter - 2):
area_val += abs(
trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4],
int_pts[2 * i + 4:2 * i + 6]))
return area_val
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def sort_vertex_in_convex_polygon(int_pts, num_of_inter):
if num_of_inter > 0:
center = cuda.local.array((2, ), dtype=numba.float32)
center[:] = 0.0
for i in range(num_of_inter):
center[0] += int_pts[2 * i]
center[1] += int_pts[2 * i + 1]
center[0] /= num_of_inter
center[1] /= num_of_inter
v = cuda.local.array((2, ), dtype=numba.float32)
vs = cuda.local.array((16, ), dtype=numba.float32)
for i in range(num_of_inter):
v[0] = int_pts[2 * i] - center[0]
v[1] = int_pts[2 * i + 1] - center[1]
d = math.sqrt(v[0] * v[0] + v[1] * v[1])
v[0] = v[0] / d
v[1] = v[1] / d
if v[1] < 0:
v[0] = -2 - v[0]
vs[i] = v[0]
j = 0
temp = 0
for i in range(1, num_of_inter):
if vs[i - 1] > vs[i]:
temp = vs[i]
tx = int_pts[2 * i]
ty = int_pts[2 * i + 1]
j = i
while j > 0 and vs[j - 1] > temp:
vs[j] = vs[j - 1]
int_pts[j * 2] = int_pts[j * 2 - 2]
int_pts[j * 2 + 1] = int_pts[j * 2 - 1]
j -= 1
vs[j] = temp
int_pts[j * 2] = tx
int_pts[j * 2 + 1] = ty
@cuda.jit(
'(float32[:], float32[:], int32, int32, float32[:])',
device=True,
inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
A = cuda.local.array((2, ), dtype=numba.float32)
B = cuda.local.array((2, ), dtype=numba.float32)
C = cuda.local.array((2, ), dtype=numba.float32)
D = cuda.local.array((2, ), dtype=numba.float32)
A[0] = pts1[2 * i]
A[1] = pts1[2 * i + 1]
B[0] = pts1[2 * ((i + 1) % 4)]
B[1] = pts1[2 * ((i + 1) % 4) + 1]
C[0] = pts2[2 * j]
C[1] = pts2[2 * j + 1]
D[0] = pts2[2 * ((j + 1) % 4)]
D[1] = pts2[2 * ((j + 1) % 4) + 1]
BA0 = B[0] - A[0]
BA1 = B[1] - A[1]
DA0 = D[0] - A[0]
CA0 = C[0] - A[0]
DA1 = D[1] - A[1]
CA1 = C[1] - A[1]
acd = DA1 * CA0 > CA1 * DA0
bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0])
if acd != bcd:
abc = CA1 * BA0 > BA1 * CA0
abd = DA1 * BA0 > BA1 * DA0
if abc != abd:
DC0 = D[0] - C[0]
DC1 = D[1] - C[1]
ABBA = A[0] * B[1] - B[0] * A[1]
CDDC = C[0] * D[1] - D[0] * C[1]
DH = BA1 * DC0 - BA0 * DC1
Dx = ABBA * DC0 - BA0 * CDDC
Dy = ABBA * DC1 - BA1 * CDDC
temp_pts[0] = Dx / DH
temp_pts[1] = Dy / DH
return True
return False
@cuda.jit(
'(float32[:], float32[:], int32, int32, float32[:])',
device=True,
inline=True)
def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts):
a = cuda.local.array((2, ), dtype=numba.float32)
b = cuda.local.array((2, ), dtype=numba.float32)
c = cuda.local.array((2, ), dtype=numba.float32)
d = cuda.local.array((2, ), dtype=numba.float32)
a[0] = pts1[2 * i]
a[1] = pts1[2 * i + 1]
b[0] = pts1[2 * ((i + 1) % 4)]
b[1] = pts1[2 * ((i + 1) % 4) + 1]
c[0] = pts2[2 * j]
c[1] = pts2[2 * j + 1]
d[0] = pts2[2 * ((j + 1) % 4)]
d[1] = pts2[2 * ((j + 1) % 4) + 1]
area_abc = trangle_area(a, b, c)
area_abd = trangle_area(a, b, d)
if area_abc * area_abd >= 0:
return False
area_cda = trangle_area(c, d, a)
area_cdb = area_cda + area_abc - area_abd
if area_cda * area_cdb >= 0:
return False
t = area_cda / (area_abd - area_abc)
dx = t * (b[0] - a[0])
dy = t * (b[1] - a[1])
temp_pts[0] = a[0] + dx
temp_pts[1] = a[1] + dy
return True
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
ab0 = corners[2] - corners[0]
ab1 = corners[3] - corners[1]
ad0 = corners[6] - corners[0]
ad1 = corners[7] - corners[1]
ap0 = pt_x - corners[0]
ap1 = pt_y - corners[1]
abab = ab0 * ab0 + ab1 * ab1
abap = ab0 * ap0 + ab1 * ap1
adad = ad0 * ad0 + ad1 * ad1
adap = ad0 * ap0 + ad1 * ap1
return abab >= abap and abap >= 0 and adad >= adap and adap >= 0
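# Note (added): point_in_quadrilateral projects AP onto the edge vectors AB and
# AD and checks 0 <= AB.AP <= AB.AB and 0 <= AD.AP <= AD.AD, the standard
# point-in-rectangle test for a (possibly rotated) box.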
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def quadrilateral_intersection(pts1, pts2, int_pts):
num_of_inter = 0
for i in range(4):
if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2):
int_pts[num_of_inter * 2] = pts1[2 * i]
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]
num_of_inter += 1
if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1):
int_pts[num_of_inter * 2] = pts2[2 * i]
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]
num_of_inter += 1
temp_pts = cuda.local.array((2, ), dtype=numba.float32)
for i in range(4):
for j in range(4):
has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)
if has_pts:
int_pts[num_of_inter * 2] = temp_pts[0]
int_pts[num_of_inter * 2 + 1] = temp_pts[1]
num_of_inter += 1
return num_of_inter
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
    # generate clockwise corners and rotate them clockwise
angle = rbbox[4]
a_cos = math.cos(angle)
a_sin = math.sin(angle)
center_x = rbbox[0]
center_y = rbbox[1]
x_d = rbbox[2]
y_d = rbbox[3]
corners_x = cuda.local.array((4, ), dtype=numba.float32)
corners_y = cuda.local.array((4, ), dtype=numba.float32)
corners_x[0] = -x_d / 2
corners_x[1] = -x_d / 2
corners_x[2] = x_d / 2
corners_x[3] = x_d / 2
corners_y[0] = -y_d / 2
corners_y[1] = y_d / 2
corners_y[2] = y_d / 2
corners_y[3] = -y_d / 2
for i in range(4):
corners[2 *
i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x
corners[2 * i
+ 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def inter(rbbox1, rbbox2):
corners1 = cuda.local.array((8, ), dtype=numba.float32)
corners2 = cuda.local.array((8, ), dtype=numba.float32)
intersection_corners = cuda.local.array((16, ), dtype=numba.float32)
rbbox_to_corners(corners1, rbbox1)
rbbox_to_corners(corners2, rbbox2)
num_intersection = quadrilateral_intersection(corners1, corners2,
intersection_corners)
sort_vertex_in_convex_polygon(intersection_corners, num_intersection)
# print(intersection_corners.reshape([-1, 2])[:num_intersection])
return area(intersection_corners, num_intersection)
@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True)
def devRotateIoUEval(rbox1, rbox2, criterion=-1):
area1 = rbox1[2] * rbox1[3]
area2 = rbox2[2] * rbox2[3]
area_inter = inter(rbox1, rbox2)
if criterion == -1:
return area_inter / (area1 + area2 - area_inter)
elif criterion == 0:
return area_inter / area1
elif criterion == 1:
return area_inter / area2
else:
return area_inter
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1):
threadsPerBlock = 8 * 8
row_start = cuda.blockIdx.x
col_start = cuda.blockIdx.y
tx = cuda.threadIdx.x
row_size = min(N - row_start * threadsPerBlock, threadsPerBlock)
col_size = min(K - col_start * threadsPerBlock, threadsPerBlock)
block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
dev_query_box_idx = threadsPerBlock * col_start + tx
dev_box_idx = threadsPerBlock * row_start + tx
if (tx < col_size):
block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0]
block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1]
block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2]
block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3]
block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4]
if (tx < row_size):
block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0]
block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1]
block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2]
block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3]
block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4]
cuda.syncthreads()
if tx < row_size:
for i in range(col_size):
offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i
dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5],
block_boxes[tx * 5:tx * 5 + 5], criterion)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
"""rotated box iou running in gpu. 500x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/pcdet/rotation).
Args:
boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
angles(clockwise when positive)
query_boxes (float tensor: [K, 5]): [description]
device_id (int, optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
box_dtype = boxes.dtype
boxes = boxes.astype(np.float32)
query_boxes = query_boxes.astype(np.float32)
N = boxes.shape[0]
K = query_boxes.shape[0]
iou = np.zeros((N, K), dtype=np.float32)
if N == 0 or K == 0:
return iou
threadsPerBlock = 8 * 8
cuda.select_device(device_id)
blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
iou_dev = cuda.to_device(iou.reshape([-1]), stream)
rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](
N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
    return iou.astype(box_dtype)
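# Minimal usage sketch for rotate_iou_gpu_eval (added for illustration; assumes
# a CUDA-capable GPU is available). Box format is [center_x, center_y, dim_x,
# dim_y, angle]; the two unit squares below share half of each area, so
# IoU = 0.5 / 1.5 = 1/3.
def _example_rotate_iou_gpu_eval():
    boxes = np.array([[0.0, 0.0, 1.0, 1.0, 0.0]], dtype=np.float32)
    query_boxes = np.array([[0.5, 0.0, 1.0, 1.0, 0.0]], dtype=np.float32)
    print(rotate_iou_gpu_eval(boxes, query_boxes))  # approximately [[0.3333]]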
| 11,552
| 33.903323
| 95
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py
|
import time
import fire
from . import kitti_common as kitti
from .eval import get_coco_eval_result, get_official_eval_result
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def evaluate(label_path,
result_path,
label_split_file,
current_class=0,
coco=False,
score_thresh=-1):
dt_annos = kitti.get_label_annos(result_path)
if score_thresh > 0:
dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
val_image_ids = _read_imageset_file(label_split_file)
gt_annos = kitti.get_label_annos(label_path, val_image_ids)
if coco:
return get_coco_eval_result(gt_annos, dt_annos, current_class)
else:
return get_official_eval_result(gt_annos, dt_annos, current_class)
if __name__ == '__main__':
fire.Fire()
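# Example invocation through the fire CLI above (all paths are hypothetical and
# must point at real KITTI-format label/result folders):
#   python -m pcdet.datasets.kitti.kitti_object_eval_python.evaluate evaluate \
#       /data/kitti/training/label_2 /output/results /data/kitti/ImageSets/val.txt \
#       --current_class=0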
| 909
| 25.764706
| 74
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py
|
import concurrent.futures as futures
import os
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def get_image_index_str(img_idx):
return "{:06d}".format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type='image_2',
file_tail='.png',
training=True,
relative_path=True):
img_idx_str = get_image_index_str(idx)
img_idx_str += file_tail
prefix = pathlib.Path(prefix)
if training:
file_path = pathlib.Path('training') / info_type / img_idx_str
else:
file_path = pathlib.Path('testing') / info_type / img_idx_str
if not (prefix / file_path).exists():
raise ValueError("file not exist: {}".format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
def get_image_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'image_2', '.png', training,
relative_path)
def get_label_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training,
relative_path)
def get_velodyne_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
relative_path)
def get_calib_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
relative_path)
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
# image_infos = []
root_path = pathlib.Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
image_info = {'image_idx': idx}
annotations = None
if velodyne:
image_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['img_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['img_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['img_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array(
[float(info) for info in lines[0].split(' ')[1:13]]).reshape(
[3, 4])
P1 = np.array(
[float(info) for info in lines[1].split(' ')[1:13]]).reshape(
[3, 4])
P2 = np.array(
[float(info) for info in lines[2].split(' ')[1:13]]).reshape(
[3, 4])
P3 = np.array(
[float(info) for info in lines[3].split(' ')[1:13]]).reshape(
[3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
image_info['calib/P0'] = P0
image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
image_info['calib/P3'] = P3
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
if annotations is not None:
image_info['annos'] = annotations
add_difficulty_to_annos(image_info)
return image_info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
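# Minimal usage sketch for get_kitti_image_info (added for illustration;
# '/data/kitti' is a hypothetical dataset root laid out as
# training/{image_2,label_2,velodyne,calib}).
def _example_get_kitti_image_info():
    infos = get_kitti_image_info(
        '/data/kitti', training=True, label_info=True, velodyne=True,
        calib=True, image_ids=[0, 1], num_worker=2)
    print(infos[0]['img_shape'], infos[0]['calib/P2'].shape)  # e.g. [ 375 1242] (4, 4)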
def filter_kitti_anno(image_anno,
used_classes,
used_difficulty=None,
dontcare_iou=None):
if not isinstance(used_classes, (list, tuple)):
used_classes = [used_classes]
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(image_anno['name']) if x in used_classes
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
if used_difficulty is not None:
relevant_annotation_indices = [
i for i, x in enumerate(img_filtered_annotations['difficulty'])
if x in used_difficulty
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
img_filtered_annotations[key][relevant_annotation_indices])
if 'DontCare' in used_classes and dontcare_iou is not None:
dont_care_indices = [
i for i, x in enumerate(img_filtered_annotations['name'])
if x == 'DontCare'
]
# bounding box format [y_min, x_min, y_max, x_max]
all_boxes = img_filtered_annotations['bbox']
ious = iou(all_boxes, all_boxes[dont_care_indices])
# Remove all bounding boxes that overlap with a dontcare region.
if ious.size > 0:
boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou
for key in image_anno.keys():
img_filtered_annotations[key] = (img_filtered_annotations[key][
np.logical_not(boxes_to_remove)])
return img_filtered_annotations
def filter_annos_low_score(image_annos, thresh):
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(anno['score']) if s >= thresh
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def kitti_result_line(result_dict, precision=4):
prec_float = "{" + ":.{}f".format(precision) + "}"
res_line = []
all_field_default = OrderedDict([
('name', None),
('truncated', -1),
('occluded', -1),
('alpha', -10),
('bbox', None),
('dimensions', [-1, -1, -1]),
('location', [-1000, -1000, -1000]),
('rotation_y', -10),
('score', None),
])
res_dict = [(key, None) for key, val in all_field_default.items()]
res_dict = OrderedDict(res_dict)
for key, val in result_dict.items():
if all_field_default[key] is None and val is None:
raise ValueError("you must specify a value for {}".format(key))
res_dict[key] = val
for key, val in res_dict.items():
if key == 'name':
res_line.append(val)
elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append(prec_float.format(val))
elif key == 'occluded':
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append('{}'.format(val))
elif key in ['bbox', 'dimensions', 'location']:
if val is None:
res_line += [str(v) for v in all_field_default[key]]
else:
res_line += [prec_float.format(v) for v in val]
else:
raise ValueError("unknown key. supported key:{}".format(
res_dict.keys()))
return ' '.join(res_line)
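# Minimal usage sketch for kitti_result_line (added for illustration): 'name',
# 'bbox' and 'score' have no defaults above, so they must be supplied; every
# other field falls back to its KITTI placeholder value.
def _example_kitti_result_line():
    line = kitti_result_line({
        'name': 'Car',
        'bbox': [710.44, 144.00, 820.29, 307.49],
        'score': 0.99,
    })
    print(line)  # 'Car -1 -1 -10 710.4400 144.0000 820.2900 307.4900 -1 -1 -1 -1000 -1000 -1000 -10 0.9900'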
def add_difficulty_to_annos(info):
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for eval_utils
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for eval_utils
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
i = 0
for h, o, t in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos["difficulty"] = np.array(diff, np.int32)
return diff
def get_label_anno(label_path):
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
annotations['name'] = np.array([x[0] for x in content])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array(
[[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
# dimensions will convert hwl format to standard lhw(camera) format.
annotations['dimensions'] = np.array(
[[float(info) for info in x[8:11]] for x in content]).reshape(
-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array(
[[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array(
[float(x[14]) for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros([len(annotations['bbox'])])
return annotations
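# Minimal usage sketch for get_label_anno (added for illustration): parse a
# single KITTI label line written to a temporary file.
def _example_get_label_anno():
    import tempfile
    line = 'Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59'
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write(line)
    anno = get_label_anno(f.name)
    os.remove(f.name)
    print(anno['name'], anno['dimensions'])  # ['Car'] [[3.64 1.65 1.67]]  (hwl -> lhw)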
def get_label_annos(label_folder, image_ids=None):
if image_ids is None:
filepaths = pathlib.Path(label_folder).glob('*.txt')
        prog = re.compile(r'^\d{6}\.txt$')
filepaths = filter(lambda f: prog.match(f.name), filepaths)
image_ids = [int(p.stem) for p in filepaths]
image_ids = sorted(image_ids)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
annos = []
label_folder = pathlib.Path(label_folder)
for idx in image_ids:
image_idx = get_image_index_str(idx)
label_filename = label_folder / (image_idx + '.txt')
annos.append(get_label_anno(label_filename))
return annos
def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
        a numpy array with shape [N] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] + 1.0)
else:
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
        a numpy array with shape [N, M] representing pairwise intersection areas
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
if add1:
all_pairs_min_ymax += 1.0
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
if add1:
all_pairs_min_xmax += 1.0
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2, add1)
area1 = area(boxes1, add1)
area2 = area(boxes2, add1)
union = np.expand_dims(
area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
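# Minimal usage sketch for the overlap helpers above (added for illustration);
# boxes follow the [y_min, x_min, y_max, x_max] convention noted in
# filter_kitti_anno.
def _example_iou():
    boxes1 = np.array([[0., 0., 2., 2.]])
    boxes2 = np.array([[1., 1., 3., 3.]])
    print(iou(boxes1, boxes2))  # [[0.14285714]]  (intersection 1 / union 7)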
| 15,309
| 36.070218
| 79
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_object_eval_python/eval.py
|
import io as sysio
import numba
import numpy as np
from .rotate_iou import rotate_iou_gpu_eval
@numba.jit
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
scores.sort()
scores = scores[::-1]
current_recall = 0
thresholds = []
for i, score in enumerate(scores):
l_recall = (i + 1) / num_gt
if i < (len(scores) - 1):
r_recall = (i + 2) / num_gt
else:
r_recall = l_recall
if (((r_recall - current_recall) < (current_recall - l_recall))
and (i < (len(scores) - 1))):
continue
# recall = l_recall
thresholds.append(score)
current_recall += 1 / (num_sample_pts - 1.0)
return thresholds
def clean_data(gt_anno, dt_anno, current_class, difficulty):
CLASS_NAMES = ['car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'truck']
MIN_HEIGHT = [40, 25, 25]
MAX_OCCLUSION = [0, 1, 2]
MAX_TRUNCATION = [0.15, 0.3, 0.5]
dc_bboxes, ignored_gt, ignored_dt = [], [], []
current_cls_name = CLASS_NAMES[current_class].lower()
num_gt = len(gt_anno["name"])
num_dt = len(dt_anno["name"])
num_valid_gt = 0
for i in range(num_gt):
bbox = gt_anno["bbox"][i]
gt_name = gt_anno["name"][i].lower()
height = bbox[3] - bbox[1]
valid_class = -1
if (gt_name == current_cls_name):
valid_class = 1
elif (current_cls_name == "Pedestrian".lower()
and "Person_sitting".lower() == gt_name):
valid_class = 0
elif (current_cls_name == "Car".lower() and "Van".lower() == gt_name):
valid_class = 0
else:
valid_class = -1
ignore = False
if ((gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty])
or (gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty])
or (height <= MIN_HEIGHT[difficulty])):
# if gt_anno["difficulty"][i] > difficulty or gt_anno["difficulty"][i] == -1:
ignore = True
if valid_class == 1 and not ignore:
ignored_gt.append(0)
num_valid_gt += 1
elif (valid_class == 0 or (ignore and (valid_class == 1))):
ignored_gt.append(1)
else:
ignored_gt.append(-1)
# for i in range(num_gt):
if gt_anno["name"][i] == "DontCare":
dc_bboxes.append(gt_anno["bbox"][i])
for i in range(num_dt):
if (dt_anno["name"][i].lower() == current_cls_name):
valid_class = 1
else:
valid_class = -1
height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
if height < MIN_HEIGHT[difficulty]:
ignored_dt.append(1)
elif valid_class == 1:
ignored_dt.append(0)
else:
ignored_dt.append(-1)
return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
@numba.jit(nopython=True)
def image_box_overlap(boxes, query_boxes, criterion=-1):
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
(query_boxes[k, 3] - query_boxes[k, 1]))
for n in range(N):
iw = (min(boxes[n, 2], query_boxes[k, 2]) -
max(boxes[n, 0], query_boxes[k, 0]))
if iw > 0:
ih = (min(boxes[n, 3], query_boxes[k, 3]) -
max(boxes[n, 1], query_boxes[k, 1]))
if ih > 0:
if criterion == -1:
ua = (
(boxes[n, 2] - boxes[n, 0]) *
(boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
elif criterion == 0:
ua = ((boxes[n, 2] - boxes[n, 0]) *
(boxes[n, 3] - boxes[n, 1]))
elif criterion == 1:
ua = qbox_area
else:
ua = 1.0
overlaps[n, k] = iw * ih / ua
return overlaps
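# Minimal usage sketch for image_box_overlap (added for illustration): two unit
# squares in [x1, y1, x2, y2] form overlapping on a quarter of their area.
def _example_image_box_overlap():
    boxes = np.array([[0., 0., 1., 1.]])
    query_boxes = np.array([[0.5, 0.5, 1.5, 1.5]])
    print(image_box_overlap(boxes, query_boxes))  # [[0.14285714]]  (0.25 / 1.75)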
def bev_box_overlap(boxes, qboxes, criterion=-1):
riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
return riou
@numba.jit(nopython=True, parallel=True)
def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):
    # ONLY supports overlap in CAMERA coordinates, not LiDAR.
N, K = boxes.shape[0], qboxes.shape[0]
for i in range(N):
for j in range(K):
if rinc[i, j] > 0:
# iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] +
# qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1]))
iw = (min(boxes[i, 1], qboxes[j, 1]) - max(
boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4]))
if iw > 0:
area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
inc = iw * rinc[i, j]
if criterion == -1:
ua = (area1 + area2 - inc)
elif criterion == 0:
ua = area1
elif criterion == 1:
ua = area2
else:
ua = inc
rinc[i, j] = inc / ua
else:
rinc[i, j] = 0.0
def d3_box_overlap(boxes, qboxes, criterion=-1):
rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]],
qboxes[:, [0, 2, 3, 5, 6]], 2)
d3_box_overlap_kernel(boxes, qboxes, rinc, criterion)
return rinc
@numba.jit(nopython=True)
def compute_statistics_jit(overlaps,
gt_datas,
dt_datas,
ignored_gt,
ignored_det,
dc_bboxes,
metric,
min_overlap,
thresh=0,
compute_fp=False,
compute_aos=False):
det_size = dt_datas.shape[0]
gt_size = gt_datas.shape[0]
dt_scores = dt_datas[:, -1]
dt_alphas = dt_datas[:, 4]
gt_alphas = gt_datas[:, 4]
dt_bboxes = dt_datas[:, :4]
gt_bboxes = gt_datas[:, :4]
assigned_detection = [False] * det_size
ignored_threshold = [False] * det_size
if compute_fp:
for i in range(det_size):
if (dt_scores[i] < thresh):
ignored_threshold[i] = True
NO_DETECTION = -10000000
tp, fp, fn, similarity = 0, 0, 0, 0
# thresholds = [0.0]
# delta = [0.0]
thresholds = np.zeros((gt_size, ))
thresh_idx = 0
delta = np.zeros((gt_size, ))
delta_idx = 0
for i in range(gt_size):
if ignored_gt[i] == -1:
continue
det_idx = -1
valid_detection = NO_DETECTION
max_overlap = 0
assigned_ignored_det = False
for j in range(det_size):
if (ignored_det[j] == -1):
continue
if (assigned_detection[j]):
continue
if (ignored_threshold[j]):
continue
overlap = overlaps[j, i]
dt_score = dt_scores[j]
if (not compute_fp and (overlap > min_overlap)
and dt_score > valid_detection):
det_idx = j
valid_detection = dt_score
elif (compute_fp and (overlap > min_overlap)
and (overlap > max_overlap or assigned_ignored_det)
and ignored_det[j] == 0):
max_overlap = overlap
det_idx = j
valid_detection = 1
assigned_ignored_det = False
elif (compute_fp and (overlap > min_overlap)
and (valid_detection == NO_DETECTION)
and ignored_det[j] == 1):
det_idx = j
valid_detection = 1
assigned_ignored_det = True
if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
fn += 1
elif ((valid_detection != NO_DETECTION)
and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
assigned_detection[det_idx] = True
elif valid_detection != NO_DETECTION:
tp += 1
# thresholds.append(dt_scores[det_idx])
thresholds[thresh_idx] = dt_scores[det_idx]
thresh_idx += 1
if compute_aos:
# delta.append(gt_alphas[i] - dt_alphas[det_idx])
delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
delta_idx += 1
assigned_detection[det_idx] = True
if compute_fp:
for i in range(det_size):
if (not (assigned_detection[i] or ignored_det[i] == -1
or ignored_det[i] == 1 or ignored_threshold[i])):
fp += 1
nstuff = 0
if metric == 0:
overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
for i in range(dc_bboxes.shape[0]):
for j in range(det_size):
if (assigned_detection[j]):
continue
if (ignored_det[j] == -1 or ignored_det[j] == 1):
continue
if (ignored_threshold[j]):
continue
if overlaps_dt_dc[j, i] > min_overlap:
assigned_detection[j] = True
nstuff += 1
fp -= nstuff
if compute_aos:
tmp = np.zeros((fp + delta_idx, ))
# tmp = [0] * fp
for i in range(delta_idx):
tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
# tmp.append((1.0 + np.cos(delta[i])) / 2.0)
# assert len(tmp) == fp + tp
# assert len(delta) == tp
if tp > 0 or fp > 0:
similarity = np.sum(tmp)
else:
similarity = -1
return tp, fp, fn, similarity, thresholds[:thresh_idx]
def get_split_parts(num, num_part):
same_part = num // num_part
remain_num = num % num_part
if same_part == 0:
return [num]
if remain_num == 0:
return [same_part] * num_part
else:
return [same_part] * num_part + [remain_num]
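# For illustration: get_split_parts(10, 3) -> [3, 3, 3, 1]; get_split_parts(2, 5) -> [2].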
@numba.jit(nopython=True)
def fused_compute_statistics(overlaps,
pr,
gt_nums,
dt_nums,
dc_nums,
gt_datas,
dt_datas,
dontcares,
ignored_gts,
ignored_dets,
metric,
min_overlap,
thresholds,
compute_aos=False):
gt_num = 0
dt_num = 0
dc_num = 0
for i in range(gt_nums.shape[0]):
for t, thresh in enumerate(thresholds):
overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:
gt_num + gt_nums[i]]
gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
tp, fp, fn, similarity, _ = compute_statistics_jit(
overlap,
gt_data,
dt_data,
ignored_gt,
ignored_det,
dontcare,
metric,
min_overlap=min_overlap,
thresh=thresh,
compute_fp=True,
compute_aos=compute_aos)
pr[t, 0] += tp
pr[t, 1] += fp
pr[t, 2] += fn
if similarity != -1:
pr[t, 3] += similarity
gt_num += gt_nums[i]
dt_num += dt_nums[i]
dc_num += dc_nums[i]
def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):
"""fast iou algorithm. this function can be used independently to
do result analysis. Must be used in CAMERA coordinate system.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
metric: eval type. 0: bbox, 1: bev, 2: 3d
num_parts: int. a parameter for fast calculate algorithm
"""
assert len(gt_annos) == len(dt_annos)
total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
parted_overlaps = []
example_idx = 0
for num_part in split_parts:
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
if metric == 0:
gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
overlap_part = image_box_overlap(gt_boxes, dt_boxes)
elif metric == 1:
loc = np.concatenate(
[a["location"][:, [0, 2]] for a in gt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, [0, 2]] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
loc = np.concatenate(
[a["location"][:, [0, 2]] for a in dt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, [0, 2]] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(
np.float64)
elif metric == 2:
loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype(
np.float64)
else:
raise ValueError("unknown metric")
parted_overlaps.append(overlap_part)
example_idx += num_part
overlaps = []
example_idx = 0
for j, num_part in enumerate(split_parts):
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
gt_num_idx, dt_num_idx = 0, 0
for i in range(num_part):
gt_box_num = total_gt_num[example_idx + i]
dt_box_num = total_dt_num[example_idx + i]
overlaps.append(
parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,
dt_num_idx:dt_num_idx + dt_box_num])
gt_num_idx += gt_box_num
dt_num_idx += dt_box_num
example_idx += num_part
return overlaps, parted_overlaps, total_gt_num, total_dt_num
def _prepare_data(gt_annos, dt_annos, current_class, difficulty):
gt_datas_list = []
dt_datas_list = []
total_dc_num = []
ignored_gts, ignored_dets, dontcares = [], [], []
total_num_valid_gt = 0
for i in range(len(gt_annos)):
rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty)
num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets
ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
ignored_dets.append(np.array(ignored_det, dtype=np.int64))
if len(dc_bboxes) == 0:
dc_bboxes = np.zeros((0, 4)).astype(np.float64)
else:
dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
total_dc_num.append(dc_bboxes.shape[0])
dontcares.append(dc_bboxes)
total_num_valid_gt += num_valid_gt
gt_datas = np.concatenate(
[gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1)
dt_datas = np.concatenate([
dt_annos[i]["bbox"], dt_annos[i]["alpha"][..., np.newaxis],
dt_annos[i]["score"][..., np.newaxis]
], 1)
gt_datas_list.append(gt_datas)
dt_datas_list.append(dt_datas)
total_dc_num = np.stack(total_dc_num, axis=0)
return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
total_dc_num, total_num_valid_gt)
def eval_class(gt_annos,
dt_annos,
current_classes,
difficultys,
metric,
min_overlaps,
compute_aos=False,
num_parts=100):
"""Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist
difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard
metric: eval type. 0: bbox, 1: bev, 2: 3d
min_overlaps: float, min overlap. format: [num_overlap, metric, class].
num_parts: int. a parameter for fast calculate algorithm
Returns:
dict of recall, precision and aos
"""
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)
overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for m, current_class in enumerate(current_classes):
for l, difficulty in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
dontcares, total_dc_num, total_num_valid_gt) = rets
for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(
overlaps[i],
gt_datas_list[i],
dt_datas_list[i],
ignored_gts[i],
ignored_dets[i],
dontcares[i],
metric,
min_overlap=min_overlap,
thresh=0.0,
compute_fp=False)
tp, fp, fn, similarity, thresholds = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for j, num_part in enumerate(split_parts):
gt_datas_part = np.concatenate(
gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(
dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(
dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(
ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(
ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(
parted_overlaps[j],
pr,
total_gt_num[idx:idx + num_part],
total_dt_num[idx:idx + num_part],
total_dc_num[idx:idx + num_part],
gt_datas_part,
dt_datas_part,
dc_datas_part,
ignored_gts_part,
ignored_dets_part,
metric,
min_overlap=min_overlap,
thresholds=thresholds,
compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(
precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {
"recall": recall,
"precision": precision,
"orientation": aos,
}
return ret_dict
def get_mAP(prec):
sums = 0
for i in range(0, prec.shape[-1], 4):
sums = sums + prec[..., i]
return sums / 11 * 100
def get_mAP_R40(prec):
sums = 0
for i in range(1, prec.shape[-1]):
sums = sums + prec[..., i]
return sums / 40 * 100
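# For illustration: get_mAP samples 11 of the 41 precision points (recall
# 0, 0.1, ..., 1.0) while get_mAP_R40 averages the last 40, so a perfectly
# precise detector scores 100 under both.
def _example_map():
    prec = np.ones([1, 1, 1, 41])  # [num_class, num_diff, num_minoverlap, N_SAMPLE_PTS]
    print(get_mAP(prec), get_mAP_R40(prec))  # [[[100.]]] [[[100.]]]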
def print_str(value, *arg, sstream=None):
if sstream is None:
sstream = sysio.StringIO()
sstream.truncate(0)
sstream.seek(0)
print(value, *arg, file=sstream)
return sstream.getvalue()
def do_eval(gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos=False,
PR_detail_dict=None):
# min_overlaps: [num_minoverlap, metric, num_class]
difficultys = [0, 1, 2]
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0,
min_overlaps, compute_aos)
# ret: [num_class, num_diff, num_minoverlap, num_sample_points]
mAP_bbox = get_mAP(ret["precision"])
mAP_bbox_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['bbox'] = ret['precision']
mAP_aos = mAP_aos_R40 = None
if compute_aos:
mAP_aos = get_mAP(ret["orientation"])
mAP_aos_R40 = get_mAP_R40(ret["orientation"])
if PR_detail_dict is not None:
PR_detail_dict['aos'] = ret['orientation']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,
min_overlaps)
mAP_bev = get_mAP(ret["precision"])
mAP_bev_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['bev'] = ret['precision']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,
min_overlaps)
mAP_3d = get_mAP(ret["precision"])
mAP_3d_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['3d'] = ret['precision']
return mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40
def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,
compute_aos):
# overlap_ranges: [range, metric, num_class]
min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
for i in range(overlap_ranges.shape[1]):
for j in range(overlap_ranges.shape[2]):
min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
    mAP_bbox, mAP_bev, mAP_3d, mAP_aos, _, _, _, _ = do_eval(
        gt_annos, dt_annos, current_classes, min_overlaps, compute_aos)
# ret: [num_class, num_diff, num_minoverlap]
mAP_bbox = mAP_bbox.mean(-1)
mAP_bev = mAP_bev.mean(-1)
mAP_3d = mAP_3d.mean(-1)
if mAP_aos is not None:
mAP_aos = mAP_aos.mean(-1)
return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def get_official_eval_result(gt_annos, dt_annos, current_classes, PR_detail_dict=None):
overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,
0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7],
[0.7, 0.5, 0.5, 0.7, 0.5, 0.7]])
overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7,
0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5],
[0.5, 0.25, 0.25, 0.5, 0.25, 0.5]])
    min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0)  # [2, 3, 6]
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
5: 'Truck'
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
min_overlaps = min_overlaps[:, :, current_classes]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
mAPbbox, mAPbev, mAP3d, mAPaos, mAPbbox_R40, mAPbev_R40, mAP3d_R40, mAPaos_R40 = do_eval(
gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, PR_detail_dict=PR_detail_dict)
ret_dict = {}
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
for i in range(min_overlaps.shape[0]):
result += print_str(
(f"{class_to_name[curcls]} "
"AP@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
result += print_str((f"bbox AP:{mAPbbox[j, 0, i]:.4f}, "
f"{mAPbbox[j, 1, i]:.4f}, "
f"{mAPbbox[j, 2, i]:.4f}"))
result += print_str((f"bev AP:{mAPbev[j, 0, i]:.4f}, "
f"{mAPbev[j, 1, i]:.4f}, "
f"{mAPbev[j, 2, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d[j, 0, i]:.4f}, "
f"{mAP3d[j, 1, i]:.4f}, "
f"{mAP3d[j, 2, i]:.4f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos[j, 0, i]:.2f}, "
f"{mAPaos[j, 1, i]:.2f}, "
f"{mAPaos[j, 2, i]:.2f}"))
# if i == 0:
# ret_dict['%s_aos/easy' % class_to_name[curcls]] = mAPaos[j, 0, 0]
# ret_dict['%s_aos/moderate' % class_to_name[curcls]] = mAPaos[j, 1, 0]
# ret_dict['%s_aos/hard' % class_to_name[curcls]] = mAPaos[j, 2, 0]
result += print_str(
(f"{class_to_name[curcls]} "
"AP_R40@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
result += print_str((f"bbox AP:{mAPbbox_R40[j, 0, i]:.4f}, "
f"{mAPbbox_R40[j, 1, i]:.4f}, "
f"{mAPbbox_R40[j, 2, i]:.4f}"))
result += print_str((f"bev AP:{mAPbev_R40[j, 0, i]:.4f}, "
f"{mAPbev_R40[j, 1, i]:.4f}, "
f"{mAPbev_R40[j, 2, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d_R40[j, 0, i]:.4f}, "
f"{mAP3d_R40[j, 1, i]:.4f}, "
f"{mAP3d_R40[j, 2, i]:.4f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos_R40[j, 0, i]:.2f}, "
f"{mAPaos_R40[j, 1, i]:.2f}, "
f"{mAPaos_R40[j, 2, i]:.2f}"))
if i == 0:
ret_dict['%s_aos/easy_R40' % class_to_name[curcls]] = mAPaos_R40[j, 0, 0]
ret_dict['%s_aos/moderate_R40' % class_to_name[curcls]] = mAPaos_R40[j, 1, 0]
ret_dict['%s_aos/hard_R40' % class_to_name[curcls]] = mAPaos_R40[j, 2, 0]
if i == 0:
# ret_dict['%s_3d/easy' % class_to_name[curcls]] = mAP3d[j, 0, 0]
# ret_dict['%s_3d/moderate' % class_to_name[curcls]] = mAP3d[j, 1, 0]
# ret_dict['%s_3d/hard' % class_to_name[curcls]] = mAP3d[j, 2, 0]
# ret_dict['%s_bev/easy' % class_to_name[curcls]] = mAPbev[j, 0, 0]
# ret_dict['%s_bev/moderate' % class_to_name[curcls]] = mAPbev[j, 1, 0]
# ret_dict['%s_bev/hard' % class_to_name[curcls]] = mAPbev[j, 2, 0]
# ret_dict['%s_image/easy' % class_to_name[curcls]] = mAPbbox[j, 0, 0]
# ret_dict['%s_image/moderate' % class_to_name[curcls]] = mAPbbox[j, 1, 0]
# ret_dict['%s_image/hard' % class_to_name[curcls]] = mAPbbox[j, 2, 0]
ret_dict['%s_3d/easy_R40' % class_to_name[curcls]] = mAP3d_R40[j, 0, 0]
ret_dict['%s_3d/moderate_R40' % class_to_name[curcls]] = mAP3d_R40[j, 1, 0]
ret_dict['%s_3d/hard_R40' % class_to_name[curcls]] = mAP3d_R40[j, 2, 0]
ret_dict['%s_bev/easy_R40' % class_to_name[curcls]] = mAPbev_R40[j, 0, 0]
ret_dict['%s_bev/moderate_R40' % class_to_name[curcls]] = mAPbev_R40[j, 1, 0]
ret_dict['%s_bev/hard_R40' % class_to_name[curcls]] = mAPbev_R40[j, 2, 0]
ret_dict['%s_image/easy_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 0, 0]
ret_dict['%s_image/moderate_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 1, 0]
ret_dict['%s_image/hard_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 2, 0]
return result, ret_dict
def get_coco_eval_result(gt_annos, dt_annos, current_classes):
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
}
class_to_range = {
0: [0.5, 0.95, 10],
1: [0.25, 0.7, 10],
2: [0.25, 0.7, 10],
3: [0.5, 0.95, 10],
4: [0.25, 0.7, 10],
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
overlap_ranges = np.zeros([3, 3, len(current_classes)])
for i, curcls in enumerate(current_classes):
overlap_ranges[:, :, i] = np.array(
class_to_range[curcls])[:, np.newaxis]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
result += print_str((f"{class_to_name[curcls]} "
"coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range)))
result += print_str((f"bbox AP:{mAPbbox[j, 0]:.2f}, "
f"{mAPbbox[j, 1]:.2f}, "
f"{mAPbbox[j, 2]:.2f}"))
result += print_str((f"bev AP:{mAPbev[j, 0]:.2f}, "
f"{mAPbev[j, 1]:.2f}, "
f"{mAPbev[j, 2]:.2f}"))
result += print_str((f"3d AP:{mAP3d[j, 0]:.2f}, "
f"{mAP3d[j, 1]:.2f}, "
f"{mAP3d[j, 2]:.2f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos[j, 0]:.2f}, "
f"{mAPaos[j, 1]:.2f}, "
f"{mAPaos[j, 2]:.2f}"))
return result
| 33,659
| 40.606922
| 102
|
py
|
3DTrans
|
3DTrans-master/pcdet/datasets/kitti/kitti_object_eval_python/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/utils/box_utils.py
|
import numpy as np
import scipy
import torch
import copy
from scipy.spatial import Delaunay
from ..ops.roiaware_pool3d import roiaware_pool3d_utils
from . import common_utils
def in_hull(p, hull):
"""
:param p: (N, K) test points
:param hull: (M, K) M corners of a box
:return (N) bool
"""
try:
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
flag = hull.find_simplex(p) >= 0
    except scipy.spatial.QhullError:
        print('Warning: not a hull %s' % str(hull))
        flag = np.zeros(p.shape[0], dtype=bool)
return flag
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
Returns:
"""
boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
template = boxes3d.new_tensor((
[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
[1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
)) / 2
corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
corners3d = common_utils.rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
corners3d += boxes3d[:, None, 0:3]
return corners3d.numpy() if is_numpy else corners3d
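# Minimal usage sketch for boxes_to_corners_3d (added for illustration): an
# axis-aligned 2 x 4 x 1 box centered at the origin.
def _example_boxes_to_corners_3d():
    boxes = np.array([[0., 0., 0., 2., 4., 1., 0.]])
    corners = boxes_to_corners_3d(boxes)
    print(corners.shape)  # (1, 8, 3)
    print(corners[0, 0])  # [ 1.   2.  -0.5]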
def mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1):
"""
Args:
boxes: (N, 7) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
limit_range: [minx, miny, minz, maxx, maxy, maxz]
min_num_corners:
Returns:
"""
if boxes.shape[1] > 7:
boxes = boxes[:, 0:7]
corners = boxes_to_corners_3d(boxes) # (N, 8, 3)
mask = ((corners >= limit_range[0:3]) & (corners <= limit_range[3:6])).all(axis=2)
mask = mask.sum(axis=1) >= min_num_corners # (N)
return mask
def remove_points_in_boxes3d(points, boxes3d):
"""
Args:
points: (num_points, 3 + C)
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps
Returns:
"""
boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
points, is_numpy = common_utils.check_numpy_to_torch(points)
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes3d)
points = points[point_masks.sum(dim=0) == 0]
return points.numpy() if is_numpy else points
def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib):
"""
Args:
boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
calib:
Returns:
boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
"""
boxes3d_camera_copy = copy.deepcopy(boxes3d_camera)
xyz_camera, r = boxes3d_camera_copy[:, 0:3], boxes3d_camera_copy[:, 6:7]
l, h, w = boxes3d_camera_copy[:, 3:4], boxes3d_camera_copy[:, 4:5], boxes3d_camera_copy[:, 5:6]
xyz_lidar = calib.rect_to_lidar(xyz_camera)
xyz_lidar[:, 2] += h[:, 0] / 2
return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
"""
Args:
boxes3d_fakelidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
Returns:
boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
"""
boxes3d_lidar_copy = copy.deepcopy(boxes3d_lidar)
w, l, h = boxes3d_lidar_copy[:, 3:4], boxes3d_lidar_copy[:, 4:5], boxes3d_lidar_copy[:, 5:6]
r = boxes3d_lidar_copy[:, 6:7]
boxes3d_lidar_copy[:, 2] += h[:, 0] / 2
return np.concatenate([boxes3d_lidar_copy[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_lidar_to_fakelidar(boxes3d_lidar):
"""
Args:
boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
Returns:
boxes3d_fakelidar: [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
"""
boxes3d_lidar_copy = copy.deepcopy(boxes3d_lidar)
dx, dy, dz = boxes3d_lidar_copy[:, 3:4], boxes3d_lidar_copy[:, 4:5], boxes3d_lidar_copy[:, 5:6]
heading = boxes3d_lidar_copy[:, 6:7]
boxes3d_lidar_copy[:, 2] -= dz[:, 0] / 2
return np.concatenate([boxes3d_lidar_copy[:, 0:3], dy, dx, dz, -heading - np.pi / 2], axis=-1)
def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)):
"""
Args:
boxes3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
extra_width: [extra_x, extra_y, extra_z]
Returns:
"""
boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
large_boxes3d = boxes3d.clone()
large_boxes3d[:, 3:6] += boxes3d.new_tensor(extra_width)[None, :]
return large_boxes3d
def boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib):
"""
:param boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
:param calib:
:return:
boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
"""
boxes3d_lidar_copy = copy.deepcopy(boxes3d_lidar)
xyz_lidar = boxes3d_lidar_copy[:, 0:3]
l, w, h = boxes3d_lidar_copy[:, 3:4], boxes3d_lidar_copy[:, 4:5], boxes3d_lidar_copy[:, 5:6]
r = boxes3d_lidar_copy[:, 6:7]
xyz_lidar[:, 2] -= h.reshape(-1) / 2
xyz_cam = calib.lidar_to_rect(xyz_lidar)
# xyz_cam[:, 1] += h.reshape(-1) / 2
r = -r - np.pi / 2
return np.concatenate([xyz_cam, l, h, w, r], axis=-1)
def boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True):
"""
:param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset
:param bottom_center: whether y is on the bottom center of object
:return: corners3d: (N, 8, 3)
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
"""
boxes_num = boxes3d.shape[0]
l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
x_corners = np.array([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2], dtype=np.float32).T
z_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dtype=np.float32).T
if bottom_center:
y_corners = np.zeros((boxes_num, 8), dtype=np.float32)
y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis=1) # (N, 8)
else:
y_corners = np.array([h / 2., h / 2., h / 2., h / 2., -h / 2., -h / 2., -h / 2., -h / 2.], dtype=np.float32).T
ry = boxes3d[:, 6]
zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32)
rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)],
[zeros, ones, zeros],
[np.sin(ry), zeros, np.cos(ry)]]) # (3, 3, N)
R_list = np.transpose(rot_list, (2, 0, 1)) # (N, 3, 3)
temp_corners = np.concatenate((x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1),
z_corners.reshape(-1, 8, 1)), axis=2) # (N, 8, 3)
rotated_corners = np.matmul(temp_corners, R_list) # (N, 8, 3)
x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2]
x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]
x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)
y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)
z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)
corners = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2)
return corners.astype(np.float32)
def boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None):
"""
:param boxes3d: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
:param calib:
:return:
box_2d_preds: (N, 4) [x1, y1, x2, y2]
"""
corners3d = boxes3d_to_corners3d_kitti_camera(boxes3d)
pts_img, _ = calib.rect_to_img(corners3d.reshape(-1, 3))
corners_in_image = pts_img.reshape(-1, 8, 2)
min_uv = np.min(corners_in_image, axis=1) # (N, 2)
max_uv = np.max(corners_in_image, axis=1) # (N, 2)
boxes2d_image = np.concatenate([min_uv, max_uv], axis=1)
if image_shape is not None:
boxes2d_image[:, 0] = np.clip(boxes2d_image[:, 0], a_min=0, a_max=image_shape[1] - 1)
boxes2d_image[:, 1] = np.clip(boxes2d_image[:, 1], a_min=0, a_max=image_shape[0] - 1)
boxes2d_image[:, 2] = np.clip(boxes2d_image[:, 2], a_min=0, a_max=image_shape[1] - 1)
boxes2d_image[:, 3] = np.clip(boxes2d_image[:, 3], a_min=0, a_max=image_shape[0] - 1)
return boxes2d_image
def boxes_iou_normal(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 4) [x1, y1, x2, y2]
boxes_b: (M, 4) [x1, y1, x2, y2]
Returns:
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 4
x_min = torch.max(boxes_a[:, 0, None], boxes_b[None, :, 0])
x_max = torch.min(boxes_a[:, 2, None], boxes_b[None, :, 2])
y_min = torch.max(boxes_a[:, 1, None], boxes_b[None, :, 1])
y_max = torch.min(boxes_a[:, 3, None], boxes_b[None, :, 3])
x_len = torch.clamp_min(x_max - x_min, min=0)
y_len = torch.clamp_min(y_max - y_min, min=0)
area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
a_intersect_b = x_len * y_len
iou = a_intersect_b / torch.clamp_min(area_a[:, None] + area_b[None, :] - a_intersect_b, min=1e-6)
return iou
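# Minimal usage sketch for boxes_iou_normal (added for illustration).
def _example_boxes_iou_normal():
    boxes_a = torch.tensor([[0., 0., 2., 2.]])
    boxes_b = torch.tensor([[1., 1., 3., 3.]])
    print(boxes_iou_normal(boxes_a, boxes_b))  # tensor([[0.1429]])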
def boxes3d_lidar_to_aligned_bev_boxes(boxes3d):
"""
Args:
boxes3d: (N, 7 + C) [x, y, z, dx, dy, dz, heading] in lidar coordinate
Returns:
aligned_bev_boxes: (N, 4) [x1, y1, x2, y2] in the above lidar coordinate
"""
rot_angle = common_utils.limit_period(boxes3d[:, 6], offset=0.5, period=np.pi).abs()
choose_dims = torch.where(rot_angle[:, None] < np.pi / 4, boxes3d[:, [3, 4]], boxes3d[:, [4, 3]])
aligned_bev_boxes = torch.cat((boxes3d[:, 0:2] - choose_dims / 2, boxes3d[:, 0:2] + choose_dims / 2), dim=1)
return aligned_bev_boxes
def boxes3d_nearest_bev_iou(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
Returns:
"""
boxes_bev_a = boxes3d_lidar_to_aligned_bev_boxes(boxes_a)
boxes_bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
return boxes_iou_normal(boxes_bev_a, boxes_bev_b)
def transform_boxes3d(boxes, pose):
"""
Args:
boxes: N * 9 x,y,z,dx,dy,dz,heading,vx,vy
pose:
Returns:
"""
center = boxes[:, :3]
center = np.concatenate([center, np.ones((center.shape[0], 1))], axis=-1)
center = center @ pose.T
heading = boxes[:, [6]] + np.arctan2(pose[1, 0], pose[0, 0])
velocity = boxes[:, 7:]
velocity = np.concatenate([velocity, np.zeros((velocity.shape[0], 1))], axis=-1)
velocity = velocity @ pose[:3, :3].T
return np.concatenate([center[:, :3], boxes[:, 3:6], heading, velocity[:, :2]], axis=-1)
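# Minimal usage sketch for transform_boxes3d (added for illustration): a
# 90-degree yaw rotation moves the center from +x to +y, adds pi/2 to the
# heading, and rotates the velocity vector accordingly.
def _example_transform_boxes3d():
    boxes = np.array([[1., 0., 0., 4., 2., 1.5, 0., 1., 0.]])
    pose = np.array([[0., -1., 0., 0.],
                     [1., 0., 0., 0.],
                     [0., 0., 1., 0.],
                     [0., 0., 0., 1.]])
    print(transform_boxes3d(boxes, pose))  # center (0, 1, 0), heading pi/2, velocity (0, 1)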
| 11,158
| 33.981191
| 118
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/loss_utils.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(SigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
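# Minimal usage sketch for SigmoidFocalClassificationLoss (added for
# illustration): 2 samples, 4 anchors, 3 classes, unit anchor weights; the
# returned loss keeps the per-class dimension.
def _example_focal_loss():
    loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
    logits = torch.randn(2, 4, 3)
    target = torch.zeros(2, 4, 3)
    target[..., 0] = 1.0  # one-hot targets: class 0 everywhere
    weights = torch.ones(2, 4)
    print(loss_fn(logits, target, weights).shape)  # torch.Size([2, 4, 3])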
class WeightedClassificationLoss(nn.Module):
def __init__(self):
super(WeightedClassificationLoss, self).__init__()
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights=None, reduction='none'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
if weights is not None:
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == bce_loss.shape.__len__()
loss = weights * bce_loss
else:
loss = bce_loss
if reduction == 'none':
return loss
elif reduction == 'sum':
loss = loss.sum(dim=-1)
elif reduction == 'mean':
loss = loss.mean(dim=-1)
return loss
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
                      | 0.5 * x ** 2 / beta   if abs(x) < beta
        smoothl1(x) = |
                      | abs(x) - 0.5 * beta   otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            self.code_weights = None  # keep the attribute defined for forward()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
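# Quick sketch of the beta transition (values chosen arbitrarily): residuals
# smaller than beta are penalised quadratically, larger ones linearly.
def _demo_smooth_l1_beta():
    beta = 1.0 / 9.0
    diff = torch.tensor([0.05, 0.5, 2.0])
    loss = WeightedSmoothL1Loss.smooth_l1_loss(diff, beta=beta)
    expected = torch.tensor([0.5 * 0.05 ** 2 / beta, 0.5 - 0.5 * beta, 2.0 - 0.5 * beta])
    assert torch.allclose(loss, expected)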
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            self.code_weights = None  # avoid AttributeError in forward() when no code weights are given
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedBinaryCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedBinaryCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
loss = F.binary_cross_entropy_with_logits(input, target, reduction='none').mean(dim=-1) * weights
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
class WeightedCrossEntropyLoss_ver1(nn.Module):
def __init__(self):
        super(WeightedCrossEntropyLoss_ver1, self).__init__()
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor, reduction='none'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
        # NOTE: `weights` is accepted for interface compatibility but is not applied here
        loss = self.sigmoid_cross_entropy_with_logits(input, target)
if reduction == 'none':
return loss
elif reduction == 'sum':
loss = loss.sum(dim=-1)
elif reduction == 'mean':
loss = loss.mean(dim=-1)
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
def compute_fg_mask(gt_boxes2d, shape, downsample_factor=1, device=torch.device("cpu")):
"""
Compute foreground mask for images
Args:
gt_boxes2d: (B, N, 4), 2D box labels
shape: torch.Size or tuple, Foreground mask desired shape
downsample_factor: int, Downsample factor for image
device: torch.device, Foreground mask desired device
Returns:
fg_mask (shape), Foreground mask
"""
fg_mask = torch.zeros(shape, dtype=torch.bool, device=device)
# Set box corners
gt_boxes2d /= downsample_factor
gt_boxes2d[:, :, :2] = torch.floor(gt_boxes2d[:, :, :2])
gt_boxes2d[:, :, 2:] = torch.ceil(gt_boxes2d[:, :, 2:])
gt_boxes2d = gt_boxes2d.long()
# Set all values within each box to True
B, N = gt_boxes2d.shape[:2]
for b in range(B):
for n in range(N):
u1, v1, u2, v2 = gt_boxes2d[b, n]
fg_mask[b, v1:v2, u1:u2] = True
return fg_mask
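# Illustrative use of compute_fg_mask (box coordinates are made up): one
# 8x8 image with a single box covering pixels [2, 5) x [2, 5).
def _demo_compute_fg_mask():
    boxes2d = torch.tensor([[[2.0, 2.0, 5.0, 5.0]]])   # (B=1, N=1, 4) as (u1, v1, u2, v2)
    mask = compute_fg_mask(boxes2d, shape=(1, 8, 8))
    assert mask.sum().item() == 9 and bool(mask[0, 3, 3])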
def neg_loss_cornernet(pred, gt, mask=None):
"""
Refer to https://github.com/tianweiy/CenterPoint.
Modified focal loss. Exactly the same as CornerNet. Runs faster and costs a little bit more memory
Args:
pred: (batch x c x h x w)
gt: (batch x c x h x w)
mask: (batch x h x w)
Returns:
"""
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
if mask is not None:
mask = mask[:, None, :, :].float()
pos_loss = pos_loss * mask
neg_loss = neg_loss * mask
num_pos = (pos_inds.float() * mask).sum()
else:
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
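# Tiny sanity sketch (heatmap values are assumptions): pred must already be
# sigmoid-activated probabilities, and gt peaks must be exactly 1.0.
def _demo_neg_loss_cornernet():
    pred = torch.full((1, 1, 4, 4), 0.5)
    gt = torch.zeros((1, 1, 4, 4))
    gt[0, 0, 1, 1] = 1.0
    loss = neg_loss_cornernet(pred, gt)
    assert loss.item() > 0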
class FocalLossCenterNet(nn.Module):
"""
Refer to https://github.com/tianweiy/CenterPoint
"""
def __init__(self):
super(FocalLossCenterNet, self).__init__()
self.neg_loss = neg_loss_cornernet
def forward(self, out, target, mask=None):
return self.neg_loss(out, target, mask=mask)
def _reg_loss(regr, gt_regr, mask):
"""
Refer to https://github.com/tianweiy/CenterPoint
L1 regression loss
Args:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
Returns:
"""
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
isnotnan = (~ torch.isnan(gt_regr)).float()
mask *= isnotnan
regr = regr * mask
gt_regr = gt_regr * mask
loss = torch.abs(regr - gt_regr)
loss = loss.transpose(2, 0)
loss = torch.sum(loss, dim=2)
loss = torch.sum(loss, dim=1)
    loss = loss / torch.clamp_min(num, min=1.0)
return loss
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
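# Illustrative gather (indices are made up): pick per-object feature vectors
# from a (B, C, H, W) map at flattened spatial indices ind = v * W + u.
def _demo_transpose_and_gather():
    feat = torch.arange(2 * 3 * 4 * 4, dtype=torch.float32).view(2, 3, 4, 4)
    ind = torch.tensor([[0, 5], [10, 15]])      # (B, max_objects)
    out = _transpose_and_gather_feat(feat, ind)
    assert out.shape == (2, 2, 3)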
class RegLossCenterNet(nn.Module):
"""
Refer to https://github.com/tianweiy/CenterPoint
"""
def __init__(self):
super(RegLossCenterNet, self).__init__()
def forward(self, output, mask, ind=None, target=None):
"""
Args:
output: (batch x dim x h x w) or (batch x max_objects)
mask: (batch x max_objects)
ind: (batch x max_objects)
target: (batch x max_objects x dim)
Returns:
"""
if ind is None:
pred = output
else:
pred = _transpose_and_gather_feat(output, ind)
loss = _reg_loss(pred, target, mask)
return loss
class FastFocalLoss(nn.Module):
'''
Reimplemented focal loss, exactly the same as the CornerNet version.
Faster and costs much less memory.
'''
def __init__(self, alpha, beta):
super(FastFocalLoss, self).__init__()
self.alpha = alpha
self.beta = beta
def _gather_feat(self, feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.reshape(-1, dim)
return feat
def forward(self, out, target, ind, mask, cat):
'''
Arguments:
out, target: B x C x H x W
ind, mask: B x M
cat (category id for peaks): B x M
'''
mask = mask.float()
gt = torch.pow(1 - target, self.beta)
neg_loss = torch.log(1 - out + 1e-30) * torch.pow(out, self.alpha) * gt
neg_loss = neg_loss.sum()
out = out.reshape(out.size(0), -1, out.size(3))
pos_pred_pix = self._gather_feat(out, ind) # B x M x C
pos_pred = pos_pred_pix.gather(2, cat.unsqueeze(2)) # B x M
num_pos = mask.sum()
pos_loss = torch.log(pos_pred + 1e-30) * torch.pow(1 - pos_pred, self.alpha) * \
mask.unsqueeze(2)
pos_loss = pos_loss.sum()
if num_pos == 0:
return - neg_loss
return - (pos_loss + neg_loss) / num_pos
class BCELoss(nn.Module):
def __init__(self):
super(BCELoss, self).__init__()
def forward(self, pred, target, weights=None):
        # clamp to avoid log(0) on saturated sigmoid outputs
        pred = torch.sigmoid(pred).clamp(min=1e-7, max=1 - 1e-7)
        loss = -target * torch.log(pred) - (1 - target) * torch.log(1 - pred)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class RegLoss(nn.Module):
'''Regression loss for an output tensor
Arguments:
output (batch x dim x h x w)
mask (batch x max_objects)
ind (batch x max_objects)
target (batch x max_objects x dim)
'''
def __init__(self, code_weights: list = None):
super(RegLoss, self).__init__()
def _gather_feat(self, feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.reshape(-1, dim)
return feat
def forward(self, out, target, ind, mask):
mask = mask.float().unsqueeze(2)
out = out.reshape(out.size(0), -1, out.size(3))
pred = self._gather_feat(out, ind) # B x M x C
loss = F.l1_loss(pred * mask, target * mask, reduction='none')
loss = loss / (mask.sum() + 1e-4)
loss = loss.transpose(2, 0).sum(dim=2).sum(dim=1)
return loss
| 20,744
| 32.622366
| 107
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/box_coder_utils.py
|
import numpy as np
import torch
class ResidualCoder(object):
def __init__(self, code_size=7, encode_angle_by_sincos=False, **kwargs):
super().__init__()
self.code_size = code_size
self.encode_angle_by_sincos = encode_angle_by_sincos
if self.encode_angle_by_sincos:
self.code_size += 1
def encode_torch(self, boxes, anchors):
"""
Args:
boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
Returns:
"""
anchors[:, 3:6] = torch.clamp_min(anchors[:, 3:6], min=1e-5)
boxes[:, 3:6] = torch.clamp_min(boxes[:, 3:6], min=1e-5)
xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(boxes, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / dza
dxt = torch.log(dxg / dxa)
dyt = torch.log(dyg / dya)
dzt = torch.log(dzg / dza)
if self.encode_angle_by_sincos:
rt_cos = torch.cos(rg) - torch.cos(ra)
rt_sin = torch.sin(rg) - torch.sin(ra)
rts = [rt_cos, rt_sin]
else:
rts = [rg - ra]
cts = [g - a for g, a in zip(cgs, cas)]
return torch.cat([xt, yt, zt, dxt, dyt, dzt, *rts, *cts], dim=-1)
def decode_torch(self, box_encodings, anchors):
"""
Args:
box_encodings: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
if not self.encode_angle_by_sincos:
xt, yt, zt, dxt, dyt, dzt, rt, *cts = torch.split(box_encodings, 1, dim=-1)
else:
xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(dxt) * dxa
dyg = torch.exp(dyt) * dya
dzg = torch.exp(dzt) * dza
if self.encode_angle_by_sincos:
rg_cos = cost + torch.cos(ra)
rg_sin = sint + torch.sin(ra)
rg = torch.atan2(rg_sin, rg_cos)
else:
rg = rt + ra
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
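# Round-trip sketch for ResidualCoder (box and anchor values are arbitrary):
# decoding the encoding against the same anchor should recover the box.
def _demo_residual_coder_roundtrip():
    coder = ResidualCoder()
    anchors = torch.tensor([[0.0, 0.0, -1.0, 3.9, 1.6, 1.56, 0.0]])
    boxes = torch.tensor([[1.0, 2.0, -0.5, 4.2, 1.8, 1.5, 0.3]])
    encoded = coder.encode_torch(boxes.clone(), anchors.clone())  # encode_torch clamps in place
    decoded = coder.decode_torch(encoded, anchors)
    assert torch.allclose(decoded, boxes, atol=1e-4)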
class PreviousResidualDecoder(object):
def __init__(self, code_size=7, **kwargs):
super().__init__()
self.code_size = code_size
@staticmethod
def decode_torch(box_encodings, anchors):
"""
Args:
box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(lt) * dxa
dyg = torch.exp(wt) * dya
dzg = torch.exp(ht) * dza
rg = rt + ra
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
class PreviousResidualRoIDecoder(object):
def __init__(self, code_size=7, **kwargs):
super().__init__()
self.code_size = code_size
@staticmethod
def decode_torch(box_encodings, anchors):
"""
Args:
box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(lt) * dxa
dyg = torch.exp(wt) * dya
dzg = torch.exp(ht) * dza
rg = ra - rt
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
class PointResidualCoder(object):
def __init__(self, code_size=8, use_mean_size=True, **kwargs):
super().__init__()
self.code_size = code_size
self.use_mean_size = use_mean_size
if self.use_mean_size:
self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).cuda().float()
assert self.mean_size.min() > 0
def encode_torch(self, gt_boxes, points, gt_classes=None):
"""
Args:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
points: (N, 3) [x, y, z]
gt_classes: (N) [1, num_classes]
Returns:
box_coding: (N, 8 + C)
"""
gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)
xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
xa, ya, za = torch.split(points, 1, dim=-1)
if self.use_mean_size:
assert gt_classes.max() <= self.mean_size.shape[0]
point_anchor_size = self.mean_size[gt_classes - 1]
dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / dza
dxt = torch.log(dxg / dxa)
dyt = torch.log(dyg / dya)
dzt = torch.log(dzg / dza)
else:
xt = (xg - xa)
yt = (yg - ya)
zt = (zg - za)
dxt = torch.log(dxg)
dyt = torch.log(dyg)
dzt = torch.log(dzg)
cts = [g for g in cgs]
return torch.cat([xt, yt, zt, dxt, dyt, dzt, torch.cos(rg), torch.sin(rg), *cts], dim=-1)
def decode_torch(self, box_encodings, points, pred_classes=None):
"""
Args:
box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, cos, sin, ...]
points: [x, y, z]
pred_classes: (N) [1, num_classes]
Returns:
"""
xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
xa, ya, za = torch.split(points, 1, dim=-1)
if self.use_mean_size:
assert pred_classes.max() <= self.mean_size.shape[0]
point_anchor_size = self.mean_size[pred_classes - 1]
dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(dxt) * dxa
dyg = torch.exp(dyt) * dya
dzg = torch.exp(dzt) * dza
else:
xg = xt + xa
yg = yt + ya
zg = zt + za
dxg, dyg, dzg = torch.split(torch.exp(box_encodings[..., 3:6]), 1, dim=-1)
rg = torch.atan2(sint, cost)
cgs = [t for t in cts]
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
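# Round-trip sketch for PointResidualCoder with use_mean_size=False (so no
# CUDA class-mean sizes are needed); the box values are arbitrary.
def _demo_point_residual_roundtrip():
    coder = PointResidualCoder(use_mean_size=False)
    points = torch.tensor([[1.0, 2.0, 0.0]])
    gt = torch.tensor([[2.0, 3.0, 0.5, 4.0, 1.8, 1.6, 0.4]])
    encoded = coder.encode_torch(gt.clone(), points)   # encode_torch clamps in place
    decoded = coder.decode_torch(encoded, points)
    assert torch.allclose(decoded, gt, atol=1e-5)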
class PointResidual_BinOri_Coder(object):
def __init__(self, code_size=8, use_mean_size=True, **kwargs):
super().__init__()
self.bin_size = kwargs.get('bin_size', 12)
self.code_size = 6 + 2 * self.bin_size
self.bin_inter = 2 * np.pi / self.bin_size
self.use_mean_size = use_mean_size
if self.use_mean_size:
self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).cuda().float()
assert self.mean_size.min() > 0
def encode_torch(self, gt_boxes, points, gt_classes=None):
"""
Args:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
points: (N, 3) [x, y, z]
gt_classes: (N) [1, num_classes]
Returns:
box_coding: (N, 8 + C)
"""
gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)
xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
xa, ya, za = torch.split(points, 1, dim=-1)
if self.use_mean_size:
assert gt_classes.max() <= self.mean_size.shape[0]
point_anchor_size = self.mean_size[gt_classes - 1]
dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / dza
dxt = torch.log(dxg / dxa)
dyt = torch.log(dyg / dya)
dzt = torch.log(dzg / dza)
else:
xt = (xg - xa)
yt = (yg - ya)
zt = (zg - za)
dxt = torch.log(dxg)
dyt = torch.log(dyg)
dzt = torch.log(dzg)
        rg = torch.clamp(rg, min=-np.pi + 1e-5, max=np.pi - 1e-5)  # keep heading strictly inside (-pi, pi) so bin_id stays in [0, bin_size)
bin_id = torch.floor((rg + np.pi) / self.bin_inter)
bin_res = ((rg + np.pi) - (bin_id * self.bin_inter + self.bin_inter / 2)) / (self.bin_inter / 2) # norm to [-1, 1]
cts = [g for g in cgs]
return torch.cat([xt, yt, zt, dxt, dyt, dzt, bin_id, bin_res, *cts], dim=-1)
def decode_torch(self, box_encodings, points, pred_classes=None):
"""
Args:
box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, bin_id, bin_res , ...]
points: [x, y, z]
pred_classes: (N) [1, num_classes]
Returns:
"""
xt, yt, zt, dxt, dyt, dzt = torch.split(box_encodings[..., :6], 1, dim=-1)
xa, ya, za = torch.split(points, 1, dim=-1)
if self.use_mean_size:
assert pred_classes.max() <= self.mean_size.shape[0]
point_anchor_size = self.mean_size[pred_classes - 1]
dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(dxt) * dxa
dyg = torch.exp(dyt) * dya
dzg = torch.exp(dzt) * dza
else:
xg = xt + xa
yg = yt + ya
zg = zt + za
dxg, dyg, dzg = torch.split(torch.exp(box_encodings[..., 3:6]), 1, dim=-1)
bin_id = box_encodings[..., 6:6+self.bin_size]
bin_res = box_encodings[..., 6+self.bin_size:]
_, bin_id = torch.max(bin_id, dim=-1)
bin_id_one_hot = torch.nn.functional.one_hot(bin_id.long(), self.bin_size)
bin_res = torch.sum(bin_res * bin_id_one_hot.float(), dim=-1)
rg = bin_id.float() * self.bin_inter - np.pi + self.bin_inter / 2
rg = rg + bin_res * (self.bin_inter / 2)
rg = rg.unsqueeze(-1)
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg], dim=-1)
class PointBinResidualCoder(object):
def __init__(self, code_size=30, use_mean_size=True, angle_bin_num=12, pred_velo=False, **kwargs):
super().__init__()
self.code_size = 6 + 2 * angle_bin_num
self.angle_bin_num = angle_bin_num
self.pred_velo = pred_velo
if pred_velo:
self.code_size += 2
self.use_mean_size = use_mean_size
if self.use_mean_size:
self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).cuda().float()
assert self.mean_size.min() > 0
def encode_angle_torch(self, angle):
"""
Args:
angle: (N)
Returns:
angle_cls: (N, angle_bin_num)
angle_res: (N, angle_bin_num)
"""
angle = torch.remainder(angle, np.pi * 2.0) # -pi, pi -> 0, 2pi
        angle_per_class = np.pi * 2.0 / float(self.angle_bin_num)  # pi / 6 for the default 12 bins
shifted_angle = torch.remainder(angle + angle_per_class / 2.0, np.pi * 2.0)
angle_cls_f = (shifted_angle / angle_per_class).floor()
angle_cls = angle_cls_f.new_zeros(*list(angle_cls_f.shape), self.angle_bin_num)
angle_cls.scatter_(-1, angle_cls_f.unsqueeze(-1).long(), 1.0)
angle_res = shifted_angle - (angle_cls_f * angle_per_class + angle_per_class / 2.0)
        angle_res = angle_res / angle_per_class  # normalize residual angle to [-0.5, 0.5)
angle_res = angle_cls * angle_res.unsqueeze(-1)
return angle_cls, angle_res
def decode_angle_torch(self, angle_cls, angle_res):
"""
Args:
angle_cls: (N, angle_bin_num)
angle_res: (N, angle_bin_num)
Returns:
angle: (N)
"""
angle_cls_idx = angle_cls.argmax(dim=-1)
angle_cls_onehot = angle_cls.new_zeros(angle_cls.shape)
angle_cls_onehot.scatter_(-1, angle_cls_idx.unsqueeze(-1), 1.0)
angle_res = (angle_cls_onehot * angle_res).sum(dim=-1)
angle = (angle_cls_idx.float() + angle_res) * (np.pi * 2.0 / float(self.angle_bin_num))
return angle
def encode_torch(self, gt_boxes, points, gt_classes=None):
"""
Args:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
points: (N, 3) [x, y, z]
gt_classes: (N) [1, num_classes]
Returns:
box_coding: (N, 6 + 2 * B + C)
"""
gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)
xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
xa, ya, za = torch.split(points, 1, dim=-1)
if self.use_mean_size:
assert gt_classes.max() <= self.mean_size.shape[0]
point_anchor_size = self.mean_size[gt_classes - 1]
dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / dza
dxt = torch.log(dxg / dxa)
dyt = torch.log(dyg / dya)
dzt = torch.log(dzg / dza)
else:
xt = (xg - xa)
yt = (yg - ya)
zt = (zg - za)
dxt = torch.log(dxg)
dyt = torch.log(dyg)
dzt = torch.log(dzg)
rg_cls, rg_reg = self.encode_angle_torch(rg.squeeze(-1))
cts = [g for g in cgs]
return torch.cat([xt, yt, zt, dxt, dyt, dzt, rg_cls, rg_reg, *cts], dim=-1)
def decode_torch_kernel(self, box_offsets, box_angle_cls, box_angle_reg, points, pred_classes=None):
"""
Args:
box_offsets: (N, 6) [x, y, z, dx, dy, dz]
box_angle_cls: (N, angle_bin_num)
box_angle_reg: (N, angle_bin_num)
points: [x, y, z]
pred_classes: (N) [1, num_classes]
Returns:
boxes3d: (N, 7)
"""
xt, yt, zt, dxt, dyt, dzt = torch.split(box_offsets, 1, dim=-1)
xa, ya, za = torch.split(points, 1, dim=-1)
if self.use_mean_size:
assert pred_classes.max() <= self.mean_size.shape[0]
point_anchor_size = self.mean_size[pred_classes - 1]
dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(dxt) * dxa
dyg = torch.exp(dyt) * dya
dzg = torch.exp(dzt) * dza
else:
xg = xt + xa
yg = yt + ya
zg = zt + za
dxg = torch.exp(dxt)
dyg = torch.exp(dyt)
dzg = torch.exp(dzt)
rg = self.decode_angle_torch(box_angle_cls, box_angle_reg).unsqueeze(-1)
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg], dim=-1)
def decode_torch(self, box_encodings, points, pred_classes=None):
"""
Args:
box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, bin_id, bin_res , ...]
points: [x, y, z]
pred_classes: (N) [1, num_classes]
Returns:
boxes3d: (N, 7)
"""
box_offsets = box_encodings[:, :6]
box_angle_cls = box_encodings[:, 6:6 + self.angle_bin_num]
box_angle_reg = box_encodings[:, 6 + self.angle_bin_num:6 + self.angle_bin_num * 2]
cgs = box_encodings[:, 6 + self.angle_bin_num * 2:]
boxes3d = self.decode_torch_kernel(box_offsets, box_angle_cls, box_angle_reg, points, pred_classes)
return torch.cat([boxes3d, cgs], dim=-1)
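# Angle-bin round-trip sketch (angles are arbitrary): encode_angle_torch and
# decode_angle_torch should agree modulo 2*pi.
def _demo_angle_bin_roundtrip():
    coder = PointBinResidualCoder(use_mean_size=False)  # avoids the CUDA mean-size buffer
    angle = torch.tensor([0.3, -1.2, 3.0])
    angle_cls, angle_res = coder.encode_angle_torch(angle)
    decoded = coder.decode_angle_torch(angle_cls, angle_res)
    assert torch.allclose(torch.remainder(decoded, 2 * np.pi),
                          torch.remainder(angle, 2 * np.pi), atol=1e-5)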
| 17,075
| 36.041215
| 123
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/object3d_kitti.py
|
import numpy as np
def get_objects_from_label(label_file):
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
def cls_type_to_id(cls_type):
type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
if cls_type not in type_to_id.keys():
return -1
return type_to_id[cls_type]
class Object3d(object):
def __init__(self, line):
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.truncation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10])
self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.loc)
self.ry = float(label[14])
self.score = float(label[15]) if label.__len__() == 16 else -1.0
self.level_str = None
self.level = self.get_kitti_obj_level()
def get_kitti_obj_level(self):
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 0 # Easy
elif height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 1 # Moderate
elif height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 2 # Hard
else:
self.level_str = 'UnKnown'
return -1
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.loc
return corners3d
def to_str(self):
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,
self.loc, self.ry)
return print_str
def to_kitti_format(self):
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2],
self.ry)
return kitti_str
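# Illustrative parse of a synthetic label line in the standard KITTI format
# (the numbers are made up but follow the 15-field layout used above).
def _demo_object3d():
    line = 'Car 0.00 0 -1.57 614.24 181.78 727.31 284.77 1.57 1.73 4.15 1.00 1.75 13.22 -1.62'
    obj = Object3d(line)
    assert obj.cls_id == 1 and obj.level_str == 'Easy'
    assert obj.generate_corners3d().shape == (8, 3)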
| 3,449
| 40.071429
| 119
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/active_learning_2D_utils.py
|
import enum
import io
import os
import tqdm
import pickle
import random
import torch
import numpy as np
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
from pathlib import Path
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils, commu_utils
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
def active_evaluate(model, target_loader, rank):
if rank == 0:
print("======> Active Evaluate <======")
dataloader_iter_tar = iter(target_loader)
total_iter_tar = len(dataloader_iter_tar)
frame_scores = []
return_scores = []
model.eval()
if rank == 0:
pbar = tqdm.tqdm(total=total_iter_tar, leave=False, desc='active_evaluate', dynamic_ncols=True)
for cur_it in range(total_iter_tar):
try:
batch = next(dataloader_iter_tar)
except StopIteration:
dataloader_iter_tar = iter(target_loader)
batch = next(dataloader_iter_tar)
print('new iter')
with torch.no_grad():
load_data_to_gpu(batch)
forward_args = {
'mode': 'active_evaluate'
}
sample_score = model(batch, **forward_args)
frame_scores.append(sample_score)
if rank == 0:
pbar.update()
pbar.refresh()
if rank == 0:
pbar.close()
gather_scores = gather_all_scores(frame_scores)
for score in gather_scores:
for f_score in score:
return_scores += f_score
return return_scores
def active_evaluate_dual(model, target_loader, rank, domain):
if rank == 0:
print("======> Active Evaluate <======")
dataloader_iter_tar = iter(target_loader)
total_iter_tar = len(dataloader_iter_tar)
frame_scores = []
return_scores = []
model.eval()
if rank == 0:
pbar = tqdm.tqdm(total=total_iter_tar, leave=False, desc='active_evaluate', dynamic_ncols=True)
for cur_it in range(total_iter_tar):
try:
batch = next(dataloader_iter_tar)
except StopIteration:
dataloader_iter_tar = iter(target_loader)
batch = next(dataloader_iter_tar)
print('new iter')
with torch.no_grad():
load_data_to_gpu(batch)
forward_args = {
'mode': 'active_evaluate',
'domain': domain
}
sample_score = model(batch, **forward_args)
frame_scores.append(sample_score)
if rank == 0:
pbar.update()
pbar.refresh()
if rank == 0:
pbar.close()
gather_scores = gather_all_scores(frame_scores)
for score in gather_scores:
for f_score in score:
return_scores += f_score
return return_scores
# evaluate all frames (including previously sampled frames)
def active_evaluate_dual_2(model, target_loader, rank, domain, sampled_frame_id=None):
if rank == 0:
print("======> Active Evaluate <======")
dataloader_iter_tar = iter(target_loader)
total_iter_tar = len(dataloader_iter_tar)
frame_scores = []
sampled_frame_scores = []
return_scores = []
model.eval()
if rank == 0:
pbar = tqdm.tqdm(total=total_iter_tar, leave=False, desc='active_evaluate', dynamic_ncols=True)
for cur_it in range(total_iter_tar):
try:
batch = next(dataloader_iter_tar)
except StopIteration:
dataloader_iter_tar = iter(target_loader)
batch = next(dataloader_iter_tar)
print('new iter')
with torch.no_grad():
load_data_to_gpu(batch)
forward_args = {
'mode': 'active_evaluate',
'domain': domain
}
sample_score = model(batch, **forward_args)
frame_scores.append(sample_score)
if rank == 0:
pbar.update()
pbar.refresh()
if rank == 0:
pbar.close()
gather_scores = gather_all_scores(frame_scores)
for score in gather_scores:
for f_score in score:
return_scores += f_score
    # split the scores without mutating the list while iterating over it
    remaining_scores = []
    for score in return_scores:
        if score['frame_id'] in sampled_frame_id:
            sampled_frame_scores.append(score)
        else:
            remaining_scores.append(score)
    return_scores = remaining_scores
return return_scores, sampled_frame_scores
def gather_all_scores(frame_scores):
commu_utils.synchronize()
if dist.is_initialized():
scores = commu_utils.all_gather(frame_scores)
else:
scores = [frame_scores]
commu_utils.synchronize()
return scores
def distributed_concat(tensor):
output_tensor = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensor, tensor)
concat_tensor = torch.cat(output_tensor, dim=0)
return concat_tensor
def get_target_list(target_pkl_file, oss):
if oss == True:
from petrel_client.client import Client
client = Client('~/.petreloss.conf')
pkl_bytes = client.get(target_pkl_file, update_cache=True)
target_list = pickle.load(io.BytesIO(pkl_bytes))
else:
with open(target_pkl_file, 'rb') as f:
target_list = pickle.load(f)
return target_list
def get_dataset_list(dataset_file, oss, sample_interval=10, waymo=False):
if oss == True:
from petrel_client.client import Client
client = Client('~/.petreloss.conf')
if waymo == False:
if oss == True:
pkl_bytes = client.get(dataset_file, update_cache=True)
target_list = pickle.load(io.BytesIO(pkl_bytes))
else:
with open(dataset_file, 'rb') as f:
target_list = pickle.load(f)
else:
data_path = '../data/waymo/ImageSets/train.txt'
target_list = []
sample_sequence_list = [x.strip() for x in open(data_path).readlines()]
for k in tqdm.tqdm(range(len(sample_sequence_list))):
sequence_name = os.path.splitext(sample_sequence_list[k])[0]
if oss == False:
info_path = Path(dataset_file) / sequence_name / ('%s.pkl' % sequence_name)
if not Path(info_path).exists():
continue
else:
info_path = os.path.join(dataset_file, sequence_name, ('%s.pkl' % sequence_name))
if oss == False:
with open(info_path, 'rb') as f:
infos = pickle.load(f)
target_list.extend(infos)
else:
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
target_list.extend(infos)
if sample_interval > 1:
sampled_waymo_infos = []
for k in range(0, len(target_list), sample_interval):
sampled_waymo_infos.append(target_list[k])
target_list = sampled_waymo_infos
return target_list
def update_sample_list(sample_list, target_list, sample_frame_id, epoch, save_path, target_name, rank):
if target_name == 'ActiveKittiDataset':
new_sample_list = [item for item in target_list if item['point_cloud']['lidar_idx'] in sample_frame_id]
elif target_name == 'ActiveNuScenesDataset':
new_sample_list = [item for item in target_list if Path(item['lidar_path']).stem in sample_frame_id]
sample_list = sample_list + new_sample_list
sample_list_path = save_path / ('epoch-%d_sample_list.pkl' % epoch)
if rank == 0:
with open(sample_list_path, 'wb') as f:
pickle.dump(sample_list, f)
commu_utils.synchronize()
return sample_list, sample_list_path
def update_sample_list_dual(sample_list, dataset_list, sample_frame_id, epoch, save_path, dataset_name, rank, domain='source'):
if dataset_name == 'ActiveKittiDataset':
assert domain == 'target'
new_sample_list = [item for item in dataset_list if item['point_cloud']['lidar_idx'] in sample_frame_id]
sample_list = sample_list + new_sample_list
elif dataset_name == 'ActiveNuScenesDataset':
if domain == 'target':
new_sample_list = [item for item in dataset_list if Path(item['lidar_path']).stem in sample_frame_id]
sample_list = sample_list + new_sample_list
else:
sample_list = [item for item in dataset_list if Path(item['lidar_path']).stem in sample_frame_id]
elif dataset_name == 'ActiveWaymoDataset':
assert domain == 'source'
sample_list = [item for item in dataset_list if str(item['frame_id']) in sample_frame_id]
sample_list_path = save_path / ('epoch-%d_sample_list_' % epoch + '_' + domain + '.pkl')
if rank == 0:
with open(sample_list_path, 'wb') as f:
pickle.dump(sample_list, f)
commu_utils.synchronize()
return sample_list, sample_list_path
def update_target_list(target_list, sample_frame_id, epoch, save_path, target_name, rank):
if target_name == 'ActiveKittiDataset':
target_list = [item for item in target_list if item['point_cloud']['lidar_idx'] not in sample_frame_id]
elif target_name == 'ActiveNuScenesDataset':
target_list = [item for item in target_list if Path(item['lidar_path']).stem not in sample_frame_id]
target_list_path = save_path / ('epoch-%d_target_list.pkl' % epoch)
if rank == 0:
with open(target_list_path, 'wb') as f:
pickle.dump(target_list, f)
commu_utils.synchronize()
return target_list, target_list_path
def active_sample(frame_scores, budget):
frame_sorted = sorted(frame_scores, key=lambda keys: keys.get("total_score"), reverse=True)
sampled_frame_info = frame_sorted[:budget]
sampled_frame_id = [frame['frame_id'] for frame in sampled_frame_info]
return sampled_frame_id, sampled_frame_info
def active_sample_source(frame_scores, budget):
sampled_frame_info = [item for item in frame_scores if item['total_score'] > 0]
sampled_frame_id = [frame['frame_id'] for frame in sampled_frame_info]
return sampled_frame_id, sampled_frame_info
def active_sample_CLUE(frame_scores, budget):
roi_feature = frame_scores[0].get('roi_feature', None)
tgt_emb_pen = roi_feature.new_zeros((len(frame_scores), roi_feature.shape[-1]))
tgt_scores = roi_feature.new_zeros((len(frame_scores), 1))
    for i, cur_score in enumerate(frame_scores):
cur_feature = cur_score.get('roi_feature', None).to(tgt_emb_pen.device)
cur_roi_score = cur_score.get('roi_score', None).to(tgt_scores.device)
tgt_emb_pen[i] = cur_feature
tgt_scores[i] = cur_roi_score
tgt_emb_pen = tgt_emb_pen.cpu().numpy()
tgt_scores = tgt_scores.view(-1)
sample_weights = -(tgt_scores*torch.log(tgt_scores)).cpu().numpy()
km = KMeans(budget)
km.fit(tgt_emb_pen, sample_weight=sample_weights)
dists = euclidean_distances(km.cluster_centers_, tgt_emb_pen)
sort_idxs = dists.argsort(axis=1)
q_idxs = []
ax, rem = 0, budget
while rem > 0:
q_idxs.extend(list(sort_idxs[:, ax][:rem]))
q_idxs = list(set(q_idxs))
rem = budget - len(q_idxs)
ax += 1
sample_frame_info = []
sample_frame_id = []
for i, cur_score in enumerate(frame_scores):
if i in q_idxs:
sample_frame_info.append(cur_score)
sample_frame_id.append(cur_score.get('frame_id'))
return sample_frame_id, sample_frame_info
| 12,079
| 36.055215
| 127
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/common_utils.py
|
import logging
import os
import pickle
import random
import shutil
import subprocess
import SharedArray
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.autograd import Function
from ..utils.spconv_utils import spconv
def check_numpy_to_torch(x):
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float(), True
elif isinstance(x, (int, float)):
        # wrap python scalars as 1-element float tensors
        d = np.array([x], dtype=np.float32)
return torch.from_numpy(d).float(), True
return x, False
def limit_period(val, offset=0.5, period=np.pi):
val, is_numpy = check_numpy_to_torch(val)
ans = val - torch.floor(val / period + offset) * period
return ans.numpy() if is_numpy else ans
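# Quick sketch (values are arbitrary): with the defaults offset=0.5 and
# period=pi, angles are wrapped into [-pi/2, pi/2).
def _demo_limit_period():
    wrapped = limit_period(np.array([0.2, 2.0, -2.0]))
    assert np.allclose(wrapped, [0.2, 2.0 - np.pi, -2.0 + np.pi])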
def drop_info_with_name(info, name):
ret_info = {}
keep_indices = [i for i, x in enumerate(info['name']) if x != name]
for key in info.keys():
ret_info[key] = info[key][keep_indices]
return ret_info
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), angle along z-axis, angle increases x ==> y
Returns:
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
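# Quick sketch: rotating the point (1, 0, 0) by +pi/2 about z lands on
# (0, 1, 0), matching the "angle increases x ==> y" convention above.
def _demo_rotate_points_along_z():
    pts = np.array([[[1.0, 0.0, 0.0]]], dtype=np.float32)   # (B=1, N=1, 3)
    rot = rotate_points_along_z(pts, np.array([np.pi / 2], dtype=np.float32))
    assert np.allclose(rot[0, 0], [0.0, 1.0, 0.0], atol=1e-6)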
def mask_points_by_range(points, limit_range):
mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \
& (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])
return mask
def get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):
"""
Args:
voxel_coords: (N, 3)
downsample_times:
voxel_size:
point_cloud_range:
Returns:
"""
assert voxel_coords.shape[1] == 3
voxel_centers = voxel_coords[:, [2, 1, 0]].float() # (xyz)
voxel_size = torch.tensor(voxel_size, device=voxel_centers.device).float() * downsample_times
pc_range = torch.tensor(point_cloud_range[0:3], device=voxel_centers.device).float()
voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range
return voxel_centers
def create_logger(log_file=None, rank=0, log_level=logging.INFO):
logger = logging.getLogger(__name__)
logger.setLevel(log_level if rank == 0 else 'ERROR')
formatter = logging.Formatter('%(asctime)s %(filename)s %(funcName)s %(lineno)d %(levelname)5s %(message)s')
console = logging.StreamHandler()
console.setLevel(log_level if rank == 0 else 'ERROR')
console.setFormatter(formatter)
logger.addHandler(console)
if log_file is not None:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setLevel(log_level if rank == 0 else 'ERROR')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.propagate = False
return logger
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_pad_params(desired_size, cur_size):
"""
Get padding parameters for np.pad function
Args:
desired_size: int, Desired padded output size
        cur_size: int, Current size. Should always be less than or equal to desired_size
Returns:
pad_params: tuple(int), Number of values padded to the edges (before, after)
"""
assert desired_size >= cur_size
# Calculate amount to pad
diff = desired_size - cur_size
pad_params = (0, diff)
return pad_params
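# Illustrative call (sizes are arbitrary): padding is applied on the trailing
# edge only, so desired 10 with current 7 yields (0, 3).
def _demo_get_pad_params():
    assert get_pad_params(desired_size=10, cur_size=7) == (0, 3)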
def keep_arrays_by_name(gt_names, used_classes):
inds = [i for i, x in enumerate(gt_names) if x in used_classes]
inds = np.array(inds, dtype=np.int64)
return inds
def init_dist_slurm(tcp_port, local_rank, backend='nccl'):
"""
modified from https://github.com/open-mmlab/mmdetection
Args:
tcp_port:
backend:
Returns:
"""
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(tcp_port)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
dist.init_process_group(backend=backend)
total_gpus = dist.get_world_size()
rank = dist.get_rank()
return total_gpus, rank
def init_dist_pytorch(tcp_port, local_rank, backend='nccl'):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(local_rank % num_gpus)
    dist.init_process_group(backend=backend)
rank = dist.get_rank()
return num_gpus, rank
def get_dist_info(return_gpu_per_machine=False):
if torch.__version__ < '1.0':
initialized = dist._initialized
else:
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
if return_gpu_per_machine:
gpu_per_machine = torch.cuda.device_count()
return rank, world_size, gpu_per_machine
return rank, world_size
def merge_results_dist(result_part, size, tmpdir):
rank, world_size = get_dist_info()
os.makedirs(tmpdir, exist_ok=True)
dist.barrier()
pickle.dump(result_part, open(os.path.join(tmpdir, 'result_part_{}.pkl'.format(rank)), 'wb'))
dist.barrier()
if rank != 0:
return None
part_list = []
for i in range(world_size):
part_file = os.path.join(tmpdir, 'result_part_{}.pkl'.format(i))
part_list.append(pickle.load(open(part_file, 'rb')))
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:size]
shutil.rmtree(tmpdir)
return ordered_results
def scatter_point_inds(indices, point_inds, shape):
ret = -1 * torch.ones(*shape, dtype=point_inds.dtype, device=point_inds.device)
ndim = indices.shape[-1]
flattened_indices = indices.view(-1, ndim)
slices = [flattened_indices[:, i] for i in range(ndim)]
ret[slices] = point_inds
return ret
def generate_voxel2pinds(sparse_tensor):
device = sparse_tensor.indices.device
batch_size = sparse_tensor.batch_size
spatial_shape = sparse_tensor.spatial_shape
indices = sparse_tensor.indices.long()
point_indices = torch.arange(indices.shape[0], device=device, dtype=torch.int32)
output_shape = [batch_size] + list(spatial_shape)
v2pinds_tensor = scatter_point_inds(indices, point_indices, output_shape)
return v2pinds_tensor
def sa_create(name, var):
"""
Args:
        name: identifies the shared memory; a file:// prefix indicates a file, while shm:// indicates a POSIX shared memory object
var: only use the var.shape and var.dtype to create SA object
see more: https://pypi.org/project/SharedArray/
"""
x = SharedArray.create(name, var.shape, dtype=var.dtype)
x[...] = var[...]
x.flags.writeable = False
return x
def add_prefix_to_dict(dict, prefix):
for key in list(dict.keys()):
dict[prefix + key] = dict.pop(key)
return dict
class DataReader(object):
def __init__(self, dataloader, sampler):
self.dataloader = dataloader
self.sampler = sampler
def construct_iter(self):
self.dataloader_iter = iter(self.dataloader)
def set_cur_epoch(self, cur_epoch):
self.cur_epoch = cur_epoch
    def read_data(self):
        try:
            return next(self.dataloader_iter)
        except StopIteration:
            if self.sampler is not None:
                self.sampler.set_epoch(self.cur_epoch)
            self.construct_iter()
            return next(self.dataloader_iter)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
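# Illustrative use (values are arbitrary): update() accumulates a running
# average weighted by the sample count n.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(2.0)
    meter.update(4.0, n=3)
    assert meter.avg == (2.0 + 4.0 * 3) / 4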
def set_bn_train(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.train()
def calculate_gradient_norm(model):
total_norm = 0
for p in model.parameters():
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1. / 2)
return total_norm
class GRLayer(Function):
@staticmethod
def forward(ctx, input, weight):
ctx.alpha = weight
return input.view_as(input)
@staticmethod
def backward(ctx, grad_outputs):
output = grad_outputs.neg() * ctx.alpha
return output, None
def grad_reverse(x, weight):
return GRLayer.apply(x, weight)
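# Gradient-reversal sketch (the weight value is arbitrary): the layer is the
# identity in the forward pass and multiplies gradients by -weight on the way
# back, as in DANN-style adversarial training.
def _demo_grad_reverse():
    x = torch.ones(3, requires_grad=True)
    grad_reverse(x, 0.5).sum().backward()
    assert torch.allclose(x.grad, torch.full((3,), -0.5))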
def split_two_spare_tensor(split_tag_s1, split_tag_s2, sparse_tensor):
"""
    Function: split the sparse_tensor into two sparse tensors according to the given split tags
Args:
split_tag_s1: array (batch_len)
split_tag_s2: array (batch_len)
sparse_tensor:
Returns:
"""
voxel_features = sparse_tensor.features
voxel_coords = sparse_tensor.indices
    # split the voxel coordinates of the dataset-merged sparse tensor
tar_coor_s1 = []
tar_coor_s2 = []
bs_s1 = 0
bs_s2 = 0
for i in split_tag_s1:
voxel_coords_s1 = voxel_coords[i==voxel_coords[:,0]]
voxel_coords_s1[:,0] = bs_s1
bs_s1 += 1
tar_coor_s1.append(voxel_coords_s1)
tar_s1 = torch.cat(tar_coor_s1, axis=0)
for j in split_tag_s2:
voxel_coords_s2 = voxel_coords[j==voxel_coords[:,0]]
voxel_coords_s2[:,0] = bs_s2
bs_s2 += 1
tar_coor_s2.append(voxel_coords_s2)
tar_s2 = torch.cat(tar_coor_s2, axis=0)
    # split the voxel features of the dataset-merged sparse tensor
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
voxel_s1 = voxel_features[i==voxel_coords[:,0]]
tar_list_s1.append(voxel_s1.reshape(-1, voxel_s1.shape[-1]))
for j in split_tag_s2:
voxel_s2 = voxel_features[j==voxel_coords[:,0]]
tar_list_s2.append(voxel_s2.reshape(-1, voxel_s2.shape[-1]))
voxel_features_s1 = torch.cat(tar_list_s1, axis=0)
voxel_features_s2 = torch.cat(tar_list_s2, axis=0)
    # rebuild each split as a sparse tensor
input_sp_tensor_s1 = spconv.SparseConvTensor(
features=voxel_features_s1,
indices=tar_s1.int(),
spatial_shape=sparse_tensor.spatial_shape,
batch_size=len(split_tag_s1)
)
input_sp_tensor_s2 = spconv.SparseConvTensor(
features=voxel_features_s2,
indices=tar_s2.int(),
spatial_shape=sparse_tensor.spatial_shape,
batch_size=len(split_tag_s2)
)
return input_sp_tensor_s1, input_sp_tensor_s2
# For split the batch_dict for two head
def split_two_batch_dict(split_tag_s1, split_tag_s2, batch_dict):
tar_dicts_s1 = {}
tar_dicts_s2 = {}
for key, val in batch_dict.items():
if key in ['db_flag', 'frame_id', 'use_lead_xyz']:
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
tar_list_s1.append(val[i])
for j in split_tag_s2:
tar_list_s2.append(val[j])
tar_dicts_s1[key] = tar_list_s1
tar_dicts_s2[key] = tar_list_s2
elif key in ['points', 'voxel_coords']:
tar_list_s1 = []
tar_list_s2 = []
bs_s1 = 0
bs_s2 = 0
for i in split_tag_s1:
idx_bs_s1 = [np.where(i==val[:,0])]
point_s1 = val[tuple(idx_bs_s1)]
point_s1[0,:,0] = bs_s1
bs_s1 = bs_s1 + 1
tar_list_s1.append(point_s1.reshape(-1, point_s1.shape[2]))
for j in split_tag_s2:
idx_bs_s2 = [np.where(j==val[:,0])]
point_s2 = val[tuple(idx_bs_s2)]
point_s2[0,:,0] = bs_s2
bs_s2 += 1
tar_list_s2.append(point_s2.reshape(-1, point_s2.shape[2]))
tar_dicts_s1[key] = np.concatenate(tar_list_s1, axis=0)
tar_dicts_s2[key] = np.concatenate(tar_list_s2, axis=0)
elif key in ['gt_boxes']:
tar_dicts_s1[key] = val[split_tag_s1, :, :]
tar_dicts_s2[key] = val[split_tag_s2, :, :]
elif key in ['voxels']:
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
idx_bs_s1 = [np.where(i==batch_dict['voxel_coords'][:,0])]
voxel_s1 = val[tuple(idx_bs_s1)]
tar_list_s1.append(voxel_s1.reshape(-1, voxel_s1.shape[-2], voxel_s1.shape[-1]))
for j in split_tag_s2:
idx_bs_s2 = [np.where(j==batch_dict['voxel_coords'][:,0])]
voxel_s2 = val[tuple(idx_bs_s2)]
tar_list_s2.append(voxel_s2.reshape(-1, voxel_s2.shape[-2], voxel_s2.shape[-1]))
tar_dicts_s1[key] = np.concatenate(tar_list_s1, axis=0)
tar_dicts_s2[key] = np.concatenate(tar_list_s2, axis=0)
elif key in ['voxel_num_points']:
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
idx_bs_s1 = [np.where(i==batch_dict['voxel_coords'][:,0])]
voxel_s1 = val[tuple(idx_bs_s1)]
tar_list_s1.append(voxel_s1.reshape(-1))
for j in split_tag_s2:
idx_bs_s2 = [np.where(j==batch_dict['voxel_coords'][:,0])]
voxel_s2 = val[tuple(idx_bs_s2)]
tar_list_s2.append(voxel_s2.reshape(-1))
tar_dicts_s1[key] = np.concatenate(tar_list_s1, axis=0)
tar_dicts_s2[key] = np.concatenate(tar_list_s2, axis=0)
elif key in [ 'metadata' ]:
            # KITTI does not have the 'metadata' key, so it is assigned to the nuScenes branch
if "kitti" in batch_dict['db_flag']:
tar_dicts_s2[key] = val
else:
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
tar_list_s1.append(val[i])
for j in split_tag_s2:
tar_list_s2.append(val[j])
tar_dicts_s1[key] = tar_list_s1
tar_dicts_s2[key] = tar_list_s2
elif key in ['image_shape']:
            # Waymo and nuScenes do not have the 'image_shape' key;
            # assume that KITTI feeds into branch one
if "kitti" in batch_dict['db_flag']:
tar_list_s1 = []
for i in split_tag_s1:
tar_list_s1.append(val[i])
tar_dicts_s1[key] = tar_list_s1
elif key in ['batch_size']:
tar_dicts_s1[key] = len(split_tag_s1)
tar_dicts_s2[key] = len(split_tag_s2)
else:
continue
return tar_dicts_s1, tar_dicts_s2
def split_two_batch_dict_gpu(split_tag_s1, split_tag_s2, batch_dict):
tar_dicts_s1 = {}
tar_dicts_s2 = {}
for key, val in batch_dict.items():
if key in ['db_flag', 'frame_id', 'use_lead_xyz']:
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
tar_list_s1.append(val[i])
for j in split_tag_s2:
tar_list_s2.append(val[j])
tar_dicts_s1[key] = tar_list_s1
tar_dicts_s2[key] = tar_list_s2
elif key in ['points', 'voxel_coords', 'point_coords']:
tar_list_s1 = []
tar_list_s2 = []
bs_s1 = 0
bs_s2 = 0
for i in split_tag_s1:
point_s1 = val[i==val[:,0]]
point_s1[:,0] = bs_s1
bs_s1 += 1
tar_list_s1.append(point_s1)
for j in split_tag_s2:
point_s2 = val[j==val[:,0]]
point_s2[:,0] = bs_s2
bs_s2 += 1
tar_list_s2.append(point_s2)
tar_dicts_s1[key] = torch.cat(tar_list_s1, axis=0)
tar_dicts_s2[key] = torch.cat(tar_list_s2, axis=0)
elif key in ['gt_boxes']:
tar_dicts_s1[key] = val[split_tag_s1, :, :]
tar_dicts_s2[key] = val[split_tag_s2, :, :]
elif key in ['voxel_features']:
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
voxel_s1 = val[i==batch_dict['voxel_coords'][:,0]]
tar_list_s1.append(voxel_s1.reshape(-1, voxel_s1.shape[-1]))
for j in split_tag_s2:
voxel_s2 = val[j==batch_dict['voxel_coords'][:,0]]
tar_list_s2.append(voxel_s2.reshape(-1, voxel_s2.shape[-1]))
tar_dicts_s1[key] = torch.cat(tar_list_s1, axis=0)
tar_dicts_s2[key] = torch.cat(tar_list_s2, axis=0)
elif key in ['point_features', 'point_features_before_fusion']:
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
point_s1 = val[i==batch_dict['point_coords'][:,0]]
tar_list_s1.append(point_s1.reshape(-1, point_s1.shape[-1]))
for j in split_tag_s2:
point_s2 = val[j==batch_dict['point_coords'][:,0]]
tar_list_s2.append(point_s2.reshape(-1, point_s2.shape[-1]))
tar_dicts_s1[key] = torch.cat(tar_list_s1, axis=0)
tar_dicts_s2[key] = torch.cat(tar_list_s2, axis=0)
elif key in ['spatial_features', 'spatial_features_2d']:
tar_dicts_s1[key] = val[split_tag_s1, :, :, :]
tar_dicts_s2[key] = val[split_tag_s2, :, :, :]
elif key in [ 'metadata' ]:
            # KITTI and ONCE do not have the 'metadata' key; only the nuScenes branch receives it
            if "kitti" in batch_dict['db_flag'] or "once" in batch_dict['db_flag']:
tar_dicts_s1[key] = val
else:
tar_list_s1 = []
tar_list_s2 = []
for i in split_tag_s1:
tar_list_s1.append(val[i])
for j in split_tag_s2:
tar_list_s2.append(val[j])
tar_dicts_s1[key] = tar_list_s1
tar_dicts_s2[key] = tar_list_s2
elif key in ['image_shape']:
            # Waymo and nuScenes do not have the 'image_shape' key
if "kitti" in batch_dict['db_flag']:
# assume that kitti feeds into the Branch ONE
if batch_dict['db_flag'][0] == 'kitti':
tar_list_s1 = []
for i in split_tag_s1:
tar_list_s1.append(val)
tar_dicts_s1[key] = torch.cat(tar_list_s1, axis=0)
# assume that kitti feeds into the Branch TWO
else:
tar_list_s2 = []
for i in split_tag_s2:
tar_list_s2.append(val)
tar_dicts_s2[key] = torch.cat(tar_list_s2, axis=0)
elif key in ['multi_scale_3d_strides']:
# Since different datasets for the 'multi_scale_3d_strides' key have the same value,
# we directly copy this value into tar_dicts_s1, tar_dicts_s2
tar_dicts_s1[key] = val
tar_dicts_s2[key] = val
elif key in ['multi_scale_3d_features']:
# We need to transfer the sparse tensor into the dense tensor
sp_3d_s1 = {}
sp_3d_s2 = {}
for src_name in ['x_conv1', 'x_conv2', 'x_conv3', 'x_conv4']:
input_sp_tensor_s1, input_sp_tensor_s2 = split_two_spare_tensor(split_tag_s1, split_tag_s2, val[src_name])
sp_3d_s1[src_name] = input_sp_tensor_s1
sp_3d_s2[src_name] = input_sp_tensor_s2
tar_dicts_s1[key] = sp_3d_s1
tar_dicts_s2[key] = sp_3d_s2
elif key in ['batch_size']:
tar_dicts_s1[key] = len(split_tag_s1)
tar_dicts_s2[key] = len(split_tag_s2)
elif key in ['spatial_features_stride']:
tar_dicts_s1[key] = val
tar_dicts_s2[key] = val
else:
continue
return tar_dicts_s1, tar_dicts_s2
def split_batch_dict(source_one_name, batch_dict):
split_tag_s1 = []
split_tag_s2 = []
for k in range(batch_dict['batch_size']):
if source_one_name in batch_dict['db_flag'][k]:
split_tag_s1.append(k)
else:
split_tag_s2.append(k)
return split_tag_s1, split_tag_s2
def merge_two_batch_dict(batch_dict_1, batch_dict_2):
"""
To support a custom dataset, implement this function to merge two batch_dict (and labels)
from different datasets
Args:
batch_dict_1:
batch_dict_2:
Returns:
batch_merge_dict:
"""
batch_merge_dict = {}
batch_merge_dict['batch_size'] = batch_dict_1['batch_size'] + batch_dict_2['batch_size']
for key, val in batch_dict_1.items():
if key in ['batch_size']:
continue
elif key in ['db_flag', 'frame_id', 'use_lead_xyz']:
tar_list_merge = []
tar_list_merge = [val, batch_dict_2[key]]
batch_merge_dict[key] = np.concatenate(tar_list_merge, axis=0)
elif key in ['voxels', 'voxel_num_points']:
tar_list_merge = []
tar_list_merge = [val, batch_dict_2[key]]
batch_merge_dict[key] = np.concatenate(tar_list_merge, axis=0)
elif key in ['points', 'voxel_coords']:
tar_list_merge = []
batch_bias = batch_dict_1['batch_size']
val_2 = batch_dict_2[key]
val_2[:,0] = val_2[:,0] + batch_bias
tar_list_merge = [val, val_2]
batch_merge_dict[key] = np.concatenate(tar_list_merge, axis=0)
elif key in ['gt_boxes']:
max_gt_1 = max([len(x) for x in val])
max_gt_2 = max([len(x) for x in batch_dict_2[key]])
if max_gt_1 > max_gt_2:
val_2 = batch_dict_2['gt_boxes']
batch_gt_boxes3d = np.zeros((batch_dict_2['batch_size'], max_gt_1, val_2[0].shape[-1]), dtype=np.float32)
                # fill in the gt_boxes of batch_dict_2
for k in range(batch_dict_2['batch_size']):
batch_gt_boxes3d[k, :val_2[k].__len__(), :] = val_2[k]
tar_list_merge = []
tar_list_merge = [val, batch_gt_boxes3d]
batch_merge_dict[key] = np.concatenate(tar_list_merge, axis=0)
else:
val_2 = batch_dict_2['gt_boxes']
batch_gt_boxes3d = np.zeros((batch_dict_1['batch_size'], max_gt_2, val[0].shape[-1]), dtype=np.float32)
                # pad the gt_boxes of batch_dict_1 up to max_gt_2
for k in range(batch_dict_1['batch_size']):
batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
                tar_list_merge = [batch_gt_boxes3d, val_2]
batch_merge_dict[key] = np.concatenate(tar_list_merge, axis=0)
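        # e.g., if dict_1 pads to at most 30 boxes per frame and dict_2 to 42,
        # dict_1's gt_boxes are zero-padded from (B1, 30, C) to (B1, 42, C)
        # before concatenation, giving a merged tensor of shape (B1 + B2, 42, C)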
elif key in ['metadata', 'image_shape', 'road_plane', 'calib']:
            # Some of these keys exist in only one of the two dicts (e.g. KITTI has
            # no 'metadata', which belongs to the nuScenes branch), so keep each key
            # from whichever dict provides it
if key in batch_dict_2.keys():
                # both dicts carry this key
                tar_list_merge = [val, batch_dict_2[key]]
batch_merge_dict[key] = np.concatenate(tar_list_merge, axis=0)
else:
batch_merge_dict[key] = val
if 'metadata' in batch_dict_2.keys() and 'metadata' not in batch_dict_1.keys() :
batch_merge_dict['metadata'] = batch_dict_2['metadata']
if 'image_shape' in batch_dict_2.keys() and 'image_shape' not in batch_dict_1.keys() :
batch_merge_dict['image_shape'] = batch_dict_2['image_shape']
if 'road_plane' in batch_dict_2.keys() and 'road_plane' not in batch_dict_1.keys() :
batch_merge_dict['road_plane'] = batch_dict_2['road_plane']
if 'calib' in batch_dict_2.keys() and 'calib' not in batch_dict_1.keys() :
batch_merge_dict['calib'] = batch_dict_2['calib']
return batch_merge_dict
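# Usage sketch (hypothetical loaders): given two collated CPU batches from
# different datasets,
#   batch_merge = merge_two_batch_dict(batch_kitti, batch_nusc)
#   assert batch_merge['batch_size'] == batch_kitti['batch_size'] + batch_nusc['batch_size']
# the merged dict can then be moved to GPU and fed to a shared backbone.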
def merge_two_batch_dict_gpu(split_tag_s1, split_tag_s2, batch_dict):
"""
To support a custom dataset, implement this function to merge two batch_dict (and labels)
from different datasets
Args:
split_tag_s1:
split_tag_s2:
batch_dict:
Returns:
batch_merge_dict:
"""
raise NotImplementedError
| 25,895
| 34.966667
| 137
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/uni3d_norm_2_in.py
|
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torch
import itertools
class _UniNorm(Module):
def __init__(self, num_features, dataset_from_flag=1, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, voxel_coord=False):
super(_UniNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
self.voxel_coord = voxel_coord
self.dataset_from_flag = dataset_from_flag
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
if self.track_running_stats:
self.register_buffer('running_mean_source', torch.zeros(num_features))
self.register_buffer('running_mean_target', torch.zeros(num_features))
self.register_buffer('running_var_source', torch.ones(num_features))
self.register_buffer('running_var_target', torch.ones(num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
else:
self.register_parameter('running_mean_source', None)
self.register_parameter('running_mean_target', None)
self.register_parameter('running_var_source', None)
self.register_parameter('running_var_target', None)
self.reset_parameters()
def reset_parameters(self):
if self.track_running_stats:
self.running_mean_source.zero_()
self.running_mean_target.zero_()
self.running_var_source.fill_(1)
self.running_var_target.fill_(1)
if self.affine:
self.weight.data.uniform_()
self.bias.data.zero_()
def _check_input_dim(self, input):
        raise NotImplementedError
def _load_from_state_dict_from_pretrained_model(self, state_dict, prefix, metadata, strict, missing_keys, unexpected_keys, error_msgs):
r"""Copies parameters and buffers from :attr:`state_dict` into only
this module, but not its descendants. This is called on every submodule
in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this
        module in input :attr:`state_dict` is provided as :attr:`metadata`.
        For state dicts without metadata, :attr:`metadata` is empty.
Subclasses can achieve class-specific backward compatible loading using
the version number at `metadata.get("version", None)`.
.. note::
:attr:`state_dict` is not the same object as the input
:attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So
it can be modified.
Arguments:
state_dict (dict): a dict containing parameters and
persistent buffers.
prefix (str): the prefix for parameters and buffers used in this
module
            metadata (dict): a dict containing the metadata for this module.
strict (bool): whether to strictly enforce that the keys in
:attr:`state_dict` with :attr:`prefix` match the names of
parameters and buffers in this module
missing_keys (list of str): if ``strict=False``, add missing keys to
this list
unexpected_keys (list of str): if ``strict=False``, add unexpected
keys to this list
error_msgs (list of str): error messages should be added to this
list, and will be reported together in
:meth:`~torch.nn.Module.load_state_dict`
"""
local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())
local_state = {k: v.data for k, v in local_name_params if v is not None}
for name, param in local_state.items():
key = prefix + name
if 'source' in key or 'target' in key:
key = key[:-7]
print(key)
if key in state_dict:
input_param = state_dict[key]
if input_param.shape != param.shape:
# local shape should match the one in checkpoint
error_msgs.append('size mismatch for {}: copying a param of {} from checkpoint, '
'where the shape is {} in current model.'
.format(key, param.shape, input_param.shape))
continue
if isinstance(input_param, Parameter):
# backwards compatibility for serialized parameters
input_param = input_param.data
try:
param.copy_(input_param)
except Exception:
error_msgs.append('While copying the parameter named "{}", '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(key, param.size(), input_param.size()))
elif strict:
missing_keys.append(key)
def forward(self, input, voxel_coords):
self._check_input_dim(input)
if self.training: ## train mode
## Split the input into the source and target batches
## and calculate the corresponding variances
input_source_list = []
input_target_list = []
if self.voxel_coord:
bs = (voxel_coords[-1,0]+1) // 2
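                ## NOTE (assumption): the batch is stacked source-first, so the
                ## first bs batch indices are source samples and the rest are target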
for i in range(0, int(bs)):
input_source_list.append(input[voxel_coords[:,0]==i])
input_source = torch.cat(input_source_list, axis=0)
for j in range(int(bs), int(voxel_coords[-1,0]+1)):
input_target_list.append(input[voxel_coords[:,0]==j])
input_target = torch.cat(input_target_list, axis=0)
else:
batch_size = input.size()[0] // 2
input_source = input[:batch_size]
input_target = input[batch_size:]
## In order to remap the rescaled source or target features into
## the shared space, we use the shared self.weight and self.bias
z_source = F.batch_norm(
input_source, self.running_mean_source, self.running_var_source, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
z_target = F.batch_norm(
input_target, self.running_mean_target, self.running_var_target, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
z = torch.cat((z_source, z_target), dim=0)
# In order to address different dims
if input.dim() == 4:
input_source = input_source.permute(0,2,3,1).contiguous().view(-1,self.num_features)
input_target = input_target.permute(0,2,3,1).contiguous().view(-1,self.num_features)
            ## Obtain the channel-wise transferability.
            ## Assume that source and target features have different transferability along the channel dimension
cur_mean_source = torch.mean(input_source, dim=0)
cur_var_source = torch.var(input_source,dim=0)
cur_mean_target = torch.mean(input_target, dim=0)
cur_var_target = torch.var(input_target, dim=0)
## Global Statistic-level channel-wise transferability
dis = torch.abs(cur_mean_source / torch.sqrt(cur_var_source + self.eps) -
cur_mean_target / torch.sqrt(cur_var_target + self.eps))
## Convert the channel-wise transferability into the probability distribution
prob = 1.0 / (1.0 + dis)
alpha = self.num_features * prob / sum(prob)
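            ## Worked example (hypothetical numbers): a well-aligned channel has
            ## dis ~ 0 and prob ~ 1, while a poorly-aligned channel with dis = 4
            ## gets prob = 0.2; the rescaling above makes alpha average to 1 over
            ## the C channels, up-weighting transferable channels and
            ## down-weighting the rest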
if input.dim() == 2:
alpha = alpha.view(1, self.num_features)
elif input.dim() == 4:
alpha = alpha.view(1, self.num_features, 1, 1)
## Attention
return z * (1 + alpha.detach())
else: ##test mode
if self.dataset_from_flag == 1:
z = F.batch_norm(
input, self.running_mean_source, self.running_var_source, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
dis = torch.abs(self.running_mean_source / torch.sqrt(self.running_var_source + self.eps)
- self.running_mean_target / torch.sqrt(self.running_var_target + self.eps))
prob = 1.0 / (1.0 + dis)
alpha = self.num_features * prob / sum(prob)
elif self.dataset_from_flag == 2:
z = F.batch_norm(
input, self.running_mean_target, self.running_var_target, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
dis = torch.abs(self.running_mean_source / torch.sqrt(self.running_var_source + self.eps)
- self.running_mean_target / torch.sqrt(self.running_var_target + self.eps))
prob = 1.0 / (1.0 + dis)
alpha = self.num_features * prob / sum(prob)
if input.dim() == 2:
alpha = alpha.view(1, self.num_features)
elif input.dim() == 4:
alpha = alpha.view(1, self.num_features, 1, 1)
return z * (1 + alpha.detach())
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
class UniNorm1d(_UniNorm):
r"""Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D
inputs with optional additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm1d(100, affine=False)
>>> input = torch.randn(20, 100)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
class UniNorm2d(_UniNorm):
r"""Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm2d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class UniNorm3d(_UniNorm):
r"""Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization
or Spatio-temporal Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm3d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
| 19,844
| 46.362768
| 140
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/transform_utils.py
|
import math
import torch
try:
from kornia.geometry.conversions import (
convert_points_to_homogeneous,
convert_points_from_homogeneous,
)
except ImportError:
    # kornia is only required by CaDDN
    pass
def project_to_image(project, points):
"""
Project points to image
Args:
project [torch.tensor(..., 3, 4)]: Projection matrix
points [torch.Tensor(..., 3)]: 3D points
Returns:
points_img [torch.Tensor(..., 2)]: Points in image
points_depth [torch.Tensor(...)]: Depth of each point
"""
# Reshape tensors to expected shape
points = convert_points_to_homogeneous(points)
points = points.unsqueeze(dim=-1)
project = project.unsqueeze(dim=1)
# Transform points to image and get depths
points_t = project @ points
points_t = points_t.squeeze(dim=-1)
points_img = convert_points_from_homogeneous(points_t)
points_depth = points_t[..., -1] - project[..., 2, 3]
return points_img, points_depth
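# Shape sketch (assumed batched inputs): project of shape (B, 3, 4) and points
# of shape (B, N, 3) yield points_img of shape (B, N, 2) and per-point depths
# of shape (B, N).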
def normalize_coords(coords, shape):
"""
Normalize coordinates of a grid between [-1, 1]
Args:
coords: (..., 3), Coordinates in grid
shape: (3), Grid shape
Returns:
norm_coords: (.., 3), Normalized coordinates in grid
"""
min_n = -1
max_n = 1
shape = torch.flip(shape, dims=[0]) # Reverse ordering of shape
# Subtract 1 since pixel indexing from [0, shape - 1]
norm_coords = coords / (shape - 1) * (max_n - min_n) + min_n
return norm_coords
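# A quick sanity check (assumed grid shape (D, H, W) = (4, 200, 176) with
# coords ordered (x, y, z)):
#   coords = torch.tensor([[0., 0., 0.], [175., 199., 3.]])
#   shape = torch.tensor([4, 200, 176])
#   normalize_coords(coords, shape)  # -> [[-1., -1., -1.], [1., 1., 1.]]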
def bin_depths(depth_map, mode, depth_min, depth_max, num_bins, target=False):
"""
Converts depth map into bin indices
Args:
depth_map: (H, W), Depth Map
        mode: string, Discretization mode (see https://arxiv.org/pdf/2005.13423.pdf for details)
            UD: Uniform discretization
            LID: Linear increasing discretization
            SID: Spacing increasing discretization
        depth_min: float, Minimum depth value
        depth_max: float, Maximum depth value
        num_bins: int, Number of depth bins
        target: bool, Whether the depth bin indices will be used as a target tensor for loss computation
Returns:
indices: (H, W), Depth bin indices
"""
if mode == "UD":
bin_size = (depth_max - depth_min) / num_bins
indices = ((depth_map - depth_min) / bin_size)
elif mode == "LID":
bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins))
indices = -0.5 + 0.5 * torch.sqrt(1 + 8 * (depth_map - depth_min) / bin_size)
elif mode == "SID":
indices = num_bins * (torch.log(1 + depth_map) - math.log(1 + depth_min)) / \
(math.log(1 + depth_max) - math.log(1 + depth_min))
else:
raise NotImplementedError
if target:
        # Remove indices outside of bounds
mask = (indices < 0) | (indices > num_bins) | (~torch.isfinite(indices))
indices[mask] = num_bins
# Convert to integer
indices = indices.type(torch.int64)
return indices
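# Example (assumed CaDDN-style KITTI settings): with mode='UD', depth_min=2.0,
# depth_max=46.8 and num_bins=80, bin_size = 0.56, so a pixel at depth 10.4 m
# gives index (10.4 - 2.0) / 0.56 = 15.0 (truncated to 15 when target=True).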
| 3,092
| 32.619565
| 104
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/calibration_kitti.py
|
import numpy as np
def get_calib_from_file(calib_file, oss_flag):
    if not oss_flag:
        with open(calib_file) as f:
            lines = f.readlines()
    else:
        # calib_file is an already-opened text buffer (e.g., streamed from OSS)
        lines = calib_file.readlines()
    obj = lines[2].strip().split(' ')[1:]
    P2 = np.array(obj, dtype=np.float32)
    obj = lines[3].strip().split(' ')[1:]
    P3 = np.array(obj, dtype=np.float32)
    obj = lines[4].strip().split(' ')[1:]
    R0 = np.array(obj, dtype=np.float32)
    obj = lines[5].strip().split(' ')[1:]
    Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
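# NOTE: the fixed line indices above assume the standard KITTI calib.txt layout
# (P0, P1, P2, P3, R0_rect, Tr_velo_to_cam, Tr_imu_to_velo), each line formatted
# as "<key>: v1 v2 ...", so lines[2] holds P2 and lines[5] holds Tr_velo_to_cam.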
class Calibration(object):
def __init__(self, calib_file, oss_flag):
if not isinstance(calib_file, dict):
calib = get_calib_from_file(calib_file, oss_flag)
else:
calib = calib_file
self.P2 = calib['P2'] # 3 x 4
self.R0 = calib['R0'] # 3 x 3
self.V2C = calib['Tr_velo2cam'] # 3 x 4
# Camera intrinsics and extrinsics
self.cu = self.P2[0, 2]
self.cv = self.P2[1, 2]
self.fu = self.P2[0, 0]
self.fv = self.P2[1, 1]
self.tx = self.P2[0, 3] / (-self.fu)
self.ty = self.P2[1, 3] / (-self.fv)
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
return pts_hom
def rect_to_lidar(self, pts_rect):
"""
        :param pts_rect: (N, 3)
        :return pts_lidar: (N, 3)
"""
pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)
R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
R0_ext[3, 3] = 1
V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
V2C_ext[3, 3] = 1
pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
return pts_lidar[:, 0:3]
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
# pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
return pts_rect
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
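    # Projection chain sketch: LiDAR (x, y, z) --V2C, R0--> rect camera frame
    # --P2 + perspective division--> image plane, e.g.
    #   pts_img, pts_depth = calib.lidar_to_img(points[:, 0:3])
    # points behind the camera come out with negative pts_depth and should be
    # filtered by the caller.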
def img_to_rect(self, u, v, depth_rect):
"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""
x = ((u - self.cu) * depth_rect) / self.fu + self.tx
y = ((v - self.cv) * depth_rect) / self.fv + self.ty
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
return pts_rect
def corners3d_to_img_boxes(self, corners3d):
"""
:param corners3d: (N, 8, 3) corners in rect coordinate
:return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
:return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
"""
sample_num = corners3d.shape[0]
corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
return boxes, boxes_corner
| 5,026
| 35.165468
| 116
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/uni3d_norm_parallel.py
|
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torch
import itertools
class _UniNorm(Module):
def __init__(self, num_features, dataset_from_flag=1, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, voxel_coord=False):
super(_UniNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
self.voxel_coord = voxel_coord
self.dataset_from_flag = dataset_from_flag
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
if self.track_running_stats:
self.register_buffer('running_mean_source', torch.zeros(num_features))
self.register_buffer('running_mean_target', torch.zeros(num_features))
self.register_buffer('running_var_source', torch.ones(num_features))
self.register_buffer('running_var_target', torch.ones(num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
else:
self.register_parameter('running_mean_source', None)
self.register_parameter('running_mean_target', None)
self.register_parameter('running_var_source', None)
self.register_parameter('running_var_target', None)
self.reset_parameters()
def reset_parameters(self):
if self.track_running_stats:
self.running_mean_source.zero_()
self.running_mean_target.zero_()
self.running_var_source.fill_(1)
self.running_var_target.fill_(1)
if self.affine:
self.weight.data.uniform_()
self.bias.data.zero_()
def _check_input_dim(self, input):
        raise NotImplementedError
def _load_from_state_dict_from_pretrained_model(self, state_dict, prefix, metadata, strict, missing_keys, unexpected_keys, error_msgs):
r"""Copies parameters and buffers from :attr:`state_dict` into only
this module, but not its descendants. This is called on every submodule
in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this
        module in input :attr:`state_dict` is provided as :attr:`metadata`.
        For state dicts without metadata, :attr:`metadata` is empty.
Subclasses can achieve class-specific backward compatible loading using
the version number at `metadata.get("version", None)`.
.. note::
:attr:`state_dict` is not the same object as the input
:attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So
it can be modified.
Arguments:
state_dict (dict): a dict containing parameters and
persistent buffers.
prefix (str): the prefix for parameters and buffers used in this
module
            metadata (dict): a dict containing the metadata for this module.
strict (bool): whether to strictly enforce that the keys in
:attr:`state_dict` with :attr:`prefix` match the names of
parameters and buffers in this module
missing_keys (list of str): if ``strict=False``, add missing keys to
this list
unexpected_keys (list of str): if ``strict=False``, add unexpected
keys to this list
error_msgs (list of str): error messages should be added to this
list, and will be reported together in
:meth:`~torch.nn.Module.load_state_dict`
"""
local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())
local_state = {k: v.data for k, v in local_name_params if v is not None}
for name, param in local_state.items():
key = prefix + name
if 'source' in key or 'target' in key:
key = key[:-7]
print(key)
if key in state_dict:
input_param = state_dict[key]
if input_param.shape != param.shape:
# local shape should match the one in checkpoint
error_msgs.append('size mismatch for {}: copying a param of {} from checkpoint, '
'where the shape is {} in current model.'
.format(key, param.shape, input_param.shape))
continue
if isinstance(input_param, Parameter):
# backwards compatibility for serialized parameters
input_param = input_param.data
try:
param.copy_(input_param)
except Exception:
error_msgs.append('While copying the parameter named "{}", '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(key, param.size(), input_param.size()))
elif strict:
missing_keys.append(key)
def forward(self, input):
self._check_input_dim(input)
if self.training : ## train mode
## Split the input into the source and target batches
## and calculate the corresponding variances
batch_size = input.size()[0] // 2
input_source = input[:batch_size]
input_target = input[batch_size:]
## In order to remap the rescaled source or target features into
## the shared space, we use the shared self.weight and self.bias
z_source = F.batch_norm(
input_source, self.running_mean_source, self.running_var_source, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
z_target = F.batch_norm(
input_target, self.running_mean_target, self.running_var_target, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
z = torch.cat((z_source, z_target), dim=0)
# In order to address different dims
if input.dim() == 4: ## UniNorm2d
input_source = input_source.permute(0,2,3,1).contiguous().view(-1,self.num_features)
input_target = input_target.permute(0,2,3,1).contiguous().view(-1,self.num_features)
cur_mean_source = torch.mean(input_source, dim=0)
cur_var_source = torch.var(input_source,dim=0)
cur_mean_target = torch.mean(input_target, dim=0)
cur_var_target = torch.var(input_target, dim=0)
            ## Obtain the channel-wise transferability.
            ## Assume that source and target features have different transferability along the channel dimension
## Global Statistic-level channel-wise transferability
dis = torch.abs(cur_mean_source / torch.sqrt(cur_var_source + self.eps) -
cur_mean_target / torch.sqrt(cur_var_target + self.eps))
## Convert the channel-wise transferability into the probability distribution
prob = 1.0 / (1.0 + dis)
alpha = self.num_features * prob / sum(prob)
# # Calculate the Cov Matrix
# # Cov Matrix
# if input_source.shape[0] == input_target.shape[0]:
# cov_matrix_src_tar = torch.matmul((input_source - cur_mean_source).T, (input_target - cur_mean_target)) / (input_source.shape[0]-1)
# # cov_vector = self.vote_cov(cov_matrix_src_tar)
# cov_vector = torch.diag(cov_matrix_src_tar)
# cov_vector = cov_vector.view(-1)
# alpha_cov = self.num_features * cov_vector / sum(cov_vector)
# alpha = (alpha + alpha_cov) / 2
if input.dim() == 2:
alpha = alpha.view(1, self.num_features)
elif input.dim() == 4:
alpha = alpha.view(1, self.num_features, 1, 1)
## Attention
return z * (1 + alpha.detach())
else: ##test mode
# for testing using multiple datasets in parallel,
# we need to split the input into the source and target batches
# and calculate the corresponding variances
batch_size = input.size()[0] // 2
input_source = input[:batch_size]
input_target = input[batch_size:]
assert len(input_source) != 0
assert len(input_target) != 0
# for source domain scaling:
z_source = F.batch_norm(
input_source, self.running_mean_source, self.running_var_source, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
# for target domain scaling:
z_target = F.batch_norm(
input_target, self.running_mean_target, self.running_var_target, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
dis = torch.abs(self.running_mean_source / torch.sqrt(self.running_var_source + self.eps)
- self.running_mean_target / torch.sqrt(self.running_var_target + self.eps))
prob = 1.0 / (1.0 + dis)
alpha = self.num_features * prob / sum(prob)
z = torch.cat((z_source, z_target), dim=0)
if input.dim() == 2:
alpha = alpha.view(1, self.num_features)
elif input.dim() == 4:
alpha = alpha.view(1, self.num_features, 1, 1)
return z * (1 + alpha.detach())
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
class UniNorm1d(_UniNorm):
r"""Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D
inputs with optional additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm1d(100, affine=False)
>>> input = torch.randn(20, 100)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
class UniNorm2d(_UniNorm):
r"""Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm2d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class UniNorm3d(_UniNorm):
r"""Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization
or Spatio-temporal Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm3d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
| 20,022
| 46.112941
| 149
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/uni3d_norm.py
|
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torch
import itertools
class _UniNorm(Module):
def __init__(self, num_features, dataset_from_flag=1, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, voxel_coord=False):
super(_UniNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
self.voxel_coord = voxel_coord
self.dataset_from_flag = dataset_from_flag
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
if self.track_running_stats:
self.register_buffer('running_mean_source', torch.zeros(num_features))
self.register_buffer('running_mean_target', torch.zeros(num_features))
self.register_buffer('running_var_source', torch.ones(num_features))
self.register_buffer('running_var_target', torch.ones(num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
else:
self.register_parameter('running_mean_source', None)
self.register_parameter('running_mean_target', None)
self.register_parameter('running_var_source', None)
self.register_parameter('running_var_target', None)
self.reset_parameters()
def reset_parameters(self):
if self.track_running_stats:
self.running_mean_source.zero_()
self.running_mean_target.zero_()
self.running_var_source.fill_(1)
self.running_var_target.fill_(1)
if self.affine:
self.weight.data.uniform_()
self.bias.data.zero_()
def _check_input_dim(self, input):
        raise NotImplementedError
def _load_from_state_dict_from_pretrained_model(self, state_dict, prefix, metadata, strict, missing_keys, unexpected_keys, error_msgs):
r"""Copies parameters and buffers from :attr:`state_dict` into only
this module, but not its descendants. This is called on every submodule
in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this
        module in input :attr:`state_dict` is provided as :attr:`metadata`.
        For state dicts without metadata, :attr:`metadata` is empty.
Subclasses can achieve class-specific backward compatible loading using
the version number at `metadata.get("version", None)`.
.. note::
:attr:`state_dict` is not the same object as the input
:attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So
it can be modified.
Arguments:
state_dict (dict): a dict containing parameters and
persistent buffers.
prefix (str): the prefix for parameters and buffers used in this
module
            metadata (dict): a dict containing the metadata for this module.
strict (bool): whether to strictly enforce that the keys in
:attr:`state_dict` with :attr:`prefix` match the names of
parameters and buffers in this module
missing_keys (list of str): if ``strict=False``, add missing keys to
this list
unexpected_keys (list of str): if ``strict=False``, add unexpected
keys to this list
error_msgs (list of str): error messages should be added to this
list, and will be reported together in
:meth:`~torch.nn.Module.load_state_dict`
"""
local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())
local_state = {k: v.data for k, v in local_name_params if v is not None}
for name, param in local_state.items():
key = prefix + name
if 'source' in key or 'target' in key:
key = key[:-7]
print(key)
if key in state_dict:
input_param = state_dict[key]
if input_param.shape != param.shape:
# local shape should match the one in checkpoint
error_msgs.append('size mismatch for {}: copying a param of {} from checkpoint, '
'where the shape is {} in current model.'
.format(key, param.shape, input_param.shape))
continue
if isinstance(input_param, Parameter):
# backwards compatibility for serialized parameters
input_param = input_param.data
try:
param.copy_(input_param)
except Exception:
error_msgs.append('While copying the parameter named "{}", '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(key, param.size(), input_param.size()))
elif strict:
missing_keys.append(key)
def forward(self, input):
self._check_input_dim(input)
if self.training : ## train mode
## Split the input into the source and target batches
## and calculate the corresponding variances
batch_size = input.size()[0] // 2
input_source = input[:batch_size]
input_target = input[batch_size:]
## In order to remap the rescaled source or target features into
## the shared space, we use the shared self.weight and self.bias
z_source = F.batch_norm(
input_source, self.running_mean_source, self.running_var_source, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
z_target = F.batch_norm(
input_target, self.running_mean_target, self.running_var_target, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
z = torch.cat((z_source, z_target), dim=0)
# In order to address different dims
if input.dim() == 4:
input_source = input_source.permute(0,2,3,1).contiguous().view(-1,self.num_features)
input_target = input_target.permute(0,2,3,1).contiguous().view(-1,self.num_features)
cur_mean_source = torch.mean(input_source, dim=0)
cur_var_source = torch.var(input_source,dim=0)
cur_mean_target = torch.mean(input_target, dim=0)
cur_var_target = torch.var(input_target, dim=0)
            ## Obtain the channel-wise transferability.
            ## Assume that source and target features have different transferability along the channel dimension
## Global Statistic-level channel-wise transferability
dis = torch.abs(cur_mean_source / torch.sqrt(cur_var_source + self.eps) -
cur_mean_target / torch.sqrt(cur_var_target + self.eps))
## Convert the channel-wise transferability into the probability distribution
prob = 1.0 / (1.0 + dis)
alpha = self.num_features * prob / sum(prob)
# # Calculate the Cov Matrix
# # Cov Matrix
# if input_source.shape[0] == input_target.shape[0]:
# cov_matrix_src_tar = torch.matmul((input_source - cur_mean_source).T, (input_target - cur_mean_target)) / (input_source.shape[0]-1)
# # cov_vector = self.vote_cov(cov_matrix_src_tar)
# cov_vector = torch.diag(cov_matrix_src_tar)
# cov_vector = cov_vector.view(-1)
# alpha_cov = self.num_features * cov_vector / sum(cov_vector)
# alpha = (alpha + alpha_cov) / 2
if input.dim() == 2:
alpha = alpha.view(1, self.num_features)
elif input.dim() == 4:
alpha = alpha.view(1, self.num_features, 1, 1)
## Attention
return z * (1 + alpha.detach())
else: ##test mode
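            ## At test time the whole batch comes from a single dataset, so
            ## dataset_from_flag selects which set of running statistics to use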
if self.dataset_from_flag == 1:
z = F.batch_norm(
input, self.running_mean_source, self.running_var_source, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
dis = torch.abs(self.running_mean_source / torch.sqrt(self.running_var_source + self.eps)
- self.running_mean_target / torch.sqrt(self.running_var_target + self.eps))
prob = 1.0 / (1.0 + dis)
alpha = self.num_features * prob / sum(prob)
# # Add Cov Matrix:
# if input.dim() == 4:
# input = input.permute(0,2,3,1).contiguous().view(-1,self.num_features)
# cov_matrix_src_tar = torch.matmul((input - self.running_mean_source).T, (input - self.running_mean_source)) / (input.shape[0]-1)
# cov_vector = torch.diag(cov_matrix_src_tar)
# cov_vector = cov_vector.view(-1)
# alpha_cov = self.num_features * cov_vector / sum(cov_vector)
# alpha = (alpha + alpha_cov) / 2
elif self.dataset_from_flag == 2:
z = F.batch_norm(
input, self.running_mean_target, self.running_var_target, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
dis = torch.abs(self.running_mean_source / torch.sqrt(self.running_var_source + self.eps)
- self.running_mean_target / torch.sqrt(self.running_var_target + self.eps))
prob = 1.0 / (1.0 + dis)
alpha = self.num_features * prob / sum(prob)
# # Add Cov Matrix:
# if input.dim() == 4:
# input = input.permute(0,2,3,1).contiguous().view(-1,self.num_features)
# cov_matrix_src_tar = torch.matmul((input - self.running_mean_target).T, (input - self.running_mean_target)) / (input.shape[0]-1)
# cov_vector = torch.diag(cov_matrix_src_tar)
# cov_vector = cov_vector.view(-1)
# alpha_cov = self.num_features * cov_vector / sum(cov_vector)
# alpha = (alpha + alpha_cov) / 2
if input.dim() == 2:
alpha = alpha.view(1, self.num_features)
elif input.dim() == 4:
alpha = alpha.view(1, self.num_features, 1, 1)
return z * (1 + alpha.detach())
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(**self.__dict__)
class UniNorm1d(_UniNorm):
r"""Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D
inputs with optional additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm1d(100, affine=False)
>>> input = torch.randn(20, 100)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
class UniNorm2d(_UniNorm):
r"""Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm2d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm2d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class UniNorm3d(_UniNorm):
r"""Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size).
By default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization
or Spatio-temporal Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm3d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm3d(100, affine=False)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
.. _`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`:
https://arxiv.org/abs/1502.03167
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
.format(input.dim()))
| 21,012
| 46.648526
| 149
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/utils/self_training_utils.py
|
import torch
import os
import glob
import tqdm
import numpy as np
import torch.distributed as dist
from pcdet.config import cfg
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils, commu_utils, memory_ensemble_utils
import pickle as pkl
import re
# PSEUDO_LABELS = {}
from multiprocessing import Manager
PSEUDO_LABELS = Manager().dict()  # shared across processes for multi-GPU training
NEW_PSEUDO_LABELS = {}
def check_already_exsit_pseudo_label(ps_label_dir, start_epoch):
    """
    When resuming training, use this to directly load pseudo labels
    from an existing result pkl.
    If one exists, load the latest result pkl into PSEUDO_LABELS;
    otherwise return None.
    Args:
        ps_label_dir: dir that stores the pseudo label result pkls.
        start_epoch: start epoch
    Returns:
        path of the loaded pkl, or None if no matching pkl exists
    """
# support init ps_label given by cfg
if start_epoch == 0 and cfg.SELF_TRAIN.get('INIT_PS', None):
if os.path.exists(cfg.SELF_TRAIN.INIT_PS):
            print("********LOADING PS FROM:", cfg.SELF_TRAIN.INIT_PS)
init_ps_label = pkl.load(open(cfg.SELF_TRAIN.INIT_PS, 'rb'))
PSEUDO_LABELS.update(init_ps_label)
if cfg.LOCAL_RANK == 0:
ps_path = os.path.join(ps_label_dir, "ps_label_e0.pkl")
with open(ps_path, 'wb') as f:
pkl.dump(PSEUDO_LABELS, f)
return cfg.SELF_TRAIN.INIT_PS
ps_label_list = glob.glob(os.path.join(ps_label_dir, 'ps_label_e*.pkl'))
    if len(ps_label_list) == 0:
        return None
ps_label_list.sort(key=os.path.getmtime, reverse=True)
for cur_pkl in ps_label_list:
num_epoch = re.findall('ps_label_e(.*).pkl', cur_pkl)
assert len(num_epoch) == 1
# load pseudo label and return
if int(num_epoch[0]) <= start_epoch:
latest_ps_label = pkl.load(open(cur_pkl, 'rb'))
PSEUDO_LABELS.update(latest_ps_label)
return cur_pkl
return None
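# A minimal resume sketch (hypothetical directory/epoch values; assumes the
# global cfg has already been loaded): on restart, try to reuse the newest
# on-disk pseudo labels before regenerating them.
def _demo_resume_pseudo_labels(ps_label_dir='output/ps_label', start_epoch=5):
    loaded_pkl = check_already_exsit_pseudo_label(ps_label_dir, start_epoch)
    if loaded_pkl is not None:
        print('resumed %d pseudo-labeled frames from %s' % (len(PSEUDO_LABELS), loaded_pkl))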
def save_pseudo_label_epoch(model, val_loader, rank, leave_pbar, ps_label_dir, cur_epoch):
"""
Generate pseudo label with given model.
Args:
model: model to predict result for pseudo label
val_loader: data_loader to predict pseudo label
rank: process rank
leave_pbar: tqdm bar controller
ps_label_dir: dir to save pseudo label
cur_epoch
"""
val_dataloader_iter = iter(val_loader)
total_it_each_epoch = len(val_loader)
if rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar,
desc='generate_ps_e%d' % cur_epoch, dynamic_ncols=True)
pos_ps_meter = common_utils.AverageMeter()
ign_ps_meter = common_utils.AverageMeter()
    # Since the model is in eval mode, object-level data augmentations such as
    # 'random_object_rotation', 'random_object_scaling', 'normalize_object_size' are not applied
model.eval()
for cur_it in range(total_it_each_epoch):
try:
target_batch = next(val_dataloader_iter)
except StopIteration:
            val_dataloader_iter = iter(val_loader)
            target_batch = next(val_dataloader_iter)
        # generate pseudo boxes for target_batch with the current (frozen) model
with torch.no_grad():
load_data_to_gpu(target_batch)
pred_dicts, ret_dict = model(target_batch)
pos_ps_batch, ign_ps_batch = save_pseudo_label_batch(
target_batch, pred_dicts=pred_dicts,
need_update=(cfg.SELF_TRAIN.get('MEMORY_ENSEMBLE', None) and
cfg.SELF_TRAIN.MEMORY_ENSEMBLE.ENABLED and
cur_epoch > 0)
)
# log to console and tensorboard
pos_ps_meter.update(pos_ps_batch)
ign_ps_meter.update(ign_ps_batch)
disp_dict = {'pos_ps_box': "{:.3f}({:.3f})".format(pos_ps_meter.val, pos_ps_meter.avg),
'ign_ps_box': "{:.3f}({:.3f})".format(ign_ps_meter.val, ign_ps_meter.avg)}
if rank == 0:
pbar.update()
pbar.set_postfix(disp_dict)
pbar.refresh()
if rank == 0:
pbar.close()
gather_and_dump_pseudo_label_result(rank, ps_label_dir, cur_epoch)
print(len(PSEUDO_LABELS))
def gather_and_dump_pseudo_label_result(rank, ps_label_dir, cur_epoch):
commu_utils.synchronize()
if dist.is_initialized():
part_pseudo_labels_list = commu_utils.all_gather(NEW_PSEUDO_LABELS)
new_pseudo_label_dict = {}
for pseudo_labels in part_pseudo_labels_list:
new_pseudo_label_dict.update(pseudo_labels)
NEW_PSEUDO_LABELS.update(new_pseudo_label_dict)
# dump new pseudo label to given dir
if rank == 0:
ps_path = os.path.join(ps_label_dir, "ps_label_e{}.pkl".format(cur_epoch))
with open(ps_path, 'wb') as f:
pkl.dump(NEW_PSEUDO_LABELS, f)
commu_utils.synchronize()
PSEUDO_LABELS.clear()
PSEUDO_LABELS.update(NEW_PSEUDO_LABELS)
NEW_PSEUDO_LABELS.clear()
def save_pseudo_label_batch(input_dict,
pred_dicts=None,
need_update=True):
"""
Save pseudo label for give batch.
If model is given, use model to inference pred_dicts,
otherwise, directly use given pred_dicts.
Args:
input_dict: batch data read from dataloader
pred_dicts: Dict if not given model.
predict results to be generated pseudo label and saved
need_update: Bool.
If set to true, use consistency matching to update pseudo label
"""
pos_ps_meter = common_utils.AverageMeter()
ign_ps_meter = common_utils.AverageMeter()
batch_size = len(pred_dicts)
for b_idx in range(batch_size):
pred_cls_scores = pred_iou_scores = None
if 'pred_boxes' in pred_dicts[b_idx]:
            # There exist predicted boxes passing the self-training score threshold
pred_boxes = pred_dicts[b_idx]['pred_boxes'].detach().cpu().numpy()
pred_labels = pred_dicts[b_idx]['pred_labels'].detach().cpu().numpy()
pred_scores = pred_dicts[b_idx]['pred_scores'].detach().cpu().numpy()
if 'pred_cls_scores' in pred_dicts[b_idx]:
pred_cls_scores = pred_dicts[b_idx]['pred_cls_scores'].detach().cpu().numpy()
if 'pred_iou_scores' in pred_dicts[b_idx]:
pred_iou_scores = pred_dicts[b_idx]['pred_iou_scores'].detach().cpu().numpy()
# remove boxes under negative threshold
if cfg.SELF_TRAIN.get('NEG_THRESH', None):
labels_remove_scores = np.array(cfg.SELF_TRAIN.NEG_THRESH)[pred_labels - 1]
remain_mask = pred_scores >= labels_remove_scores
pred_labels = pred_labels[remain_mask]
pred_scores = pred_scores[remain_mask]
pred_boxes = pred_boxes[remain_mask]
if 'pred_cls_scores' in pred_dicts[b_idx]:
pred_cls_scores = pred_cls_scores[remain_mask]
if 'pred_iou_scores' in pred_dicts[b_idx]:
pred_iou_scores = pred_iou_scores[remain_mask]
labels_ignore_scores = np.array(cfg.SELF_TRAIN.SCORE_THRESH)[pred_labels - 1]
ignore_mask = pred_scores < labels_ignore_scores
pred_labels[ignore_mask] = -1
gt_box = np.concatenate((pred_boxes,
pred_labels.reshape(-1, 1),
pred_scores.reshape(-1, 1)), axis=1)
else:
            # no predicted boxes pass the self-training score threshold
gt_box = np.zeros((0, 9), dtype=np.float32)
gt_infos = {
'gt_boxes': gt_box,
'cls_scores': pred_cls_scores,
'iou_scores': pred_iou_scores,
'memory_counter': np.zeros(gt_box.shape[0])
}
# record pseudo label to pseudo label dict
if need_update:
ensemble_func = getattr(memory_ensemble_utils, cfg.SELF_TRAIN.MEMORY_ENSEMBLE.NAME)
gt_infos = ensemble_func(PSEUDO_LABELS[input_dict['frame_id'][b_idx]],
gt_infos, cfg.SELF_TRAIN.MEMORY_ENSEMBLE)
if gt_infos['gt_boxes'].shape[0] > 0:
ign_ps_meter.update((gt_infos['gt_boxes'][:, 7] < 0).sum())
else:
ign_ps_meter.update(0)
pos_ps_meter.update(gt_infos['gt_boxes'].shape[0] - ign_ps_meter.val)
NEW_PSEUDO_LABELS[input_dict['frame_id'][b_idx]] = gt_infos
return pos_ps_meter.avg, ign_ps_meter.avg
def load_ps_label(frame_id):
"""
:param frame_id: file name of pseudo label
    :return gt_box: loaded gt boxes (N, 9) [x, y, z, dx, dy, dz, heading, label, scores]
"""
if frame_id in PSEUDO_LABELS:
gt_box = PSEUDO_LABELS[frame_id]['gt_boxes']
else:
raise ValueError('Cannot find pseudo label for frame: %s' % frame_id)
return gt_box
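# A minimal consumption sketch (hypothetical frame id): during self-training a
# dataset would fetch the cached boxes per frame; label == -1 marks ignored boxes.
#   gt_boxes = load_ps_label('000123')  # (N, 9)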
| 8,934
| 35.769547
| 95
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/spconv_utils.py
|
from typing import Set
try:
import spconv.pytorch as spconv
except ImportError:
    import spconv
import torch.nn as nn
def find_all_spconv_keys(model: nn.Module, prefix="") -> Set[str]:
"""
Finds all spconv keys that need to have weight's transposed
"""
found_keys: Set[str] = set()
for name, child in model.named_children():
new_prefix = f"{prefix}.{name}" if prefix != "" else name
if isinstance(child, spconv.conv.SparseConvolution):
new_prefix = f"{new_prefix}.weight"
found_keys.add(new_prefix)
found_keys.update(find_all_spconv_keys(child, prefix=new_prefix))
return found_keys
def replace_feature(out, new_features):
if "replace_feature" in out.__dir__():
# spconv 2.x behaviour
return out.replace_feature(new_features)
else:
out.features = new_features
return out
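# A minimal sketch (hypothetical arguments) of the intended call pattern:
# swapping activated features into a spconv SparseConvTensor through this
# helper keeps the same code working on both spconv 1.x and 2.x.
def _demo_apply_activation(sp_tensor, activation):
    # sp_tensor: a spconv SparseConvTensor; activation: e.g. torch.relu
    return replace_feature(sp_tensor, activation(sp_tensor.features))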
| 896
| 24.628571
| 73
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/active_learning_utils.py
|
import io
import os
import tqdm
import pickle
import random
import torch
import numpy as np
import torch.distributed as dist
import torch.nn.functional as F
from pathlib import Path
from pcdet.models import load_data_to_gpu
from pcdet.utils import common_utils, commu_utils
def active_evaluate(model, target_loader, rank):
if rank == 0:
print("======> Active Evaluate <======")
dataloader_iter_tar = iter(target_loader)
total_iter_tar = len(dataloader_iter_tar)
frame_scores = []
return_scores = []
model.eval()
if rank == 0:
pbar = tqdm.tqdm(total=total_iter_tar, leave=False, desc='active_evaluate', dynamic_ncols=True)
for cur_it in range(total_iter_tar):
try:
batch = next(dataloader_iter_tar)
except StopIteration:
dataloader_iter_tar = iter(target_loader)
batch = next(dataloader_iter_tar)
print('new iter')
with torch.no_grad():
load_data_to_gpu(batch)
forward_args = {
'mode': 'active_evaluate'
}
sample_score = model(batch, **forward_args)
frame_scores.append(sample_score)
if rank == 0:
pbar.update()
pbar.refresh()
if rank == 0:
pbar.close()
gather_scores = gather_all_scores(frame_scores)
for score in gather_scores:
for f_score in score:
return_scores += f_score
return return_scores
def active_evaluate_dual(model, target_loader, rank, domain):
if rank == 0:
print("======> Active Evaluate <======")
dataloader_iter_tar = iter(target_loader)
total_iter_tar = len(dataloader_iter_tar)
frame_scores = []
return_scores = []
model.eval()
if rank == 0:
pbar = tqdm.tqdm(total=total_iter_tar, leave=False, desc='active_evaluate', dynamic_ncols=True)
for cur_it in range(total_iter_tar):
try:
batch = next(dataloader_iter_tar)
except StopIteration:
dataloader_iter_tar = iter(target_loader)
batch = next(dataloader_iter_tar)
print('new iter')
with torch.no_grad():
load_data_to_gpu(batch)
forward_args = {
'mode': 'active_evaluate',
'domain': domain
}
sample_score = model(batch, **forward_args)
frame_scores.append(sample_score)
if rank == 0:
pbar.update()
pbar.refresh()
if rank == 0:
pbar.close()
gather_scores = gather_all_scores(frame_scores)
for score in gather_scores:
for f_score in score:
return_scores += f_score
return return_scores
def gather_all_scores(frame_scores):
commu_utils.synchronize()
if dist.is_initialized():
scores = commu_utils.all_gather(frame_scores)
else:
scores = [frame_scores]
commu_utils.synchronize()
return scores
def distributed_concat(tensor):
output_tensor = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensor, tensor)
concat_tensor = torch.cat(output_tensor, dim=0)
return concat_tensor
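# Example sketch (assumes an initialized process group; hypothetical tensor):
# concatenate per-rank score tensors so every rank sees the full set.
#   local_scores = torch.rand(4, device='cuda')
#   all_scores = distributed_concat(local_scores)  # (4 * world_size,)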
def get_target_list(target_pkl_file, oss):
if oss == True:
from petrel_client.client import Client
client = Client('~/.petreloss.conf')
pkl_bytes = client.get(target_pkl_file, update_cache=True)
target_list = pickle.load(io.BytesIO(pkl_bytes))
else:
with open(target_pkl_file, 'rb') as f:
target_list = pickle.load(f)
return target_list
def get_dataset_list(dataset_file, oss, sample_interval=10, waymo=False):
if oss == True:
from petrel_client.client import Client
client = Client('~/.petreloss.conf')
if waymo == False:
if oss == True:
# from petrel_client.client import Client
# client = Client('~/.petreloss.conf')
pkl_bytes = client.get(dataset_file, update_cache=True)
target_list = pickle.load(io.BytesIO(pkl_bytes))
else:
with open(dataset_file, 'rb') as f:
target_list = pickle.load(f)
else:
data_path = '../data/waymo/ImageSets/train.txt'
target_list = []
sample_sequence_list = [x.strip() for x in open(data_path).readlines()]
for k in tqdm.tqdm(range(len(sample_sequence_list))):
sequence_name = os.path.splitext(sample_sequence_list[k])[0]
if oss == False:
info_path = Path(dataset_file) / sequence_name / ('%s.pkl' % sequence_name)
if not Path(info_path).exists():
continue
else:
info_path = os.path.join(dataset_file, sequence_name, ('%s.pkl' % sequence_name))
# if not Path(info_path).exists():
# continue
if oss == False:
with open(info_path, 'rb') as f:
infos = pickle.load(f)
target_list.extend(infos)
else:
pkl_bytes = client.get(info_path, update_cache=True)
infos = pickle.load(io.BytesIO(pkl_bytes))
target_list.extend(infos)
if sample_interval > 1:
sampled_waymo_infos = []
for k in range(0, len(target_list), sample_interval):
sampled_waymo_infos.append(target_list[k])
target_list = sampled_waymo_infos
return target_list
def update_sample_list(sample_list, target_list, sample_frame_id, epoch, save_path, target_name, rank):
if target_name == 'ActiveKittiDataset':
new_sample_list = [item for item in target_list if item['point_cloud']['lidar_idx'] in sample_frame_id]
elif target_name == 'ActiveNuScenesDataset':
new_sample_list = [item for item in target_list if Path(item['lidar_path']).stem in sample_frame_id]
sample_list = sample_list + new_sample_list
sample_list_path = save_path / ('epoch-%d_sample_list.pkl' % epoch)
if rank == 0:
with open(sample_list_path, 'wb') as f:
pickle.dump(sample_list, f)
commu_utils.synchronize()
return sample_list, sample_list_path
def update_sample_list_dual(sample_list, dataset_list, sample_frame_id, epoch, save_path, dataset_name, rank, domain='source'):
if dataset_name == 'ActiveKittiDataset':
assert domain == 'target'
new_sample_list = [item for item in dataset_list if item['point_cloud']['lidar_idx'] in sample_frame_id]
sample_list = sample_list + new_sample_list
elif dataset_name == 'ActiveNuScenesDataset':
if domain == 'target':
new_sample_list = [item for item in dataset_list if Path(item['lidar_path']).stem in sample_frame_id]
sample_list = sample_list + new_sample_list
else:
sample_list = [item for item in dataset_list if Path(item['lidar_path']).stem in sample_frame_id]
elif dataset_name =='ActiveLyftDataset':
assert domain == 'target'
new_sample_list = [item for item in dataset_list if Path(item['lidar_path']).stem in sample_frame_id]
sample_list = sample_list + new_sample_list
elif dataset_name == 'ActiveWaymoDataset':
assert domain == 'source'
sample_list = [item for item in dataset_list if str(item['frame_id']) in sample_frame_id]
sample_list_path = save_path / ('epoch-%d_sample_list_' % epoch + '_' + domain + '.pkl')
if rank == 0:
with open(sample_list_path, 'wb') as f:
pickle.dump(sample_list, f)
commu_utils.synchronize()
return sample_list, sample_list_path
def update_target_list(target_list, sample_frame_id, epoch, save_path, target_name, rank):
if target_name == 'ActiveKittiDataset':
target_list = [item for item in target_list if item['point_cloud']['lidar_idx'] not in sample_frame_id]
elif target_name == 'ActiveNuScenesDataset':
target_list = [item for item in target_list if Path(item['lidar_path']).stem not in sample_frame_id]
target_list_path = save_path / ('epoch-%d_target_list.pkl' % epoch)
if rank == 0:
with open(target_list_path, 'wb') as f:
pickle.dump(target_list, f)
commu_utils.synchronize()
return target_list, target_list_path
def active_sample(frame_scores, budget):
frame_sorted = sorted(frame_scores, key=lambda keys: keys.get("total_score"), reverse=True)
sampled_frame_info = frame_sorted[:budget]
sampled_frame_id = [frame['frame_id'] for frame in sampled_frame_info]
return sampled_frame_id, sampled_frame_info
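# Worked example (toy scores): active_sample keeps the top-`budget` frames
# ranked by 'total_score'.
#   scores = [{'frame_id': 'a', 'total_score': 0.9},
#             {'frame_id': 'b', 'total_score': 0.2}]
#   ids, infos = active_sample(scores, budget=1)  # ids == ['a']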
def active_sample_source(frame_scores, budget):
sampled_frame_info = [item for item in frame_scores if item['total_score'] > 0]
sampled_frame_id = [frame['frame_id'] for frame in sampled_frame_info]
return sampled_frame_id, sampled_frame_info
def active_sample_tar(frame_scores, budget, logger=None):
roi_feature = frame_scores[0].get('roi_feature', None)
feature_dim = roi_feature.shape[-1]
sample_roi_feature = roi_feature.new_zeros((budget, feature_dim))
prototype_roi_feature = roi_feature.new_zeros((budget, feature_dim))
roi_feature = roi_feature.new_zeros((budget+1, feature_dim))
domainness_list = []
feature_list = []
sample_feature_list = []
for item in frame_scores:
cur_roi_feature = item.get('roi_feature')
cur_roi_feature = cur_roi_feature.to(roi_feature.device)
cur_domainness = item.get('domainness_evaluate')
cur_domainness = cur_domainness.to(roi_feature.device)
if len(feature_list) < budget:
sample_roi_feature[len(feature_list)] = cur_roi_feature
prototype_roi_feature[len(feature_list)] = cur_roi_feature
roi_feature[len(feature_list)] = cur_roi_feature
feature_list.append([item])
sample_feature_list.append(item)
domainness_list.append(cur_domainness)
else:
roi_feature[-1, :] = cur_roi_feature
similarity_matrix = F.normalize(roi_feature, dim=1) @ F.normalize(roi_feature, dim=1).transpose(1,0).contiguous()
similarity, inds = similarity_matrix.topk(k=2, dim=0)
similarity = similarity[1]
inds = inds[1]
if similarity.min(dim=-1)[0] == similarity[-1]:
similarity_max = similarity.max(dim=-1)
inds_y = similarity.argmax(dim=-1)
inds_x = inds[inds_y]
domainness_x = domainness_list[inds_x]
domainness_y = domainness_list[inds_y]
roi_feature_1 = sample_roi_feature[inds_x]
roi_feature_2 = sample_roi_feature[inds_y]
num_merge_prototype_1 = len(feature_list[inds_x])
num_merge_prototype_2 = len(feature_list[inds_y])
merge_proto = (num_merge_prototype_1 * prototype_roi_feature[inds_x] + num_merge_prototype_2 * prototype_roi_feature[inds_y]) / (num_merge_prototype_1 + num_merge_prototype_2)
if domainness_x > domainness_y:
prototype_roi_feature[inds_x] = merge_proto
sample_roi_feature[inds_y] = cur_roi_feature
roi_feature[inds_y] = cur_roi_feature
feature_list[inds_x] = feature_list[inds_x] + feature_list[inds_y]
feature_list[inds_y] = [item]
sample_feature_list[inds_y] = item
domainness_list[inds_y] = cur_domainness
else:
prototype_roi_feature[inds_y] = merge_proto
sample_roi_feature[inds_x] = cur_roi_feature
roi_feature[inds_x] = cur_roi_feature
feature_list[inds_y] = feature_list[inds_x] + feature_list[inds_y]
feature_list[inds_x] = [item]
sample_feature_list[inds_x] = item
domainness_list[inds_x] = cur_domainness
else:
merge_inds = inds[budget]
merge_domainness = domainness_list[merge_inds]
num_merge_proto = len(feature_list[merge_inds])
merge_proto = (num_merge_proto * prototype_roi_feature[merge_inds] + cur_roi_feature) / (num_merge_proto + 1)
prototype_roi_feature[merge_inds] = merge_proto
if cur_domainness > merge_domainness:
sample_roi_feature[merge_inds] = cur_roi_feature
roi_feature[merge_inds] = cur_roi_feature
sample_feature_list[merge_inds] = item
domainness_list[merge_inds] = cur_domainness
feature_list[merge_inds].append(item)
    for l in feature_list:
        # sort in place so the highest-scoring frames come first in each group
        l.sort(key=lambda keys: keys.get("total_score"), reverse=True)
num_each_group = [len(l) for l in feature_list]
distance_matrix = F.normalize(prototype_roi_feature, dim=1) @ F.normalize(prototype_roi_feature, dim=1).transpose(0, 1).contiguous()
distance, inds = distance_matrix.topk(k=2, dim=0)
distance_each_group = distance[1]
sample_num_each_group = distance_each_group.cpu() * torch.Tensor(num_each_group).cpu()
sample_num_each_group = assign_sample_num(sample_num_each_group.numpy().tolist(), budget)
sample_feature_list_new = []
for i, sample_num in enumerate(sample_num_each_group):
sampled = 0
while sampled < sample_num:
sample_feature_list_new.append(feature_list[i][sampled])
sampled += 1
sample_list_new = []
for item in sample_feature_list_new:
sample_list_new.append(item['frame_id'])
sample_list = []
    for i, group in enumerate(feature_list):
        num = len(group)
        print('group %d has %d sample_frame' % (i, sample_num_each_group[i]))
        print('group %d has %d frame' % (i, num))
        if logger is not None:
            logger.info('group %d has %d sample_frame' % (i, sample_num_each_group[i]))
            logger.info('group %d has %d frame' % (i, num))
for item in sample_feature_list:
sample_list.append(item['frame_id'])
return sample_list_new, sample_feature_list_new
def assign_sample_num(sample_num_list, budget):
sampled_num = 0
sample_num_each_list = [0] * len(sample_num_list)
while sampled_num < budget:
ind = sample_num_list.index(max(sample_num_list))
sample_num_each_list[ind] += 1
sample_num_list[ind] /= 2
sampled_num += 1
return sample_num_each_list
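# Worked example (illustrative numbers) of the greedy halving above: each pick
# takes the current maximum and halves it, so larger initial weights receive
# proportionally more of the budget.
#   assign_sample_num([4.0, 3.0], budget=3) -> [2, 1]
#   (picks: group 0 at 4.0 -> 2.0, group 1 at 3.0 -> 1.5, group 0 at 2.0 -> 1.0)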
def random_sample(source_list, target_list, source_budget, target_budget, save_path):
random.shuffle(target_list)
random.shuffle(source_list)
target_sample_list = random.sample(target_list, target_budget)
source_sample_list = random.sample(source_list, source_budget)
target_sample_path = save_path / ('random_target_list.pkl')
source_sample_path = save_path / ('random_source_list.pkl')
with open(target_sample_path, 'wb') as f:
pickle.dump(target_sample_list, f)
with open(source_sample_path, 'wb') as f:
pickle.dump(source_sample_list, f)
return source_sample_path, target_sample_path
def random_sample_target(target_list, target_budget, save_path):
random.shuffle(target_list)
target_sample_list = random.sample(target_list, target_budget)
target_sample_path = save_path / ('random_target_list.pkl')
with open(target_sample_path, 'wb') as f:
pickle.dump(target_sample_list, f)
return target_sample_path
| 15,595
| 40.589333
| 191
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/memory_ensemble_utils.py
|
import torch
import numpy as np
from scipy.optimize import linear_sum_assignment
from pcdet.utils import common_utils
from pcdet.ops.iou3d_nms import iou3d_nms_utils
from pcdet.models.model_utils.model_nms_utils import class_agnostic_nms
def consistency_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
"""
Args:
gt_infos_a:
gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
cls_scores: (N)
iou_scores: (N)
memory_counter: (N)
gt_infos_b:
gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
cls_scores: (M)
iou_scores: (M)
memory_counter: (M)
memory_ensemble_cfg:
Returns:
gt_infos:
gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
cls_scores: (K)
iou_scores: (K)
memory_counter: (K)
"""
gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])
gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()
new_gt_box = gt_infos_a['gt_boxes']
new_cls_scores = gt_infos_a['cls_scores']
new_iou_scores = gt_infos_a['iou_scores']
new_memory_counter = gt_infos_a['memory_counter']
    # if gt_box_a or gt_box_b has no predictions
if gt_box_b.shape[0] == 0:
gt_infos_a['memory_counter'] += 1
return gt_infos_a
elif gt_box_a.shape[0] == 0:
return gt_infos_b
# get ious
iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7]).cpu()
ious, match_idx = torch.max(iou_matrix, dim=1)
ious, match_idx = ious.numpy(), match_idx.numpy()
gt_box_a, gt_box_b = gt_box_a.cpu().numpy(), gt_box_b.cpu().numpy()
match_pairs_idx = np.concatenate((
np.array(list(range(gt_box_a.shape[0]))).reshape(-1, 1),
match_idx.reshape(-1, 1)), axis=1)
#########################################################
# filter matched pair boxes by IoU
# if matching succeeded, use boxes with higher confidence
#########################################################
iou_mask = (ious >= memory_ensemble_cfg.IOU_THRESH)
matching_selected = match_pairs_idx[iou_mask]
gt_box_selected_a = gt_box_a[matching_selected[:, 0]]
gt_box_selected_b = gt_box_b[matching_selected[:, 1]]
# assign boxes with higher confidence
score_mask = gt_box_selected_a[:, 8] < gt_box_selected_b[:, 8]
if memory_ensemble_cfg.get('WEIGHTED', None):
weight = gt_box_selected_a[:, 8] / (gt_box_selected_a[:, 8] + gt_box_selected_b[:, 8])
min_scores = np.minimum(gt_box_selected_a[:, 8], gt_box_selected_b[:, 8])
max_scores = np.maximum(gt_box_selected_a[:, 8], gt_box_selected_b[:, 8])
weighted_score = weight * (max_scores - min_scores) + min_scores
new_gt_box[matching_selected[:, 0], :7] = weight.reshape(-1, 1) * gt_box_selected_a[:, :7] + \
(1 - weight.reshape(-1, 1)) * gt_box_selected_b[:, :7]
new_gt_box[matching_selected[:, 0], 8] = weighted_score
else:
new_gt_box[matching_selected[score_mask, 0], :] = gt_box_selected_b[score_mask, :]
if gt_infos_a['cls_scores'] is not None:
new_cls_scores[matching_selected[score_mask, 0]] = gt_infos_b['cls_scores'][
matching_selected[score_mask, 1]]
if gt_infos_a['iou_scores'] is not None:
new_iou_scores[matching_selected[score_mask, 0]] = gt_infos_b['iou_scores'][
matching_selected[score_mask, 1]]
# for matching pairs, clear the ignore counter
new_memory_counter[matching_selected[:, 0]] = 0
#######################################################
    # If previous bboxes disappeared: ious < IOU_THRESH
#######################################################
disappear_idx = (ious < memory_ensemble_cfg.IOU_THRESH).nonzero()[0]
if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
new_memory_counter[disappear_idx] += 1
        # ignore gt_boxes whose ignore_count >= IGNORE_THRESH
ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
new_gt_box[ignore_mask, 7] = -1
        # remove gt_boxes whose ignore_count >= RM_THRESH
remain_mask = new_memory_counter < memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
new_gt_box = new_gt_box[remain_mask]
new_memory_counter = new_memory_counter[remain_mask]
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = new_cls_scores[remain_mask]
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = new_iou_scores[remain_mask]
    # Add newly appeared boxes
ious_b2a, match_idx_b2a = torch.max(iou_matrix, dim=0)
ious_b2a, match_idx_b2a = ious_b2a.numpy(), match_idx_b2a.numpy()
newboxes_idx = (ious_b2a < memory_ensemble_cfg.IOU_THRESH).nonzero()[0]
if newboxes_idx.shape[0] != 0:
new_gt_box = np.concatenate((new_gt_box, gt_infos_b['gt_boxes'][newboxes_idx, :]), axis=0)
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = np.concatenate((new_cls_scores, gt_infos_b['cls_scores'][newboxes_idx]), axis=0)
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = np.concatenate((new_iou_scores, gt_infos_b['iou_scores'][newboxes_idx]), axis=0)
new_memory_counter = np.concatenate((new_memory_counter, gt_infos_b['memory_counter'][newboxes_idx]), axis=0)
new_gt_infos = {
'gt_boxes': new_gt_box,
'cls_scores': new_cls_scores if gt_infos_a['cls_scores'] is not None else None,
'iou_scores': new_iou_scores if gt_infos_a['iou_scores'] is not None else None,
'memory_counter': new_memory_counter
}
return new_gt_infos
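# A minimal dispatch sketch mirroring self_training_utils: the ensemble
# function is selected by name from the config, e.g.
#   ensemble_func = getattr(memory_ensemble_utils, 'consistency_ensemble')
#   merged_infos = ensemble_func(prev_gt_infos, cur_gt_infos, cfg.SELF_TRAIN.MEMORY_ENSEMBLE)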
def nms_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
"""
Args:
gt_infos_a:
gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
cls_scores: (N)
iou_scores: (N)
memory_counter: (N)
gt_infos_b:
gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
cls_scores: (M)
iou_scores: (M)
memory_counter: (M)
memory_ensemble_cfg:
Returns:
gt_infos:
gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
cls_scores: (K)
iou_scores: (K)
memory_counter: (K)
"""
gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])
if gt_box_b.shape[0] == 0:
if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
gt_infos_a['memory_counter'] += 1
return gt_infos_a
elif gt_box_a.shape[0] == 0:
return gt_infos_b
gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()
gt_boxes = torch.cat((gt_box_a, gt_box_b), dim=0)
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = np.concatenate((gt_infos_a['cls_scores'], gt_infos_b['cls_scores']), axis=0)
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = np.concatenate((gt_infos_a['iou_scores'], gt_infos_b['iou_scores']), axis=0)
new_memory_counter = np.concatenate((gt_infos_a['memory_counter'], gt_infos_b['memory_counter']), axis=0)
selected, selected_scores = class_agnostic_nms(
box_scores=gt_boxes[:, -1], box_preds=gt_boxes[:, :7], nms_config=memory_ensemble_cfg.NMS_CONFIG
)
gt_boxes = gt_boxes.cpu().numpy()
if isinstance(selected, list):
selected = np.array(selected)
else:
selected = selected.cpu().numpy()
if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7])
ious, _ = torch.max(iou_matrix, dim=1)
ious = ious.cpu().numpy()
gt_box_a_size = gt_box_a.shape[0]
selected_a = selected[selected < gt_box_a_size]
matched_mask = (ious[selected_a] > memory_ensemble_cfg.NMS_CONFIG.NMS_THRESH)
match_idx = selected_a[matched_mask]
new_memory_counter[match_idx] = 0
# for previous bboxes disappeared
disappear_idx = (ious < memory_ensemble_cfg.NMS_CONFIG.NMS_THRESH).nonzero()[0]
new_memory_counter[disappear_idx] += 1
        # ignore gt_boxes whose ignore_count >= IGNORE_THRESH
ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
gt_boxes[ignore_mask, 7] = -1
        # remove gt_boxes whose ignore_count >= RM_THRESH
rm_idx = (new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH).nonzero()[0]
selected = np.setdiff1d(selected, rm_idx)
selected_gt_boxes = gt_boxes[selected]
new_gt_infos = {
'gt_boxes': selected_gt_boxes,
'cls_scores': new_cls_scores[selected] if gt_infos_a['cls_scores'] is not None else None,
'iou_scores': new_iou_scores[selected] if gt_infos_a['iou_scores'] is not None else None,
'memory_counter': new_memory_counter[selected]
}
return new_gt_infos
def bipartite_ensemble(gt_infos_a, gt_infos_b, memory_ensemble_cfg):
"""
Args:
gt_infos_a:
gt_boxes: (N, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for previous pseudo boxes
cls_scores: (N)
iou_scores: (N)
memory_counter: (N)
gt_infos_b:
gt_boxes: (M, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for current pseudo boxes
cls_scores: (M)
iou_scores: (M)
memory_counter: (M)
memory_ensemble_cfg:
Returns:
gt_infos:
gt_boxes: (K, 9) [x, y, z, dx, dy, dz, heading, label, scores] in LiDAR for merged pseudo boxes
cls_scores: (K)
iou_scores: (K)
memory_counter: (K)
"""
gt_box_a, _ = common_utils.check_numpy_to_torch(gt_infos_a['gt_boxes'])
gt_box_b, _ = common_utils.check_numpy_to_torch(gt_infos_b['gt_boxes'])
gt_box_a, gt_box_b = gt_box_a.cuda(), gt_box_b.cuda()
new_gt_box = gt_infos_a['gt_boxes']
new_cls_scores = gt_infos_a['cls_scores']
new_iou_scores = gt_infos_a['iou_scores']
new_memory_counter = gt_infos_a['memory_counter']
    # if gt_box_a or gt_box_b has no predictions
if gt_box_b.shape[0] == 0:
gt_infos_a['memory_counter'] += 1
return gt_infos_a
elif gt_box_a.shape[0] == 0:
return gt_infos_b
# bipartite matching
iou_matrix = iou3d_nms_utils.boxes_iou3d_gpu(gt_box_a[:, :7], gt_box_b[:, :7])
iou_matrix = iou_matrix.cpu().numpy()
a_idx, b_idx = linear_sum_assignment(-iou_matrix)
gt_box_a, gt_box_b = gt_box_a.cpu().numpy(), gt_box_b.cpu().numpy()
    matching_pairs_idx = np.concatenate((a_idx.reshape(-1, 1), b_idx.reshape(-1, 1)), axis=1)
    ious = iou_matrix[matching_pairs_idx[:, 0], matching_pairs_idx[:, 1]]
    # matched boxes in gt_box_a
    matched_mask = ious > memory_ensemble_cfg.IOU_THRESH
    matching_selected = matching_pairs_idx[matched_mask]
gt_box_selected_a = gt_box_a[matching_selected[:, 0]]
gt_box_selected_b = gt_box_b[matching_selected[:, 1]]
# assign boxes with higher confidence
score_mask = gt_box_selected_a[:, 8] < gt_box_selected_b[:, 8]
new_gt_box[matching_selected[score_mask, 0], :] = gt_box_selected_b[score_mask, :]
if gt_infos_a['cls_scores'] is not None:
new_cls_scores[matching_selected[score_mask, 0]] = gt_infos_b['cls_scores'][
matching_selected[score_mask, 1]]
if gt_infos_a['iou_scores'] is not None:
new_iou_scores[matching_selected[score_mask, 0]] = gt_infos_b['iou_scores'][
matching_selected[score_mask, 1]]
# for matched pairs, clear the ignore counter
new_memory_counter[matching_selected[:, 0]] = 0
##############################################
    # disappeared boxes from previous pseudo boxes
##############################################
gt_box_a_idx = np.array(list(range(gt_box_a.shape[0])))
disappear_idx = np.setdiff1d(gt_box_a_idx, matching_selected[:, 0])
if memory_ensemble_cfg.get('MEMORY_VOTING', None) and memory_ensemble_cfg.MEMORY_VOTING.ENABLED:
new_memory_counter[disappear_idx] += 1
        # ignore gt_boxes whose ignore_count >= IGNORE_THRESH
ignore_mask = new_memory_counter >= memory_ensemble_cfg.MEMORY_VOTING.IGNORE_THRESH
new_gt_box[ignore_mask, 7] = -1
        # remove gt_boxes whose ignore_count >= RM_THRESH
remain_mask = new_memory_counter < memory_ensemble_cfg.MEMORY_VOTING.RM_THRESH
new_gt_box = new_gt_box[remain_mask]
new_memory_counter = new_memory_counter[remain_mask]
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = new_cls_scores[remain_mask]
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = new_iou_scores[remain_mask]
##############################################
    # newly appeared boxes from current pseudo boxes
##############################################
gt_box_b_idx = np.array(list(range(gt_box_b.shape[0])))
newboxes_idx = np.setdiff1d(gt_box_b_idx, matching_selected[:, 1])
if newboxes_idx.shape[0] != 0:
new_gt_box = np.concatenate((new_gt_box, gt_infos_b['gt_boxes'][newboxes_idx, :]), axis=0)
if gt_infos_a['cls_scores'] is not None:
new_cls_scores = np.concatenate((new_cls_scores,
gt_infos_b['cls_scores'][newboxes_idx]), axis=0)
if gt_infos_a['iou_scores'] is not None:
new_iou_scores = np.concatenate((new_iou_scores,
gt_infos_b['iou_scores'][newboxes_idx]), axis=0)
new_memory_counter = np.concatenate((new_memory_counter,
gt_infos_b['memory_counter'][newboxes_idx]), axis=0)
new_gt_infos = {
'gt_boxes': new_gt_box,
'cls_scores': new_cls_scores if gt_infos_a['cls_scores'] is not None else None,
'iou_scores': new_iou_scores if gt_infos_a['iou_scores'] is not None else None,
'memory_counter': new_memory_counter
}
return new_gt_infos
| 14,715
| 41.90379
| 117
|
py
|
3DTrans
|
3DTrans-master/pcdet/utils/commu_utils.py
|
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
deeply borrowed from maskrcnn-benchmark and ST3D
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
origin_size = None
if not isinstance(data, torch.Tensor):
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
else:
origin_size = data.size()
tensor = data.reshape(-1)
tensor_type = tensor.dtype
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.FloatTensor(size=(max_size,)).cuda().to(tensor_type))
if local_size != max_size:
padding = torch.FloatTensor(size=(max_size - local_size,)).cuda().to(tensor_type)
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
if origin_size is None:
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
else:
buffer = tensor[:size]
data_list.append(buffer)
if origin_size is not None:
new_shape = [-1] + list(origin_size[1:])
resized_list = []
for data in data_list:
# suppose the difference of tensor size exist in first dimension
data = data.reshape(new_shape)
resized_list.append(data)
return resized_list
else:
return data_list
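# A minimal sketch (hypothetical payload): gather a per-rank dict and merge the
# parts; with world_size == 1 this degenerates to [data].
#   per_rank = {'frame_001': 0.9}
#   merged = {}
#   for part in all_gather(per_rank):
#       merged.update(part)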
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
def average_reduce_value(data):
data_list = all_gather(data)
return sum(data_list) / len(data_list)
def all_reduce(data, op="sum", average=False):
def op_map(op):
op_dict = {
"SUM": dist.ReduceOp.SUM,
"MAX": dist.ReduceOp.MAX,
"MIN": dist.ReduceOp.MIN,
"PRODUCT": dist.ReduceOp.PRODUCT,
}
return op_dict[op]
world_size = get_world_size()
if world_size > 1:
reduced_data = data.clone()
dist.all_reduce(reduced_data, op=op_map(op.upper()))
if average:
assert op.upper() == 'SUM'
return reduced_data / world_size
else:
return reduced_data
return data
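# Example sketch (assumes an initialized process group; hypothetical scalar):
# average a loss value across ranks.
#   loss = torch.tensor(0.5, device='cuda')
#   mean_loss = all_reduce(loss, op='sum', average=True)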
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
| 5,253
| 27.710383
| 89
|
py
|
3DTrans
|
3DTrans-master/pcdet/ops/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py
|
import torch
import torch.nn as nn
from torch.autograd import Function
from ...utils import box_utils
from . import roipoint_pool3d_cuda
class RoIPointPool3d(nn.Module):
def __init__(self, num_sampled_points=512, pool_extra_width=1.0):
super().__init__()
self.num_sampled_points = num_sampled_points
self.pool_extra_width = pool_extra_width
def forward(self, points, point_features, boxes3d):
"""
Args:
points: (B, N, 3)
point_features: (B, N, C)
boxes3d: (B, M, 7), [x, y, z, dx, dy, dz, heading]
Returns:
pooled_features: (B, M, 512, 3 + C)
pooled_empty_flag: (B, M)
"""
return RoIPointPool3dFunction.apply(
points, point_features, boxes3d, self.pool_extra_width, self.num_sampled_points
)
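# A minimal usage sketch (random tensors; the underlying CUDA op requires GPU):
#   pool = RoIPointPool3d(num_sampled_points=512, pool_extra_width=1.0)
#   points = torch.rand(2, 1024, 3).cuda()
#   feats = torch.rand(2, 1024, 16).cuda()
#   boxes = torch.rand(2, 8, 7).cuda()
#   pooled, empty_flag = pool(points, feats, boxes)  # (2, 8, 512, 19), (2, 8)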
class RoIPointPool3dFunction(Function):
@staticmethod
def forward(ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512):
"""
Args:
ctx:
points: (B, N, 3)
point_features: (B, N, C)
boxes3d: (B, num_boxes, 7), [x, y, z, dx, dy, dz, heading]
pool_extra_width:
num_sampled_points:
Returns:
pooled_features: (B, num_boxes, 512, 3 + C)
pooled_empty_flag: (B, num_boxes)
"""
assert points.shape.__len__() == 3 and points.shape[2] == 3
batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[1], point_features.shape[2]
pooled_boxes3d = box_utils.enlarge_box3d(boxes3d.view(-1, 7), pool_extra_width).view(batch_size, -1, 7)
pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, 3 + feature_len))
pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int()
roipoint_pool3d_cuda.forward(
points.contiguous(), pooled_boxes3d.contiguous(),
point_features.contiguous(), pooled_features, pooled_empty_flag
)
return pooled_features, pooled_empty_flag
@staticmethod
def backward(ctx, grad_out):
raise NotImplementedError
if __name__ == '__main__':
pass
| 2,226
| 31.75
| 112
|
py
|
3DTrans
|
3DTrans-master/pcdet/ops/roipoint_pool3d/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/ops/pointnet2/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/ops/pointnet2/pointnet2_stack/voxel_query_utils.py
|
import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn as nn
from typing import List
from . import pointnet2_stack_cuda as pointnet2
from . import pointnet2_utils
class VoxelQuery(Function):
@staticmethod
def forward(ctx, max_range: int, radius: float, nsample: int, xyz: torch.Tensor, \
new_xyz: torch.Tensor, new_coords: torch.Tensor, point_indices: torch.Tensor):
"""
Args:
ctx:
            max_range: int, max range of voxels to be grouped
            radius: float, radius of the balls
            nsample: int, maximum number of features in the balls
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            new_xyz: (M1 + M2 ..., 3) centers of the voxel query
            new_coords: (M1 + M2, 4), [batch_id, z, y, x] coordinates of keypoints
            point_indices: (batch_size, Z, Y, X) 4-D tensor recording the point indices of voxels
        Returns:
            idx: (M1 + M2, nsample) tensor with the indices of the features that form the query balls
"""
assert new_xyz.is_contiguous()
assert xyz.is_contiguous()
assert new_coords.is_contiguous()
assert point_indices.is_contiguous()
M = new_coords.shape[0]
B, Z, Y, X = point_indices.shape
idx = torch.cuda.IntTensor(M, nsample).zero_()
z_range, y_range, x_range = max_range
pointnet2.voxel_query_wrapper(M, Z, Y, X, nsample, radius, z_range, y_range, x_range, \
new_xyz, xyz, new_coords, point_indices, idx)
empty_ball_mask = (idx[:, 0] == -1)
idx[empty_ball_mask] = 0
return idx, empty_ball_mask
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
voxel_query = VoxelQuery.apply
class VoxelQueryAndGrouping(nn.Module):
def __init__(self, max_range: int, radius: float, nsample: int):
"""
Args:
radius: float, radius of ball
nsample: int, maximum number of features to gather in the ball
"""
super().__init__()
self.max_range, self.radius, self.nsample = max_range, radius, nsample
def forward(self, new_coords: torch.Tensor, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor,
features: torch.Tensor, voxel2point_indices: torch.Tensor):
"""
Args:
new_coords: (M1 + M2 ..., 3) centers voxel indices of the ball query
xyz: (N1 + N2 ..., 3) xyz coordinates of the features
xyz_batch_cnt: (batch_size), [N1, N2, ...]
new_xyz: (M1 + M2 ..., 3) centers of the ball query
new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
features: (N1 + N2 ..., C) tensor of features to group
voxel2point_indices: (B, Z, Y, X) tensor of points indices of voxels
Returns:
new_features: (M1 + M2, C, nsample) tensor
"""
        assert xyz.shape[0] == xyz_batch_cnt.sum(), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
assert new_coords.shape[0] == new_xyz_batch_cnt.sum(), \
'new_coords: %s, new_xyz_batch_cnt: %s' % (str(new_coords.shape), str(new_xyz_batch_cnt))
batch_size = xyz_batch_cnt.shape[0]
# idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
idx1, empty_ball_mask1 = voxel_query(self.max_range, self.radius, self.nsample, xyz, new_xyz, new_coords, voxel2point_indices)
idx1 = idx1.view(batch_size, -1, self.nsample)
count = 0
for bs_idx in range(batch_size):
idx1[bs_idx] -= count
count += xyz_batch_cnt[bs_idx]
idx1 = idx1.view(-1, self.nsample)
idx1[empty_ball_mask1] = 0
idx = idx1
empty_ball_mask = empty_ball_mask1
grouped_xyz = pointnet2_utils.grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)
# grouped_features: (M1 + M2, C, nsample)
grouped_features = pointnet2_utils.grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt)
return grouped_features, grouped_xyz, empty_ball_mask
| 4,148
| 40.079208
| 134
|
py
|
3DTrans
|
3DTrans-master/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py
|
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from . import pointnet2_stack_cuda as pointnet2
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
new_xyz: torch.Tensor, new_xyz_batch_cnt):
"""
Args:
ctx:
radius: float, radius of the balls
nsample: int, maximum number of features in the balls
xyz: (N1 + N2 ..., 3) xyz coordinates of the features
xyz_batch_cnt: (batch_size), [N1, N2, ...]
new_xyz: (M1 + M2 ..., 3) centers of the ball query
new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
Returns:
            idx: (M1 + M2, nsample) tensor with the indices of the features that form the query balls
"""
assert new_xyz.is_contiguous()
assert new_xyz_batch_cnt.is_contiguous()
assert xyz.is_contiguous()
assert xyz_batch_cnt.is_contiguous()
B = xyz_batch_cnt.shape[0]
M = new_xyz.shape[0]
idx = torch.cuda.IntTensor(M, nsample).zero_()
pointnet2.ball_query_wrapper(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx)
empty_ball_mask = (idx[:, 0] == -1)
idx[empty_ball_mask] = 0
ctx.mark_non_differentiable(idx)
ctx.mark_non_differentiable(empty_ball_mask)
return idx, empty_ball_mask
@staticmethod
def backward(ctx, a=None, b=None):
return None, None, None, None, None, None
ball_query = BallQuery.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, features_batch_cnt: torch.Tensor,
idx: torch.Tensor, idx_batch_cnt: torch.Tensor):
"""
Args:
ctx:
features: (N1 + N2 ..., C) tensor of features to group
            features_batch_cnt: (batch_size) [N1, N2, ...] tensor containing the number of features per sample
            idx: (M1 + M2 ..., nsample) tensor containing the indices of features to group with
            idx_batch_cnt: (batch_size) [M1, M2, ...] tensor containing the number of indices per sample
Returns:
output: (M1 + M2, C, nsample) tensor
"""
assert features.is_contiguous()
assert features_batch_cnt.is_contiguous()
assert idx.is_contiguous()
assert idx_batch_cnt.is_contiguous()
assert features.shape[0] == features_batch_cnt.sum(), \
'features: %s, features_batch_cnt: %s' % (str(features.shape), str(features_batch_cnt))
assert idx.shape[0] == idx_batch_cnt.sum(), \
'idx: %s, idx_batch_cnt: %s' % (str(idx.shape), str(idx_batch_cnt))
M, nsample = idx.size()
N, C = features.size()
B = idx_batch_cnt.shape[0]
output = torch.cuda.FloatTensor(M, C, nsample)
pointnet2.group_points_wrapper(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, output)
ctx.for_backwards = (B, N, idx, features_batch_cnt, idx_batch_cnt)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor):
"""
Args:
ctx:
grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward
Returns:
grad_features: (N1 + N2 ..., C) gradient of the features
"""
B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards
M, C, nsample = grad_out.size()
grad_features = Variable(torch.cuda.FloatTensor(N, C).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.group_points_grad_wrapper(B, M, C, N, nsample, grad_out_data, idx,
idx_batch_cnt, features_batch_cnt, grad_features.data)
return grad_features, None, None, None
grouping_operation = GroupingOperation.apply
class QueryAndGroup(nn.Module):
def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
"""
Args:
radius: float, radius of ball
nsample: int, maximum number of features to gather in the ball
use_xyz:
"""
super().__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor,
features: torch.Tensor = None):
"""
Args:
xyz: (N1 + N2 ..., 3) xyz coordinates of the features
xyz_batch_cnt: (batch_size), [N1, N2, ...]
new_xyz: (M1 + M2 ..., 3) centers of the ball query
new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
features: (N1 + N2 ..., C) tensor of features to group
Returns:
new_features: (M1 + M2, C, nsample) tensor
"""
        assert xyz.shape[0] == xyz_batch_cnt.sum(), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
assert new_xyz.shape[0] == new_xyz_batch_cnt.sum(), \
'new_xyz: %s, new_xyz_batch_cnt: %s' % (str(new_xyz.shape), str(new_xyz_batch_cnt))
# idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
idx, empty_ball_mask = ball_query(self.radius, self.nsample, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt)
grouped_xyz = grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt) # (M1 + M2, 3, nsample)
grouped_xyz -= new_xyz.unsqueeze(-1)
grouped_xyz[empty_ball_mask] = 0
if features is not None:
grouped_features = grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt) # (M1 + M2, C, nsample)
grouped_features[empty_ball_mask] = 0
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (M1 + M2 ..., C + 3, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False at the same time!"
new_features = grouped_xyz
return new_features, idx
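# Shape sketch (hypothetical stacked batch; CUDA required): two samples with
# 100 and 80 points, queried at 10 + 8 ball centers.
#   xyz_batch_cnt = torch.tensor([100, 80]).int().cuda()    # xyz: (180, 3)
#   new_xyz_batch_cnt = torch.tensor([10, 8]).int().cuda()  # new_xyz: (18, 3)
#   grouper = QueryAndGroup(radius=0.8, nsample=16, use_xyz=True)
#   new_features, idx = grouper(xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features)
#   # new_features: (18, C + 3, 16)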
class FarthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, npoint: int):
"""
Args:
ctx:
xyz: (B, N, 3) where N > npoint
npoint: int, number of features in the sampled set
Returns:
output: (B, npoint) tensor containing the set
"""
assert xyz.is_contiguous()
B, N, _ = xyz.size()
output = torch.cuda.IntTensor(B, npoint)
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
pointnet2.farthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
return output
@staticmethod
def backward(xyz, a=None):
return None, None
farthest_point_sample = furthest_point_sample = FarthestPointSampling.apply
class StackFarthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, xyz_batch_cnt, npoint):
"""
Args:
ctx:
xyz: (N1 + N2 + ..., 3) where N > npoint
xyz_batch_cnt: [N1, N2, ...]
npoint: int, number of features in the sampled set
Returns:
            output: (npoint.sum()) tensor containing the sampled point indices,
                where npoint = (M1, M2, ...)
"""
assert xyz.is_contiguous() and xyz.shape[1] == 3
batch_size = xyz_batch_cnt.__len__()
if not isinstance(npoint, torch.Tensor):
if not isinstance(npoint, list):
npoint = [npoint for i in range(batch_size)]
npoint = torch.tensor(npoint, device=xyz.device).int()
N, _ = xyz.size()
temp = torch.cuda.FloatTensor(N).fill_(1e10)
output = torch.cuda.IntTensor(npoint.sum().item())
pointnet2.stack_farthest_point_sampling_wrapper(xyz, temp, xyz_batch_cnt, output, npoint)
return output
@staticmethod
def backward(xyz, a=None):
return None, None
stack_farthest_point_sample = StackFarthestPointSampling.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, unknown_batch_cnt, known, known_batch_cnt):
"""
Args:
ctx:
unknown: (N1 + N2..., 3)
unknown_batch_cnt: (batch_size), [N1, N2, ...]
known: (M1 + M2..., 3)
known_batch_cnt: (batch_size), [M1, M2, ...]
Returns:
dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors
idx: (N1 + N2 ..., 3) index of the three nearest neighbors, range [0, M1+M2+...]
"""
assert unknown.shape.__len__() == 2 and unknown.shape[1] == 3
assert known.shape.__len__() == 2 and known.shape[1] == 3
assert unknown_batch_cnt.__len__() == known_batch_cnt.__len__()
dist2 = unknown.new_zeros(unknown.shape)
idx = unknown_batch_cnt.new_zeros(unknown.shape).int()
pointnet2.three_nn_wrapper(
unknown.contiguous(), unknown_batch_cnt.contiguous(),
known.contiguous(), known_batch_cnt.contiguous(), dist2, idx
)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor):
"""
Args:
ctx:
features: (M1 + M2 ..., C)
idx: [N1 + N2 ..., 3]
weight: [N1 + N2 ..., 3]
Returns:
out_tensor: (N1 + N2 ..., C)
"""
assert idx.shape[0] == weight.shape[0] and idx.shape[1] == weight.shape[1] == 3
ctx.three_interpolate_for_backward = (idx, weight, features.shape[0])
output = features.new_zeros((idx.shape[0], features.shape[1]))
pointnet2.three_interpolate_wrapper(features.contiguous(), idx.contiguous(), weight.contiguous(), output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor):
"""
Args:
ctx:
grad_out: (N1 + N2 ..., C)
Returns:
grad_features: (M1 + M2 ..., C)
"""
idx, weight, M = ctx.three_interpolate_for_backward
grad_features = grad_out.new_zeros((M, grad_out.shape[1]))
pointnet2.three_interpolate_grad_wrapper(
grad_out.contiguous(), idx.contiguous(), weight.contiguous(), grad_features
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
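# A common weighting sketch (assumption: the standard inverse-distance scheme
# paired with three_nn / three_interpolate in PointNet++-style decoders):
#   dist, idx = three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt)
#   dist_recip = 1.0 / (dist + 1e-8)
#   weight = dist_recip / torch.sum(dist_recip, dim=-1, keepdim=True)
#   interpolated = three_interpolate(known_feats, idx, weight)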
class ThreeNNForVectorPoolByTwoStep(Function):
@staticmethod
def forward(ctx, support_xyz, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt,
max_neighbour_distance, nsample, neighbor_type, avg_length_of_neighbor_idxs, num_total_grids,
neighbor_distance_multiplier):
"""
Args:
ctx:
            support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grid centers of each grid
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            nsample: find all (-1), find a limited number (>0)
            neighbor_type: 1: ball, others: cube
            neighbor_distance_multiplier: query_distance = neighbor_distance_multiplier * max_neighbour_distance
        Returns:
            new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) indices of the three nearest neighbors
            new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) squared distances to the three nearest neighbors
"""
num_new_xyz = new_xyz.shape[0]
new_xyz_grid_dist2 = new_xyz_grid_centers.new_zeros(new_xyz_grid_centers.shape)
new_xyz_grid_idxs = new_xyz_grid_centers.new_zeros(new_xyz_grid_centers.shape).int().fill_(-1)
while True:
num_max_sum_points = avg_length_of_neighbor_idxs * num_new_xyz
stack_neighbor_idxs = new_xyz_grid_idxs.new_zeros(num_max_sum_points)
start_len = new_xyz_grid_idxs.new_zeros(num_new_xyz, 2).int()
cumsum = new_xyz_grid_idxs.new_zeros(1)
pointnet2.query_stacked_local_neighbor_idxs_wrapper_stack(
support_xyz.contiguous(), xyz_batch_cnt.contiguous(),
new_xyz.contiguous(), new_xyz_batch_cnt.contiguous(),
stack_neighbor_idxs.contiguous(), start_len.contiguous(), cumsum,
avg_length_of_neighbor_idxs, max_neighbour_distance * neighbor_distance_multiplier,
nsample, neighbor_type
)
avg_length_of_neighbor_idxs = cumsum[0].item() // num_new_xyz + int(cumsum[0].item() % num_new_xyz > 0)
if cumsum[0] <= num_max_sum_points:
break
stack_neighbor_idxs = stack_neighbor_idxs[:cumsum[0]]
pointnet2.query_three_nn_by_stacked_local_idxs_wrapper_stack(
support_xyz, new_xyz, new_xyz_grid_centers, new_xyz_grid_idxs, new_xyz_grid_dist2,
stack_neighbor_idxs, start_len, num_new_xyz, num_total_grids
)
return torch.sqrt(new_xyz_grid_dist2), new_xyz_grid_idxs, torch.tensor(avg_length_of_neighbor_idxs)
three_nn_for_vector_pool_by_two_step = ThreeNNForVectorPoolByTwoStep.apply
class VectorPoolWithVoxelQuery(Function):
@staticmethod
def forward(ctx, support_xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor, support_features: torch.Tensor,
new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor, num_grid_x, num_grid_y, num_grid_z,
max_neighbour_distance, num_c_out_each_grid, use_xyz,
num_mean_points_per_grid=100, nsample=-1, neighbor_type=0, pooling_type=0):
"""
Args:
ctx:
support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
xyz_batch_cnt: (batch_size), [N1, N2, ...]
support_features: (N1 + N2 ..., C)
new_xyz: (M1 + M2 ..., 3) centers of new positions
new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
num_grid_x: number of grids in each local area centered at new_xyz
num_grid_y:
num_grid_z:
max_neighbour_distance:
num_c_out_each_grid:
use_xyz:
            neighbor_type: 1: ball, others: cube
pooling_type: 0: avg_pool, 1: random choice
Returns:
new_features: (M1 + M2 ..., num_c_out)
"""
assert support_xyz.is_contiguous()
assert support_features.is_contiguous()
assert xyz_batch_cnt.is_contiguous()
assert new_xyz.is_contiguous()
assert new_xyz_batch_cnt.is_contiguous()
num_total_grids = num_grid_x * num_grid_y * num_grid_z
num_c_out = num_c_out_each_grid * num_total_grids
N, num_c_in = support_features.shape
M = new_xyz.shape[0]
assert num_c_in % num_c_out_each_grid == 0, \
f'the input channels ({num_c_in}) should be an integral multiple of num_c_out_each_grid({num_c_out_each_grid})'
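        # same grow-and-retry scheme as in ThreeNNForVectorPoolByTwoStep: retry with
        # a larger num_mean_points_per_grid whenever grouped_idxs overflows the buffer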
while True:
new_features = support_features.new_zeros((M, num_c_out))
new_local_xyz = support_features.new_zeros((M, 3 * num_total_grids))
point_cnt_of_grid = xyz_batch_cnt.new_zeros((M, num_total_grids))
num_max_sum_points = num_mean_points_per_grid * M
grouped_idxs = xyz_batch_cnt.new_zeros((num_max_sum_points, 3))
num_cum_sum = pointnet2.vector_pool_wrapper(
support_xyz, xyz_batch_cnt, support_features, new_xyz, new_xyz_batch_cnt,
new_features, new_local_xyz, point_cnt_of_grid, grouped_idxs,
num_grid_x, num_grid_y, num_grid_z, max_neighbour_distance, use_xyz,
num_max_sum_points, nsample, neighbor_type, pooling_type
)
num_mean_points_per_grid = num_cum_sum // M + int(num_cum_sum % M > 0)
if num_cum_sum <= num_max_sum_points:
break
grouped_idxs = grouped_idxs[:num_cum_sum]
normalizer = torch.clamp_min(point_cnt_of_grid[:, :, None].float(), min=1e-6)
new_features = (new_features.view(-1, num_total_grids, num_c_out_each_grid) / normalizer).view(-1, num_c_out)
if use_xyz:
new_local_xyz = (new_local_xyz.view(-1, num_total_grids, 3) / normalizer).view(-1, num_total_grids * 3)
num_mean_points_per_grid = torch.Tensor([num_mean_points_per_grid]).int()
nsample = torch.Tensor([nsample]).int()
ctx.vector_pool_for_backward = (point_cnt_of_grid, grouped_idxs, N, num_c_in)
ctx.mark_non_differentiable(new_local_xyz, num_mean_points_per_grid, nsample, point_cnt_of_grid)
return new_features, new_local_xyz, num_mean_points_per_grid, point_cnt_of_grid
@staticmethod
def backward(ctx, grad_new_features: torch.Tensor, grad_local_xyz: torch.Tensor, grad_num_cum_sum, grad_point_cnt_of_grid):
"""
Args:
ctx:
grad_new_features: (M1 + M2 ..., num_c_out), num_c_out = num_c_out_each_grid * num_total_grids
Returns:
grad_support_features: (N1 + N2 ..., C_in)
"""
point_cnt_of_grid, grouped_idxs, N, num_c_in = ctx.vector_pool_for_backward
grad_support_features = grad_new_features.new_zeros((N, num_c_in))
if grouped_idxs.shape[0] > 0:
pointnet2.vector_pool_grad_wrapper(
grad_new_features.contiguous(), point_cnt_of_grid, grouped_idxs,
grad_support_features
)
return None, None, grad_support_features, None, None, None, None, None, None, None, None, None, None, None, None
vector_pool_with_voxel_query_op = VectorPoolWithVoxelQuery.apply
if __name__ == '__main__':
pass
| 18,073
| 38.462882
| 127
|
py
|
3DTrans
|
3DTrans-master/pcdet/ops/pointnet2/pointnet2_stack/voxel_pool_modules.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import voxel_query_utils
from typing import List
class NeighborVoxelSAModuleMSG(nn.Module):
def __init__(self, *, query_ranges: List[List[int]], radii: List[float],
nsamples: List[int], mlps: List[List[int]], use_xyz: bool = True, pool_method='max_pool'):
"""
Args:
            query_ranges: list of int, list of neighbor ranges to group with
            radii: list of float, list of radii to group with
nsamples: list of int, number of samples in each ball query
mlps: list of list of int, spec of the pointnet before the global pooling for each scale
use_xyz:
pool_method: max_pool / avg_pool
"""
super().__init__()
assert len(query_ranges) == len(nsamples) == len(mlps)
self.groupers = nn.ModuleList()
self.mlps_in = nn.ModuleList()
self.mlps_pos = nn.ModuleList()
self.mlps_out = nn.ModuleList()
for i in range(len(query_ranges)):
max_range = query_ranges[i]
nsample = nsamples[i]
radius = radii[i]
self.groupers.append(voxel_query_utils.VoxelQueryAndGrouping(max_range, radius, nsample))
mlp_spec = mlps[i]
cur_mlp_in = nn.Sequential(
nn.Conv1d(mlp_spec[0], mlp_spec[1], kernel_size=1, bias=False),
nn.BatchNorm1d(mlp_spec[1])
)
cur_mlp_pos = nn.Sequential(
nn.Conv2d(3, mlp_spec[1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[1])
)
cur_mlp_out = nn.Sequential(
nn.Conv1d(mlp_spec[1], mlp_spec[2], kernel_size=1, bias=False),
nn.BatchNorm1d(mlp_spec[2]),
nn.ReLU()
)
self.mlps_in.append(cur_mlp_in)
self.mlps_pos.append(cur_mlp_pos)
self.mlps_out.append(cur_mlp_out)
self.relu = nn.ReLU()
self.pool_method = pool_method
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, \
new_coords, features, voxel2point_indices):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz: (M1 + M2 ..., 3)
:param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param new_coords: (M1 + M2 ..., 4) voxel coordinates of the centers, [batch_idx, x, y, z]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the features
        :param voxel2point_indices: (B, Z, Y, X) tensor of point indices
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
# change the order to [batch_idx, z, y, x]
new_coords = new_coords[:, [0, 3, 2, 1]].contiguous()
new_features_list = []
for k in range(len(self.groupers)):
# features_in: (1, C, M1+M2)
features_in = features.permute(1, 0).unsqueeze(0)
features_in = self.mlps_in[k](features_in)
# features_in: (1, M1+M2, C)
features_in = features_in.permute(0, 2, 1).contiguous()
# features_in: (M1+M2, C)
features_in = features_in.view(-1, features_in.shape[-1])
# grouped_features: (M1+M2, C, nsample)
# grouped_xyz: (M1+M2, 3, nsample)
grouped_features, grouped_xyz, empty_ball_mask = self.groupers[k](
new_coords, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features_in, voxel2point_indices
)
grouped_features[empty_ball_mask] = 0
# grouped_features: (1, C, M1+M2, nsample)
grouped_features = grouped_features.permute(1, 0, 2).unsqueeze(dim=0)
# grouped_xyz: (M1+M2, 3, nsample)
grouped_xyz = grouped_xyz - new_xyz.unsqueeze(-1)
grouped_xyz[empty_ball_mask] = 0
# grouped_xyz: (1, 3, M1+M2, nsample)
grouped_xyz = grouped_xyz.permute(1, 0, 2).unsqueeze(0)
            # position_features: (1, C, M1+M2, nsample)
position_features = self.mlps_pos[k](grouped_xyz)
new_features = grouped_features + position_features
new_features = self.relu(new_features)
if self.pool_method == 'max_pool':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
elif self.pool_method == 'avg_pool':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
else:
raise NotImplementedError
new_features = self.mlps_out[k](new_features)
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
new_features_list.append(new_features)
# (M1 + M2 ..., C)
new_features = torch.cat(new_features_list, dim=1)
return new_features
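def _demo_neighbor_voxel_sa_module():
    # Hypothetical construction sketch (not part of the original file): a
    # single-scale module that queries a 4x4x4 voxel range within radius 2.0
    # and lifts 16-channel features to 64 channels; mlps[k] is
    # [C_in, C_mid, C_out] as consumed by __init__ above.
    return NeighborVoxelSAModuleMSG(
        query_ranges=[[4, 4, 4]], radii=[2.0], nsamples=[16],
        mlps=[[16, 32, 64]], pool_method='max_pool',
    )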
| 5,672
| 41.977273
| 108
|
py
|
3DTrans
|
3DTrans-master/pcdet/ops/pointnet2/pointnet2_stack/__init__.py
| 0
| 0
| 0
|
py
|
|
3DTrans
|
3DTrans-master/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py
|
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
def build_local_aggregation_module(input_channels, config):
local_aggregation_name = config.get('NAME', 'StackSAModuleMSG')
if local_aggregation_name == 'StackSAModuleMSG':
mlps = config.MLPS
for k in range(len(mlps)):
mlps[k] = [input_channels] + mlps[k]
cur_layer = StackSAModuleMSG(
radii=config.POOL_RADIUS, nsamples=config.NSAMPLE, mlps=mlps, use_xyz=True, pool_method='max_pool',
)
num_c_out = sum([x[-1] for x in mlps])
elif local_aggregation_name == 'VectorPoolAggregationModuleMSG':
cur_layer = VectorPoolAggregationModuleMSG(input_channels=input_channels, config=config)
num_c_out = config.MSG_POST_MLPS[-1]
else:
raise NotImplementedError
return cur_layer, num_c_out
class StackSAModuleMSG(nn.Module):
def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]],
use_xyz: bool = True, pool_method='max_pool'):
"""
Args:
radii: list of float, list of radii to group with
nsamples: list of int, number of samples in each ball query
mlps: list of list of int, spec of the pointnet before the global pooling for each scale
use_xyz:
pool_method: max_pool / avg_pool
"""
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz))
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
shared_mlps = []
for k in range(len(mlp_spec) - 1):
shared_mlps.extend([
nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[k + 1]),
nn.ReLU()
])
self.mlps.append(nn.Sequential(*shared_mlps))
self.pool_method = pool_method
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz: (M1 + M2 ..., 3)
:param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
for k in range(len(self.groupers)):
new_features, ball_idxs = self.groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
) # (M1 + M2, C, nsample)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M1 + M2 ..., nsample)
new_features = self.mlps[k](new_features) # (1, C, M1 + M2 ..., nsample)
if self.pool_method == 'max_pool':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
elif self.pool_method == 'avg_pool':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
else:
raise NotImplementedError
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
new_features_list.append(new_features)
new_features = torch.cat(new_features_list, dim=1) # (M1 + M2 ..., C)
return new_xyz, new_features
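def _demo_stack_sa_module():
    # Hypothetical construction sketch (not part of the original file):
    # two-scale grouping with radii 0.4 / 0.8. With use_xyz=True, __init__
    # adds 3 to each mlps[k][0], so pass the raw feature channel count (16);
    # the output has 32 + 64 = 96 channels after concatenation.
    return StackSAModuleMSG(
        radii=[0.4, 0.8], nsamples=[16, 32],
        mlps=[[16, 32, 32], [16, 32, 64]], use_xyz=True, pool_method='max_pool',
    )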
class StackPointnetFPModule(nn.Module):
def __init__(self, *, mlp: List[int]):
"""
Args:
mlp: list of int
"""
super().__init__()
shared_mlps = []
for k in range(len(mlp) - 1):
shared_mlps.extend([
nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp[k + 1]),
nn.ReLU()
])
self.mlp = nn.Sequential(*shared_mlps)
def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None):
"""
Args:
unknown: (N1 + N2 ..., 3)
known: (M1 + M2 ..., 3)
            unknown_feats: (N1 + N2 ..., C1)
known_feats: (M1 + M2 ..., C2)
Returns:
new_features: (N1 + N2 ..., C_out)
"""
dist, idx = pointnet2_utils.three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt)
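        # inverse-distance weighting: closer neighbors get larger weights that are
        # normalized to sum to 1, e.g. dists (1, 2, 4) -> weights (4/7, 2/7, 1/7)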
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=-1, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
if unknown_feats is not None:
new_features = torch.cat([interpolated_feats, unknown_feats], dim=1) # (N1 + N2 ..., C2 + C1)
else:
new_features = interpolated_feats
new_features = new_features.permute(1, 0)[None, :, :, None] # (1, C, N1 + N2 ..., 1)
new_features = self.mlp(new_features)
new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0) # (N1 + N2 ..., C)
return new_features
class VectorPoolLocalInterpolateModule(nn.Module):
def __init__(self, mlp, num_voxels, max_neighbour_distance, nsample, neighbor_type, use_xyz=True,
neighbour_distance_multiplier=1.0, xyz_encoding_type='concat'):
"""
Args:
mlp:
num_voxels:
max_neighbour_distance:
neighbor_type: 1: ball, others: cube
            nsample: find all (-1) or a limited number (>0) of neighbors
use_xyz:
neighbour_distance_multiplier:
xyz_encoding_type:
"""
super().__init__()
self.num_voxels = num_voxels # [num_grid_x, num_grid_y, num_grid_z]: number of grids in each local area centered at new_xyz
self.num_total_grids = self.num_voxels[0] * self.num_voxels[1] * self.num_voxels[2]
self.max_neighbour_distance = max_neighbour_distance
self.neighbor_distance_multiplier = neighbour_distance_multiplier
self.nsample = nsample
self.neighbor_type = neighbor_type
self.use_xyz = use_xyz
self.xyz_encoding_type = xyz_encoding_type
if mlp is not None:
if self.use_xyz:
mlp[0] += 9 if self.xyz_encoding_type == 'concat' else 0
shared_mlps = []
for k in range(len(mlp) - 1):
shared_mlps.extend([
nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp[k + 1]),
nn.ReLU()
])
self.mlp = nn.Sequential(*shared_mlps)
else:
self.mlp = None
self.num_avg_length_of_neighbor_idxs = 1000
def forward(self, support_xyz, support_features, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt):
"""
Args:
support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
support_features: (N1 + N2 ..., C) point-wise features
xyz_batch_cnt: (batch_size), [N1, N2, ...]
new_xyz: (M1 + M2 ..., 3) centers of the ball query
new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
Returns:
            new_features: ((M1 + M2 ...) * num_total_grids, C_out)
"""
with torch.no_grad():
dist, idx, num_avg_length_of_neighbor_idxs = pointnet2_utils.three_nn_for_vector_pool_by_two_step(
support_xyz, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt,
self.max_neighbour_distance, self.nsample, self.neighbor_type,
self.num_avg_length_of_neighbor_idxs, self.num_total_grids, self.neighbor_distance_multiplier
)
self.num_avg_length_of_neighbor_idxs = max(self.num_avg_length_of_neighbor_idxs, num_avg_length_of_neighbor_idxs.item())
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=-1, keepdim=True)
weight = dist_recip / torch.clamp_min(norm, min=1e-8)
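        # grid centers with no neighbor in range are marked idx = -1 by the kernel;
        # redirect them to point 0 for the gather, then zero their features below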
empty_mask = (idx.view(-1, 3)[:, 0] == -1)
idx.view(-1, 3)[empty_mask] = 0
interpolated_feats = pointnet2_utils.three_interpolate(support_features, idx.view(-1, 3), weight.view(-1, 3))
interpolated_feats = interpolated_feats.view(idx.shape[0], idx.shape[1], -1) # (M1 + M2 ..., num_total_grids, C)
if self.use_xyz:
            near_known_xyz = support_xyz[idx.view(-1, 3).long()].view(-1, 3, 3)  # ((M1 + M2 ...) * num_total_grids, 3, 3)
local_xyz = (new_xyz_grid_centers.view(-1, 1, 3) - near_known_xyz).view(-1, idx.shape[1], 9)
if self.xyz_encoding_type == 'concat':
interpolated_feats = torch.cat((interpolated_feats, local_xyz), dim=-1) # ( M1 + M2 ..., num_total_grids, 9+C)
else:
raise NotImplementedError
new_features = interpolated_feats.view(-1, interpolated_feats.shape[-1]) # ((M1 + M2 ...) * num_total_grids, C)
new_features[empty_mask, :] = 0
if self.mlp is not None:
new_features = new_features.permute(1, 0)[None, :, :, None] # (1, C, N1 + N2 ..., 1)
new_features = self.mlp(new_features)
new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0) # (N1 + N2 ..., C)
return new_features
class VectorPoolAggregationModule(nn.Module):
def __init__(
self, input_channels, num_local_voxel=(3, 3, 3), local_aggregation_type='local_interpolation',
num_reduced_channels=30, num_channels_of_local_aggregation=32, post_mlps=(128,),
max_neighbor_distance=None, neighbor_nsample=-1, neighbor_type=0, neighbor_distance_multiplier=2.0):
super().__init__()
self.num_local_voxel = num_local_voxel
self.total_voxels = self.num_local_voxel[0] * self.num_local_voxel[1] * self.num_local_voxel[2]
self.local_aggregation_type = local_aggregation_type
assert self.local_aggregation_type in ['local_interpolation', 'voxel_avg_pool', 'voxel_random_choice']
self.input_channels = input_channels
self.num_reduced_channels = input_channels if num_reduced_channels is None else num_reduced_channels
self.num_channels_of_local_aggregation = num_channels_of_local_aggregation
self.max_neighbour_distance = max_neighbor_distance
self.neighbor_nsample = neighbor_nsample
self.neighbor_type = neighbor_type # 1: ball, others: cube
if self.local_aggregation_type == 'local_interpolation':
self.local_interpolate_module = VectorPoolLocalInterpolateModule(
mlp=None, num_voxels=self.num_local_voxel,
max_neighbour_distance=self.max_neighbour_distance,
nsample=self.neighbor_nsample,
neighbor_type=self.neighbor_type,
neighbour_distance_multiplier=neighbor_distance_multiplier,
)
num_c_in = (self.num_reduced_channels + 9) * self.total_voxels
else:
self.local_interpolate_module = None
num_c_in = (self.num_reduced_channels + 3) * self.total_voxels
num_c_out = self.total_voxels * self.num_channels_of_local_aggregation
self.separate_local_aggregation_layer = nn.Sequential(
nn.Conv1d(num_c_in, num_c_out, kernel_size=1, groups=self.total_voxels, bias=False),
nn.BatchNorm1d(num_c_out),
nn.ReLU()
)
post_mlp_list = []
c_in = num_c_out
for cur_num_c in post_mlps:
post_mlp_list.extend([
nn.Conv1d(c_in, cur_num_c, kernel_size=1, bias=False),
nn.BatchNorm1d(cur_num_c),
nn.ReLU()
])
c_in = cur_num_c
self.post_mlps = nn.Sequential(*post_mlp_list)
self.num_mean_points_per_grid = 20
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def extra_repr(self) -> str:
        ret = f'radius={self.max_neighbour_distance}, local_voxels={self.num_local_voxel}, ' \
f'local_aggregation_type={self.local_aggregation_type}, ' \
f'num_c_reduction={self.input_channels}->{self.num_reduced_channels}, ' \
f'num_c_local_aggregation={self.num_channels_of_local_aggregation}'
return ret
def vector_pool_with_voxel_query(self, xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt):
use_xyz = 1
pooling_type = 0 if self.local_aggregation_type == 'voxel_avg_pool' else 1
new_features, new_local_xyz, num_mean_points_per_grid, point_cnt_of_grid = pointnet2_utils.vector_pool_with_voxel_query_op(
xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt,
self.num_local_voxel[0], self.num_local_voxel[1], self.num_local_voxel[2],
self.max_neighbour_distance, self.num_reduced_channels, use_xyz,
self.num_mean_points_per_grid, self.neighbor_nsample, self.neighbor_type,
pooling_type
)
self.num_mean_points_per_grid = max(self.num_mean_points_per_grid, num_mean_points_per_grid.item())
num_new_pts = new_features.shape[0]
new_local_xyz = new_local_xyz.view(num_new_pts, -1, 3) # (N, num_voxel, 3)
new_features = new_features.view(num_new_pts, -1, self.num_reduced_channels) # (N, num_voxel, C)
new_features = torch.cat((new_local_xyz, new_features), dim=-1).view(num_new_pts, -1)
return new_features, point_cnt_of_grid
@staticmethod
def get_dense_voxels_by_center(point_centers, max_neighbour_distance, num_voxels):
"""
Args:
point_centers: (N, 3)
max_neighbour_distance: float
num_voxels: [num_x, num_y, num_z]
Returns:
voxel_centers: (N, total_voxels, 3)
"""
R = max_neighbour_distance
device = point_centers.device
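        # each axis holds num_voxels[i] cell centers of a uniform grid over [-R, R],
        # e.g. num_voxels[0] = 2 and R = 1.0 gives x_grids = [-0.5, 0.5]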
x_grids = torch.arange(-R + R / num_voxels[0], R - R / num_voxels[0] + 1e-5, 2 * R / num_voxels[0], device=device)
y_grids = torch.arange(-R + R / num_voxels[1], R - R / num_voxels[1] + 1e-5, 2 * R / num_voxels[1], device=device)
z_grids = torch.arange(-R + R / num_voxels[2], R - R / num_voxels[2] + 1e-5, 2 * R / num_voxels[2], device=device)
x_offset, y_offset, z_offset = torch.meshgrid(x_grids, y_grids, z_grids) # shape: [num_x, num_y, num_z]
xyz_offset = torch.cat((
x_offset.contiguous().view(-1, 1),
y_offset.contiguous().view(-1, 1),
z_offset.contiguous().view(-1, 1)), dim=-1
)
voxel_centers = point_centers[:, None, :] + xyz_offset[None, :, :]
return voxel_centers
def vector_pool_with_local_interpolate(self, xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt):
"""
Args:
xyz: (N, 3)
xyz_batch_cnt: (batch_size)
features: (N, C)
new_xyz: (M, 3)
new_xyz_batch_cnt: (batch_size)
Returns:
new_features: (M, total_voxels * C)
"""
voxel_centers = self.get_dense_voxels_by_center(
point_centers=new_xyz, max_neighbour_distance=self.max_neighbour_distance, num_voxels=self.num_local_voxel
) # (M1 + M2 + ..., total_voxels, 3)
voxel_features = self.local_interpolate_module.forward(
support_xyz=xyz, support_features=features, xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz, new_xyz_grid_centers=voxel_centers, new_xyz_batch_cnt=new_xyz_batch_cnt
) # ((M1 + M2 ...) * total_voxels, C)
voxel_features = voxel_features.contiguous().view(-1, self.total_voxels * voxel_features.shape[-1])
return voxel_features
def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features, **kwargs):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz: (M1 + M2 ..., 3)
:param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
N, C = features.shape
assert C % self.num_reduced_channels == 0, \
f'the input channels ({C}) should be an integral multiple of num_reduced_channels({self.num_reduced_channels})'
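        # channel reduction by summation: fold the C input channels into groups of
        # num_reduced_channels and sum them, e.g. C=64, num_reduced_channels=32 sums
        # channel pairs (c, c + 32)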
features = features.view(N, -1, self.num_reduced_channels).sum(dim=1)
if self.local_aggregation_type in ['voxel_avg_pool', 'voxel_random_choice']:
vector_features, point_cnt_of_grid = self.vector_pool_with_voxel_query(
xyz=xyz, xyz_batch_cnt=xyz_batch_cnt, features=features,
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
)
elif self.local_aggregation_type == 'local_interpolation':
vector_features = self.vector_pool_with_local_interpolate(
xyz=xyz, xyz_batch_cnt=xyz_batch_cnt, features=features,
new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
) # (M1 + M2 + ..., total_voxels * C)
else:
raise NotImplementedError
vector_features = vector_features.permute(1, 0)[None, :, :] # (1, num_voxels * C, M1 + M2 ...)
new_features = self.separate_local_aggregation_layer(vector_features)
new_features = self.post_mlps(new_features)
new_features = new_features.squeeze(dim=0).permute(1, 0)
return new_xyz, new_features
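def _demo_vector_pool_aggregation():
    # Hypothetical construction sketch (not part of the original file):
    # aggregate over a 3x3x3 local voxel grid of radius 1.6 via local
    # interpolation; input_channels must be a multiple of
    # num_reduced_channels (32 = 2 * 16), as checked in forward() above.
    return VectorPoolAggregationModule(
        input_channels=32, num_local_voxel=(3, 3, 3),
        local_aggregation_type='local_interpolation',
        num_reduced_channels=16, num_channels_of_local_aggregation=32,
        post_mlps=(128,), max_neighbor_distance=1.6,
    )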
class VectorPoolAggregationModuleMSG(nn.Module):
def __init__(self, input_channels, config):
super().__init__()
self.model_cfg = config
self.num_groups = self.model_cfg.NUM_GROUPS
self.layers = []
c_in = 0
for k in range(self.num_groups):
cur_config = self.model_cfg[f'GROUP_CFG_{k}']
cur_vector_pool_module = VectorPoolAggregationModule(
input_channels=input_channels, num_local_voxel=cur_config.NUM_LOCAL_VOXEL,
post_mlps=cur_config.POST_MLPS,
max_neighbor_distance=cur_config.MAX_NEIGHBOR_DISTANCE,
neighbor_nsample=cur_config.NEIGHBOR_NSAMPLE,
local_aggregation_type=self.model_cfg.LOCAL_AGGREGATION_TYPE,
num_reduced_channels=self.model_cfg.get('NUM_REDUCED_CHANNELS', None),
num_channels_of_local_aggregation=self.model_cfg.NUM_CHANNELS_OF_LOCAL_AGGREGATION,
neighbor_distance_multiplier=2.0
)
self.__setattr__(f'layer_{k}', cur_vector_pool_module)
c_in += cur_config.POST_MLPS[-1]
c_in += 3 # use_xyz
shared_mlps = []
for cur_num_c in self.model_cfg.MSG_POST_MLPS:
shared_mlps.extend([
nn.Conv1d(c_in, cur_num_c, kernel_size=1, bias=False),
nn.BatchNorm1d(cur_num_c),
nn.ReLU()
])
c_in = cur_num_c
self.msg_post_mlps = nn.Sequential(*shared_mlps)
def forward(self, **kwargs):
features_list = []
for k in range(self.num_groups):
cur_xyz, cur_features = self.__getattr__(f'layer_{k}')(**kwargs)
features_list.append(cur_features)
features = torch.cat(features_list, dim=-1)
features = torch.cat((cur_xyz, features), dim=-1)
features = features.permute(1, 0)[None, :, :] # (1, C, N)
new_features = self.msg_post_mlps(features)
new_features = new_features.squeeze(dim=0).permute(1, 0) # (N, C)
return cur_xyz, new_features
| 21,385
| 44.40552
| 132
|
py
|
3DTrans
|
3DTrans-master/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_utils.py
|
from typing import Tuple
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from . import pointnet2_batch_cuda as pointnet2
class FarthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
"""
Uses iterative farthest point sampling to select a set of npoint features that have the largest
minimum distance
:param ctx:
:param xyz: (B, N, 3) where N > npoint
:param npoint: int, number of features in the sampled set
:return:
            output: (B, npoint) int tensor with the indices of the sampled points
"""
assert xyz.is_contiguous()
B, N, _ = xyz.size()
output = torch.cuda.IntTensor(B, npoint)
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
pointnet2.farthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
return output
@staticmethod
    def backward(ctx, a=None):
return None, None
farthest_point_sample = furthest_point_sample = FarthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param features: (B, C, N)
:param idx: (B, npoint) index tensor of the features to gather
:return:
output: (B, C, npoint)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
B, npoint = idx.size()
_, C, N = features.size()
output = torch.cuda.FloatTensor(B, C, npoint)
pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
ctx.for_backwards = (idx, C, N)
return output
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
B, npoint = idx.size()
grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
return grad_features, None
gather_operation = GatherOperation.apply
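def _demo_farthest_point_sample():
    # Hypothetical usage sketch (not part of the original file): downsample
    # each of two point clouds from 1024 to 256 points, then gather their
    # coordinates. Assumes a CUDA device and the compiled pointnet2_batch
    # extension.
    xyz = torch.rand(2, 1024, 3).cuda()
    idx = farthest_point_sample(xyz, 256)  # (2, 256) int32 indices
    sampled_xyz = gather_operation(xyz.transpose(1, 2).contiguous(), idx)  # (2, 3, 256)
    return sampled_xyz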
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
:param ctx:
:param unknown: (B, N, 3)
:param known: (B, M, 3)
:return:
dist: (B, N, 3) l2 distance to the three nearest neighbors
idx: (B, N, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
B, N, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(B, N, 3)
idx = torch.cuda.IntTensor(B, N, 3)
pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
        Performs weighted linear interpolation on 3 features
:param ctx:
:param features: (B, C, M) Features descriptors to be interpolated from
:param idx: (B, n, 3) three nearest neighbors of the target features in features
:param weight: (B, n, 3) weights
:return:
            output: (B, C, n) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(B, c, n)
pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
:param ctx:
:param grad_out: (B, C, N) tensor with gradients of outputs
:return:
grad_features: (B, C, M) tensor with gradients of features
None:
None:
"""
idx, weight, m = ctx.three_interpolate_for_backward
B, c, n = grad_out.size()
grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
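def _demo_feature_propagation():
    # Hypothetical usage sketch (not part of the original file): the classic
    # PointNet++ feature-propagation pattern, interpolating 32-channel
    # features from 64 known points onto 256 query points. Assumes a CUDA
    # device and the compiled pointnet2_batch extension.
    unknown = torch.rand(2, 256, 3).cuda()
    known = torch.rand(2, 64, 3).cuda()
    known_feats = torch.rand(2, 32, 64).cuda()  # (B, C, M)
    dist, idx = three_nn(unknown, known)
    weight = 1.0 / (dist + 1e-8)
    weight = weight / weight.sum(dim=-1, keepdim=True)  # weights sum to 1
    return three_interpolate(known_feats, idx, weight)  # (B, 32, 256)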
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indices of features to group with
:return:
output: (B, C, npoint, nsample) tensor
"""
assert features.is_contiguous()
assert idx.is_contiguous()
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)
ctx.for_backwards = (idx, N)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param ctx:
:param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
:return:
grad_features: (B, C, N) gradient of the features
"""
idx, N = ctx.for_backwards
B, C, npoint, nsample = grad_out.size()
grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param radius: float, radius of the balls
:param nsample: int, maximum number of features in the balls
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centers of the ball query
:return:
            idx: (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
assert new_xyz.is_contiguous()
assert xyz.is_contiguous()
B, N, _ = xyz.size()
npoint = new_xyz.size(1)
idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
"""
:param radius: float, radius of ball
:param nsample: int, maximum number of features to gather in the ball
:param use_xyz:
"""
super().__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centroids
:param features: (B, C, N) descriptors of the features
:return:
new_features: (B, 3 + C, npoint, nsample)
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have features=None and use_xyz=False at the same time!"
new_features = grouped_xyz
return new_features
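def _demo_query_and_group():
    # Hypothetical usage sketch (not part of the original file): group up to
    # 16 neighbors within radius 0.4 around the first 128 points of each
    # cloud. Assumes a CUDA device and the compiled pointnet2_batch extension.
    grouper = QueryAndGroup(radius=0.4, nsample=16, use_xyz=True)
    xyz = torch.rand(2, 1024, 3).cuda()
    new_xyz = xyz[:, :128, :].contiguous()
    features = torch.rand(2, 32, 1024).cuda()
    return grouper(xyz, new_xyz, features)  # (2, 3 + 32, 128, 16)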
class GroupAll(nn.Module):
def __init__(self, use_xyz: bool = True):
super().__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
"""
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: ignored
:param features: (B, C, N) descriptors of the features
:return:
new_features: (B, C + 3, 1, N)
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
| 9,717
| 32.395189
| 118
|
py
|