repo_id
stringlengths 19
138
| file_path
stringlengths 32
200
| content
stringlengths 1
12.9M
| __index_level_0__
int64 0
0
|
|---|---|---|---|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/f2v/frustum_grid_generator.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.utils.depth import bin_depths
from paddle3d.utils.grid import create_meshgrid3d, normalize_coords
from paddle3d.utils.transform import project_to_image, transform_points_3d
class FrustumGridGenerator(nn.Layer):
    """
    Generates the sampling grid that maps voxel centers into frustum
    (image u, image v, depth-bin) coordinates, normalized for grid sampling.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/models/backbones_3d/f2v/frustum_grid_generator.py#L8
    """

    def __init__(self, voxel_size, pc_range, disc_cfg):
        """
        Initializes Grid Generator for frustum features
        Args:
            voxel_size [np.array(3)]: Voxel size [X, Y, Z]
            pc_range [list]: Voxelization point cloud range [X_min, Y_min, Z_min, X_max, Y_max, Z_max]
            disc_cfg [dict]: Depth discretization configuration; forwarded as
                kwargs to `bin_depths` and must contain "num_bins"
        """
        super().__init__()
        self.dtype = 'float32'
        point_cloud_range = np.asarray(pc_range)
        # Number of voxels per axis = metric extent / voxel size, rounded.
        grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        self.grid_size = paddle.to_tensor(grid_size)
        # Sentinel written where the projection is NaN/inf; -2 lies outside
        # the normalized [-1, 1] coordinate range produced below.
        self.out_of_bounds_val = -2
        self.disc_cfg = disc_cfg
        # Split the flat 6-element range into per-axis min / max rows.
        pc_range = paddle.to_tensor(pc_range).reshape([2, 3])
        self.pc_min = pc_range[0]
        self.pc_max = pc_range[1]
        self.voxel_size = paddle.to_tensor(voxel_size)
        # Create voxel grid
        # NOTE(review): grid_size is [X, Y, Z]; the X count is fed to
        # create_meshgrid3d as "depth" and fixed up by the transpose below,
        # mirroring the upstream CaDDN implementation.
        self.depth, self.width, self.height = self.grid_size.cast('int32')
        self.voxel_grid = create_meshgrid3d(
            depth=self.depth,
            height=self.height,
            width=self.width,
            normalized_coordinates=False)
        self.voxel_grid = self.voxel_grid.transpose([0, 1, 3, 2,
                                                     4])  # XZY-> XYZ
        # Add offsets to center of voxel
        self.voxel_grid += 0.5
        # Fixed affine transform from integer voxel indices to LiDAR metres.
        self.grid_to_lidar = self.grid_to_lidar_unproject(
            pc_min=self.pc_min, voxel_size=self.voxel_size)

    def grid_to_lidar_unproject(self, pc_min, voxel_size):
        """
        Calculate grid to LiDAR unprojection for each plane
        Args:
            pc_min [paddle.Tensor(3)]: Minimum of point cloud range [X, Y, Z] (m)
            voxel_size [paddle.Tensor(3)]: Size of each voxel [X, Y, Z] (m)
        Returns:
            unproject [paddle.Tensor(4, 4)]: Voxel grid to LiDAR unprojection matrix
        """
        x_size, y_size, z_size = voxel_size.cpu().numpy()
        x_min, y_min, z_min = pc_min.cpu().numpy()
        # Homogeneous scale-then-translate: lidar = index * voxel_size + pc_min.
        unproject = paddle.to_tensor(
            [[x_size, 0, 0, x_min], [0, y_size, 0, y_min],
             [0, 0, z_size, z_min], [0, 0, 0, 1]],
            dtype=self.dtype)  # (4, 4)
        return unproject

    def transform_grid(self, voxel_grid, grid_to_lidar, lidar_to_cam,
                       cam_to_img):
        """
        Transforms voxel sampling grid into frustum sampling grid
        Args:
            voxel_grid [paddle.Tensor(B, X, Y, Z, 3)]: Voxel sampling grid
            grid_to_lidar [paddle.Tensor(4, 4)]: Voxel grid to LiDAR unprojection matrix
            lidar_to_cam [paddle.Tensor(B, 4, 4)]: LiDAR to camera frame transformation
            cam_to_img [paddle.Tensor(B, 3, 4)]: Camera projection matrix
        Returns:
            frustum_grid [paddle.Tensor(B, X, Y, Z, 3)]: Frustum sampling grid
        """
        B = paddle.shape(lidar_to_cam)[0]
        # Create transformation matricies
        V_G = grid_to_lidar  # Voxel Grid -> LiDAR (4, 4)
        C_V = lidar_to_cam  # LiDAR -> Camera (B, 4, 4)
        I_C = cam_to_img  # Camera -> Image (B, 3, 4)
        # Compose voxel->LiDAR->camera into one (B, 4, 4) transform.
        trans = C_V @ V_G
        # Reshape to match dimensions
        trans01 = trans.reshape((B, 1, 1, 4, 4))
        voxel_grid = voxel_grid.tile((B, 1, 1, 1, 1))
        # Transform to camera frame
        camera_grid = transform_points_3d(trans_01=trans01, points_1=voxel_grid)
        # Project to image
        I_C = I_C.reshape([B, 1, 1, 3, 4])
        image_grid, image_depths = project_to_image(
            project=I_C, points=camera_grid)
        # Convert depths to depth bins
        image_depths = bin_depths(depth_map=image_depths, **self.disc_cfg)
        # Stack to form frustum grid
        image_depths = image_depths.unsqueeze(-1)
        frustum_grid = paddle.concat((image_grid, image_depths), axis=-1)
        return frustum_grid

    def forward(self, lidar_to_cam, cam_to_img, image_shape):
        """
        Generates sampling grid for frustum features
        Args:
            lidar_to_cam [paddle.Tensor(B, 4, 4)]: LiDAR to camera frame transformation
            cam_to_img [paddle.Tensor(B, 3, 4)]: Camera projection matrix
            image_shape [paddle.Tensor(B, 2)]: Image shape [H, W]
        Returns:
            frustum_grid [paddle.Tensor(B, X, Y, Z, 3)]: Sampling grids for frustum features
        """
        frustum_grid = self.transform_grid(
            voxel_grid=self.voxel_grid,
            grid_to_lidar=self.grid_to_lidar,
            lidar_to_cam=lidar_to_cam,
            cam_to_img=cam_to_img)
        # Normalize grid
        # Use the largest H/W in the batch so all samples share one extent.
        image_shape = paddle.max(image_shape, axis=0)
        image_depth = paddle.to_tensor([self.disc_cfg["num_bins"]]).cast(
            image_shape.dtype)
        frustum_shape = paddle.concat((image_depth, image_shape))
        frustum_grid = normalize_coords(
            coords=frustum_grid, shape=frustum_shape)
        # Replace any NaNs or infinites with out of bounds
        mask = ~paddle.isfinite(frustum_grid)
        sub_val = paddle.full(
            shape=paddle.shape(mask), fill_value=self.out_of_bounds_val)
        frustum_grid = paddle.where(mask, sub_val, frustum_grid)
        return frustum_grid
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/f2v/frustum_to_voxel.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
from .frustum_grid_generator import FrustumGridGenerator
from .sampler import Sampler
class FrustumToVoxel(nn.Layer):
    """
    Transforms image frustum features into voxel features via a 3D sampling
    grid: a FrustumGridGenerator maps voxel centers into frustum coordinates,
    and a Sampler gathers the frustum features at those coordinates.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/models/backbones_3d/f2v/frustum_to_voxel.py#L8
    """

    def __init__(self, voxel_size, pc_range, sample_cfg, disc_cfg):
        """
        Initializes module to transform frustum features to voxel features via 3D transformation and sampling
        Args:
            voxel_size [np.array(3)]: Voxel size [X, Y, Z]
            pc_range [list]: Voxelization point cloud range [X_min, Y_min, Z_min, X_max, Y_max, Z_max]
            sample_cfg [dict]: Sampler configuration, forwarded as kwargs to Sampler
            disc_cfg [dict]: Depth discretiziation configuration
        """
        super().__init__()
        # NOTE: a local voxel-grid-size computation that existed here was dead
        # code (its result was never stored or used) and has been removed;
        # FrustumGridGenerator derives the grid size itself from the same args.
        self.pc_range = pc_range
        self.disc_cfg = disc_cfg
        self.grid_generator = FrustumGridGenerator(
            voxel_size=voxel_size, pc_range=pc_range, disc_cfg=disc_cfg)
        self.sampler = Sampler(**sample_cfg)

    def forward(self, batch_dict):
        """
        Generates voxel features via 3D transformation and sampling
        Args:
            batch_dict:
                frustum_features [paddle.Tensor(B, C, D, H_image, W_image)]: Image frustum features
                trans_lidar_to_cam [paddle.Tensor(B, 4, 4)]: LiDAR to camera frame transformation
                trans_cam_to_img [paddle.Tensor(B, 3, 4)]: Camera projection matrix
                image_shape [paddle.Tensor(B, 2)]: Image shape [H, W]
        Returns:
            batch_dict:
                voxel_features [paddle.Tensor(B, C, Z, Y, X)]: Image voxel features
        """
        # Generate sampling grid for frustum volume
        grid = self.grid_generator(
            lidar_to_cam=batch_dict["trans_lidar_to_cam"],
            cam_to_img=batch_dict["trans_cam_to_img"],
            image_shape=batch_dict["image_shape"])  # (B, X, Y, Z, 3)
        # Sample frustum volume to generate voxel volume
        voxel_features = self.sampler(
            input_features=batch_dict["frustum_features"],
            grid=grid)  # (B, C, X, Y, Z)
        # (B, C, X, Y, Z) -> (B, C, Z, Y, X)
        voxel_features = voxel_features.transpose([0, 1, 4, 3, 2])
        batch_dict["voxel_features"] = voxel_features
        return batch_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/pv_rcnn/pv_rcnn.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
from typing import Dict, List
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D
from paddle3d.models.common.model_nms_utils import class_agnostic_nms
from paddle3d.sample import Sample, SampleMeta
from paddle3d.utils.logger import logger
from paddle3d.models.layers.param_init import uniform_init
@manager.MODELS.add_component
class PVRCNN(nn.Layer):
    """PV-RCNN 3D detector.

    Pipeline: voxelize raw points -> voxel encoder -> sparse middle encoder ->
    2D backbone + neck -> dense (RPN) head -> point encoder/head -> RoI head.
    In training mode `forward` returns the summed head losses; in eval mode it
    returns parsed Sample predictions (or raw tensors when exported).
    """

    def __init__(self, num_class, voxelizer, voxel_encoder, middle_encoder,
                 point_encoder, backbone, neck, dense_head, point_head,
                 roi_head, post_process_cfg):
        super(PVRCNN, self).__init__()
        self.num_class = num_class
        self.voxelizer = voxelizer
        self.voxel_encoder = voxel_encoder
        self.middle_encoder = middle_encoder
        self.point_encoder = point_encoder
        self.backbone = backbone
        self.neck = neck
        self.dense_head = dense_head
        self.point_head = point_head
        self.roi_head = roi_head
        # Dict with at least "nms_config" and "score_thresh" (see post_processing).
        self.post_process_cfg = post_process_cfg
        self.init_weights()

    def init_weights(self):
        """Re-initialize BatchNorm weights of selected submodules with U(0, 1)."""
        need_uniform_init_bn_weight_modules = [
            self.middle_encoder, self.point_encoder.vsa_point_feature_fusion,
            self.backbone, self.neck, self.point_head,
            self.roi_head.shared_fc_layer, self.roi_head.cls_layers,
            self.roi_head.reg_layers
        ]
        for module in need_uniform_init_bn_weight_modules:
            for layer in module.sublayers():
                # Matched by class name so all BatchNorm variants are covered.
                if 'BatchNorm' in layer.__class__.__name__:
                    uniform_init(layer.weight, 0, 1)

    def voxelize(self, points):
        """Run the voxelizer on raw points; returns (voxels, coords, counts)."""
        voxels, coordinates, num_points_in_voxel = self.voxelizer(points)
        return voxels, coordinates, num_points_in_voxel

    def forward(self, batch_dict, **kwargs):
        """Run the full detection pipeline on a collated batch dict."""
        voxel_features, coordinates, voxel_num_points = self.voxelizer(
            batch_dict['data'])
        batch_dict["voxel_coords"] = coordinates
        points_pad = []
        if not getattr(self, "export_model", False):
            # Prepend each point's batch index as an extra leading channel,
            # then flatten all samples into one (N, point_dim + 1) tensor.
            for bs_idx, point in enumerate(batch_dict['data']):
                point_dim = point.shape[-1]
                point = point.reshape([1, -1, point_dim])
                point_pad = F.pad(
                    point, [1, 0],
                    value=bs_idx,
                    mode='constant',
                    data_format="NCL")
                point_pad = point_pad.reshape([-1, point_dim + 1])
                points_pad.append(point_pad)
            batch_dict['points'] = paddle.concat(points_pad, axis=0)
        else:
            # Export mode: single sample, batch index is always 0.
            point = batch_dict['data']
            batch_dict['batch_size'] = 1
            point = point.unsqueeze(1)
            point_pad = F.pad(
                point, [1, 0], value=0, mode='constant', data_format="NCL")
            batch_dict['points'] = point_pad.squeeze(1)
        voxel_features = self.voxel_encoder(voxel_features, voxel_num_points)
        # NOTE(review): in the non-export path, 'batch_size' is expected to be
        # present already (set by collate_fn).
        middle_out = self.middle_encoder(voxel_features,
                                         batch_dict['voxel_coords'],
                                         batch_dict['batch_size'])
        batch_dict.update(middle_out)
        backbone_out = self.backbone(middle_out['spatial_features'])
        batch_dict['spatial_features_2d'] = self.neck(backbone_out)
        batch_dict = self.dense_head(batch_dict)
        batch_dict = self.point_encoder(batch_dict)
        batch_dict = self.point_head(batch_dict)
        batch_dict = self.roi_head(batch_dict)
        if self.training:
            return self.get_training_loss()
        else:
            pred_dicts = self.post_processing(batch_dict)
            if not getattr(self, "export_model", False):
                preds = self._parse_results_to_sample(pred_dicts, batch_dict)
                return {'preds': preds}
            else:
                return pred_dicts[0]

    def collate_fn(self, batch: List):
        """Merge a list of sample dicts into one batch dict (keyed collation)."""
        sample_merged = collections.defaultdict(list)
        for sample in batch:
            for k, v in sample.items():
                sample_merged[k].append(v)
        batch_size = len(sample_merged['meta'])
        ret = {}
        for key, elems in sample_merged.items():
            if key in ["meta"]:
                ret[key] = [elem.id for elem in elems]
            elif key in ["path", "modality", "calibs"]:
                ret[key] = elems
            elif key == "data":
                ret[key] = [elem for elem in elems]
            elif key in ['gt_boxes']:
                # Zero-pad per-sample gt boxes to the max count in the batch.
                max_gt = max([len(x) for x in elems])
                batch_gt_boxes3d = np.zeros(
                    (batch_size, max_gt, elems[0].shape[-1]), dtype=np.float32)
                for k in range(batch_size):
                    batch_gt_boxes3d[k, :elems[k].__len__(), :] = elems[k]
                ret[key] = batch_gt_boxes3d
        ret['batch_size'] = batch_size
        return ret

    def get_training_loss(self):
        """Sum the RPN, point-head and RCNN losses accumulated by the heads."""
        disp_dict = {}  # kept for upstream API parity; unused here
        loss_rpn, tb_dict = self.dense_head.get_loss()
        loss_point, tb_dict = self.point_head.get_loss(tb_dict)
        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
        loss = loss_rpn + loss_point + loss_rcnn
        return {"loss": loss}

    def post_processing(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois)  1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            pred_dicts: one entry per sample; dicts of final boxes/scores/labels
                after NMS, or plain lists in export mode.
        """
        batch_size = batch_dict['batch_size']
        recall_dict = {}
        pred_dicts = []
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                # Flattened predictions: select this sample's rows by index.
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                # Batched predictions: plain batch-dimension indexing.
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            src_box_preds = box_preds
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                src_cls_preds = cls_preds
                assert cls_preds.shape[1] in [1, self.num_class]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = F.sigmoid(cls_preds)
            else:
                cls_preds = [
                    x[batch_mask] for x in batch_dict['batch_cls_preds']
                ]
                src_cls_preds = cls_preds
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [F.sigmoid(x) for x in cls_preds]
            if self.post_process_cfg["nms_config"]["multi_classes_nms"]:
                raise NotImplementedError
            else:
                label_preds = paddle.argmax(cls_preds, axis=-1)
                cls_preds = paddle.max(cls_preds, axis=-1)
                if self.dense_head.num_class > 1:
                    # Prefer RoI labels when available; fall back to RPN labels.
                    label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                    label_preds = batch_dict[label_key][index]
                else:
                    # Single-class: shift 0-based argmax to 1-based label ids.
                    label_preds = label_preds + 1
                final_scores, final_labels, final_boxes = class_agnostic_nms(
                    box_scores=cls_preds,
                    box_preds=box_preds,
                    label_preds=label_preds,
                    nms_config=self.post_process_cfg["nms_config"],
                    score_thresh=self.post_process_cfg["score_thresh"])
            if not getattr(self, "export_model", False):
                record_dict = {
                    'box3d_lidar': final_boxes,
                    'scores': final_scores,
                    'label_preds': final_labels
                }
                pred_dicts.append(record_dict)
            else:
                pred_dicts.append([final_boxes, final_scores, final_labels])
        return pred_dicts

    def _convert_origin_for_eval(self, sample: dict):
        """Shift box centers so the origin convention is [.5, .5, 0] (bottom-center)."""
        if sample.bboxes_3d.origin != [.5, .5, 0]:
            sample.bboxes_3d[:, :3] += sample.bboxes_3d[:, 3:6] * (
                np.array([.5, .5, 0]) - np.array(sample.bboxes_3d.origin))
            sample.bboxes_3d.origin = [.5, .5, 0]
        return sample

    def _parse_results_to_sample(self, results: dict, sample: dict):
        """Convert raw prediction dicts into paddle3d Sample objects."""
        num_samples = len(results)
        new_results = []
        for i in range(num_samples):
            data = Sample(sample["path"][i], sample["modality"][i])
            bboxes_3d = results[i]["box3d_lidar"].numpy()
            # Labels are emitted 1-based by post_processing; Samples are 0-based.
            labels = results[i]["label_preds"].numpy() - 1
            confidences = results[i]["scores"].numpy()
            # Swap dims 3/4 and remap yaw; presumably converts the internal
            # box convention to the Sample/eval convention — TODO confirm.
            bboxes_3d[..., 3:5] = bboxes_3d[..., [4, 3]]
            bboxes_3d[..., -1] = -(bboxes_3d[..., -1] + np.pi / 2.)
            data.bboxes_3d = BBoxes3D(bboxes_3d)
            data.bboxes_3d.coordmode = 'Lidar'
            data.bboxes_3d.origin = [0.5, 0.5, 0.5]
            data.bboxes_3d.rot_axis = 2
            data.labels = labels
            data.confidences = confidences
            data.meta = SampleMeta(id=sample["meta"][i])
            if "calibs" in sample:
                data.calibs = [calib.numpy() for calib in sample["calibs"][i]]
            data = self._convert_origin_for_eval(data)
            new_results.append(data)
        return new_results

    def export(self, save_dir: str, **kwargs):
        """Export the model to a static-graph inference program under save_dir."""
        # Switch all export-aware submodules into export mode.
        self.export_model = True
        self.voxelizer.export_model = True
        self.middle_encoder.export_model = True
        save_path = os.path.join(save_dir, 'pv_rcnn')
        points_shape = [-1, self.voxel_encoder.in_channels]
        input_spec = [{
            "data":
            InputSpec(shape=points_shape, name='data', dtype='float32')
        }]
        paddle.jit.to_static(self, input_spec=input_spec)
        paddle.jit.save(self, save_path, input_spec=input_spec)
        logger.info("Exported model is saved in {}".format(save_dir))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/pv_rcnn/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .pv_rcnn import PVRCNN
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/pointpillars/pointpillars_coder.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
__all__ = ["PointPillarsCoder"]
class PointPillarsCoder(object):
    """SECOND-style box encode/decode plus corner utilities for PointPillars."""

    @staticmethod
    def encode(boxes, anchors):
        # Residual-encode boxes against anchors (see second_box_encode_paddle).
        return second_box_encode_paddle(boxes, anchors)

    @staticmethod
    def decode(encodings, anchors):
        # Inverse of `encode` (see second_box_decode_paddle).
        return second_box_decode_paddle(encodings, anchors)

    @staticmethod
    def corners_2d(bboxes_3d):
        """Return the 4 BEV corners of each box, rotated by its heading.

        bboxes_3d: (b, >=7) tensor; dims 3:5 are read as the planar sizes
        and the last column as the rotation angle.
        """
        w, l = bboxes_3d[:, 3:5].t()
        b = bboxes_3d.shape[0]
        # Unit-square corner template (counter order 00,01,11,10), one row per box.
        x_corners = paddle.to_tensor([[0., 0., 1., 1.]]).repeat_interleave(
            b, axis=0)
        y_corners = paddle.to_tensor([[0., 1., 1., 0.]]).repeat_interleave(
            b, axis=0)
        # Center the template (shift by 0.5) and scale by the box sizes.
        x_corners = (w[:, None] * (x_corners - .5))[:, :, None]
        y_corners = (l[:, None] * (y_corners - .5))[:, :, None]
        corners_2d = paddle.concat([x_corners, y_corners], axis=-1)
        angle = bboxes_3d[:, -1]
        rot_sin = paddle.sin(angle)
        rot_cos = paddle.cos(angle)
        # NOTE(review): builds a rotation stack from per-box sin/cos tensors;
        # the batch axis ends up trailing and is consumed as `a` in the einsum.
        rotation_matrix = paddle.to_tensor([[rot_cos, -rot_sin],
                                            [rot_sin, rot_cos]])
        corners_2d = paddle.einsum("aij,jka->aik", corners_2d, rotation_matrix)
        # Translate rotated corners to each box center.
        centers = bboxes_3d[:, 0:2][:, None, :]
        corners_2d += centers
        return corners_2d

    @staticmethod
    def corner_to_standup(corners):
        """Axis-aligned (min..., max...) standup box from rotated corners."""
        ndim = corners.shape[-1]
        standup_boxes = []
        for i in range(ndim):
            standup_boxes.append(paddle.min(corners[:, :, i], axis=1))
        for i in range(ndim):
            standup_boxes.append(paddle.max(corners[:, :, i], axis=1))
        return paddle.stack(standup_boxes, axis=1)

    @staticmethod
    def corners_3d(bboxes_3d, origin=(.5, .5, .5)):
        """Return the 8 3D corners of each box, rotated about the z axis.

        `origin` is the fractional anchor point of the box inside its extents.
        """
        # corners_3d format: x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0
        h, w, l = bboxes_3d[:, 3:6].t()
        b = h.shape[0]
        x_corners = paddle.to_tensor([[0., 0., 0., 0., 1., 1., 1., 1.]],
                                     bboxes_3d.dtype).repeat_interleave(
                                         b, axis=0)
        y_corners = paddle.to_tensor([[0., 0., 1., 1., 0., 0., 1., 1.]],
                                     bboxes_3d.dtype).repeat_interleave(
                                         b, axis=0)
        z_corners = paddle.to_tensor([[0., 1., 1., 0., 0., 1., 1., 0.]],
                                     bboxes_3d.dtype).repeat_interleave(
                                         b, axis=0)
        # Shift the unit-cube template by `origin`, then scale by box extents.
        x_corners = (w[:, None] * (x_corners - origin[0]))[:, :, None]
        y_corners = (l[:, None] * (y_corners - origin[1]))[:, :, None]
        z_corners = (h[:, None] * (z_corners - origin[2]))[:, :, None]
        corners = paddle.concat([x_corners, y_corners, z_corners], axis=-1)
        angle = bboxes_3d[:, 6:7].squeeze(-1)
        rot_sin = paddle.sin(angle)
        rot_cos = paddle.cos(angle)
        ones = paddle.ones_like(rot_cos)
        zeros = paddle.zeros_like(rot_cos)
        # Z-axis rotation stack; trailing batch axis consumed by the einsum.
        rotation_matrix = paddle.to_tensor(
            [[rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros],
             [zeros, zeros, ones]],
            dtype=bboxes_3d.dtype)
        corners = paddle.einsum("aij,jka->aik", corners, rotation_matrix)
        # Translate rotated corners to each box center.
        centers = bboxes_3d[:, 0:3][:, None, :]
        corners += centers
        return corners
def second_box_encode_paddle(boxes, anchors):
    """
    Encode 3D bboxes as residuals w.r.t. anchors (SECOND / PointPillars).

    Centers are normalized by the anchor's BEV diagonal (x, y) and height (z);
    sizes are log-ratios; rotation is a plain difference.

    Args:
        boxes ([N, 7] Tensor): ground-truth boxes, split as x, y, z, w, l, h, r
        anchors ([N, 7] Tensor): anchors, same layout
    Returns:
        [N, 7] Tensor of encoded targets.
    """
    a_x, a_y, a_z, a_w, a_l, a_h, a_r = paddle.split(anchors, 7, axis=-1)
    g_x, g_y, g_z, g_w, g_l, g_h, g_r = paddle.split(boxes, 7, axis=-1)
    # BEV diagonal of the anchor footprint.
    diagonal = paddle.sqrt(a_l**2 + a_w**2)
    return paddle.concat(
        [
            (g_x - a_x) / diagonal,
            (g_y - a_y) / diagonal,
            (g_z - a_z) / a_h,
            paddle.log(g_w / a_w),
            paddle.log(g_l / a_l),
            paddle.log(g_h / a_h),
            g_r - a_r,
        ],
        axis=-1)
def second_box_decode_paddle(encodings, anchors):
    """
    Decode SECOND / PointPillars box residuals back into absolute boxes.

    Exact inverse of `second_box_encode_paddle`: centers are de-normalized by
    the anchor BEV diagonal / height, sizes by exp, rotation by addition.

    Args:
        encodings ([N, 7] Tensor): encoded residuals x, y, z, w, l, h, r
        anchors ([N, 7] Tensor): anchors, same layout
    Returns:
        [N, 7] Tensor of decoded boxes.
    """
    a_x, a_y, a_z, a_w, a_l, a_h, a_r = paddle.split(anchors, 7, axis=-1)
    t_x, t_y, t_z, t_w, t_l, t_h, t_r = paddle.split(encodings, 7, axis=-1)
    # BEV diagonal of the anchor footprint (same normalizer as the encoder).
    diagonal = paddle.sqrt(a_l**2 + a_w**2)
    return paddle.concat(
        [
            t_x * diagonal + a_x,
            t_y * diagonal + a_y,
            t_z * a_h + a_z,
            paddle.exp(t_w) * a_w,
            paddle.exp(t_l) * a_l,
            paddle.exp(t_h) * a_h,
            t_r + a_r,
        ],
        axis=-1)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/pointpillars/pointpillars_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
__all__ = [
"PointPillarsLoss", "SigmoidFocalClassificationLoss",
"WeightedSmoothL1RegressionLoss", "WeightedSoftmaxClassificationLoss"
]
@manager.LOSSES.add_component
class PointPillarsLoss(nn.Layer):
    """Combined classification / regression / direction loss for PointPillars.

    Per-anchor label convention (as used throughout this class):
    label > 0 is a foreground class id, label == 0 is background, and
    label < 0 is ignored ("don't care").
    """

    def __init__(self,
                 num_classes,
                 classification_loss,
                 regression_loss,
                 direction_loss=None,
                 classification_loss_weight=1.0,
                 regression_loss_weight=2.0,
                 direction_loss_weight=1.0,
                 fg_cls_weight=1.0,
                 bg_cls_weight=1.0,
                 encode_rot_error_by_sin=True,
                 use_direction_classifier=True,
                 encode_background_as_zeros=True,
                 box_code_size=7):
        super(PointPillarsLoss, self).__init__()
        self.num_classes = num_classes
        self.cls_loss = classification_loss
        self.cls_loss_w = classification_loss_weight
        self.reg_loss = regression_loss
        self.reg_loss_w = regression_loss_weight
        self.dir_loss = direction_loss
        self.dir_loss_w = direction_loss_weight
        self.fg_cls_weight = fg_cls_weight
        self.bg_cls_weight = bg_cls_weight
        self.encode_rot_error_by_sin = encode_rot_error_by_sin
        self.use_direction_classifier = use_direction_classifier
        self.encode_background_as_zeros = encode_background_as_zeros
        self.box_code_size = box_code_size

    def compute_loss_weights(self, labels):
        """get cls_weights and reg_weights from labels.

        Returns:
            cls_weights: fg/bg-weighted per-anchor weights, normalized by the
                per-sample foreground count.
            reg_weights: foreground mask with the same normalization.
            cared: bool mask of anchors whose label is not "don't care".
        """
        cared = labels >= 0
        foregrounds = (labels > 0).astype('float32')
        backgrounds = (labels == 0).astype("float32")
        cls_weights = self.bg_cls_weight * backgrounds + self.fg_cls_weight * foregrounds
        reg_weights = foregrounds
        # Normalize by the number of foreground anchors per sample (min 1).
        fg_normalizer = foregrounds.sum(1, keepdim=True)
        reg_weights /= paddle.clip(fg_normalizer, min=1.0)
        cls_weights /= paddle.clip(fg_normalizer, min=1.0)
        return cls_weights, reg_weights, cared

    def compute_cls_loss(self, cls_preds, cls_targets, cls_weights):
        """One-hot encode the targets and apply the classification loss."""
        cls_targets_onehot = F.one_hot(cls_targets, self.num_classes + 1)
        if self.encode_background_as_zeros:
            # Drop the background column: background is the all-zeros target.
            cls_targets_onehot = cls_targets_onehot[..., 1:]
        cls_loss = self.cls_loss(
            cls_preds, cls_targets_onehot, weights=cls_weights)  # [N, M]
        return cls_loss

    def compute_reg_loss(self, box_preds, reg_targets, reg_weights):
        """Regression loss; optionally encodes the angle error through sin."""
        if self.encode_rot_error_by_sin:
            # sin(pred - target) = sin(pred)cos(target) - cos(pred)sin(target)
            rad_pred_encoding = paddle.sin(box_preds[..., -1:]) * paddle.cos(
                reg_targets[..., -1:])
            rad_target_encoding = paddle.cos(box_preds[..., -1:]) * paddle.sin(
                reg_targets[..., -1:])
            box_preds = paddle.concat([box_preds[..., :-1], rad_pred_encoding],
                                      axis=-1)
            reg_targets = paddle.concat(
                [reg_targets[..., :-1], rad_target_encoding], axis=-1)
        reg_loss = self.reg_loss(
            box_preds, reg_targets, weights=reg_weights)  # [N, M]
        return reg_loss

    def compute_fg_bg_loss(self, cls_loss, labels):
        """Split the classification loss into foreground / background parts.

        cls_loss: [N, num_anchors, num_class]; labels: [N, num_anchors].
        """
        batch_size = cls_loss.shape[0]
        if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:
            cls_fg_loss = (labels > 0).astype(
                cls_loss.dtype) * cls_loss.reshape((batch_size, -1))
            # BUGFIX: this previously read `cls_loss.type`, which is not a
            # paddle.Tensor attribute and raised AttributeError on this branch;
            # `.dtype` matches the foreground line above.
            cls_bg_loss = (labels == 0).astype(
                cls_loss.dtype) * cls_loss.reshape((batch_size, -1))
            cls_fg_loss = cls_fg_loss.sum() / batch_size
            cls_bg_loss = cls_bg_loss.sum() / batch_size
        else:
            # With explicit class channels, background loss lives in channel 0.
            cls_fg_loss = cls_loss[..., 1:].sum() / batch_size
            cls_bg_loss = cls_loss[..., 0].sum() / batch_size
        # Undo the fg/bg weighting applied in compute_loss_weights.
        cls_fg_loss /= self.fg_cls_weight
        cls_bg_loss /= self.bg_cls_weight
        return cls_fg_loss, cls_bg_loss

    def compute_dir_cls_loss(self, dir_preds, reg_targets, labels, anchors):
        """Binary direction-classifier loss, weighted over foreground anchors."""
        # Recover the absolute gt rotation; target is its sign bit.
        rot_gt = reg_targets[..., -1] + anchors[..., -1]
        dir_targets = (rot_gt > 0).astype("int32")
        weights = (labels > 0).astype(dir_preds.dtype)
        weights /= paddle.clip(weights.sum(-1, keepdim=True), min=1.0)
        dir_loss = self.dir_loss(dir_preds, dir_targets, weights=weights)
        return dir_loss

    def forward(self,
                box_preds,
                cls_preds,
                reg_targets,
                labels,
                dir_preds=None,
                anchors=None):
        """Return {"loss": total} combining weighted cls/reg(/dir) losses."""
        cls_weights, reg_weights, cared = self.compute_loss_weights(labels)
        # Zero out "don't care" anchors (label < 0) in the cls targets.
        cls_targets = labels * cared.astype(labels.dtype)
        cls_loss = self.compute_cls_loss(cls_preds, cls_targets, cls_weights)
        reg_loss = self.compute_reg_loss(box_preds, reg_targets, reg_weights)
        batch_size = box_preds.shape[0]
        total_loss = self.reg_loss_w * (reg_loss.sum(
        ) / batch_size) + self.cls_loss_w * (cls_loss.sum() / batch_size)
        loss_dict = dict(loss=total_loss)
        if self.use_direction_classifier:
            dir_loss = self.compute_dir_cls_loss(dir_preds, reg_targets, labels,
                                                 anchors)
            total_loss += self.dir_loss_w * (dir_loss.sum() / batch_size)
            loss_dict.update(dict(loss=total_loss))
        return loss_dict
@manager.LOSSES.add_component
class SigmoidFocalClassificationLoss(nn.Layer):
    """
    Sigmoid focal cross entropy loss.
    Down-weights well-classified examples so training focuses on hard ones.
    See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
    Args:
        gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
        alpha: optional alpha weighting factor to balance positives vs negatives.
    """

    def __init__(self, gamma=2.0, alpha=0.25):
        super(SigmoidFocalClassificationLoss, self).__init__()
        self._alpha = alpha
        self._gamma = gamma

    def forward(self, prediction, target, weights, class_indices=None):
        """
        Compute loss function.
        Args:
            prediction: float tensor [batch_size, num_anchors, num_classes],
                predicted logits for each class.
            target: float tensor of the same shape, one-hot classification targets.
            weights: float tensor [batch_size, num_anchors].
            class_indices: optional 1-D integer tensor of class indices; when
                given, loss is computed only for those classes.
        Returns:
            float tensor [batch_size, num_anchors, num_classes] of loss values.
        """
        # Broadcast anchor weights over the class dimension.
        weights = paddle.unsqueeze(weights, 2)
        if class_indices is not None:
            # Zero the weight of every class not listed in class_indices.
            keep = paddle.zeros((prediction.shape[2], ), dtype=prediction.dtype)
            keep[class_indices] = 1.
            weights *= keep.reshape((1, 1, -1))
        # Numerically stable BCE with logits:
        #   max(x, 0) - x * t + log(1 + exp(-|x|))
        bce = paddle.clip(
            prediction, min=0) - prediction * target.astype(
                prediction.dtype) + paddle.log1p(
                    paddle.exp(-paddle.abs(prediction)))
        probs = F.sigmoid(prediction)
        # p_t: model probability assigned to the ground-truth class.
        p_t = target * probs + (1 - target) * (1 - probs)
        if self._gamma:
            modulator = paddle.pow(1.0 - p_t, self._gamma)
        else:
            modulator = 1.0
        if self._alpha is None:
            alpha_factor = 1.0
        else:
            alpha_factor = target * self._alpha + (1 - target) * (
                1 - self._alpha)
        focal_loss = modulator * alpha_factor * bce
        return focal_loss * weights
@manager.LOSSES.add_component
class WeightedSmoothL1RegressionLoss(nn.Layer):
    """
    Smooth L1 (Huber-style) regression loss with per-anchor weighting.
    Elementwise, with x = prediction - target and s = sigma:
        0.5 * (s*x)^2   if |x| <= 1/s^2
        |x| - 0.5/s^2   otherwise
    See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015).
    """

    def __init__(self, sigma=3.0, code_weights=None, codewise=True):
        super(WeightedSmoothL1RegressionLoss, self).__init__()
        self._sigma = sigma
        if code_weights is None:
            self._code_weights = None
        else:
            # Per-code-dimension weights, broadcastable over batch and anchors.
            self._code_weights = paddle.to_tensor(
                code_weights, dtype="float32").reshape((1, 1, -1))
        self._codewise = codewise

    def forward(self, prediction, target, weights=None):
        """Compute loss function.
        Args:
            prediction: float tensor [batch_size, num_anchors, code_size],
                (encoded) predicted box locations.
            target: float tensor of the same shape, regression targets.
            weights: float tensor [batch_size, num_anchors].
        Returns:
            float tensor [batch_size, num_anchors(, code_size)] of loss values
            (last dim kept when codewise=True).
        """
        residual = prediction - target
        if self._code_weights is not None:
            residual *= self._code_weights
        abs_res = paddle.abs(residual)
        # 1.0 on the quadratic branch, 0.0 on the linear branch.
        quadratic = (abs_res <= 1 / (self._sigma**2)).astype(abs_res.dtype)
        loss = quadratic * 0.5 * paddle.pow(abs_res * self._sigma, 2) \
            + (abs_res - 0.5 / (self._sigma ** 2)) * (1. - quadratic)
        if not self._codewise:
            # Collapse the code dimension before weighting.
            anchorwise_smooth_l1norm = paddle.sum(loss, 2)
            if weights is not None:
                anchorwise_smooth_l1norm *= weights
        else:
            anchorwise_smooth_l1norm = loss
            if weights is not None:
                anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
        return anchorwise_smooth_l1norm
@manager.LOSSES.add_component
class WeightedSoftmaxClassificationLoss(nn.Layer):
    """Softmax cross-entropy classification loss with per-anchor weighting."""

    def __init__(self, logit_scale=1.):
        super(WeightedSoftmaxClassificationLoss, self).__init__()
        self._logit_scale = logit_scale

    def forward(self, prediction, target, weights=None):
        """
        Compute loss function.
        Args:
            prediction: float tensor [batch_size, num_anchors, num_classes],
                predicted logits for each class.
            target: tensor [batch_size, num_anchors, ...] of classification
                targets, flattened to one label per row below.
            weights: float tensor [batch_size, num_anchors].
        Returns:
            float tensor shaped like `weights` with per-anchor loss values.
        """
        num_classes = prediction.shape[-1]
        # NOTE: scales the caller's tensor in place, kept from the original.
        prediction /= self._logit_scale
        flat_logits = prediction.reshape((-1, num_classes))
        flat_targets = target.reshape((-1, 1))
        per_row_cross_ent = F.cross_entropy(
            flat_logits, flat_targets, reduction="none")
        return per_row_cross_ent.reshape(weights.shape) * weights
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/pointpillars/pointpillars_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D, CoordMode
from paddle3d.models.detection.pointpillars.pointpillars_coder import \
PointPillarsCoder
from paddle3d.models.layers.layer_libs import rotate_nms_pcdet
from paddle3d.sample import Sample
__all__ = ["SSDHead"]
@manager.HEADS.add_component
class SSDHead(nn.Layer):
    """SSD-style detection head used by PointPillars.

    Three 1x1 conv branches over the BEV feature map produce per-anchor
    classification logits, box regression codes and (optionally) a 2-way
    heading-direction class. ``post_process`` decodes the boxes against the
    anchors and applies score filtering plus rotated NMS.
    """
    def __init__(self,
                 num_classes,
                 feature_channels=384,
                 num_anchor_per_loc=2,
                 encode_background_as_zeros=True,
                 use_direction_classifier=True,
                 box_code_size=7,
                 nms_score_threshold=0.05,
                 nms_pre_max_size=1000,
                 nms_post_max_size=300,
                 nms_iou_threshold=0.5,
                 prediction_center_limit_range=None):
        super(SSDHead, self).__init__()
        self.encode_background_as_zeros = encode_background_as_zeros
        self.use_direction_classifier = use_direction_classifier
        self.box_code_size = box_code_size
        self.nms_score_threshold = nms_score_threshold
        self.nms_pre_max_size = nms_pre_max_size
        self.nms_post_max_size = nms_post_max_size
        self.nms_iou_threshold = nms_iou_threshold
        # Optionally drop predictions whose centers fall outside this
        # [x_min, y_min, z_min, x_max, y_max, z_max] range.
        if prediction_center_limit_range is not None:
            self._limit_pred = True
            self.pred_center_limit_range = paddle.to_tensor(
                prediction_center_limit_range)
        else:
            self._limit_pred = False
        # Without an explicit background class the scores come from per-class
        # sigmoids; otherwise class index 0 is reserved for background.
        if encode_background_as_zeros:
            self._num_classes = num_classes
        else:
            self._num_classes = num_classes + 1
        self.cls_head = nn.Conv2D(feature_channels,
                                  num_anchor_per_loc * self._num_classes, 1)
        self.box_head = nn.Conv2D(feature_channels,
                                  num_anchor_per_loc * box_code_size, 1)
        if use_direction_classifier:
            self.dir_head = nn.Conv2D(feature_channels, num_anchor_per_loc * 2,
                                      1)
    def forward(self, features):
        """Predict per-anchor class logits, box codes and direction logits.

        Args:
            features: BEV feature map of shape [N, C, H, W].

        Returns:
            dict with "cls_preds" [N, num_anchors, num_classes], "box_preds"
            [N, num_anchors, box_code_size] and, when the direction
            classifier is enabled, "dir_preds" [N, num_anchors, 2].
        """
        batch_size = features.shape[0]
        # NCHW -> NHWC, then flatten (H, W, anchors-per-location) into one
        # per-anchor axis.
        cls_preds = self.cls_head(features).transpose((0, 2, 3, 1)).reshape(
            (batch_size, -1, self._num_classes))
        box_preds = self.box_head(features).transpose((0, 2, 3, 1)).reshape(
            (batch_size, -1, self.box_code_size))
        ret = dict(cls_preds=cls_preds, box_preds=box_preds)
        if self.use_direction_classifier:
            dir_preds = self.dir_head(features).transpose((0, 2, 3, 1)).reshape(
                (batch_size, -1, 2))
            ret.update(dict(dir_preds=dir_preds))
        return ret
    @paddle.no_grad()
    def post_process(self,
                     samples,
                     preds,
                     anchors,
                     anchors_mask,
                     batch_size=None):
        """Decode box predictions and run per-sample filtering and NMS.

        In export mode a single (squeezed) sample is processed and the raw
        prediction dict is returned; otherwise every sample in the batch is
        processed and wrapped into ``Sample`` objects.
        ``paddle.static.nn.cond`` guards against samples with no valid
        anchors so the exported static graph stays well-formed.
        """
        preds["box_preds"] = PointPillarsCoder.decode(preds["box_preds"],
                                                      anchors)
        if getattr(self, "in_export_mode", False):
            box_preds = preds["box_preds"].squeeze(0)
            cls_preds = preds["cls_preds"].squeeze(0)
            if self.use_direction_classifier:
                dir_preds = preds["dir_preds"].squeeze(0).reshape((-1, 2))
            return paddle.static.nn.cond(
                anchors_mask.any(),
                true_fn=lambda: self._single_post_process(
                    box_preds,
                    cls_preds,
                    dir_preds=dir_preds
                    if self.use_direction_classifier else None,
                    anchors_mask=anchors_mask),
                false_fn=lambda: self._box_empty())
        else:
            batch_box_preds = preds["box_preds"]
            batch_cls_preds = preds["cls_preds"]
            if self.use_direction_classifier:
                batch_dir_preds = preds["dir_preds"].reshape((batch_size, -1,
                                                              2))
            results = []
            for i in range(batch_size):
                result = paddle.static.nn.cond(
                    anchors_mask[i].any(),
                    true_fn=lambda: self._single_post_process(
                        batch_box_preds[i],
                        batch_cls_preds[i],
                        dir_preds=batch_dir_preds[i]
                        if self.use_direction_classifier else None,
                        anchors_mask=anchors_mask[i]),
                    false_fn=lambda: self._box_empty())
                results.append(
                    self._parse_result_to_sample(
                        result, samples["path"][i], samples["calibs"][i], {
                            key: value[i]
                            for key, value in samples["meta"].items()
                        }))
            return {"preds": results}
    @paddle.no_grad()
    def _single_post_process(self,
                             box_preds,
                             cls_preds,
                             dir_preds=None,
                             anchors_mask=None):
        """Filter one sample's predictions by anchor mask, score threshold
        and (optionally) the center-limit range, then dispatch to NMS."""
        box_preds = box_preds[anchors_mask]
        cls_preds = cls_preds[anchors_mask]
        if self.encode_background_as_zeros:
            cls_confs = F.sigmoid(cls_preds)
        else:
            # Skip the background column (index 0) when it is predicted.
            cls_confs = F.sigmoid(cls_preds[..., 1:])
        cls_scores = cls_confs.max(-1)
        cls_labels = cls_confs.argmax(-1)
        if self.use_direction_classifier:
            dir_preds = dir_preds[anchors_mask]
            dir_labels = dir_preds.argmax(axis=-1)
        kept = cls_scores >= self.nms_score_threshold
        if self._limit_pred:
            # Keep only boxes whose centers lie inside the configured range.
            distance_kept = (box_preds[..., :3] >= self.pred_center_limit_range[:3]).all(1) \
                & (box_preds[..., :3] <= self.pred_center_limit_range[3:]).all(1)
            kept = kept & distance_kept
        return paddle.static.nn.cond(
            kept.any(),
            true_fn=lambda: self._box_not_empty(
                box_preds[kept],
                cls_scores[kept],
                cls_labels[kept],
                dir_labels=dir_labels[kept]
                if self.use_direction_classifier else None),
            false_fn=lambda: self._box_empty())
    def _box_empty(self):
        # Placeholder result for "no detections": scores/labels of -1 act as
        # the sentinel checked by _parse_result_to_sample.
        pretiction_dict = {
            'box3d_lidar': paddle.zeros([1, self.box_code_size],
                                        dtype="float32"),
            'scores': -paddle.ones([1], dtype="float32"),
            'label_preds': -paddle.ones([1], dtype="int64")
        }
        return pretiction_dict
    def _box_not_empty(self, box_preds, cls_scores, cls_labels, dir_labels):
        """Apply the direction flip and rotated NMS to the surviving boxes."""
        if self.use_direction_classifier:
            # Flip the heading by pi when the regressed sign disagrees with
            # the predicted direction class.
            box_preds[..., 6] += paddle.where(
                (box_preds[..., 6] > 0) ^ dir_labels.astype("bool"),
                paddle.to_tensor(math.pi), paddle.to_tensor(0.))
        # bottom center to object center
        box_preds[:, 2] = box_preds[:, 2] + box_preds[:, 5] * 0.5
        selected = rotate_nms_pcdet(
            box_preds,
            cls_scores,
            pre_max_size=self.nms_pre_max_size,
            post_max_size=self.nms_post_max_size,
            thresh=self.nms_iou_threshold)
        box_preds = paddle.index_select(box_preds, selected, axis=0)
        # object center to bottom center
        box_preds[:, 2] = box_preds[:, 2] - box_preds[:, 5] * 0.5
        cls_labels = paddle.index_select(cls_labels, selected, axis=0)
        cls_scores = paddle.index_select(cls_scores, selected, axis=0)
        prediction_dict = {
            'box3d_lidar': box_preds,
            'scores': cls_scores,
            'label_preds': cls_labels
        }
        return prediction_dict
    @staticmethod
    def _parse_result_to_sample(result, path, calibs, meta):
        """Convert one raw prediction dict into a ``Sample``.

        A score of -1 (the _box_empty sentinel) yields an empty sample.
        """
        if (result["scores"] == -1).any():
            sample = Sample(path=path, modality="lidar")
        else:
            sample = Sample(path=path, modality="lidar")
            sample.calibs = [calib.numpy() for calib in calibs]
            box_preds = result["box3d_lidar"]
            cls_labels = result["label_preds"]
            cls_scores = result["scores"]
            # Boxes are bottom-centered (origin z = 0) in KITTI lidar coords,
            # rotating about the z axis.
            sample.bboxes_3d = BBoxes3D(
                box_preds.numpy(),
                origin=[.5, .5, 0],
                coordmode=CoordMode.KittiLidar,
                rot_axis=2)
            sample.labels = cls_labels.numpy()
            sample.confidences = cls_scores.numpy()
            # Observation angle alpha derived from box center and heading.
            sample.alpha = (-paddle.atan2(-box_preds[:, 1], box_preds[:, 0]) +
                            box_preds[:, 6]).numpy()
        sample.meta.update(meta)
        return sample
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/pointpillars/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .pointpillars import *
from .pointpillars_head import *
from .pointpillars_loss import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/pointpillars/pointpillars.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import os
from collections.abc import Mapping, Sequence
from typing import List
import numpy as np
import paddle
import paddle.nn as nn
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.models.base import BaseLidarModel
from paddle3d.models.detection.pointpillars.anchors_generator import \
AnchorGenerator
from paddle3d.sample import Sample
from paddle3d.utils import checkpoint
from paddle3d.utils.logger import logger
__all__ = ["PointPillars"]
@manager.MODELS.add_component
class PointPillars(BaseLidarModel):
    """PointPillars 3D detector (Lang et al., CVPR 2019).

    Pipeline: pillar feature encoder -> scatter to BEV (middle encoder) ->
    2D backbone -> FPN-style neck -> SSD head, with SSD-style anchors laid
    over the BEV grid.
    """
    def __init__(self,
                 voxelizer,
                 pillar_encoder,
                 middle_encoder,
                 backbone,
                 neck,
                 head,
                 loss,
                 anchor_configs,
                 anchor_area_threshold=1,
                 pretrained=None,
                 box_with_velocity: bool = False):
        super().__init__(
            with_voxelizer=False,
            box_with_velocity=box_with_velocity,
            max_num_points_in_voxel=pillar_encoder.max_num_points_in_voxel,
            in_channels=pillar_encoder.in_channels)
        self.voxelizer = voxelizer
        self.pillar_encoder = pillar_encoder
        self.middle_encoder = middle_encoder
        self.backbone = backbone
        self.neck = neck
        self.head = head
        self.loss = loss
        # Anchor stride follows the net's effective output stride:
        # backbone downsampling divided by neck upsampling.
        self.anchor_generator = AnchorGenerator(
            output_stride_factor=self.backbone.downsample_strides[0] //
            self.neck.upsample_strides[0],
            point_cloud_range=self.voxelizer.point_cloud_range,
            voxel_size=self.voxelizer.voxel_size,
            anchor_configs=anchor_configs,
            anchor_area_threshold=anchor_area_threshold)
        self.pretrained = pretrained
        self.init_weight()
    def train_forward(self, samples):
        """Run the network on pre-voxelized samples and return the loss dict."""
        voxels = samples["voxels"]
        coordinates = samples["coords"]
        num_points_per_voxel = samples["num_points_per_voxel"]
        # yapf: disable
        batch_size = len(samples["data"])
        pillar_features = self.pillar_encoder(
            voxels, num_points_per_voxel, coordinates)
        spatial_features = self.middle_encoder(
            pillar_features, coordinates, batch_size)
        # yapf: enable
        final_features = self.backbone(spatial_features)
        fused_final_features = self.neck(final_features)
        preds = self.head(fused_final_features)
        box_preds = preds["box_preds"]
        cls_preds = preds["cls_preds"]
        if self.head.use_direction_classifier:
            dir_preds = preds["dir_preds"]
            loss_dict = self.loss(box_preds, cls_preds, samples["reg_targets"],
                                  samples["labels"], dir_preds,
                                  self.anchor_generator.anchors)
        else:
            loss_dict = self.loss(box_preds, cls_preds, samples["reg_targets"],
                                  samples["labels"])
        return loss_dict
    def test_forward(self, samples):
        """Run inference and post-process predictions for a batch."""
        voxels = samples["voxels"]
        coordinates = samples["coords"]
        num_points_per_voxel = samples["num_points_per_voxel"]
        # yapf: disable
        batch_size = len(samples["data"])
        pillar_features = self.pillar_encoder(
            voxels, num_points_per_voxel, coordinates)
        spatial_features = self.middle_encoder(
            pillar_features, coordinates, batch_size)
        # yapf: enable
        final_features = self.backbone(spatial_features)
        fused_final_features = self.neck(final_features)
        preds = self.head(fused_final_features)
        anchors_mask = []
        # Build a per-sample anchor validity mask; coords column 0 holds the
        # batch index prepended by collate_fn.
        for i in range(batch_size):
            batch_mask = coordinates[:, 0] == i
            this_coords = coordinates[batch_mask][:, 1:]
            anchors_mask.append(self.anchor_generator(this_coords))
        return self.head.post_process(samples, preds,
                                      self.anchor_generator.anchors,
                                      anchors_mask, batch_size)
    def export_forward(self, samples):
        """Single-sample forward used by the exported inference graph."""
        voxels = samples["voxels"]
        coordinates = samples["coords"]
        num_points_per_voxel = samples["num_points_per_voxel"]
        # yapf: disable
        # Prepend a zero batch-index column (single sample at export time).
        coordinates = paddle.concat([
            paddle.zeros([coordinates.shape[0], 1], dtype=coordinates.dtype),
            coordinates
        ], axis=-1)
        batch_size = None
        pillar_features = self.pillar_encoder(
            voxels, num_points_per_voxel, coordinates)
        spatial_features = self.middle_encoder(
            pillar_features, coordinates, batch_size)
        # yapf: enable
        final_features = self.backbone(spatial_features)
        fused_final_features = self.neck(final_features)
        preds = self.head(fused_final_features)
        anchors_mask = self.anchor_generator(coordinates[:, 1:])
        return self.head.post_process(samples, preds,
                                      self.anchor_generator.anchors,
                                      anchors_mask, batch_size)
    def init_weight(self):
        """Load pretrained weights when a checkpoint path is configured."""
        if self.pretrained is not None:
            checkpoint.load_pretrained_model(self, self.pretrained)
    def collate_fn(self, batch: List):
        """Collate a list of samples into a batch.

        Voxel tensors are concatenated along axis 0 rather than stacked;
        "coords" additionally get a batch-index column padded in front so
        voxels from different samples stay distinguishable.
        """
        sample = batch[0]
        if isinstance(sample, np.ndarray):
            batch = np.stack(batch, axis=0)
            return batch
        elif isinstance(sample, paddle.Tensor):
            return paddle.stack(batch, axis=0)
        elif isinstance(sample, numbers.Number):
            batch = np.array(batch)
            return batch
        elif isinstance(sample, (str, bytes)):
            return batch
        elif isinstance(sample, (Sample, Mapping)):
            # Fields whose per-sample lengths differ are kept as lists.
            var_len_fields = {"data", "calibs"}
            concat_fields = {"voxels", "num_points_per_voxel"}
            collated_batch = {}
            for key, value in sample.items():
                if value is None:
                    continue
                if key == "coords":
                    collated_batch[key] = np.concatenate([
                        np.pad(
                            d[key], ((0, 0), (1, 0)),
                            mode="constant",
                            constant_values=i) for i, d in enumerate(batch)
                    ])
                elif key in concat_fields:
                    collated_batch[key] = np.concatenate(
                        [d[key] for d in batch], axis=0)
                elif key not in var_len_fields or isinstance(
                        value, (Sample, Mapping)):
                    collated_batch[key] = self.collate_fn(
                        [d[key] for d in batch])
                else:
                    collated_batch[key] = [d[key] for d in batch]
            return collated_batch
        elif isinstance(sample, Sequence):
            sample_fields_num = len(sample)
            if not all(
                    len(sample) == sample_fields_num for sample in iter(batch)):
                raise RuntimeError(
                    "fileds number not same among samples in a batch")
            return [self.collate_fn(fields) for fields in zip(*batch)]
        raise TypeError(
            "batch data can only contains: tensor, numpy.ndarray, "
            "dict, list, number, paddle3d.Sample, but got {}".format(
                type(sample)))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/pointpillars/anchors_generator.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
class AnchorGenerator(nn.Layer):
    """
    Generate SSD style anchors for PointPillars.
    Args:
        output_stride_factor (int): Output stride of the network.
        point_cloud_range (list[float]): [x_min, y_min, z_min, x_max, y_max, z_max].
        voxel_size (list[float]): [x_size, y_size, z_size].
        anchor_configs (list[Dict[str, Any]]): Anchor configuration for each class. Attributes must include:
            "sizes": (list[float]) Anchor size (in wlh order).
            "strides": (list[float]) Anchor stride.
            "offsets": (list[float]) Anchor offset.
            "rotations": (list[float]): Anchor rotation.
            "matched_threshold": (float) IoU threshold for positive anchors.
            "unmatched_threshold": (float) IoU threshold for negative anchors.
        anchor_area_threshold (float): Threshold for filtering out anchor area. Defaults to 1.
    """
    def __init__(self,
                 output_stride_factor,
                 point_cloud_range,
                 voxel_size,
                 anchor_configs,
                 anchor_area_threshold=1):
        super(AnchorGenerator, self).__init__()
        self.pc_range = paddle.to_tensor(point_cloud_range, dtype="float32")
        self.voxel_size = paddle.to_tensor(voxel_size, dtype="float32")
        # Number of voxels along each axis of the point-cloud range.
        self.grid_size = paddle.round((self.pc_range[3:6] - self.pc_range[:3]) /
                                      self.voxel_size).astype("int64")
        anchor_generators = [
            AnchorGeneratorStride(**anchor_cfg) for anchor_cfg in anchor_configs
        ]
        # Feature map is the BEV grid downsampled by the network stride;
        # reversed to [D=1, H, W] order as expected by generate().
        feature_map_size = self.grid_size[:2] // output_stride_factor
        feature_map_size = [*feature_map_size, 1][::-1]
        self._generate_anchors(feature_map_size, anchor_generators)
        self.anchor_area_threshold = float(anchor_area_threshold)
    def _generate_anchors(self, feature_map_size, anchor_generators):
        """Build the flat anchor list and each anchor's clipped BEV footprint
        (in integer grid coordinates) used for occupancy filtering."""
        anchors_list = []
        # match_list = []
        # unmatch_list = []
        for gen in anchor_generators:
            anchors = gen.generate(feature_map_size)
            anchors = anchors.reshape(
                [*anchors.shape[:3], -1, anchors.shape[-1]])
            anchors_list.append(anchors)
        anchors = paddle.concat(anchors_list, axis=-2)
        self.anchors = anchors.reshape([-1, anchors.shape[-1]])
        # Circumscribed axis-aligned BEV boxes from (x, y, xdim, ydim, rot).
        anchors_bv = rbbox2d_to_circumscribed(
            paddle.index_select(
                self.anchors, paddle.to_tensor([0, 1, 3, 4, 6]), axis=1))
        # Convert metric corners to grid indices, clipping to the grid:
        # columns are [x_min, y_min, x_max, y_max].
        anchors_bv[:, 0] = paddle.clip(
            paddle.floor(
                (anchors_bv[:, 0] - self.pc_range[0]) / self.voxel_size[0]),
            min=0)
        anchors_bv[:, 1] = paddle.clip(
            paddle.floor(
                (anchors_bv[:, 1] - self.pc_range[1]) / self.voxel_size[1]),
            min=0)
        anchors_bv[:, 2] = paddle.clip(
            paddle.floor(
                (anchors_bv[:, 2] - self.pc_range[0]) / self.voxel_size[0]),
            max=self.grid_size[0] - 1)
        anchors_bv[:, 3] = paddle.clip(
            paddle.floor(
                (anchors_bv[:, 3] - self.pc_range[1]) / self.voxel_size[1]),
            max=self.grid_size[1] - 1)
        self.anchors_bv = anchors_bv.astype("int64")
    def generate_anchors_mask(self, coords):
        # find anchors with area < threshold
        # Build a 2D integral image of voxel occupancy, then read each
        # anchor's occupied-cell count in O(1) per anchor.
        dense_voxel_map = sparse_sum_for_anchors_mask(coords,
                                                      self.grid_size[[1, 0]])
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = fused_get_anchors_area(dense_voxel_map, self.anchors_bv)
        anchors_mask = anchors_area > self.anchor_area_threshold
        return anchors_mask
    @paddle.no_grad()
    def forward(self, coords):
        """Return a boolean mask of anchors overlapping occupied voxels."""
        return self.generate_anchors_mask(coords)
class AnchorGeneratorStride(object):
    """Generates anchors for one class on a regular 3D grid.

    Anchor centers are laid out with fixed strides/offsets; every center is
    combined with each configured size and rotation.
    """
    def __init__(self,
                 sizes=[1.6, 3.9, 1.56],
                 anchor_strides=[0.4, 0.4, 1.0],
                 anchor_offsets=[0.2, -39.8, -1.78],
                 rotations=[0, math.pi / 2],
                 matched_threshold=-1,
                 unmatched_threshold=-1):
        # NOTE: the list defaults are never mutated here, so sharing them
        # across instances is safe.
        self._sizes = sizes
        self._anchor_strides = anchor_strides
        self._anchor_offsets = anchor_offsets
        self._rotations = rotations
        self._match_threshold = matched_threshold
        self._unmatch_threshold = unmatched_threshold
    @property
    def match_threshold(self):
        # IoU threshold above which an anchor is a positive match.
        return self._match_threshold
    @property
    def unmatch_threshold(self):
        # IoU threshold below which an anchor is a negative.
        return self._unmatch_threshold
    def generate(self, feature_map_size):
        """
        Args:
            feature_map_size: list [D, H, W](zyx)
        Returns:
            anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
        """
        x_stride, y_stride, z_stride = self._anchor_strides
        x_offset, y_offset, z_offset = self._anchor_offsets
        z_centers = paddle.arange(feature_map_size[0], dtype="float32")
        y_centers = paddle.arange(feature_map_size[1], dtype="float32")
        x_centers = paddle.arange(feature_map_size[2], dtype="float32")
        z_centers = z_centers * z_stride + z_offset
        y_centers = y_centers * y_stride + y_offset
        x_centers = x_centers * x_stride + x_offset
        sizes = paddle.reshape(
            paddle.to_tensor(self._sizes, dtype="float32"), [-1, 3])
        rotations = paddle.to_tensor(self._rotations, dtype="float32")
        # Cartesian product of centers and rotations; sizes are broadcast in
        # on an extra axis so each location carries every (size, rot) combo.
        rets = paddle.meshgrid(x_centers, y_centers, z_centers, rotations)
        tile_shape = [1] * 5
        tile_shape[-2] = sizes.shape[0]
        for i in range(len(rets)):
            rets[i] = paddle.tile(rets[i][..., None, :], tile_shape)
            rets[i] = rets[i][..., None]
        sizes = paddle.reshape(sizes, [1, 1, 1, -1, 1, 3])
        tile_size_shape = list(rets[0].shape)
        tile_size_shape[3] = 1
        sizes = paddle.tile(sizes, tile_size_shape)
        rets.insert(3, sizes)
        # Concatenate (x, y, z, sizes, rot) into the 7-dim code, then reorder
        # the leading grid axes from (x, y, z) to (z, y, x).
        ret = paddle.concat(rets, axis=-1)
        return paddle.transpose(ret, [2, 1, 0, 3, 4, 5])
def rbbox2d_to_circumscribed(rbboxes):
    """Convert rotated 2D bboxes to their nearest axis-aligned boxes.

    Each rotation is normalized to [-pi/2, pi/2); when its magnitude exceeds
    pi/4 the box is closer to "lying" than "standing", so its x/y dims are
    swapped before the corners are computed.

    Args:
        rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes.

    Returns:
        bboxes: [N, 4(xmin, ymin, xmax, ymax)] axis-aligned bboxes.
    """
    angles = paddle.abs(limit_period(rbboxes[..., -1], 0.5, math.pi))
    swap_dims = (angles > math.pi / 4)[..., None]
    # Same boxes with xdim/ydim exchanged (columns 2 and 3 swapped).
    swapped = paddle.index_select(
        rbboxes, paddle.to_tensor([0, 1, 3, 2]), axis=1)
    boxes = paddle.where(swap_dims, swapped, rbboxes[:, :4])
    centers = boxes[:, :2]
    half_dims = boxes[:, 2:] / 2
    return paddle.concat([centers - half_dims, centers + half_dims], axis=-1)
def limit_period(val, offset: float = 0.5, period: float = math.pi):
    """Wrap `val` into the half-open interval [-offset*period, (1-offset)*period)."""
    num_periods = paddle.floor(val / period + offset)
    return val - num_periods * period
def sparse_sum_for_anchors_mask(coors, shape):
    """Scatter voxel coordinates into a dense per-cell occupancy count map.

    Args:
        coors: [N, >=3] voxel coordinates; columns 1:3 index the dense map.
        shape: 2D shape of the output map.

    Returns:
        Float tensor of `shape` holding the voxel count per cell.
    """
    ones = paddle.ones([coors.shape[0]], dtype="float32")
    dense = paddle.zeros(shape, dtype="float32")
    return paddle.scatter_nd_add(dense, coors[:, 1:3], ones)
def fused_get_anchors_area(dense_map, anchors_bv):
    """Read each anchor's occupied-cell count from an integral image.

    Standard summed-area-table lookup: with corner values I(y, x) of the
    cumulative map, area = BR - BL - TR + TL.

    Args:
        dense_map: 2D cumulative-sum (integral) occupancy map, indexed [y, x].
        anchors_bv: [N, 4] integer anchor footprints [xmin, ymin, xmax, ymax].

    Returns:
        [N] tensor of per-anchor occupied-cell counts.
    """
    def corner(row_col, col_col):
        # Gather I(y, x) for the given (row, column) index columns.
        idx = paddle.index_select(
            anchors_bv, paddle.to_tensor([row_col, col_col]), axis=1)
        return paddle.gather_nd(dense_map, idx)

    bottom_right = corner(3, 2)
    top_left = corner(1, 0)
    bottom_left = corner(3, 0)
    top_right = corner(1, 2)
    return bottom_right - bottom_left - top_right + top_left
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/bevformer/bevformer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from BEVFormer (https://github.com/fundamentalvision/BEVFormer)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import collections
import copy
import os
from typing import Dict, List
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D
from paddle3d.sample import Sample, SampleMeta
from paddle3d.utils import dtype2float32
from paddle3d.utils.grid import GridMask
from paddle3d.utils.logger import logger
@manager.MODELS.add_component
class BEVFormer(nn.Layer):
    """BEVFormer multi-camera 3D detector.

    Image backbone + neck extract per-camera features; ``pts_bbox_head``
    (a transformer head) fuses them into a BEV representation, optionally
    reusing the previous frame's BEV (temporal attention) at test time when
    ``video_test_mode`` is enabled.
    """
    def __init__(self,
                 backbone,
                 neck,
                 pts_bbox_head,
                 use_grid_mask=False,
                 pretrained=None,
                 video_test_mode=False):
        super(BEVFormer, self).__init__()
        self.grid_mask = GridMask(
            True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7)
        self.use_grid_mask = use_grid_mask
        self.video_test_mode = video_test_mode
        # Temporal state carried across test-time frames of one scene.
        self.prev_frame_info = {
            'prev_bev': None,
            'scene_token': None,
            'prev_pos': 0,
            'prev_angle': 0,
        }
        self.backbone = backbone
        self.neck = neck
        self.pts_bbox_head = pts_bbox_head
        self.pretrained = pretrained
        # NOTE(review): duplicate assignment of video_test_mode (also set
        # above); harmless but redundant.
        self.video_test_mode = video_test_mode
    def extract_img_feat(self, img, img_metas, len_queue=None):
        """Extract features of images.

        Flattens [B, N, C, H, W] multi-camera batches into [B*N, C, H, W]
        for the backbone, then reshapes the neck outputs back to
        per-camera (and, if ``len_queue`` is given, per-frame) layout.
        """
        if not getattr(self, 'export_model', False):
            B = img.shape[0]
            if img is not None:
                if img.dim() == 5 and img.shape[0] == 1:
                    img.squeeze_()
                elif img.dim() == 5 and img.shape[0] > 1:
                    B, N, C, H, W = img.shape
                    img = img.reshape([B * N, C, H, W])
                if self.use_grid_mask:
                    img = self.grid_mask(img)
                data = {'image': img}
                img_feats = self.backbone(data)
                if isinstance(img_feats, dict):
                    img_feats = list(img_feats.values())
            else:
                return None
        else:
            # Export path: a single flattened multi-camera sample.
            B = 1
            if self.use_grid_mask:
                img = self.grid_mask(img)
            data = {'image': img}
            img_feats = self.backbone(data)
            if isinstance(img_feats, dict):
                img_feats = list(img_feats.values())
        img_feats = self.neck(img_feats)
        img_feats_reshaped = []
        for img_feat in img_feats:
            BN, C, H, W = img_feat.shape
            if len_queue is not None:
                img_feats_reshaped.append(
                    img_feat.reshape(
                        [int(B / len_queue), len_queue,
                         int(BN / B), C, H, W]))
            else:
                img_feats_reshaped.append(
                    img_feat.reshape([B, int(BN / B), C, H, W]))
        return img_feats_reshaped
    def extract_feat(self, img, img_metas=None, len_queue=None):
        """Extract features from images and points."""
        img_feats = self.extract_img_feat(
            img=img, img_metas=img_metas, len_queue=len_queue)
        return img_feats
    def obtain_history_bev(self, imgs_queue, img_metas_list):
        """Obtain history BEV features iteratively. To save GPU memory, gradients are not calculated.
        """
        self.eval()
        with paddle.no_grad():
            prev_bev = None
            bs, len_queue, num_cams, C, H, W = imgs_queue.shape
            imgs_queue = imgs_queue.reshape([bs * len_queue, num_cams, C, H, W])
            img_feats_list = self.extract_feat(
                img=imgs_queue, len_queue=len_queue)
            for i in range(len_queue):
                img_metas = [each[i] for each in img_metas_list]
                if not img_metas[0]['prev_bev_exists']:
                    prev_bev = None
                if prev_bev is None:
                    # NOTE(review): bev_w * bev_w appears here and in several
                    # other places; upstream BEVFormer uses bev_h * bev_w.
                    # Works only when the BEV grid is square — confirm.
                    prev_bev = paddle.zeros([
                        self.pts_bbox_head.bev_w * self.pts_bbox_head.bev_w, bs,
                        self.pts_bbox_head.transformer.embed_dims
                    ],
                                            dtype='float32')
                img_feats = [each_scale[:, i] for each_scale in img_feats_list]
                prev_bev = self.pts_bbox_head(
                    img_feats, img_metas, prev_bev, only_bev=True)
        self.train()
        return prev_bev
    def forward(self, samples, **kwargs):
        """Dispatch to train or test forward; honors AMP settings in training."""
        if self.training:
            if hasattr(self, 'amp_cfg_'):
                self.pts_bbox_head.amp_cfg_ = self.amp_cfg_
                # 'linspace' is kept in fp32 under AMP for numeric stability.
                with paddle.amp.auto_cast(
                        **self.amp_cfg_, custom_black_list=['linspace']):
                    return self.forward_train(samples, **kwargs)
            else:
                return self.forward_train(samples, **kwargs)
        else:
            return self.forward_test(samples, **kwargs)
    def forward_train(
            self,
            samples,
            gt_labels=None,
            gt_bboxes=None,
            proposals=None,
            gt_bboxes_ignore=None,
            img_depth=None,
            img_mask=None,
    ):
        """Training step: build history BEV from the first len_queue-1 frames,
        then run the head on the last frame and compute losses."""
        img_metas = samples['meta']
        img = samples['img']
        gt_labels_3d = samples['gt_labels_3d']
        gt_bboxes_3d = samples['gt_bboxes_3d']
        bs = img.shape[0]
        len_queue = img.shape[1]
        prev_img = img[:, :-1, ...]
        img = img[:, -1, ...]
        prev_img_metas = copy.deepcopy(img_metas)
        prev_bev = self.obtain_history_bev(prev_img, prev_img_metas)
        img_metas = [each[len_queue - 1] for each in img_metas]
        if not img_metas[0]['prev_bev_exists']:
            prev_bev = None
        if prev_bev is None:
            # NOTE(review): see obtain_history_bev — bev_w * bev_w, likely
            # intended as bev_h * bev_w; confirm.
            prev_bev = paddle.zeros([
                self.pts_bbox_head.bev_w * self.pts_bbox_head.bev_w, bs,
                self.pts_bbox_head.transformer.embed_dims
            ],
                                    dtype='float32')
        img_feats = self.extract_feat(img=img, img_metas=img_metas)
        outs = self.pts_bbox_head(img_feats, img_metas, prev_bev)
        loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs]
        losses_pts = self.pts_bbox_head.loss(*loss_inputs, img_metas=img_metas)
        return losses_pts
    def forward_test(self, samples, **kwargs):
        """Test step with temporal state: reuses the previous frame's BEV
        (within the same scene) and compensates ego motion via can_bus."""
        img_metas = samples['meta']
        img = samples['img']
        for var, name in [(img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError('{} must be a list, but got {}'.format(
                    name, type(var)))
        img = [img] if img is None else img
        if img_metas[0]['scene_token'] != self.prev_frame_info['scene_token']:
            # the first sample of each scene is truncated
            self.prev_frame_info['prev_bev'] = None
        # update idx
        self.prev_frame_info['scene_token'] = img_metas[0]['scene_token']
        # do not use temporal information
        if not self.video_test_mode:
            self.prev_frame_info['prev_bev'] = None
        # Get the delta of ego position and angle between two timestamps.
        tmp_pos = copy.deepcopy(img_metas[0]['can_bus'][:3])
        tmp_angle = copy.deepcopy(img_metas[0]['can_bus'][-1])
        if self.prev_frame_info['prev_bev'] is not None:
            img_metas[0]['can_bus'][:3] -= self.prev_frame_info['prev_pos']
            img_metas[0]['can_bus'][-1] -= self.prev_frame_info['prev_angle']
        else:
            img_metas[0]['can_bus'][-1] = 0
            img_metas[0]['can_bus'][:3] = 0
        if self.prev_frame_info['prev_bev'] is None:
            # NOTE(review): bev_w * bev_w — see obtain_history_bev.
            self.prev_frame_info['prev_bev'] = paddle.zeros([
                self.pts_bbox_head.bev_w * self.pts_bbox_head.bev_w,
                img.shape[0], self.pts_bbox_head.transformer.embed_dims
            ],
                                                            dtype='float32')
        new_prev_bev, bbox_results = self.simple_test(
            img_metas, img, prev_bev=self.prev_frame_info['prev_bev'], **kwargs)
        self.prev_frame_info['prev_pos'] = tmp_pos
        self.prev_frame_info['prev_angle'] = tmp_angle
        self.prev_frame_info['prev_bev'] = new_prev_bev
        return dict(preds=self._parse_results_to_sample(bbox_results, samples))
    def simple_test_pts(self, x, img_metas, prev_bev, rescale=False):
        """Test function"""
        outs = self.pts_bbox_head(x, img_metas, prev_bev=prev_bev)
        bbox_list = self.pts_bbox_head.get_bboxes(
            outs, img_metas, rescale=rescale)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return outs['bev_embed'], bbox_results
    def simple_test(self, img_metas, img=None, prev_bev=None, rescale=False):
        """Test function without augmentaiton."""
        img_feats = self.extract_feat(img=img, img_metas=img_metas)
        bbox_list = [dict() for i in range(len(img_metas))]
        new_prev_bev, bbox_pts = self.simple_test_pts(
            img_feats, img_metas, prev_bev, rescale=rescale)
        for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
            result_dict['pts_bbox'] = pts_bbox
        return new_prev_bev, bbox_list
    def _parse_results_to_sample(self, results: dict, sample: dict):
        """Convert head outputs into paddle3d ``Sample`` objects, shifting
        box z from bottom center to gravity center along the way."""
        num_samples = len(results)
        new_results = []
        for i in range(num_samples):
            data = Sample(None, sample["modality"][i])
            bboxes_3d = results[i]['pts_bbox']["boxes_3d"].numpy()
            labels = results[i]['pts_bbox']["labels_3d"].numpy()
            confidences = results[i]['pts_bbox']["scores_3d"].numpy()
            # Bottom center -> gravity center: z += height / 2.
            bottom_center = bboxes_3d[:, :3]
            gravity_center = np.zeros_like(bottom_center)
            gravity_center[:, :2] = bottom_center[:, :2]
            gravity_center[:, 2] = bottom_center[:, 2] + bboxes_3d[:, 5] * 0.5
            bboxes_3d[:, :3] = gravity_center
            data.bboxes_3d = BBoxes3D(bboxes_3d[:, 0:7])
            data.bboxes_3d.coordmode = 'Lidar'
            data.bboxes_3d.origin = [0.5, 0.5, 0.5]
            data.bboxes_3d.rot_axis = 2
            # Columns 7:9 carry per-box (vx, vy) velocities.
            data.bboxes_3d.velocities = bboxes_3d[:, 7:9]
            data['bboxes_3d_numpy'] = bboxes_3d[:, 0:7]
            data['bboxes_3d_coordmode'] = 'Lidar'
            data['bboxes_3d_origin'] = [0.5, 0.5, 0.5]
            data['bboxes_3d_rot_axis'] = 2
            data['bboxes_3d_velocities'] = bboxes_3d[:, 7:9]
            data.labels = labels
            data.confidences = confidences
            data.meta = SampleMeta(id=sample["meta"][i]['id'])
            if "calibs" in sample:
                calib = [calibs.numpy()[i] for calibs in sample["calibs"]]
                data.calibs = calib
            new_results.append(data)
        return new_results
    def export_forward(self, img, prev_bev, img_metas):
        """Forward used by the exported static graph (single sample)."""
        img_metas = [img_metas]
        new_prev_bev, bbox_results = self.simple_test(
            img_metas, img, prev_bev=prev_bev)
        return new_prev_bev, bbox_results
    def export(self, save_dir: str, **kwargs):
        """Export the model to a static inference graph under ``save_dir``."""
        self.forward = self.export_forward
        self.export_model = True
        self.pts_bbox_head.transformer.export_model = True
        self.pts_bbox_head.transformer.encoder.export_model = True
        # Input specs fix the expected camera count / resolution at export.
        # NOTE(review): 6 cameras at 480x800 are hard-coded here — confirm
        # they match the training configuration before exporting.
        image_spec = paddle.static.InputSpec(
            shape=[6, 3, 480, 800], dtype="float32", name='image')
        pre_bev_spec = paddle.static.InputSpec(
            shape=[
                self.pts_bbox_head.bev_w * self.pts_bbox_head.bev_w, 1,
                self.pts_bbox_head.transformer.embed_dims
            ],
            dtype="float32",
            name='pre_bev')
        img_metas_spec = {
            "can_bus":
            paddle.static.InputSpec(
                shape=[18], dtype="float32", name='can_bus'),
            "lidar2img":
            paddle.static.InputSpec(
                shape=[-1, -1, 4, 4], dtype="float32", name='lidar2img'),
            "img_shape":
            paddle.static.InputSpec(
                shape=[6, 3], dtype="int32", name='img_shape'),
        }
        input_spec = [image_spec, pre_bev_spec, img_metas_spec]
        paddle.jit.to_static(self, input_spec=input_spec)
        paddle.jit.save(self, os.path.join(save_dir, "bevformer_inference"))
        logger.info("Exported model is saved in {}".format(
            os.path.join(save_dir, "bevformer_inference")))
def bbox3d2result(bboxes, scores, labels, attrs=None):
    """Pack per-sample detection outputs into a single result dict.

    Args:
        bboxes: Predicted 3D boxes.
        scores: Per-box confidence scores.
        labels: Per-box class indices.
        attrs: Optional per-box attribute predictions.

    Returns:
        dict: Keys ``boxes_3d``, ``scores_3d``, ``labels_3d`` and, when
        ``attrs`` is given, ``attrs_3d``. Values are stored as-is (no
        numpy conversion is performed here, despite the historical
        docstring claiming otherwise).
    """
    result_dict = dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels)
    if attrs is not None:
        result_dict['attrs_3d'] = attrs
    return result_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/bevformer/__init__.py
|
from . import bevformer, bevformer_head
from .bevformer import *
from .bevformer_head import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/bevformer/bevformer_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from BEVFormer (https://github.com/fundamentalvision/BEVFormer)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
from functools import partial
import numpy as np
import paddle
import paddle.distributed as dist
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Normal
from paddle3d.apis import manager
from paddle3d.models.layers.param_init import constant_init, reset_parameters
from paddle3d.models.transformers.transformer import inverse_sigmoid
from paddle3d.models.transformers.utils import nan_to_num
from paddle3d.utils import dtype2float32
from paddle3d.utils.box import normalize_bbox
def multi_apply(func, *args, **kwargs):
    """Apply ``func`` across parallel argument lists and regroup results.

    ``func`` is called once per position with one element from each list
    in ``args`` (plus any fixed ``kwargs``); the per-call tuple outputs
    are then transposed so each returned list holds one kind of output
    for every input.

    Args:
        func (Function): A function that will be applied to a list of
            arguments.

    Returns:
        tuple(list): One list per output of ``func``, each of length
        ``len(args[0])``.
    """
    if kwargs:
        func = partial(func, **kwargs)
    per_input_results = [func(*items) for items in zip(*args)]
    return tuple(list(group) for group in zip(*per_input_results))
def reduce_mean(tensor):
    """Obtain the mean of tensor on different GPUs."""
    # No-op when distributed training has not been initialized
    # (single-process training or evaluation).
    if not dist.is_initialized():
        return tensor
    # Clone so the caller's tensor is not mutated by the in-place scale.
    tensor = tensor.clone()
    # Pre-scaling by 1/world_size turns the SUM all-reduce into a mean.
    dist.all_reduce(
        tensor.scale_(1. / dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor
@manager.HEADS.add_component
class BEVFormerHead(nn.Layer):
"""Head of Detr3D.
Args:
with_box_refine (bool): Whether to refine the reference points
in the decoder. Defaults to False.
as_two_stage (bool) : Whether to generate the proposal from
the outputs of encoder.
transformer (obj:`ConfigDict`): ConfigDict is used for building
the Encoder and Decoder.
bev_h, bev_w (int): spatial shape of BEV queries.
"""
def __init__(self,
             num_classes,
             in_channels,
             transformer,
             positional_encoding,
             num_query=100,
             num_reg_fcs=2,
             num_cls_fcs=2,
             sync_cls_avg_factor=False,
             loss_cls=None,
             loss_bbox=None,
             loss_iou=None,
             assigner=dict(
                 type='HungarianAssigner',
                 cls_cost=dict(type='ClassificationCost', weight=1.),
                 reg_cost=dict(type='BBoxL1Cost', weight=5.0),
                 iou_cost=dict(type='IoUCost', iou_mode='giou',
                               weight=2.0)),
             sampler=None,
             with_box_refine=False,
             as_two_stage=False,
             bbox_coder=None,
             code_weights=None,
             bev_h=30,
             bev_w=30,
             **kwargs):
    """Set up the BEVFormer head: losses, matcher, box coder and branches.

    Args:
        num_classes (int): Number of detection classes.
        in_channels (int): Channels of the input feature maps.
        transformer: Encoder/decoder transformer module.
        positional_encoding: Positional encoding for the BEV queries.
        num_query (int): Number of object queries.
        num_reg_fcs (int): Hidden FC layers in the cls/reg branches.
        sync_cls_avg_factor (bool): Sync the cls avg factor across GPUs.
        loss_cls / loss_bbox / loss_iou: Loss modules; their weights must
            match the corresponding matcher costs (asserted below).
        assigner: Hungarian assigner config/object.
            NOTE(review): the default is a plain dict, but the asserts
            below access it as an object (``assigner.cls_cost.weight``),
            so in practice a config object must be passed — confirm.
        sampler: Pseudo sampler (DETR uses sampling=False).
        with_box_refine (bool): Refine reference points in the decoder.
        as_two_stage (bool): Generate proposals from the encoder output.
        bbox_coder: Box coder; also supplies ``point_cloud_range``.
        code_weights (list): Per-dimension regression loss weights.
        bev_h, bev_w (int): Spatial shape of the BEV query grid.
    """
    super(BEVFormerHead, self).__init__()
    self.bev_h = bev_h
    self.bev_w = bev_w
    self.with_box_refine = with_box_refine
    self.as_two_stage = as_two_stage
    if self.as_two_stage:
        transformer['as_two_stage'] = self.as_two_stage
    # Box code size: 10 = xyz + dims + yaw(sin, cos) + velocity(x, y).
    # TODO confirm — inferred from the default code_weights length.
    if 'code_size' in kwargs:
        self.code_size = kwargs['code_size']
    else:
        self.code_size = 10
    if code_weights is not None:
        self.code_weights = code_weights
    else:
        # Velocity terms are down-weighted by default.
        self.code_weights = [
            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2
        ]
    self.code_weights = paddle.to_tensor(self.code_weights)
    self.bbox_coder = bbox_coder
    self.point_cloud_range = self.bbox_coder.point_cloud_range
    # Real-world extent of the BEV grid, derived from the pc range.
    self.real_w = self.point_cloud_range[3] - self.point_cloud_range[0]
    self.real_h = self.point_cloud_range[4] - self.point_cloud_range[1]
    self.num_cls_fcs = num_cls_fcs - 1
    self.bg_cls_weight = 0
    self.sync_cls_avg_factor = sync_cls_avg_factor
    # Loss weights and matcher costs must agree, or the assignment would
    # optimize a different objective than the training loss.
    assert loss_cls.loss_weight == assigner.cls_cost.weight, \
        'The classification weight for loss and matcher should be' \
        'exactly the same.'
    assert loss_bbox.loss_weight == assigner.reg_cost.weight, \
        'The regression L1 weight for loss and matcher ' \
        'should be exactly the same.'
    assert loss_iou.loss_weight == assigner.iou_cost.weight, \
        'The regression iou weight for loss and matcher should be' \
        'exactly the same.'
    self.assigner = assigner
    # DETR sampling=False, so use PseudoSampler
    self.sampler = sampler
    self.num_query = num_query
    self.num_classes = num_classes
    self.in_channels = in_channels
    self.num_reg_fcs = num_reg_fcs
    self.fp16_enabled = False
    self.loss_cls = loss_cls
    self.loss_bbox = loss_bbox
    self.loss_iou = loss_iou
    # Sigmoid-based losses (e.g. focal) need no explicit background class.
    if self.loss_cls.use_sigmoid:
        self.cls_out_channels = num_classes
    else:
        self.cls_out_channels = num_classes + 1
    self.act_cfg = getattr(transformer, 'act_cfg', dict(type='ReLU'))
    self.activate = getattr(nn, self.act_cfg.pop('type'))()
    self.positional_encoding = positional_encoding
    self.transformer = transformer
    self.embed_dims = self.transformer.embed_dims
    num_feats = positional_encoding.num_feats
    assert num_feats * 2 == self.embed_dims, 'embed_dims should' \
        f' be exactly 2 times of num_feats. Found {self.embed_dims}' \
        f' and {num_feats}.'
    self._init_layers()
    self.init_weights()
def _init_layers(self):
    """Initialize classification branch and regression branch of head."""
    # Classification MLP: (Linear + LayerNorm + ReLU) x num_reg_fcs,
    # then a final Linear producing the class logits.
    cls_branch = []
    for _ in range(self.num_reg_fcs):
        cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
        cls_branch.append(nn.LayerNorm(self.embed_dims))
        cls_branch.append(nn.ReLU())
    cls_branch.append(nn.Linear(self.embed_dims, self.cls_out_channels))
    fc_cls = nn.Sequential(*cls_branch)
    # Regression MLP: (Linear + ReLU) x num_reg_fcs, then code_size outputs.
    reg_branch = []
    for _ in range(self.num_reg_fcs):
        reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
        reg_branch.append(nn.ReLU())
    reg_branch.append(nn.Linear(self.embed_dims, self.code_size))
    reg_branch = nn.Sequential(*reg_branch)

    def _get_clones(module, N):
        # Deep copies so per-layer branches do not share parameters.
        return nn.LayerList([copy.deepcopy(module) for i in range(N)])

    # last reg_branch is used to generate proposal from
    # encode feature map when as_two_stage is True.
    num_pred = (self.transformer.decoder.num_layers + 1) if \
        self.as_two_stage else self.transformer.decoder.num_layers
    if self.with_box_refine:
        self.cls_branches = _get_clones(fc_cls, num_pred)
        self.reg_branches = _get_clones(reg_branch, num_pred)
    else:
        # Without box refinement, all decoder layers share the same heads.
        self.cls_branches = nn.LayerList([fc_cls for _ in range(num_pred)])
        self.reg_branches = nn.LayerList(
            [reg_branch for _ in range(num_pred)])
    if not self.as_two_stage:
        # One learnable embedding per BEV cell, and per object query
        # (query embeddings are double-width: content + position parts).
        self.bev_embedding = nn.Embedding(
            self.bev_h * self.bev_w,
            self.embed_dims,
            weight_attr=ParamAttr(initializer=Normal()))
        self.query_embedding = nn.Embedding(
            self.num_query,
            self.embed_dims * 2,
            weight_attr=ParamAttr(initializer=Normal()))
@paddle.no_grad()
def init_weights(self):
    """Initialize weights of the DeformDETR head."""
    for cls_layerlist in self.cls_branches:
        for cls_layer in cls_layerlist:
            if isinstance(cls_layer, nn.Linear):
                reset_parameters(cls_layer)
            elif isinstance(cls_layer, nn.LayerNorm):
                constant_init(cls_layer.weight, value=1)
                constant_init(cls_layer.bias, value=0)
    for reg_layerlist in self.reg_branches:
        for reg_layer in reg_layerlist:
            if isinstance(reg_layer, nn.Linear):
                reset_parameters(reg_layer)
    if self.loss_cls.use_sigmoid:
        # Focal-loss style bias init: bias the final classifier so the
        # initial foreground probability is ~prior_prob (1%), which
        # stabilizes early training with many background queries.
        prior_prob = 0.01
        bias_init = float(-np.log((1 - prior_prob) / prior_prob))
        for m in self.cls_branches:
            constant_init(m[-1].bias, value=bias_init)
def forward(self, mlvl_feats, img_metas, prev_bev=None, only_bev=False):
    """Forward pass of the BEVFormer head.

    Args:
        mlvl_feats (list[Tensor]): Multi-level image features; shape
            [bs, num_cams, C, H, W] per level (inferred from the unpack
            below — TODO confirm).
        img_metas (list[dict]): Per-sample image meta information.
        prev_bev (Tensor, optional): BEV features of the previous frame.
        only_bev (bool): If True, run only the encoder and return the
            BEV features (used to warm up temporal history).

    Returns:
        Tensor | dict: BEV features when ``only_bev``; otherwise a dict
        with 'bev_embed', 'all_cls_scores', 'all_bbox_preds' and the
        (always None here) two-stage encoder outputs.
    """
    if hasattr(self, 'amp_cfg_'):
        # Mixed-precision path: cast inputs to fp16.
        for key, mlvl_feat in enumerate(mlvl_feats):
            mlvl_feats[key] = mlvl_feat.cast(paddle.float16)
    bs, num_cam, _, _, _ = mlvl_feats[0].shape
    dtype = mlvl_feats[0].dtype
    object_query_embeds = self.query_embedding.weight.cast(dtype)
    bev_queries = self.bev_embedding.weight.cast(dtype)
    # All-zero mask: the positional encoding only needs the BEV shape.
    bev_mask = paddle.zeros((bs, self.bev_h, self.bev_w), dtype=dtype)
    bev_pos = self.positional_encoding(bev_mask).cast(dtype)
    if only_bev:  # only use encoder to obtain BEV features, TODO: refine the workaround
        bev_embed = self.transformer.get_bev_features(
            mlvl_feats,
            bev_queries,
            self.bev_h,
            self.bev_w,
            grid_length=(self.real_h / self.bev_h,
                         self.real_w / self.bev_w),
            bev_pos=bev_pos,
            img_metas=img_metas,
            prev_bev=prev_bev,
        )
        bev_embed = bev_embed.transpose([1, 0, 2])
        return bev_embed
    else:
        outputs = self.transformer(
            mlvl_feats,
            bev_queries,
            object_query_embeds,
            self.bev_h,
            self.bev_w,
            grid_length=(self.real_h / self.bev_h,
                         self.real_w / self.bev_w),
            bev_pos=bev_pos,
            reg_branches=self.reg_branches
            if self.with_box_refine else None,  # noqa:E501
            cls_branches=self.cls_branches if self.as_two_stage else None,
            img_metas=img_metas,
            prev_bev=prev_bev)
        bev_embed, hs, init_reference, inter_references = outputs
        hs = hs.transpose([0, 2, 1, 3])
        outputs_classes = []
        outputs_coords = []
        # Decode per-decoder-layer predictions, refining from the
        # reference point of each layer.
        for lvl in range(hs.shape[0]):
            if lvl == 0:
                reference = init_reference
            else:
                reference = inter_references[lvl - 1]
            reference = inverse_sigmoid(reference)
            outputs_class = self.cls_branches[lvl](hs[lvl])
            tmp = self.reg_branches[lvl](hs[lvl])
            # TODO: check the shape of reference
            assert reference.shape[-1] == 3
            # Residual on the reference point (x, y and z at index 4),
            # squashed back to [0, 1] with sigmoid.
            tmp[..., 0:2] += reference[..., 0:2]
            tmp[..., 0:2] = F.sigmoid(tmp[..., 0:2])
            tmp[..., 4:5] += reference[..., 2:3]
            tmp[..., 4:5] = F.sigmoid(tmp[..., 4:5])
            # De-normalize x, y, z into the real point-cloud range.
            tmp[..., 0:1] = (
                tmp[..., 0:1] *
                (self.point_cloud_range[3] - self.point_cloud_range[0]) +
                self.point_cloud_range[0])
            tmp[..., 1:2] = (
                tmp[..., 1:2] *
                (self.point_cloud_range[4] - self.point_cloud_range[1]) +
                self.point_cloud_range[1])
            tmp[..., 4:5] = (
                tmp[..., 4:5] *
                (self.point_cloud_range[5] - self.point_cloud_range[2]) +
                self.point_cloud_range[2])
            # TODO: check if using sigmoid
            outputs_coord = tmp
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        outputs_classes = paddle.stack(outputs_classes)
        outputs_coords = paddle.stack(outputs_coords)
        outs = {
            'bev_embed': bev_embed,
            'all_cls_scores': outputs_classes,
            'all_bbox_preds': outputs_coords,
            'enc_cls_scores': None,
            'enc_bbox_preds': None,
        }
        return outs
def _get_target_single(self,
                       cls_score,
                       bbox_pred,
                       gt_labels,
                       gt_bboxes,
                       gt_bboxes_ignore=None):
    """Compute regression and classification targets for one image.

    Outputs from a single decoder layer of a single feature level are used.

    Args:
        cls_score (Tensor): Box score logits from a single decoder layer
            for one image. Shape [num_query, cls_out_channels].
        bbox_pred (Tensor): Box predictions from a single decoder layer
            for one image, shape [num_query, code_size].
        gt_labels (Tensor): Ground truth class indices, shape (num_gts, ).
        gt_bboxes (Tensor): Ground truth boxes for one image.
        gt_bboxes_ignore (Tensor, optional): Boxes to ignore. Default None.

    Returns:
        tuple[Tensor]: (labels, label_weights, bbox_targets, bbox_weights,
        pos_inds, neg_inds) for one image.
    """
    num_bboxes = bbox_pred.shape[0]
    # assigner and sampler
    gt_c = gt_bboxes.shape[-1]
    assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes,
                                         gt_labels, gt_bboxes_ignore)
    sampling_result = self.sampler.sample(assign_result, bbox_pred,
                                          gt_bboxes)
    pos_inds = sampling_result.pos_inds
    neg_inds = sampling_result.neg_inds
    # label targets: unmatched queries default to the background class
    # (index num_classes).
    labels = paddle.full((num_bboxes, ),
                         self.num_classes,
                         dtype=paddle.int64)
    labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
    label_weights = paddle.ones((num_bboxes, ))
    # bbox targets: only matched (positive) queries contribute.
    bbox_targets = paddle.zeros_like(bbox_pred)[..., :gt_c]
    bbox_weights = paddle.zeros_like(bbox_pred)
    bbox_weights[pos_inds] = 1.0
    # DETR
    bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes
    return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
            neg_inds)
def get_targets(self,
                cls_scores_list,
                bbox_preds_list,
                gt_bboxes_list,
                gt_labels_list,
                gt_bboxes_ignore_list=None):
    """Compute regression and classification targets for a batch of images.

    Outputs from a single decoder layer of a single feature level are used.

    Args:
        cls_scores_list (list[Tensor]): Per-image box score logits, each
            [num_query, cls_out_channels].
        bbox_preds_list (list[Tensor]): Per-image box predictions, each
            [num_query, code_size].
        gt_bboxes_list (list[Tensor]): Per-image ground truth boxes.
        gt_labels_list (list[Tensor]): Per-image ground truth class
            indices, each (num_gts, ).
        gt_bboxes_ignore_list (list[Tensor], optional): Must be None.

    Returns:
        tuple: (labels_list, label_weights_list, bbox_targets_list,
        bbox_weights_list, num_total_pos, num_total_neg).
    """
    assert gt_bboxes_ignore_list is None, \
        'Only supports for gt_bboxes_ignore setting to None.'
    num_imgs = len(cls_scores_list)
    # Replicate the (None) ignore entry so multi_apply sees one per image.
    gt_bboxes_ignore_list = [gt_bboxes_ignore_list for _ in range(num_imgs)]
    labels_list, label_weights_list, bbox_targets_list, \
        bbox_weights_list, pos_inds_list, neg_inds_list = multi_apply(
            self._get_target_single, cls_scores_list, bbox_preds_list,
            gt_labels_list, gt_bboxes_list, gt_bboxes_ignore_list)
    num_total_pos = sum((inds.numel() for inds in pos_inds_list))
    num_total_neg = sum((inds.numel() for inds in neg_inds_list))
    return (labels_list, label_weights_list, bbox_targets_list,
            bbox_weights_list, num_total_pos, num_total_neg)
def loss_single(self,
                cls_scores,
                bbox_preds,
                gt_bboxes_list,
                gt_labels_list,
                gt_bboxes_ignore_list=None):
    """Loss for outputs of a single decoder layer of a single feature level.

    Args:
        cls_scores (Tensor): Box score logits for all images,
            shape [bs, num_query, cls_out_channels].
        bbox_preds (Tensor): Box predictions for all images,
            shape [bs, num_query, code_size].
        gt_bboxes_list (list[Tensor]): Per-image ground truth boxes.
        gt_labels_list (list[Tensor]): Per-image ground truth labels.
        gt_bboxes_ignore_list (list[Tensor], optional): Must be None.

    Returns:
        tuple(Tensor, Tensor): classification loss and L1 regression loss.
    """
    num_imgs = cls_scores.shape[0]
    cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
    bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
    cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
                                       gt_bboxes_list, gt_labels_list,
                                       gt_bboxes_ignore_list)
    (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
     num_total_pos, num_total_neg) = cls_reg_targets
    labels = paddle.concat(labels_list, 0)
    label_weights = paddle.concat(label_weights_list, 0)
    bbox_targets = paddle.concat(bbox_targets_list, 0)
    bbox_weights = paddle.concat(bbox_weights_list, 0)
    # classification loss
    cls_scores = cls_scores.reshape((-1, self.cls_out_channels))
    # construct weighted avg_factor to match with the official DETR repo
    cls_avg_factor = num_total_pos * 1.0 + \
        num_total_neg * self.bg_cls_weight
    if self.sync_cls_avg_factor:
        cls_avg_factor = reduce_mean(paddle.to_tensor([cls_avg_factor]))
    cls_avg_factor = max(cls_avg_factor, 1)
    loss_cls = self.loss_cls(
        cls_scores, labels, label_weights, avg_factor=cls_avg_factor)
    # Compute the average number of gt boxes accross all gpus, for
    # normalization purposes
    num_total_pos = paddle.to_tensor([num_total_pos], dtype=loss_cls.dtype)
    num_total_pos = paddle.clip(reduce_mean(num_total_pos), min=1).item()
    # regression L1 loss
    bbox_preds = bbox_preds.reshape([-1, bbox_preds.shape[-1]])
    normalized_bbox_targets = normalize_bbox(bbox_targets,
                                             self.point_cloud_range)
    # Drop rows with non-finite targets (e.g. degenerate boxes).
    isnotnan = paddle.isfinite(normalized_bbox_targets).all(axis=-1)
    bbox_weights = bbox_weights * self.code_weights
    loss_bbox = self.loss_bbox(
        bbox_preds[isnotnan],
        normalized_bbox_targets[isnotnan],
        bbox_weights[isnotnan],
        avg_factor=num_total_pos)
    # Guard against NaN/Inf losses destabilizing training.
    loss_cls = nan_to_num(loss_cls)
    loss_bbox = nan_to_num(loss_bbox)
    return loss_cls, loss_bbox
def loss(self,
         gt_bboxes_list,
         gt_labels_list,
         preds_dicts,
         gt_bboxes_ignore=None,
         img_metas=None):
    """Loss function over all decoder layers.

    Args:
        gt_bboxes_list (list[Tensor]): Per-image ground truth boxes with
            bottom-center z (converted to gravity center below).
        gt_labels_list (list[Tensor]): Per-image ground truth labels.
        preds_dicts (dict): Head outputs; 'all_cls_scores' /
            'all_bbox_preds' are stacked per decoder layer, and
            'enc_cls_scores' / 'enc_bbox_preds' are the optional
            two-stage encoder proposals (None in the one-stage path).
        gt_bboxes_ignore (list[Tensor], optional): Must be None.
        img_metas (list[dict], optional): Unused here.

    Returns:
        dict[str, Tensor]: Per-layer loss components plus their sum
        under key 'loss'.
    """
    assert gt_bboxes_ignore is None, \
        f'{self.__class__.__name__} only supports ' \
        f'for gt_bboxes_ignore setting to None.'
    all_cls_scores = preds_dicts['all_cls_scores']
    all_bbox_preds = preds_dicts['all_bbox_preds']
    enc_cls_scores = preds_dicts['enc_cls_scores']
    enc_bbox_preds = preds_dicts['enc_bbox_preds']
    # Compute losses in fp32 even when the forward ran under AMP.
    all_cls_scores = dtype2float32(all_cls_scores)
    all_bbox_preds = dtype2float32(all_bbox_preds)
    enc_cls_scores = dtype2float32(enc_cls_scores)
    enc_bbox_preds = dtype2float32(enc_bbox_preds)
    num_dec_layers = len(all_cls_scores)
    # Shift GT z from bottom center to gravity center (z + h/2, with the
    # box height at index 5) to match the head's parameterization.
    bboxes_list = []
    for gt_bboxes in gt_bboxes_list:
        bottom_center = gt_bboxes[:, :3]
        gravity_center = paddle.zeros_like(bottom_center)
        gravity_center[:, :2] = bottom_center[:, :2]
        gravity_center[:, 2] = bottom_center[:, 2] + gt_bboxes[:, 5] * 0.5
        bboxes_list.append(
            paddle.concat([gravity_center, gt_bboxes[:, 3:]], axis=-1))
    # Every decoder layer is supervised with the same targets.
    all_gt_bboxes_list = [bboxes_list for _ in range(num_dec_layers)]
    all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
    all_gt_bboxes_ignore_list = [
        gt_bboxes_ignore for _ in range(num_dec_layers)
    ]
    losses_cls, losses_bbox = multi_apply(
        self.loss_single, all_cls_scores, all_bbox_preds,
        all_gt_bboxes_list, all_gt_labels_list, all_gt_bboxes_ignore_list)
    loss_dict = dict()
    # loss of proposal generated from encode feature map.
    if enc_cls_scores is not None:
        # NOTE(review): this iterates over len(all_gt_labels_list)
        # (= number of decoder layers) rather than len(gt_labels_list)
        # (= batch size); inherited from upstream DETR3D — confirm
        # before enabling the two-stage branch.
        binary_labels_list = [
            paddle.zeros_like(gt_labels_list[i])
            for i in range(len(all_gt_labels_list))
        ]
        enc_loss_cls, enc_losses_bbox = \
            self.loss_single(enc_cls_scores, enc_bbox_preds,
                             gt_bboxes_list, binary_labels_list, gt_bboxes_ignore)
        loss_dict['enc_loss_cls'] = enc_loss_cls
        loss_dict['enc_loss_bbox'] = enc_losses_bbox
    # loss from the last decoder layer
    loss_dict['loss_cls'] = losses_cls[-1]
    loss_dict['loss_bbox'] = losses_bbox[-1]
    # loss from other decoder layers
    num_dec_layer = 0
    for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1], losses_bbox[:-1]):
        loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
        loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
        num_dec_layer += 1
    total_loss = sum(loss_dict.values())
    loss_dict['loss'] = total_loss
    return loss_dict
def get_bboxes(self, preds_dicts, img_metas, rescale=False):
    """Generate bboxes from bbox head predictions.

    Args:
        preds_dicts (tuple[list[dict]]): Prediction results.
        img_metas (list[dict]): Point cloud and image's meta info.
        rescale (bool): Unused here; kept for interface compatibility.

    Returns:
        list[dict]: Decoded bbox, scores and labels after nms.
    """
    # Decode in fp32 regardless of the forward pass precision.
    for key, value in preds_dicts.items():
        preds_dicts[key] = dtype2float32(value)
    preds_dicts = self.bbox_coder.decode(preds_dicts)
    num_samples = len(preds_dicts)
    ret_list = []
    for i in range(num_samples):
        preds = preds_dicts[i]
        bboxes = preds['bboxes']
        # Convert z from gravity center back to bottom center
        # (height is at index 5); inverse of the shift applied in loss().
        bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5
        scores = preds['scores']
        labels = preds['labels']
        ret_list.append([bboxes, scores, labels])
    return ret_list
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/voxel_rcnn/voxel_rcnn.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
from typing import Dict, List
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D
from paddle3d.models.common.model_nms_utils import class_agnostic_nms
from paddle3d.sample import Sample, SampleMeta
from paddle3d.utils.logger import logger
from paddle3d.models.layers.param_init import uniform_init
@manager.MODELS.add_component
class VoxelRCNN(nn.Layer):
def __init__(self, num_class, voxelizer, voxel_encoder, middle_encoder,
             backbone, neck, dense_head, roi_head, post_process_cfg):
    """Two-stage Voxel R-CNN detector.

    Pipeline: voxelizer -> voxel encoder -> sparse middle encoder ->
    2D backbone -> neck -> dense (RPN) head -> RoI head.

    Args:
        num_class (int): Number of detection classes.
        voxelizer / voxel_encoder / middle_encoder / backbone / neck /
            dense_head / roi_head: Sub-network modules.
        post_process_cfg (dict): NMS config and score threshold used in
            ``post_processing``.
    """
    super(VoxelRCNN, self).__init__()
    self.num_class = num_class
    self.voxelizer = voxelizer
    self.voxel_encoder = voxel_encoder
    self.middle_encoder = middle_encoder
    self.backbone = backbone
    self.neck = neck
    self.dense_head = dense_head
    self.roi_head = roi_head
    self.post_process_cfg = post_process_cfg
    self.init_weights()
def init_weights(self):
    """Re-initialize BatchNorm weights of selected submodules uniformly in [0, 1)."""
    need_uniform_init_bn_weight_modules = [
        self.middle_encoder, self.backbone, self.neck,
        self.roi_head.shared_fc_layer, self.roi_head.cls_fc_layers,
        self.roi_head.reg_fc_layers
    ]
    for module in need_uniform_init_bn_weight_modules:
        for layer in module.sublayers():
            # Match any BatchNorm variant (1D/2D/3D) by class name.
            if 'BatchNorm' in layer.__class__.__name__:
                uniform_init(layer.weight, 0, 1)
def voxelize(self, points):
    """Run the configured voxelizer on raw points.

    Returns:
        tuple: (voxel features, voxel coordinates, points per voxel).
    """
    voxels, coords, points_per_voxel = self.voxelizer(points)
    return voxels, coords, points_per_voxel
def forward(self, batch_dict, **kwargs):
    """Voxelize input points, run the two-stage network, then train or predict.

    Args:
        batch_dict (dict): Batch from ``collate_fn``; must contain 'data'
            (list of per-sample point tensors, or a single tensor in
            export mode) and 'batch_size'.

    Returns:
        dict: {'loss': ...} in training; {'preds': [Sample, ...]} in eval;
        raw [boxes, scores, labels] in export mode.
    """
    voxel_features, coordinates, voxel_num_points = self.voxelizer(
        batch_dict['data'])
    batch_dict["voxel_coords"] = coordinates
    points_pad = []
    if not getattr(self, "export_model", False):
        # Training/eval path: prepend the batch index to every point so
        # all samples can be concatenated into one (N, 1 + dim) tensor.
        for bs_idx, point in enumerate(batch_dict['data']):
            point_dim = point.shape[-1]
            point = point.reshape([1, -1, point_dim])
            # Pad one element on the left of the last (feature) axis with
            # the batch index value.
            point_pad = F.pad(
                point, [1, 0],
                value=bs_idx,
                mode='constant',
                data_format="NCL")
            point_pad = point_pad.reshape([-1, point_dim + 1])
            points_pad.append(point_pad)
        batch_dict['points'] = paddle.concat(points_pad, axis=0)
    else:
        # Export path: a single sample, batch index is always 0.
        point = batch_dict['data']
        batch_dict['batch_size'] = 1
        point = point.unsqueeze(1)
        point_pad = F.pad(
            point, [1, 0], value=0, mode='constant', data_format="NCL")
        batch_dict['points'] = point_pad.squeeze(1)
    voxel_features = self.voxel_encoder(voxel_features, voxel_num_points)
    middle_out = self.middle_encoder(voxel_features,
                                     batch_dict['voxel_coords'],
                                     batch_dict['batch_size'])
    batch_dict.update(middle_out)
    backbone_out = self.backbone(middle_out['spatial_features'])
    batch_dict['spatial_features_2d'] = self.neck(backbone_out)
    batch_dict = self.dense_head(batch_dict)
    batch_dict = self.roi_head(batch_dict)
    if self.training:
        loss = self.get_training_loss()
        return loss
    else:
        pred_dicts = self.post_processing(batch_dict)
        if not getattr(self, "export_model", False):
            preds = self._parse_results_to_sample(pred_dicts, batch_dict)
            return {'preds': preds}
        else:
            return pred_dicts[0]
def collate_fn(self, batch: List):
    """Merge a list of per-sample dicts into one batch dict.

    'meta' is reduced to sample ids; 'path'/'modality'/'calibs'/'data'
    stay as per-sample lists; 'gt_boxes' is zero-padded into a single
    (batch, max_gt, box_dim) float32 array. Keys outside these groups
    are dropped. Adds 'batch_size'.
    """
    merged = collections.defaultdict(list)
    for sample in batch:
        for key, value in sample.items():
            merged[key].append(value)
    batch_size = len(merged['meta'])
    collated = {}
    for key, values in merged.items():
        if key == "meta":
            collated[key] = [meta.id for meta in values]
        elif key in ("path", "modality", "calibs"):
            collated[key] = values
        elif key == "data":
            collated[key] = list(values)
        elif key == "gt_boxes":
            # Pad every sample's boxes up to the batch maximum with zeros.
            max_boxes = max(len(boxes) for boxes in values)
            padded = np.zeros((batch_size, max_boxes, values[0].shape[-1]),
                              dtype=np.float32)
            for idx in range(batch_size):
                padded[idx, :len(values[idx]), :] = values[idx]
            collated[key] = padded
    collated['batch_size'] = batch_size
    return collated
def get_training_loss(self):
    """Total training loss: RPN (dense head) plus RCNN (RoI head).

    Returns:
        dict: {"loss": summed scalar loss tensor}.
    """
    # (Removed unused `disp_dict` local from the original.)
    loss_rpn, tb_dict = self.dense_head.get_loss()
    # The RoI head extends the same tensorboard dict with its entries.
    loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
    loss = loss_rpn + loss_rcnn
    return {"loss": loss}
def post_processing(self, batch_dict):
    """Select per-sample predictions, normalize scores and run NMS.

    Args:
        batch_dict:
            batch_size:
            batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
            multihead_label_mapping: [(num_class1), (num_class2), ...]
            batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
            cls_preds_normalized: indicate whether batch_cls_preds is normalized
            batch_index: optional (N1+N2+...)
            has_class_labels: True/False
            roi_labels: (B, num_rois) 1 .. num_classes
            batch_pred_labels: (B, num_boxes, 1)

    Returns:
        list: One entry per sample; a dict with 'box3d_lidar'/'scores'/
        'label_preds' normally, or [boxes, scores, labels] in export mode.
    """
    batch_size = batch_dict['batch_size']
    # NOTE(review): recall_dict, src_box_preds and src_cls_preds are
    # assigned but never used — kept for parity with upstream OpenPCDet.
    recall_dict = {}
    pred_dicts = []
    for index in range(batch_size):
        if batch_dict.get('batch_index', None) is not None:
            # Flattened predictions: pick this sample's rows by mask.
            assert batch_dict['batch_box_preds'].shape.__len__() == 2
            batch_mask = (batch_dict['batch_index'] == index)
        else:
            # Batched predictions: index the first axis directly.
            assert batch_dict['batch_box_preds'].shape.__len__() == 3
            batch_mask = index
        box_preds = batch_dict['batch_box_preds'][batch_mask]
        src_box_preds = box_preds
        if not isinstance(batch_dict['batch_cls_preds'], list):
            cls_preds = batch_dict['batch_cls_preds'][batch_mask]
            src_cls_preds = cls_preds
            assert cls_preds.shape[1] in [1, self.num_class]
            if not batch_dict['cls_preds_normalized']:
                cls_preds = F.sigmoid(cls_preds)
        else:
            cls_preds = [
                x[batch_mask] for x in batch_dict['batch_cls_preds']
            ]
            src_cls_preds = cls_preds
            if not batch_dict['cls_preds_normalized']:
                cls_preds = [F.sigmoid(x) for x in cls_preds]
        if self.post_process_cfg["nms_config"]["multi_classes_nms"]:
            raise NotImplementedError
        else:
            label_preds = paddle.argmax(cls_preds, axis=-1)
            cls_preds = paddle.max(cls_preds, axis=-1)
            if self.dense_head.num_class > 1:
                label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
                label_preds = batch_dict[label_key][index]
            else:
                # Single-class: shift to 1-based labels (0 = background).
                label_preds = label_preds + 1
            final_scores, final_labels, final_boxes = class_agnostic_nms(
                box_scores=cls_preds,
                box_preds=box_preds,
                label_preds=label_preds,
                nms_config=self.post_process_cfg["nms_config"],
                score_thresh=self.post_process_cfg["score_thresh"])
            if not getattr(self, "export_model", False):
                record_dict = {
                    'box3d_lidar': final_boxes,
                    'scores': final_scores,
                    'label_preds': final_labels
                }
                pred_dicts.append(record_dict)
            else:
                pred_dicts.append([final_boxes, final_scores, final_labels])
    return pred_dicts
def _convert_origin_for_eval(self, sample: dict):
    """Shift box centers so the box origin convention is [0.5, 0.5, 0].

    The center is moved by dims * (target_origin - current_origin); the
    sample is modified in place and returned.
    """
    target_origin = [.5, .5, 0]
    boxes = sample.bboxes_3d
    if boxes.origin != target_origin:
        offset = np.array(target_origin) - np.array(boxes.origin)
        boxes[:, :3] += boxes[:, 3:6] * offset
        boxes.origin = target_origin
    return sample
def _parse_results_to_sample(self, results: dict, sample: dict):
    """Convert raw network predictions into evaluation ``Sample`` objects.

    Args:
        results (list[dict]): Per-sample dicts from ``post_processing``
            with 'box3d_lidar', 'label_preds', 'scores'.
        sample (dict): The collated input batch (paths, modality, metas,
            optional calibs).

    Returns:
        list[Sample]: One populated Sample per input, with boxes shifted
        to the evaluation origin convention.
    """
    num_samples = len(results)
    new_results = []
    for i in range(num_samples):
        data = Sample(sample["path"][i], sample["modality"][i])
        bboxes_3d = results[i]["box3d_lidar"].numpy()
        # Undo the +1 label shift applied in post_processing.
        labels = results[i]["label_preds"].numpy() - 1
        confidences = results[i]["scores"].numpy()
        # Swap dims 3 and 4 and remap yaw to -(theta + pi/2): converts
        # the network's box convention back to the dataset convention
        # (presumably l/w order and rotation direction — TODO confirm).
        bboxes_3d[..., 3:5] = bboxes_3d[..., [4, 3]]
        bboxes_3d[..., -1] = -(bboxes_3d[..., -1] + np.pi / 2.)
        data.bboxes_3d = BBoxes3D(bboxes_3d)
        data.bboxes_3d.coordmode = 'Lidar'
        data.bboxes_3d.origin = [0.5, 0.5, 0.5]
        data.bboxes_3d.rot_axis = 2
        data.labels = labels
        data.confidences = confidences
        data.meta = SampleMeta(id=sample["meta"][i])
        if "calibs" in sample:
            data.calibs = [calib.numpy() for calib in sample["calibs"][i]]
        # Shift origin to the [0.5, 0.5, 0] convention used in eval.
        data = self._convert_origin_for_eval(data)
        new_results.append(data)
    return new_results
def export(self, save_dir: str, **kwargs):
    """Save a static-graph inference model to ``save_dir``.

    Args:
        save_dir (str): Output directory for the traced model files.
        **kwargs: Unused; kept for a uniform export interface.
    """
    # Flip submodules into export mode (single sample, batch index 0).
    self.export_model = True
    self.voxelizer.export_model = True
    self.middle_encoder.export_model = True
    save_path = os.path.join(save_dir, 'voxel_rcnn')
    # Input is (N, C) raw points; C comes from the voxel encoder config.
    points_shape = [-1, self.voxel_encoder.in_channels]
    input_spec = [{
        "data":
        InputSpec(shape=points_shape, name='data', dtype='float32')
    }]
    paddle.jit.to_static(self, input_spec=input_spec)
    paddle.jit.save(self, save_path, input_spec=input_spec)
    logger.info("Exported model is saved in {}".format(save_dir))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/voxel_rcnn/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .voxel_rcnn import VoxelRCNN
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/dd3d/dd3d.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from pyquaternion import Quaternion
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.utils import checkpoint
from paddle3d.utils.logger import logger
from paddle3d.models.losses import unproject_points2d
from paddle3d.geometries import BBoxes3D, CoordMode
from paddle3d.sample import Sample, SampleMeta
@manager.MODELS.add_component
class DD3D(nn.Layer):
    """
    DD3D monocular 3D detector: a backbone + FPN feeding FCOS-style 2D and
    (optionally) 3D heads. Training returns a dict of losses; inference
    returns per-image Sample predictions in KITTI camera coordinates.
    """

    def __init__(self,
                 backbone,
                 feature_locations_offset,
                 fpn,
                 fcos2d_head,
                 fcos2d_loss,
                 fcos2d_inference,
                 fcos3d_head,
                 fcos3d_loss,
                 fcos3d_inference,
                 prepare_targets,
                 do_nms,
                 nusc_sample_aggregate,
                 num_classes,
                 pixel_mean,
                 pixel_std,
                 input_strides,
                 size_divisibility,
                 pretrained=None):
        super().__init__()
        self.backbone = backbone
        self.feature_locations_offset = feature_locations_offset
        self.fpn = fpn
        self.fcos2d_head = fcos2d_head
        self.fcos2d_loss = fcos2d_loss
        self.fcos2d_inference = fcos2d_inference
        # 2D-only mode unless a 3D head is provided below.
        self.only_box2d = True
        self.fcos3d_head = fcos3d_head
        if self.fcos3d_head is not None:
            self.only_box2d = False
            self.fcos3d_loss = fcos3d_loss
            self.fcos3d_inference = fcos3d_inference
        self.prepare_targets = prepare_targets
        self.do_nms = do_nms
        # nuScenes inference aggregates detections over all 6 cameras.
        self.nusc_sample_aggregate_in_inference = nusc_sample_aggregate
        self.num_classes = num_classes
        # Normalization statistics stored as (1, C, 1, 1) buffers.
        self.register_buffer(
            "pixel_mean",
            paddle.to_tensor(pixel_mean).reshape([1, -1, 1, 1]))
        self.register_buffer("pixel_std",
                             paddle.to_tensor(pixel_std).reshape([1, -1, 1, 1]))
        self.input_strides = input_strides
        self.size_divisibility = size_divisibility
        self.pretrained = pretrained
        self.init_weight()

    def preprocess_image(self, x, size_divisibility):
        """Normalize images and zero-pad H/W up to multiples of
        `size_divisibility` (right/bottom padding only, so pixel
        coordinates of existing content are preserved)."""
        x = (x.cast('float32') - self.pixel_mean) / self.pixel_std
        h_old, w_old = x.shape[-2:]
        h_new = (
            (h_old +
             (size_divisibility - 1)) // size_divisibility) * size_divisibility
        w_new = (
            (w_old +
             (size_divisibility - 1)) // size_divisibility) * size_divisibility
        x = F.pad(
            x, [0, w_new - w_old, 0, h_new - h_old], value=0.0, mode='constant')
        return x

    def preprocess_box3d(self, box3d, intrinsic):
        """Replace each GT box's 3D translation with its projected 2D center.

        Input layout (per the slicing below): [quat(4), tvec(3), ...];
        output: [quat(4), proj_ctr(2), depth, ...] — index 6 (camera-frame z)
        and the trailing fields are kept via box3d[:, :, 6:].
        """
        # NOTE(review): box_pose is computed but never used.
        box_pose = box3d[:, :, 0:4]
        tvec = box3d[:, :, 4:7]
        # Project the 3D center with the per-sample camera intrinsics.
        proj_ctr = paddle.mm(
            intrinsic.unsqueeze(1).tile([1, tvec.shape[1], 1, 1]),
            tvec.unsqueeze(-1)).squeeze(-1)
        proj_ctr = proj_ctr[:, :, :2] / proj_ctr[:, :, 2:]
        box3d_new = paddle.concat([box3d[:, :, 0:4], proj_ctr, box3d[:, :, 6:]],
                                  axis=2)
        return box3d_new

    def forward(self, samples):
        """Run detection: a loss dict in training, {'preds': [Sample, ...]}
        at inference."""
        images = self.preprocess_image(samples["data"], self.size_divisibility)
        # Replace GT 3D centers with their image-plane projections.
        samples["bboxes_3d"] = self.preprocess_box3d(
            samples["bboxes_3d"], samples['meta']['camera_intrinsic'])
        features = self.backbone(images)
        features = self.fpn(features)
        features = [features[f] for f in features.keys()]
        # Per-level (x, y) input-image coordinates of every feature location.
        locations = self.compute_locations(features)
        logits, box2d_reg, centerness, _ = self.fcos2d_head(features)
        if not self.only_box2d:
            box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, dense_depth = self.fcos3d_head(
                features)
        inv_intrinsics = samples['meta']['camera_intrinsic'].inverse()
        if self.training:
            feature_shapes = [x.shape[-2:] for x in features]
            training_targets = self.prepare_targets(
                locations, samples["bboxes_2d"], samples["bboxes_3d"],
                samples["labels"], feature_shapes)
            losses = {}
            fcos2d_loss, fcos2d_info = self.fcos2d_loss(
                logits, box2d_reg, centerness, training_targets)
            losses.update(fcos2d_loss)
            if not self.only_box2d:
                fcos3d_loss = self.fcos3d_loss(
                    box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf,
                    dense_depth, inv_intrinsics, fcos2d_info, training_targets)
                losses.update(fcos3d_loss)
            # Sum every individual term into a single 'loss' entry.
            loss_total = 0
            for key in losses.keys():
                loss_total += losses[key]
            losses.update({'loss': loss_total})
            return losses
        else:
            pred_instances, fcos2d_info = self.fcos2d_inference(
                logits, box2d_reg, centerness, locations)
            if not self.only_box2d:
                self.fcos3d_inference(box3d_quat, box3d_ctr, box3d_depth,
                                      box3d_size, box3d_conf, inv_intrinsics,
                                      pred_instances, fcos2d_info)
                score_key = "scores_3d"
            else:
                score_key = "scores"
            # Transpose to "image-first", i.e. (B, L)
            pred_instances = list(zip(*pred_instances))
            # Concatenate the per-level prediction dicts of each image.
            pred_instances_cat = []
            for i, pred_instance in enumerate(pred_instances):
                pred_instance_cat = {}
                for key in pred_instance[0].keys():
                    sum_c = sum([
                        pred_instance[j][key].shape[0]
                        for j in range(len(pred_instance))
                    ])
                    # Nothing to concatenate: reuse the (empty) level-0 tensor.
                    if sum_c == 0:
                        pred_instance_cat[key] = pred_instance[0][key]
                    else:
                        pred_instance_cat[key] = paddle.concat([
                            pred_instance[j][key]
                            for j in range(len(pred_instance))
                        ], 0)
                pred_instances_cat.append(pred_instance_cat)
            # 2D NMS and pick top-K.
            if self.do_nms:
                pred_instances = self.fcos2d_inference.nms_and_top_k(
                    pred_instances_cat, score_key)
            pred_dicts = self.post_process(pred_instances, samples)
            return {'preds': pred_dicts}

    def compute_locations(self, features):
        """Compute per-level (x, y) location tensors for all feature maps."""
        locations = []
        for level, feature in enumerate(features):
            h, w = feature.shape[-2:]
            locations_per_level = self.compute_features_locations(
                h,
                w,
                self.input_strides[level],
                feature.dtype,
                offset=self.feature_locations_offset)
            locations.append(locations_per_level)
        return locations

    def compute_features_locations(self,
                                   h,
                                   w,
                                   stride,
                                   dtype='float32',
                                   offset="none"):
        """Return an (h*w, 2) tensor mapping each feature cell back to
        input-image coordinates; "half" shifts locations to cell centers."""
        shifts_x = paddle.arange(0, w * stride, step=stride, dtype=dtype)
        shifts_y = paddle.arange(0, h * stride, step=stride, dtype=dtype)
        shift_y, shift_x = paddle.meshgrid(shifts_y, shifts_x)
        shift_x = shift_x.reshape([-1])
        shift_y = shift_y.reshape([-1])
        locations = paddle.stack((shift_x, shift_y), axis=1)
        if offset == "half":
            locations += stride // 2
        else:
            assert offset == "none"
        return locations

    def resize_instances(self, pred_boxes, height, width, image_size):
        """Rescale 2D boxes from network-input size to (height, width) and
        clip them.

        NOTE(review): boxes are clipped against `image_size` (the network
        input size) *after* being rescaled to (height, width) — confirm this
        is intentional when the original image is larger than the input.
        """
        image_size = [float(image_size[0]), float(image_size[1])]
        scale_x, scale_y = (width.cast('float32') / image_size[1],
                            height.cast('float32') / image_size[0])
        pred_boxes[:, 0::2] *= scale_x
        pred_boxes[:, 1::2] *= scale_y
        pred_boxes[:, 0::2] = pred_boxes[:, 0::2].clip(
            min=0.0, max=image_size[1])
        pred_boxes[:, 1::2] = pred_boxes[:, 1::2].clip(
            min=0.0, max=image_size[0])
        return pred_boxes

    def post_process(self, pred_instances, samples):
        """Convert per-image predictions into Sample objects carrying
        KITTI-camera-frame 3D boxes, labels, scores and 2D boxes."""
        pred_dicts = []
        for i, results_per_image in enumerate(pred_instances):
            # No detections: emit an empty Sample so batch order is preserved.
            if results_per_image['pred_boxes'].shape[0] == 0:
                data = Sample(samples["path"][i], samples["modality"][i])
                data.meta = SampleMeta(id=samples["meta"]['id'][i])
                pred_dicts.append(data)
                continue
            height = samples['image_sizes'][i, 0]
            width = samples['image_sizes'][i, 1]
            bboxes_2d = self.resize_instances(results_per_image['pred_boxes'],
                                              height, width,
                                              samples["data"].shape[-2:])
            bboxes_3d = []
            alpha = []
            for j in range(results_per_image['pred_boxes3d'].shape[0]):
                bbox_3d, alpha_ = self.convert_3d_box_to_kitti(
                    results_per_image['pred_boxes3d'][j:j + 1, :],
                    samples['meta']['camera_intrinsic'][i:i + 1, ...].inverse())
                bboxes_3d.append(bbox_3d)
                alpha.append(alpha_)
            data = Sample(samples["path"][i], samples["modality"][i])
            data.meta = SampleMeta(id=samples["meta"]['id'][i])
            data.calibs = samples["calibs"]
            bboxes_3d = np.array(bboxes_3d)
            labels = results_per_image['pred_classes'].numpy()
            confidences = results_per_image['scores_3d'].numpy()
            bboxes_2d = bboxes_2d.numpy()
            data.bboxes_3d = BBoxes3D(bboxes_3d)
            # KITTI camera-frame boxes with bottom-center origin.
            data.bboxes_3d.origin = [.5, 1., .5]
            data.bboxes_3d.coordmode = CoordMode.KittiCamera
            data.labels = labels
            data.confidences = confidences
            data.alpha = np.stack(alpha, 0)
            data.bboxes_2d = bboxes_2d
            pred_dicts.append(data)
        return pred_dicts

    def convert_3d_box_to_kitti(self, boxes3d, inv_intrinsics):
        """Convert a single predicted box [quat(4), proj_ctr(2), depth, sizes]
        into a KITTI-style list plus the observation angle alpha.

        NOTE(review): the size reordering in the return value
        ([sizes[1], sizes[2], sizes[0]]) is inferred from the slicing below —
        confirm against the 3D head's output layout.
        """
        quat = Quaternion(*boxes3d[:, :4].tolist()[0])
        # Unproject the 2D center with depth to recover the 3D translation.
        ray = unproject_points2d(boxes3d[:, 4:6], inv_intrinsics)
        tvec = (ray * boxes3d[:, 6:7]).numpy()[0]
        sizes = boxes3d[:, 7:].numpy()[0]
        # Shift the box origin down by half the height (index 2 of sizes).
        tvec += np.array([0., sizes[2] / 2.0, 0])
        inversion = Quaternion(axis=[1, 0, 0], radians=np.pi / 2).inverse
        quat = inversion * quat
        v_ = np.float64([[0, 0, 1], [0, 0, 0]])
        # Yaw sign depends on the orientation of the quaternion's axis.
        if quat.axis[2] > 0:
            v = self.pose(
                wxyz=Quaternion(axis=[0, 1, 0], radians=-quat.angle),
                tvec=tvec,
                v_=v_)
            rot_y = -quat.angle
        else:
            v = self.pose(
                wxyz=Quaternion(axis=[0, 1, 0], radians=quat.angle),
                tvec=tvec,
                v_=v_)
            rot_y = quat.angle
        v_ = v[:, ::2]
        # Observation angle: yaw corrected by the viewing-ray direction.
        theta = np.arctan2(abs(v_[1, 0]), abs(v_[1, 1]))
        alpha = rot_y + theta if v_[1, 0] < 0 else rot_y - theta
        # Bound from [-pi, pi]
        if alpha > np.pi:
            alpha -= 2.0 * np.pi
        elif alpha < -np.pi:
            alpha += 2.0 * np.pi
        alpha = np.around(alpha, decimals=2)  # KITTI precision
        return tvec.tolist() + [sizes[1], sizes[2], sizes[0]] + [rot_y], alpha

    def pose(self, wxyz, tvec, v_):
        """Apply the rigid transform (quaternion `wxyz`, translation `tvec`)
        to the row-vector points `v_` via a homogeneous matrix."""
        quat = Quaternion(wxyz)
        matrix = quat.transformation_matrix
        matrix[:3, 3] = tvec
        X = np.hstack([v_, np.ones((len(v_), 1))]).T
        return (np.dot(matrix, X).T)[:, :3]

    def init_weight(self):
        """Load pretrained weights when a checkpoint is configured."""
        if self.pretrained:
            checkpoint.load_pretrained_model(self, self.pretrained)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/dd3d/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dd3d import *
from .prepare_targets import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/dd3d/prepare_targets.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
from paddle3d.apis import manager
INF = 100000000.
@manager.MODELS.add_component
class DD3DTargetPreparer(nn.Layer):
    """
    Assigns ground-truth 2D/3D boxes to FCOS feature locations and flattens
    the per-image, per-level targets for the loss computation.

    This code is based on https://github.com/TRI-ML/dd3d/blob/main/tridet/modeling/dd3d/prepare_targets.py#L11
    """

    def __init__(self,
                 input_strides,
                 num_classes=5,
                 center_sample=True,
                 radius=1.5,
                 dd3d_on=True,
                 sizes_of_interest=(64, 128, 256, 512)):
        """
        Args:
            input_strides (list[int]): Stride of each FPN level.
            num_classes (int): Number of foreground classes; the value
                `num_classes` itself is used as the background label.
            center_sample (bool): Restrict positives to a radius around the
                GT box center instead of the whole box.
            radius (float): Center-sampling radius in units of stride.
            dd3d_on (bool): Also prepare 3D box targets.
            sizes_of_interest (sequence[int]): Per-level upper bounds of the
                regression range (immutable default to avoid the shared
                mutable-default pitfall).
        """
        super(DD3DTargetPreparer, self).__init__()
        self.num_classes = num_classes
        self.center_sample = center_sample
        self.strides = input_strides
        self.radius = radius
        self.dd3d_enabled = dd3d_on
        # Build per-level ranges: [[-1, 64], [64, 128], ..., [512, INF]].
        soi = []
        prev_size = -1
        for s in sizes_of_interest:
            soi.append([prev_size, s])
            prev_size = s
        soi.append([prev_size, INF])
        self.sizes_of_interest = soi

    def forward(self, locations, bboxes_2d, bboxes_3d, labels, feature_shapes):
        """Compute flattened training targets.

        Args:
            locations: list of per-level (Hl*Wl, 2) location tensors.
            bboxes_2d / bboxes_3d / labels: padded per-image GT tensors,
                indexed as [im_i, ...] below.
            feature_shapes: list of per-level (H, W) shapes.

        Returns:
            dict with flattened labels, box2d/box3d regression targets,
            location/image/level indices and foreground indices.
        """
        num_loc_list = [len(loc) for loc in locations]
        # Per-location regression size range, broadcast within each level.
        loc_to_size_range = []
        for l, loc_per_level in enumerate(locations):
            loc_to_size_range_per_level = paddle.to_tensor(
                self.sizes_of_interest[l], dtype=loc_per_level.dtype)
            loc_to_size_range.append(loc_to_size_range_per_level[None].expand(
                [num_loc_list[l], -1]))
        loc_to_size_range = paddle.concat(loc_to_size_range, axis=0)
        locations = paddle.concat(locations, axis=0)
        training_targets = self.compute_targets_for_locations(
            locations, bboxes_2d, bboxes_3d, labels, loc_to_size_range,
            num_loc_list)
        training_targets["locations"] = [
            locations.clone() for _ in range(bboxes_2d.shape[0])
        ]
        training_targets["im_inds"] = [
            paddle.ones([locations.shape[0]], dtype='int64') * i
            for i in range(bboxes_2d.shape[0])
        ]
        box2d = training_targets.pop("box2d", None)
        # Transpose image-first training targets to level-first ones.
        training_targets = {
            k: self._transpose(k, v, num_loc_list)
            for k, v in training_targets.items() if k != "box2d"
        }
        training_targets["fpn_levels"] = [
            paddle.ones([len(loc)], dtype='int64') * level
            for level, loc in enumerate(training_targets["locations"])
        ]
        # Flatten targets: (L x B x H x W, TARGET_SIZE)
        labels = paddle.concat(
            [x.reshape([-1]) for x in training_targets["labels"]], axis=0)
        box2d_reg_targets = paddle.concat(
            [x.reshape([-1, 4]) for x in training_targets["box2d_reg"]], axis=0)
        target_inds = paddle.concat(
            [x.reshape([-1]) for x in training_targets["target_inds"]], axis=0)
        locations = paddle.concat(
            [x.reshape([-1, 2]) for x in training_targets["locations"]], axis=0)
        im_inds = paddle.concat(
            [x.reshape([-1]) for x in training_targets["im_inds"]], axis=0)
        fpn_levels = paddle.concat(
            [x.reshape([-1]) for x in training_targets["fpn_levels"]], axis=0)
        # Foreground locations: anything not labeled as background.
        pos_inds = paddle.nonzero(labels != self.num_classes).squeeze(1)
        targets = {
            "labels": labels,
            "box2d_reg_targets": box2d_reg_targets,
            "locations": locations,
            "target_inds": target_inds,
            "im_inds": im_inds,
            "fpn_levels": fpn_levels,
            "pos_inds": pos_inds
        }
        if self.dd3d_enabled:
            box3d_targets = paddle.concat(
                [x.reshape([-1, 10]) for x in training_targets["box3d"]],
                axis=0)
            targets.update({"box3d_targets": box3d_targets})
            if box2d is not None:
                # Original format is B x L x (H x W, 4)
                # Need to be in L x (B, 4, H, W).
                batched_box2d = []
                for lvl, per_lvl_box2d in enumerate(zip(*box2d)):
                    # B x (H x W, 4)
                    h, w = feature_shapes[lvl]
                    batched_box2d_lvl = paddle.stack(
                        [x.T.reshape([4, h, w]) for x in per_lvl_box2d], axis=0)
                    batched_box2d.append(batched_box2d_lvl)
                targets.update({"batched_box2d": batched_box2d})
        return targets

    def compute_targets_for_locations(self, locations, bboxes_2d, bboxes_3d,
                                      labels_batch, size_ranges, num_loc_list):
        """Per image, match each location to the smallest-area GT box that
        contains it, subject to the per-level regression size range."""
        labels = []
        box2d_reg = []
        if self.dd3d_enabled:
            box3d = []
        target_inds = []
        xs, ys = locations[:, 0], locations[:, 1]
        num_targets = 0
        for im_i in range(bboxes_2d.shape[0]):
            bboxes = bboxes_2d[im_i, ...]
            labels_per_im = labels_batch[im_i, ...]
            # No GT: every location is background for this image.
            if bboxes.numel() == 0:
                labels.append(
                    paddle.zeros([locations.shape[0]]) + self.num_classes)
                box2d_reg.append(paddle.zeros((locations.shape[0], 4)))
                target_inds.append(paddle.zeros([locations.shape[0]]) - 1)
                if self.dd3d_enabled:
                    box3d.append(paddle.zeros((locations.shape[0], 10)))
                continue
            area = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
            # (num_locations, num_gt) left/top/right/bottom distances.
            l = xs[:, None] - bboxes[:, 0][None]
            t = ys[:, None] - bboxes[:, 1][None]
            r = bboxes[:, 2][None] - xs[:, None]
            b = bboxes[:, 3][None] - ys[:, None]
            box2d_reg_per_im = paddle.stack([l, t, r, b], axis=2)
            if self.center_sample:
                is_in_boxes = self.get_sample_region(bboxes, num_loc_list, xs,
                                                     ys)
            else:
                is_in_boxes = box2d_reg_per_im.min(axis=2) > 0
            max_reg_targets_per_im = box2d_reg_per_im.max(axis=2)
            # Limit the regression range for each location.
            is_cared_in_the_level = \
                (max_reg_targets_per_im >= size_ranges[:, 0:1]) & \
                (max_reg_targets_per_im <= size_ranges[:, 1:2])
            locations_to_gt_area = area[None].tile([len(locations), 1])
            locations_to_gt_area[is_in_boxes == 0] = INF
            locations_to_gt_area[is_cared_in_the_level == 0] = INF
            # If there are still more than one objects for a location,
            # we choose the one with minimal area.
            locations_to_min_area = locations_to_gt_area.min(axis=1)
            locations_to_gt_inds = locations_to_gt_area.argmin(axis=1)
            indes = paddle.stack(
                [paddle.arange(len(locations)), locations_to_gt_inds], 1)
            box2d_reg_per_im = paddle.gather_nd(box2d_reg_per_im, indes)
            target_inds_per_im = locations_to_gt_inds + num_targets
            # BUGFIX: advance the flattened-GT offset by the number of boxes
            # in *this* image (bboxes.shape[0]); it previously added the
            # batch size (bboxes_2d.shape[0]), which made target_inds collide
            # across images whenever the per-image box count != batch size.
            num_targets += bboxes.shape[0]
            labels_per_im = labels_per_im[locations_to_gt_inds]
            # Locations matched to no box get the background label.
            labels_per_im[locations_to_min_area == INF] = self.num_classes
            labels.append(labels_per_im)
            box2d_reg.append(box2d_reg_per_im)
            target_inds.append(target_inds_per_im)
            if self.dd3d_enabled:
                # 3D box targets follow the same location-to-GT assignment.
                box3d_per_im = bboxes_3d[im_i, ...][locations_to_gt_inds]
                box3d.append(box3d_per_im)
        ret = {
            "labels": labels,
            "box2d_reg": box2d_reg,
            "target_inds": target_inds
        }
        if self.dd3d_enabled:
            ret.update({"box3d": box3d})
        return ret

    def get_sample_region(self, boxes, num_loc_list, loc_xs, loc_ys):
        """Center sampling: a location is positive only if it falls within
        `radius * stride` of a GT center, clamped to the GT box itself."""
        center_x = boxes[:, 0::2].sum(axis=-1) * 0.5
        center_y = boxes[:, 1::2].sum(axis=-1) * 0.5
        num_gts = boxes.shape[0]
        K = len(loc_xs)
        boxes = boxes[None].expand([K, num_gts, 4])
        center_x = center_x[None].expand([K, num_gts])
        center_y = center_y[None].expand([K, num_gts])
        center_gt = paddle.zeros(boxes.shape)
        # No GT at all: nothing is positive.
        if center_x.numel() == 0 or center_x[..., 0].sum() == 0:
            return paddle.zeros(loc_xs.shape).cast('bool')
        beg = 0
        for level, num_loc in enumerate(num_loc_list):
            end = beg + num_loc
            stride = self.strides[level] * self.radius
            xmin = center_x[beg:end] - stride
            ymin = center_y[beg:end] - stride
            xmax = center_x[beg:end] + stride
            ymax = center_y[beg:end] + stride
            # Clamp the sampling region so it never exceeds the GT box.
            center_gt[beg:end, :, 0] = paddle.where(xmin > boxes[beg:end, :, 0],
                                                    xmin, boxes[beg:end, :, 0])
            center_gt[beg:end, :, 1] = paddle.where(ymin > boxes[beg:end, :, 1],
                                                    ymin, boxes[beg:end, :, 1])
            center_gt[beg:end, :, 2] = paddle.where(xmax > boxes[beg:end, :, 2],
                                                    boxes[beg:end, :, 2], xmax)
            center_gt[beg:end, :, 3] = paddle.where(ymax > boxes[beg:end, :, 3],
                                                    boxes[beg:end, :, 3], ymax)
            beg = end
        left = loc_xs[:, None] - center_gt[..., 0]
        right = center_gt[..., 2] - loc_xs[:, None]
        top = loc_ys[:, None] - center_gt[..., 1]
        bottom = center_gt[..., 3] - loc_ys[:, None]
        center_bbox = paddle.stack((left, top, right, bottom), -1)
        inside_gt_bbox_mask = center_bbox.min(-1) > 0
        return inside_gt_bbox_mask

    def _transpose(self, k, training_targets, num_loc_list):
        '''
        This function is used to transpose image first training targets to level first ones
        :return: level first training targets
        '''
        if k == "box3d":
            # box3d entries are Tensors: use the Tensor.split method.
            for im_i in range(len(training_targets)):
                training_targets[im_i] = training_targets[im_i].split(
                    num_loc_list, axis=0)
            targets_level_first = []
            for targets_per_level in zip(*training_targets):
                targets_level_first.append(
                    paddle.concat(targets_per_level, axis=0))
            return targets_level_first
        for im_i in range(len(training_targets)):
            training_targets[im_i] = paddle.split(
                training_targets[im_i], num_loc_list, axis=0)
        targets_level_first = []
        for targets_per_level in zip(*training_targets):
            targets_level_first.append(paddle.concat(targets_per_level, axis=0))
        return targets_level_first
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/base/base_multiview_detection.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import paddle
import paddle.nn as nn
from paddle3d.models.base import BaseDetectionModel
class BaseMultiViewModel(BaseDetectionModel):
    """Base class for multi-view (surround-camera) 3D detection models."""

    def __init__(self,
                 box_with_velocity: bool = False,
                 num_cameras: int = 6,
                 need_timestamp: bool = False,
                 image_height: Optional[int] = -1,
                 image_width: Optional[int] = -1):
        super().__init__(box_with_velocity=box_with_velocity)
        self.num_cameras = num_cameras
        self.image_height = image_height
        self.image_width = image_width
        self.need_timestamp = need_timestamp

    @property
    def inputs(self) -> List[dict]:
        """Exported-model input specs: images, img2lidars, optional timestamps."""
        specs = [{
            'name': 'images',
            'dtype': 'float32',
            'shape':
            [1, self.num_cameras, 3, self.image_height, self.image_width]
        }, {
            'name': 'img2lidars',
            'dtype': 'float32',
            'shape': [1, self.num_cameras, 4, 4]
        }]
        if self.need_timestamp:
            specs.append({
                'name': 'timestamps',
                'dtype': 'float32',
                'shape': [1, self.num_cameras]
            })
        return specs

    @property
    def sensor(self) -> str:
        """Multi-view models consume camera data."""
        return "camera"
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/base/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base_model import add_export_args, Base3DModel
from .base_detection import BaseDetectionModel
from .base_lidar_detection import BaseLidarModel
from .base_mono_detection import BaseMonoModel
from .base_multiview_detection import BaseMultiViewModel
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/base/base_model.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import contextlib
import os
from typing import List, Optional
import paddle
import paddle.nn as nn
from paddle3d.slim.quant import QAT
from paddle3d.utils.logger import logger
def add_export_args(*args, **kwargs):
    """Decorator factory that records export CLI options on the function.

    The first positional argument is the flag name (a ``--`` prefix is added
    when absent); the keyword arguments are stored as that flag's argparse
    options in the function's ``arg_dict`` attribute.
    """

    def _wrapper(func):
        arg_dict = getattr(func, 'arg_dict', None)
        if arg_dict is None:
            arg_dict = func.arg_dict = {}
        flag = args[0]
        if not flag.startswith('--'):
            flag = '--' + flag
        arg_dict[flag] = dict(kwargs)
        return func

    return _wrapper
class Base3DModel(abc.ABC, nn.Layer):
    """Abstract base class for all Paddle3D models.

    Dispatches forward() to train/test/export implementations and provides
    static-graph export plus optional QAT (quantization-aware training).
    """

    def __init__(self):
        super().__init__()
        # When True, forward() routes to export_forward() (see set_export_mode).
        self.in_export_mode = False
        # Set to True by build_slim_model() once the model is wrapped for QAT.
        self._quant = False

    @property
    def input_spec(self) -> paddle.static.InputSpec:
        """Input Tensor specifier when exporting the model."""
        # NOTE(review): actually returns a one-element list holding a dict of
        # InputSpec keyed by input name — the annotation understates this.
        data = {
            _input['name']: paddle.static.InputSpec(**_input)
            for _input in self.inputs
        }
        return [data]

    @abc.abstractproperty
    def inputs(self) -> List[dict]:
        """Model input description. This attribute will be used to construct input_spec."""

    @abc.abstractproperty
    def outputs(self) -> List[dict]:
        """Model output description."""

    def forward(self, samples, *args, **kwargs):
        """Dispatch to export/train/test forward depending on current mode."""
        if self.in_export_mode:
            return self.export_forward(samples, *args, **kwargs)
        elif self.training:
            return self.train_forward(samples, *args, **kwargs)
        return self.test_forward(samples, *args, **kwargs)

    @abc.abstractproperty
    def sensor(self) -> str:
        """The sensor type used in the model sample, usually camera or lidar."""

    def set_export_mode(self, mode: bool = True):
        """Flip export mode on this layer and every sublayer."""
        for sublayer in self.sublayers(include_self=True):
            sublayer.in_export_mode = mode

    @abc.abstractmethod
    def test_forward(self):
        """Test forward function."""

    @abc.abstractmethod
    def train_forward(self):
        """Training forward function."""

    @abc.abstractmethod
    def export_forward(self):
        """Export forward function."""

    @contextlib.contextmanager
    def export_guard(self):
        """Context manager that enables export mode and restores it on exit."""
        self.set_export_mode(True)
        yield
        self.set_export_mode(False)

    @property
    def save_name(self):
        """Default artifact name: the lower-cased class name."""
        return self.__class__.__name__.lower()

    @property
    def apollo_deploy_name(self):
        """Name used for Apollo deployment (the class name as-is)."""
        return self.__class__.__name__

    @property
    def is_quant_model(self) -> bool:
        """Whether build_slim_model() has been applied."""
        return self._quant

    def build_slim_model(self, slim_config: str):
        """ Slim the model and update the cfg params
        """
        self._quant = True
        logger.info("Build QAT model.")
        self.qat = QAT(quant_config=slim_config)
        # slim the model
        self.qat(self)

    def export(self, save_dir: str, name: Optional[str] = None, **kwargs):
        """Export the model as a static-graph inference model under save_dir."""
        name = name or self.save_name
        with self.export_guard():
            # NOTE(review): to_static receives self.input_spec directly while
            # the save calls wrap it in another list — confirm the extra
            # nesting is intended.
            paddle.jit.to_static(self, input_spec=self.input_spec)
            path = os.path.join(save_dir, name)
            if self.is_quant_model:
                self.qat.save_quantized_model(
                    self, path, input_spec=[self.input_spec], **kwargs)
            else:
                paddle.jit.save(self, path, input_spec=[self.input_spec])
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/base/base_lidar_detection.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import paddle
import paddle.nn as nn
from paddle3d.geometries import CoordMode
from paddle3d.models.base.base_detection import BaseDetectionModel
class BaseLidarModel(BaseDetectionModel):
    """Base class for LiDAR-based 3D detection models."""

    def __init__(self,
                 box_with_velocity: bool = False,
                 with_voxelizer: bool = False,
                 max_num_points_in_voxel: int = -1,
                 in_channels: int = None):
        super().__init__(box_with_velocity=box_with_velocity)
        self.with_voxelizer = with_voxelizer
        self.max_num_points_in_voxel = max_num_points_in_voxel
        self.in_channels = in_channels
        # Number of features per raw point; subclasses are expected to set it.
        self.point_dim = -1

    @property
    def inputs(self) -> List[dict]:
        """Exported-model inputs: raw points, or pre-voxelized tensors."""
        if self.with_voxelizer:
            # Voxelization happens inside the network: feed raw points.
            return [{
                'name': 'data',
                'dtype': 'float32',
                'shape': [-1, self.point_dim]
            }]
        return [{
            'name': 'voxels',
            'dtype': 'float32',
            'shape': [-1, self.max_num_points_in_voxel, self.in_channels]
        }, {
            'name': 'coords',
            'dtype': 'int32',
            'shape': [-1, 3]
        }, {
            'name': 'num_points_per_voxel',
            'dtype': 'int32',
            'shape': [-1]
        }]

    @property
    def sensor(self) -> str:
        """LiDAR models consume point-cloud data."""
        return "lidar"
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/base/base_mono_detection.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import paddle
import paddle.nn as nn
from paddle3d.models.base import BaseDetectionModel
class BaseMonoModel(BaseDetectionModel):
    """Base class for monocular (single-camera) 3D detection models."""

    def __init__(self,
                 box_with_velocity: bool = False,
                 need_camera_to_image: bool = True,
                 need_lidar_to_camera: bool = False,
                 need_down_ratios: bool = False,
                 image_height: Optional[int] = -1,
                 image_width: Optional[int] = -1):
        super().__init__(box_with_velocity=box_with_velocity)
        self.need_camera_to_image = need_camera_to_image
        self.need_lidar_to_camera = need_lidar_to_camera
        self.image_height = image_height
        self.image_width = image_width
        self.need_down_ratios = need_down_ratios

    @property
    def inputs(self) -> List[dict]:
        """Exported-model inputs: image plus optional projection matrices."""
        specs = [{
            'name': 'images',
            'dtype': 'float32',
            'shape': [1, 3, self.image_height, self.image_width]
        }]
        if self.need_camera_to_image:
            specs.append({
                'name': 'trans_cam_to_img',
                'dtype': 'float32',
                'shape': [1, 3, 4]
            })
        if self.need_lidar_to_camera:
            specs.append({
                'name': 'trans_lidar_to_cam',
                'dtype': 'float32',
                'shape': [1, 4, 4]
            })
        return specs

    @property
    def sensor(self) -> str:
        """Monocular models consume camera data."""
        return "camera"
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/base/base_detection.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle3d.models.base import Base3DModel
from typing import List
class BaseDetectionModel(Base3DModel):
    """Base class for 3D detection models; defines the shared output spec."""

    def __init__(self, box_with_velocity: bool = False):
        super().__init__()
        self.box_with_velocity = box_with_velocity

    @property
    def outputs(self) -> List[dict]:
        """Model output description."""
        # Two extra box dimensions when velocity is predicted.
        boxdim = 9 if self.box_with_velocity else 7
        return [
            {'name': 'box3d', 'dtype': 'float32', 'shape': [-1, boxdim]},
            {'name': 'label', 'dtype': 'int32', 'shape': [-1]},
            {'name': 'confidence', 'dtype': 'float32', 'shape': [-1]},
        ]
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/middle_encoders/sparsenet.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/models/backbones_3d/spconv_backbone.py#L69
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import numpy as np
import paddle
from paddle import sparse
from paddle.sparse import nn
from paddle3d.apis import manager
from paddle3d.models.layers import param_init
__all__ = ['SparseNet3D']
def sparse_conv_bn_relu(in_channels,
                        out_channels,
                        kernel_size,
                        stride=1,
                        padding=0,
                        conv_type='subm'):
    """Build a sparse (conv -> BatchNorm -> ReLU) block.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        kernel_size (int|tuple): Convolution kernel size.
        stride (int|tuple): Convolution stride. Used by 'spconv' only;
            submanifold convolutions always run with stride 1.
        padding (int|tuple): Convolution padding.
        conv_type (str): 'subm' (submanifold sparse conv) or 'spconv'
            (regular sparse conv). 'inverseconv' is not implemented.

    Returns:
        paddle.nn.Sequential: conv + BatchNorm + ReLU.

    Raises:
        NotImplementedError: For unsupported conv_type values.
    """
    if conv_type == 'subm':
        # Fix: honor the caller-supplied `padding` — it was previously
        # hard-coded to 0, silently ignoring e.g. padding=1 from callers
        # (upstream OpenPCDet passes padding through here as well).
        # Stride stays 1: submanifold convs preserve the active-site pattern.
        conv = nn.SubmConv3D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=padding,
            bias_attr=False)
    elif conv_type == 'spconv':
        conv = nn.Conv3D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias_attr=False)
    elif conv_type == 'inverseconv':
        raise NotImplementedError
    else:
        raise NotImplementedError
    m = paddle.nn.Sequential(
        conv,
        nn.BatchNorm(out_channels, epsilon=1e-3, momentum=1 - 0.01),
        nn.ReLU(),
    )
    return m
@manager.MIDDLE_ENCODERS.add_component
class SparseNet3D(paddle.nn.Layer):
    """Sparse 3D convolutional middle encoder.

    Encodes per-voxel features with a stack of (submanifold) sparse 3D
    convolutions, then collapses the depth axis into channels to produce a
    dense bird's-eye-view feature map, while also exposing the multi-scale
    sparse feature pyramid.

    Args:
        in_channels (int): Channels of the incoming voxel features.
        voxel_size (tuple): Voxel size (x, y, z) used during voxelization.
        point_cloud_range (tuple): (x_min, y_min, z_min, x_max, y_max, z_max).
    """
    def __init__(self,
                 in_channels=128,
                 voxel_size=(0.2, 0.2, 4),
                 point_cloud_range=(0, -40, -3, 70.4, 40, 1)):
        super(SparseNet3D, self).__init__()
        self.conv_input = paddle.nn.Sequential(
            nn.SubmConv3D(in_channels, 16, 3, padding=1, bias_attr=False),
            nn.BatchNorm(16, epsilon=1e-3, momentum=1 - 0.01), nn.ReLU())
        self.conv1 = paddle.nn.Sequential(
            sparse_conv_bn_relu(16, 16, 3, padding=1), )
        # Each downsampling stage halves the spatial resolution ('spconv'
        # stride-2) and is followed by two submanifold conv blocks.
        self.conv2 = paddle.nn.Sequential(
            sparse_conv_bn_relu(
                16, 32, 3, stride=2, padding=1, conv_type='spconv'),
            sparse_conv_bn_relu(32, 32, 3, padding=1),
            sparse_conv_bn_relu(32, 32, 3, padding=1))
        self.conv3 = paddle.nn.Sequential(
            sparse_conv_bn_relu(
                32, 64, 3, stride=2, padding=1, conv_type='spconv'),
            sparse_conv_bn_relu(64, 64, 3, padding=1),
            sparse_conv_bn_relu(64, 64, 3, padding=1))
        self.conv4 = paddle.nn.Sequential(
            sparse_conv_bn_relu(
                64, 64, 3, stride=2, padding=(0, 1, 1), conv_type='spconv'),
            sparse_conv_bn_relu(64, 64, 3, padding=1),
            sparse_conv_bn_relu(64, 64, 3, padding=1),
        )
        last_pad = 0
        # Final conv compresses the remaining depth slices before flattening.
        self.extra_conv = paddle.nn.Sequential(
            nn.Conv3D(
                64,
                128, (3, 1, 1),
                stride=(2, 1, 1),
                padding=last_pad,
                bias_attr=False),  # [200, 150, 5] -> [200, 150, 2]
            nn.BatchNorm(128, epsilon=1e-3, momentum=1 - 0.01),
            nn.ReLU(),
        )
        point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
        voxel_size = np.array(voxel_size, dtype=np.float32)
        grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        # Sparse tensor spatial shape in (z, y, x) order; the +1 along z
        # follows the upstream OpenPCDet implementation this file is based on.
        self.sparse_shape = np.array(grid_size[::-1]) + [1, 0, 0]
        self.in_channels = in_channels
        self.num_point_features = 128
        # Channel widths of the multi-scale outputs returned by forward().
        self.backbone_channels = {
            'x_conv1': 16,
            'x_conv2': 32,
            'x_conv3': 64,
            'x_conv4': 64
        }
        self.init_weight()
    def init_weight(self):
        """Re-initialize conv parameters and set BN affine params to (1, 0)."""
        for layer in self.sublayers():
            if isinstance(layer, (nn.Conv3D, nn.SubmConv3D)):
                param_init.reset_parameters(layer)
            if isinstance(layer, nn.BatchNorm):
                param_init.constant_init(layer.weight, value=1)
                param_init.constant_init(layer.bias, value=0)
    def forward(self, voxel_features, coors, batch_size):
        """Encode voxel features into a dense BEV map plus a sparse pyramid.

        Args:
            voxel_features: Per-voxel feature vectors, one row per voxel.
            coors: Per-voxel coordinates; transposed to (ndim, nnz) to serve
                as sparse indices (batch index plus 3 spatial dims).
            batch_size (int): Number of samples in the batch.

        Returns:
            dict: 'spatial_features' (N, C*D, H, W), its stride, and the
            multi-scale sparse features/strides for downstream heads.
        """
        shape = [batch_size] + list(self.sparse_shape) + [self.in_channels]
        sp_x = sparse.sparse_coo_tensor(
            coors.transpose((1, 0)),
            voxel_features,
            shape=shape,
            stop_gradient=False)
        x = self.conv_input(sp_x)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.extra_conv(x_conv4)
        # Densify, move channels ahead of the spatial dims, then fold the
        # depth dimension into channels to obtain a 2D BEV feature map.
        out = out.to_dense()
        out = paddle.transpose(out, perm=[0, 4, 1, 2, 3])
        N, C, D, H, W = out.shape
        out = paddle.reshape(out, shape=[N, C * D, H, W])
        batch_dict = {}
        batch_dict.update({
            'spatial_features': out,
            'spatial_features_stride': 8
        })
        batch_dict.update({
            'multi_scale_3d_features': {
                'x_conv1': x_conv1,
                'x_conv2': x_conv2,
                'x_conv3': x_conv3,
                'x_conv4': x_conv4,
            }
        })
        batch_dict.update({
            'multi_scale_3d_strides': {
                'x_conv1': 1,
                'x_conv2': 2,
                'x_conv3': 4,
                'x_conv4': 8,
            }
        })
        return batch_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/middle_encoders/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import pillar_scatter
from .pillar_scatter import PointPillarsScatter
from .sparse_resnet import SparseResNet3D
from .sparsenet import SparseNet3D
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/middle_encoders/sparse_resnet.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/tianweiy/CenterPoint/blob/master/det3d/models/backbones/scn.py
Ths copyright of tianweiy/CenterPoint is as follows:
MIT License [see LICENSE for details].
"""
import numpy as np
import paddle
from paddle import sparse
from paddle.sparse import nn
from paddle3d.apis import manager
from paddle3d.models.layers import param_init
__all__ = ['SparseResNet3D']
def conv3x3(in_out_channels, out_out_channels, stride=1, bias_attr=True):
    """3x3 submanifold sparse convolution with unit padding."""
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=1,
        bias_attr=bias_attr,
    )
    return nn.SubmConv3D(in_out_channels, out_out_channels, **conv_kwargs)
def conv1x1(in_out_channels, out_out_channels, stride=1, bias_attr=True):
    """1x1 convolution"""
    # NOTE(review): padding=1 is unusual for a 1x1 kernel (conv3x3 uses the
    # same padding for a 3x3 kernel). The upstream CenterPoint reference uses
    # padding=1 here too, and for a submanifold sparse conv the output sites
    # mirror the input sites, so this is presumably inert -- confirm against
    # paddle.sparse.nn.SubmConv3D semantics before changing.
    return nn.SubmConv3D(
        in_out_channels,
        out_out_channels,
        kernel_size=1,
        stride=stride,
        padding=1,
        bias_attr=bias_attr)
class SparseBasicBlock(paddle.nn.Layer):
    """ResNet-style basic block built from submanifold sparse convolutions.

    Two conv3x3 -> BN -> ReLU stages with an identity (or `downsample`)
    shortcut added via `sparse.add` before the final ReLU.

    Args:
        in_channels (int): Input channel count.
        out_channels (int): Output channel count.
        stride (int): Stride applied by the first convolution.
        downsample (paddle.nn.Layer|None): Optional projection applied to the
            shortcut when shape/channels change.
    """
    # Output channel multiplier, kept for ResNet API parity.
    expansion = 1
    def __init__(
            self,
            in_channels,
            out_channels,
            stride=1,
            downsample=None,
    ):
        super(SparseBasicBlock, self).__init__()
        bias_attr = True
        self.conv1 = conv3x3(
            in_channels, out_channels, stride, bias_attr=bias_attr)
        # NOTE(review): momentum=0.01 here, while sparsenet.py in this package
        # uses `1 - 0.01`; Paddle's BatchNorm momentum is the running-stat
        # decay, so these two files disagree -- confirm which is intended.
        self.bn1 = nn.BatchNorm(out_channels, epsilon=1e-3, momentum=0.01)
        self.relu = nn.ReLU()
        self.conv2 = conv3x3(out_channels, out_channels, bias_attr=bias_attr)
        self.bn2 = nn.BatchNorm(out_channels, epsilon=1e-3, momentum=0.01)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Apply conv-BN-ReLU twice and add the (possibly projected) shortcut."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out = sparse.add(out, identity)
        out = self.relu(out)
        return out
@manager.MIDDLE_ENCODERS.add_component
class SparseResNet3D(paddle.nn.Layer):
    """Sparse 3D ResNet middle encoder (CenterPoint-style).

    Stacks SparseBasicBlock stages with stride-2 sparse convolutions between
    them, then flattens the depth axis into channels to emit a dense BEV
    feature map.

    Args:
        in_channels (int): Channels of the incoming voxel features.
        voxel_size (tuple): Voxel size (x, y, z) used during voxelization.
        point_cloud_range (tuple): (x_min, y_min, z_min, x_max, y_max, z_max).
    """
    def __init__(self,
                 in_channels=128,
                 voxel_size=(0.2, 0.2, 4),
                 point_cloud_range=(0, -40, -3, 70.4, 40, 1)):
        super(SparseResNet3D, self).__init__()
        self.zero_init_residual = False
        # NOTE(review): BatchNorm momentum=0.01 throughout this class, while
        # sparsenet.py uses `1 - 0.01`; Paddle's momentum is the running-stat
        # decay, so the two encoders disagree -- confirm which is intended.
        # input: # [1600, 1200, 41]
        self.conv_input = paddle.nn.Sequential(
            nn.SubmConv3D(in_channels, 16, 3, bias_attr=False),
            nn.BatchNorm(16, epsilon=1e-3, momentum=0.01), nn.ReLU())
        self.conv1 = paddle.nn.Sequential(
            SparseBasicBlock(16, 16),
            SparseBasicBlock(16, 16),
        )
        self.conv2 = paddle.nn.Sequential(
            nn.Conv3D(16, 32, 3, 2, padding=1,
                      bias_attr=False),  # [1600, 1200, 41] -> [800, 600, 21]
            nn.BatchNorm(32, epsilon=1e-3, momentum=0.01),
            nn.ReLU(),
            SparseBasicBlock(32, 32),
            SparseBasicBlock(32, 32),
        )
        self.conv3 = paddle.nn.Sequential(
            nn.Conv3D(32, 64, 3, 2, padding=1,
                      bias_attr=False),  # [800, 600, 21] -> [400, 300, 11]
            nn.BatchNorm(64, epsilon=1e-3, momentum=0.01),
            nn.ReLU(),
            SparseBasicBlock(64, 64),
            SparseBasicBlock(64, 64),
        )
        self.conv4 = paddle.nn.Sequential(
            nn.Conv3D(64, 128, 3, 2, padding=[0, 1, 1],
                      bias_attr=False),  # [400, 300, 11] -> [200, 150, 5]
            nn.BatchNorm(128, epsilon=1e-3, momentum=0.01),
            nn.ReLU(),
            SparseBasicBlock(128, 128),
            SparseBasicBlock(128, 128),
        )
        # Compress the remaining depth slices before flattening to BEV.
        self.extra_conv = paddle.nn.Sequential(
            nn.Conv3D(128, 128, (3, 1, 1), (2, 1, 1),
                      bias_attr=False),  # [200, 150, 5] -> [200, 150, 2]
            nn.BatchNorm(128, epsilon=1e-3, momentum=0.01),
            nn.ReLU(),
        )
        point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
        voxel_size = np.array(voxel_size, dtype=np.float32)
        grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        # Sparse tensor spatial shape in (z, y, x) order; +1 along z follows
        # the upstream CenterPoint implementation this file is based on.
        self.sparse_shape = np.array(grid_size[::-1]) + [1, 0, 0]
        self.in_channels = in_channels
        self.init_weight()
    def init_weight(self):
        """Re-initialize conv parameters and set BN affine params to (1, 0)."""
        for layer in self.sublayers():
            if isinstance(layer, (nn.Conv3D, nn.SubmConv3D)):
                param_init.reset_parameters(layer)
            if isinstance(layer, nn.BatchNorm):
                param_init.constant_init(layer.weight, value=1)
                param_init.constant_init(layer.bias, value=0)
    def forward(self, voxel_features, coors, batch_size):
        """Encode voxel features and return a dense BEV map (N, C*D, H, W).

        Args:
            voxel_features: Per-voxel feature vectors, one row per voxel.
            coors: Per-voxel coordinates; transposed to (ndim, nnz) to serve
                as sparse indices (batch index plus 3 spatial dims).
            batch_size (int): Number of samples in the batch.
        """
        shape = [batch_size] + list(self.sparse_shape) + [self.in_channels]
        sp_x = sparse.sparse_coo_tensor(
            coors.transpose((1, 0)),
            voxel_features,
            shape=shape,
            stop_gradient=False)
        x = self.conv_input(sp_x)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.extra_conv(x_conv4)
        # Densify, move channels ahead of the spatial dims, then fold depth
        # into channels to obtain a 2D BEV feature map.
        out = out.to_dense()
        out = paddle.transpose(out, perm=[0, 4, 1, 2, 3])
        N, C, D, H, W = out.shape
        out = paddle.reshape(out, shape=[N, C * D, H, W])
        return out
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/middle_encoders/pillar_scatter.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/tianweiy/CenterPoint/blob/master/det3d/models/readers/pillar_encoder.py
Ths copyright of tianweiy/CenterPoint is as follows:
MIT License [see LICENSE for details].
https://github.com/tianweiy/CenterPoint/blob/master/det3d/models/readers/pillar_encoder.py fork from SECOND.
Code written by Alex Lang and Oscar Beijbom, 2018.
Licensed under MIT License [see LICENSE].
"""
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
__all__ = ['PointPillarsScatter']
@manager.MIDDLE_ENCODERS.add_component
class PointPillarsScatter(nn.Layer):
"""Point Pillar's Scatter.
Converts learned features from dense tensor to sparse pseudo image.
Args:
in_channels (int): Channels of input features.
"""
def __init__(self, in_channels, voxel_size, point_cloud_range):
super().__init__()
self.in_channels = in_channels
point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
voxel_size = np.array(voxel_size, dtype=np.float32)
grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
grid_size = np.round(grid_size).astype(np.int64)
self.nx = int(grid_size[0])
self.ny = int(grid_size[1])
def forward(self, voxel_features, coords, batch_size):
"""Foraward function to scatter features."""
return self.forward_batch(voxel_features, coords, batch_size)
def forward_batch(self, voxel_features, coords, batch_size):
"""Scatter features of single sample.
Args:
voxel_features (paddle.Tensor): Voxel features in shape (N, M, C).
coords (paddle.Tensor): Coordinates of each voxel in shape (N, 4).
The first column indicates the sample ID.
batch_size (int): Number of samples in the current batch.
"""
if not getattr(self, "in_export_mode", False):
# batch_canvas will be the final output.
batch_canvas = []
for batch_itt in range(batch_size):
# Create the canvas for this sample
canvas = paddle.zeros([self.nx * self.ny, self.in_channels],
dtype=voxel_features.dtype)
# Only include non-empty pillars
batch_mask = coords[:, 0] == batch_itt
this_coords = coords[batch_mask]
indices = this_coords[:, 2] * self.nx + this_coords[:, 3]
indices = indices.astype('int32')
voxels = voxel_features[batch_mask]
# Now scatter the blob back to the canvas.
canvas = paddle.scatter(canvas, indices, voxels, overwrite=True)
canvas = canvas.transpose([1, 0])
# Append to a list for later stacking.
batch_canvas.append(canvas)
# Stack to 3-dim tensor (batch-size, in_channels, nrows*ncols)
batch_canvas = paddle.concat(batch_canvas, 0)
# Undo the column stacking to final 4-dim tensor
batch_canvas = batch_canvas.reshape(
[batch_size, self.in_channels, self.ny, self.nx])
return batch_canvas
else:
canvas = paddle.zeros([self.nx * self.ny, self.in_channels],
dtype=voxel_features.dtype)
# Only include non-empty pillars
indices = coords[:, 2] * self.nx + coords[:, 3]
indices = indices.astype('int32')
canvas = paddle.scatter(
canvas, indices, voxel_features, overwrite=True)
canvas = canvas.transpose([1, 0])
canvas = canvas.reshape([1, self.in_channels, self.ny, self.nx])
return canvas
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/normalize.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
from paddle3d.apis import manager
from paddle3d.sample import Sample
from paddle3d.transforms import functional as F
from paddle3d.transforms.base import TransformABC
__all__ = ["Normalize", "NormalizeRangeImage"]
@manager.TRANSFORMS.add_component
class Normalize(TransformABC):
    """Normalize sample data with per-channel mean and standard deviation.

    Image-modality samples are first scaled from [0, 255] to [0, 1]; the
    statistics are broadcast over (H, W) for CHW layouts.
    """

    def __init__(self, mean: Tuple[float, float, float],
                 std: Tuple[float, float, float]):
        self.mean = mean
        self.std = std
        valid_types = isinstance(self.mean, (list, tuple)) and isinstance(
            self.std, (list, tuple))
        if not valid_types:
            raise ValueError(
                "{}: input type is invalid. It should be list or tuple".format(
                    self))
        from functools import reduce
        # A zero anywhere in std would divide by zero during normalization.
        if reduce(lambda a, b: a * b, self.std) == 0:
            raise ValueError('{}: std is invalid!'.format(self))

    def __call__(self, sample: Sample):
        """Scale (image modality) and normalize the sample's data in place."""
        if sample.modality == 'image':
            sample.data = sample.data.astype(np.float32, copy=False) / 255.0
        if sample.modality == 'image' and sample.meta.channel_order != 'chw':
            # HWC layout: flat per-channel vectors broadcast over the last axis.
            norm_mean = np.array(self.mean)
            norm_std = np.array(self.std)
        else:
            # CHW (or non-image) layout: broadcast statistics over (H, W).
            norm_mean = np.array(self.mean)[:, np.newaxis, np.newaxis]
            norm_std = np.array(self.std)[:, np.newaxis, np.newaxis]
        sample.data = F.normalize(sample.data, norm_mean, norm_std)
        return sample
@manager.TRANSFORMS.add_component
class NormalizeRangeImage(TransformABC):
    """
    Normalize range image.
    Args:
        mean (list or tuple): Mean of range image.
        std (list or tuple): Standard deviation of range image.
    """

    def __init__(self, mean: Tuple[float, float, float],
                 std: Tuple[float, float, float]):
        valid_types = isinstance(mean, (list, tuple)) and isinstance(
            std, (list, tuple))
        if not valid_types:
            raise ValueError(
                "{}: input type is invalid. It should be list or tuple".format(
                    self))
        from functools import reduce
        # A zero anywhere in std would divide by zero during normalization.
        if reduce(lambda a, b: a * b, std) == 0:
            raise ValueError('{}: std is invalid!'.format(self))
        # Reshape to (C, 1, 1) so the statistics broadcast over (H, W).
        self.mean = np.asarray(mean).reshape(-1, 1, 1)
        self.std = np.asarray(std).reshape(-1, 1, 1)

    def __call__(self, sample: Sample):
        """Apply channel-wise normalization to the sample's range image."""
        sample.data = F.normalize(sample.data, self.mean, self.std)
        return sample
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle3d.transforms.anchor_generator import *
from paddle3d.transforms.base import *
from paddle3d.transforms.functional import *
from paddle3d.transforms.normalize import *
from paddle3d.transforms.reader import *
from paddle3d.transforms.sampling import *
from paddle3d.transforms.samplingV2 import *
from paddle3d.transforms.target_generator import *
from paddle3d.transforms.transform import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/samplingV2.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["SamplingDatabaseV2"]
import os
import os.path as osp
import pickle
from collections import defaultdict
from typing import Dict, List
import numpy as np
from paddle3d.apis import manager
from paddle3d.datasets.apollo import apollo_utils
from paddle3d.geometries.bbox import BBoxes3D, box_collision_test
from paddle3d.geometries.pointcloud import PointCloud
from paddle3d.sample import Sample
from paddle3d.transforms.base import TransformABC
from paddle3d.utils.logger import logger
@manager.TRANSFORMS.add_component
class SamplingDatabaseV2(TransformABC):
    """
    Sample objects from ground truth database and paste on current scene.
    Args:
        min_num_points_in_box_per_class (Dict[str, int]): Minimum number of points in sampled object for each class.
        max_num_samples_per_class (Dict[str, int]): Maximum number of objects sampled from each class.
        database_anno_list (List[str]): Dataset sub-directories (under
            ``database_root``) whose ground-truth databases are merged.
        database_root (str): Path to database root directory.
        class_names (List[str]): List of class names.
        ignored_difficulty (List[int]): List of difficulty levels to be ignored.
        ignored_image_idx (int): If non-zero, only keep annotations whose
            ``data_idx`` is <= this value.
    """

    def __init__(self,
                 min_num_points_in_box_per_class: Dict[str, int],
                 max_num_samples_per_class: Dict[str, int],
                 database_anno_list: List[str],
                 database_root: str,
                 class_names: List[str],
                 ignored_difficulty: List[int] = None,
                 ignored_image_idx: int = 0):
        self.min_num_points_in_box_per_class = min_num_points_in_box_per_class
        self.max_num_samples_per_class = max_num_samples_per_class
        self.ignored_image_idx = ignored_image_idx
        exist_database_anno = {}
        for database_anno_path in database_anno_list:
            pkl_path = os.path.join(database_root, database_anno_path,
                                    'apollo_train_gt_database',
                                    'anno_info_train.pkl')
            with open(pkl_path, "rb") as f:
                current_database_anno = pickle.load(f)
            # Merge databases from different datasets: class names that only
            # differ in letter case are treated as the same class.
            lower_to_key = {
                exist_class.lower(): exist_class
                for exist_class in exist_database_anno.keys()
            }
            for current_class, current_value in current_database_anno.items():
                # Bug fix: index with the *stored* key for a case-insensitive
                # match. The previous code matched on lowercase but indexed
                # with `current_class` verbatim, raising KeyError whenever the
                # two class names differed only in case (e.g. 'Car' vs 'car').
                exist_key = lower_to_key.get(current_class.lower())
                if exist_key is not None:
                    exist_database_anno[exist_key].extend(current_value)
                else:
                    exist_database_anno[current_class] = current_value
        # Map dataset-specific class names onto the requested class list.
        self.database_anno = {}
        for class_name in class_names:
            self.database_anno[class_name] = []
        for key, value in exist_database_anno.items():
            map_class_name = apollo_utils.map_class(key)
            if map_class_name in class_names:
                self.database_anno[map_class_name].extend(value)
        if not osp.exists(database_root):
            raise ValueError(
                f"Database root path {database_root} does not exist!!!")
        self.database_root = database_root
        self.class_names = class_names
        self.database_anno = self._filter_min_num_points_in_box(
            self.database_anno)
        self.ignored_difficulty = ignored_difficulty
        if ignored_difficulty is not None:
            self.database_anno = self._filter_ignored_difficulty(
                self.database_anno)
        if ignored_image_idx != 0:
            self.database_anno = self._filter_ignored_image_idx(
                self.database_anno)
        self.sampler_per_class = dict()
        for cls_name, annos in self.database_anno.items():
            self.sampler_per_class[cls_name] = Sampler(cls_name, annos)

    def _filter_min_num_points_in_box(self, database_anno: Dict[str, list]):
        """Drop annotations whose object contains too few lidar points."""
        new_database_anno = defaultdict(list)
        for cls_name, annos in database_anno.items():
            if cls_name not in self.class_names or cls_name not in self.min_num_points_in_box_per_class:
                continue
            logger.info("Load {} {} database infos".format(
                len(annos), cls_name))
            for anno in annos:
                if anno["num_points_in_box"] >= self.min_num_points_in_box_per_class[
                        cls_name]:
                    new_database_anno[cls_name].append(anno)
        logger.info("After filtering min_num_points_in_box:")
        for cls_name, annos in new_database_anno.items():
            logger.info("Load {} {} database infos".format(
                len(annos), cls_name))
        return new_database_anno

    def _filter_ignored_difficulty(self, database_anno: Dict[str, list]):
        """Drop annotations whose difficulty is listed in `ignored_difficulty`."""
        new_database_anno = defaultdict(list)
        for cls_name, annos in database_anno.items():
            if cls_name not in self.class_names or cls_name not in self.min_num_points_in_box_per_class:
                continue
            for anno in annos:
                if anno["difficulty"] not in self.ignored_difficulty:
                    new_database_anno[cls_name].append(anno)
        logger.info("After filtering ignored difficulty:")
        for cls_name, annos in new_database_anno.items():
            logger.info("Load {} {} database infos".format(
                len(annos), cls_name))
        return new_database_anno

    def _filter_ignored_image_idx(self, database_anno: Dict[str, list]):
        """Keep only annotations whose data_idx is <= `ignored_image_idx`."""
        new_database_anno = defaultdict(list)
        for cls_name, annos in database_anno.items():
            if cls_name not in self.class_names or cls_name not in self.min_num_points_in_box_per_class:
                continue
            for anno in annos:
                if int(anno["data_idx"]) <= self.ignored_image_idx:
                    new_database_anno[cls_name].append(anno)
        logger.info("After filtering ignored image idx:")
        for cls_name, annos in new_database_anno.items():
            logger.info("Load {} {} database infos".format(
                len(annos), cls_name))
        return new_database_anno

    def _convert_box_format(self, bboxes_3d):
        """In-place convert boxes to the layout used for road-plane fitting.

        Shifts z by +h/2, swaps the two horizontal extents and remaps yaw;
        exact inverse of `_convert_box_format_back`.
        """
        bboxes_3d[:, 2] += bboxes_3d[:, 5] / 2
        bboxes_3d[:, 3:6] = bboxes_3d[:, [4, 3, 5]]
        bboxes_3d[:, 6] = -(bboxes_3d[:, 6] + np.pi / 2)
        return bboxes_3d

    def _convert_box_format_back(self, bboxes_3d):
        """Inverse of `_convert_box_format` (the yaw remap is an involution)."""
        bboxes_3d[:, 2] -= bboxes_3d[:, 5] / 2
        bboxes_3d[:, 3:6] = bboxes_3d[:, [4, 3, 5]]
        bboxes_3d[:, 6] = -(bboxes_3d[:, 6] + np.pi / 2)
        return bboxes_3d

    def _lidar_to_rect(self, pts_lidar, R0, V2C):
        """Project lidar-frame points into the rectified camera frame."""
        pts_lidar_hom = self._cart_to_hom(pts_lidar)
        pts_rect = np.dot(pts_lidar_hom, np.dot(V2C.T, R0.T))
        return pts_rect

    def _rect_to_lidar(self, pts_rect, R0, V2C):
        """Project rectified-camera-frame points back into the lidar frame."""
        pts_rect_hom = self._cart_to_hom(pts_rect)  # (N, 4)
        R0_ext = np.hstack((R0, np.zeros((3, 1), dtype=np.float32)))  # (3, 4)
        R0_ext = np.vstack((R0_ext, np.zeros((1, 4),
                                             dtype=np.float32)))  # (4, 4)
        R0_ext[3, 3] = 1
        V2C_ext = np.vstack((V2C, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        V2C_ext[3, 3] = 1
        pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(
            np.dot(R0_ext, V2C_ext).T))
        return pts_lidar[:, 0:3]

    def _cart_to_hom(self, pts):
        """Append a column of ones: (N, 3) -> (N, 4) homogeneous coordinates."""
        pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
        return pts_hom

    def _put_boxes_on_road_planes(self, sampled_boxes, road_planes, calibs):
        """Drop sampled boxes vertically so they rest on the road plane.

        Returns:
            tuple: (adjusted boxes, per-box vertical shift ``mv_height``).
        """
        a, b, c, d = road_planes
        R0, V2C = calibs[4], calibs[5]
        sampled_boxes = self._convert_box_format(sampled_boxes)
        center_cam = self._lidar_to_rect(sampled_boxes[:, 0:3], R0, V2C)
        # Solve the plane equation a*x + b*y + c*z + d = 0 for camera-frame y.
        cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b
        center_cam[:, 1] = cur_height_cam
        cur_lidar_height = self._rect_to_lidar(center_cam, R0, V2C)[:, 2]
        mv_height = sampled_boxes[:,
                                  2] - sampled_boxes[:, 5] / 2 - cur_lidar_height
        sampled_boxes[:, 2] -= mv_height
        sampled_boxes = self._convert_box_format_back(sampled_boxes)
        return sampled_boxes, mv_height

    def sampling(self, sample: Sample, num_samples_per_class: Dict[str, int]):
        """Paste sampled database objects into `sample`, skipping collisions.

        Returns:
            dict: augmented ``bboxes_3d`` / ``data`` / ``labels`` (plus
            ``velocities`` / ``difficulties`` when present on the input).
        """
        existing_bboxes_3d = sample.bboxes_3d.copy()
        existing_velocities = None
        if sample.bboxes_3d.velocities is not None:
            existing_velocities = sample.bboxes_3d.velocities.copy()
        existing_labels = sample.labels.copy()
        existing_data = sample.data.copy()
        existing_difficulties = getattr(sample, "difficulties", None)
        ignored_bboxes_3d = getattr(
            sample, "ignored_bboxes_3d",
            np.zeros([0, existing_bboxes_3d.shape[1]],
                     dtype=existing_bboxes_3d.dtype))
        avoid_coll_bboxes_3d = np.vstack(
            [existing_bboxes_3d, ignored_bboxes_3d])
        for cls_name, num_samples in num_samples_per_class.items():
            if num_samples > 0:
                sampling_annos = self.sampler_per_class[cls_name].sampling(
                    num_samples)
                num_sampling = len(sampling_annos)
                indices = np.arange(num_sampling)
                sampling_bboxes_3d = np.vstack(
                    [sampling_annos[i]["bbox_3d"] for i in range(num_sampling)])
                sampling_bboxes = BBoxes3D(
                    sampling_bboxes_3d,
                    coordmode=sample.bboxes_3d.coordmode,
                    origin=sample.bboxes_3d.origin)
                avoid_coll_bboxes = BBoxes3D(
                    avoid_coll_bboxes_3d,
                    coordmode=sample.bboxes_3d.coordmode,
                    origin=sample.bboxes_3d.origin)
                s_bboxes_bev = sampling_bboxes.corners_2d
                e_bboxes_bev = avoid_coll_bboxes.corners_2d
                # Filter the sampled bboxes that overlap existing/ignored ones
                # in bird's-eye view; earlier accepted samples also count.
                total_bv = np.concatenate([e_bboxes_bev, s_bboxes_bev], axis=0)
                coll_mat = box_collision_test(total_bv, total_bv)
                diag = np.arange(total_bv.shape[0])
                coll_mat[diag, diag] = False
                idx = e_bboxes_bev.shape[0]
                mask = []
                for num in range(num_sampling):
                    if coll_mat[idx + num].any():
                        coll_mat[idx + num] = False
                        coll_mat[:, idx + num] = False
                        mask.append(False)
                    else:
                        mask.append(True)
                indices = indices[mask]
                # Put all boxes (before collision filtering) on the road plane;
                # keep a copy so point offsets use the unshifted centers.
                sampling_bboxes_3d_copy = sampling_bboxes_3d.copy()
                if hasattr(sample, "road_plane"):
                    sampling_bboxes_3d, mv_height = self._put_boxes_on_road_planes(
                        sampling_bboxes_3d, sample.road_plane, sample.calibs)
                if len(indices) > 0:
                    sampling_data = []
                    sampling_labels = []
                    sampling_velocities = []
                    sampling_difficulties = []
                    label = self.class_names.index(cls_name)
                    for i in indices:
                        if existing_velocities is not None:
                            sampling_velocities.append(
                                sampling_annos[i]["velocity"])
                        if existing_difficulties is not None:
                            sampling_difficulties.append(
                                sampling_annos[i]["difficulty"])
                        sampling_labels.append(label)
                        # Object points are stored box-relative; translate them
                        # to the sampled box center.
                        lidar_data = np.fromfile(
                            osp.join(self.database_root,
                                     sampling_annos[i]["dataset"],
                                     sampling_annos[i]["lidar_file"]),
                            "float32").reshape(
                                [-1, sampling_annos[i]["lidar_dim"]])
                        lidar_data[:, 0:3] += sampling_bboxes_3d_copy[i, 0:3]
                        if hasattr(sample, "road_plane"):
                            lidar_data[:, 2] -= mv_height[i]
                        sampling_data.append(lidar_data)
                    existing_bboxes_3d = np.vstack(
                        [existing_bboxes_3d, sampling_bboxes_3d[indices]])
                    avoid_coll_bboxes_3d = np.vstack(
                        [avoid_coll_bboxes_3d, sampling_bboxes_3d[indices]])
                    if sample.bboxes_3d.velocities is not None:
                        existing_velocities = np.vstack(
                            [existing_velocities, sampling_velocities])
                    existing_labels = np.hstack(
                        [existing_labels, sampling_labels])
                    existing_data = np.vstack(
                        [np.vstack(sampling_data), existing_data])
                    if existing_difficulties is not None:
                        existing_difficulties = np.hstack(
                            [existing_difficulties, sampling_difficulties])
        result = {
            "bboxes_3d": existing_bboxes_3d,
            "data": existing_data,
            "labels": existing_labels
        }
        if existing_velocities is not None:
            result.update({"velocities": existing_velocities})
        if existing_difficulties is not None:
            result.update({"difficulties": existing_difficulties})
        return result

    def _cal_num_samples_per_class(self, sample: Sample):
        """Compute how many objects of each class still need to be sampled."""
        labels = sample.labels
        num_samples_per_class = dict()
        for cls_name, max_num_samples in self.max_num_samples_per_class.items():
            label = self.class_names.index(cls_name)
            if label in labels:
                num_existing = np.sum([int(label) == int(l) for l in labels])
                num_samples = 0 if num_existing > max_num_samples else max_num_samples - num_existing
                num_samples_per_class[cls_name] = num_samples
            else:
                num_samples_per_class[cls_name] = max_num_samples
        return num_samples_per_class

    def __call__(self, sample: Sample):
        """Augment a lidar sample by pasting ground-truth objects into it."""
        if sample.modality != "lidar":
            raise ValueError(
                "Sampling from a database only supports lidar data!")
        num_samples_per_class = self._cal_num_samples_per_class(sample)
        samples = self.sampling(sample, num_samples_per_class)
        sample.bboxes_3d = BBoxes3D(
            samples["bboxes_3d"],
            coordmode=sample.bboxes_3d.coordmode,
            origin=sample.bboxes_3d.origin)
        sample.labels = samples["labels"]
        if "velocities" in samples:
            sample.bboxes_3d.velocities = samples["velocities"]
        if "difficulties" in samples:
            sample.difficulties = samples["difficulties"]
        sample.data = PointCloud(samples["data"])
        return sample
class Sampler(object):
    """Cycle through a class's database annotations in (optionally shuffled)
    epochs, handing out consecutive slices on each call."""

    def __init__(self, cls_name: str, annos: List[dict], shuffle: bool = True):
        self.shuffle = shuffle
        self.cls_name = cls_name
        self.annos = annos
        self.length = len(annos)
        self.indices = np.arange(self.length)
        self.idx = 0
        if self.shuffle:
            np.random.shuffle(self.indices)

    def reset(self):
        """Begin a new epoch: reshuffle (if enabled) and rewind the cursor."""
        if self.shuffle:
            np.random.shuffle(self.indices)
        self.idx = 0

    def sampling(self, num_samples):
        """Return the next `num_samples` annotations (fewer at epoch end)."""
        start = self.idx
        end = start + num_samples
        if end >= self.length:
            # Epoch exhausted: hand out the remainder, then start over.
            chosen = self.indices[start:].copy()
            self.reset()
        else:
            chosen = self.indices[start:end]
            self.idx = end
        return [self.annos[i] for i in chosen]
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/reader.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from typing import List, Union
import cv2
import numpy as np
from PIL import Image
from paddle3d.apis import manager
from paddle3d.datasets.kitti import kitti_utils
from paddle3d.datasets.semantic_kitti.semantic_kitti import \
SemanticKITTIDataset
from paddle3d.geometries import PointCloud
from paddle3d.geometries.bbox import points_in_convex_polygon_3d_jit
from paddle3d.sample import Sample
from paddle3d.transforms import functional as F
from paddle3d.transforms.base import TransformABC
from paddle3d.utils.logger import logger
__all__ = [
"LoadImage", "LoadPointCloud", "RemoveCameraInvisiblePointsKITTI",
"RemoveCameraInvisiblePointsKITTIV2", "LoadSemanticKITTIRange"
]
@manager.TRANSFORMS.add_component
class LoadImage(TransformABC):
    """Load an image into `sample.data` and normalize its channel layout.

    Args:
        to_chw (bool): Transpose HWC data to CHW after loading.
        to_rgb (bool): Convert the image to RGB (otherwise keep/convert to BGR).
        reader (str): Backend used to decode the file, 'cv2' or 'pillow'.
    """
    # Maps reader name to the callable that decodes an image file.
    _READER_MAPPER = {"cv2": cv2.imread, "pillow": Image.open}
    def __init__(self,
                 to_chw: bool = True,
                 to_rgb: bool = True,
                 reader: str = "cv2"):
        if reader not in self._READER_MAPPER.keys():
            raise ValueError('Unsupported reader {}'.format(reader))
        self.reader = reader
        self.to_rgb = to_rgb
        self.to_chw = to_chw
    def __call__(self, sample: Sample) -> Sample:
        """Read the image at `sample.path` and update `sample.meta` to match."""
        sample.data = np.array(self._READER_MAPPER[self.reader](sample.path))
        sample.meta.image_reader = self.reader
        # cv2 decodes to BGR; pillow decodes to RGB.
        sample.meta.image_format = "bgr" if self.reader == "cv2" else "rgb"
        sample.meta.channel_order = "hwc"
        if sample.meta.image_format != "rgb" and self.to_rgb:
            if sample.meta.image_format == "bgr":
                sample.data = cv2.cvtColor(sample.data, cv2.COLOR_BGR2RGB)
                sample.meta.image_format = "rgb"
            else:
                raise RuntimeError('Unsupported image format {}'.format(
                    sample.meta.image_format))
        elif sample.meta.image_format != "bgr" and (self.to_rgb is False):
            if sample.meta.image_format == "rgb":
                # Reverse the channel axis (RGB -> BGR) without a cv2 call so
                # this also works for pillow-decoded arrays.
                sample.data = sample.data[:, :, ::-1]
                sample.meta.image_format = "bgr"
            else:
                raise RuntimeError('Unsupported image format {}'.format(
                    sample.meta.image_format))
        if self.to_chw:
            sample.data = sample.data.transpose((2, 0, 1))
            sample.meta.channel_order = "chw"
        return sample
@manager.TRANSFORMS.add_component
class LoadPointCloud(TransformABC):
    """
    Load point cloud.
    Args:
        dim: The dimension of each point.
        use_dim: The dimension of each point to use.
        use_time_lag: Whether to use time lag.
        sweep_remove_radius: The radius within which points are removed in sweeps.
        sep: Separator passed to ``np.fromfile`` for the main scan
            ('' means binary).
    """
    def __init__(self,
                 dim,
                 use_dim: Union[int, List[int]] = None,
                 use_time_lag: bool = False,
                 sweep_remove_radius: float = 1,
                 sep: str = ''):
        self.dim = dim
        # An int use_dim means "keep the first use_dim columns".
        self.use_dim = range(use_dim) if isinstance(use_dim, int) else use_dim
        self.use_time_lag = use_time_lag
        self.sweep_remove_radius = sweep_remove_radius
        self.sep = sep
    def __call__(self, sample: Sample):
        """Load the main scan plus (optionally) its sweeps into ``sample.data``.

        Raises:
            ValueError: If the sample is not lidar or was already loaded.
        """
        if sample.modality != "lidar":
            raise ValueError('{} Only Support samples in modality lidar'.format(
                self.__class__.__name__))
        if sample.data is not None:
            raise ValueError(
                'The data for this sample has been processed before.')
        data = np.fromfile(sample.path, np.float32, sep=self.sep).reshape(-1, self.dim)
        if self.use_dim is not None:
            data = data[:, self.use_dim]
        if self.use_time_lag:
            # The main scan is the reference frame, so its time lag is 0.
            time_lag = np.zeros((data.shape[0], 1), dtype=data.dtype)
            data = np.hstack([data, time_lag])
        if len(sample.sweeps) > 0:
            data_sweep_list = [
                data,
            ]
            # Sweeps are shuffled before being concatenated.
            for i in np.random.choice(
                    len(sample.sweeps), len(sample.sweeps), replace=False):
                sweep = sample.sweeps[i]
                # NOTE(review): sweeps are read without self.sep — confirm
                # sweeps are always binary.
                sweep_data = np.fromfile(sweep.path, np.float32).reshape(
                    -1, self.dim)
                # NOTE(review): truthiness check differs from the
                # `is not None` test above; an empty use_dim list is skipped
                # here — confirm intentional.
                if self.use_dim:
                    sweep_data = sweep_data[:, self.use_dim]
                sweep_data = sweep_data.T
                # Remove points that are in a certain radius from origin.
                x_filter_mask = np.abs(
                    sweep_data[0, :]) < self.sweep_remove_radius
                y_filter_mask = np.abs(
                    sweep_data[1, :]) < self.sweep_remove_radius
                not_close = np.logical_not(
                    np.logical_and(x_filter_mask, y_filter_mask))
                sweep_data = sweep_data[:, not_close]
                # Homogeneous transform of current sample to reference coordinate
                if sweep.meta.ref_from_curr is not None:
                    sweep_data[:3, :] = sweep.meta.ref_from_curr.dot(
                        np.vstack((sweep_data[:3, :],
                                   np.ones(sweep_data.shape[1]))))[:3, :]
                sweep_data = sweep_data.T
                if self.use_time_lag:
                    curr_time_lag = sweep.meta.time_lag * np.ones(
                        (sweep_data.shape[0], 1)).astype(sweep_data.dtype)
                    sweep_data = np.hstack([sweep_data, curr_time_lag])
                data_sweep_list.append(sweep_data)
            data = np.concatenate(data_sweep_list, axis=0)
        sample.data = PointCloud(data)
        return sample
@manager.TRANSFORMS.add_component
class RemoveCameraInvisiblePointsKITTI(TransformABC):
    """
    Remove camera invisible points for KITTI dataset.

    Builds the camera-2 viewing frustum in velodyne coordinates and keeps
    only the points inside it.
    """
    def __call__(self, sample: Sample):
        calibs = sample.calibs
        # Decompose P2 into intrinsics C, inverse rotation Rinv and translation T.
        C, Rinv, T = kitti_utils.projection_matrix_decomposition(calibs[2])
        # NOTE(review): `Path` and `os` must be imported at the top of this
        # module — confirm the import block provides them.
        im_path = (Path(sample.path).parents[1] / "image_2" / Path(
            sample.path).stem).with_suffix(".png")
        if os.path.exists(im_path):
            im_shape = cv2.imread(str(im_path)).shape[:2]
        else:
            # Fall back to the canonical KITTI image size when missing.
            im_shape = (375, 1242)
        im_shape = np.array(im_shape, dtype=np.int32)
        im_bbox = [0, 0, im_shape[1], im_shape[0]]
        # Image-plane bbox -> frustum corners in camera coords.
        frustum = F.get_frustum(im_bbox, C)
        frustum = (Rinv @ (frustum - T).T).T
        frustum = kitti_utils.coord_camera_to_velodyne(frustum, calibs)
        frustum_normals = F.corner_to_surface_normal(frustum[None, ...])
        indices = points_in_convex_polygon_3d_jit(sample.data[:, :3],
                                                  frustum_normals)
        sample.data = sample.data[indices.reshape([-1])]
        return sample
@manager.TRANSFORMS.add_component
class RemoveCameraInvisiblePointsKITTIV2(TransformABC):
    """
    Remove camera invisible points for KITTI dataset, unlike `RemoveCameraInvisiblePointsKITTI` which projects image plane to a frustum,
    this version projects poinst into image plane and remove the points outside the image boundary.
    """
    def __init__(self):
        # Calibration matrices are cached per-sample in __call__.
        self.V2C = None
        self.R0 = None
    def __call__(self, sample: Sample):
        calibs = sample.calibs
        self.R0 = calibs[4]
        self.V2C = calibs[5]
        self.P2 = calibs[2]
        im_path = (Path(sample.path).parents[1] / "image_2" / Path(
            sample.path).stem).with_suffix(".png")
        if os.path.exists(im_path):
            im_shape = cv2.imread(str(im_path)).shape[:2]
        else:
            # Fall back to the canonical KITTI image size when missing.
            im_shape = (375, 1242)
        im_shape = np.array(im_shape, dtype=np.int32)
        pts = sample.data[:, 0:3]
        # lidar to rect
        pts_lidar_hom = self.cart_to_hom(pts)
        pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
        # rect to img
        pts_img, pts_rect_depth = self.rect_to_img(pts_rect)
        # Keep points whose projection lies within image bounds and in front
        # of the camera (depth >= 0).
        val_flag_1 = np.logical_and(pts_img[:, 0] >= 0,
                                    pts_img[:, 0] < im_shape[1])
        val_flag_2 = np.logical_and(pts_img[:, 1] >= 0,
                                    pts_img[:, 1] < im_shape[0])
        val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
        pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
        sample.data = sample.data[pts_valid_flag]
        return sample
    def cart_to_hom(self, pts):
        """Append a column of ones: (N, 3) -> (N, 4) homogeneous coords."""
        pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
        return pts_hom
    def rect_to_img(self, pts_rect):
        """Project rectified-camera points to pixel coords; also return depth."""
        pts_rect_hom = self.cart_to_hom(pts_rect)
        pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
        pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T  # (N, 2)
        pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[
            3, 2]  # depth in rect camera coord
        return pts_img, pts_rect_depth
@manager.TRANSFORMS.add_component
class LoadSemanticKITTIRange(TransformABC):
    """
    Load SemanticKITTI range image.
    Please refer to <https://github.com/PRBonn/semantic-kitti-api/blob/master/auxiliary/laserscan.py>.
    Args:
        project_label (bool, optional): Whether project label to range view or not.
    """
    def __init__(self, project_label=True):
        self.project_label = project_label
        # Range image resolution (HDL-64E: 64 beams, 1024 azimuth bins).
        self.proj_H = 64
        self.proj_W = 1024
        # Vertical field of view of the sensor, in radians.
        self.upper_inclination = 3. / 180. * np.pi
        self.lower_inclination = -25. / 180. * np.pi
        self.fov = self.upper_inclination - self.lower_inclination
        self.remap_lut = SemanticKITTIDataset.build_remap_lut()
    def _remap_semantic_labels(self, sem_label):
        """
        Remap semantic labels to cross entropy format.
        Please refer to <https://github.com/PRBonn/semantic-kitti-api/blob/master/remap_semantic_labels.py>.
        """
        return self.remap_lut[sem_label]
    def __call__(self, sample: Sample) -> Sample:
        """Spherically project the scan (and optionally labels) to a range image.

        Raises:
            ValueError: If the label file has a different point count than
                the scan.
        """
        raw_scan = np.fromfile(sample.path, dtype=np.float32).reshape((-1, 4))
        points = raw_scan[:, 0:3]
        remissions = raw_scan[:, 3]
        # get depth of all points (L-2 norm of [x, y, z])
        depth = np.linalg.norm(points, ord=2, axis=1)
        # get angles of all points
        scan_x = points[:, 0]
        scan_y = points[:, 1]
        scan_z = points[:, 2]
        yaw = -np.arctan2(scan_y, scan_x)
        pitch = np.arcsin(scan_z / depth)
        # get projections in image coords
        proj_x = 0.5 * (yaw / np.pi + 1.0)  # in [0.0, 1.0]
        proj_y = 1.0 - (
            pitch + abs(self.lower_inclination)) / self.fov  # in [0.0, 1.0]
        # scale to image size using angular resolution
        proj_x *= self.proj_W  # in [0.0, W]
        proj_y *= self.proj_H  # in [0.0, H]
        # round and clamp for use as index
        proj_x = np.floor(proj_x)
        proj_x = np.minimum(self.proj_W - 1, proj_x)
        proj_x = np.maximum(0, proj_x).astype(np.int32)  # in [0,W-1]
        proj_x_copy = np.copy(
            proj_x
        )  # save a copy in original order, for each point, where it is in the range image
        proj_y = np.floor(proj_y)
        proj_y = np.minimum(self.proj_H - 1, proj_y)
        proj_y = np.maximum(0, proj_y).astype(np.int32)  # in [0,H-1]
        proj_y_copy = np.copy(
            proj_y
        )  # save a copy in original order, for each point, where it is in the range image
        # unproj_range_copy = np.copy(depth) # copy of depth in original order
        # order in decreasing depth
        # (so the closest point wins when several fall into one pixel)
        indices = np.arange(depth.shape[0])
        order = np.argsort(depth)[::-1]
        depth = depth[order]
        indices = indices[order]
        points = points[order]
        remission = remissions[order]
        proj_y = proj_y[order]
        proj_x = proj_x[order]
        # projected range image - [H,W] range (-1 is no data)
        proj_range = np.full((self.proj_H, self.proj_W), -1, dtype=np.float32)
        # projected point cloud xyz - [H,W,3] xyz coord (-1 is no data)
        proj_xyz = np.full((self.proj_H, self.proj_W, 3), -1, dtype=np.float32)
        # projected remission - [H,W] intensity (-1 is no data)
        proj_remission = np.full((self.proj_H, self.proj_W),
                                 -1,
                                 dtype=np.float32)
        # projected index (for each pixel, what I am in the pointcloud)
        # [H,W] index (-1 is no data)
        proj_idx = np.full((self.proj_H, self.proj_W), -1, dtype=np.int32)
        proj_range[proj_y, proj_x] = depth
        proj_xyz[proj_y, proj_x] = points
        proj_remission[proj_y, proj_x] = remission
        proj_idx[proj_y, proj_x] = indices
        # NOTE(review): `> 0` also masks out the pixel holding point index 0;
        # confirm this matches the upstream laserscan.py behavior.
        proj_mask = proj_idx > 0  # mask containing for each pixel, if it contains a point or not
        # Channels: [range, x, y, z, remission] -> (5, H, W).
        sample.data = np.concatenate([
            proj_range[None, ...],
            proj_xyz.transpose([2, 0, 1]), proj_remission[None, ...]
        ])
        sample.meta["proj_mask"] = proj_mask.astype(np.float32)
        sample.meta["proj_x"] = proj_x_copy
        sample.meta["proj_y"] = proj_y_copy
        if sample.labels is not None:
            # load labels
            raw_label = np.fromfile(
                sample.labels, dtype=np.uint32).reshape((-1))
            # only fill in attribute if the right size
            if raw_label.shape[0] == points.shape[0]:
                sem_label = raw_label & 0xFFFF  # semantic label in lower half
                sem_label = self._remap_semantic_labels(sem_label)
                # inst_label = raw_label >> 16  # instance id in upper half
            else:
                logger.error("Point cloud shape: {}".format(points.shape))
                logger.error("Label shape: {}".format(raw_label.shape))
                raise ValueError(
                    "Scan and Label don't contain same number of points. {}".
                    format(sample.path))
            # # sanity check
            # assert ((sem_label + (inst_label << 16) == raw_label).all())
            if self.project_label:
                # project label to range view
                # semantics
                proj_sem_label = np.zeros((self.proj_H, self.proj_W),
                                          dtype=np.int32)  # [H,W] label
                proj_sem_label[proj_mask] = sem_label[proj_idx[proj_mask]]
                # # instances
                # proj_inst_label = np.zeros((self.proj_H, self.proj_W),
                #                            dtype=np.int32)  # [H,W] label
                # proj_inst_label[proj_mask] = self.inst_label[proj_idx[proj_mask]]
                sample.labels = proj_sem_label.astype(np.int64)[None, ...]
            else:
                sample.labels = sem_label.astype(np.int64)
        return sample
@manager.TRANSFORMS.add_component
class LoadSemanticKITTIPointCloud(TransformABC):
    """
    Load a SemanticKITTI point cloud (and its semantic labels, if present).
    Please refer to <https://github.com/PRBonn/semantic-kitti-api/blob/master/auxiliary/laserscan.py>.

    Args:
        use_dim (List[int]): Indices of the scan columns to keep
            (out of [x, y, z, remission]).
    """
    def __init__(self, use_dim: List[int] = None):
        # Sensor geometry constants kept for parity with the range loader.
        self.proj_H = 64
        self.proj_W = 1024
        self.upper_inclination = 3. / 180. * np.pi
        self.lower_inclination = -25. / 180. * np.pi
        self.fov = self.upper_inclination - self.lower_inclination
        self.remap_lut = SemanticKITTIDataset.build_remap_lut()
        self.use_dim = use_dim
    def _remap_semantic_labels(self, sem_label):
        """
        Remap semantic labels to cross entropy format.
        Please refer to <https://github.com/PRBonn/semantic-kitti-api/blob/master/remap_semantic_labels.py>.
        """
        return self.remap_lut[sem_label]
    def __call__(self, sample: Sample) -> Sample:
        """Load the raw scan into ``sample.data``; remap labels if available.

        Raises:
            ValueError: If the label file has a different point count than
                the scan.
        """
        raw_scan = np.fromfile(sample.path, dtype=np.float32).reshape(-1, 4)
        points = raw_scan[:, 0:3]
        sample.data = PointCloud(raw_scan[:, self.use_dim])
        if sample.labels is not None:
            # load labels
            # NOTE(review): labels are read as int32 here but uint32 in
            # LoadSemanticKITTIRange — confirm the masking below is unaffected.
            raw_label = np.fromfile(sample.labels, dtype=np.int32).reshape(-1)
            # only fill in attribute if the right size
            if raw_label.shape[0] == points.shape[0]:
                sem_label = raw_label & 0xFFFF  # semantic label in lower half
                sem_label = self._remap_semantic_labels(sem_label)
                # self.inst_label = raw_label >> 16  # instance id in upper half
            else:
                logger.error("Point cloud shape: {}".format(points.shape))
                logger.error("Label shape: {}".format(raw_label.shape))
                raise ValueError(
                    "Scan and Label don't contain same number of points. {}".
                    format(sample.path))
            # # sanity check
            # assert ((sem_label + (inst_label << 16) == raw_label).all())
            sample.labels = sem_label
        return sample
@manager.TRANSFORMS.add_component
class LoadMultiViewImageFromFiles(TransformABC):
    """
    load multi-view image from files
    Args:
        to_float32 (bool): Whether to convert the img to float32.
            Default: False.
        imread_flag (int): cv2 imread flag used to decode the files.
            Default: -1.
            - -1: cv2.IMREAD_UNCHANGED
            - 0: cv2.IMREAD_GRAYSCALE
            - 1: cv2.IMREAD_COLOR
    """
    def __init__(self, to_float32=False, imread_flag=-1):
        self.to_float32 = to_float32
        self.imread_flag = imread_flag
    def __call__(self, sample):
        """
        Call function to load multi-view image from files.

        Reads every path in ``sample['img_filename']`` and stores the images
        plus shape/normalization metadata back on the sample dict.
        """
        filename = sample['img_filename']
        # Stack views along a new last axis: (H, W, C, num_views).
        img = np.stack(
            [cv2.imread(name, self.imread_flag) for name in filename], axis=-1)
        if self.to_float32:
            img = img.astype(np.float32)
        sample['filename'] = filename
        # Unstack into a list of per-view images.
        sample['img'] = [img[..., i] for i in range(img.shape[-1])]
        sample['img_shape'] = img.shape
        sample['ori_shape'] = img.shape
        sample['pad_shape'] = img.shape
        sample['scale_factor'] = 1.0
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        # Identity normalization config; actual normalization happens later.
        sample['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        return sample
@manager.TRANSFORMS.add_component
class LoadAnnotations3D(TransformABC):
    """
    Copy 3D annotations from ``sample['ann_info']`` into top-level sample keys.

    Args:
        with_bbox_3d (bool): Load 3D bounding boxes. Default: True.
        with_label_3d (bool): Load 3D class labels. Default: True.
        with_attr_label (bool): Load attribute labels. Default: False.
        with_mask_3d (bool): Stored but not used by __call__. Default: False.
        with_seg_3d (bool): Stored but not used by __call__. Default: False.
    """
    def __init__(
            self,
            with_bbox_3d=True,
            with_label_3d=True,
            with_attr_label=False,
            with_mask_3d=False,
            with_seg_3d=False,
    ):
        self.with_bbox_3d = with_bbox_3d
        self.with_label_3d = with_label_3d
        self.with_attr_label = with_attr_label
        self.with_mask_3d = with_mask_3d
        self.with_seg_3d = with_seg_3d
    def _load_bboxes_3d(self, sample) -> Sample:
        """Copy gt 3D boxes into the sample and register the bbox field name.
        """
        sample['gt_bboxes_3d'] = sample['ann_info']['gt_bboxes_3d']
        sample['bbox3d_fields'].append('gt_bboxes_3d')
        return sample
    def _load_labels_3d(self, sample) -> Sample:
        """Copy gt 3D class labels into the sample.
        """
        sample['gt_labels_3d'] = sample['ann_info']['gt_labels_3d']
        return sample
    def _load_attr_labels(self, sample) -> Sample:
        """Copy attribute labels into the sample.
        """
        sample['attr_labels'] = sample['ann_info']['attr_labels']
        return sample
    def __call__(self, sample) -> Sample:
        """Call function to load multiple types annotations.
        """
        if self.with_bbox_3d:
            sample = self._load_bboxes_3d(sample)
            # A None sample signals "drop" to the downstream pipeline.
            if sample is None:
                return None
        if self.with_label_3d:
            sample = self._load_labels_3d(sample)
        if self.with_attr_label:
            sample = self._load_attr_labels(sample)
        return sample
@manager.TRANSFORMS.add_component
class LoadMultiViewImageFromMultiSweepsFiles(object):
    """Load multi channel images from a list of separate channel files.
    Expects results['img_filename'] to be a list of filenames.
    Args:
        sweeps_num (int): Number of sweeps to load. Defaults to 5.
        to_float32 (bool): Whether to convert the img to float32.
            Defaults to False.
        pad_empty_sweeps (bool): Repeat the key frame when the sample has no
            sweeps. Defaults to False.
        sweep_range (list): [min, max) index window that sweeps are drawn
            from. Defaults to [3, 27].
        sweeps_id (list): Explicit sweep indices; overrides random choice.
        imread_flag (int): cv2 imread flag. Defaults to -1 (unchanged).
        sensors (list): Camera names to load for every sweep.
        test_mode (bool): Deterministically pick the middle of sweep_range.
        prob (float): Probability of random sweep choice in training.
    """
    def __init__(
            self,
            sweeps_num=5,
            to_float32=False,
            pad_empty_sweeps=False,
            sweep_range=[3, 27],
            sweeps_id=None,
            imread_flag=-1,  #'unchanged'
            sensors=[
                'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK',
                'CAM_BACK_LEFT', 'CAM_BACK_RIGHT'
            ],
            test_mode=True,
            prob=1.0,
    ):
        self.sweeps_num = sweeps_num
        self.to_float32 = to_float32
        self.imread_flag = imread_flag
        self.pad_empty_sweeps = pad_empty_sweeps
        self.sensors = sensors
        self.test_mode = test_mode
        self.sweeps_id = sweeps_id
        self.sweep_range = sweep_range
        self.prob = prob
        if self.sweeps_id:
            assert len(self.sweeps_id) == self.sweeps_num
    def __call__(self, sample):
        """Call function to load multi-view sweep image from filenames.

        Appends sweep images, relative timestamps, and per-sweep calibration
        (lidar2img / intrinsics / extrinsics) to the sample.
        """
        sweep_imgs_list = []
        timestamp_imgs_list = []
        imgs = sample['img']
        img_timestamp = sample['img_timestamp']
        lidar_timestamp = sample['timestamp']
        # Timestamps become lags relative to the lidar key frame.
        img_timestamp = [
            lidar_timestamp - timestamp for timestamp in img_timestamp
        ]
        sweep_imgs_list.extend(imgs)
        timestamp_imgs_list.extend(img_timestamp)
        nums = len(imgs)
        if self.pad_empty_sweeps and len(sample['sweeps']) == 0:
            # No sweeps available: replicate the key frame sweeps_num times.
            for i in range(self.sweeps_num):
                sweep_imgs_list.extend(imgs)
                # 0.083 s approximates one sweep interval (~12 Hz).
                mean_time = (
                    self.sweep_range[0] + self.sweep_range[1]) / 2.0 * 0.083
                timestamp_imgs_list.extend(
                    [time + mean_time for time in img_timestamp])
                for j in range(nums):
                    sample['filename'].append(sample['filename'][j])
                    sample['lidar2img'].append(np.copy(sample['lidar2img'][j]))
                    sample['intrinsics'].append(
                        np.copy(sample['intrinsics'][j]))
                    sample['extrinsics'].append(
                        np.copy(sample['extrinsics'][j]))
        else:
            if self.sweeps_id:
                choices = self.sweeps_id
            elif len(sample['sweeps']) <= self.sweeps_num:
                choices = np.arange(len(sample['sweeps']))
            elif self.test_mode:
                # Deterministic: middle of the sweep window.
                choices = [
                    int((self.sweep_range[0] + self.sweep_range[1]) / 2) - 1
                ]
            else:
                if np.random.random() < self.prob:
                    if self.sweep_range[0] < len(sample['sweeps']):
                        sweep_range = list(
                            range(
                                self.sweep_range[0],
                                min(self.sweep_range[1],
                                    len(sample['sweeps']))))
                    else:
                        sweep_range = list(
                            range(self.sweep_range[0], self.sweep_range[1]))
                    choices = np.random.choice(
                        sweep_range, self.sweeps_num, replace=False)
                else:
                    choices = [
                        int((self.sweep_range[0] + self.sweep_range[1]) / 2) - 1
                    ]
            for idx in choices:
                sweep_idx = min(idx, len(sample['sweeps']) - 1)
                sweep = sample['sweeps'][sweep_idx]
                # Fall back to the previous sweep if this one misses cameras.
                if len(sweep.keys()) < len(self.sensors):
                    sweep = sample['sweeps'][sweep_idx - 1]
                sample['filename'].extend(
                    [sweep[sensor]['data_path'] for sensor in self.sensors])
                img = np.stack([
                    cv2.imread(sweep[sensor]['data_path'], self.imread_flag)
                    for sensor in self.sensors
                ],
                               axis=-1)
                if self.to_float32:
                    img = img.astype(np.float32)
                img = [img[..., i] for i in range(img.shape[-1])]
                sweep_imgs_list.extend(img)
                # Sweep timestamps are stored in microseconds.
                sweep_ts = [
                    lidar_timestamp - sweep[sensor]['timestamp'] / 1e6
                    for sensor in self.sensors
                ]
                timestamp_imgs_list.extend(sweep_ts)
                for sensor in self.sensors:
                    sample['lidar2img'].append(sweep[sensor]['lidar2img'])
                    sample['intrinsics'].append(sweep[sensor]['intrinsics'])
                    sample['extrinsics'].append(sweep[sensor]['extrinsics'])
        sample['img'] = sweep_imgs_list
        sample['timestamp'] = timestamp_imgs_list
        return sample
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/anchor_generator.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["GenerateAnchors"]
from typing import Any, Dict, List
import numpy as np
from paddle3d.apis import manager
from paddle3d.geometries.bbox import rbbox2d_to_near_bbox
from paddle3d.sample import Sample
from paddle3d.transforms import functional as F
from paddle3d.transforms.base import TransformABC
@manager.TRANSFORMS.add_component
class GenerateAnchors(TransformABC):
    """
    Generate SSD style anchors for PointPillars.
    Args:
        output_stride_factor (int): Output stride of the network.
        point_cloud_range (List[float]): [x_min, y_min, z_min, x_max, y_max, z_max].
        voxel_size (List[float]): [x_size, y_size, z_size].
        anchor_configs (List[Dict[str, Any]]): Anchor configuration for each class. Attributes must include:
            "sizes": (List[float]) Anchor size (in wlh order).
            "strides": (List[float]) Anchor stride.
            "offsets": (List[float]) Anchor offset.
            "rotations": (List[float]): Anchor rotation.
            "matched_threshold": (float) IoU threshold for positive anchors.
            "unmatched_threshold": (float) IoU threshold for negative anchors.
        anchor_area_threshold (float): Threshold for filtering out anchor area. Defaults to 1.
    """
    def __init__(self,
                 output_stride_factor: int,
                 point_cloud_range: List[float],
                 voxel_size: List[float],
                 anchor_configs: List[Dict[str, Any]],
                 anchor_area_threshold: int = 1):
        self.point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
        self.voxel_size = np.array(voxel_size, dtype=np.float32)
        # Voxel grid resolution [X, Y, Z].
        self.grid_size = np.round(
            (self.point_cloud_range[3:6] - self.point_cloud_range[:3]) /
            self.voxel_size).astype(np.int64)
        anchor_generators = [
            AnchorGeneratorStride(**anchor_cfg) for anchor_cfg in anchor_configs
        ]
        feature_map_size = self.grid_size[:2] // output_stride_factor
        # Reverse into [D=1, H, W] order expected by the generators.
        feature_map_size = [*feature_map_size, 1][::-1]
        self._generate_anchors(feature_map_size, anchor_generators)
        self.anchor_area_threshold = anchor_area_threshold
    def _generate_anchors(self, feature_map_size, anchor_generators):
        """Precompute all anchors and their match/unmatch IoU thresholds."""
        anchors_list = []
        match_list = []
        unmatch_list = []
        for gen in anchor_generators:
            anchors = gen.generate(feature_map_size)
            # Flatten the (size, rotation) axes into one anchors-per-cell axis.
            anchors = anchors.reshape(
                [*anchors.shape[:3], -1, anchors.shape[-1]])
            anchors_list.append(anchors)
            num_anchors = np.prod(anchors.shape[:-1])
            match_list.append(
                np.full((num_anchors, ), gen.match_threshold, anchors.dtype))
            unmatch_list.append(
                np.full((num_anchors, ), gen.unmatch_threshold, anchors.dtype))
        anchors = np.concatenate(anchors_list, axis=-2)
        self.matched_thresholds = np.concatenate(match_list, axis=0)
        self.unmatched_thresholds = np.concatenate(unmatch_list, axis=0)
        # Final layout: (num_anchors, 7) boxes [x, y, z, w, l, h, ry].
        self.anchors = anchors.reshape([-1, anchors.shape[-1]])
        self.anchors_bv = rbbox2d_to_near_bbox(self.anchors[:, [0, 1, 3, 4, 6]])
    def __call__(self, sample: Sample):
        """Attach the precomputed anchors (and optionally a validity mask).

        Raises:
            ValueError: If the sample modality is not lidar.
        """
        if sample.modality != "lidar":
            raise ValueError("GenerateAnchors only supports lidar data!")
        sample.anchors = self.anchors
        sample.matched_thresholds = self.matched_thresholds
        sample.unmatched_thresholds = self.unmatched_thresholds
        if self.anchor_area_threshold >= 0:
            # find anchors with area < threshold
            dense_voxel_map = F.sparse_sum_for_anchors_mask(
                sample.coords, tuple(self.grid_size[::-1][1:]))
            # 2D cumulative sums turn the occupancy map into an integral image.
            dense_voxel_map = dense_voxel_map.cumsum(0)
            dense_voxel_map = dense_voxel_map.cumsum(1)
            anchors_area = F.fused_get_anchors_area(
                dense_voxel_map, self.anchors_bv, self.voxel_size,
                self.point_cloud_range, self.grid_size)
            anchors_mask = anchors_area > self.anchor_area_threshold
            sample.anchors_mask = anchors_mask
        return sample
class AnchorGeneratorStride(object):
    """Generates 3D anchors laid out on a regular fixed-stride grid.

    Args:
        sizes: Anchor size [w, l, h].
        anchor_strides: Grid stride along x/y/z.
        anchor_offsets: Grid origin offset along x/y/z.
        rotations: Yaw angles tiled at every grid location.
        matched_threshold: IoU above which an anchor counts as positive.
        unmatched_threshold: IoU below which an anchor counts as negative.
    """
    def __init__(self,
                 sizes=[1.6, 3.9, 1.56],
                 anchor_strides=[0.4, 0.4, 1.0],
                 anchor_offsets=[0.2, -39.8, -1.78],
                 rotations=[0, np.pi / 2],
                 matched_threshold=-1,
                 unmatched_threshold=-1):
        self._sizes = sizes
        self._strides = anchor_strides
        self._offsets = anchor_offsets
        self._rotations = rotations
        # Thresholds are kept together and exposed via read-only properties.
        self._thresholds = (matched_threshold, unmatched_threshold)
    @property
    def match_threshold(self):
        """IoU threshold marking anchors as positive."""
        return self._thresholds[0]
    @property
    def unmatch_threshold(self):
        """IoU threshold marking anchors as negative."""
        return self._thresholds[1]
    def generate(self, feature_map_size):
        """Build the anchor tensor for a [D, H, W] feature-map size."""
        return F.create_anchors_3d_stride(feature_map_size, self._sizes,
                                          self._strides, self._offsets,
                                          self._rotations)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/functional.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Tuple
import cv2
import numba
import numpy as np
from paddle3d.geometries.bbox import (box_collision_test, iou_2d_jit,
rbbox2d_to_near_bbox)
def horizontal_flip(im: np.ndarray) -> np.ndarray:
    """Mirror an image along its width (second) axis.

    Arrays that are neither 2-D nor 3-D are returned unchanged.
    """
    rank = im.ndim
    if rank == 3:
        return im[:, ::-1, :]
    if rank == 2:
        return im[:, ::-1]
    return im
def vertical_flip(im: np.ndarray) -> np.ndarray:
    """Mirror an image along its height (first) axis.

    Arrays that are neither 2-D nor 3-D are returned unchanged.
    """
    rank = im.ndim
    if rank == 3:
        return im[::-1, :, :]
    if rank == 2:
        return im[::-1, :]
    return im
def normalize(im: np.ndarray, mean: Tuple[float, float, float],
              std: Tuple[float, float, float]) -> np.ndarray:
    """Standardize *im* in place: subtract ``mean`` then divide by ``std``.

    Note: mutates and returns the same array object.
    """
    np.subtract(im, mean, out=im)
    np.divide(im, std, out=im)
    return im
def normalize_use_cv2(im: np.ndarray,
                      mean: np.ndarray,
                      std: np.ndarray,
                      to_rgb=True):
    """Normalize an image with mean and std using cv2 in-place operations.

    Args:
        im: Input image (not modified; a float32 copy is normalized).
        mean: Per-channel mean, reshaped to (1, C).
        std: Per-channel std, reshaped to (1, C).
        to_rgb: Convert BGR to RGB before normalizing. Default: True.

    Returns:
        np.ndarray: Normalized float32 copy of the input.
    """
    img = im.copy().astype(np.float32)
    mean = np.float64(mean.reshape(1, -1))
    # Pre-invert std so the hot path uses a multiply instead of a divide.
    stdinv = 1 / np.float64(std.reshape(1, -1))
    if to_rgb:
        # inplace
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
    # inplace
    cv2.subtract(img, mean, img)
    # inplace
    cv2.multiply(img, stdinv, img)
    return img
def get_frustum(im_bbox, C, near_clip=0.001, far_clip=100):
    """Build the 8 camera-frame corners of the frustum spanned by an image bbox.

    Please refer to:
    <https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py#L521>

    Args:
        im_bbox: Image-plane bbox [x1, y1, x2, y2].
        C: 3x3 (or larger) camera intrinsics matrix.
        near_clip: Depth of the near plane.
        far_clip: Depth of the far plane.

    Returns:
        np.ndarray of shape [8, 3]: four near-plane corners followed by four
        far-plane corners, each as [X, Y, depth].
    """
    focal_u = C[0, 0]
    focal_v = -C[1, 1]
    principal = C[0:2, 2]
    x1, y1, x2, y2 = im_bbox[0], im_bbox[1], im_bbox[2], im_bbox[3]
    corners_uv = np.array(
        [[x1, y1], [x1, y2], [x2, y2], [x2, y1]], dtype=C.dtype)
    centered = corners_uv - principal

    def _unproject(depth):
        # Scale pixel offsets into metric X/Y at the given depth.
        return centered / np.array(
            [focal_u / depth, -focal_v / depth], dtype=C.dtype)

    xy = np.concatenate([_unproject(near_clip), _unproject(far_clip)], axis=0)
    depths = np.array(
        [near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]
    return np.concatenate([xy, depths], axis=1)
@numba.jit(nopython=True)
def corner_to_surface_normal(corners):
    """
    Given coordinates the 3D bounding box's corners,
    compute surface normal located at each corner oriented towards inside the box.
    Please refer to:
    <https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py#L764>
    Args:
        corners (float array, [N, 8, 3]): Coordinates of 8 3d box corners.
    Returns:
        normals (float array, [N, 6, 4, 3]): Normals of 6 surfaces. Each surface is represented by 4 normals,
            located at the 4 corners.
    """
    num_boxes = corners.shape[0]
    normals = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
    # For each of the 6 faces, the 4 corner indices that define it.
    corner_indices = np.array([
        0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7
    ]).reshape(6, 4)
    # Explicit loops keep this compatible with numba nopython mode.
    for i in range(num_boxes):
        for j in range(6):
            for k in range(4):
                normals[i, j, k] = corners[i, corner_indices[j, k]]
    return normals
@numba.jit(nopython=True)
def points_to_voxel(points, voxel_size, point_cloud_range, grid_size, voxels,
                    coords, num_points_per_voxel, grid_idx_to_voxel_idx,
                    max_points_in_voxel, max_voxel_num):
    """Scatter points into voxels, filling the preallocated output buffers.

    Args:
        points: (N, C) point array; the first 3 columns are x, y, z.
        voxel_size: Per-axis voxel size [x, y, z].
        point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max].
        grid_size: Voxel grid resolution [x, y, z].
        voxels: Output (max_voxel_num, max_points_in_voxel, C) buffer.
        coords: Output (max_voxel_num, 3) voxel coordinates in z, y, x order.
        num_points_per_voxel: Output per-voxel point counts.
        grid_idx_to_voxel_idx: (z, y, x) -> voxel index map, -1 when unused.
        max_points_in_voxel: Cap on points stored per voxel (extras dropped).
        max_voxel_num: Cap on the number of voxels created (extras dropped).

    Returns:
        int: Number of voxels actually produced.
    """
    num_voxels = 0
    num_points = points.shape[0]
    # x, y, z
    coord = np.zeros(shape=(3, ), dtype=np.int32)
    for point_idx in range(num_points):
        outside = False
        for i in range(3):
            coord[i] = np.floor(
                (points[point_idx, i] - point_cloud_range[i]) / voxel_size[i])
            if coord[i] < 0 or coord[i] >= grid_size[i]:
                outside = True
                break
        if outside:
            # Points outside the configured range are skipped entirely.
            continue
        voxel_idx = grid_idx_to_voxel_idx[coord[2], coord[1], coord[0]]
        if voxel_idx == -1:
            # First point for this grid cell: allocate a new voxel slot.
            voxel_idx = num_voxels
            if num_voxels >= max_voxel_num:
                continue
            num_voxels += 1
            grid_idx_to_voxel_idx[coord[2], coord[1], coord[0]] = voxel_idx
            # coords stores z, y, x (reversed from the local x, y, z coord).
            coords[voxel_idx, 0:3] = coord[::-1]
        curr_num_point = num_points_per_voxel[voxel_idx]
        if curr_num_point < max_points_in_voxel:
            voxels[voxel_idx, curr_num_point] = points[point_idx]
            num_points_per_voxel[voxel_idx] = curr_num_point + 1
    return num_voxels
def create_anchors_3d_stride(feature_size,
                             sizes=[1.6, 3.9, 1.56],
                             anchor_strides=[0.4, 0.4, 0.0],
                             anchor_offsets=[0.2, -39.8, -1.78],
                             rotations=[0, np.pi / 2]):
    """
    Generate 3D anchors according to specified strides.
    Please refer to:
    <https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py#L561>
    Args:
        feature_size: list [D, H, W](zyx)
        sizes: [N, 3] list of list or array, size of anchors, xyz
        anchor_strides: per-axis center spacing [x, y, z]
        anchor_offsets: per-axis center of the first anchor [x, y, z]
        rotations: yaw angles tiled at every location
    Returns:
        anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
    """
    x_stride, y_stride, z_stride = anchor_strides
    x_offset, y_offset, z_offset = anchor_offsets
    # Anchor centers along each axis: offset + index * stride.
    z_centers = np.arange(feature_size[0], dtype=np.float32)
    y_centers = np.arange(feature_size[1], dtype=np.float32)
    x_centers = np.arange(feature_size[2], dtype=np.float32)
    z_centers = z_centers * z_stride + z_offset
    y_centers = y_centers * y_stride + y_offset
    x_centers = x_centers * x_stride + x_offset
    sizes = np.reshape(np.array(sizes, dtype=np.float32), [-1, 3])
    rotations = np.array(rotations, dtype=np.float32)
    rets = np.meshgrid(
        x_centers, y_centers, z_centers, rotations, indexing='ij')
    # Tile each grid along a new size axis so every (center, rotation) pair
    # is repeated once per anchor size.
    tile_shape = [1] * 5
    tile_shape[-2] = sizes.shape[0]
    for i in range(len(rets)):
        rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
        rets[i] = rets[i][..., None]
    sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
    tile_size_shape = list(rets[0].shape)
    tile_size_shape[3] = 1
    sizes = np.tile(sizes, tile_size_shape)
    # Insert sizes between the centers (x, y, z) and the rotation component,
    # yielding [x, y, z, w, l, h, ry] per anchor after concatenation.
    rets.insert(3, sizes)
    ret = np.concatenate(rets, axis=-1)
    # Reorder the leading axes from x, y, z to z, y, x (matching feature_size).
    return np.transpose(ret, [2, 1, 0, 3, 4, 5])
@numba.jit(nopython=True)
def sparse_sum_for_anchors_mask(coors, shape):
    """Count points per BEV cell from voxel coordinates.

    Args:
        coors: (N, 3+) voxel coordinates in z, y, x order; columns 1 and 2
            (y, x) index the output grid.
        shape: (H, W) shape of the output occupancy map.

    Returns:
        float32 array of the given shape with per-cell voxel counts.
    """
    ret = np.zeros(shape, dtype=np.float32)
    for i in range(coors.shape[0]):
        ret[coors[i, 1], coors[i, 2]] += 1
    return ret
@numba.jit(nopython=True)
def fused_get_anchors_area(dense_map, anchors_bv, stride, offset, grid_size):
    """Sum occupied-voxel counts under each anchor's BEV footprint.

    Uses the 2D integral image ``dense_map`` (cumulative sums of the
    occupancy map) so each anchor's area is computed from four lookups.

    Args:
        dense_map: (H, W) integral image of the BEV occupancy map.
        anchors_bv: (N, 4) axis-aligned anchor boxes [x1, y1, x2, y2].
        stride: Voxel size [x, y, z] used to convert metric to grid coords.
        offset: Point cloud range minimums [x_min, y_min, z_min, ...].
        grid_size: Voxel grid resolution [x, y, z].

    Returns:
        (N,) array of occupancy sums, one per anchor.
    """
    anchor_coor = np.zeros(anchors_bv.shape[1:], dtype=np.int32)
    grid_size_x = grid_size[0] - 1
    grid_size_y = grid_size[1] - 1
    N = anchors_bv.shape[0]
    ret = np.zeros((N, ), dtype=dense_map.dtype)
    for i in range(N):
        # Convert the metric bbox to (clamped) grid indices.
        anchor_coor[0] = np.floor((anchors_bv[i, 0] - offset[0]) / stride[0])
        anchor_coor[1] = np.floor((anchors_bv[i, 1] - offset[1]) / stride[1])
        anchor_coor[2] = np.floor((anchors_bv[i, 2] - offset[0]) / stride[0])
        anchor_coor[3] = np.floor((anchors_bv[i, 3] - offset[1]) / stride[1])
        anchor_coor[0] = max(anchor_coor[0], 0)
        anchor_coor[1] = max(anchor_coor[1], 0)
        anchor_coor[2] = min(anchor_coor[2], grid_size_x)
        anchor_coor[3] = min(anchor_coor[3], grid_size_y)
        # Standard integral-image rectangle sum: D - B - C + A.
        ID = dense_map[anchor_coor[3], anchor_coor[2]]
        IA = dense_map[anchor_coor[1], anchor_coor[0]]
        IB = dense_map[anchor_coor[3], anchor_coor[0]]
        IC = dense_map[anchor_coor[1], anchor_coor[2]]
        ret[i] = ID - IB - IC + IA
    return ret
@numba.jit(nopython=True)
def noise_per_box(bev_boxes, corners_2d, ignored_corners_2d, rotation_noises,
                  translation_noises):
    """Pick a collision-free rotation/translation noise per box.

    For each box, candidate noises are tried in order and the first one whose
    perturbed corners do not collide with any other box is kept; boxes with
    no valid candidate keep zero noise.

    Args:
        bev_boxes: (N, 5+) BEV boxes; columns 0-1 are the box center.
        corners_2d: (N, 4, 2) BEV corners of the boxes being perturbed.
        ignored_corners_2d: (M, 4, 2) corners that must also be avoided.
        rotation_noises: (N, A) candidate rotation noises per box.
        translation_noises: (N, A, 3) candidate translation noises per box.

    Returns:
        Tuple of ((N,) selected rotations, (N, 3) selected translations).
    """
    num_boxes = bev_boxes.shape[0]
    num_attempts = translation_noises.shape[1]
    selected_rotation_noises = np.zeros(num_boxes, dtype=rotation_noises.dtype)
    selected_translation_noises = np.zeros((num_boxes, 3),
                                           dtype=translation_noises.dtype)
    all_corners = np.concatenate((corners_2d, ignored_corners_2d), axis=0)
    for i in range(num_boxes):
        for j in range(num_attempts):
            # rotation
            current_corners = np.ascontiguousarray(corners_2d[i] -
                                                   bev_boxes[i, :2])
            rot_sin = np.sin(rotation_noises[i, j])
            rot_cos = np.cos(rotation_noises[i, j])
            rotation_matrix = np.array(
                [[rot_cos, -rot_sin], [rot_sin, rot_cos]], corners_2d.dtype)
            current_corners = current_corners @ rotation_matrix
            # translation
            current_corners += bev_boxes[i, :2] + translation_noises[i, j, :2]
            coll_mat = box_collision_test(
                current_corners.reshape(1, 4, 2), all_corners)
            # A box never collides with itself.
            coll_mat[0, i] = False
            if not coll_mat.any():
                # valid perturbation found
                selected_rotation_noises[i] = rotation_noises[i, j]
                selected_translation_noises[i] = translation_noises[i, j]
                break
    return selected_rotation_noises, selected_translation_noises
@numba.jit(nopython=True)
def perturb_object_points_(points, centers, point_masks, rotation_noises,
                           translation_noises):
    """Apply per-box rotation/translation noise to box points, in place.

    Each point belonging to a box (per ``point_masks``) is rotated around
    that box's center and then translated. The trailing underscore marks the
    in-place mutation of ``points``.

    Args:
        points: (P, C) point array; first 3 columns are x, y, z (mutated).
        centers: (N, 3+) box centers the rotations pivot around.
        point_masks: (P, N) 1 where point i belongs to box j.
        rotation_noises: (N,) yaw noise per box.
        translation_noises: (N, 3) translation noise per box.
    """
    num_boxes = centers.shape[0]
    num_points = points.shape[0]
    # Precompute one rotation matrix per box (rotation about the z axis).
    rotation_matrices = np.zeros((num_boxes, 3, 3), dtype=points.dtype)
    for i in range(num_boxes):
        angle = rotation_noises[i]
        rot_sin = np.sin(angle)
        rot_cos = np.cos(angle)
        rotation_matrix = np.eye(3, dtype=points.dtype)
        rotation_matrix[0, 0] = rot_cos
        rotation_matrix[0, 1] = -rot_sin
        rotation_matrix[1, 0] = rot_sin
        rotation_matrix[1, 1] = rot_cos
        rotation_matrices[i] = rotation_matrix
    for i in range(num_points):
        for j in range(num_boxes):
            if point_masks[i, j] == 1:
                # rotation
                points[i, :3] -= centers[j, :3]
                points[i:i + 1, :3] = np.ascontiguousarray(
                    points[i:i + 1, :3]) @ rotation_matrices[j]
                points[i, :3] += centers[j, :3]
                # translation
                points[i, :3] += translation_noises[j]
                # Each point is perturbed by at most one box.
                break
@numba.jit(nopython=True)
def perturb_object_bboxes_3d_(bboxes_3d, rotation_noises, translation_noises):
    """Apply rotation (yaw, column 6) and translation noise to boxes, in place.

    Args:
        bboxes_3d: (N, 7+) boxes [x, y, z, w, l, h, ry, ...] (mutated).
        rotation_noises: (N,) yaw noise added to column 6.
        translation_noises: (N, 3) offset added to the box centers.
    """
    bboxes_3d[:, 6] += rotation_noises
    bboxes_3d[:, :3] += translation_noises
def nearest_iou_similarity(bboxes_3d_1, bboxes_3d_2):
    """
    Compute similarity based on the squared distance metric.
    This function computes pairwise similarity between two BoxLists based on the
    negative squared distance metric.
    """
    # Columns [x, y, w, l, ry] define each box's BEV rectangle.
    bev_cols = [0, 1, 3, 4, 6]
    near_bv_1 = rbbox2d_to_near_bbox(bboxes_3d_1[:, bev_cols])
    near_bv_2 = rbbox2d_to_near_bbox(bboxes_3d_2[:, bev_cols])
    return iou_2d_jit(near_bv_1, near_bv_2)
def random_depth_image_horizontal(data_dict=None):
    """Randomly flip image, depth map and 3D boxes left-right (50% chance).

    Args:
        data_dict: dict holding "images" (H, W, 3), "depth_maps" (H, W),
            "gt_boxes" (N, 7 in LiDAR coordinates [x, y, z, w, l, h, ry])
            and a "calib" calibration object. ``None`` is passed through.

    Returns:
        The same dict with the (possibly flipped) entries written back.
    """
    if data_dict is None:
        return
    image = data_dict["images"]
    depth_map = data_dict["depth_maps"]
    gt_boxes = data_dict['gt_boxes']
    calib = data_dict["calib"]
    # Apply the flip with probability 0.5.
    do_flip = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
    if not do_flip:
        data_dict['images'] = image
        data_dict['depth_maps'] = depth_map
        data_dict['gt_boxes'] = gt_boxes
        return data_dict
    flipped_image = np.fliplr(image)
    flipped_depth = np.fliplr(depth_map)
    # Flip 3D box centroids by mirroring their image-plane projections,
    # then lifting them back to LiDAR coordinates.
    flipped_boxes = copy.copy(gt_boxes)
    centers = flipped_boxes[:, :3]
    img_pts, img_depth = calib.lidar_to_img(centers)
    img_pts[:, 0] = image.shape[1] - img_pts[:, 0]
    pts_rect = calib.img_to_rect(
        u=img_pts[:, 0], v=img_pts[:, 1], depth_rect=img_depth)
    flipped_boxes[:, :3] = calib.rect_to_lidar(pts_rect)
    # Mirroring negates the heading angle.
    flipped_boxes[:, 6] = -flipped_boxes[:, 6]
    data_dict['images'] = flipped_image
    data_dict['depth_maps'] = flipped_depth
    data_dict['gt_boxes'] = flipped_boxes
    return data_dict
def blend_transform(img: np.ndarray, src_image: np.ndarray, src_weight: float,
                    dst_weight: float):
    """Blend ``src_image`` into ``img`` with the given weights.

    Returns ``src_weight * src_image + dst_weight * img``. When ``img`` is a
    uint8 image the blend is done in float32 and the result is clipped to
    [0, 255] and cast back to uint8; otherwise the raw float blend is
    returned unchanged.
    """
    is_uint8 = img.dtype == np.uint8
    base = img.astype(np.float32) if is_uint8 else img
    blended = src_weight * src_image + dst_weight * base
    if is_uint8:
        return np.clip(blended, 0, 255).astype(np.uint8)
    return blended
def sample_point(sample, num_points):
    """Randomly sample the point cloud to exactly ``num_points`` points.

    When downsampling, far points (>= 40 m) are preferentially kept and the
    remaining budget is filled with random near points. When upsampling,
    points are duplicated by sampling with replacement.

    Args:
        sample: Sample whose ``data`` holds an (N, >=3) point array.
        num_points (int): Target point count; -1 disables sampling.

    Returns:
        The same sample with ``sample.data`` re-indexed.
    """
    if num_points == -1:
        return sample
    points = sample.data
    if num_points < len(points):
        # Split points into near (< 40 m) and far by distance to the sensor.
        pts_depth = np.linalg.norm(points[:, 0:3], axis=1)
        pts_near_flag = pts_depth < 40.0
        far_idxs_choice = np.where(pts_near_flag == 0)[0]
        near_idxs = np.where(pts_near_flag == 1)[0]
        choice = []
        if num_points > len(far_idxs_choice):
            # Keep every far point; fill the rest with random near points.
            # NOTE(review): np.random.choice(..., replace=False) raises when
            # fewer near points exist than requested — confirm inputs.
            near_idxs_choice = np.random.choice(
                near_idxs, num_points - len(far_idxs_choice), replace=False)
            choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
                if len(far_idxs_choice) > 0 else near_idxs_choice
        else:
            # Far points alone exceed the budget: sample uniformly over all.
            choice = np.arange(0, len(points), dtype=np.int32)
            choice = np.random.choice(choice, num_points, replace=False)
        np.random.shuffle(choice)
    else:
        choice = np.arange(0, len(points), dtype=np.int32)
        if num_points > len(points):
            # Pad by duplicating random points (sampled with replacement).
            extra_choice = np.random.choice(choice, num_points - len(points))
            choice = np.concatenate((choice, extra_choice), axis=0)
        np.random.shuffle(choice)
    sample.data = sample.data[choice]
    return sample
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/transform.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from typing import Any, List, Tuple, Union
import cv2
import numpy as np
from PIL import Image
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
from paddle3d.geometries.bbox import BBoxes3D, CoordMode, points_in_convex_polygon_3d_jit
from paddle3d.ops import voxelize
from paddle3d.sample import Sample
from paddle3d.transforms import functional as F
from paddle3d.transforms.base import TransformABC
from paddle3d.transforms.functional import points_to_voxel
from paddle3d.utils import box_utils
# Public transform components exported by this module.
__all__ = [
    "RandomHorizontalFlip", "RandomVerticalFlip", "GlobalRotate", "GlobalScale",
    "GlobalTranslate", "ShufflePoint", "SamplePoint", "SamplePointByVoxels",
    "FilterPointOutsideRange", "FilterPointInsideRange", "FilterBBoxOutsideRange",
    "HardVoxelize", "RandomObjectPerturb", "ConvertBoxFormat", "ResizeShortestEdge",
    "RandomContrast", "RandomBrightness", "RandomSaturation",
    "ToVisionBasedBox", "PhotoMetricDistortionMultiViewImage",
    "RandomScaleImageMultiViewImage", "FilterSmallBBox"
]
@manager.TRANSFORMS.add_component
class RandomHorizontalFlip(TransformABC):
    """
    Randomly flip the sample (image or point cloud) and its annotations
    left-right with probability ``prob``.

    Note:
        If the inputs are pixel indices, they are flipped by `(W - 1 - x, H - 1 - y)`.
        If the inputs are floating point coordinates, they are flipped by `(W - x, H - y)`.
    """

    def __init__(self, prob: float = 0.5, input_type: str = 'pixel_indices'):
        # input_type selects which flip convention applies (see class docstring).
        self.prob = prob
        self.input_type = input_type

    def __call__(self, sample: Sample):
        if np.random.random() < self.prob:
            if sample.modality == "image":
                sample.data = F.horizontal_flip(sample.data)
                h, w, c = sample.data.shape
            elif sample.modality == "lidar":
                sample.data.flip(axis=1)
            # NOTE(review): h/w are only bound in the "image" branch; a lidar
            # sample whose meta carries camera_intrinsic (or that has 2D
            # boxes) would hit an undefined name below — confirm lidar
            # samples never do.
            if self.input_type == 'pixel_indices':
                # Flip camera intrinsics
                if "camera_intrinsic" in sample.meta:
                    sample.meta.camera_intrinsic[
                        0, 2] = w - sample.meta.camera_intrinsic[0, 2] - 1
                # Flip bbox
                if sample.bboxes_3d is not None:
                    sample.bboxes_3d.horizontal_flip()
                if sample.bboxes_2d is not None and sample.modality == "image":
                    sample.bboxes_2d.horizontal_flip(image_width=w)
            elif self.input_type == 'floating_point_coordinates':
                # Flip camera intrinsics
                if "camera_intrinsic" in sample.meta:
                    sample.meta.camera_intrinsic[
                        0, 2] = w - sample.meta.camera_intrinsic[0, 2]
                # Flip bbox
                if sample.bboxes_3d is not None:
                    sample.bboxes_3d.horizontal_flip_coords()
                if sample.bboxes_2d is not None and sample.modality == "image":
                    sample.bboxes_2d.horizontal_flip_coords(image_width=w)
        return sample
@manager.TRANSFORMS.add_component
class ToVisionBasedBox(TransformABC):
    """Convert the sample's 3D boxes to a vision-based (KITTI camera) format."""

    def __call__(self, sample: Sample):
        converted = sample.bboxes_3d.to_vision_based_3d_box()
        sample.bboxes_3d = BBoxes3D(
            converted,
            coordmode=CoordMode.KittiCamera,
            origin=[.5, 1, .5],
            rot_axis=1)
        return sample
@manager.TRANSFORMS.add_component
class RandomVerticalFlip(TransformABC):
    """Randomly flip the sample (image or point cloud) and its annotations
    top-bottom with probability ``prob``."""

    def __init__(self, prob: float = 0.5):
        # prob: probability of applying the vertical flip.
        self.prob = prob

    def __call__(self, sample: Sample):
        if np.random.random() < self.prob:
            if sample.modality == "image":
                sample.data = F.vertical_flip(sample.data)
                h, w, c = sample.data.shape
            elif sample.modality == "lidar":
                sample.data.flip(axis=0)
            # NOTE(review): h is only bound in the "image" branch; a lidar
            # sample whose meta carries camera_intrinsic would raise a
            # NameError below — confirm lidar samples never do.
            # Flip camera intrinsics
            if "camera_intrinsic" in sample.meta:
                sample.meta.camera_intrinsic[
                    1, 2] = h - sample.meta.camera_intrinsic[1, 2] - 1
            # Flip bbox
            if sample.bboxes_3d is not None:
                sample.bboxes_3d.vertical_flip()
            if sample.bboxes_2d is not None and sample.modality == "image":
                sample.bboxes_2d.vertical_flip(image_height=h)
        return sample
@manager.TRANSFORMS.add_component
class GlobalRotate(TransformABC):
    """Rotate the point cloud and its 3D boxes around the z axis by a
    random angle drawn uniformly from [min_rot, max_rot]."""

    def __init__(self, min_rot: float = -np.pi / 4, max_rot: float = np.pi / 4):
        self.min_rot = min_rot
        self.max_rot = max_rot

    def __call__(self, sample: Sample):
        if sample.modality != "lidar":
            raise ValueError("GlobalRotate only supports lidar data!")
        theta = np.random.uniform(self.min_rot, self.max_rot)
        # Apply the same rotation to points and boxes.
        sample.data.rotate_around_z(theta)
        if sample.bboxes_3d is not None:
            sample.bboxes_3d.rotate_around_z(theta)
        return sample
@manager.TRANSFORMS.add_component
class GlobalScale(TransformABC):
    """Scale the point cloud and its 3D boxes by a random factor.

    Args:
        min_scale (float): Lower bound of the uniform scale factor.
        max_scale (float): Upper bound of the uniform scale factor.
        size: Shape forwarded to ``np.random.uniform`` for the factor.
    """

    def __init__(self,
                 min_scale: float = 0.95,
                 max_scale: float = 1.05,
                 size=None):
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.size = size

    def __call__(self, sample: Sample):
        if sample.modality != "lidar":
            raise ValueError("GlobalScale only supports lidar data!")
        scale_factor = np.random.uniform(
            self.min_scale, self.max_scale, size=self.size)
        # Apply the same factor to points and boxes.
        sample.data.scale(scale_factor)
        if sample.bboxes_3d is not None:
            sample.bboxes_3d.scale(scale_factor)
        return sample
@manager.TRANSFORMS.add_component
class GlobalTranslate(TransformABC):
    """
    Translate sample by a random offset.
    Args:
        translation_std (Union[float, List[float], Tuple[float]], optional):
            Per-axis standard deviation ("normal") or half-range ("uniform")
            of the translation offset. Defaults to (.2, .2, .2).
        distribution (str):
            The random distribution, "normal" or "uniform". Defaults to normal.
    """

    def __init__(
            self,
            translation_std: Union[float, List[float], Tuple[float]] = (.2, .2,
                                                                        .2),
            distribution="normal"):
        if not isinstance(translation_std, (list, tuple)):
            # Broadcast a scalar std to all three axes.
            translation_std = [
                translation_std, translation_std, translation_std
            ]
        self.translation_std = translation_std
        self.distribution = distribution

    def __call__(self, sample: Sample):
        if sample.modality != "lidar":
            # Fixed: the original error message wrongly named GlobalScale.
            raise ValueError("GlobalTranslate only supports lidar data!")
        if self.distribution == "normal":
            translation = np.random.normal(scale=self.translation_std, size=3)
        elif self.distribution == "uniform":
            # Fixed: the original used translation_std[0] as the bound for
            # every axis, silently ignoring per-axis values.
            bound = np.asarray(self.translation_std)
            translation = np.random.uniform(low=-bound, high=bound, size=3)
        else:
            raise ValueError(
                "GlobalTranslate only supports normal and uniform random distribution!"
            )
        # Shift points and boxes by the same offset.
        sample.data.translate(translation)
        if sample.bboxes_3d is not None:
            sample.bboxes_3d.translate(translation)
        return sample
@manager.TRANSFORMS.add_component
class ShufflePoint(TransformABC):
    """Randomly shuffle the order of points in a lidar sample."""

    def __call__(self, sample: Sample):
        if sample.modality == "lidar":
            sample.data.shuffle()
            return sample
        raise ValueError("ShufflePoint only supports lidar data!")
@manager.TRANSFORMS.add_component
class ConvertBoxFormat(TransformABC):
    """Convert boxes to the lidar training format and fold labels in.

    Boxes go from [x, y, z, w, l, h, yaw] with a bottom center to
    [x, y, z, l, w, h, heading] with an object center, headings are wrapped
    into one period, and the (1-based) class id is appended as a final
    column; the separate ``labels`` field is then dropped.
    """

    def __call__(self, sample: Sample):
        # [x,y,z,w,l,h,yaw] (bottom center) -> [x,y,z,l,w,h,heading] (object center)
        boxes = box_utils.boxes3d_kitti_lidar_to_lidar(sample.bboxes_3d)
        # limit heading to one period
        boxes[:, -1] = box_utils.limit_period(
            boxes[:, -1], offset=0.5, period=2 * np.pi)
        # class ids start from 1 (0 is reserved) and become the last column
        class_ids = (sample.labels + 1).reshape(-1, 1).astype(np.float32)
        sample.bboxes_3d = np.concatenate([boxes, class_ids], axis=-1)
        sample.pop('labels', None)
        return sample
@manager.TRANSFORMS.add_component
class SamplePoint(TransformABC):
    """Sub- or over-sample the point cloud to a fixed number of points."""

    def __init__(self, num_points):
        self.num_points = num_points

    def __call__(self, sample: Sample):
        return F.sample_point(sample, self.num_points)
@manager.TRANSFORMS.add_component
class SamplePointByVoxels(TransformABC):
    """Downsample a point cloud by voxelization followed by random sampling.

    The cloud is hard-voxelized, one point per voxel is kept, and the result
    is randomly sampled to ``num_points`` points. ``num_points == -1``
    disables sampling entirely (dynamic voxelization).
    """

    def __init__(self, voxel_size, max_points_per_voxel, max_num_of_voxels,
                 num_points, point_cloud_range):
        self.voxel_size = voxel_size
        self.max_points_per_voxel = max_points_per_voxel
        self.max_num_of_voxels = max_num_of_voxels
        self.num_points = num_points
        self.point_cloud_range = point_cloud_range

    def transform_points_to_voxels(self, sample):
        """Run the hard_voxelize op and stash the voxel tensors on the sample."""
        points = sample.data
        points = paddle.to_tensor(points)
        voxels, coordinates, num_points, voxels_num = voxelize.hard_voxelize(
            points, self.voxel_size, self.point_cloud_range,
            self.max_points_per_voxel, self.max_num_of_voxels)
        # Only the first voxels_num rows are valid; trim and convert to numpy.
        voxels = voxels[:voxels_num, :, :].numpy()
        coordinates = coordinates[:voxels_num, :].numpy()
        num_points = num_points[:voxels_num, :].numpy()
        sample['voxels'] = voxels
        sample['voxel_coords'] = coordinates
        sample['voxel_num_points'] = num_points
        return sample

    def sample_points_by_voxels(self, sample):
        """Voxel-subsample the cloud, then sample it to ``self.num_points``."""
        if self.num_points == -1:  # dynamic voxelization !
            return sample
        # voxelization
        sample = self.transform_points_to_voxels(sample)
        points = sample['voxels'][:, 0]  # remain only one point per voxel
        sample.data = points
        # sampling
        sample = F.sample_point(sample, self.num_points)
        # The voxel buffers were only needed for subsampling; drop them.
        sample.pop('voxels')
        sample.pop('voxel_coords')
        sample.pop('voxel_num_points')
        return sample

    def __call__(self, sample):
        return self.sample_points_by_voxels(sample)
@manager.TRANSFORMS.add_component
class FilterBBoxOutsideRange(TransformABC):
    """Drop 3D boxes (and their labels) that fall outside a point-cloud range."""

    def __init__(self, point_cloud_range: Tuple[float]):
        self.point_cloud_range = np.asarray(point_cloud_range, dtype='float32')

    def __call__(self, sample: Sample):
        if sample.bboxes_3d.size != 0:
            keep = sample.bboxes_3d.get_mask_of_bboxes_outside_range(
                self.point_cloud_range)
            sample.bboxes_3d = sample.bboxes_3d.masked_select(keep)
            sample.labels = sample.labels[keep]
        return sample
@manager.TRANSFORMS.add_component
class FilterSmallBBox(TransformABC):
    """Drop 3D boxes (and their labels) smaller than a size threshold."""

    def __init__(self, size_thr: Tuple[float]):
        self.size_thr = np.asarray(size_thr, dtype='float32')

    def __call__(self, sample: Sample):
        if sample.bboxes_3d.size == 0:
            return sample
        keep = sample.bboxes_3d.get_mask_of_small_bboxes(self.size_thr)
        sample.bboxes_3d = sample.bboxes_3d.masked_select(keep)
        sample.labels = sample.labels[keep]
        return sample
@manager.TRANSFORMS.add_component
class FilterPointOutsideRange(TransformABC):
    """Keep only the points selected by the outside-range point mask."""

    def __init__(self, point_cloud_range: Tuple[float]):
        self.point_cloud_range = np.asarray(point_cloud_range, dtype='float32')

    def __call__(self, sample: Sample):
        keep = sample.data.get_mask_of_points_outside_range(
            self.point_cloud_range)
        sample.data = sample.data[keep]
        return sample
@manager.TRANSFORMS.add_component
class FilterPointInsideRange(TransformABC):
    """Keep only the points selected by the inside-range point mask."""

    def __init__(self, point_cloud_range: Tuple[float]):
        self.point_cloud_range = np.asarray(point_cloud_range, dtype='float32')

    def __call__(self, sample: Sample):
        keep = sample.data.get_mask_of_points_inside_range(
            self.point_cloud_range)
        sample.data = sample.data[keep]
        return sample
@manager.TRANSFORMS.add_component
class HardVoxelize(TransformABC):
    """Voxelize a lidar point cloud into fixed-capacity dense voxel buffers.

    Args:
        point_cloud_range (Tuple[float]): [x_min, y_min, z_min, x_max, y_max, z_max].
        voxel_size (Tuple[float]): Per-axis voxel size [x, y, z].
        max_points_in_voxel (int): Maximum points retained per voxel.
        max_voxel_num (int): Upper bound on the number of voxels produced.
    """

    def __init__(self, point_cloud_range: Tuple[float],
                 voxel_size: Tuple[float], max_points_in_voxel: int,
                 max_voxel_num: int):
        self.max_points_in_voxel = max_points_in_voxel
        self.max_voxel_num = max_voxel_num
        self.voxel_size = np.asarray(voxel_size, dtype='float32')
        self.point_cloud_range = np.asarray(point_cloud_range, dtype='float32')
        # Number of grid cells per axis: range extent / voxel size.
        self.grid_size = np.round(
            (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) /
            self.voxel_size).astype('int32')

    def __call__(self, sample: Sample):
        """Voxelize ``sample.data``; attaches voxels / coords /
        num_points_per_voxel to the sample and drops ``sweeps``."""
        if sample.modality != "lidar":
            raise ValueError("Voxelize only supports lidar data!")
        # Voxelize
        num_points, num_point_dim = sample.data.shape[0:2]
        # Pre-allocate output buffers at maximum capacity; points_to_voxel
        # fills them and reports how many voxels were actually produced.
        voxels = np.zeros(
            (self.max_voxel_num, self.max_points_in_voxel, num_point_dim),
            dtype=sample.data.dtype)
        coords = np.zeros((self.max_voxel_num, 3), dtype=np.int32)
        num_points_per_voxel = np.zeros((self.max_voxel_num, ), dtype=np.int32)
        grid_size_z, grid_size_y, grid_size_x = self.grid_size[::-1]
        # Maps a (z, y, x) grid cell to its voxel index; -1 marks an empty cell.
        grid_idx_to_voxel_idx = np.full((grid_size_z, grid_size_y, grid_size_x),
                                        -1,
                                        dtype=np.int32)
        num_voxels = points_to_voxel(
            sample.data, self.voxel_size, self.point_cloud_range,
            self.grid_size, voxels, coords, num_points_per_voxel,
            grid_idx_to_voxel_idx, self.max_points_in_voxel, self.max_voxel_num)
        # Trim the buffers down to the voxels actually used.
        voxels = voxels[:num_voxels]
        coords = coords[:num_voxels]
        num_points_per_voxel = num_points_per_voxel[:num_voxels]
        sample.voxels = voxels
        sample.coords = coords
        sample.num_points_per_voxel = num_points_per_voxel
        sample.pop('sweeps', None)
        return sample
@manager.TRANSFORMS.add_component
class RandomObjectPerturb(TransformABC):
    """
    Randomly perturb (rotate and translate) each object.
    Args:
        rotation_range (Union[float, List[float], Tuple[float]], optional):
            Range of random rotation. Defaults to pi / 4.
        translation_std (Union[float, List[float], Tuple[float]], optional):
            Standard deviation of random translation. Defaults to 1.0.
        max_num_attempts (int): Maximum number of perturbation attempts. Defaults to 100.
    """

    def __init__(
            self,
            rotation_range: Union[float, List[float], Tuple[float]] = np.pi / 4,
            translation_std: Union[float, List[float], Tuple[float]] = 1.0,
            max_num_attempts: int = 100):
        if not isinstance(rotation_range, (list, tuple)):
            # Scalar range becomes the symmetric interval [-r, r].
            rotation_range = [-rotation_range, rotation_range]
        self.rotation_range = rotation_range
        if not isinstance(translation_std, (list, tuple)):
            # Scalar std is broadcast to all three axes.
            translation_std = [
                translation_std, translation_std, translation_std
            ]
        self.translation_std = translation_std
        self.max_num_attempts = max_num_attempts

    def __call__(self, sample: Sample):
        """Perturb each object's points and box in place with collision-free
        noise selected from ``max_num_attempts`` candidates per object."""
        num_objects = sample.bboxes_3d.shape[0]
        # Draw max_num_attempts candidate noises per object; F.noise_per_box
        # selects, per object, a candidate that avoids box collisions.
        rotation_noises = np.random.uniform(
            self.rotation_range[0],
            self.rotation_range[1],
            size=[num_objects, self.max_num_attempts])
        translation_noises = np.random.normal(
            scale=self.translation_std,
            size=[num_objects, self.max_num_attempts, 3])
        rotation_noises, translation_noises = F.noise_per_box(
            sample.bboxes_3d[:, [0, 1, 3, 4, 6]], sample.bboxes_3d.corners_2d,
            sample.ignored_bboxes_3d.corners_2d, rotation_noises,
            translation_noises)
        # perturb points w.r.t objects' centers (inplace operation)
        normals = F.corner_to_surface_normal(sample.bboxes_3d.corners_3d)
        point_masks = points_in_convex_polygon_3d_jit(sample.data[:, :3],
                                                      normals)
        F.perturb_object_points_(sample.data, sample.bboxes_3d[:, :3],
                                 point_masks, rotation_noises,
                                 translation_noises)
        # perturb bboxes_3d w.r.t to objects' centers (inplace operation)
        F.perturb_object_bboxes_3d_(sample.bboxes_3d, rotation_noises,
                                    translation_noises)
        return sample
@manager.TRANSFORMS.add_component
class ResizeShortestEdge(TransformABC):
    """Resize the image so that its shortest edge hits a target length.

    The target is drawn uniformly from ``short_edge_length`` when
    ``sample_style == "range"``, or chosen from the list when
    ``sample_style == "choice"``; the longer edge is capped at ``max_size``.
    Camera intrinsics and 2D boxes are rescaled to match.
    """

    def __init__(self,
                 short_edge_length,
                 max_size,
                 sample_style="range",
                 interp=Image.BILINEAR):
        super().__init__()
        assert sample_style in ["range", "choice"], sample_style
        self.is_range = sample_style == "range"
        if isinstance(short_edge_length, int):
            # Fix: normalize a bare int to a (min, max) pair BEFORE storing,
            # so __call__ can always index self.short_edge_length. The
            # original stored the raw int, which broke "range" sampling
            # when an int was passed.
            short_edge_length = (short_edge_length, short_edge_length)
        self.short_edge_length = short_edge_length
        self.max_size = max_size
        self.interp = interp
        if self.is_range:
            assert len(short_edge_length) == 2, (
                "short_edge_length must be two values using 'range' sample style."
                f" Got {short_edge_length}!")

    def __call__(self, sample: Sample):
        """Resize ``sample.data`` and rescale intrinsics / 2D boxes to match."""
        h, w = sample.data.shape[:2]
        if self.is_range:
            size = np.random.randint(self.short_edge_length[0],
                                     self.short_edge_length[1] + 1)
        else:
            size = np.random.choice(self.short_edge_length)
        newh, neww = self.get_output_shape(h, w, size, self.max_size)
        sample.data = self.apply_image(sample.data, h, w, newh, neww)
        sample.image_sizes = np.asarray((h, w))
        if "camera_intrinsic" in sample.meta:
            sample.meta.camera_intrinsic = self.apply_intrinsics(
                sample.meta.camera_intrinsic, h, w, newh, neww)
        if sample.bboxes_2d is not None and sample.modality == "image":
            sample.bboxes_2d.resize(h, w, newh, neww)
        return sample

    def apply_image(self, img, h, w, newh, neww):
        """Resize ``img`` to (newh, neww): PIL for uint8, paddle interpolate otherwise."""
        assert len(img.shape) <= 4
        if img.dtype == np.uint8:
            if len(img.shape) > 2 and img.shape[2] == 1:
                pil_image = Image.fromarray(img[:, :, 0], mode="L")
            else:
                pil_image = Image.fromarray(img)
            pil_image = pil_image.resize((neww, newh), self.interp)
            ret = np.asarray(pil_image)
            if len(img.shape) > 2 and img.shape[2] == 1:
                # Restore the dropped trailing channel axis.
                ret = np.expand_dims(ret, -1)
        else:
            # PIL only supports uint8
            if any(x < 0 for x in img.strides):
                img = np.ascontiguousarray(img)
            img = paddle.to_tensor(img)
            shape = list(img.shape)
            shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
            img = img.reshape(shape_4d).transpose([2, 3, 0, 1])  # hw(c) -> nchw
            _PIL_RESIZE_TO_INTERPOLATE_MODE = {
                Image.NEAREST: "nearest",
                Image.BILINEAR: "bilinear",
                Image.BICUBIC: "bicubic",
            }
            mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]
            align_corners = None if mode == "nearest" else False
            img = nn.functional.interpolate(
                img, (newh, neww), mode=mode, align_corners=align_corners)
            shape[:2] = (newh, neww)
            ret = img.transpose([2, 3, 0,
                                 1]).reshape(shape).numpy()  # nchw -> hw(c)
        return ret

    def apply_intrinsics(self, intrinsics, h, w, newh, neww):
        """Scale a 3x3 pinhole intrinsic matrix for the resized image."""
        assert intrinsics.shape == (3, 3)
        assert intrinsics[0, 1] == 0  # undistorted
        assert np.allclose(intrinsics,
                           np.triu(intrinsics))  # check if upper triangular
        factor_x = neww / w
        factor_y = newh / h
        # Row-wise scaling: fx/cx by factor_x, fy/cy by factor_y.
        new_intrinsics = intrinsics * np.float32([factor_x, factor_y, 1
                                                  ]).reshape(3, 1)
        return new_intrinsics

    @staticmethod
    def get_output_shape(oldh: int, oldw: int, short_edge_length: int,
                         max_size: int) -> Tuple[int, int]:
        """
        Compute the output size given input size and target short edge length.
        """
        h, w = oldh, oldw
        size = short_edge_length * 1.0
        scale = size / min(h, w)
        if h < w:
            newh, neww = size, scale * w
        else:
            newh, neww = scale * h, size
        if max(newh, neww) > max_size:
            # Cap the long edge at max_size, shrinking both proportionally.
            scale = max_size * 1.0 / max(newh, neww)
            newh = newh * scale
            neww = neww * scale
        neww = int(neww + 0.5)
        newh = int(newh + 0.5)
        return (newh, neww)
@manager.TRANSFORMS.add_component
class RandomContrast(TransformABC):
    """Randomly adjusts image contrast.

    The blend weight ``w`` is uniformly sampled from
    [intensity_min, intensity_max) and the image is blended with its mean:
    w < 1 reduces contrast, w == 1 keeps the image, w > 1 increases it.
    """

    def __init__(self, intensity_min: float, intensity_max: float):
        super().__init__()
        self.intensity_min = intensity_min
        self.intensity_max = intensity_max

    def __call__(self, sample: Sample):
        weight = np.random.uniform(self.intensity_min, self.intensity_max)
        sample.data = F.blend_transform(
            sample.data,
            src_image=sample.data.mean(),
            src_weight=1 - weight,
            dst_weight=weight)
        return sample
@manager.TRANSFORMS.add_component
class RandomBrightness(TransformABC):
    """
    Randomly transforms image brightness.
    Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
    - intensity < 1 will reduce brightness
    - intensity = 1 will preserve the input image
    - intensity > 1 will increase brightness
    """

    def __init__(self, intensity_min: float, intensity_max: float):
        super().__init__()
        self.intensity_min = intensity_min
        self.intensity_max = intensity_max

    def __call__(self, sample: Sample):
        # Blending toward black (src_image=0) scales all pixel values by w.
        w = np.random.uniform(self.intensity_min, self.intensity_max)
        sample.data = F.blend_transform(
            sample.data, src_image=0, src_weight=1 - w, dst_weight=w)
        return sample
@manager.TRANSFORMS.add_component
class RandomSaturation(TransformABC):
    """
    Randomly transforms image saturation.
    Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
    - intensity < 1 will reduce saturation (blend toward grayscale)
    - intensity = 1 will preserve the input image
    - intensity > 1 will increase saturation
    """

    def __init__(self, intensity_min: float, intensity_max: float):
        super().__init__()
        self.intensity_min = intensity_min
        self.intensity_max = intensity_max

    def __call__(self, sample: Sample):
        assert sample.data.shape[
            -1] == 3, "RandomSaturation only works on RGB images"
        w = np.random.uniform(self.intensity_min, self.intensity_max)
        # Grayscale reference using the standard Rec.601 luma weights.
        grayscale = sample.data.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
        sample.data = F.blend_transform(
            sample.data, src_image=grayscale, src_weight=1 - w, dst_weight=w)
        return sample
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap a (possibly array-valued) angle into one period.

    Args:
        val: Value(s) to wrap.
        offset (float, optional): Fraction of the period used as the lower
            bound offset. Defaults to 0.5.
        period (float, optional): Period length. Defaults to np.pi.

    Returns:
        Value(s) wrapped into [-offset * period, (1 - offset) * period).
    """
    whole_periods = np.floor(val / period + offset)
    return val - whole_periods * period
@manager.TRANSFORMS.add_component
class SampleRangeFilter(object):
    """
    Filter ground-truth boxes whose BEV centers fall outside a range.

    Args:
        point_cloud_range (list[float]): Point cloud range
            [x_min, y_min, z_min, x_max, y_max, z_max].
    """

    def __init__(self, point_cloud_range):
        self.pcd_range = np.array(point_cloud_range, dtype=np.float32)

    def in_range_bev(self, box_range, gt_bboxes_3d):
        """
        Return a boolean mask of boxes whose (x, y) centers lie strictly
        inside ``box_range`` = [min_x, min_y, max_x, max_y].
        """
        xs = gt_bboxes_3d[:, 0]
        ys = gt_bboxes_3d[:, 1]
        return ((xs > box_range[0]) & (ys > box_range[1])
                & (xs < box_range[2]) & (ys < box_range[3]))

    def limit_yaw(self, gt_bboxes_3d, offset=0.5, period=np.pi):
        """Wrap the yaw column (6) of the boxes into one period in place.
        Args:
            offset (float): The offset of the yaw.
            period (float): The expected period.
        """
        gt_bboxes_3d[:, 6] = limit_period(gt_bboxes_3d[:, 6], offset, period)
        return gt_bboxes_3d

    def __call__(self, sample):
        """Filter 'gt_bboxes_3d' / 'gt_labels_3d' by range and return sample.
        Args:
            sample (dict): Result dict from loading pipeline.
        Returns:
            dict: Results after filtering; 'gt_bboxes_3d' and 'gt_labels_3d'
            keys are updated in the Sample.
        """
        # BBoxes3D stores centers in (x, y); plain arrays use (x, z) columns.
        if isinstance(sample['gt_bboxes_3d'], (BBoxes3D)):
            bev_range = self.pcd_range[[0, 1, 3, 4]]
        else:
            bev_range = self.pcd_range[[0, 2, 3, 5]]
        boxes = sample['gt_bboxes_3d']
        labels = sample['gt_labels_3d']
        keep = self.in_range_bev(bev_range, boxes)
        boxes = boxes[keep]
        labels = labels[keep.astype(np.bool_)]
        # limit rad to [-pi, pi]
        sample['gt_bboxes_3d'] = self.limit_yaw(
            boxes, offset=0.5, period=2 * np.pi)
        sample['gt_labels_3d'] = labels
        return sample
@manager.TRANSFORMS.add_component
class SampleNameFilter(object):
    """Keep only GT objects whose label id maps to a training class.

    Args:
        classes (list[str]): List of class names to be kept for training.
    """

    def __init__(self, classes):
        self.classes = classes
        # Valid label ids are simply 0 .. len(classes) - 1.
        self.labels = list(range(len(self.classes)))

    def __call__(self, sample):
        """Filter 'gt_bboxes_3d' / 'gt_labels_3d' by label and return sample."""
        gt_labels_3d = sample['gt_labels_3d']
        keep = np.array([lbl in self.labels for lbl in gt_labels_3d],
                        dtype=np.bool_)
        sample['gt_bboxes_3d'] = sample['gt_bboxes_3d'][keep]
        sample['gt_labels_3d'] = gt_labels_3d[keep]
        return sample
@manager.TRANSFORMS.add_component
class ResizeCropFlipImage(object):
    """Random resize, crop, flip and rotate augmentation for multi-view
    images; camera intrinsics are updated with the matching 3x3
    image-domain-augmentation (IDA) matrix.

    Args:
        sample_aug_cfg (dict, optional): Augmentation config with keys
            "H", "W", "final_dim", "resize_lim", "bot_pct_lim",
            "rand_flip", "rot_lim".
        training (bool): Random parameters in training, deterministic
            center crop otherwise.
    """

    def __init__(self, sample_aug_cfg=None, training=True):
        self.sample_aug_cfg = sample_aug_cfg
        self.training = training

    def __call__(self, sample):
        """Apply one shared augmentation to every view of the sample.
        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Updated result dict.
        """
        imgs = sample["img"]
        N = len(imgs)
        new_imgs = []
        # One parameter set shared by all views.
        resize, resize_dims, crop, flip, rotate = self._sample_augmentation()
        for i in range(N):
            img = Image.fromarray(np.uint8(imgs[i]))
            # augmentation (resize, crop, horizontal flip, rotate)
            # resize, resize_dims, crop, flip, rotate = self._sample_augmentation() ###different view use different aug (BEV Det)
            img, ida_mat = self._img_transform(
                img,
                resize=resize,
                resize_dims=resize_dims,
                crop=crop,
                flip=flip,
                rotate=rotate,
            )
            new_imgs.append(np.array(img).astype(np.float32))
            # Fold the 2D augmentation into the camera intrinsics.
            sample['intrinsics'][
                i][:3, :3] = ida_mat @ sample['intrinsics'][i][:3, :3]
        sample["img"] = new_imgs
        # Recompute lidar-to-image projections from the updated intrinsics.
        sample['lidar2img'] = [
            sample['intrinsics'][i] @ sample['extrinsics'][i].T
            for i in range(len(sample['extrinsics']))
        ]
        return sample

    def _get_rot(self, h):
        """2x2 rotation matrix for angle ``h`` (radians)."""
        return np.array([
            [np.cos(h), np.sin(h)],
            [-np.sin(h), np.cos(h)],
        ])

    def _img_transform(self, img, resize, resize_dims, crop, flip, rotate):
        """Apply resize/crop/flip/rotate to a PIL image and return it with
        the matching 3x3 IDA matrix (2x2 rotation/scale + translation)."""
        ida_rot = np.eye(2)
        ida_tran = np.zeros(2)
        # adjust image
        img = img.resize(resize_dims)
        img = img.crop(crop)
        if flip:
            img = img.transpose(method=Image.FLIP_LEFT_RIGHT)
        img = img.rotate(rotate)
        # post-homography transformation
        ida_rot *= resize
        ida_tran -= np.array(crop[:2])
        if flip:
            # Mirror about the crop's vertical center line.
            A = np.array([[-1, 0], [0, 1]])
            b = np.array([crop[2] - crop[0], 0])
            ida_rot = np.matmul(A, ida_rot)
            ida_tran = np.matmul(A, ida_tran) + b
        # Rotation about the crop center.
        A = self._get_rot(rotate / 180 * np.pi)
        b = np.array([crop[2] - crop[0], crop[3] - crop[1]]) / 2
        b = np.matmul(A, -b) + b
        ida_rot = np.matmul(A, ida_rot)
        ida_tran = np.matmul(A, ida_tran) + b
        ida_mat = np.eye(3)
        ida_mat[:2, :2] = ida_rot
        ida_mat[:2, 2] = ida_tran
        return img, ida_mat

    def _sample_augmentation(self):
        """Sample resize/crop/flip/rotate parameters: random in training,
        deterministic center crop with no flip/rotation otherwise."""
        H, W = self.sample_aug_cfg["H"], self.sample_aug_cfg["W"]
        fH, fW = self.sample_aug_cfg["final_dim"]
        if self.training:
            resize = np.random.uniform(*self.sample_aug_cfg["resize_lim"])
            resize_dims = (int(W * resize), int(H * resize))
            newW, newH = resize_dims
            # Vertical crop offset keeps a random bottom fraction in view.
            crop_h = int(
                (1 - np.random.uniform(*self.sample_aug_cfg["bot_pct_lim"])) *
                newH) - fH
            crop_w = int(np.random.uniform(0, max(0, newW - fW)))
            crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
            flip = False
            if self.sample_aug_cfg["rand_flip"] and np.random.choice([0, 1]):
                flip = True
            rotate = np.random.uniform(*self.sample_aug_cfg["rot_lim"])
        else:
            resize = max(fH / H, fW / W)
            resize_dims = (int(W * resize), int(H * resize))
            newW, newH = resize_dims
            crop_h = int(
                (1 - np.mean(self.sample_aug_cfg["bot_pct_lim"])) * newH) - fH
            crop_w = int(max(0, newW - fW) / 2)
            crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
            flip = False
            rotate = 0
        return resize, resize_dims, crop, flip, rotate
@manager.TRANSFORMS.add_component
class MSResizeCropFlipImage(object):
"""Random resize, Crop and flip the image
Args:
size (tuple, optional): Fixed padding size.
"""
def __init__(self,
sample_aug_cfg=None,
training=True,
view_num=1,
center_size=2.0):
self.sample_aug_cfg = sample_aug_cfg
self.training = training
self.view_num = view_num
self.center_size = center_size
def __call__(self, sample):
"""Call function to pad images, masks, semantic segmentation maps.
Args:
sample (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
imgs = sample["img"]
N = len(imgs)
new_imgs = []
resize, resize_dims, crop, flip, rotate = self._sample_augmentation()
copy_intrinsics = []
copy_extrinsics = []
for i in range(self.view_num):
copy_intrinsics.append(np.copy(sample['intrinsics'][i]))
copy_extrinsics.append(np.copy(sample['extrinsics'][i]))
for i in range(N):
img = Image.fromarray(np.uint8(imgs[i]))
# augmentation (resize, crop, horizontal flip, rotate)
img, ida_mat = self._img_transform(
img,
resize=resize,
resize_dims=resize_dims,
crop=crop,
flip=flip,
rotate=rotate,
)
new_imgs.append(np.array(img).astype(np.float32))
sample['intrinsics'][
i][:3, :3] = ida_mat @ sample['intrinsics'][i][:3, :3]
resize, resize_dims, crop, flip, rotate = self._crop_augmentation(
resize)
for i in range(self.view_num):
img = Image.fromarray(np.copy(np.uint8(imgs[i])))
img, ida_mat = self._img_transform(
img,
resize=resize,
resize_dims=resize_dims,
crop=crop,
flip=flip,
rotate=rotate,
)
new_imgs.append(np.array(img).astype(np.float32))
copy_intrinsics[i][:3, :3] = ida_mat @ copy_intrinsics[i][:3, :3]
sample['intrinsics'].append(copy_intrinsics[i])
sample['extrinsics'].append(copy_extrinsics[i])
sample['filename'].append(sample['filename'][i].replace(
".jpg", "_crop.jpg"))
sample['timestamp'].append(sample['timestamp'][i])
sample["img"] = new_imgs
sample['lidar2img'] = [
sample['intrinsics'][i] @ sample['extrinsics'][i].T
for i in range(len(sample['extrinsics']))
]
return sample
def _get_rot(self, h):
return np.array([
[np.cos(h), np.sin(h)],
[-np.sin(h), np.cos(h)],
])
def _img_transform(self, img, resize, resize_dims, crop, flip, rotate):
ida_rot = np.eye(2)
ida_tran = np.zeros(2)
# adjust image
img = img.resize(resize_dims)
img = img.crop(crop)
if flip:
img = img.transpose(method=Image.FLIP_LEFT_RIGHT)
img = img.rotate(rotate)
# post-homography transformation
ida_rot *= resize
ida_tran -= np.array(crop[:2])
if flip:
A = np.array([[-1, 0], [0, 1]])
b = np.array([crop[2] - crop[0], 0])
ida_rot = np.matmul(A, ida_rot)
ida_tran = np.matmul(A, ida_tran) + b
A = self._get_rot(rotate / 180 * np.pi)
b = np.array([crop[2] - crop[0], crop[3] - crop[1]]) / 2
b = np.matmul(A, -b) + b
ida_rot = np.matmul(A, ida_rot)
ida_tran = np.matmul(A, ida_tran) + b
ida_mat = np.eye(3)
ida_mat[:2, :2] = ida_rot
ida_mat[:2, 2] = ida_tran
return img, ida_mat
def _sample_augmentation(self):
    """Draw resize/crop/flip/rotate parameters for image augmentation.

    Training draws randomly from ``self.sample_aug_cfg``; evaluation uses a
    deterministic resize plus bottom-anchored center crop.
    NOTE(review): the sequence of ``np.random`` calls fixes the RNG stream,
    so the statement order here is part of the behavior — do not reorder.

    Returns:
        tuple: (resize, resize_dims, crop, flip, rotate).
    """
    H, W = self.sample_aug_cfg["H"], self.sample_aug_cfg["W"]
    fH, fW = self.sample_aug_cfg["final_dim"]
    if self.training:
        resize = np.random.uniform(*self.sample_aug_cfg["resize_lim"])
        resize_dims = (int(W * resize), int(H * resize))
        newW, newH = resize_dims
        # Keep at most ``bot_pct_lim`` of the image bottom outside the crop.
        crop_h = int(
            (1 - np.random.uniform(*self.sample_aug_cfg["bot_pct_lim"])) *
            newH) - fH
        crop_w = int(np.random.uniform(0, max(0, newW - fW)))
        crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
        flip = False
        if self.sample_aug_cfg["rand_flip"] and np.random.choice([0, 1]):
            flip = True
        rotate = np.random.uniform(*self.sample_aug_cfg["rot_lim"])
    else:
        # Deterministic eval path: smallest resize covering the final dims.
        resize = max(fH / H, fW / W)
        resize_dims = (int(W * resize), int(H * resize))
        newW, newH = resize_dims
        crop_h = int(
            (1 - np.mean(self.sample_aug_cfg["bot_pct_lim"])) * newH) - fH
        crop_w = int(max(0, newW - fW) / 2)
        crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
        flip = False
        rotate = 0
    return resize, resize_dims, crop, flip, rotate
def _crop_augmentation(self, resize):
    """Deterministic parameters for the extra center-cropped views.

    The incoming ``resize`` factor is magnified by ``self.center_size`` and
    a centered crop of ``final_dim`` is taken; flip and rotation are off.

    Returns:
        tuple: (resize, resize_dims, crop, flip, rotate).
    """
    src_h, src_w = self.sample_aug_cfg["H"], self.sample_aug_cfg["W"]
    out_h, out_w = self.sample_aug_cfg["final_dim"]
    resize = self.center_size * resize
    resize_dims = (int(src_w * resize), int(src_h * resize))
    new_w, new_h = resize_dims
    top = int(max(0, new_h - out_h) / 2)
    left = int(max(0, new_w - out_w) / 2)
    crop = (left, top, left + out_w, top + out_h)
    return resize, resize_dims, crop, False, 0
@manager.TRANSFORMS.add_component
class GlobalRotScaleTransImage(object):
    """Global rotation and scaling augmentation for multi-view samples.

    The same random rotation/scale is applied to the ground-truth 3D boxes
    and folded into every view's ``lidar2img``/``extrinsics`` matrices so
    the camera projections stay consistent with the transformed scene.

    Args:
        rot_range (list[float]): Range (radians) for the uniform yaw sample.
        scale_ratio_range (list[float]): Range for the uniform scale sample.
        translation_std (list[float]): Std of the random translation
            (translation itself is not applied yet — see TODO in __call__).
        reverse_angle (bool): Negate the sampled angle when rotating boxes.
        training (bool): Stored but not read by ``__call__``.
    """

    def __init__(
            self,
            rot_range=[-0.3925, 0.3925],
            scale_ratio_range=[0.95, 1.05],
            translation_std=[0, 0, 0],
            reverse_angle=False,
            training=True,
    ):
        self.rot_range = rot_range
        self.scale_ratio_range = scale_ratio_range
        self.translation_std = translation_std
        self.reverse_angle = reverse_angle
        self.training = training

    def __call__(self, results):
        """Apply a random global rotation and scale to ``results``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        # random rotate
        rot_angle = np.random.uniform(*self.rot_range)
        self.rotate_bev_along_z(results, rot_angle)

        # random scale
        scale_ratio = np.random.uniform(*self.scale_ratio_range)
        self.scale_xyz(results, scale_ratio)
        # results["gt_bboxes_3d"].scale(scale_ratio)

        # TODO: support translation
        return results

    def rotate_bev_along_z(self, results, angle):
        """Rotate boxes and camera matrices around the z axis by ``angle``
        (radians), in place."""
        rot_cos = np.cos(angle)
        rot_sin = np.sin(angle)
        rot_mat = np.array([[rot_cos, -rot_sin, 0, 0], [rot_sin, rot_cos, 0, 0],
                            [0, 0, 1, 0], [0, 0, 0, 1]])
        rot_mat_inv = np.linalg.inv(rot_mat)
        # Rotating the world is folded into the projections by
        # right-multiplying with the inverse rotation.
        num_view = len(results["lidar2img"])
        for view in range(num_view):
            results["lidar2img"][view] = np.array(
                results["lidar2img"][view]).astype('float32') @ rot_mat_inv
            results["extrinsics"][view] = np.array(
                results["extrinsics"][view]).astype('float32') @ rot_mat_inv
        if self.reverse_angle:
            rot_angle = np.array(-1 * angle)
        else:
            rot_angle = np.array(angle)
        rot_cos = np.cos(rot_angle)
        rot_sin = np.sin(rot_angle)
        rot_mat = np.array([[
            rot_cos,
            -rot_sin,
            0,
        ], [
            rot_sin,
            rot_cos,
            0,
        ], [0, 0, 1]])
        # Rotate box centers (cols 0:3), headings (col 6) and the (x, y)
        # velocity components (cols 7:9).
        results.gt_bboxes_3d[:, :3] = results.gt_bboxes_3d[:, :3] @ rot_mat
        results.gt_bboxes_3d[:, 6] += rot_angle
        results.gt_bboxes_3d[:, 7:
                             9] = results.gt_bboxes_3d[:, 7:9] @ rot_mat[:2, :2]

    def scale_xyz(self, results, scale_ratio):
        """Scale the whole scene (boxes + projections) by ``scale_ratio``,
        in place."""
        rot_mat = np.array([
            [scale_ratio, 0, 0, 0],
            [0, scale_ratio, 0, 0],
            [0, 0, scale_ratio, 0],
            [0, 0, 0, 1],
        ])
        rot_mat_inv = np.linalg.inv(rot_mat)
        num_view = len(results["lidar2img"])
        for view in range(num_view):
            results["lidar2img"][view] = np.array(
                results["lidar2img"][view]).astype('float32') @ rot_mat_inv
            # NOTE(review): extrinsics are left-multiplied (transposed) here
            # but right-multiplied in rotate_bev_along_z — matches the code
            # as written; confirm against the upstream implementation.
            results["extrinsics"][view] = np.array(
                rot_mat_inv.T @ results["extrinsics"][view]).astype('float32')
        # Scale centers/dims (cols 0:6) and velocities (cols 7:);
        # heading (col 6) is deliberately untouched.
        results.gt_bboxes_3d[:, :6] *= scale_ratio
        results.gt_bboxes_3d[:, 7:] *= scale_ratio
        return
@manager.TRANSFORMS.add_component
class NormalizeMultiviewImage(object):
    """Normalize every image of a multi-view sample.

    Adds an ``img_norm_cfg`` entry recording the parameters used.

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB.
            Default: False.
    """

    def __init__(self, mean, std, to_rgb=False):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, sample):
        """Normalize ``sample['img']`` and record the normalization config.

        Args:
            sample (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results; 'img_norm_cfg' key is added.
        """
        normalized = []
        for image in sample['img']:
            normalized.append(
                F.normalize_use_cv2(image, self.mean, self.std, self.to_rgb))
        sample['img'] = normalized
        sample['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return sample
def impad(img, *, shape=None, padding=None, pad_val=0, padding_mode='constant'):
    """Pad the given image to a certain shape or pad on all sides with
    specified padding mode and padding value.

    Args:
        img (ndarray): Image to be padded.
        shape (tuple[int], optional): Target shape (h, w); the image is
            padded on the bottom and right. Mutually exclusive with
            ``padding``.
        padding (int | tuple, optional): Padding on each border. A single
            int pads all borders; a 2-tuple is (left/right, top/bottom);
            a 4-tuple is (left, top, right, bottom).
        pad_val (Number | tuple): Fill value(s) for 'constant' mode; a tuple
            must have one entry per channel. Default: 0.
        padding_mode (str): One of 'constant', 'edge', 'reflect',
            'symmetric'.

    Returns:
        ndarray: The padded image.

    Raises:
        TypeError: if ``pad_val`` is neither a number nor a tuple.
        ValueError: if ``padding`` is not an int or a 2-/4-tuple.
    """
    # Exactly one of shape / padding must be given.
    assert (shape is not None) ^ (padding is not None)
    if shape is not None:
        # Clamp to zero so a target smaller than the image in one dimension
        # does not produce a negative border (cv2 would raise a cryptic
        # error otherwise).
        width = max(shape[1] - img.shape[1], 0)
        height = max(shape[0] - img.shape[0], 0)
        padding = (0, 0, width, height)

    # check pad_val
    if isinstance(pad_val, tuple):
        assert len(pad_val) == img.shape[-1]
    elif not isinstance(pad_val, numbers.Number):
        raise TypeError('pad_val must be a int or a tuple. '
                        f'But received {type(pad_val)}')

    # check padding: normalize to (left, top, right, bottom)
    if isinstance(padding, tuple) and len(padding) in [2, 4]:
        if len(padding) == 2:
            padding = (padding[0], padding[1], padding[0], padding[1])
    elif isinstance(padding, numbers.Number):
        padding = (padding, padding, padding, padding)
    else:
        raise ValueError('Padding must be a int or a 2, or 4 element tuple.'
                         f'But received {padding}')

    # check padding mode
    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']

    border_type = {
        'constant': cv2.BORDER_CONSTANT,
        'edge': cv2.BORDER_REPLICATE,
        'reflect': cv2.BORDER_REFLECT_101,
        'symmetric': cv2.BORDER_REFLECT
    }
    # copyMakeBorder takes (top, bottom, left, right).
    img = cv2.copyMakeBorder(
        img,
        padding[1],
        padding[3],
        padding[0],
        padding[2],
        border_type[padding_mode],
        value=pad_val)
    return img
def impad_to_multiple(img, divisor, pad_val=0):
    """Pad an image so both spatial dimensions are multiples of ``divisor``.

    Args:
        img (ndarray): Image to be padded.
        divisor (int): Each padded edge becomes a multiple of this value.
        pad_val (Number | tuple): Fill value, forwarded to :func:`impad`.

    Returns:
        ndarray: The padded image.
    """
    # Integer ceil-division, then scale back up to the next multiple.
    target_h = ((img.shape[0] + divisor - 1) // divisor) * divisor
    target_w = ((img.shape[1] + divisor - 1) // divisor) * divisor
    return impad(img, shape=(target_h, target_w), pad_val=pad_val)
@manager.TRANSFORMS.add_component
class PadMultiViewImage(object):
    """Pad every view of a multi-view image sample.

    Two mutually exclusive modes: pad each view to a fixed ``size``, or pad
    each view to the smallest shape divisible by ``size_divisor``.
    Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor".

    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value, 0 by default.
    """

    def __init__(self, size=None, size_divisor=None, pad_val=0):
        # Exactly one of the two modes must be configured.
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val

    def __call__(self, sample):
        """Pad ``sample['img']`` and record the padding metadata.

        Args:
            sample (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        if self.size is not None:
            padded = [
                impad(im, shape=self.size, pad_val=self.pad_val)
                for im in sample['img']
            ]
        else:
            padded = [
                impad_to_multiple(im, self.size_divisor, pad_val=self.pad_val)
                for im in sample['img']
            ]
        # Record the pre-padding shapes before swapping the images in.
        sample['img_shape'] = [im.shape for im in sample['img']]
        sample['img'] = padded
        sample['pad_shape'] = [im.shape for im in padded]
        sample['pad_fixed_size'] = self.size
        sample['pad_size_divisor'] = self.size_divisor
        return sample
@manager.TRANSFORMS.add_component
class SampleFilerByKey(object):
    """Keep only the requested keys of a sample.

    NOTE(review): the registered name keeps the original spelling ("Filer")
    because configs reference it by this name.

    Args:
        keys (tuple): data keys copied onto the filtered sample.
        meta_keys (tuple): metadata keys copied into the sample's meta
            when present.
    """

    def __init__(
            self,
            keys,
            meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img',
                       'depth2img', 'cam2img', 'pad_shape', 'scale_factor',
                       'flip', 'pcd_horizontal_flip', 'pcd_vertical_flip',
                       'box_mode_3d', 'box_type_3d', 'img_norm_cfg',
                       'pcd_trans', 'sample_idx', 'prev_idx', 'next_idx',
                       'pcd_scale_factor', 'pcd_rotation', 'pts_filename',
                       'transformation_3d_flow', 'scene_token', 'can_bus')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, sample):
        """Build a new sample containing only ``self.keys`` plus any
        present ``self.meta_keys`` (stored under meta).

        Args:
            sample (dict): Result dict containing the data.

        Returns:
            Sample: The filtered sample.
        """
        filtered = Sample(path=sample.path, modality=sample.modality)
        filtered.meta.id = sample.meta.id
        for meta_key in self.meta_keys:
            if meta_key in sample:
                filtered.meta[meta_key] = sample[meta_key]
        for data_key in self.keys:
            filtered[data_key] = sample[data_key]
        return filtered
@manager.TRANSFORMS.add_component
class PhotoMetricDistortionMultiViewImage(object):
    """Apply photometric distortion to every view sequentially; each
    transformation fires with probability 0.5. Random contrast is applied
    either second or second-to-last:

    1. random brightness
    2. random contrast (mode 0)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (mode 1)
    8. randomly swap channels

    This class is modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py#L99

    Args:
        brightness_delta (int): delta of brightness.
        contrast_range (tuple): range of contrast.
        saturation_range (tuple): range of saturation.
        hue_delta (int): delta of hue.
    """

    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta

    def convert_color_factory(self, src, dst):
        """Build a converter function between two OpenCV color spaces
        (e.g. 'bgr' -> 'hsv')."""
        code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')

        def convert_color(img):
            out_img = cv2.cvtColor(img, code)
            return out_img

        convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()}
        image.
    Args:
        img (ndarray or str): The input image.
    Returns:
        ndarray: The converted {dst.upper()} image.
    """
        return convert_color

    def __call__(self, sample):
        """Distort every image in ``sample['img']`` (float32, BGR) in place.

        NOTE(review): the sequence of ``np.random`` calls fixes the RNG
        stream, so the statement order is part of the behavior.

        Args:
            sample (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images distorted.
        """
        imgs = sample['img']
        new_imgs = []
        for img in imgs:
            assert img.dtype == np.float32, \
                'PhotoMetricDistortion needs the input image of dtype np.float32,'\
                ' please set "to_float32=True" in "LoadImageFromFile" pipeline'
            # random brightness
            if np.random.randint(2):
                delta = np.random.uniform(-self.brightness_delta,
                                          self.brightness_delta)
                img += delta
            # mode == 0 --> apply random contrast first
            # mode == 1 --> apply random contrast last
            mode = np.random.randint(2)
            if mode == 1:
                if np.random.randint(2):
                    alpha = np.random.uniform(self.contrast_lower,
                                              self.contrast_upper)
                    img *= alpha
            # convert color from BGR to HSV
            img = self.convert_color_factory('bgr', 'hsv')(img)
            # random saturation
            if np.random.randint(2):
                img[..., 1] *= np.random.uniform(self.saturation_lower,
                                                 self.saturation_upper)
            # random hue; wrap into [0, 360] (OpenCV float hue range)
            if np.random.randint(2):
                img[..., 0] += np.random.uniform(-self.hue_delta,
                                                 self.hue_delta)
                img[..., 0][img[..., 0] > 360] -= 360
                img[..., 0][img[..., 0] < 0] += 360
            # convert color from HSV to BGR
            img = self.convert_color_factory('hsv', 'bgr')(img)
            # random contrast
            if mode == 0:
                if np.random.randint(2):
                    alpha = np.random.uniform(self.contrast_lower,
                                              self.contrast_upper)
                    img *= alpha
            # randomly swap channels
            if np.random.randint(2):
                img = img[..., np.random.permutation(3)]
            new_imgs.append(img)
        sample['img'] = new_imgs
        return sample
@manager.TRANSFORMS.add_component
class RandomScaleImageMultiViewImage(object):
    """Randomly rescale every view of a multi-view sample.

    This class is modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py#L289

    Args:
        scales (list[float]): candidate scale factors (must contain exactly
            one entry in the current implementation).
    """

    def __init__(self, scales=[]):
        self.scales = scales
        assert len(self.scales) == 1

    def __call__(self, sample):
        """Rescale images and fold the scale into the lidar2img matrices.

        Args:
            sample (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        pick = np.random.permutation(range(len(self.scales)))[0]
        scale = self.scales[pick]
        heights = [int(im.shape[0] * scale) for im in sample['img']]
        widths = [int(im.shape[1] * scale) for im in sample['img']]
        # Image-plane scaling corresponds to scaling the first two rows of
        # the projection matrices.
        scale_mat = np.eye(4)
        scale_mat[0, 0] *= scale
        scale_mat[1, 1] *= scale
        resized = []
        for idx, im in enumerate(sample['img']):
            resized.append(
                imresize(im, (widths[idx], heights[idx]), return_scale=False))
        sample['img'] = resized
        sample['lidar2img'] = [scale_mat @ mat for mat in sample['lidar2img']]
        sample['img_shape'] = [im.shape for im in sample['img']]
        sample['ori_shape'] = [im.shape for im in sample['img']]
        return sample
def imresize(img,
             size,
             return_scale=False,
             interpolation='bilinear',
             out=None,
             backend=None):
    """Resize image to a given size.

    Args:
        img (ndarray): The input image.
        size (tuple[int]): Target size (w, h).
        return_scale (bool): Whether to also return `w_scale` and `h_scale`.
        interpolation (str): Interpolation method; 'cv2' backend accepts
            "nearest", "bilinear", "bicubic", "area", "lanczos"; 'pillow'
            backend accepts "nearest", "bilinear".
        out (ndarray): Optional output destination (cv2 backend only).
        backend (str | None): 'cv2', 'pillow', or None (defaults to 'cv2').

    Returns:
        tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
        `resized_img`.
    """
    h, w = img.shape[:2]
    backend = 'cv2' if backend is None else backend
    if backend not in ['cv2', 'pillow']:
        raise ValueError(f'backend: {backend} is not supported for resize.'
                         f"Supported backends are 'cv2', 'pillow'")
    if backend == 'pillow':
        assert img.dtype == np.uint8, 'Pillow backend only support uint8 type'
        pil_image = Image.fromarray(img)
        pil_image = pil_image.resize(size, pillow_interp_codes[interpolation])
        resized_img = np.array(pil_image)
    else:
        resized_img = cv2.resize(
            img, size, dst=out, interpolation=cv2_interp_codes[interpolation])
    if return_scale:
        return resized_img, size[0] / w, size[1] / h
    return resized_img
# Interpolation-name -> OpenCV resize flag, used by ``imresize``.
cv2_interp_codes = {
    'nearest': cv2.INTER_NEAREST,
    'bilinear': cv2.INTER_LINEAR,
    'bicubic': cv2.INTER_CUBIC,
    'area': cv2.INTER_AREA,
    'lanczos': cv2.INTER_LANCZOS4
}

# Interpolation-name -> Pillow resample flag. Pillow is optional: the table
# is built only when ``Image`` is available (presumably set to None by a
# guarded import elsewhere — TODO confirm).
if Image is not None:
    pillow_interp_codes = {
        'nearest': Image.NEAREST,
        'bilinear': Image.BILINEAR,
        'bicubic': Image.BICUBIC,
        'box': Image.BOX,
        'lanczos': Image.LANCZOS,
        'hamming': Image.HAMMING
    }
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/sampling.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["SamplingDatabase"]
import os.path as osp
import pickle
from collections import defaultdict
from typing import Dict, List
import numpy as np
from paddle3d.apis import manager
from paddle3d.geometries.bbox import BBoxes3D, box_collision_test
from paddle3d.geometries.pointcloud import PointCloud
from paddle3d.sample import Sample
from paddle3d.transforms.base import TransformABC
from paddle3d.utils.logger import logger
@manager.TRANSFORMS.add_component
class SamplingDatabase(TransformABC):
    """
    Sample objects from ground truth database and paste on current scene.

    Args:
        min_num_points_in_box_per_class (Dict[str, int]): Minimum number of points in sampled object for each class.
        max_num_samples_per_class (Dict[str, int]): Maximum number of objects sampled from each class.
        database_anno_path (str): Path to database annotation file (.pkl).
        database_root (str): Path to database root directory.
        class_names (List[str]): List of class names.
        ignored_difficulty (List[int]): List of difficulty levels to be ignored.
    """

    def __init__(self,
                 min_num_points_in_box_per_class: Dict[str, int],
                 max_num_samples_per_class: Dict[str, int],
                 database_anno_path: str,
                 database_root: str,
                 class_names: List[str],
                 ignored_difficulty: List[int] = None):
        self.min_num_points_in_box_per_class = min_num_points_in_box_per_class
        self.max_num_samples_per_class = max_num_samples_per_class
        self.database_anno_path = database_anno_path
        with open(database_anno_path, "rb") as f:
            database_anno = pickle.load(f)
        if not osp.exists(database_root):
            raise ValueError(
                f"Database root path {database_root} does not exist!!!")
        self.database_root = database_root
        self.class_names = class_names
        # Drop annotations whose boxes contain too few lidar points.
        self.database_anno = self._filter_min_num_points_in_box(database_anno)
        self.ignored_difficulty = ignored_difficulty
        if ignored_difficulty is not None:
            self.database_anno = self._filter_ignored_difficulty(
                self.database_anno)
        # One cyclic sampler per class.
        self.sampler_per_class = dict()
        for cls_name, annos in self.database_anno.items():
            self.sampler_per_class[cls_name] = Sampler(cls_name, annos)

    def _filter_min_num_points_in_box(self, database_anno: Dict[str, list]):
        """Keep only known classes whose boxes contain at least the
        configured number of points."""
        new_database_anno = defaultdict(list)
        for cls_name, annos in database_anno.items():
            if cls_name not in self.class_names or cls_name not in self.min_num_points_in_box_per_class:
                continue
            logger.info("Load {} {} database infos".format(
                len(annos), cls_name))
            for anno in annos:
                if anno["num_points_in_box"] >= self.min_num_points_in_box_per_class[
                        cls_name]:
                    new_database_anno[cls_name].append(anno)
        logger.info("After filtering min_num_points_in_box:")
        for cls_name, annos in new_database_anno.items():
            logger.info("Load {} {} database infos".format(
                len(annos), cls_name))
        return new_database_anno

    def _filter_ignored_difficulty(self, database_anno: Dict[str, list]):
        """Drop annotations whose difficulty is in ``self.ignored_difficulty``."""
        new_database_anno = defaultdict(list)
        for cls_name, annos in database_anno.items():
            if cls_name not in self.class_names or cls_name not in self.min_num_points_in_box_per_class:
                continue
            for anno in annos:
                if anno["difficulty"] not in self.ignored_difficulty:
                    new_database_anno[cls_name].append(anno)
        logger.info("After filtering ignored difficulty:")
        for cls_name, annos in new_database_anno.items():
            logger.info("Load {} {} database infos".format(
                len(annos), cls_name))
        return new_database_anno

    def _convert_box_format(self, bboxes_3d):
        # convert to [x,y,z,l,w,h,heading], original is [x,y,z,w,l,h,yaw]
        bboxes_3d[:, 2] += bboxes_3d[:, 5] / 2
        bboxes_3d[:, 3:6] = bboxes_3d[:, [4, 3, 5]]
        bboxes_3d[:, 6] = -(bboxes_3d[:, 6] + np.pi / 2)
        return bboxes_3d

    def _convert_box_format_back(self, bboxes_3d):
        # inverse of _convert_box_format (in-place)
        bboxes_3d[:, 2] -= bboxes_3d[:, 5] / 2
        bboxes_3d[:, 3:6] = bboxes_3d[:, [4, 3, 5]]
        bboxes_3d[:, 6] = -(bboxes_3d[:, 6] + np.pi / 2)
        return bboxes_3d

    def _lidar_to_rect(self, pts_lidar, R0, V2C):
        """Transform lidar-frame points to the rectified camera frame."""
        pts_lidar_hom = self._cart_to_hom(pts_lidar)
        pts_rect = np.dot(pts_lidar_hom, np.dot(V2C.T, R0.T))
        return pts_rect

    def _rect_to_lidar(self, pts_rect, R0, V2C):
        """Transform rectified-camera-frame points back to the lidar frame."""
        pts_rect_hom = self._cart_to_hom(pts_rect)  # (N, 4)
        R0_ext = np.hstack((R0, np.zeros((3, 1), dtype=np.float32)))  # (3, 4)
        R0_ext = np.vstack((R0_ext, np.zeros((1, 4),
                                             dtype=np.float32)))  # (4, 4)
        R0_ext[3, 3] = 1
        V2C_ext = np.vstack((V2C, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        V2C_ext[3, 3] = 1
        pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(
            np.dot(R0_ext, V2C_ext).T))
        return pts_lidar[:, 0:3]

    def _cart_to_hom(self, pts):
        """Append a homogeneous 1-column to (N, d) points."""
        pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
        return pts_hom

    def _put_boxes_on_road_planes(self, sampled_boxes, road_planes, calibs):
        """Drop sampled boxes vertically onto the road plane.

        Returns the adjusted boxes and the per-box height shift
        (``mv_height``) applied, so the point clouds can be shifted too.
        """
        a, b, c, d = road_planes
        R0, V2C = calibs[4], calibs[5]
        sampled_boxes = self._convert_box_format(sampled_boxes)
        center_cam = self._lidar_to_rect(sampled_boxes[:, 0:3], R0, V2C)
        # Solve the plane equation a*x + b*y + c*z + d = 0 for camera y.
        cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b
        center_cam[:, 1] = cur_height_cam
        cur_lidar_height = self._rect_to_lidar(center_cam, R0, V2C)[:, 2]
        mv_height = sampled_boxes[:,
                                  2] - sampled_boxes[:, 5] / 2 - cur_lidar_height
        sampled_boxes[:, 2] -= mv_height
        sampled_boxes = self._convert_box_format_back(sampled_boxes)
        return sampled_boxes, mv_height

    def sampling(self, sample: Sample, num_samples_per_class: Dict[str, int]):
        """Paste sampled objects into ``sample``, skipping candidates whose
        BEV footprint collides with existing (or ignored) boxes.

        Returns:
            dict: merged "bboxes_3d", "data", "labels", plus "velocities" /
            "difficulties" when the input sample carries them.
        """
        existing_bboxes_3d = sample.bboxes_3d.copy()
        existing_velocities = None
        if sample.bboxes_3d.velocities is not None:
            existing_velocities = sample.bboxes_3d.velocities.copy()
        existing_labels = sample.labels.copy()
        existing_data = sample.data.copy()
        existing_difficulties = getattr(sample, "difficulties", None)
        ignored_bboxes_3d = getattr(
            sample, "ignored_bboxes_3d",
            np.zeros([0, existing_bboxes_3d.shape[1]],
                     dtype=existing_bboxes_3d.dtype))
        # Candidates must avoid both real and ignored boxes.
        avoid_coll_bboxes_3d = np.vstack(
            [existing_bboxes_3d, ignored_bboxes_3d])

        for cls_name, num_samples in num_samples_per_class.items():
            if num_samples > 0:
                sampling_annos = self.sampler_per_class[cls_name].sampling(
                    num_samples)
                num_sampling = len(sampling_annos)
                indices = np.arange(num_sampling)
                sampling_bboxes_3d = np.vstack(
                    [sampling_annos[i]["bbox_3d"] for i in range(num_sampling)])
                sampling_bboxes = BBoxes3D(
                    sampling_bboxes_3d,
                    coordmode=sample.bboxes_3d.coordmode,
                    origin=sample.bboxes_3d.origin)
                avoid_coll_bboxes = BBoxes3D(
                    avoid_coll_bboxes_3d,
                    coordmode=sample.bboxes_3d.coordmode,
                    origin=sample.bboxes_3d.origin)
                s_bboxes_bev = sampling_bboxes.corners_2d
                e_bboxes_bev = avoid_coll_bboxes.corners_2d
                # filter the sampling bboxes which cross over the existing bboxes
                total_bv = np.concatenate([e_bboxes_bev, s_bboxes_bev], axis=0)
                coll_mat = box_collision_test(total_bv, total_bv)
                diag = np.arange(total_bv.shape[0])
                coll_mat[diag, diag] = False
                idx = e_bboxes_bev.shape[0]
                mask = []
                for num in range(num_sampling):
                    if coll_mat[idx + num].any():
                        # Rejected candidate: clear its rows/cols so it does
                        # not veto later candidates.
                        coll_mat[idx + num] = False
                        coll_mat[:, idx + num] = False
                        mask.append(False)
                    else:
                        mask.append(True)
                indices = indices[mask]

                # put all boxes(without filter) on road plane
                sampling_bboxes_3d_copy = sampling_bboxes_3d.copy()
                if hasattr(sample, "road_plane"):
                    sampling_bboxes_3d, mv_height = self._put_boxes_on_road_planes(
                        sampling_bboxes_3d, sample.road_plane, sample.calibs)

                if len(indices) > 0:
                    sampling_data = []
                    sampling_labels = []
                    sampling_velocities = []
                    sampling_difficulties = []
                    label = self.class_names.index(cls_name)
                    for i in indices:
                        if existing_velocities is not None:
                            sampling_velocities.append(
                                sampling_annos[i]["velocity"])
                        if existing_difficulties is not None:
                            sampling_difficulties.append(
                                sampling_annos[i]["difficulty"])
                        sampling_labels.append(label)
                        # Object points are stored box-relative; shift them to
                        # the (pre-road-plane) box position, then apply the
                        # same vertical drop as the box.
                        lidar_data = np.fromfile(
                            osp.join(self.database_root,
                                     sampling_annos[i]["lidar_file"]),
                            "float32").reshape(
                                [-1, sampling_annos[i]["lidar_dim"]])
                        lidar_data[:, 0:3] += sampling_bboxes_3d_copy[i, 0:3]
                        if hasattr(sample, "road_plane"):
                            lidar_data[:, 2] -= mv_height[i]
                        sampling_data.append(lidar_data)

                    existing_bboxes_3d = np.vstack(
                        [existing_bboxes_3d, sampling_bboxes_3d[indices]])
                    avoid_coll_bboxes_3d = np.vstack(
                        [avoid_coll_bboxes_3d, sampling_bboxes_3d[indices]])
                    if sample.bboxes_3d.velocities is not None:
                        existing_velocities = np.vstack(
                            [existing_velocities, sampling_velocities])
                    existing_labels = np.hstack(
                        [existing_labels, sampling_labels])
                    # Sampled points go in front so ground points from the
                    # scene come after them — TODO confirm ordering matters.
                    existing_data = np.vstack(
                        [np.vstack(sampling_data), existing_data])
                    if existing_difficulties is not None:
                        existing_difficulties = np.hstack(
                            [existing_difficulties, sampling_difficulties])

        result = {
            "bboxes_3d": existing_bboxes_3d,
            "data": existing_data,
            "labels": existing_labels
        }
        if existing_velocities is not None:
            result.update({"velocities": existing_velocities})
        if existing_difficulties is not None:
            result.update({"difficulties": existing_difficulties})
        return result

    def _cal_num_samples_per_class(self, sample: Sample):
        """For each class, how many objects to sample so the scene reaches
        (at most) the configured maximum."""
        labels = sample.labels
        num_samples_per_class = dict()
        for cls_name, max_num_samples in self.max_num_samples_per_class.items():
            label = self.class_names.index(cls_name)
            if label in labels:
                num_existing = np.sum([int(label) == int(l) for l in labels])
                num_samples = 0 if num_existing > max_num_samples else max_num_samples - num_existing
                num_samples_per_class[cls_name] = num_samples
            else:
                num_samples_per_class[cls_name] = max_num_samples
        return num_samples_per_class

    def __call__(self, sample: Sample):
        """Augment a lidar sample with database objects.

        Raises:
            ValueError: if the sample is not lidar modality.
        """
        if sample.modality != "lidar":
            raise ValueError(
                "Sampling from a database only supports lidar data!")
        num_samples_per_class = self._cal_num_samples_per_class(sample)
        samples = self.sampling(sample, num_samples_per_class)
        sample.bboxes_3d = BBoxes3D(
            samples["bboxes_3d"],
            coordmode=sample.bboxes_3d.coordmode,
            origin=sample.bboxes_3d.origin)
        sample.labels = samples["labels"]
        if "velocities" in samples:
            sample.bboxes_3d.velocities = samples["velocities"]
        if "difficulties" in samples:
            sample.difficulties = samples["difficulties"]
        sample.data = PointCloud(samples["data"])
        return sample
class Sampler(object):
    """Cyclic sampler over one class' database annotations.

    Args:
        cls_name (str): class the annotations belong to.
        annos (List[dict]): annotation records to sample from.
        shuffle (bool): shuffle the visit order initially and on every
            wrap-around. Default: True.
    """

    def __init__(self, cls_name: str, annos: List[dict], shuffle: bool = True):
        self.shuffle = shuffle
        self.cls_name = cls_name
        self.annos = annos
        self.idx = 0
        self.length = len(annos)
        self.indices = np.arange(len(annos))
        if shuffle:
            np.random.shuffle(self.indices)

    def reset(self):
        """Start a new pass over the annotations (reshuffling if enabled)."""
        if self.shuffle:
            np.random.shuffle(self.indices)
        self.idx = 0

    def sampling(self, num_samples):
        """Return up to ``num_samples`` annotations.

        When fewer than ``num_samples`` records remain in the current pass,
        only the remainder is returned and the cursor resets.
        """
        if self.idx + num_samples >= self.length:
            chosen = self.indices[self.idx:].copy()
            self.reset()
        else:
            chosen = self.indices[self.idx:self.idx + num_samples]
            self.idx += num_samples
        return [self.annos[i] for i in chosen]
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/target_generator.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
from typing import Tuple
import numpy as np
from PIL import Image
from skimage import transform as trans
from paddle3d.apis import manager
from paddle3d.geometries.bbox import BBoxes3D, second_box_encode
from paddle3d.sample import Sample
from paddle3d.transforms import functional as F
from paddle3d.transforms.base import TransformABC
"""
The smoke heatmap processing(encode_label/get_transfrom_matrix/affine_transform/get_3rd_point/gaussian_radius/gaussian2D/draw_umich_gaussian)
is based on https://github.com/lzccccc/SMOKE/blob/master/smoke/modeling/heatmap_coder.py
The copyright is MIT License
"""
def encode_label(K, ry, dims, locs):
    """Compute the projected 3D center, 2D box and 3D corners of a 3D box.

    Args:
        K (np.ndarray): 3x3 camera intrinsic matrix.
        ry (float): rotation around the camera y axis (radians).
        dims (np.ndarray): box dimensions [l, h, w].
        locs (np.ndarray): box bottom-center [x, y, z] in camera coordinates.

    Returns:
        tuple: (proj_point, box2d, corners_3d) where
            proj_point (np.ndarray): 2D projection of the 3D box center,
            box2d (np.ndarray): [x_min, y_min, x_max, y_max] of the
                projected corners,
            corners_3d (np.ndarray): (3, 8) corners in camera coordinates.
    """
    l, h, w = dims[0], dims[1], dims[2]
    x, y, z = locs[0], locs[1], locs[2]
    # Corners in the object frame, centered in x/z and extending towards -y
    # (camera y points down, so the box rises above its bottom face).
    # (The original built Python lists and relied on NumPy's __radd__ via
    # `list += np.float32(...)` — made explicit here.)
    x_corners = np.asarray([0, l, l, l, l, 0, 0, 0]) - np.float32(l) / 2
    y_corners = np.asarray([0, 0, h, h, 0, 0, h, h]) - np.float32(h)
    z_corners = np.asarray([0, 0, 0, w, w, w, w, 0]) - np.float32(w) / 2
    corners_3d = np.array([x_corners, y_corners, z_corners])
    # Rotate around the camera y axis, then translate to the box location.
    rot_mat = np.array([[np.cos(ry), 0, np.sin(ry)], [0, 1, 0],
                        [-np.sin(ry), 0, np.cos(ry)]])
    corners_3d = np.matmul(rot_mat, corners_3d)
    corners_3d += np.array([x, y, z]).reshape([3, 1])
    # Project the geometric center (half a height above the bottom center).
    loc_center = np.array([x, y - h / 2, z])
    proj_point = np.matmul(K, loc_center)
    proj_point = proj_point[:2] / proj_point[2]
    # Project all corners and take their axis-aligned bounding box.
    corners_2d = np.matmul(K, corners_3d)
    corners_2d = corners_2d[:2] / corners_2d[2]
    box2d = np.array([
        min(corners_2d[0]),
        min(corners_2d[1]),
        max(corners_2d[0]),
        max(corners_2d[1])
    ])
    return proj_point, box2d, corners_3d
def get_transfrom_matrix(center_scale, output_size):
    """Estimate the affine matrix mapping a (center, scale) region of the
    source image onto an ``output_size`` canvas.

    Args:
        center_scale (tuple): (center, scale) of the source region.
        output_size (tuple): (width, height) of the destination.

    Returns:
        np.ndarray: 3x3 affine transform (float32).
    """
    center, scale = center_scale[0], center_scale[1]
    # todo: further add rot and shift here.
    src_w = scale[0]
    dst_w, dst_h = output_size[0], output_size[1]
    # Two anchor points (region center and a point directly above it) plus a
    # third perpendicular point fully determine the affine map.
    src_pts = np.zeros((3, 2), dtype=np.float32)
    dst_pts = np.zeros((3, 2), dtype=np.float32)
    src_pts[0, :] = center
    src_pts[1, :] = center + np.array([0, src_w * -0.5])
    dst_pts[0, :] = np.array([dst_w * 0.5, dst_h * 0.5])
    dst_pts[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + np.array(
        [0, dst_w * -0.5])
    src_pts[2, :] = get_3rd_point(src_pts[0, :], src_pts[1, :])
    dst_pts[2, :] = get_3rd_point(dst_pts[0, :], dst_pts[1, :])
    estimator = trans.estimate_transform("affine", src_pts, dst_pts)
    return estimator.params.astype(np.float32)
def affine_transform(point, matrix):
    """Apply a 3x3 affine ``matrix`` to a 2D ``point`` and return the
    transformed (x, y)."""
    homogeneous = np.array([point[0], point[1], 1.])
    return np.matmul(matrix, homogeneous)[:2]
def get_3rd_point(point_a, point_b):
    """Return the point completing a right angle at ``point_b``.

    The result is ``point_b`` plus the 90-degree rotation of the vector
    from ``point_b`` to ``point_a``.
    """
    direction = point_a - point_b
    return point_b + np.array([-direction[1], direction[0]])
def gaussian_radius(h, w, thresh_min=0.7):
    """Largest-safe gaussian radius for an (h, w) box so that a corner
    shifted by the radius still yields IoU >= ``thresh_min``.

    Solves the three quadratic overlap cases (CornerNet derivation) and
    returns the tightest of the three radii.
    """
    # case 1: shifted box fully inside the original
    case_a = 1
    case_b = h + w
    case_c = h * w * (1 - thresh_min) / (1 + thresh_min)
    disc = np.sqrt(case_b**2 - 4 * case_a * case_c)
    radius1 = (case_b - disc) / (2 * case_a)
    # case 2: shifted box fully contains the original
    case_a = 4
    case_b = 2 * (h + w)
    case_c = (1 - thresh_min) * w * h
    disc = np.sqrt(case_b**2 - 4 * case_a * case_c)
    radius2 = (case_b - disc) / (2 * case_a)
    # case 3: partial overlap
    case_a = 4 * thresh_min
    case_b = -2 * thresh_min * (h + w)
    case_c = (thresh_min - 1) * w * h
    disc = np.sqrt(case_b**2 - 4 * case_a * case_c)
    radius3 = (case_b + disc) / (2 * case_a)
    return min(radius1, radius2, radius3)
def gaussian2D(shape, sigma=1):
    """Evaluate an unnormalized 2D gaussian (peak value 1 at the center)
    on a grid of the given shape."""
    half_h, half_w = [(edge - 1.) / 2. for edge in shape]
    ys, xs = np.ogrid[-half_h:half_h + 1, -half_w:half_w + 1]
    gauss = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    # Flush near-zero tails to exactly zero.
    gauss[gauss < np.finfo(gauss.dtype).eps * gauss.max()] = 0
    return gauss
def draw_umich_gaussian(heatmap, center, radius, k=1):
    """Splat a gaussian peak of weight ``k`` at ``center`` onto ``heatmap``
    in place, combining with existing values via elementwise maximum.

    The kernel is clipped at the heatmap borders. Returns the (mutated)
    heatmap.
    """
    diameter = 2 * radius + 1
    # Inlined gaussian2D((diameter, diameter), sigma=diameter / 6).
    sigma = diameter / 6
    half = (diameter - 1.) / 2.
    ys, xs = np.ogrid[-half:half + 1, -half:half + 1]
    kernel = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0

    cx, cy = int(center[0]), int(center[1])
    height, width = heatmap.shape[0:2]
    # Extent of the kernel that actually falls inside the heatmap.
    left, right = min(cx, radius), min(width - cx, radius + 1)
    top, bottom = min(cy, radius), min(height - cy, radius + 1)
    roi = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    kernel_roi = kernel[radius - top:radius + bottom,
                        radius - left:radius + right]
    if min(kernel_roi.shape) > 0 and min(roi.shape) > 0:
        np.maximum(roi, kernel_roi * k, out=roi)
    return heatmap
@manager.TRANSFORMS.add_component
class Gt2SmokeTarget(TransformABC):
def __init__(self,
             mode: str,
             num_classes: int,
             flip_prob: float = 0.5,
             aug_prob: float = 0.3,
             max_objs: int = 50,
             input_size: Tuple[int, int] = (1280, 384),
             output_stride: Tuple[int, int] = (4, 4),
             shift_range: Tuple[float, float, float] = (),
             scale_range: Tuple[float, float, float] = ()):
    """Configure the SMOKE training-target generator.

    Args:
        mode (str): 'train' enables flip/affine augmentation; anything
            else runs the deterministic inference path.
        num_classes (int): number of heatmap classes.
        flip_prob (float): probability of horizontal flip (train only).
        aug_prob (float): probability of shift/scale augmentation.
        max_objs (int): maximum number of objects encoded per image.
        input_size (tuple): network input (width, height).
        output_stride (tuple): downsample factor of the output heatmap.
        shift_range / scale_range (tuple): stored but not read in the
            visible code — the actual magnitudes come from the hard-coded
            ``self.shift_scale``; presumably legacy config knobs (confirm
            before removing).
    """
    self.max_objs = max_objs
    self.input_width = input_size[0]
    self.input_height = input_size[1]
    # Heatmap resolution = input resolution / output stride.
    self.output_width = self.input_width // output_stride[0]
    self.output_height = self.input_height // output_stride[1]
    self.shift_range = shift_range
    self.scale_range = scale_range
    # Hard-coded (shift, scale) magnitudes used by the affine augmentation.
    self.shift_scale = (0.2, 0.4)
    self.flip_prob = flip_prob
    self.aug_prob = aug_prob
    self.is_train = True if mode == 'train' else False
    self.num_classes = num_classes
def __call__(self, sample: Sample):
img = Image.fromarray(sample.data)
K = sample.meta.camera_intrinsic
bboxes_3d = sample.bboxes_3d
labels = sample.labels
center = np.array([i / 2 for i in img.size], dtype=np.float32)
size = np.array([i for i in img.size], dtype=np.float32)
flipped = False
if (self.is_train) and (random.random() < self.flip_prob):
flipped = True
img = img.transpose(Image.FLIP_LEFT_RIGHT)
center[0] = size[0] - center[0] - 1
K[0, 2] = size[0] - K[0, 2] - 1
affine = False
if (self.is_train) and (random.random() < self.aug_prob):
affine = True
shift, scale = self.shift_scale[0], self.shift_scale[1]
shift_ranges = np.arange(-shift, shift + 0.1, 0.1)
center[0] += size[0] * random.choice(shift_ranges)
center[1] += size[1] * random.choice(shift_ranges)
scale_ranges = np.arange(1 - scale, 1 + scale + 0.1, 0.1)
size *= random.choice(scale_ranges)
center_size = [center, size]
trans_affine = get_transfrom_matrix(
center_size, [self.input_width, self.input_height])
trans_affine_inv = np.linalg.inv(trans_affine)
img = img.transform(
(self.input_width, self.input_height),
method=Image.AFFINE,
data=trans_affine_inv.flatten()[:6],
resample=Image.BILINEAR,
)
trans_mat = get_transfrom_matrix(
center_size, [self.output_width, self.output_height])
if not self.is_train:
# for inference we parametrize with original size
target = {}
target["image_size"] = size
target["is_train"] = self.is_train
target["trans_mat"] = trans_mat
target["K"] = K
sample.target = target
sample.data = np.array(img)
return sample
heat_map = np.zeros(
[self.num_classes, self.output_height, self.output_width],
dtype=np.float32)
regression = np.zeros([self.max_objs, 3, 8], dtype=np.float32)
cls_ids = np.zeros([self.max_objs], dtype=np.int32)
proj_points = np.zeros([self.max_objs, 2], dtype=np.int32)
p_offsets = np.zeros([self.max_objs, 2], dtype=np.float32)
c_offsets = np.zeros([self.max_objs, 2], dtype=np.float32)
dimensions = np.zeros([self.max_objs, 3], dtype=np.float32)
locations = np.zeros([self.max_objs, 3], dtype=np.float32)
rotys = np.zeros([self.max_objs], dtype=np.float32)
reg_mask = np.zeros([self.max_objs], dtype=np.uint8)
flip_mask = np.zeros([self.max_objs], dtype=np.uint8)
bbox2d_size = np.zeros([self.max_objs, 2], dtype=np.float32)
for i, (box3d, label) in enumerate(zip(bboxes_3d, labels)):
if i == self.max_objs:
break
locs = np.array(box3d[0:3])
rot_y = np.array(box3d[6])
if flipped:
locs[0] *= -1
rot_y *= -1
height, width, length = box3d[3:6]
point, box2d, box3d = encode_label(
K, rot_y, np.array([length, height, width]), locs)
if np.all(box2d == 0):
continue
point = affine_transform(point, trans_mat)
box2d[:2] = affine_transform(box2d[:2], trans_mat)
box2d[2:] = affine_transform(box2d[2:], trans_mat)
box2d[[0, 2]] = box2d[[0, 2]].clip(0, self.output_width - 1)
box2d[[1, 3]] = box2d[[1, 3]].clip(0, self.output_height - 1)
h, w = box2d[3] - box2d[1], box2d[2] - box2d[0]
center = np.array([(box2d[0] + box2d[2]) / 2,
(box2d[1] + box2d[3]) / 2],
dtype=np.float32)
if (0 < center[0] < self.output_width) and (0 < center[1] <
self.output_height):
point_int = center.astype(np.int32)
p_offset = point - point_int
c_offset = center - point_int
radius = gaussian_radius(h, w)
radius = max(0, int(radius))
heat_map[label] = draw_umich_gaussian(heat_map[label],
point_int, radius)
cls_ids[i] = label
regression[i] = box3d
proj_points[i] = point_int
p_offsets[i] = p_offset
c_offsets[i] = c_offset
dimensions[i] = np.array([length, height, width])
locations[i] = locs
rotys[i] = rot_y
reg_mask[i] = 1 if not affine else 0
flip_mask[i] = 1 if not affine and flipped else 0
# targets for 2d bbox
bbox2d_size[i, 0] = w
bbox2d_size[i, 1] = h
target = {}
target["image_size"] = np.array(img.size)
target["is_train"] = self.is_train
target["trans_mat"] = trans_mat
target["K"] = K
target["hm"] = heat_map
target["reg"] = regression
target["cls_ids"] = cls_ids
target["proj_p"] = proj_points
target["dimensions"] = dimensions
target["locations"] = locations
target["rotys"] = rotys
target["reg_mask"] = reg_mask
target["flip_mask"] = flip_mask
target["bbox_size"] = bbox2d_size
target["c_offsets"] = c_offsets
sample.target = target
sample.data = np.array(img)
return sample
@manager.TRANSFORMS.add_component
class Gt2CenterPointTarget(TransformABC):
    """Build CenterPoint training targets (BEV heatmaps + box regression).

    Ground-truth boxes are split across detection heads ("tasks"), rendered
    as Gaussian peaks on per-class BEV heatmaps, and encoded as regression
    targets at each object's center cell.
    """

    def __init__(self,
                 tasks: Tuple[dict],
                 down_ratio: int,
                 point_cloud_range: Tuple[float],
                 voxel_size: Tuple[float],
                 gaussian_overlap: float = 0.1,
                 max_objs: int = 500,
                 min_radius: int = 2):
        self.tasks = tasks
        self.down_ratio = down_ratio
        self.gaussian_overlap = gaussian_overlap
        self.max_objs = max_objs
        self.min_radius = min_radius
        self.voxel_size_x, self.voxel_size_y, self.voxel_size_z = voxel_size[0:
                                                                             3]
        self.point_cloud_range_x_min, self.point_cloud_range_y_min, self.point_cloud_range_z_min = point_cloud_range[
            0:3]
        self.point_cloud_range_x_max, self.point_cloud_range_y_max, self.point_cloud_range_z_max = point_cloud_range[
            3:6]
        # Voxel-grid resolution (number of voxels) along each axis.
        self.grid_size_x = int(
            round((point_cloud_range[3] - point_cloud_range[0]) /
                  self.voxel_size_x))
        self.grid_size_y = int(
            round((point_cloud_range[4] - point_cloud_range[1]) /
                  self.voxel_size_y))
        self.grid_size_z = int(
            round((point_cloud_range[5] - point_cloud_range[2]) /
                  self.voxel_size_z))
        # Per-head class partitions plus the flattened global class list.
        self.num_classes_by_task = [task["num_class"] for task in tasks]
        self.class_names_by_task = [task["class_names"] for task in tasks]
        self.all_class_names = list(itertools.chain(*self.class_names_by_task))

    def _gaussian_radius(self, height, width, min_overlap=0.5):
        """Smallest Gaussian radius keeping IoU >= min_overlap for a
        height x width footprint (three quadratic overlap cases)."""
        a1 = 1
        b1 = (height + width)
        c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
        sq1 = np.sqrt(b1**2 - 4 * a1 * c1)
        r1 = (b1 + sq1) / 2

        a2 = 4
        b2 = 2 * (height + width)
        c2 = (1 - min_overlap) * width * height
        sq2 = np.sqrt(b2**2 - 4 * a2 * c2)
        r2 = (b2 + sq2) / 2

        a3 = 4 * min_overlap
        b3 = -2 * min_overlap * (height + width)
        c3 = (min_overlap - 1) * width * height
        sq3 = np.sqrt(b3**2 - 4 * a3 * c3)
        r3 = (b3 + sq3) / 2
        return min(r1, r2, r3)

    def __call__(self, sample: Sample):
        """Encode ``sample`` into per-task CenterPoint targets."""
        # Get feature map size
        feature_map_size_x = self.grid_size_x // self.down_ratio
        feature_map_size_y = self.grid_size_y // self.down_ratio

        # Reorder the bboxes_3d and labels for each task
        labels = sample.labels
        bboxes_3d = sample.bboxes_3d
        velocities = getattr(sample.bboxes_3d, "velocities", None)
        bboxes_3d_origin = sample.bboxes_3d.origin
        required_origin = [0.5, 0.5, 0.5]
        # Re-center boxes to the (0.5, 0.5, 0.5) origin convention.
        if list(bboxes_3d_origin) != required_origin:
            bboxes_3d_origin = np.asarray(bboxes_3d_origin)
            required_origin = np.asarray([0.5, 0.5, 0.5])
            bboxes_3d[..., :3] += bboxes_3d[..., 3:6] * (
                required_origin - bboxes_3d_origin)

        # Split ground truth across tasks; labels become 0-based per task.
        bboxes_3d_by_task = []
        labels_by_task = []
        velocities_by_task = []
        task_label_begin = 0
        for task_idx, class_names in enumerate(self.class_names_by_task):
            task_bboxes_3d = []
            task_labels = []
            task_velocities = []
            for class_name in class_names:
                mask = np.where(
                    labels == self.all_class_names.index(class_name))
                task_bboxes_3d.append(bboxes_3d[mask])
                task_labels.append(labels[mask] - task_label_begin)
                if velocities is not None:
                    task_velocities.append(velocities[mask])
            task_label_begin += len(class_names)
            bboxes_3d_by_task.append(np.concatenate(task_bboxes_3d, axis=0))
            labels_by_task.append(np.concatenate(task_labels))
            if velocities is not None:
                velocities_by_task.append(
                    np.concatenate(task_velocities, axis=0))

        # Limit the orientation angle within [-np.pi, +np.pi]
        for task_bboxes_3d in bboxes_3d_by_task:
            task_bboxes_3d[:, -1] = BBoxes3D.limit_period(
                task_bboxes_3d[:, -1], offset=0.5, period=np.pi * 2)

        heat_maps, target_bboxs, center_idxs, target_masks, target_labels = [], [], [], [], []
        for task_idx, task in enumerate(self.tasks):
            # Per-class BEV heatmap for this head.
            heat_map = np.zeros((len(self.class_names_by_task[task_idx]),
                                 feature_map_size_y, feature_map_size_x),
                                dtype=np.float32)
            # [x, y, z, w, l, h, vx, vy, rots, rotc]
            if len(velocities_by_task) > 0:
                target_bbox = np.zeros((self.max_objs, 10), dtype=np.float32)
            else:
                target_bbox = np.zeros((self.max_objs, 8), dtype=np.float32)
            center_idx = np.zeros((self.max_objs), dtype=np.int64)
            target_mask = np.zeros((self.max_objs), dtype=np.uint8)
            target_label = np.zeros((self.max_objs), dtype=np.int64)

            num_objs = min(bboxes_3d_by_task[task_idx].shape[0], self.max_objs)
            for obj_idx in range(num_objs):
                cls_id = labels_by_task[task_idx][obj_idx]
                # Box footprint expressed in feature-map cells.
                w, l, h = bboxes_3d_by_task[task_idx][obj_idx][3:6]
                w = w / self.voxel_size_x / self.down_ratio
                l = l / self.voxel_size_y / self.down_ratio
                if w > 0 and l > 0:
                    radius = self._gaussian_radius(
                        l, w, min_overlap=self.gaussian_overlap)
                    radius = max(self.min_radius, int(radius))
                    # Box center in (fractional) feature-map coordinates.
                    x, y, z = bboxes_3d_by_task[task_idx][obj_idx][0:3]
                    center = np.array([(x - self.point_cloud_range_x_min) /
                                       self.voxel_size_x / self.down_ratio,
                                       (y - self.point_cloud_range_y_min) /
                                       self.voxel_size_y / self.down_ratio],
                                      dtype=np.float32)
                    center_int = center.astype(np.int32)
                    # Skip objects whose center falls outside the feature map.
                    if not (0 <= center_int[0] < feature_map_size_x
                            and 0 <= center_int[1] < feature_map_size_y):
                        continue
                    draw_umich_gaussian(heat_map[cls_id], center, radius)

                    target_label[obj_idx] = cls_id
                    # Flattened (row-major) cell index of the object's center.
                    center_idx[obj_idx] = center_int[
                        1] * feature_map_size_x + center_int[0]
                    target_mask[obj_idx] = 1

                    angle = bboxes_3d_by_task[task_idx][obj_idx][-1]
                    # Regression target: sub-cell offset, z, log dims,
                    # (optional vx/vy), then sin/cos of the heading.
                    if len(velocities_by_task) > 0:
                        vx, vy = velocities_by_task[task_idx][obj_idx][0:2]
                        target_bbox[obj_idx] = np.concatenate(
                            (center - center_int, z,
                             np.log(bboxes_3d_by_task[task_idx][obj_idx][3:6]),
                             np.array(vx), np.array(vy), np.sin(angle),
                             np.cos(angle)),
                            axis=None)
                    else:
                        target_bbox[obj_idx] = np.concatenate(
                            (center - center_int, z,
                             np.log(bboxes_3d_by_task[task_idx][obj_idx][3:6]),
                             np.sin(angle), np.cos(angle)),
                            axis=None)

            heat_maps.append(heat_map)
            target_bboxs.append(target_bbox)
            target_masks.append(target_mask)
            center_idxs.append(center_idx)
            target_labels.append(target_label)

        sample.heat_map = heat_maps
        sample.target_bbox = target_bboxs
        sample.center_idx = center_idxs
        sample.target_mask = target_masks
        sample.target_label = target_labels

        # Drop raw annotation fields no longer needed downstream.
        sample.pop('bboxes_2d', None)
        sample.pop('bboxes_3d', None)
        sample.pop('path', None)
        sample.pop('labels', None)
        sample.pop('attrs', None)
        sample.pop('ignored_bboxes_3d', None)
        return sample
@manager.TRANSFORMS.add_component
class Gt2PointPillarsTarget(object):
    """
    Assign ground truth to anchors.

    Args:
        positive_fraction (float, optional): None or a float between 0 and 1. If not None, the ratio between the
            number of positive samples and the number of negative samples will be kept to `positive_fraction`.
            If there are not enough positives, fill the rest with negatives.
        rpn_batch_size (int, optional): Sample size. Defaults to 512.
        norm_by_num_examples (bool, optional): Whether to normalize box_weight by number of samples.
            Defaults to False.
    """

    def __init__(self,
                 positive_fraction=None,
                 rpn_batch_size=512,
                 norm_by_num_examples=False):
        # IoU-like similarity used to match anchors against gt boxes.
        self.region_similarity_calculator = F.nearest_iou_similarity
        self.positive_fraction = positive_fraction
        self.rpn_batch_size = rpn_batch_size
        self.norm_by_num_examples = norm_by_num_examples

    def assign(self,
               all_anchors,
               gt_boxes,
               anchors_mask=None,
               gt_classes=None,
               matched_thresholds=0.6,
               unmatched_thresholds=0.45):
        """
        Calculate the target for each sample.

        Args:
            all_anchors: [num_of_anchors, box_ndim] float array.
            gt_boxes: [num_gt_boxes, box_ndim] float array.
            anchors_mask: Bool array indicates valid anchors.
            gt_classes: [num_gt_boxes] int array. indicate gt classes, must
                start with 1.
            matched_thresholds: float, iou less than matched_threshold will
                be treated as positives.
            unmatched_thresholds: float, iou less than unmatched_threshold will
                be treated as negatives.

        Returns:
            dict with keys ``labels`` (per-anchor class, 0 = background,
            -1 = ignore), ``reg_targets``, ``reg_weights``,
            ``assigned_anchors_overlap`` and ``positive_gt_id`` (the latter
            two are None when no ground truth / valid anchors exist).
        """
        total_anchors = all_anchors.shape[0]
        if anchors_mask is not None:
            # Filter out invalid anchors whose area < threshold
            inds_inside = np.where(anchors_mask)[0]
            anchors = all_anchors[inds_inside, :]
            if not isinstance(matched_thresholds, float):
                matched_thresholds = matched_thresholds[inds_inside]
            if not isinstance(unmatched_thresholds, float):
                unmatched_thresholds = unmatched_thresholds[inds_inside]
        else:
            anchors = all_anchors
        num_valid_anchors = len(anchors)

        if gt_classes is None:
            gt_classes = np.ones([gt_boxes.shape[0]], dtype=np.int32)

        # Compute anchor labels:
        # 0 is negative, -1 is don't care (ignore)
        labels = np.full((num_valid_anchors, ), -1, dtype=np.int32)
        gt_ids = np.full((num_valid_anchors, ), -1, dtype=np.int32)

        if len(gt_boxes) > 0 and anchors.shape[0] > 0:
            # Compute overlaps between the anchors and the gt boxes overlaps
            anchor_by_gt_overlap = self.region_similarity_calculator(
                anchors, gt_boxes)
            # Map from anchor to gt box that has the highest overlap
            anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)
            # For each anchor, amount of overlap with most overlapping gt box
            anchor_to_gt_max = anchor_by_gt_overlap[np.arange(
                num_valid_anchors), anchor_to_gt_argmax]

            # Map from gt box to an anchor that has the highest overlap
            gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)
            # For each gt box, amount of overlap with most overlapping anchor
            gt_to_anchor_max = anchor_by_gt_overlap[
                gt_to_anchor_argmax,
                np.arange(anchor_by_gt_overlap.shape[1])]
            # must remove gt which doesn't match any anchor.
            empty_gt_mask = gt_to_anchor_max == 0
            gt_to_anchor_max[empty_gt_mask] = -1

            # Find all anchors that share the max overlap amount
            # (this includes many ties)
            anchors_with_max_overlap = np.where(
                anchor_by_gt_overlap == gt_to_anchor_max)[0]
            # Fg label: for each gt use anchors with the highest overlap
            # (including ties)
            gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
            labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
            gt_ids[anchors_with_max_overlap] = gt_inds_force

            # Fg label: above threshold IOU
            pos_inds = anchor_to_gt_max >= matched_thresholds
            gt_inds = anchor_to_gt_argmax[pos_inds]
            labels[pos_inds] = gt_classes[gt_inds]
            gt_ids[pos_inds] = gt_inds
            bg_inds = np.where(anchor_to_gt_max < unmatched_thresholds)[0]
        else:
            bg_inds = np.arange(num_valid_anchors)

        fg_inds = np.where(labels > 0)[0]
        fg_max_overlap = None
        # Bugfix: gt_pos_ids must be initialized here. Previously it was
        # only bound inside the branch below, so building `ret` raised a
        # NameError whenever there were no gt boxes or no valid anchors.
        gt_pos_ids = None
        if len(gt_boxes) > 0 and anchors.shape[0] > 0:
            fg_max_overlap = anchor_to_gt_max[fg_inds]
            gt_pos_ids = gt_ids[fg_inds]

        # subsample positive labels if there are too many
        if self.positive_fraction is not None:
            num_fg = int(self.positive_fraction * self.rpn_batch_size)
            if len(fg_inds) > num_fg:
                disable_inds = np.random.choice(
                    fg_inds, size=(len(fg_inds) - num_fg), replace=False)
                labels[disable_inds] = -1
                fg_inds = np.where(labels > 0)[0]

            # subsample negative labels if there are too many
            # (samples with replacement, but since the set of bg inds is large,
            # most samples will not have repeats)
            num_bg = self.rpn_batch_size - np.sum(labels > 0)
            if len(bg_inds) > num_bg:
                enable_inds = bg_inds[np.random.randint(
                    len(bg_inds), size=num_bg)]
                labels[enable_inds] = 0
        else:
            if len(gt_boxes) == 0 or anchors.shape[0] == 0:
                labels[:] = 0
            else:
                labels[bg_inds] = 0
                # re-enable anchors_with_max_overlap
                labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]

        # Regression targets are only computed for foreground anchors.
        reg_targets = np.zeros((num_valid_anchors, all_anchors.shape[-1]),
                               dtype=all_anchors.dtype)
        if len(gt_boxes) > 0 and anchors.shape[0] > 0:
            reg_targets[fg_inds, :] = second_box_encode(
                gt_boxes[anchor_to_gt_argmax[fg_inds], :], anchors[fg_inds, :])

        reg_weights = np.zeros((num_valid_anchors, ), dtype=all_anchors.dtype)
        # uniform weighting of examples (given non-uniform sampling)
        if self.norm_by_num_examples:
            num_examples = np.sum(labels >= 0)  # neg + pos
            num_examples = np.maximum(1.0, num_examples)
            reg_weights[labels > 0] = 1.0 / num_examples
        else:
            reg_weights[labels > 0] = 1.0

        # Map up to original set of anchors
        if anchors_mask is not None:
            labels = self._unmap(labels, total_anchors, inds_inside, fill=-1)
            reg_targets = self._unmap(
                reg_targets, total_anchors, inds_inside, fill=0)
            reg_weights = self._unmap(
                reg_weights, total_anchors, inds_inside, fill=0)

        ret = {
            "labels": labels,
            "reg_targets": reg_targets,
            "reg_weights": reg_weights,
            "assigned_anchors_overlap": fg_max_overlap,
            "positive_gt_id": gt_pos_ids,
        }
        return ret

    def _unmap(self, data, count, inds, fill=0):
        """
        Unmap a subset of item (data) back to the original set of items (of size count)
        """
        if count == len(inds):
            return data
        if len(data.shape) == 1:
            ret = np.full((count, ), fill, dtype=data.dtype)
            ret[inds] = data
        else:
            ret = np.full((count, ) + data.shape[1:], fill, dtype=data.dtype)
            ret[inds, :] = data
        return ret

    def __call__(self, sample: Sample):
        """Assign anchor targets for ``sample`` and strip raw fields."""
        # Wrap the heading angle into [-pi, pi) before encoding.
        sample.bboxes_3d[:, -1] = BBoxes3D.limit_period(
            sample.bboxes_3d[:, -1], offset=0.5, period=np.pi * 2)
        ret = self.assign(
            sample.anchors,
            sample.bboxes_3d,
            anchors_mask=sample.get("anchors_mask", None),
            gt_classes=sample.labels +
            1,  # background is regarded as class 0, thus shift labels
            matched_thresholds=sample.matched_thresholds,
            unmatched_thresholds=sample.unmatched_thresholds)
        sample.reg_targets = ret["reg_targets"]
        sample.reg_weights = ret["reg_weights"]
        sample.labels = ret["labels"]
        # the followings are not used in training
        sample.pop("anchors", None)
        sample.pop("bboxes_3d", None)
        sample.pop('path', None)
        sample.pop("difficulties", None)
        sample.pop("ignored_bboxes_3d", None)
        return sample
@manager.TRANSFORMS.add_component
class Gt2PVRCNNTarget(TransformABC):
    """Convert sample annotations into the ``gt_boxes`` array used by PV-RCNN.

    Boxes are re-centered to a (0.5, 0.5, 0.5) origin, the dims at indices
    3 and 4 are swapped, the heading is remapped and wrapped into [-pi, pi),
    and 1-based class labels are appended as the last column.
    """

    def __init__(self):
        pass

    def __call__(self, sample: Sample):
        # Reorder the bboxes_3d and labels for each task
        labels = sample.labels
        bboxes_3d = sample.bboxes_3d
        bboxes_3d_origin = sample.bboxes_3d.origin
        required_origin = [0.5, 0.5, 0.5]
        # Shift box centers so the origin convention becomes (0.5, 0.5, 0.5).
        if list(bboxes_3d_origin) != required_origin:
            bboxes_3d_origin = np.asarray(bboxes_3d_origin)
            required_origin = np.asarray([0.5, 0.5, 0.5])
            bboxes_3d[..., :3] += bboxes_3d[..., 3:6] * (
                required_origin - bboxes_3d_origin)

        # Swap the two planar dims and remap the heading angle to PV-RCNN's
        # convention, then wrap it into [-pi, pi).
        bboxes_3d[..., 3:5] = bboxes_3d[..., [4, 3]]
        bboxes_3d[..., -1] = -(bboxes_3d[..., -1] + np.pi / 2.)
        bboxes_3d[..., -1] = BBoxes3D.limit_period(
            bboxes_3d[..., -1], offset=0.5, period=2 * np.pi)
        # Labels become 1-based (0 is reserved for background).
        labels = labels + 1

        gt_boxes = np.concatenate(
            (bboxes_3d, labels.reshape(-1, 1).astype(np.float32)), axis=1)
        sample.gt_boxes = gt_boxes

        # Drop fields no longer needed downstream.
        sample.pop('bboxes_2d', None)
        sample.pop('bboxes_3d', None)
        sample.pop('path', None)
        sample.pop('labels', None)
        sample.pop('attrs', None)
        sample.pop('ignored_bboxes_3d', None)
        return sample
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/transforms/base.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Optional
import numpy as np
from paddle3d.apis import manager
from paddle3d.sample import Sample
class TransformABC(abc.ABC):
    """Abstract base class for all data transforms.

    Subclasses implement ``__call__`` to mutate and return the ``Sample``.
    """

    @abc.abstractmethod
    def __call__(self, sample: Sample):
        """Apply the transform to ``sample`` and return the mutated sample."""
@manager.TRANSFORMS.add_component
class Compose(TransformABC):
    """Chain several transforms and apply them sequentially to a sample.

    After the chained transforms run, image data in HWC layout is converted
    to CHW, and multiview image lists are stacked into a single array.
    """

    def __init__(self, transforms):
        # Accept only a list to catch configuration mistakes early.
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        self.transforms = transforms

    def __call__(self, sample: Sample):
        """Run every transform in order, then normalize the channel layout."""
        for t in self.transforms:
            sample = t(sample)

        if sample.modality == 'image' and sample.meta.channel_order == 'hwc':
            sample.data = sample.data.transpose((2, 0, 1))
            sample.meta.channel_order = "chw"
        elif sample.modality == 'multiview':
            # Stack per-view HWC images into one (num_views, C, H, W) array.
            sample.img = np.stack(
                [img.transpose(2, 0, 1) for img in sample.img], axis=0)

        return sample
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/geometries/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .bbox import BBoxes2D, BBoxes3D, CoordMode
from .pointcloud import PointCloud
from .structure import StructureEncoder
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/geometries/pointcloud.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle3d.geometries.structure import _Structure
class PointCloud(_Structure):
    """Point cloud stored as a float32 array of shape [N, C] or [N, 8, C].

    The first three channels are x, y, z; any extra channels (e.g.
    intensity) are carried through the geometric operations unchanged.
    """

    def __init__(self, data: np.ndarray):
        if not isinstance(data, np.ndarray):
            data = np.array(data)

        if data.ndim != 2 and data.ndim != 3:
            # When the data expands in 8 directions, the data.ndim is 3
            # [-1, 3] --> [-1, 8, 3]
            #     7 -------- 4
            #    /|         /|
            #   6 -------- 5 .
            #   | |        | |
            #   . 3 -------- 0
            #   |/         |/
            #   2 -------- 1
            raise ValueError(
                'Illegal PointCloud data with number of dim {}'.format(
                    data.ndim))

        if data.shape[-1] < 3:
            raise ValueError('Illegal PointCloud data with shape {}'.format(
                data.shape))

    def scale(self, factor: float):
        """Scale x, y, z by a uniform factor (extra channels untouched)."""
        self[..., :3] = self[..., :3] * factor

    def translate(self, translation: np.ndarray):
        """Translate x, y, z by the given offset vector, in place."""
        self[..., :3] = self[..., :3] + translation

    def rotate_around_z(self, angle: np.ndarray):
        """Rotate points around the z-axis by ``angle`` (radians), in place."""
        # Rotation matrix around the z-axis
        rot_sin = np.sin(angle)
        rot_cos = np.cos(angle)
        if self.ndim == 2:
            rotation_matrix = np.array(
                [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
                dtype=self.dtype)
        elif self.ndim == 3:
            # Batched case: one 3x3 rotation per point.
            zeros = np.zeros(self.shape[0])
            ones = np.ones(self.shape[0])
            rotation_matrix = np.array(
                [[rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros],
                 [zeros, zeros, ones]],
                dtype=self.dtype)
            # NOTE(review): np.array(...) above is laid out (3, 3, N); a plain
            # reshape (rather than transpose(2, 0, 1)) to (N, 3, 3) changes
            # the element ordering — verify the batched path against callers.
            rotation_matrix = rotation_matrix.reshape([-1, 3, 3])

        # Rotate x,y,z
        self[..., :3] = self[..., :3] @ rotation_matrix

    def flip(self, axis: int):
        """Mirror the cloud across the given axis (0: x-axis, 1: y-axis)."""
        if axis not in [0, 1]:
            raise ValueError(
                "Flip axis should be 0 or 1, but recieved is {}".format(axis))
        if axis == 0:  # flip along x-axis: negate y
            self[:, 1] = -self[:, 1]
        elif axis == 1:  # flip along y-axis: negate x
            self[:, 0] = -self[:, 0]

    def shuffle(self):
        """Randomly permute the point order, in place."""
        self[...] = np.random.permutation(
            self[...])  # permutation is faster than shuffle

    def get_mask_of_points_outside_range(self, limit_range):
        """Return a bool mask that is True for points INSIDE the x/y range.

        Note: despite the name, the mask marks points whose x and y fall
        within [limit_range[0], limit_range[3]] x [limit_range[1],
        limit_range[4]]; z is not checked.
        """
        mask = (self[:, 0] >= limit_range[0]) & (self[:, 0] <= limit_range[3]) \
            & (self[:, 1] >= limit_range[1]) & (self[:, 1] <= limit_range[4])
        return mask
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/geometries/structure.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import JSONEncoder
import numpy as np
class StructureEncoder(JSONEncoder):
    """JSON encoder that serializes ``_Structure`` arrays as nested lists."""

    def default(self, obj):
        # Convert geometry arrays to plain Python lists; defer everything
        # else to the base encoder (which raises TypeError for unknowns).
        if isinstance(obj, _Structure):
            return obj.tolist()
        return super().default(obj)
class _Structure(np.ndarray):
    """Base ndarray subclass for geometric structures.

    Coerces any array-like input to a float32 ndarray and views it as the
    concrete subclass, so geometry types behave like plain numpy arrays.
    """

    def __new__(cls, data: np.ndarray, *args, **kwargs):
        array = data if isinstance(data, np.ndarray) else np.array(data)
        return np.asarray(array.astype(np.float32)).view(cls)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/geometries/bbox.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The geometry transformations for bounding box is modified from
https://github.com/tianweiy/CenterPoint and https://github.com/traveller59/second.pytorch
Ths copyright of tianweiy/CenterPoint is as follows:
MIT License [see LICENSE for details].
Ths copyright of traveller59/second is as follows:
MIT License [see LICENSE for details].
"""
from enum import Enum
from typing import List
import numba
import numpy as np
import scipy
from scipy.spatial import Delaunay
from pyquaternion import Quaternion
from paddle3d.geometries.structure import _Structure
class CoordMode(Enum):
    """Axis conventions for 3D boxes from different datasets/sensors."""

    # KITTI camera frame:
    # z front
    #  /
    # /
    # 0 ------> x right
    # |
    # |
    # v
    # y down
    KittiCamera = 0

    # KITTI lidar frame:
    #         up z
    #            ^   x front
    #            |  /
    #            | /
    # left y <------ 0
    KittiLidar = 1

    # NuScenes lidar frame:
    # up z
    #    ^   y front
    #    |  /
    #    | /
    #    0 ------> x right
    NuScenesLidar = 2
class BBoxes2D(_Structure):
    """[N, 4] array of 2D boxes.

    The flip/resize methods below index columns as (x-ish at 0/2, y-ish at
    1/3); ``horizontal_flip_coords`` treats columns 0 and 2 as left/right
    x-coordinates. Exact box layout otherwise depends on the caller.
    """

    def __init__(self, data: np.ndarray):
        if not isinstance(data, np.ndarray):
            data = np.array(data)

        if data.ndim != 2:
            raise ValueError('Illegal 2D box data with number of dim {}'.format(
                data.ndim))

        if data.shape[1] != 4:
            raise ValueError('Illegal 2D box data with shape {}'.format(
                data.shape))

    def scale(self, factor: float):
        # Intentionally a no-op for 2D boxes.
        ...

    def translate(self, translation: np.ndarray):
        # Intentionally a no-op for 2D boxes.
        ...

    def rotate(self, rotation: np.ndarray):
        # Intentionally a no-op for 2D boxes.
        ...

    def horizontal_flip(self, image_width: float):
        """
        The inputs are pixel indices, they are flipped by `(W - 1 - x, H - 1 - y)`.
        """
        self[:, 0] = image_width - self[:, 0] - 1

    def horizontal_flip_coords(self, image_width: float):
        """
        The inputs are floating point coordinates, they are flipped by `(W - x, H - y)`.
        """
        # Swap left/right edges so x_min stays <= x_max after mirroring.
        self[:, 0], self[:,
                         2] = image_width - self[:, 2], image_width - self[:, 0]

    def vertical_flip(self, image_height: float):
        """Flip pixel-index y-coordinates: y -> H - 1 - y."""
        self[:, 1] = image_height - self[:, 1] - 1

    def resize(self, h: int, w: int, newh: int, neww: int):
        """Rescale boxes from an (h, w) image to a (newh, neww) image."""
        factor_x = neww / w
        factor_y = newh / h
        self[:, 0::2] *= factor_x
        self[:, 1::2] *= factor_y
class BBoxes3D(_Structure):
"""
"""
def __init__(self,
data: np.ndarray,
coordmode: CoordMode = 0,
velocities: List[float] = None,
origin: List[float] = [0.5, 0.5, 0.5],
rot_axis: int = 2):
if not isinstance(data, np.ndarray):
data = np.array(data)
self.coordmode = coordmode
self.velocities = velocities
self.origin = origin
self.rot_axis = rot_axis
@property
def corners_3d(self):
# corners_3d format: x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0
dx, dy, dz = self[:, 3:6].T
b = dz.shape[0]
x_corners = np.array([[0., 0., 0., 0., 1., 1., 1., 1.]],
self.dtype).repeat(
b, axis=0)
y_corners = np.array([[0., 0., 1., 1., 0., 0., 1., 1.]],
self.dtype).repeat(
b, axis=0)
z_corners = np.array([[0., 1., 1., 0., 0., 1., 1., 0.]],
self.dtype).repeat(
b, axis=0)
x_corners = (
dx[:, np.newaxis] * (x_corners - self.origin[0]))[:, :, np.newaxis]
y_corners = (
dy[:, np.newaxis] * (y_corners - self.origin[1]))[:, :, np.newaxis]
z_corners = (
dz[:, np.newaxis] * (z_corners - self.origin[2]))[:, :, np.newaxis]
corners = np.concatenate([x_corners, y_corners, z_corners], axis=-1)
angle = self[:, -1]
corners = rotation_3d_in_axis(corners, angle, axis=self.rot_axis)
centers = self[:, 0:3][:, np.newaxis, :]
corners += centers
return corners
@property
def corners_2d(self):
# corners_2d format: x0y0, x0y1, x1y1, x1y0
dx, dy = self[:, 3:5].T
b = dy.shape[0]
x_corners = np.array([[0., 0., 1., 1.]], self.dtype).repeat(b, axis=0)
y_corners = np.array([[0., 1., 1., 0.]], self.dtype).repeat(b, axis=0)
x_corners = (
dx[:, np.newaxis] * (x_corners - self.origin[0]))[:, :, np.newaxis]
y_corners = (
dy[:, np.newaxis] * (y_corners - self.origin[1]))[:, :, np.newaxis]
corners = np.concatenate([x_corners, y_corners], axis=-1)
angle = self[:, -1]
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rotation_matrix = np.array([[rot_cos, -rot_sin], [rot_sin, rot_cos]],
dtype=self.dtype)
#rotation_matrix = rotation_matrix.transpose([2, 0, 1])
#corners = corners @ rotation_matrix #TODO(luoqianhui)
corners = np.einsum("aij,jka->aik", corners, rotation_matrix)
centers = self[:, 0:2][:, np.newaxis, :]
corners += centers
return corners
def scale(self, factor: float):
"""
"""
# Scale x, y, z, w, l, h, except the orientation
self[..., :-1] = self[..., :-1] * factor
# Scale velocities
if self.velocities is not None:
self.velocities[..., :] = self.velocities[..., :] * factor
def translate(self, translation: np.ndarray):
self[..., :3] = self[..., :3] + translation
def rotate_around_z(self, angle: np.ndarray):
# Rotation matrix around the z-axis
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rotation_matrix = np.array(
[[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
dtype=self.dtype)
# Rotate x,y,z
self[..., :3] = self[..., :3] @ rotation_matrix
# Rotate velocities
if self.velocities is not None:
self.velocities[..., :2] = (np.hstack([
self.velocities[..., :2],
np.zeros(
(self.velocities.shape[0], 1), dtype=self.velocities.dtype)
]) @ rotation_matrix)[..., :2]
# Update orientation
self[..., -1] += angle
def horizontal_flip(self):
"""
The inputs are pixel indices
"""
self[:, 0] = -self[:, 0]
if self.velocities is not None:
self.velocities[:, 0] = -self.velocities[:, 0]
self[:,
-1] = -self[:,
-1] + 2 * np.pi # TODO(luoqianhui): CHECK THIS 2 * np.pi is needed
def horizontal_flip_coords(self):
"""
The inputs are floating point coordinates
"""
new_box3d_quat = np.stack(
[self[:, 3], -self[:, 2], -self[:, 1], self[:, 0]], 1)
self[:, :4] = new_box3d_quat
self[:, 4] = -self[:, 4]
def to_vision_based_3d_box(self):
height, width, length = self[:, 3:4], self[:, 4:5], self[:, 5:6]
x, y, z = self[:, 0:1], self[:, 1:2], self[:, 2:3]
rotation = self[:, 6]
tvec = np.concatenate([x, y - height / 2, z], axis=1)
box_pose = []
for i in range(rotation.shape[0]):
wxyz = Quaternion(
Quaternion(axis=[1, 0, 0], radians=np.pi / 2) * Quaternion(
axis=[0, 0, 1], radians=-rotation[i]))
box_pose.append(wxyz.elements.astype(np.float32))
box_pose = np.stack(box_pose, axis=0)
box3d_new = np.concatenate([box_pose, tvec, width, length, height],
axis=1)
return box3d_new
def vertical_flip(self):
self[:, 1] = -self[:, 1]
if self.velocities is not None:
self.velocities[:, 1] = -self.velocities[:, 1]
self[:, -1] = -self[:, -1] + np.pi
@staticmethod
def limit_period(val, offset: float = 0.5, period: float = np.pi):
return val - np.floor(val / period + offset) * period
def get_mask_of_bboxes_outside_range(self, point_cloud_range: np.ndarray):
bboxes_bev = self.corners_2d
# Represent the bev range as a bounding box
limit_polygons = minmax_range_3d_to_corner_2d(point_cloud_range)
mask = points_in_convex_polygon_2d(
bboxes_bev.reshape(-1, 2), limit_polygons)
return np.any(mask.reshape(-1, 4), axis=1)
def get_mask_of_small_bboxes(self, size_thr: np.ndarray):
dim = self[:, 3:6]
thr = size_thr.reshape(1, 3).repeat(self.shape[0], axis=0)
mask = np.array((dim > thr))
mask = np.all(mask, axis=1)
return mask.nonzero()
    def masked_select(self, mask):
        """Return a new BBoxes3D containing only the boxes selected by ``mask``.

        Velocities (when present) are filtered with the same mask; the
        coordinate mode, origin and rotation axis are carried over.
        """
        selected_data = self[mask]
        selected_velocities = self.velocities
        if self.velocities is not None:
            selected_velocities = self.velocities[mask]
        selected_bbox = BBoxes3D(selected_data, self.coordmode,
                                 selected_velocities, self.origin,
                                 self.rot_axis)
        return selected_bbox
@numba.jit(nopython=True)
def get_mask_points_in_polygon_2d(num_points, num_polygons,
                                  num_points_per_polygon, points, polygons,
                                  vector, mask):
    """Numba kernel: set mask[n, m] True when point n is inside polygon m.

    A point is inside a convex polygon when the 2D cross product of each
    directed edge with the vector from the point to that edge's vertex has
    a consistent (negative) sign for every edge.
    """
    inside = True
    slope_diff = 0
    for point_idx in range(num_points):  # N
        for polygon_idx in range(num_polygons):  # M
            inside = True
            for idx in range(num_points_per_polygon):  # 2
                #vector_slop = vector[polygon_idx, idx, 1] / [polygon_idx, idx, 0]
                #point_slop = (polygons[polygon_idx, idx, 1] - points[point_idx, 1]) / (polygons[polygon_idx, idx, 0] - points[point_idx, 0])
                # 2D cross product; its sign tells which side of the edge
                # the point is on.
                slope_diff = (
                    polygons[polygon_idx, idx, 0] -
                    points[point_idx, 0]) * vector[polygon_idx, idx, 1]
                slope_diff -= (
                    polygons[polygon_idx, idx, 1] -
                    points[point_idx, 1]) * vector[polygon_idx, idx, 0]
                if slope_diff >= 0:
                    inside = False
                    break
            mask[point_idx, polygon_idx] = inside
    return mask
def points_in_convex_polygon_2d(points: np.ndarray,
                                polygons: np.ndarray,
                                clockwise: bool = True):
    """Test which 2D points fall inside which convex polygons.

    Args:
        points: (N, 2) array of 2D points.
        polygons: (M, V, 2) array of convex polygon vertices.
        clockwise: vertex winding order of ``polygons``.

    Returns:
        (N, M) bool array; entry [n, m] is True when point n is in polygon m.
    """
    num_points = points.shape[0]
    num_polygons = polygons.shape[0]
    num_vertices = polygons.shape[1]
    # Each vertex minus its predecessor (wrapping around) yields the
    # directed edge vectors; the winding order fixes their sign.
    predecessor = polygons[:, [num_vertices - 1] +
                           list(range(num_vertices - 1)), :]
    if clockwise:
        vector = polygons - predecessor
    else:
        vector = predecessor - polygons
    mask = np.zeros((num_points, num_polygons), dtype='bool')
    return get_mask_points_in_polygon_2d(num_points, num_polygons,
                                         num_vertices, points, polygons,
                                         vector, mask)
@numba.jit(nopython=True)
def corner_to_standup_nd_jit(boxes_corner):
    """Collapse corner boxes to axis-aligned (standup) min/max boxes.

    Args:
        boxes_corner: (N, num_corners, ndim) corner coordinates.

    Returns:
        (N, 2*ndim) array of [min_0..min_{ndim-1}, max_0..max_{ndim-1}].
    """
    num_boxes = boxes_corner.shape[0]
    ndim = boxes_corner.shape[-1]
    result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)
    for i in range(num_boxes):
        for j in range(ndim):
            result[i, j] = np.min(boxes_corner[i, :, j])
        for j in range(ndim):
            result[i, j + ndim] = np.max(boxes_corner[i, :, j])
    return result
@numba.jit(nopython=True)
def box_collision_test(boxes, qboxes, clockwise=True):
    """Pairwise collision test between two sets of BEV corner boxes.

    Args:
        boxes: (N, 4, 2) corner boxes.
        qboxes: (K, 4, 2) corner boxes.
        clockwise: vertex winding order of the corners.

    Returns:
        (N, K) bool array; True where box i and qbox j overlap.

    Two boxes collide when any pair of edges intersects, or (no edge
    intersection) when one box completely contains the other.
    """
    N = boxes.shape[0]
    K = qboxes.shape[0]
    ret = np.zeros((N, K), dtype=np.bool_)
    slices = np.array([1, 2, 3, 0])
    lines_boxes = np.stack((boxes, boxes[:, slices, :]),
                           axis=2)  # [N, 4, 2(line), 2(xy)]
    lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2)
    # vec = np.zeros((2,), dtype=boxes.dtype)
    boxes_standup = corner_to_standup_nd_jit(boxes)
    qboxes_standup = corner_to_standup_nd_jit(qboxes)
    for i in range(N):
        for j in range(K):
            # calculate standup first
            # Cheap axis-aligned rejection before the exact segment tests.
            iw = min(boxes_standup[i, 2], qboxes_standup[j, 2]) - max(
                boxes_standup[i, 0], qboxes_standup[j, 0])
            if iw > 0:
                ih = min(boxes_standup[i, 3], qboxes_standup[j, 3]) - max(
                    boxes_standup[i, 1], qboxes_standup[j, 1])
                if ih > 0:
                    # Segment intersection test (orientation predicates).
                    for k in range(4):
                        for l in range(4):
                            A = lines_boxes[i, k, 0]
                            B = lines_boxes[i, k, 1]
                            C = lines_qboxes[j, l, 0]
                            D = lines_qboxes[j, l, 1]
                            acd = (D[1] - A[1]) * (C[0] - A[0]) > (
                                C[1] - A[1]) * (D[0] - A[0])
                            bcd = (D[1] - B[1]) * (C[0] - B[0]) > (
                                C[1] - B[1]) * (D[0] - B[0])
                            if acd != bcd:
                                abc = (C[1] - A[1]) * (B[0] - A[0]) > (
                                    B[1] - A[1]) * (C[0] - A[0])
                                abd = (D[1] - A[1]) * (B[0] - A[0]) > (
                                    B[1] - A[1]) * (D[0] - A[0])
                                if abc != abd:
                                    ret[i, j] = True  # collision.
                                    break
                        # NOTE(review): `is True`/`is False` identity checks
                        # on numpy bools work under numba here, but `==`
                        # would be the safer spelling — confirm before
                        # changing.
                        if ret[i, j] is True:
                            break
                    if ret[i, j] is False:
                        # now check complete overlap.
                        # box overlap qbox:
                        box_overlap_qbox = True
                        for l in range(4):  # point l in qboxes
                            for k in range(4):  # corner k in boxes
                                vec = boxes[i, k] - boxes[i, (k + 1) % 4]
                                if clockwise:
                                    vec = -vec
                                cross = vec[1] * (
                                    boxes[i, k, 0] - qboxes[j, l, 0])
                                cross -= vec[0] * (
                                    boxes[i, k, 1] - qboxes[j, l, 1])
                                if cross >= 0:
                                    box_overlap_qbox = False
                                    break
                            if box_overlap_qbox is False:
                                break
                        if box_overlap_qbox is False:
                            # qbox may still contain box; test the reverse.
                            qbox_overlap_box = True
                            for l in range(4):  # point l in boxes
                                for k in range(4):  # corner k in qboxes
                                    vec = qboxes[j, k] - qboxes[j, (k + 1) % 4]
                                    if clockwise:
                                        vec = -vec
                                    cross = vec[1] * (
                                        qboxes[j, k, 0] - boxes[i, l, 0])
                                    cross -= vec[0] * (
                                        qboxes[j, k, 1] - boxes[i, l, 1])
                                    if cross >= 0:  #
                                        qbox_overlap_box = False
                                        break
                                if qbox_overlap_box is False:
                                    break
                            if qbox_overlap_box:
                                ret[i, j] = True  # collision.
                        else:
                            ret[i, j] = True  # collision.
    return ret
def minmax_range_3d_to_corner_2d(minmax_range_3d: np.ndarray):
    """Convert a [xmin..zmin, xmax..zmax] range to BEV corner polygons.

    The min corner is used as the box "center" together with origin
    [0, 0, 0], so the resulting box spans exactly the given range.
    """
    center = minmax_range_3d[0:3]
    wlh = minmax_range_3d[3:6] - minmax_range_3d[0:3]
    data = np.asarray(np.hstack(
        (center, wlh,
         [0])))[np.newaxis,
                ...]  # add a fake orientation to construct a BBoxes3D
    bbox3d = BBoxes3D(data, origin=[0., 0., 0.])
    return bbox3d.corners_2d
@numba.jit(nopython=True)
def circle_nms(dets, thresh):
    """Greedy NMS that suppresses detections by squared center distance.

    Args:
        dets: (N, 3) array of [x, y, score].
        thresh: squared-distance threshold; closer detections with a
            lower score are suppressed.

    Returns:
        List of kept indices, in decreasing score order.
    """
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    scores = dets[:, 2]
    order = scores.argsort()[::-1].astype(np.int32)  # highest->lowest
    ndets = dets.shape[0]
    suppressed = np.zeros((ndets), dtype=np.int32)
    keep = []
    for _i in range(ndets):
        i = order[_i]  # start with highest score box
        if suppressed[
                i] == 1:  # if any box have enough iou with this, remove it
            continue
        keep.append(i)
        for _j in range(_i + 1, ndets):
            j = order[_j]
            if suppressed[j] == 1:
                continue
            # calculate center distance between i and j box
            dist = (x1[i] - x1[j])**2 + (y1[i] - y1[j])**2
            # ovr = inter / areas[j]
            if dist <= thresh:
                suppressed[j] = 1
    return keep
def corner_to_surfaces_3d(corners):
    """convert 3d box corners from corner function above
    to surfaces that normal vectors all direct to internal.
    Args:
        corners (float array, [N, 8, 3]): 3d box corners.
    Returns:
        surfaces (float array, [N, 6, 4, 3]):
    """
    # box_corners: [N, 8, 3], must from corner functions in this module
    # Each row below lists the 4 corner indices of one face, ordered so
    # the computed surface normal points into the box.
    surfaces = np.array([
        [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],
        [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],
        [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],
        [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],
        [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],
        [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],
    ]).transpose([2, 0, 1, 3])
    return surfaces
@numba.jit(nopython=True)
def surface_equ_3d_jit(surfaces):
    """Compute plane equations (normal, d) for each polygon surface.

    Args:
        surfaces: [num_polygon, max_num_surfaces, num_points_of_polygon, 3]

    Returns:
        normal_vec: [num_polygon, max_num_surfaces, 3] surface normals.
        d: [num_polygon, max_num_surfaces] plane offsets so that
           normal . p + d == 0 for points p on the surface.
    """
    # polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3]
    num_polygon = surfaces.shape[0]
    max_num_surfaces = surfaces.shape[1]
    normal_vec = np.zeros((num_polygon, max_num_surfaces, 3),
                          dtype=surfaces.dtype)
    d = np.zeros((num_polygon, max_num_surfaces), dtype=surfaces.dtype)
    # sv0/sv1 are scratch buffers (initial values are overwritten below).
    sv0 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
    sv1 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
    for i in range(num_polygon):
        for j in range(max_num_surfaces):
            sv0[0] = surfaces[i, j, 0, 0] - surfaces[i, j, 1, 0]
            sv0[1] = surfaces[i, j, 0, 1] - surfaces[i, j, 1, 1]
            sv0[2] = surfaces[i, j, 0, 2] - surfaces[i, j, 1, 2]
            sv1[0] = surfaces[i, j, 1, 0] - surfaces[i, j, 2, 0]
            sv1[1] = surfaces[i, j, 1, 1] - surfaces[i, j, 2, 1]
            sv1[2] = surfaces[i, j, 1, 2] - surfaces[i, j, 2, 2]
            # Normal is the cross product of two edge vectors.
            normal_vec[i, j, 0] = sv0[1] * sv1[2] - sv0[2] * sv1[1]
            normal_vec[i, j, 1] = sv0[2] * sv1[0] - sv0[0] * sv1[2]
            normal_vec[i, j, 2] = sv0[0] * sv1[1] - sv0[1] * sv1[0]
            d[i, j] = (-surfaces[i, j, 0, 0] * normal_vec[i, j, 0] -
                       surfaces[i, j, 0, 1] * normal_vec[i, j, 1] -
                       surfaces[i, j, 0, 2] * normal_vec[i, j, 2])
    return normal_vec, d
def points_in_convex_polygon_3d_jit(points, polygon_surfaces,
                                    num_surfaces=None):
    """
    Check points is in 3d convex polygons.
    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3]
            array. all surfaces' normal vector must direct to internal.
            max_num_points_of_surface must at least 3.
        num_surfaces: [num_polygon] array. indicate how many surfaces
            a polygon contain
    Returns:
        [num_points, num_polygon] bool array.
    """
    num_polygons = polygon_surfaces.shape[0]
    if num_surfaces is None:
        # Sentinel: treat every surface slot as valid.
        num_surfaces = np.full((num_polygons, ), 9999999, dtype=np.int64)
    # Only the first 3 points of each surface are needed for the plane.
    normal_vec, d = surface_equ_3d_jit(polygon_surfaces[:, :, :3, :])
    # normal_vec: [num_polygon, max_num_surfaces, 3]
    # d: [num_polygon, max_num_surfaces]
    return _points_in_convex_polygon_3d_jit(points, polygon_surfaces,
                                            normal_vec, d, num_surfaces)
@numba.njit
def _points_in_convex_polygon_3d_jit(points,
                                     polygon_surfaces,
                                     normal_vec,
                                     d,
                                     num_surfaces=None):
    """
    Check points is in 3d convex polygons.
    Args:
        points: [num_points, 3] array.
        polygon_surfaces: [num_polygon, max_num_surfaces,
            max_num_points_of_surface, 3]
            array. all surfaces' normal vector must direct to internal.
            max_num_points_of_surface must at least 3.
        num_surfaces: [num_polygon] array. indicate how many surfaces
            a polygon contain
    Returns:
        [num_points, num_polygon] bool array.
    """
    max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
    num_points = points.shape[0]
    num_polygons = polygon_surfaces.shape[0]
    ret = np.ones((num_points, num_polygons), dtype=np.bool_)
    sign = 0.0
    for i in range(num_points):
        for j in range(num_polygons):
            for k in range(max_num_surfaces):
                # NOTE(review): strict `>` means surface index
                # k == num_surfaces[j] is still evaluated — confirm this
                # off-by-one is intentional (it matches upstream SECOND).
                if k > num_surfaces[j]:
                    break
                # Signed distance to the plane; inward normals make it
                # negative for interior points.
                sign = (points[i, 0] * normal_vec[j, k, 0] +
                        points[i, 1] * normal_vec[j, k, 1] +
                        points[i, 2] * normal_vec[j, k, 2] + d[j, k])
                if sign >= 0:
                    ret[i, j] = False
                    break
    return ret
def get_mask_of_points_in_bboxes3d(points, bboxes: BBoxes3D):
    """Return a (num_points, num_boxes) bool mask of points inside boxes.

    Boxes are expanded to their 6 faces and each point is tested against
    the resulting convex polyhedra.
    """
    corners_3d = bboxes.corners_3d
    surfaces = corner_to_surfaces_3d(corners_3d)
    mask = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
    return mask
def rbbox2d_to_near_bbox(rbboxes):
    """convert rotated 2D bbox to nearest 'standing' or 'lying' bbox.
    Args:
        rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
    Returns:
        bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
    """
    # Fold each heading into a canonical half-period and take its magnitude.
    abs_rots = np.abs(BBoxes3D.limit_period(rbboxes[..., -1], 0.5, np.pi))
    # Rotations beyond 45 degrees are closer to the axis-aligned box with
    # the x/y extents swapped.
    swap_dims = (abs_rots > np.pi / 4)[..., np.newaxis]
    aligned = np.where(swap_dims, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
    centers = aligned[:, :2]
    half_dims = aligned[:, 2:] / 2
    return np.concatenate([centers - half_dims, centers + half_dims], axis=-1)
def second_box_encode(boxes_3d, anchors):
    """
    Encode 3D bboxes for VoxelNet/PointPillars.
    Args:
        boxes_3d ([N, 7] np.ndarray): normal boxes: x, y, z, w, l, h, r
        anchors ([N, 7] np.ndarray): anchors
    """
    xa, ya, za, wa, la, ha, ra = np.split(anchors, 7, axis=-1)
    xg, yg, zg, wg, lg, hg, rg = np.split(boxes_3d, 7, axis=-1)
    # Center offsets are normalized by the anchor's BEV diagonal (x/y)
    # and by the anchor height (z).
    diagonal = np.sqrt(la * la + wa * wa)
    center_targets = [(xg - xa) / diagonal, (yg - ya) / diagonal,
                      (zg - za) / ha]
    # Sizes are encoded as log ratios; heading as a plain residual.
    size_targets = [np.log(wg / wa), np.log(lg / la), np.log(hg / ha)]
    return np.concatenate(center_targets + size_targets + [rg - ra], axis=-1)
def second_box_decode(encodings, anchors):
    """
    Decode 3D bboxes for VoxelNet/PointPillars.
    Args:
        encodings ([N, 7] np.ndarray): encoded boxes: x, y, z, w, l, h, r
        anchors ([N, 7] np.ndarray): anchors
    """
    xa, ya, za, wa, la, ha, ra = np.split(anchors, 7, axis=-1)
    xt, yt, zt, wt, lt, ht, rt = np.split(encodings, 7, axis=-1)
    # Undo the encoding: centers were normalized by the BEV diagonal
    # (x/y) and anchor height (z); sizes are exp of log ratios.
    diagonal = np.sqrt(la * la + wa * wa)
    centers = [xt * diagonal + xa, yt * diagonal + ya, zt * ha + za]
    sizes = [np.exp(wt) * wa, np.exp(lt) * la, np.exp(ht) * ha]
    return np.concatenate(centers + sizes + [rt + ra], axis=-1)
@numba.jit(nopython=True)
def iou_2d_jit(boxes, query_boxes, eps=0.0):
    """
    Calculate 2D box iou.
    Args:
        boxes: (N, 4) ndarray of float, [xmin, ymin, xmax, ymax]
        query_boxes: (K, 4) ndarray of float, same layout
        eps: added to each side length (e.g. 1.0 for pixel-inclusive boxes)
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    for k in range(K):
        box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + eps) *
                    (query_boxes[k, 3] - query_boxes[k, 1] + eps))
        for n in range(N):
            # Intersection width; non-positive means no overlap.
            iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
                boxes[n, 0], query_boxes[k, 0]) + eps)
            if iw > 0:
                ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
                    boxes[n, 1], query_boxes[k, 1]) + eps)
                if ih > 0:
                    # Union area = A + B - intersection.
                    ua = (
                        (boxes[n, 2] - boxes[n, 0] + eps) *
                        (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih)
                    overlaps[n, k] = iw * ih / ua
    return overlaps
def rotation_3d_in_axis(points, angles, axis=0):
    """Rotate batched point sets about a single coordinate axis.

    Args:
        points: (N, P, 3) array, one point set per angle.
        angles: (N,) rotation angles in radians.
        axis: axis to rotate about (0, 1, 2, or -1 for z).

    Returns:
        (N, P, 3) rotated points.
    """
    sin_a = np.sin(angles)
    cos_a = np.cos(angles)
    one = np.ones_like(cos_a)
    zero = np.zeros_like(cos_a)
    # Stack per-angle transposed rotation matrices into shape (3, 3, N).
    if axis == 1:
        rot_mat_T = np.stack([[cos_a, zero, -sin_a],
                              [zero, one, zero],
                              [sin_a, zero, cos_a]])
    elif axis == 2 or axis == -1:
        rot_mat_T = np.stack([[cos_a, -sin_a, zero],
                              [sin_a, cos_a, zero],
                              [zero, zero, one]])
    elif axis == 0:
        rot_mat_T = np.stack([[zero, cos_a, -sin_a],
                              [zero, sin_a, cos_a],
                              [one, zero, zero]])
    else:
        raise ValueError("axis should in range")
    # Batched matmul: out[a, i, k] = sum_j points[a, i, j] * R_T[j, k, a].
    return np.einsum('aij,jka->aik', points, rot_mat_T)
def project_to_image(points_3d, proj_mat):
    """Project 3D points through ``proj_mat`` and dehomogenize to 2D.

    Args:
        points_3d: (..., 3) points.
        proj_mat: projection matrix applied as ``points_4 @ proj_mat.T``.

    Returns:
        (..., 2) image-plane coordinates.

    NOTE(review): the homogeneous coordinate is padded with ZEROS, not
    ones, so any translation column of ``proj_mat`` is ignored. This
    mirrors the upstream SECOND implementation — confirm against callers
    before "fixing" it.
    """
    points_shape = list(points_3d.shape)
    points_shape[-1] = 1
    points_4 = np.concatenate([points_3d, np.zeros(points_shape)], axis=-1)
    point_2d = points_4 @ proj_mat.T
    # Divide by the depth component to get pixel coordinates.
    point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
    return point_2d_res
def in_hull(p, hull):
    """
    Check which test points lie inside the convex hull of a point set.

    param p: (N, K) test points
    param hull: (M, K) M corners of a box, or a prebuilt Delaunay object
    return (N) bool
    """
    try:
        if not isinstance(hull, Delaunay):
            hull = Delaunay(hull)
        # find_simplex returns -1 for points outside the triangulation.
        flag = hull.find_simplex(p) >= 0
    except scipy.spatial.QhullError:
        # Degenerate input (e.g. collinear/coplanar corners) cannot form a
        # hull; treat every point as outside.
        # NOTE: scipy.spatial.qhull.QhullError was a private path removed
        # in modern SciPy; QhullError is the public alias.
        print('Warning: not a hull %s' % str(hull))
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        flag = np.zeros(p.shape[0], dtype=bool)
    return flag
def boxes_to_corners_3d(boxes3d):
    """
    Args:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners3d: (N, 8, 3) box corners in the same frame as boxes3d
    """
    # Unit-cube corner template centered at the origin.
    template = np.array([
        [1, 1, -1],
        [1, -1, -1],
        [-1, -1, -1],
        [-1, 1, -1],
        [1, 1, 1],
        [1, -1, 1],
        [-1, -1, 1],
        [-1, 1, 1],
    ]) / 2
    # Scale the template by each box's (dx, dy, dz).
    corners3d = np.tile(boxes3d[:, None, 3:6], (1, 8, 1)) * template[None, :, :]
    from paddle3d.geometries import PointCloud
    # NOTE(review): the rotated PointCloud is never read back; this only
    # works if PointCloud wraps corners3d's buffer and rotate_around_z
    # mutates it in place — confirm.
    pointcloud_ = PointCloud(corners3d.reshape([-1, 8, 3]))
    pointcloud_.rotate_around_z(boxes3d[:, 6])
    corners3d = corners3d.reshape([-1, 8, 3])
    # Translate corners to each box center.
    corners3d = corners3d + boxes3d[:, None, 0:3]
    return corners3d
def mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_utils.py#L55
    Args:
        boxes: (N, 7) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
        limit_range: [minx, miny, minz, maxx, maxy, maxz]
        min_num_corners: minimum number of corners inside the range to keep a box
    Returns:
        (N,) bool mask; True for boxes kept.
    """
    if boxes.shape[1] > 7:
        # Drop extra attributes (e.g. velocity) before computing corners.
        boxes = boxes[:, 0:7]
    corners = boxes_to_corners_3d(boxes)  # (N, 8, 3)
    # Corner is inside when all 3 coordinates fall within the range.
    mask = ((corners >= limit_range[0:3]) &
            (corners <= limit_range[3:6])).all(axis=2)
    mask = mask.sum(axis=1) >= min_num_corners  # (N)
    return mask
def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_utils.py#L91
    Args:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
        calib: calibration object providing rect_to_lidar()
    Returns:
        boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    """
    xyz_camera = boxes3d_camera[:, 0:3]
    l, h, w, r = boxes3d_camera[:, 3:
                                4], boxes3d_camera[:, 4:
                                                   5], boxes3d_camera[:, 5:
                                                                      6], boxes3d_camera[:,
                                                                                         6:
                                                                                         7]
    xyz_lidar = calib.rect_to_lidar(xyz_camera)
    # Camera boxes are bottom-centered; shift z up by half height to get
    # the geometric center used in lidar coordinates.
    xyz_lidar[:, 2] += h[:, 0] / 2
    # Heading flips sign and shifts by pi/2 between the two conventions.
    return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_utils.py#L152
    :param boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    :param calib: calibration object providing lidar_to_rect()
    :return:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords

    NOTE(review): ``xyz_lidar`` is a view into the input, so the z shift
    below mutates ``boxes3d_lidar`` in place — confirm callers do not
    reuse the input afterwards.
    """
    xyz_lidar = boxes3d_lidar[:, 0:3]
    l, w, h, r = boxes3d_lidar[:, 3:
                               4], boxes3d_lidar[:, 4:
                                                 5], boxes3d_lidar[:, 5:
                                                                   6], boxes3d_lidar[:,
                                                                                     6:
                                                                                     7]
    # Move the reference point from box center to bottom center.
    xyz_lidar[:, 2] -= h.reshape(-1) / 2
    xyz_cam = calib.lidar_to_rect(xyz_lidar)
    # xyz_cam[:, 1] += h.reshape(-1) / 2
    r = -r - np.pi / 2
    return np.concatenate([xyz_cam, l, h, w, r], axis=-1)
def boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_utils.py#L169
    :param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset
    :param bottom_center: whether y is on the bottom center of object
    :return: corners3d: (N, 8, 3)
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    """
    boxes_num = boxes3d.shape[0]
    l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
    # Per-axis corner offsets in the box's local frame (before rotation).
    x_corners = np.array(
        [l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2],
        dtype=np.float32).T
    z_corners = np.array(
        [w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.],
        dtype=np.float32).T
    if bottom_center:
        # y grows downward in camera coords; the top face sits at -h.
        y_corners = np.zeros((boxes_num, 8), dtype=np.float32)
        y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis=1)  # (N, 8)
    else:
        y_corners = np.array([
            h / 2., h / 2., h / 2., h / 2., -h / 2., -h / 2., -h / 2., -h / 2.
        ],
                             dtype=np.float32).T
    ry = boxes3d[:, 6]
    zeros, ones = np.zeros(
        ry.size, dtype=np.float32), np.ones(
            ry.size, dtype=np.float32)
    # Rotation about the camera y axis, one matrix per box.
    rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)], [zeros, ones, zeros],
                         [np.sin(ry), zeros, np.cos(ry)]])  # (3, 3, N)
    R_list = np.transpose(rot_list, (2, 0, 1))  # (N, 3, 3)
    temp_corners = np.concatenate(
        (x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1),
         z_corners.reshape(-1, 8, 1)),
        axis=2)  # (N, 8, 3)
    rotated_corners = np.matmul(temp_corners, R_list)  # (N, 8, 3)
    x_corners, y_corners, z_corners = rotated_corners[:, :,
                                                      0], rotated_corners[:, :,
                                                                          1], rotated_corners[:, :,
                                                                                              2]
    # Translate local corners to the box location.
    x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]
    x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)
    y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)
    z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)
    corners = np.concatenate(
        (x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2)
    return corners.astype(np.float32)
def boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None):
    """
    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/utils/box_utils.py#L215
    :param boxes3d: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
    :param calib: calibration object providing rect_to_img()
    :param image_shape: optional (H, W); when given, boxes are clipped to it
    :return:
        box_2d_preds: (N, 4) [x1, y1, x2, y2]
    """
    corners3d = boxes3d_to_corners3d_kitti_camera(boxes3d)
    pts_img, _ = calib.rect_to_img(corners3d.reshape(-1, 3))
    corners_in_image = pts_img.reshape(-1, 8, 2)
    # 2D box = axis-aligned extent of the 8 projected corners.
    min_uv = np.min(corners_in_image, axis=1)  # (N, 2)
    max_uv = np.max(corners_in_image, axis=1)  # (N, 2)
    boxes2d_image = np.concatenate([min_uv, max_uv], axis=1)
    if image_shape is not None:
        # Clip x to [0, W-1] and y to [0, H-1].
        boxes2d_image[:, 0] = np.clip(
            boxes2d_image[:, 0], a_min=0, a_max=image_shape[1] - 1)
        boxes2d_image[:, 1] = np.clip(
            boxes2d_image[:, 1], a_min=0, a_max=image_shape[0] - 1)
        boxes2d_image[:, 2] = np.clip(
            boxes2d_image[:, 2], a_min=0, a_max=image_shape[1] - 1)
        boxes2d_image[:, 3] = np.clip(
            boxes2d_image[:, 3], a_min=0, a_max=image_shape[0] - 1)
    return boxes2d_image
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import sys
from types import ModuleType
import filelock
from paddle.utils.cpp_extension import load as paddle_jit_load
from paddle3d.env import TMP_HOME
from paddle3d.utils.logger import logger
# Registry of custom C++/CUDA operators that can be JIT-built on demand.
# Per entry:
#   sources: source files, relative to this package directory
#   version: operator version string (popped before JIT compilation)
#   extra_cuda_cflags (optional): extra flags forwarded to nvcc
custom_ops = {
    'voxelize': {
        'sources': ['voxel/voxelize_op.cc', 'voxel/voxelize_op.cu'],
        'version': '0.1.0'
    },
    'iou3d_nms_cuda': {
        'sources': [
            'iou3d_nms/iou3d_cpu.cpp', 'iou3d_nms/iou3d_nms_api.cpp',
            'iou3d_nms/iou3d_nms.cpp', 'iou3d_nms/iou3d_nms_kernel.cu'
        ],
        'version':
        '0.1.0'
    },
    'centerpoint_postprocess': {
        'sources': [
            'centerpoint_postprocess/iou3d_nms_kernel.cu',
            'centerpoint_postprocess/postprocess.cc',
            'centerpoint_postprocess/postprocess.cu'
        ],
        'version':
        '0.1.0'
    },
    'grid_sample_3d': {
        'sources': [
            'grid_sample_3d/grid_sample_3d.cc',
            'grid_sample_3d/grid_sample_3d.cu'
        ],
        'version':
        '0.1.0',
    },
    'assign_score_withk': {
        'sources': [
            "assign_score_withk/assign_score_withk_cuda.cc",
            "assign_score_withk/assign_score_withk_kernel.cu"
        ],
        'version':
        '0.1.0',
        'extra_cuda_cflags': ['-arch=sm_60'],
    },
    'pointnet2_ops': {
        'sources': [
            'pointnet2/pointnet2_batch/ball_query_gpu_batch.cu',
            'pointnet2/pointnet2_batch/ball_query_batch.cc',
            'pointnet2/pointnet2_stack/ball_query_gpu_stack.cu',
            'pointnet2/pointnet2_stack/ball_query_stack.cc',
            'pointnet2/pointnet2_batch/group_points_batch.cc',
            'pointnet2/pointnet2_batch/group_points_gpu_batch.cu',
            'pointnet2/pointnet2_stack/group_points_stack.cc',
            'pointnet2/pointnet2_stack/group_points_gpu_stack.cu',
            'pointnet2/voxel_query.cc', 'pointnet2/voxel_query_gpu.cu',
            'pointnet2/sampling.cc', 'pointnet2/sampling_gpu.cu',
            'pointnet2/gather_points.cc', 'pointnet2/gather_points_gpu.cu'
        ],
        'version':
        '0.1.0'
    },
    'roiaware_pool3d': {
        'sources': [
            'roiaware_pool3d/box_utils_gpu.cu',
            'roiaware_pool3d/box_utils.cc',
        ],
        'version':
        '0.1.0'
    },
    'ms_deform_attn': {
        'sources': [
            'ms_deform_attn/ms_deform_attn.cc',
            'ms_deform_attn/ms_deform_attn.cu'
        ],
        'version':
        '0.1.0',
        'extra_cuda_cflags': ['-arch=sm_60'],
    }
}
class CustomOpNotFoundException(Exception):
    """Raised when a requested module does not match any registered custom op."""

    def __init__(self, op_name):
        # Populate Exception.args so repr()/pickling behave consistently.
        super().__init__(op_name)
        self.op_name = op_name

    def __str__(self):
        # Fixed grammar of the original message ("Couldn't Found ...").
        return "Couldn't find custom op {}".format(self.op_name)
class CustomOperatorPathFinder:
    """Meta-path finder that intercepts imports under ``paddle3d.ops``.

    NOTE(review): ``find_module`` is the legacy finder API (removed in
    Python 3.12); migrating to ``find_spec`` should be considered once the
    supported Python range allows it.
    """

    def find_module(self, fullname: str, path: str = None):
        # Only handle submodules of paddle3d.ops; defer everything else to
        # the default finders.
        if not fullname.startswith('paddle3d.ops'):
            return None
        return CustomOperatorPathLoader()
class CustomOperatorPathLoader:
    """Loader that resolves ``paddle3d.ops.<op>`` to a built custom op."""

    def load_module(self, fullname: str):
        modulename = fullname.split('.')[-1]
        if modulename not in custom_ops:
            raise CustomOpNotFoundException(modulename)
        if fullname not in sys.modules:
            try:
                # Prefer an already-installed (pre-built) extension module.
                sys.modules[fullname] = importlib.import_module(modulename)
            except ImportError:
                # Fall back to a lazy proxy that JIT-builds on first use.
                sys.modules[fullname] = Paddle3dCustomOperatorModule(
                    modulename, fullname)
        return sys.modules[fullname]
class Paddle3dCustomOperatorModule(ModuleType):
    """Lazy module proxy that JIT-builds a custom op on first attribute access."""

    def __init__(self, modulename: str, fullname: str):
        # fullname: dotted import path (paddle3d.ops.<op>);
        # modulename: the bare op name used for building/importing.
        self.fullname = fullname
        self.modulename = modulename
        # Real module, resolved lazily by _load_module().
        self.module = None
        super().__init__(modulename)

    def jit_build(self):
        """Compile the op's C++/CUDA sources with Paddle's JIT loader."""
        try:
            # File lock serializes concurrent builds of the same op across
            # processes.
            lockfile = 'paddle3d.ops.{}'.format(self.modulename)
            lockfile = os.path.join(TMP_HOME, lockfile)
            file = inspect.getabsfile(sys.modules['paddle3d.ops'])
            rootdir = os.path.split(file)[0]
            # Copy so popping keys does not mutate the global registry.
            args = custom_ops[self.modulename].copy()
            sources = args.pop('sources')
            sources = [os.path.join(rootdir, file) for file in sources]
            args.pop('version')
            with filelock.FileLock(lockfile):
                return paddle_jit_load(
                    name=self.modulename, sources=sources, **args)
        except:
            # Log the failure, then re-raise for the caller to handle.
            logger.error("{} builded fail!".format(self.modulename))
            raise

    def _load_module(self):
        if self.module is None:
            try:
                self.module = importlib.import_module(self.modulename)
            except ImportError:
                logger.warning("No custom op {} found, try JIT build".format(
                    self.modulename))
                self.module = self.jit_build()
                logger.info("{} builded success!".format(self.modulename))
            # refresh
            # Replace this proxy in sys.modules with the real module.
            sys.modules[self.fullname] = self.module
        return self.module

    def __getattr__(self, attr: str):
        # Attributes the import machinery probes must not trigger a build.
        if attr in ['__path__', '__file__']:
            return None
        if attr in ['__loader__', '__package__', '__name__', '__spec__']:
            return super().__getattr__(attr)
        module = self._load_module()
        return getattr(module, attr)
# Register the finder ahead of the default ones so 'paddle3d.ops.*'
# imports are resolved (and JIT-built if needed) by the hooks above.
sys.meta_path.insert(0, CustomOperatorPathFinder())
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/setup.py
|
import paddle
from paddle.utils.cpp_extension import CppExtension, CUDAExtension, setup
from paddle3d.ops import custom_ops
# Build each registered custom op as its own extension module.
for op_name, op_dict in custom_ops.items():
    sources = op_dict.pop('sources', [])
    flags = None
    if paddle.device.is_compiled_with_cuda():
        extension = CUDAExtension
        flags = {'cxx': ['-DPADDLE_WITH_CUDA']}
        if 'extra_cuda_cflags' in op_dict:
            flags['nvcc'] = op_dict.pop('extra_cuda_cflags')
    else:
        # CPU-only build: drop CUDA sources, which CppExtension cannot
        # compile. The original `filter(lambda x: x.endswith('cu'), ...)`
        # kept ONLY the .cu files and then crashed calling len() on the
        # filter object.
        sources = [source for source in sources if not source.endswith('cu')]
        extension = CppExtension
    # Skip ops with nothing left to compile (e.g. pure-CUDA ops on CPU).
    if len(sources) == 0:
        continue
    extension = extension(sources=sources, extra_compile_args=flags)
    setup(name=op_name, ext_modules=extension)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/grid_sample_3d/grid_sample_3d.cc
|
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "grid_sample_3d.h"
#include "paddle/extension.h"
std::vector<paddle::Tensor> GridSample3DCUDAForward(
const paddle::Tensor& x, const paddle::Tensor& grid,
const std::string& mode, const std::string& padding_mode,
bool align_corners);
// Forward entry point for the grid_sample_3d op: dispatches directly to
// the CUDA implementation (no CPU path is provided).
// x: input feature tensor; grid: normalized sampling coordinates
// (presumably [N, D_out, H_out, W_out, 3] — see InferShape below).
std::vector<paddle::Tensor> GridSample3DForward(const paddle::Tensor& x,
                                                const paddle::Tensor& grid,
                                                const std::string& mode,
                                                const std::string& padding_mode,
                                                bool align_corners) {
  return GridSample3DCUDAForward(x, grid, mode, padding_mode, align_corners);
}
std::vector<paddle::Tensor> GridSample3DCUDABackward(
const paddle::Tensor& x, const paddle::Tensor& grid,
const paddle::Tensor& grad_out, const std::string& mode,
const std::string& padding_mode, bool align_corners);
// Backward entry point: computes grad w.r.t. x by delegating to the CUDA
// implementation (no CPU path is provided).
std::vector<paddle::Tensor> GridSample3DBackward(
    const paddle::Tensor& x, const paddle::Tensor& grid,
    const paddle::Tensor& grad_out, const std::string& mode,
    const std::string& padding_mode, bool align_corners) {
  return GridSample3DCUDABackward(x, grid, grad_out, mode, padding_mode,
                                  align_corners);
}
// Output shape takes batch and channels from x and the three spatial
// dims from grid: [N, C, D_out, H_out, W_out].
std::vector<std::vector<int64_t>> GridSample3DInferShape(
    std::vector<int64_t> x_shape, std::vector<int64_t> grid_shape) {
  return {
      {x_shape[0], x_shape[1], grid_shape[1], grid_shape[2], grid_shape[3]}};
}
// The gradient w.r.t. x has the same shape as x itself.
std::vector<std::vector<int64_t>> GridSample3DInferBackShape(
    std::vector<int64_t> x_shape, std::vector<int64_t> grid_shape) {
  return {x_shape};
}
// The output dtype follows the input feature tensor's dtype.
std::vector<paddle::DataType> GridSample3DInferDtype(
    paddle::DataType x_dtype, paddle::DataType grid_dtype) {
  return {x_dtype};
}
// Register the forward op and its gradient with Paddle's custom-op API.
PD_BUILD_OP(grid_sample_3d)
    .Inputs({"x", "grid"})
    .Attrs({"mode: std::string", "padding_mode: std::string",
            "align_corners: bool"})
    .Outputs({"out"})
    .SetKernelFn(PD_KERNEL(GridSample3DForward))
    .SetInferShapeFn(PD_INFER_SHAPE(GridSample3DInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(GridSample3DInferDtype));

// Gradient op: consumes x, grid and grad(out); produces grad(x) only
// (no gradient w.r.t. the sampling grid is registered).
PD_BUILD_GRAD_OP(grid_sample_3d)
    .Inputs({"x", "grid", paddle::Grad("out")})
    .Attrs({"mode: std::string", "padding_mode: std::string",
            "align_corners: bool"})
    .Outputs({paddle::Grad("x")})
    .SetKernelFn(PD_KERNEL(GridSample3DBackward))
    .SetInferShapeFn(PD_INFER_SHAPE(GridSample3DInferBackShape));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/grid_sample_3d/grid_sample_3d.h
|
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef GRID_SAMPLE_3D_H
#define GRID_SAMPLE_3D_H
#include <cassert>
#include <cmath>
#include <vector>
// Qualifiers for functions callable from both host and device code.
#define HOST_DEVICE __host__ __device__
#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__
// Interpolation mode used when sampling.
enum class Mode { bilinear, nearest };
// How grid coordinates falling outside the input volume are handled.
enum class PaddingMode { zeros, border, reflect };
namespace {}
#endif
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/grid_sample_3d/grid_sample_3d.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include "grid_sample_3d.h"
#include "paddle/extension.h"
#define CHECK_INPUT_GPU(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// True when (d, h, w) is a valid index into a D x H x W volume.
static __forceinline__ __device__ bool InBounds3D(int64_t d, int64_t h,
                                                  int64_t w, int64_t D,
                                                  int64_t H, int64_t W) {
  return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W;
}
// Grid-stride loop: each thread starts at its global index and advances
// by the total number of launched threads until n elements are covered.
#define CUDA_KERNEL_LOOP_TYPE(i, n, index_type)                  \
  index_type _i_n_d_e_x = blockIdx.x * blockDim.x + threadIdx.x; \
  for (index_type i = _i_n_d_e_x; _i_n_d_e_x < (n);              \
       _i_n_d_e_x += blockDim.x * gridDim.x, i = _i_n_d_e_x)

#define CUDA_KERNEL_LOOP(i, n) CUDA_KERNEL_LOOP_TYPE(i, n, int)
// Map a normalized coordinate in [-1, 1] to input index space.
// align_corners=true:  -1 -> 0 and 1 -> size-1 (corner pixel centers);
// align_corners=false: -1 -> -0.5 and 1 -> size-0.5 (pixel edges).
template <typename T>
static __forceinline__ __device__ T Unnormalize(T coord, int size,
                                                bool align_corners) {
  if (align_corners) {
    return ((coord + 1.f) / 2) * (size - 1);
  } else {
    return ((coord + 1.f) * size - 1) / 2;
  }
}
// Clamp an index into [0, max_value].
template <typename T>
static __forceinline__ __device__ T ClipIndexes(T in, int max_value) {
  return min(static_cast<T>(max_value), max(in, static_cast<T>(0)));
}
// Reflect a coordinate back into the interval [twice_low/2, twice_high/2]
// by mirroring at the boundaries (used for PaddingMode::reflect). Bounds
// are passed doubled so half-pixel limits stay exact in integer form.
template <typename T>
static __forceinline__ __device__ T ReflectIndexes(T in, int twice_low,
                                                   int twice_high) {
  if (twice_low == twice_high) {
    // Degenerate interval: everything collapses to a single coordinate.
    return static_cast<T>(0);
  }
  T min = static_cast<T>(twice_low) / 2;
  T span = static_cast<T>(twice_high - twice_low) / 2;
  in = fabs(in - min);
  T extra = fmod(in, span);
  int flips = static_cast<int>(floor(in / span));
  // An even number of reflections preserves direction; odd reverses it.
  if (flips % 2 == 0) {
    return extra + min;
  } else {
    return span - extra + min;
  }
}
// Convert one normalized grid coordinate into an input-space position,
// applying the configured padding behavior for out-of-range values.
// (PaddingMode::zeros leaves the coordinate untouched; out-of-bounds
// samples are handled later by bounds checks in the kernel.)
template <typename T>
static __forceinline__ __device__ T ComputePositions(T coord, int size,
                                                     PaddingMode padding_mode,
                                                     bool align_corners) {
  coord = Unnormalize<T>(coord, size, align_corners);
  if (padding_mode == PaddingMode::border) {
    coord = ClipIndexes(coord, size - 1);
  } else if (padding_mode == PaddingMode::reflect) {
    // Reflection bounds differ with align_corners (pixel centers vs edges).
    if (align_corners) {
      coord = ReflectIndexes(coord, 0, 2 * (size - 1));
    } else {
      coord = ReflectIndexes(coord, -1, 2 * size - 1);
    }
    coord = ClipIndexes(coord, size - 1);
  }
  return coord;
}
// Forward kernel for 3D grid sampling: one thread per output location
// (n, d, h, w); the channel dimension is handled by an inner loop.
//   input:  (N, C, in_d, in_h, in_w), contiguous NCDHW
//   grid:   (N, out_d, out_h, out_w, 3), last dim = (x, y, z) in [-1, 1]
//   output: (N, C, out_d, out_h, out_w)
// NOTE(review): inp_sN is derived from out_c, i.e. the input channel count is
// assumed equal to out_c -- confirm all callers pass matching C.
template <typename T, typename index_t>
__global__ void GridSample3DCudaKernel(
    const index_t nthreads, index_t out_c, index_t out_d, index_t out_h,
    index_t out_w, index_t in_d, index_t in_h, index_t in_w, const T* input,
    const T* grid, T* output, const Mode interpolation_mode,
    const PaddingMode padding_mode, bool align_corners) {
  // printf("size: %d, %d, %d, %d, %d, %d \n", out_c, out_d, out_w, out_h, in_d,
  // in_w);
  // Element strides of the contiguous input / grid / output tensors.
  index_t inp_sW = 1;
  index_t inp_sH = in_w;
  index_t inp_sD = in_h * in_w;
  index_t inp_sC = in_d * inp_sD;
  index_t inp_sN = out_c * inp_sC;
  index_t grid_sCoor = 1;
  index_t grid_sW = 3;
  index_t grid_sH = out_w * grid_sW;
  index_t grid_sD = out_h * grid_sH;
  index_t grid_sN = out_d * grid_sD;
  index_t out_sW = 1;
  index_t out_sH = out_w;
  index_t out_sD = out_h * out_w;
  index_t out_sC = out_d * out_sD;
  index_t out_sN = out_c * out_sC;
  CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
    // Decompose the linear index into (n, d, h, w) output coordinates.
    const index_t w = index % out_w;
    const index_t h = (index / out_w) % out_h;
    const index_t d = (index / (out_h * out_w)) % out_d;
    const index_t n = index / (out_d * out_h * out_w);
    const index_t grid_offset =
        n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
    // get the corresponding input x, y, z co-ordinates from grid
    T ix = grid[grid_offset];
    T iy = grid[grid_offset + grid_sCoor];
    T iz = grid[grid_offset + 2 * grid_sCoor];
    // Convert normalized [-1, 1] coords to input index space with padding.
    ix = ComputePositions(ix, in_w, padding_mode, align_corners);
    iy = ComputePositions(iy, in_h, padding_mode, align_corners);
    iz = ComputePositions(iz, in_d, padding_mode, align_corners);
    // printf("ix: %f, iy: %f, iz: %f \n", ix, iy, iz);
    if (interpolation_mode == Mode::bilinear) {
      // get corner pixel values from (x, y, z)
      // for 4d, we used north-east-south-west
      // for 5d, we add top-bottom
      // The 8 corners of the surrounding voxel cell (t/b = top/bottom along z,
      // n/s = north/south along y, w/e = west/east along x).
      index_t ix_tnw = static_cast<index_t>(std::floor(ix));
      index_t iy_tnw = static_cast<index_t>(std::floor(iy));
      index_t iz_tnw = static_cast<index_t>(std::floor(iz));
      index_t ix_tne = ix_tnw + 1;
      index_t iy_tne = iy_tnw;
      index_t iz_tne = iz_tnw;
      index_t ix_tsw = ix_tnw;
      index_t iy_tsw = iy_tnw + 1;
      index_t iz_tsw = iz_tnw;
      index_t ix_tse = ix_tnw + 1;
      index_t iy_tse = iy_tnw + 1;
      index_t iz_tse = iz_tnw;
      index_t ix_bnw = ix_tnw;
      index_t iy_bnw = iy_tnw;
      index_t iz_bnw = iz_tnw + 1;
      index_t ix_bne = ix_tnw + 1;
      index_t iy_bne = iy_tnw;
      index_t iz_bne = iz_tnw + 1;
      index_t ix_bsw = ix_tnw;
      index_t iy_bsw = iy_tnw + 1;
      index_t iz_bsw = iz_tnw + 1;
      index_t ix_bse = ix_tnw + 1;
      index_t iy_bse = iy_tnw + 1;
      index_t iz_bse = iz_tnw + 1;
      // get surfaces to each neighbor:
      // Trilinear weights: each corner's weight is the volume of the sub-box
      // opposite it.
      T tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
      T tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
      T tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
      T tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
      T bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
      T bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
      T bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
      T bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
      auto inp_ptr_NC = input + n * inp_sN;
      auto out_ptr_NCDHW =
          output + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
      // Accumulate the weighted corner values per channel; out-of-bounds
      // corners contribute zero (this is what implements "zeros" padding).
      for (index_t c = 0; c < out_c;
           ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
        *out_ptr_NCDHW = static_cast<T>(0);
        if (InBounds3D(iz_tnw, iy_tnw, ix_tnw, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] *
              tnw;
        }
        if (InBounds3D(iz_tne, iy_tne, ix_tne, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] *
              tne;
        }
        if (InBounds3D(iz_tsw, iy_tsw, ix_tsw, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] *
              tsw;
        }
        if (InBounds3D(iz_tse, iy_tse, ix_tse, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] *
              tse;
        }
        if (InBounds3D(iz_bnw, iy_bnw, ix_bnw, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] *
              bnw;
        }
        if (InBounds3D(iz_bne, iy_bne, ix_bne, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] *
              bne;
        }
        if (InBounds3D(iz_bsw, iy_bsw, ix_bsw, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] *
              bsw;
        }
        if (InBounds3D(iz_bse, iy_bse, ix_bse, in_d, in_h, in_w)) {
          *out_ptr_NCDHW +=
              inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] *
              bse;
        }
      }
    } else if (interpolation_mode == Mode::nearest) {
      // Nearest-neighbor: round each coordinate independently.
      index_t ix_nearest = static_cast<index_t>(std::round(ix));
      index_t iy_nearest = static_cast<index_t>(std::round(iy));
      index_t iz_nearest = static_cast<index_t>(std::round(iz));
      // assign nearest neighor pixel value to output pixel
      auto inp_ptr_NC = input + n * inp_sN;
      auto out_ptr_NCDHW =
          output + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
      for (index_t c = 0; c < out_c;
           ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
        if (InBounds3D(iz_nearest, iy_nearest, ix_nearest, in_d, in_h, in_w)) {
          *out_ptr_NCDHW =
              inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH +
                         ix_nearest * inp_sW];
        } else {
          // Out-of-bounds nearest sample: write zero ("zeros" padding).
          *out_ptr_NCDHW = static_cast<T>(0);
        }
      }
    }
  }
}
std::vector<paddle::Tensor> GridSample3DCUDAForward(
    const paddle::Tensor& x, const paddle::Tensor& grid,
    const std::string& mode, const std::string& padding_mode,
    bool align_corners) {
  // Forward 3D grid sample.
  //   x:    (N, C, D_in, H_in, W_in) float32 feature volume on GPU
  //   grid: (N, D_out, H_out, W_out, 3) normalized sampling coords in [-1, 1]
  //   mode: "bilinear" (default) or "nearest"
  //   padding_mode: "zeros" (default), "border" or "reflection"
  // Returns a single tensor of shape (N, C, D_out, H_out, W_out).
  CHECK_INPUT_GPU(x);
  CHECK_INPUT_GPU(grid);
  // Map string attrs to enums; unrecognized strings fall back to
  // zeros / bilinear (same defaults as before).
  PaddingMode enum_padding_mode;
  Mode enum_mode;
  if (padding_mode == "border") {
    enum_padding_mode = PaddingMode::border;
  } else if (padding_mode == "reflection") {
    enum_padding_mode = PaddingMode::reflect;
  } else {
    enum_padding_mode = PaddingMode::zeros;
  }
  if (mode == "nearest") {
    enum_mode = Mode::nearest;
  } else {
    enum_mode = Mode::bilinear;
  }
  const int n = grid.shape()[0];
  const int out_d = grid.shape()[1];
  const int out_h = grid.shape()[2];
  const int out_w = grid.shape()[3];
  const int c = x.shape()[1];
  const int in_d = x.shape()[2];
  const int in_h = x.shape()[3];
  const int in_w = x.shape()[4];
  // A batch mismatch would make the kernel read out of bounds; fail early.
  PD_CHECK(x.shape()[0] == n, "x and grid must have the same batch size.");
  auto output = paddle::full({n, c, out_d, out_h, out_w}, 0,
                             paddle::DataType::FLOAT32, paddle::GPUPlace());
  // One thread per output (n, d, h, w) location; channels looped in-kernel.
  const int count = static_cast<int>(n * out_d * out_h * out_w);
  const int max_threads_per_block = 512;
  const int block_num = (count - 1) / max_threads_per_block + 1;
  GridSample3DCudaKernel<float, int>
      <<<block_num, max_threads_per_block, 0, x.stream()>>>(
          count, c, out_d, out_h, out_w, in_d, in_h, in_w, x.data<float>(),
          grid.data<float>(), output.data<float>(), enum_mode,
          enum_padding_mode, align_corners);
  // Fail loudly on a launch error instead of printing and returning zeros.
  cudaError_t error_check = cudaGetLastError();
  PD_CHECK(error_check == cudaSuccess, cudaGetErrorString(error_check));
  return {output};
}
// Map a normalized coordinate in [-1, 1] to input index space and report the
// local derivative d(index)/d(coord) through *grad_in.
// align_corners=true:  -1/+1 land exactly on the first/last voxel center;
// align_corners=false: -1/+1 land on the outer voxel edges.
template <typename T>
static __forceinline__ __device__ T UnnormalizeWithMask(T coord, int size,
                                                        bool align_corners,
                                                        T* grad_in) {
  if (!align_corners) {
    *grad_in = static_cast<T>(size) / 2;
    return ((coord + 1.f) * size - 1) / 2;
  }
  *grad_in = static_cast<T>(size - 1) / 2;
  return ((coord + 1.f) / 2) * (size - 1);
}
// Clamp a coordinate into [0, clip_limit - 1] and report the derivative of
// the clamp through *grad_in: 1 inside the range, 0 where the value saturated.
template <typename T>
static __forceinline__ __device__ T ClipIndexesWithMask(T in, int clip_limit,
                                                        T* grad_in) {
  const T zero = static_cast<T>(0);
  if (in <= zero) {
    *grad_in = zero;
    return zero;
  }
  const T upper = static_cast<T>(clip_limit - 1);
  if (in >= upper) {
    *grad_in = zero;
    return upper;
  }
  *grad_in = static_cast<T>(1);
  return in;
}
// Reflect a coordinate into the valid interval (bounds passed doubled, as in
// ReflectIndexes) and report the derivative of the reflection through
// *grad_in: +/-1 depending on how many times the coordinate was mirrored.
template <typename T>
static __forceinline__ __device__ T ReflectIndexesWithMask(T in, int twice_low,
                                                           int twice_high,
                                                           T* grad_in) {
  // Degenerate interval: constant output, zero gradient.
  if (twice_low == twice_high) {
    *grad_in = static_cast<T>(0);
    return static_cast<T>(0);
  }
  // Sign flip from taking |in - min| (tracked separately so the overall
  // derivative sign stays correct).
  int grad_in_mult_;
  T min = static_cast<T>(twice_low) / 2;
  T span = static_cast<T>(twice_high - twice_low) / 2;
  in = in - min;
  if (in < static_cast<T>(0)) {
    grad_in_mult_ = -1;
    in = -in;
  } else {
    grad_in_mult_ = 1;
  }
  // Offset within the current span and number of edge bounces.
  T extra = fmod(in, span);
  int flips = static_cast<int>(floor(in / span));
  if (flips % 2 == 0) {
    *grad_in = static_cast<T>(grad_in_mult_);
    return extra + min;
  } else {
    // Odd bounce count mirrors the coordinate and negates the derivative.
    *grad_in = static_cast<T>(-grad_in_mult_);
    return span - extra + min;
  }
}
// Gradient-aware version of ComputePositions: maps a normalized coordinate to
// input index space and accumulates the chain-rule factor of every applied
// transform (unnormalize, reflect, clip) into *grad_in.
template <typename T>
static __forceinline__ __device__ T
ComputePositionsWithMask(T coord, int size, PaddingMode padding_mode,
                         bool align_corners, T* grad_in) {
  T grad_clip, grad_refl;
  coord = UnnormalizeWithMask<T>(coord, size, align_corners, grad_in);
  if (padding_mode == PaddingMode::border) {
    coord = ClipIndexesWithMask(coord, size, &grad_clip);
    *grad_in = (*grad_in) * grad_clip;
  } else if (padding_mode == PaddingMode::reflect) {
    // Reflection interval differs by half a pixel depending on align_corners.
    if (align_corners) {
      coord = ReflectIndexesWithMask(coord, 0, 2 * (size - 1), &grad_refl);
    } else {
      coord = ReflectIndexesWithMask(coord, -1, 2 * size - 1, &grad_refl);
    }
    coord = ClipIndexesWithMask(coord, size, &grad_clip);
    *grad_in = (*grad_in) * grad_refl * grad_clip;
  }
  return coord;
}
// Atomically accumulate `delta` into data[d][h][w] (using the given strides),
// silently dropping contributions whose 3D index falls outside
// [0, D) x [0, H) x [0, W).
template <typename T>
static __forceinline__ __device__ void AtomicAdd3D(
    T* data, int64_t d, int64_t h, int64_t w, int64_t sD, int64_t sH,
    int64_t sW, int64_t D, int64_t H, int64_t W, T delta) {
  if (!InBounds3D(d, h, w, D, H, W)) {
    return;
  }
  atomicAdd(data + d * sD + h * sH + w * sW, delta);
}
// Backward kernel for 3D grid sampling: one thread per output location
// (n, d, h, w). Scatters gradients into grad_input via atomics and, when
// grad_grid != nullptr, writes per-location grid gradients.
//   grad_output: (N, C, out_d, out_h, out_w)
//   input:       (N, C, in_d, in_h, in_w)
//   grid:        (N, out_d, out_h, out_w, 3) with (x, y, z) in [-1, 1]
// NOTE(review): as in the forward kernel, inp_sN is derived from out_c, so the
// input channel count is assumed equal to out_c.
template <typename T, typename index_t>
__global__ void GridSample3DCudaBackwardKernel(
    const index_t nthreads, const T* grad_output, const T* input, const T* grid,
    index_t out_c, index_t out_d, index_t out_h, index_t out_w, index_t in_d,
    index_t in_h, index_t in_w, T* grad_input, T* grad_grid, const Mode mode,
    const PaddingMode padding_mode, bool align_corners) {
  // Element strides of the contiguous input / grid / grad_output tensors.
  index_t inp_sW = 1;
  index_t inp_sH = in_w;
  index_t inp_sD = in_h * in_w;
  index_t inp_sC = in_d * inp_sD;
  index_t inp_sN = out_c * inp_sC;
  index_t grid_sCoor = 1;
  index_t grid_sW = 3;
  index_t grid_sH = out_w * grid_sW;
  index_t grid_sD = out_h * grid_sH;
  index_t grid_sN = out_d * grid_sD;
  index_t gOut_sW = 1;
  index_t gOut_sH = out_w;
  index_t gOut_sD = out_h * out_w;
  index_t gOut_sC = out_d * gOut_sD;
  index_t gOut_sN = out_c * gOut_sC;
  CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
    // Decompose the linear index into (n, d, h, w) output coordinates.
    const index_t w = index % out_w;
    const index_t h = (index / out_w) % out_h;
    const index_t d = (index / (out_h * out_w)) % out_d;
    const index_t n = index / (out_d * out_h * out_w);
    const auto grid_offset =
        n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
    // get the corresponding input x, y, z co-ordinates from grid
    T ix = grid[grid_offset];
    T iy = grid[grid_offset + grid_sCoor];
    T iz = grid[grid_offset + 2 * grid_sCoor];
    // multipliers for gradients on ix, iy, and iz
    T gix_mult, giy_mult, giz_mult;
    ix = ComputePositionsWithMask(ix, in_w, padding_mode, align_corners,
                                  &gix_mult);
    iy = ComputePositionsWithMask(iy, in_h, padding_mode, align_corners,
                                  &giy_mult);
    iz = ComputePositionsWithMask(iz, in_d, padding_mode, align_corners,
                                  &giz_mult);
    if (mode == Mode::bilinear) {
      // get corner pixel values from (x, y, z)
      // for 4d, we used north-east-south-west
      // for 5d, we add top-bottom
      // The 8 corners of the surrounding voxel cell.
      index_t ix_tnw = static_cast<index_t>(std::floor(ix));
      index_t iy_tnw = static_cast<index_t>(std::floor(iy));
      index_t iz_tnw = static_cast<index_t>(std::floor(iz));
      index_t ix_tne = ix_tnw + 1;
      index_t iy_tne = iy_tnw;
      index_t iz_tne = iz_tnw;
      index_t ix_tsw = ix_tnw;
      index_t iy_tsw = iy_tnw + 1;
      index_t iz_tsw = iz_tnw;
      index_t ix_tse = ix_tnw + 1;
      index_t iy_tse = iy_tnw + 1;
      index_t iz_tse = iz_tnw;
      index_t ix_bnw = ix_tnw;
      index_t iy_bnw = iy_tnw;
      index_t iz_bnw = iz_tnw + 1;
      index_t ix_bne = ix_tnw + 1;
      index_t iy_bne = iy_tnw;
      index_t iz_bne = iz_tnw + 1;
      index_t ix_bsw = ix_tnw;
      index_t iy_bsw = iy_tnw + 1;
      index_t iz_bsw = iz_tnw + 1;
      index_t ix_bse = ix_tnw + 1;
      index_t iy_bse = iy_tnw + 1;
      index_t iz_bse = iz_tnw + 1;
      // get surfaces to each neighbor:
      // Trilinear weights (same as the forward pass).
      T tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
      T tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
      T tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
      T tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
      T bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
      T bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
      T bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
      T bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
      // Accumulators for the grid gradient at this location.
      T gix = static_cast<T>(0), giy = static_cast<T>(0),
        giz = static_cast<T>(0);
      index_t gOut_offset =
          n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
      index_t inp_offset_NC = n * inp_sN;
      T* gInp_ptr_NC = grad_input + n * inp_sN;
      for (index_t c = 0; c < out_c; ++c, gOut_offset += gOut_sC,
                   gInp_ptr_NC += inp_sC, inp_offset_NC += inp_sC) {
        T gOut = grad_output[gOut_offset];
        // Scatter the weighted upstream gradient to the 8 input corners
        // (AtomicAdd3D drops out-of-bounds corners).
        AtomicAdd3D(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, inp_sD, inp_sH, inp_sW,
                    in_d, in_h, in_w, tnw * gOut);
        AtomicAdd3D(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, inp_sD, inp_sH, inp_sW,
                    in_d, in_h, in_w, tne * gOut);
        AtomicAdd3D(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, inp_sD, inp_sH, inp_sW,
                    in_d, in_h, in_w, tsw * gOut);
        AtomicAdd3D(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, inp_sD, inp_sH, inp_sW,
                    in_d, in_h, in_w, tse * gOut);
        AtomicAdd3D(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, inp_sD, inp_sH, inp_sW,
                    in_d, in_h, in_w, bnw * gOut);
        AtomicAdd3D(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, inp_sD, inp_sH, inp_sW,
                    in_d, in_h, in_w, bne * gOut);
        AtomicAdd3D(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, inp_sD, inp_sH, inp_sW,
                    in_d, in_h, in_w, bsw * gOut);
        AtomicAdd3D(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, inp_sD, inp_sH, inp_sW,
                    in_d, in_h, in_w, bse * gOut);
        // calculate grad_grid
        // Each in-bounds corner contributes the partial derivative of its
        // trilinear weight w.r.t. (ix, iy, iz), scaled by its value and gOut.
        if (InBounds3D(iz_tnw, iy_tnw, ix_tnw, in_d, in_h, in_w)) {
          T tnw_val = input[inp_offset_NC + iz_tnw * inp_sD + iy_tnw * inp_sH +
                            ix_tnw * inp_sW];
          gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut;
          giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut;
          giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut;
        }
        if (InBounds3D(iz_tne, iy_tne, ix_tne, in_d, in_h, in_w)) {
          T tne_val = input[inp_offset_NC + iz_tne * inp_sD + iy_tne * inp_sH +
                            ix_tne * inp_sW];
          gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut;
          giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut;
          giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut;
        }
        if (InBounds3D(iz_tsw, iy_tsw, ix_tsw, in_d, in_h, in_w)) {
          T tsw_val = input[inp_offset_NC + iz_tsw * inp_sD + iy_tsw * inp_sH +
                            ix_tsw * inp_sW];
          gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut;
          giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut;
          giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut;
        }
        if (InBounds3D(iz_tse, iy_tse, ix_tse, in_d, in_h, in_w)) {
          T tse_val = input[inp_offset_NC + iz_tse * inp_sD + iy_tse * inp_sH +
                            ix_tse * inp_sW];
          gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut;
          giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut;
          giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut;
        }
        if (InBounds3D(iz_bnw, iy_bnw, ix_bnw, in_d, in_h, in_w)) {
          T bnw_val = input[inp_offset_NC + iz_bnw * inp_sD + iy_bnw * inp_sH +
                            ix_bnw * inp_sW];
          gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut;
          giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut;
          giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut;
        }
        if (InBounds3D(iz_bne, iy_bne, ix_bne, in_d, in_h, in_w)) {
          T bne_val = input[inp_offset_NC + iz_bne * inp_sD + iy_bne * inp_sH +
                            ix_bne * inp_sW];
          gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut;
          giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut;
          giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut;
        }
        if (InBounds3D(iz_bsw, iy_bsw, ix_bsw, in_d, in_h, in_w)) {
          T bsw_val = input[inp_offset_NC + iz_bsw * inp_sD + iy_bsw * inp_sH +
                            ix_bsw * inp_sW];
          gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut;
          giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut;
          giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut;
        }
        if (InBounds3D(iz_bse, iy_bse, ix_bse, in_d, in_h, in_w)) {
          T bse_val = input[inp_offset_NC + iz_bse * inp_sD + iy_bse * inp_sH +
                            ix_bse * inp_sW];
          gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut;
          giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut;
          giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut;
        }
      }
      // Grid gradients are optional: callers that do not need them pass
      // grad_grid == nullptr.
      if (grad_grid != nullptr) {
        T* gGrid_ptr_NDHW = grad_grid + index * grid_sW;
        // Chain through the coordinate transform (unnormalize/clip/reflect).
        gGrid_ptr_NDHW[0] = gix_mult * gix;
        gGrid_ptr_NDHW[1] = giy_mult * giy;
        gGrid_ptr_NDHW[2] = giz_mult * giz;
      }
    } else if (mode == Mode::nearest) {
      auto ix_nearest = static_cast<index_t>(std::round(ix));
      auto iy_nearest = static_cast<index_t>(std::round(iy));
      auto iz_nearest = static_cast<index_t>(std::round(iz));
      // assign nearest neighor pixel value to output pixel
      index_t gOut_offset =
          n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
      T* gInp_ptr_NC = grad_input + n * inp_sN;
      for (index_t c = 0; c < out_c;
           ++c, gOut_offset += gOut_sC, gInp_ptr_NC += inp_sC) {
        AtomicAdd3D(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, inp_sD,
                    inp_sH, inp_sW, in_d, in_h, in_w, grad_output[gOut_offset]);
      }
      // Nearest interpolation is piecewise constant in the grid coords, so
      // the grid gradient is zero everywhere.
      if (grad_grid != nullptr) {
        T* gGrid_ptr_NDHW = grad_grid + index * grid_sW;
        gGrid_ptr_NDHW[0] = static_cast<T>(0);
        gGrid_ptr_NDHW[1] = static_cast<T>(0);
        gGrid_ptr_NDHW[2] = static_cast<T>(0);
      }
    }
  }
}
std::vector<paddle::Tensor> GridSample3DCUDABackward(
    const paddle::Tensor& x, const paddle::Tensor& grid,
    const paddle::Tensor& grad_out, const std::string& mode,
    const std::string& padding_mode, bool align_corners) {
  // Backward pass of 3D grid sample.
  //   x:        (N, C, D_in, H_in, W_in) forward input
  //   grid:     (N, D_out, H_out, W_out, 3) normalized sampling coords
  //   grad_out: (N, C, D_out, H_out, W_out) upstream gradient
  // Returns only the gradient w.r.t. x. The grid is treated as a fixed
  // (non-learnable) input here, so the grid-gradient buffer is not allocated
  // at all: the kernel guards every grad_grid write behind a nullptr check.
  CHECK_INPUT_GPU(x);
  CHECK_INPUT_GPU(grid);
  CHECK_INPUT_GPU(grad_out);
  // Map string attrs to enums; unrecognized strings fall back to
  // zeros / bilinear (must match the forward pass).
  PaddingMode enum_padding_mode;
  Mode enum_mode;
  if (padding_mode == "border") {
    enum_padding_mode = PaddingMode::border;
  } else if (padding_mode == "reflection") {
    enum_padding_mode = PaddingMode::reflect;
  } else {
    enum_padding_mode = PaddingMode::zeros;
  }
  if (mode == "nearest") {
    enum_mode = Mode::nearest;
  } else {
    enum_mode = Mode::bilinear;
  }
  const int out_d = grid.shape()[1];
  const int out_h = grid.shape()[2];
  const int out_w = grid.shape()[3];
  const int n = x.shape()[0];
  const int c = x.shape()[1];
  const int in_d = x.shape()[2];
  const int in_h = x.shape()[3];
  const int in_w = x.shape()[4];
  // Zero-initialized: the kernel accumulates into it with atomics.
  auto x_grad_output =
      paddle::full({n, c, in_d, in_h, in_w}, 0, paddle::DataType::FLOAT32,
                   paddle::GPUPlace());
  const int count = static_cast<int>(n * out_d * out_h * out_w);
  const int max_threads_per_block = 512;
  const int block_num = (count - 1) / max_threads_per_block + 1;
  GridSample3DCudaBackwardKernel<float, int>
      <<<block_num, max_threads_per_block, 0, x.stream()>>>(
          count, grad_out.data<float>(), x.data<float>(), grid.data<float>(), c,
          out_d, out_h, out_w, in_d, in_h, in_w, x_grad_output.data<float>(),
          /*grad_grid=*/nullptr, enum_mode, enum_padding_mode, align_corners);
  // Match the forward pass: surface launch errors instead of silently
  // returning a zero gradient.
  cudaError_t error_check = cudaGetLastError();
  PD_CHECK(error_check == cudaSuccess, cudaGetErrorString(error_check));
  return {x_grad_output};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/sampling.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda launcher declaration
void farthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp,
int *idxs);
// op forward wrapper
std::vector<paddle::Tensor> farthest_point_sampling_cuda_forward(
    const paddle::Tensor &points_tensor, const int &npoints) {
  // Farthest point sampling.
  //   points_tensor: (B, N, 3) xyz coordinates (GPU, float32)
  //   npoints:       number of points to sample per batch entry
  // Returns idx_tensor: (B, npoints) int32 indices into the N axis.
  // Was missing: the launcher dereferences raw GPU pointers, so a CPU tensor
  // would crash without this check (macro already defined in this file).
  CHECK_INPUT(points_tensor);
  const int b = points_tensor.shape()[0];
  const int n = points_tensor.shape()[1];
  auto *points = points_tensor.data<float>();
  // `temp` holds the running min squared distance of each point to the
  // selected set; 1e10 acts as +infinity so the first pass always updates it.
  auto temp_tensor =
      paddle::full({b, n}, 1e10, paddle::DataType::FLOAT32, paddle::GPUPlace());
  auto idx_tensor =
      paddle::empty({b, npoints}, paddle::DataType::INT32, paddle::GPUPlace());
  auto *temp = temp_tensor.data<float>();
  auto *idx = idx_tensor.data<int>();
  farthest_point_sampling_kernel_launcher(b, n, npoints, points, temp, idx);
  return {idx_tensor};
}
// shape infer
// Output shape of farthest_point_sample: (B, npoints) indices.
std::vector<std::vector<int64_t>> FPSInferShape(
    std::vector<int64_t> points_shape, const int &npoints) {
  std::vector<int64_t> idx_shape{points_shape[0],
                                 static_cast<int64_t>(npoints)};
  return {idx_shape};
}
// dtype infer
// Sampled indices are always int32, independent of the point dtype.
std::vector<paddle::DataType> FPSInferDtype(paddle::DataType points_dtype) {
  std::vector<paddle::DataType> dtypes;
  dtypes.push_back(paddle::DataType::INT32);
  return dtypes;
}
// build op forward
// Register the `farthest_point_sample` custom op: given (B, N, 3) points and
// an `npoints` attribute, returns (B, npoints) int32 indices of a maximally
// spread-out subset (see farthest_point_sampling_cuda_forward above).
PD_BUILD_OP(farthest_point_sample)
    .Inputs({"points_tensor"})
    .Outputs({"idx_tensor"})
    .Attrs({"npoints: int"})
    .SetKernelFn(PD_KERNEL(farthest_point_sampling_cuda_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(FPSInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(FPSInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/sampling_gpu.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cmath>
#include "paddle/extension.h"
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Largest power of two <= work_size, clamped to [1, TOTAL_THREADS].
// Rewritten with integer arithmetic: the previous log()/log(2.0) form could
// truncate to the wrong exponent at exact powers of two due to floating-point
// rounding, and invoked log(0) (= -inf, then UB in the shift) for
// work_size <= 0.
inline int opt_n_threads(int work_size) {
  if (work_size < 1) return 1;  // guard degenerate input
  int n_threads = 1;
  while (n_threads * 2 <= work_size && n_threads * 2 <= TOTAL_THREADS) {
    n_threads *= 2;
  }
  return n_threads;
}
// Reduction step: fold slot idx2 into slot idx1, keeping the larger distance
// and the index of the point that produced it.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
                         int idx1, int idx2) {
  const float v_self = dists[idx1];
  const float v_other = dists[idx2];
  const int i_self = dists_i[idx1];
  const int i_other = dists_i[idx2];
  const bool take_other = v_other > v_self;
  dists[idx1] = take_other ? v_other : v_self;
  dists_i[idx1] = take_other ? i_other : i_self;
}
// Farthest point sampling: one CUDA block per batch entry. Iteratively picks
// the point farthest from the already-selected set; `temp` caches each
// point's min squared distance to that set across iterations. The per-block
// shared-memory tree reduction below finds the argmax over all threads.
template <unsigned int block_size>
__global__ void farthest_point_sampling_kernel(
    int b, int n, int m, const float *__restrict__ dataset,
    float *__restrict__ temp, int *__restrict__ idxs) {
  // dataset: (B, N, 3)
  // tmp: (B, N)
  // output:
  //   idx: (B, M)
  if (m <= 0) return;
  __shared__ float dists[block_size];
  __shared__ int dists_i[block_size];
  // Shift pointers to this block's batch entry.
  int batch_index = blockIdx.x;
  dataset += batch_index * n * 3;
  temp += batch_index * n;
  idxs += batch_index * m;
  int tid = threadIdx.x;
  const int stride = block_size;
  // Seed the sample set with point 0.
  int old = 0;
  if (threadIdx.x == 0) idxs[0] = old;
  __syncthreads();
  for (int j = 1; j < m; j++) {
    // Each thread scans a strided slice of points, tracking the one with the
    // largest min-distance to the selected set.
    int besti = 0;
    float best = -1;
    float x1 = dataset[old * 3 + 0];
    float y1 = dataset[old * 3 + 1];
    float z1 = dataset[old * 3 + 2];
    for (int k = tid; k < n; k += stride) {
      float x2, y2, z2;
      x2 = dataset[k * 3 + 0];
      y2 = dataset[k * 3 + 1];
      z2 = dataset[k * 3 + 2];
      float d =
          (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
      // Fold the newly selected point into the cached min distance.
      float d2 = min(d, temp[k]);
      temp[k] = d2;
      besti = d2 > best ? k : besti;
      best = d2 > best ? d2 : best;
    }
    dists[tid] = best;
    dists_i[tid] = besti;
    __syncthreads();
    // Unrolled shared-memory tree reduction over the block (compile-time
    // block_size makes each guard a constant branch).
    if (block_size >= 1024) {
      if (tid < 512) {
        __update(dists, dists_i, tid, tid + 512);
      }
      __syncthreads();
    }
    if (block_size >= 512) {
      if (tid < 256) {
        __update(dists, dists_i, tid, tid + 256);
      }
      __syncthreads();
    }
    if (block_size >= 256) {
      if (tid < 128) {
        __update(dists, dists_i, tid, tid + 128);
      }
      __syncthreads();
    }
    if (block_size >= 128) {
      if (tid < 64) {
        __update(dists, dists_i, tid, tid + 64);
      }
      __syncthreads();
    }
    if (block_size >= 64) {
      if (tid < 32) {
        __update(dists, dists_i, tid, tid + 32);
      }
      __syncthreads();
    }
    if (block_size >= 32) {
      if (tid < 16) {
        __update(dists, dists_i, tid, tid + 16);
      }
      __syncthreads();
    }
    if (block_size >= 16) {
      if (tid < 8) {
        __update(dists, dists_i, tid, tid + 8);
      }
      __syncthreads();
    }
    if (block_size >= 8) {
      if (tid < 4) {
        __update(dists, dists_i, tid, tid + 4);
      }
      __syncthreads();
    }
    if (block_size >= 4) {
      if (tid < 2) {
        __update(dists, dists_i, tid, tid + 2);
      }
      __syncthreads();
    }
    if (block_size >= 2) {
      if (tid < 1) {
        __update(dists, dists_i, tid, tid + 1);
      }
      __syncthreads();
    }
    // NOTE(review): there is no barrier between this read of dists_i[0] and
    // thread 0's write to dists_i[0] in the next iteration; upstream PointNet2
    // implementations share this pattern -- verify on the target architecture.
    old = dists_i[0];
    if (tid == 0) idxs[j] = old;
  }
}
// Launch farthest_point_sampling_kernel with a compile-time block size chosen
// to match opt_n_threads(n). One block per batch entry; the default case is
// effectively unreachable because opt_n_threads returns a power of two in
// [1, 1024].
void farthest_point_sampling_kernel_launcher(int b, int n, int m,
                                             const float *dataset, float *temp,
                                             int *idxs) {
  // dataset: (B, N, 3)
  // tmp: (B, N)
  // output:
  //   idx: (B, M)
  cudaError_t err;
  unsigned int n_threads = opt_n_threads(n);
  switch (n_threads) {
    case 1024:
      farthest_point_sampling_kernel<1024>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 512:
      farthest_point_sampling_kernel<512>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 256:
      farthest_point_sampling_kernel<256>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 128:
      farthest_point_sampling_kernel<128>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 64:
      farthest_point_sampling_kernel<64>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 32:
      farthest_point_sampling_kernel<32>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 16:
      farthest_point_sampling_kernel<16>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 8:
      farthest_point_sampling_kernel<8>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 4:
      farthest_point_sampling_kernel<4>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 2:
      farthest_point_sampling_kernel<2>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    case 1:
      farthest_point_sampling_kernel<1>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
      break;
    default:
      farthest_point_sampling_kernel<512>
          <<<b, n_threads>>>(b, n, m, dataset, temp, idxs);
  }
  err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/gather_points.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda launcher declaration
void gather_points_cuda_launcher(const int b, const int c, const int n,
const int npoints, const float *points,
const int *idx, float *out);
void gather_points_grad_cuda_launcher(const int b, const int c, const int n,
const int npoints, const float *grad_out,
const int *idx, float *grad_points);
// op forward wrapper
std::vector<paddle::Tensor> gather_points_cuda_forward(
    const paddle::Tensor &points_tensor, const paddle::Tensor &idx_tensor) {
  // Gather per-point features at sampled indices:
  //   points (B, C, N) + idx (B, npoints)  ->  out (B, C, npoints)
  CHECK_INPUT(points_tensor);
  CHECK_INPUT(idx_tensor);
  const auto &pts_shape = points_tensor.shape();
  const int batch = pts_shape[0];
  const int channels = pts_shape[1];
  const int num_points = pts_shape[2];
  const int num_sampled = idx_tensor.shape()[1];
  auto out_tensor =
      paddle::empty({batch, channels, num_sampled}, paddle::DataType::FLOAT32,
                    paddle::GPUPlace());
  gather_points_cuda_launcher(batch, channels, num_points, num_sampled,
                              points_tensor.data<float>(),
                              idx_tensor.data<int>(), out_tensor.data<float>());
  return {out_tensor};
}
// op backward wrapper
// NOTE: the misspelled name ("backwarad") is kept -- it is referenced by the
// PD_BUILD_GRAD_OP registration below.
std::vector<paddle::Tensor> gather_points_cuda_backwarad(
    const paddle::Tensor &grad_out_tensor, const paddle::Tensor &idx_tensor,
    const paddle::Tensor &points_tensor) {
  // Scatter the upstream gradient (B, C, npoints) back to the gathered
  // positions of a zero-initialized (B, C, N) tensor.
  CHECK_INPUT(grad_out_tensor);
  CHECK_INPUT(idx_tensor);
  CHECK_INPUT(points_tensor);
  const auto &grad_shape = grad_out_tensor.shape();
  const int batch = grad_shape[0];
  const int channels = grad_shape[1];
  const int num_sampled = grad_shape[2];
  const int num_points = points_tensor.shape()[2];
  auto grad_points_tensor =
      paddle::full({batch, channels, num_points}, 0.0,
                   paddle::DataType::FLOAT32, paddle::GPUPlace());
  gather_points_grad_cuda_launcher(batch, channels, num_points, num_sampled,
                                   grad_out_tensor.data<float>(),
                                   idx_tensor.data<int>(),
                                   grad_points_tensor.data<float>());
  return {grad_points_tensor};
}
// shape infer
// Output shape: keep batch and channel dims of points, sample count from idx.
std::vector<std::vector<int64_t>> GatherInferShape(
    std::vector<int64_t> points_shape, std::vector<int64_t> idx_shape) {
  return {{points_shape[0], points_shape[1], idx_shape[1]}};
}
// data type infer
// Gathered features keep the dtype of the source points.
std::vector<paddle::DataType> GatherInferDtype(paddle::DataType points_dtype,
                                               paddle::DataType idx_dtype) {
  std::vector<paddle::DataType> dtypes;
  dtypes.push_back(points_dtype);
  return dtypes;
}
// build op forward
// Register the `gather_operation` custom op: gathers (B, C, npoints) features
// from (B, C, N) points at int32 indices (B, npoints).
PD_BUILD_OP(gather_operation)
    .Inputs({"points", "idx"})
    .Outputs({"out"})
    .SetKernelFn(PD_KERNEL(gather_points_cuda_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(GatherInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(GatherInferDtype));
// build op backward
// Gradient op: scatters grad("out") back into a zero tensor shaped like
// "points" (see gather_points_cuda_backwarad above).
PD_BUILD_GRAD_OP(gather_operation)
    .Inputs({paddle::Grad("out"), "idx", "points"})
    .Outputs({paddle::Grad("points")})
    .SetKernelFn(PD_KERNEL(gather_points_cuda_backwarad));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/voxel_query.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3,
int nsample, float radius, int z_range,
int y_range, int x_range,
const float *new_xyz, const float *xyz,
const int *new_coords,
const int *point_indices, int *idx);
std::vector<paddle::Tensor> voxel_query_wrapper_stack(
    const paddle::Tensor &new_xyz_tensor, const paddle::Tensor &xyz_tensor,
    const paddle::Tensor &new_coords_tensor,
    const paddle::Tensor &point_indices_tensor, const float radius,
    const int nsample, const int z_range, const int y_range,
    const int x_range) {
  // Voxel-based neighbor query (stacked batch layout).
  //   new_xyz:       (M1 + M2 + ..., 3) query centers
  //   xyz:           (N1 + N2 + ..., 3) support points
  //   new_coords:    (M1 + M2 + ..., 4) [batch_idx, z, y, x] voxel coords
  //   point_indices: (B, Z, Y, X) voxel -> point index map
  // Returns idx: (M1 + M2 + ..., nsample) neighbor indices within `radius`.
  CHECK_INPUT(new_coords_tensor);
  CHECK_INPUT(point_indices_tensor);
  CHECK_INPUT(new_xyz_tensor);
  CHECK_INPUT(xyz_tensor);
  const float *new_xyz = new_xyz_tensor.data<float>();
  const float *xyz = xyz_tensor.data<float>();
  const int *new_coords = new_coords_tensor.data<int>();
  const int *point_indices = point_indices_tensor.data<int>();
  const int M = new_coords_tensor.shape()[0];
  // Spatial extents of the voxel index map. The batch size is not needed
  // here (each query carries its batch index in new_coords), so the unused
  // `B` local has been removed.
  const int Z = point_indices_tensor.shape()[1];
  const int Y = point_indices_tensor.shape()[2];
  const int X = point_indices_tensor.shape()[3];
  auto idx_tensor = paddle::full({M, nsample}, 0, paddle::DataType::INT32,
                                 paddle::GPUPlace());
  int *idx = idx_tensor.data<int>();
  voxel_query_kernel_launcher_stack(M, Z, Y, X, nsample, radius, z_range,
                                    y_range, x_range, new_xyz, xyz, new_coords,
                                    point_indices, idx);
  return {idx_tensor};
}
// shape infer
// Output shape: one row of `nsample` neighbor indices per query center.
// The remaining parameters are part of the registered op signature and do
// not affect the shape.
std::vector<std::vector<int64_t>> VoxelQueryInferShape(
    std::vector<int64_t> new_xyz_shape, std::vector<int64_t> xyz_shape,
    std::vector<int64_t> new_coords_shape,
    std::vector<int64_t> point_indices_shape, const float radius,
    const int nsample, const int z_range, const int y_range,
    const int x_range) {
  const int64_t num_queries = new_coords_shape[0];
  return {{num_queries, static_cast<int64_t>(nsample)}};
}
// data type infer
// Neighbor indices are always int32, regardless of the input dtypes.
std::vector<paddle::DataType> VoxelQueryInferDtype(
    paddle::DataType new_xyz_type, paddle::DataType xyz_type,
    paddle::DataType new_coords_type, paddle::DataType point_indices_type) {
  std::vector<paddle::DataType> dtypes;
  dtypes.push_back(paddle::DataType::INT32);
  return dtypes;
}
// build forward op
// Register the `voxel_query_wrapper` custom op: for each query center,
// searches the surrounding (z/y/x)_range voxels of a (B, Z, Y, X) voxel
// index map and returns up to `nsample` point indices within `radius`.
PD_BUILD_OP(voxel_query_wrapper)
    .Inputs({"new_xyz_tensor", "xyz_tensor", "new_coords_tensor",
             "point_indices_tensor"})
    .Outputs({"idx_tensor"})
    .Attrs({"radius: float", "nsample: int", "z_range: int", "y_range: int",
            "x_range: int"})
    .SetKernelFn(PD_KERNEL(voxel_query_wrapper_stack))
    .SetInferShapeFn(PD_INFER_SHAPE(VoxelQueryInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(VoxelQueryInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/voxel_query_gpu.cu
|
#include <curand_kernel.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "paddle/extension.h"
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Voxel-window neighbor query (stacked-batch layout). One thread per query
// center: scan the surrounding (2*z_range+1) x (2*y_range+1) x (2*x_range+1)
// voxel window in the voxel->point index map and collect up to `nsample`
// point indices whose points lie within `radius` of the center.
__global__ void voxel_query_kernel_stack(int M, int R1, int R2, int R3,
                                         int nsample, float radius, int z_range,
                                         int y_range, int x_range,
                                         const float *new_xyz, const float *xyz,
                                         const int *new_coords,
                                         const int *point_indices, int *idx) {
  // :param new_xyz: (M1 + M2 ..., 3) query center coordinates
  // :param xyz: (N1 + N2 ..., 3) point coordinates addressed by point_indices
  // :param new_coords: (M1 + M2 ..., 4) centers of the ball query, stored as
  //     (batch_idx, z, y, x) voxel coordinates
  // :param point_indices: (B, Z, Y, X) one point index per voxel; < 0 = empty
  // output:
  //      idx: (M1 + M2, nsample); idx[0] == -1 flags "no neighbor found"
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (pt_idx >= M) return;
  // Advance pointers to this thread's query center and output row.
  new_xyz += pt_idx * 3;
  new_coords += pt_idx * 4;
  idx += pt_idx * nsample;
  // NOTE(review): the RNG state is only consumed by the disabled
  // reservoir-sampling branch below; it is currently dead code.
  curandState state;
  curand_init(pt_idx, 0, 0, &state);
  float radius2 = radius * radius;  // compare squared distances (avoids sqrt)
  float new_x = new_xyz[0];
  float new_y = new_xyz[1];
  float new_z = new_xyz[2];
  int batch_idx = new_coords[0];
  int new_coords_z = new_coords[1];
  int new_coords_y = new_coords[2];
  int new_coords_x = new_coords[3];
  int cnt = 0;   // neighbors written so far (capped at nsample)
  int cnt2 = 0;  // total neighbors seen (used only by the disabled sampling)
  // for (int dz = -1*z_range; dz <= z_range; ++dz) {
  for (int dz = -1 * z_range; dz <= z_range; ++dz) {
    int z_coord = new_coords_z + dz;
    if (z_coord < 0 || z_coord >= R1) continue;  // clip window to the grid
    for (int dy = -1 * y_range; dy <= y_range; ++dy) {
      int y_coord = new_coords_y + dy;
      if (y_coord < 0 || y_coord >= R2) continue;
      for (int dx = -1 * x_range; dx <= x_range; ++dx) {
        int x_coord = new_coords_x + dx;
        if (x_coord < 0 || x_coord >= R3) continue;
        // Flatten (batch, z, y, x) into the (B, Z, Y, X) index map.
        int index = batch_idx * R1 * R2 * R3 + z_coord * R2 * R3 +
                    y_coord * R3 + x_coord;
        int neighbor_idx = point_indices[index];
        if (neighbor_idx < 0) continue;  // empty voxel
        float x_per = xyz[neighbor_idx * 3 + 0];
        float y_per = xyz[neighbor_idx * 3 + 1];
        float z_per = xyz[neighbor_idx * 3 + 2];
        float dist2 = (x_per - new_x) * (x_per - new_x) +
                      (y_per - new_y) * (y_per - new_y) +
                      (z_per - new_z) * (z_per - new_z);
        if (dist2 > radius2) continue;
        ++cnt2;
        if (cnt < nsample) {
          if (cnt == 0) {
            // Pre-fill the whole row with the first hit so any unused
            // slots are padded with a valid index.
            for (int l = 0; l < nsample; ++l) {
              idx[l] = neighbor_idx;
            }
          }
          idx[cnt] = neighbor_idx;
          ++cnt;
        }
        // else {
        //   float rnd = curand_uniform(&state);
        //   if (rnd < (float(nsample) / cnt2)) {
        //     int insertidx = ceilf(curand_uniform(&state) * nsample) - 1;
        //     idx[insertidx] = neighbor_idx;
        //   }
        // }
      }
    }
  }
  if (cnt == 0) idx[0] = -1;  // sentinel: no point within radius
}
// Host-side launcher for voxel_query_kernel_stack.
// :param new_coords: (M1 + M2 ..., 4) centers of the voxel query
// :param point_indices: (B, Z, Y, X)
// output:
//      idx: (M1 + M2, nsample)
void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3,
                                       int nsample, float radius, int z_range,
                                       int y_range, int x_range,
                                       const float *new_xyz, const float *xyz,
                                       const int *new_coords,
                                       const int *point_indices, int *idx) {
  // One thread per query center.
  dim3 grid(DIVUP(M, THREADS_PER_BLOCK));
  dim3 block(THREADS_PER_BLOCK);
  voxel_query_kernel_stack<<<grid, block>>>(M, R1, R2, R3, nsample, radius,
                                            z_range, y_range, x_range, new_xyz,
                                            xyz, new_coords, point_indices,
                                            idx);
  // cudaDeviceSynchronize(); // for using printf in kernel function
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/gather_points_gpu.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
batch version of point grouping, modified from the original implementation of
official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2018.
*/
#include "paddle/extension.h"
#define THREADS_PER_BLOCK 512
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Gather one feature value per thread: out[b, c, m] = points[b, c, idx[b, m]].
__global__ void gather_points_cuda_kernel(const int b, const int c, const int n,
                                          const int m,
                                          const float *__restrict__ points,
                                          const int *__restrict__ idx,
                                          float *__restrict__ out) {
  // points: (B, C, N)
  // idx: (B, M)
  // output:
  //      out: (B, C, M)
  int bs_idx = blockIdx.z;  // batch index
  int c_idx = blockIdx.y;   // channel index
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;  // output point index
  if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
  // Advance pointers to this thread's (batch, channel, point) element.
  out += bs_idx * c * m + c_idx * m + pt_idx;
  idx += bs_idx * m + pt_idx;
  points += bs_idx * c * n + c_idx * n;
  out[0] = points[idx[0]];
}
// Host-side launcher for gather_points_cuda_kernel.
// points: (B, C, N), idx: (B, npoints) -> out: (B, C, npoints)
void gather_points_cuda_launcher(const int b, const int c, const int n,
                                 const int npoints, const float *points,
                                 const int *idx, float *out) {
  // Grid: x covers output points, y covers channels, z covers batches.
  dim3 grid(DIVUP(npoints, THREADS_PER_BLOCK), c, b);
  dim3 block(THREADS_PER_BLOCK);
  gather_points_cuda_kernel<<<grid, block>>>(b, c, n, npoints, points, idx,
                                             out);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
// Backward of gather: grad_points[b, c, idx[b, m]] += grad_out[b, c, m].
__global__ void gather_points_grad_cuda_kernel(
    const int b, const int c, const int n, const int m,
    const float *__restrict__ grad_out, const int *__restrict__ idx,
    float *__restrict__ grad_points) {
  // grad_out: (B, C, M)
  // idx: (B, M)
  // output:
  //      grad_points: (B, C, N)
  int bs_idx = blockIdx.z;  // batch index
  int c_idx = blockIdx.y;   // channel index
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;  // gathered-point index
  if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
  // Advance pointers to this thread's (batch, channel, point) element.
  grad_out += bs_idx * c * m + c_idx * m + pt_idx;
  idx += bs_idx * m + pt_idx;
  grad_points += bs_idx * c * n + c_idx * n;
  // atomicAdd: several output points may gather from the same source point.
  atomicAdd(grad_points + idx[0], grad_out[0]);
}
// Host-side launcher for gather_points_grad_cuda_kernel.
// grad_out: (B, C, npoints), idx: (B, npoints) -> grad_points: (B, C, N)
void gather_points_grad_cuda_launcher(const int b, const int c, const int n,
                                      const int npoints, const float *grad_out,
                                      const int *idx, float *grad_points) {
  // Grid: x covers gathered points, y covers channels, z covers batches.
  dim3 grid(DIVUP(npoints, THREADS_PER_BLOCK), c, b);
  dim3 block(THREADS_PER_BLOCK);
  gather_points_grad_cuda_kernel<<<grid, block>>>(b, c, n, npoints, grad_out,
                                                  idx, grad_points);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/pointnet2_batch/ball_query_batch.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda launcher declaration
void ball_query_cuda_launcher_batch(const int b, const int n, const int m,
const float radius, const int nsample,
const float *new_xyz, const float *xyz,
int *idx);
// op forward wrapper
// Finds, for every query center, up to `nsample` points of the same batch
// within `radius`; returns their indices as an int32 (B, M, nsample) tensor.
std::vector<paddle::Tensor> ball_query_cuda_forward_batch(
    const paddle::Tensor &new_xyz_tensor, const paddle::Tensor &xyz_tensor,
    const float &radius, const int &nsample) {
  CHECK_INPUT(new_xyz_tensor);
  CHECK_INPUT(xyz_tensor);
  const auto new_xyz_shape = new_xyz_tensor.shape();
  const int b = new_xyz_shape[0];  // batch size
  const int m = new_xyz_shape[1];  // query centers per batch
  const int n = xyz_tensor.shape()[1];  // source points per batch
  auto idx_tensor = paddle::empty({b, m, nsample}, paddle::DataType::INT32,
                                  paddle::GPUPlace());
  ball_query_cuda_launcher_batch(b, n, m, radius, nsample,
                                 new_xyz_tensor.data<float>(),
                                 xyz_tensor.data<float>(),
                                 idx_tensor.data<int>());
  return {idx_tensor};
}
// shape infer: idx is (B, M, nsample) — one neighbor list per query center.
std::vector<std::vector<int64_t>> BallQueryInferShapeBatch(
    std::vector<int64_t> new_xyz_shape, std::vector<int64_t> xyz_shape,
    const float &radius, const int &nsample) {
  std::vector<int64_t> idx_shape{new_xyz_shape[0], new_xyz_shape[1],
                                 static_cast<int64_t>(nsample)};
  return {idx_shape};
}
// data type infer: neighbor indices are always int32, whatever the inputs.
std::vector<paddle::DataType> BallQueryInferDtypeBatch(paddle::DataType t1,
                                                       paddle::DataType t2) {
  const auto index_dtype = paddle::DataType::INT32;
  return {index_dtype};
}
// build forward op
// Registers `ball_query_batch` (dense-batch ball query). Output `idx` is the
// int32 (B, M, nsample) neighbor-index tensor; no backward op is registered
// because indices are not differentiable.
PD_BUILD_OP(ball_query_batch)
    .Inputs({"new_xyz_tensor", "xyz_tensor"})
    .Outputs({"idx"})
    .Attrs({"radius: float", "nsample: int"})
    .SetKernelFn(PD_KERNEL(ball_query_cuda_forward_batch))
    .SetInferShapeFn(PD_INFER_SHAPE(BallQueryInferShapeBatch))
    .SetInferDtypeFn(PD_INFER_DTYPE(BallQueryInferDtypeBatch));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/pointnet2_batch/ball_query_gpu_batch.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
#define THREADS_PER_BLOCK 512
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Dense-batch ball query: each thread scans all N points of its batch and
// collects up to `nsample` points within `radius` of one query center.
__global__ void ball_query_cuda_kernel_batch(const int b, const int n,
                                             const int m, const float radius,
                                             const int nsample,
                                             const float *__restrict__ new_xyz,
                                             const float *__restrict__ xyz,
                                             int *__restrict__ idx) {
  // new_xyz: (B, M, 3)
  // xyz: (B, N, 3)
  // output:
  //      idx: (B, M, nsample)
  int bs_idx = blockIdx.y;  // batch index
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;  // query center index
  if (bs_idx >= b || pt_idx >= m) return;
  // Advance pointers to this center, this batch's point slice, and the
  // output row.
  new_xyz += bs_idx * m * 3 + pt_idx * 3;
  xyz += bs_idx * n * 3;
  idx += bs_idx * m * nsample + pt_idx * nsample;
  float radius2 = radius * radius;  // compare squared distances (avoids sqrt)
  float new_x = new_xyz[0];
  float new_y = new_xyz[1];
  float new_z = new_xyz[2];
  int cnt = 0;  // neighbors found so far
  for (int k = 0; k < n; ++k) {
    float x = xyz[k * 3 + 0];
    float y = xyz[k * 3 + 1];
    float z = xyz[k * 3 + 2];
    float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
               (new_z - z) * (new_z - z);
    if (d2 < radius2) {
      if (cnt == 0) {
        // Pre-fill the row with the first hit so unused slots are padded
        // with a valid index.
        for (int l = 0; l < nsample; ++l) {
          idx[l] = k;
        }
      }
      idx[cnt] = k;
      ++cnt;
      if (cnt >= nsample) break;  // row full
    }
  }
  // NOTE(review): unlike the stacked variant, no -1 sentinel is written when
  // no neighbor is found, so the row keeps whatever the output buffer held
  // (allocated with paddle::empty upstream) — confirm callers tolerate this.
}
// Host-side launcher for ball_query_cuda_kernel_batch.
// new_xyz: (B, M, 3), xyz: (B, N, 3) -> idx: (B, M, nsample)
void ball_query_cuda_launcher_batch(const int b, const int n, const int m,
                                    const float radius, const int nsample,
                                    const float *new_xyz, const float *xyz,
                                    int *idx) {
  // Grid: x covers query centers, y covers batches.
  dim3 grid(DIVUP(m, THREADS_PER_BLOCK), b);
  dim3 block(THREADS_PER_BLOCK);
  ball_query_cuda_kernel_batch<<<grid, block>>>(b, n, m, radius, nsample,
                                                new_xyz, xyz, idx);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/pointnet2_batch/group_points_gpu_batch.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
batch version of point grouping, modified from the original implementation of
official PointNet++ codes. Written by Shaoshuai Shi All Rights Reserved 2018.
*/
#include "paddle/extension.h"
#define THREADS_PER_BLOCK 512
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Dense-batch grouping: out[b, c, pt, s] = points[b, c, idx[b, pt, s]].
__global__ void group_points_cuda_kernel_batch(const int b, const int c,
                                               const int n, const int npoints,
                                               const int nsample,
                                               const float *__restrict__ points,
                                               const int *__restrict__ idx,
                                               float *__restrict__ out) {
  // points: (B, C, N)
  // idx: (B, npoints, nsample)
  // output:
  //      out: (B, C, npoints, nsample)
  int bs_idx = blockIdx.z;  // batch index
  int c_idx = blockIdx.y;   // channel index
  // One thread per (point, sample) pair along the x-dimension.
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int pt_idx = index / nsample;
  if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
  int sample_idx = index % nsample;
  idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
  // Flattened source and destination offsets for the copy.
  int in_idx = bs_idx * c * n + c_idx * n + idx[0];
  int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample +
                pt_idx * nsample + sample_idx;
  out[out_idx] = points[in_idx];
}
// Host-side launcher for group_points_cuda_kernel_batch.
// points: (B, C, N), idx: (B, npoints, nsample) -> out: (B, C, npoints, nsample)
void group_points_cuda_launcher_batch(const int b, const int c, const int n,
                                      const int npoints, const int nsample,
                                      const float *points, const int *idx,
                                      float *out) {
  // Grid: x covers (point, sample) pairs, y covers channels, z covers batches.
  dim3 grid(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
  dim3 block(THREADS_PER_BLOCK);
  group_points_cuda_kernel_batch<<<grid, block>>>(b, c, n, npoints, nsample,
                                                  points, idx, out);
  // cudaDeviceSynchronize(); // for using printf in kernel function
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
// Backward of dense-batch grouping:
// grad_points[b, c, idx[b, pt, s]] += grad_out[b, c, pt, s].
__global__ void group_points_grad_cuda_kernel_batch(
    const int b, const int c, const int n, const int npoints, const int nsample,
    const float *__restrict__ grad_out, const int *__restrict__ idx,
    float *__restrict__ grad_points) {
  // grad_out: (B, C, npoints, nsample)
  // idx: (B, npoints, nsample)
  // output:
  //      grad_points: (B, C, N)
  int bs_idx = blockIdx.z;  // batch index
  int c_idx = blockIdx.y;   // channel index
  // One thread per (point, sample) pair along the x-dimension.
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int pt_idx = index / nsample;
  if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
  int sample_idx = index % nsample;
  // Advance to this thread's gradient element and its grouping index.
  grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample +
              pt_idx * nsample + sample_idx;
  idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
  // atomicAdd: several (point, sample) pairs may reference the same source.
  atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0], grad_out[0]);
}
// Host-side launcher for group_points_grad_cuda_kernel_batch.
// grad_out: (B, C, npoints, nsample), idx: (B, npoints, nsample)
//   -> grad_points: (B, C, N)
void group_points_grad_cuda_launcher_batch(const int b, const int c,
                                           const int n, const int npoints,
                                           const int nsample,
                                           const float *grad_out,
                                           const int *idx, float *grad_points) {
  // Grid: x covers (point, sample) pairs, y covers channels, z covers batches.
  dim3 grid(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
  dim3 block(THREADS_PER_BLOCK);
  group_points_grad_cuda_kernel_batch<<<grid, block>>>(
      b, c, n, npoints, nsample, grad_out, idx, grad_points);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/pointnet2_batch/group_points_batch.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda launcher declaration
void group_points_cuda_launcher_batch(const int b, const int c, const int n,
const int npoints, const int nsample,
const float *points, const int *idx,
float *out);
void group_points_grad_cuda_launcher_batch(const int b, const int c,
const int n, const int npoints,
const int nsample,
const float *grad_out,
const int *idx, float *grad_points);
// op forward wrapper
// Gathers grouped features: out[b, c, pt, s] = points[b, c, idx[b, pt, s]].
std::vector<paddle::Tensor> group_points_cuda_forward_batch(
    const paddle::Tensor &points_tensor, const paddle::Tensor &idx_tensor) {
  CHECK_INPUT(points_tensor);
  CHECK_INPUT(idx_tensor);
  const auto points_shape = points_tensor.shape();
  const int b = points_shape[0];  // batch size
  const int c = points_shape[1];  // feature channels
  const int n = points_shape[2];  // source points per batch
  const int npoints = idx_tensor.shape()[1];  // groups per batch
  const int nsample = idx_tensor.shape()[2];  // samples per group
  auto out_tensor = paddle::empty(
      {b, c, npoints, nsample}, paddle::DataType::FLOAT32, paddle::GPUPlace());
  group_points_cuda_launcher_batch(b, c, n, npoints, nsample,
                                   points_tensor.data<float>(),
                                   idx_tensor.data<int>(),
                                   out_tensor.data<float>());
  return {out_tensor};
}
// op backward wrapper
// Scatter-adds grad_out back through the grouping indices to produce the
// gradient w.r.t. points: grad_points (B, C, N), zero-initialized because
// the kernel accumulates with atomicAdd.
std::vector<paddle::Tensor> group_points_cuda_backward_batch(
    const paddle::Tensor &grad_out_tensor, const paddle::Tensor &idx_tensor,
    const paddle::Tensor &points_tensor) {
  CHECK_INPUT(grad_out_tensor);
  CHECK_INPUT(idx_tensor);
  // Fix: also validate points_tensor (the forward op checks it; a CPU tensor
  // passed here by mistake would otherwise go unnoticed).
  CHECK_INPUT(points_tensor);
  const int b = grad_out_tensor.shape()[0];        // batch size
  const int c = grad_out_tensor.shape()[1];        // feature channels
  const int npoints = grad_out_tensor.shape()[2];  // groups per batch
  const int nsample = grad_out_tensor.shape()[3];  // samples per group
  const int n = points_tensor.shape()[2];          // source points per batch
  auto *grad_out = grad_out_tensor.data<float>();
  auto *idx = idx_tensor.data<int>();
  auto grad_points_tensor = paddle::full(
      {b, c, n}, 0.0, paddle::DataType::FLOAT32, paddle::GPUPlace());
  auto *grad_points = grad_points_tensor.data<float>();
  group_points_grad_cuda_launcher_batch(b, c, n, npoints, nsample, grad_out,
                                        idx, grad_points);
  return {grad_points_tensor};
}
// shape infer: out is (B, C, npoints, nsample) — batch/channel come from
// points, grouping geometry from idx.
std::vector<std::vector<int64_t>> GroupInferShapeBatch(
    std::vector<int64_t> points_shape, std::vector<int64_t> idx_shape) {
  const int batch = points_shape[0];
  const int channels = points_shape[1];
  const int groups = idx_shape[1];
  const int samples = idx_shape[2];
  return {{batch, channels, groups, samples}};
}
// data type infer: the grouped output keeps the dtype of the input points.
std::vector<paddle::DataType> GroupInferDtypeBatch(
    paddle::DataType points_dtype, paddle::DataType idx_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(points_dtype);
  return out_dtypes;
}
// build forward op
// Registers `grouping_operation_batch` (dense-batch point grouping).
PD_BUILD_OP(grouping_operation_batch)
    .Inputs({"points_tensor", "idx_tensor"})
    .Outputs({"out_tensor"})
    .SetKernelFn(PD_KERNEL(group_points_cuda_forward_batch))
    .SetInferShapeFn(PD_INFER_SHAPE(GroupInferShapeBatch))
    .SetInferDtypeFn(PD_INFER_DTYPE(GroupInferDtypeBatch));
// build backward op
// Gradient flows only to `points_tensor`; `idx_tensor` holds integer
// indices and is not differentiable.
PD_BUILD_GRAD_OP(grouping_operation_batch)
    .Inputs({paddle::Grad("out_tensor"), "idx_tensor", "points_tensor"})
    .Outputs({paddle::Grad("points_tensor")})
    .SetKernelFn(PD_KERNEL(group_points_cuda_backward_batch));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/pointnet2_stack/group_points_stack.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda launcher declaration
void group_points_kernel_launcher_stack(const int B, const int M, const int C,
const int nsample,
const float *features,
const int *features_batch_cnt,
const int *idx,
const int *idx_batch_cnt, float *out);
void group_points_grad_kernel_launcher_stack(
const int B, const int M, const int C, const int N, const int nsample,
const float *grad_out, const int *idx, const int *idx_batch_cnt,
const int *features_batch_cnt, float *grad_features);
// op forward wrapper
// Groups features in the stacked-batch layout:
// out[i, c, s] = features[batch_offset(i) + idx[i, s], c].
std::vector<paddle::Tensor> group_points_cuda_forward_stack(
    const paddle::Tensor &features_tensor,
    const paddle::Tensor &features_batch_cnt_tensor,
    const paddle::Tensor &idx_tensor,
    const paddle::Tensor &idx_batch_cnt_tensor) {
  CHECK_INPUT(features_tensor);
  CHECK_INPUT(features_batch_cnt_tensor);
  CHECK_INPUT(idx_tensor);
  CHECK_INPUT(idx_batch_cnt_tensor);
  const int m = idx_tensor.shape()[0];        // total query points (M1+M2+...)
  const int nsample = idx_tensor.shape()[1];  // samples per query point
  // Fix: dropped the unused local `n` (= features_tensor.shape()[0]); the
  // forward launcher does not take it.
  const int c = features_tensor.shape()[1];       // feature channels
  const int b = idx_batch_cnt_tensor.shape()[0];  // batch size
  const float *features = features_tensor.data<float>();
  const int *features_batch_cnt = features_batch_cnt_tensor.data<int>();
  const int *idx = idx_tensor.data<int>();
  const int *idx_batch_cnt = idx_batch_cnt_tensor.data<int>();
  auto out_tensor = paddle::empty({m, c, nsample}, paddle::DataType::FLOAT32,
                                  paddle::GPUPlace());
  float *out = out_tensor.data<float>();
  group_points_kernel_launcher_stack(
      b, m, c, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out);
  return {out_tensor};
}
// op backward wrapper
// Scatter-adds grad_out back through the stacked grouping indices; the
// result grad_features (N, C) is zero-initialized because the kernel
// accumulates with atomicAdd.
std::vector<paddle::Tensor> group_points_cuda_backward_stack(
    const paddle::Tensor &grad_out_tensor,
    const paddle::Tensor &features_tensor,
    const paddle::Tensor &features_batch_cnt_tensor,
    const paddle::Tensor &idx_tensor,
    const paddle::Tensor &idx_batch_cnt_tensor) {
  CHECK_INPUT(grad_out_tensor);
  CHECK_INPUT(features_tensor);
  CHECK_INPUT(features_batch_cnt_tensor);
  CHECK_INPUT(idx_tensor);
  CHECK_INPUT(idx_batch_cnt_tensor);
  const int m = idx_tensor.shape()[0];        // total query points
  const int nsample = idx_tensor.shape()[1];  // samples per query point
  const int n = features_tensor.shape()[0];   // total feature rows
  const int c = features_tensor.shape()[1];   // feature channels
  const int b = idx_batch_cnt_tensor.shape()[0];  // batch size
  auto grad_features_tensor =
      paddle::full({n, c}, 0., paddle::DataType::FLOAT32, paddle::GPUPlace());
  group_points_grad_kernel_launcher_stack(
      b, m, c, n, nsample, grad_out_tensor.data<float>(),
      idx_tensor.data<int>(), idx_batch_cnt_tensor.data<int>(),
      features_batch_cnt_tensor.data<int>(),
      grad_features_tensor.data<float>());
  return {grad_features_tensor};
}
// shape infer: out is (M, C, nsample) — one grouped feature block per
// query point.
std::vector<std::vector<int64_t>> GroupInferShapeStack(
    std::vector<int64_t> features_shape,
    // Fix: parameter was misspelled `features_batch_cnt_shapeshape`.
    std::vector<int64_t> features_batch_cnt_shape,
    std::vector<int64_t> idx_shape, std::vector<int64_t> idx_batch_cnt_shape) {
  const int m = idx_shape[0];        // total query points
  const int nsample = idx_shape[1];  // samples per query point
  const int c = features_shape[1];   // feature channels
  return {{m, c, nsample}};
}
// data type infer: the grouped output keeps the dtype of the input features.
std::vector<paddle::DataType> GroupInferDtypeStack(
    paddle::DataType features_dtype, paddle::DataType features_batch_cnt_dtype,
    paddle::DataType idx_dtype, paddle::DataType idx_batch_cnt_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(features_dtype);
  return out_dtypes;
}
// build forward op
// Registers `grouping_operation_stack` (stacked-batch point grouping).
PD_BUILD_OP(grouping_operation_stack)
    .Inputs({"features_tensor", "features_batch_cnt_tensor", "idx_tensor",
             "idx_batch_cnt_tensor"})
    .Outputs({"out_tensor"})
    .SetKernelFn(PD_KERNEL(group_points_cuda_forward_stack))
    .SetInferShapeFn(PD_INFER_SHAPE(GroupInferShapeStack))
    .SetInferDtypeFn(PD_INFER_DTYPE(GroupInferDtypeStack))
// build backward op
// Gradient flows only to `features_tensor`; the index and batch-count
// inputs are integer data and not differentiable.
PD_BUILD_GRAD_OP(grouping_operation_stack)
    .Inputs({paddle::Grad("out_tensor"), "features_tensor",
             "features_batch_cnt_tensor", "idx_tensor", "idx_batch_cnt_tensor"})
    .Outputs({paddle::Grad("features_tensor")})
    .SetKernelFn(PD_KERNEL(group_points_cuda_backward_stack));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/pointnet2_stack/group_points_gpu_stack.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Stacked-batch-data version of point grouping, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include "paddle/extension.h"
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Backward of stacked grouping: scatter-add each grad_out element back to
// the source feature row it was grouped from.
__global__ void group_points_grad_kernel_stack(
    int B, int M, int C, int N, int nsample, const float *grad_out,
    const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt,
    float *grad_features) {
  // :param grad_out: (M1 + M2 ..., C, nsample) gradients of the forward output
  // :param idx: (M1 + M2 ..., nsample) batch-local point indices
  // :param idx_batch_cnt: (batch_size) [M1, M2, ...] query points per batch
  // :param features_batch_cnt: (batch_size) [N1, N2, ...] features per batch
  // :return:
  //      grad_features: (N1 + N2 ..., C) gradient of the features
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // Decompose the flat thread index into (point, channel, sample).
  int sample_idx = index % nsample;
  int C_idx = (index / nsample) % C;
  int pt_idx = (index / nsample / C);
  if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return;
  // Find which batch this query point belongs to by scanning idx_batch_cnt.
  int bs_idx = 0, pt_cnt = idx_batch_cnt[0];
  for (int k = 1; k < B; k++) {
    if (pt_idx < pt_cnt) break;
    pt_cnt += idx_batch_cnt[k];
    bs_idx = k;
  }
  // Offset of this batch's first row in the stacked features tensor.
  int features_batch_start_idx = 0;
  for (int k = 0; k < bs_idx; k++)
    features_batch_start_idx += features_batch_cnt[k];
  // Advance to this thread's gradient element and destination feature cell.
  grad_out += pt_idx * C * nsample + C_idx * nsample + sample_idx;
  idx += pt_idx * nsample + sample_idx;
  grad_features += (features_batch_start_idx + idx[0]) * C + C_idx;
  // atomicAdd: several samples may reference the same source feature row.
  atomicAdd(grad_features, grad_out[0]);
}
// Host-side launcher for group_points_grad_kernel_stack.
// :param grad_out: (M1 + M2 ..., C, nsample) gradients from the forward pass
// :param idx: (M1 + M2 ..., nsample) grouped point indices
// :param idx_batch_cnt: (batch_size) [M1, M2, ...]
// :param features_batch_cnt: (batch_size) [N1, N2, ...]
// :return:
//      grad_features: (N1 + N2 ..., C)
void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N,
                                             int nsample, const float *grad_out,
                                             const int *idx,
                                             const int *idx_batch_cnt,
                                             const int *features_batch_cnt,
                                             float *grad_features) {
  // One thread per (point, channel, sample) gradient element.
  dim3 grid(DIVUP(M * C * nsample, THREADS_PER_BLOCK));
  dim3 block(THREADS_PER_BLOCK);
  group_points_grad_kernel_stack<<<grid, block>>>(B, M, C, N, nsample, grad_out,
                                                  idx, idx_batch_cnt,
                                                  features_batch_cnt,
                                                  grad_features);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
// Forward of stacked grouping: out[pt, ch, s] = features[row, ch], where
// row = batch_offset(pt) + idx[pt, s] resolves the batch-local index into
// the stacked features tensor.
__global__ void group_points_kernel_stack(int B, int M, int C, int nsample,
                                          const float *features,
                                          const int *features_batch_cnt,
                                          const int *idx,
                                          const int *idx_batch_cnt,
                                          float *out) {
  // :param features: (N1 + N2 ..., C) stacked features to group
  // :param features_batch_cnt: (batch_size) [N1, N2, ...]
  // :param idx: (M1 + M2 ..., nsample) batch-local point indices
  // :param idx_batch_cnt: (batch_size) [M1, M2, ...]
  // :return:
  //      output: (M1 + M2, C, nsample) tensor
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // Decompose the flat thread index into (point, channel, sample).
  int sample_idx = index % nsample;
  int C_idx = (index / nsample) % C;
  int pt_idx = (index / nsample / C);
  if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return;
  // Find which batch this query point belongs to by scanning idx_batch_cnt.
  int bs_idx = 0, pt_cnt = idx_batch_cnt[0];
  for (int k = 1; k < B; k++) {
    if (pt_idx < pt_cnt) break;
    pt_cnt += idx_batch_cnt[k];
    bs_idx = k;
  }
  // Offset of this batch's first feature row in the stacked layout.
  int features_batch_start_idx = 0;
  for (int k = 0; k < bs_idx; k++)
    features_batch_start_idx += features_batch_cnt[k];
  features += features_batch_start_idx * C;
  idx += pt_idx * nsample + sample_idx;
  int in_idx = idx[0] * C + C_idx;
  int out_idx = pt_idx * C * nsample + C_idx * nsample + sample_idx;
  out[out_idx] = features[in_idx];
}
// Host-side launcher for group_points_kernel_stack.
// :param features: (N1 + N2 ..., C) stacked features to group
// :param features_batch_cnt: (batch_size) [N1, N2, ...]
// :param idx: (M1 + M2 ..., nsample) grouped point indices
// :param idx_batch_cnt: (batch_size) [M1, M2, ...]
// :return:
//      output: (M1 + M2, C, nsample) tensor
void group_points_kernel_launcher_stack(const int B, const int M, const int C,
                                        const int nsample,
                                        const float *features,
                                        const int *features_batch_cnt,
                                        const int *idx,
                                        const int *idx_batch_cnt, float *out) {
  // One thread per (point, channel, sample) output element.
  dim3 grid(DIVUP(M * C * nsample, THREADS_PER_BLOCK));
  dim3 block(THREADS_PER_BLOCK);
  group_points_kernel_stack<<<grid, block>>>(
      B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out);
  // cudaDeviceSynchronize(); // for using printf in kernel function
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/pointnet2_stack/ball_query_gpu_stack.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Stacked-batch-data version of ball query, modified from the original
implementation of official PointNet++ codes. Written by Shaoshuai Shi All Rights
Reserved 2019-2020.
*/
#include "paddle/extension.h"
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Stacked-batch ball query: each thread scans its batch's slice of the
// stacked xyz tensor and collects up to `nsample` neighbors (batch-local
// indices) within `radius` of one query center.
__global__ void ball_query_kernel_stack(int B, int M, float radius, int nsample,
                                        const float *new_xyz,
                                        const int *new_xyz_batch_cnt,
                                        const float *xyz,
                                        const int *xyz_batch_cnt, int *idx) {
  // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features
  // :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
  // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query
  // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
  // output:
  //      idx: (M, nsample); idx[0] == -1 flags "no neighbor found"
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (pt_idx >= M) return;
  // Find this center's batch by scanning the new_xyz batch counts.
  int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
  for (int k = 1; k < B; k++) {
    if (pt_idx < pt_cnt) break;
    pt_cnt += new_xyz_batch_cnt[k];
    bs_idx = k;
  }
  // Offset of this batch's first point row in the stacked xyz tensor.
  int xyz_batch_start_idx = 0;
  for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
  // for (int k = 0; k < bs_idx; k++) new_xyz_batch_start_idx +=
  // new_xyz_batch_cnt[k];
  new_xyz += pt_idx * 3;
  xyz += xyz_batch_start_idx * 3;
  idx += pt_idx * nsample;
  float radius2 = radius * radius;  // compare squared distances (avoids sqrt)
  float new_x = new_xyz[0];
  float new_y = new_xyz[1];
  float new_z = new_xyz[2];
  int n = xyz_batch_cnt[bs_idx];  // number of points in this batch
  int cnt = 0;                    // neighbors found so far
  for (int k = 0; k < n; ++k) {
    float x = xyz[k * 3 + 0];
    float y = xyz[k * 3 + 1];
    float z = xyz[k * 3 + 2];
    float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
               (new_z - z) * (new_z - z);
    if (d2 < radius2) {
      if (cnt == 0) {
        // Pre-fill the row with the first hit so unused slots are padded
        // with a valid index.
        for (int l = 0; l < nsample; ++l) {
          idx[l] = k;
        }
      }
      idx[cnt] = k;
      ++cnt;
      if (cnt >= nsample) break;  // row full
    }
  }
  if (cnt == 0) idx[0] = -1;  // sentinel: no point within radius
}
// Host-side launcher for ball_query_kernel_stack: one thread per query
// point. Tensor layouts are documented on the kernel; aborts the process if
// the launch reports an error.
void ball_query_kernel_launcher_stack(const int B, const int M,
                                      const float radius, const int nsample,
                                      const float *new_xyz,
                                      const int *new_xyz_batch_cnt,
                                      const float *xyz,
                                      const int *xyz_batch_cnt, int *idx) {
  const dim3 grid(DIVUP(M, THREADS_PER_BLOCK));
  const dim3 block(THREADS_PER_BLOCK);
  ball_query_kernel_stack<<<grid, block>>>(B, M, radius, nsample, new_xyz,
                                           new_xyz_batch_cnt, xyz,
                                           xyz_batch_cnt, idx);
  // cudaDeviceSynchronize(); // enable when debugging printf in the kernel
  const cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/pointnet2/pointnet2_stack/ball_query_stack.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
// All inputs to this op must already live on the GPU.
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda launcher declaration (implemented in ball_query_gpu_stack.cu)
void ball_query_kernel_launcher_stack(const int b, const int m,
                                      const float radius, const int nsample,
                                      const float *new_xyz,
                                      const int *new_xyz_batch_cnt,
                                      const float *xyz,
                                      const int *xyz_batch_cnt, int *idx);
// op forward wrapper
// Custom-op wrapper for the stacked-batch ball query: validates device
// placement, allocates the (M, nsample) int32 output on the GPU, and hands
// the raw pointers to the CUDA launcher.
std::vector<paddle::Tensor> ball_query_cuda_forward_stack(
    const paddle::Tensor &new_xyz_tensor,
    const paddle::Tensor &new_xyz_batch_cnt_tensor,
    const paddle::Tensor &xyz_tensor,
    const paddle::Tensor &xyz_batch_cnt_tensor, const float radius,
    const int nsample) {
  CHECK_INPUT(new_xyz_tensor);
  CHECK_INPUT(new_xyz_batch_cnt_tensor);
  CHECK_INPUT(xyz_tensor);
  CHECK_INPUT(xyz_batch_cnt_tensor);
  const int batch_size = xyz_batch_cnt_tensor.shape()[0];
  const int num_queries = new_xyz_tensor.shape()[0];
  // Zero-initialized index output; the kernel overwrites valid rows.
  auto idx_tensor = paddle::full({num_queries, nsample}, 0,
                                 paddle::DataType::INT32, paddle::GPUPlace());
  ball_query_kernel_launcher_stack(
      batch_size, num_queries, radius, nsample, new_xyz_tensor.data<float>(),
      new_xyz_batch_cnt_tensor.data<int>(), xyz_tensor.data<float>(),
      xyz_batch_cnt_tensor.data<int>(), idx_tensor.data<int>());
  return {idx_tensor};
}
// shape infer
// Shape inference: the op returns one (num_queries, nsample) index tensor.
std::vector<std::vector<int64_t>> BallQueryInferShapeStack(
    std::vector<int64_t> new_xyz_shape,
    std::vector<int64_t> new_xyz_batch_cnt_shape,
    std::vector<int64_t> xyz_shape, std::vector<int64_t> xyz_batch_cnt_shape,
    const float radius, const int nsample) {
  const int64_t num_queries = new_xyz_shape[0];
  std::vector<std::vector<int64_t>> out_shapes;
  out_shapes.push_back({num_queries, nsample});
  return out_shapes;
}
// data type infer
// Dtype inference: neighbor indices are always int32, independent of the
// input tensors' dtypes.
std::vector<paddle::DataType> BallQueryInferDtypeStack(
    paddle::DataType new_xyz_type, paddle::DataType new_xyz_batch_cnt_type,
    paddle::DataType xyz_type, paddle::DataType xyz_batch_cnt_type) {
  const auto idx_dtype = paddle::DataType::INT32;
  return {idx_dtype};
}
// build forward op
// Register the custom op. Exposed as `ball_query_stack`: four stacked-batch
// input tensors plus (radius, nsample) attributes, returning the
// (M, nsample) int32 neighbor-index tensor.
PD_BUILD_OP(ball_query_stack)
    .Inputs({"new_xyz_tensor", "new_xyz_batch_cnt_tensor", "xyz_tensor",
             "xyz_batch_cnt_tensor"})
    .Outputs({"idx_tensor"})
    .Attrs({"radius: float", "nsample: int"})
    .SetKernelFn(PD_KERNEL(ball_query_cuda_forward_stack))
    .SetInferShapeFn(PD_INFER_SHAPE(BallQueryInferShapeStack))
    .SetInferDtypeFn(PD_INFER_DTYPE(BallQueryInferDtypeStack));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/assign_score_withk/assign_score_withk_cuda.cc
|
/*
This code is based on https://github.com/CVMI-Lab/PAConv/blob/main/obj_cls/cuda_lib/src/gpu/assign_score_withk_gpu.cu
Ths copyright of CVMI-Lab/PAConv is as follows:
Apache-2.0 License [see LICENSE for details].
*/
#include "paddle/extension.h"
#include <vector>
// NOTE(review): this macro asserts CPU placement but does not appear to be
// used in this translation unit (the dispatchers check placement inline) —
// confirm before removing.
#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
// Aggregation modes; the wrappers below always pass SUM.
static const int SUM = 0;
static const int AVG = 1;
static const int MAX = 2;
// Forward declarations of the CUDA implementations (defined in
// assign_score_withk_kernel.cu), compiled only with CUDA support.
#ifdef PADDLE_WITH_CUDA
std::vector<paddle::Tensor> assign_score_withk_backward_cuda(
    const paddle::Tensor &scores,
    const paddle::Tensor &points,
    const paddle::Tensor &centers,
    const paddle::Tensor &knn_idx,
    const paddle::Tensor &output,
    const paddle::Tensor &output_grad
);
std::vector<paddle::Tensor> assign_score_withk_forward_cuda(
    const paddle::Tensor &scores,
    const paddle::Tensor &points,
    const paddle::Tensor &centers,
    const paddle::Tensor &knn_idx);
#endif
// CPU forward kernel for assign_score_withk (PAConv-style weighted point
// aggregation). For every (batch b, point n, out-channel o) it sums, over K
// neighbors and M weight matrices, the scored difference between neighbor
// features and center features.
//
// Flattened layouts:
//   points:  (B, N, M, O)   candidate neighbor features
//   centers: (B, N, M, O)   center features
//   scores:  (B, N, K, M)   per-neighbor, per-matrix weights
//   knn_idx: (B, N, K)      neighbor indices into the N dimension
//   output:  (B, O, N)      aggregated result (accumulated in place)
// nthreads == B * N * O; each outer-loop iteration handles one (b, n, o).
template<typename data_t>
void assign_score_withk_forward_cpu_kernel(
    const int nthreads,
    const int B,
    const int N,
    const int M,
    const int K,
    const int O,
    const int aggregate,
    const data_t *points,
    const data_t *centers,
    const data_t *scores,
    const int64_t *knn_idx,
    data_t *output) {
    for (long i = 0; i < nthreads; i++) {
        for (int k = 0; k < K; k++) {
            // ------- loop for M ----------
            for (int m = 0; m < M; m++) {
                // Decode the flat index i into (b, n, o).
                int b = static_cast<int>(i / (O * N));
                int n = static_cast<int>(i % (O * N) / O);
                int o = static_cast<int>(i % O);
                int kn = static_cast<int>(knn_idx[b * K * N + n * K + k]);
                if (aggregate == SUM) {
                    // output[b, o, n] += score * (points[b, kn, m, :] - centers[b, n, m, :])[o]
                    output[b * N * O + o * N + n] += points[b * N * M * O + kn * M * O + m * O + o] *
                                                     scores[b * N * K * M + n * K * M + k * M + m];
                    output[b * N * O + o * N + n] -= centers[b * N * M * O + n * M * O + m * O + o] *
                                                     scores[b * N * K * M + n * K * M + k * M + m];
                } else if (aggregate == AVG) {
                    // NOTE(review): unlike the SUM branch, these indices omit
                    // the batch offset `b`; this branch looks unfinished. It
                    // is unreachable today because the host wrapper always
                    // passes aggregate == SUM — confirm before enabling AVG.
                    output[o * N + n] += 2 * points[kn * M * O + m * O + o] * scores[n * K * M + k * M + m] / K;
                    output[o * N + n] -= points[n * M * O + m * O + o] * scores[n * K * M + k * M + m] / K;
                } else if (aggregate == MAX) {
                    // MAX aggregation is not implemented.
                }
            }
        }
    }
}
// CPU backward kernel: gradients w.r.t. points and centers.
//
// grad_out: (B, O, N); scores: (B, N, K, M); knn_idx: (B, N, K);
// grad_points / grad_centers: (B, N, M, O), accumulated in place.
// nthreads == B * M * O; each i handles one (b, m, o) and loops over (n, k).
template<typename data_t>
void assign_score_withk_backward_points_cpu_kernel(
        const int nthreads, const int B, const int N, const int M,
        const int K, const int O, const int aggregate,
        const data_t *grad_out,
        const data_t *scores,
        const int64_t *knn_idx,
        data_t *grad_points,
        data_t *grad_centers) {
    for (int i = 0; i < nthreads; i++) {
        // Decode the flat index i into (b, m, o).
        int b = static_cast<int>(i / (M * O));
        int m = static_cast<int>(i % (M * O) / O);
        int o = static_cast<int>(i % O);
        // ----- loop for N,K ---------
        for (int n = 0; n < N; n++) {
            for (int k = 0; k < K; k++) {
                int kn = knn_idx[b * N * K + n * K + k];
                // d(out)/d(points[b, kn, m, o]) = +score; d/d(centers) = -score.
                grad_points[b * N * M * O + kn * M * O + m * O + o] +=
                        scores[b * N * K * M + n * K * M + k * M + m] * grad_out[b * O * N + o * N + n];
                grad_centers[b * N * M * O + n * M * O + m * O + o] -=
                        scores[b * N * K * M + n * K * M + k * M + m] * grad_out[b * O * N + o * N + n];
            }
        }
    }
}
// CPU backward kernel: gradient w.r.t. scores.
//
// grad_out: (B, O, N); points/centers: (B, N, M, O); knn_idx: (B, N, K);
// grad_scores: (B, N, K, M), accumulated in place.
// nthreads == B * N * K * M; each i handles one (b, n, k, m) and sums over o.
template<typename data_t>
void assign_score_withk_backward_scores_cpu_kernel(
        const int nthreads, const int B, const int N, const int M,
        const int K, const int O, const int aggregate,
        const data_t *grad_out,
        const data_t *points,
        const data_t *centers,
        const int64_t *knn_idx,
        data_t *grad_scores) {
    for (int i = 0; i < nthreads; i++) {
        // Decode the flat index i into (b, n, k, m).
        int b = static_cast<int>(i / (N * M * K));
        int n = static_cast<int>(i % (N * M * K) / M / K);
        int k = static_cast<int>(i % (M * K) / M);
        int m = static_cast<int>(i % M);
        int kn = knn_idx[b * N * K + n * K + k];
        for (int o = 0; o < O; o++) {
            // d(out[b,o,n])/d(score[b,n,k,m]) = points[b,kn,m,o] - centers[b,n,m,o]
            grad_scores[b * N * K * M + n * K * M + k * M + m] += (points[b * N * M * O + kn * M * O + m * O + o]
                                                                   - centers[b * N * M * O + n * M * O + m * O + o]) *
                                                                  grad_out[b * O * N + o * N + n];
        }
    }
}
// CPU entry point of the assign_score_withk forward op.
//
// scores: (B, N, K, M); points/centers: (B, N, M, O); knn_idx: (B, N, K).
// Returns one tensor of shape (B, O, N).
std::vector<paddle::Tensor> assign_score_withk_forward_cpu(
        const paddle::Tensor &scores,
        const paddle::Tensor &points,
        const paddle::Tensor &centers,
        const paddle::Tensor &knn_idx) {
    auto aggregate = SUM;  // only SUM aggregation is wired up
    auto B = points.shape()[0];
    auto N = points.shape()[1];
    auto M = points.shape()[2];
    auto O = points.shape()[3];
    auto K = scores.shape()[2];
    // Allocate the output with the same dtype as `points`: the kernel below
    // is dispatched on points.type(), so a hard-coded FLOAT32 buffer would
    // be accessed through the wrong type for float64 inputs.
    auto output = paddle::full({B, O, N}, 0, points.type(), paddle::CPUPlace());
    int nthreads = B * N * O;  // one logical thread per (b, n, o)
    PD_DISPATCH_FLOATING_TYPES(
            points.type(), "assign_score_withk_forward_cpu_kernel", ([&] {
                assign_score_withk_forward_cpu_kernel<data_t>(
                        nthreads,
                        B, N, M, K, O, aggregate,
                        points.data<data_t>(),
                        centers.data<data_t>(),
                        scores.data<data_t>(),
                        knn_idx.data<int64_t>(),
                        output.data<data_t>()
                );
            })
    );
    return {output};
}
// CPU entry point of the assign_score_withk backward op.
//
// Returns {scores_grad, points_grad, centers_grad} with the same shapes as
// the corresponding forward inputs. Assumes scores/points/centers share one
// floating dtype (both kernels are dispatched on it).
std::vector<paddle::Tensor> assign_score_withk_backward_cpu(
        const paddle::Tensor &scores,
        const paddle::Tensor &points,
        const paddle::Tensor &centers,
        const paddle::Tensor &knn_idx,
        const paddle::Tensor &output,
        const paddle::Tensor &output_grad
) {
    // Allocate each gradient with its source tensor's dtype: the kernels are
    // dispatched on the input dtypes, so hard-coded FLOAT32 buffers would be
    // written through the wrong type for float64 inputs.
    auto scores_grad = paddle::full(scores.shape(), 0, scores.type(), paddle::CPUPlace());
    auto points_grad = paddle::full(points.shape(), 0, points.type(), paddle::CPUPlace());
    auto centers_grad = paddle::full(centers.shape(), 0, centers.type(), paddle::CPUPlace());
    auto aggregate = SUM;  // only SUM aggregation is wired up
    auto B = points.shape()[0];
    auto N = points.shape()[1];
    auto M = points.shape()[2];
    auto O = points.shape()[3];
    auto K = scores.shape()[2];
    int nthreads_1 = B * M * O;      // one logical thread per (b, m, o)
    int nthreads_2 = B * N * K * M;  // one logical thread per (b, n, k, m)
    PD_DISPATCH_FLOATING_TYPES(
            points.type(), "assign_score_withk_backward_points_cpu_kernel", ([&] {
                assign_score_withk_backward_points_cpu_kernel<data_t>(
                        nthreads_1, B, N, M, K, O, aggregate,
                        output_grad.data<data_t>(),
                        scores.data<data_t>(),
                        knn_idx.data<int64_t>(),
                        points_grad.data<data_t>(),
                        centers_grad.data<data_t>()
                );
            })
    );
    PD_DISPATCH_FLOATING_TYPES(
            points.type(), "assign_score_withk_backward_scores_cpu_kernel", ([&] {
                assign_score_withk_backward_scores_cpu_kernel<data_t>(
                        nthreads_2, B, N, M, K, O, aggregate,
                        output_grad.data<data_t>(),
                        points.data<data_t>(),
                        centers.data<data_t>(),
                        knn_idx.data<int64_t>(),
                        scores_grad.data<data_t>()
                );
            })
    );
    return {scores_grad, points_grad, centers_grad};
}
// Device dispatch for the forward op: CPU always, CUDA when compiled in.
std::vector<paddle::Tensor> assign_score_withk_forward(
        const paddle::Tensor &scores,
        const paddle::Tensor &points,
        const paddle::Tensor &centers,
        const paddle::Tensor &knn_idx) {
    if (scores.is_cpu()) {
        return assign_score_withk_forward_cpu(scores, points, centers, knn_idx);
    }
#ifdef PADDLE_WITH_CUDA
    if (scores.is_gpu()) {
        return assign_score_withk_forward_cuda(scores, points, centers, knn_idx);
    }
#endif
    PD_THROW("Unsupported device type for forward function of custom operator.");
}
// Device dispatch for the backward op: CPU always, CUDA when compiled in.
std::vector<paddle::Tensor> assign_score_withk_backward(
        const paddle::Tensor &scores,
        const paddle::Tensor &points,
        const paddle::Tensor &centers,
        const paddle::Tensor &knn_idx,
        const paddle::Tensor &output,
        const paddle::Tensor &output_grad
) {
    if (scores.is_cpu()) {
        return assign_score_withk_backward_cpu(scores, points, centers, knn_idx, output, output_grad);
    }
#ifdef PADDLE_WITH_CUDA
    if (scores.is_gpu()) {
        return assign_score_withk_backward_cuda(scores, points, centers, knn_idx, output, output_grad);
    }
#endif
    PD_THROW("Unsupported device type for backward function of custom operator.");
}
// Shape inference: the single output is (B, O, N) — batch, out-channels,
// points — all taken from the points tensor's shape.
std::vector<std::vector<int64_t>> InferShape(std::vector<int64_t> scores_shape,
                                             std::vector<int64_t> points_shape,
                                             std::vector<int64_t> centers_shape,
                                             std::vector<int64_t> knn_idx_shape) {
    const int64_t batch = points_shape[0];
    const int64_t num_points = points_shape[1];
    const int64_t out_channels = points_shape[3];
    return {{batch, out_channels, num_points}};
}
// Dtype inference: the output dtype follows the scores tensor (first input).
std::vector<paddle::DataType>
InferDtype(paddle::DataType t1, paddle::DataType t2, paddle::DataType t3, paddle::DataType t4) {
    const paddle::DataType out_dtype = t1;
    return {out_dtype};
}
// Register the forward op: four inputs, one (B, O, N) output.
PD_BUILD_OP(assign_score_withk)
    .Inputs({"scores", "points", "centers", "knn_idx"})
    .Outputs({"output"})
    .SetKernelFn(PD_KERNEL(assign_score_withk_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(InferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(InferDtype));
// Register the gradient op: consumes the forward inputs, the forward output
// and its gradient, and produces gradients for scores, points and centers.
PD_BUILD_GRAD_OP(assign_score_withk)
    .Inputs({"scores", "points", "centers", "knn_idx", "output",paddle::Grad("output")})
    .Outputs({paddle::Grad("scores"), paddle::Grad("points"), paddle::Grad("centers")})
    .SetKernelFn(PD_KERNEL(assign_score_withk_backward));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/assign_score_withk/assign_score_withk_kernel.cu
|
/*
This code is based on https://github.com/CVMI-Lab/PAConv/blob/main/obj_cls/cuda_lib/src/gpu/assign_score_withk_gpu.cu
Ths copyright of CVMI-Lab/PAConv is as follows:
Apache-2.0 License [see LICENSE for details].
*/
#include "paddle/extension.h"
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
// CUDA threads per block for every kernel in this file.
#define BLOCK 512
// Aggregation modes; the host wrappers below always pass SUM.
const int SUM = 0;
const int AVG = 1;
const int MAX = 2;
// CUDA forward kernel, grid-stride over nthreads == B * N * O (the loop
// advances by blockDim.x * gridDim.x). Same math and layouts as the CPU
// version: points/centers (B, N, M, O), scores (B, N, K, M),
// knn_idx (B, N, K), output (B, O, N). SUM accumulation uses atomicAdd
// because several (k, m) pairs target the same output element.
template<typename data_t>
__global__ void assign_score_withk_forward_kernel(
        const int nthreads,
        const int B,
        const int N,
        const int M,
        const int K,
        const int O,
        const int aggregate,
        const data_t *points,
        const data_t *centers,
        const data_t *scores,
        const int64_t *knn_idx,
        data_t *output) {
    long gid = blockIdx.x * blockDim.x + threadIdx.x;
    for (long i = gid; i < nthreads; i += blockDim.x * gridDim.x) {
        for (int k = 0; k < K; k++) {
            // ------- loop for M ----------
            for (int m = 0; m < M; m++) {
                // Decode the flat index i into (b, n, o).
                int b = static_cast<int>(i / (O * N));
                int n = static_cast<int>(i % (O * N) / O);
                int o = static_cast<int>(i % O);
                int kn = static_cast<int>(knn_idx[b * K * N + n * K + k]);
                if (aggregate == SUM) {
                    // feature concat
                    // output[b, o, n] += score * (points[b, kn, m, :] - centers[b, n, m, :])[o]
                    atomicAdd(output + b * N * O + o * N + n,
                              points[b * N * M * O + kn * M * O + m * O + o] *
                              scores[b * N * K * M + n * K * M + k * M + m]
                              - centers[b * N * M * O + n * M * O + m * O + o] *
                                scores[b * N * K * M + n * K * M + k * M + m]);
                } else if (aggregate == AVG) {
                    // NOTE(review): these indices omit the batch offset `b`
                    // and are non-atomic, unlike the SUM branch — this branch
                    // looks unfinished. It is unreachable today because the
                    // host wrapper always passes aggregate == SUM.
                    output[o * N + n] += 2 * points[kn * M * O + m * O + o] * scores[n * K * M + k * M + m] / K;
                    output[o * N + n] -= points[n * M * O + m * O + o] * scores[n * K * M + k * M + m] / K;
                } else if (aggregate == MAX) {
                    // MAX aggregation is not implemented.
                }
            }
        }
    }
}
// CUDA backward kernel for grad w.r.t. scores. Grid-stride over
// nthreads == B * N * K * M; each i handles one (b, n, k, m) and reduces
// over the O output channels. atomicAdd guards concurrent accumulation.
// Layouts match the CPU variant: grad_out (B, O, N), points/centers
// (B, N, M, O), knn_idx (B, N, K), grad_scores (B, N, K, M).
template<typename data_t>
__global__ void assign_score_withk_backward_scores_kernel(
        const int nthreads, const int B, const int N, const int M,
        const int K, const int O, const int aggregate,
        const data_t *grad_out,
        const data_t *points,
        const data_t *centers,
        const int64_t *knn_idx,
        data_t *grad_scores) {
    long gid = blockIdx.x * blockDim.x + threadIdx.x;
    for (long i = gid; i < nthreads; i += blockDim.x * gridDim.x) {
        // Decode the flat index i into (b, n, k, m).
        int b = static_cast<int>(i / (N * M * K));
        int n = static_cast<int>(i % (N * M * K) / M / K);
        int k = static_cast<int>(i % (M * K) / M);
        int m = static_cast<int>(i % M);
        int kn = knn_idx[b * N * K + n * K + k];
        for (int o = 0; o < O; o++) {
            // d(out[b,o,n])/d(score[b,n,k,m]) = points[b,kn,m,o] - centers[b,n,m,o]
            atomicAdd(grad_scores + b * N * K * M + n * K * M + k * M + m,
                      (points[b * N * M * O + kn * M * O + m * O + o]
                       - centers[b * N * M * O + n * M * O + m * O + o]) * grad_out[b * O * N + o * N + n]);
        }
    }
}
// CUDA backward kernel for grads w.r.t. points and centers. Grid-stride
// over nthreads == B * M * O; each i handles one (b, m, o) and loops over
// (n, k). atomicAdd is required because different n can select the same
// neighbor kn. Layouts: grad_out (B, O, N), scores (B, N, K, M),
// knn_idx (B, N, K), grad_points/grad_centers (B, N, M, O).
template<typename data_t>
__global__ void assign_score_withk_backward_points_kernel(
        const int nthreads, const int B, const int N, const int M,
        const int K, const int O, const int aggregate,
        const data_t *grad_out,
        const data_t *scores,
        const int64_t *knn_idx,
        data_t *grad_points,
        data_t *grad_centers) {
    long gid = blockIdx.x * blockDim.x + threadIdx.x;
    for (long i = gid; i < nthreads; i += blockDim.x * gridDim.x) {
        // Decode the flat index i into (b, m, o).
        int b = static_cast<int>(i / (M * O));
        int m = static_cast<int>(i % (M * O) / O);
        int o = static_cast<int>(i % O);
        // ----- loop for N,K ---------
        for (int n = 0; n < N; n++) {
            for (int k = 0; k < K; k++) {
                int kn = knn_idx[b * N * K + n * K + k];
                // d(out)/d(points[b, kn, m, o]) = +score; d/d(centers) = -score.
                atomicAdd(grad_points + b * N * M * O + kn * M * O + m * O + o,
                          scores[b * N * K * M + n * K * M + k * M + m] * grad_out[b * O * N + o * N + n]);
                atomicAdd(grad_centers + b * N * M * O + n * M * O + m * O + o,
                          -scores[b * N * K * M + n * K * M + k * M + m] * grad_out[b * O * N + o * N + n]);
            }
        }
    }
}
// GPU entry point of the assign_score_withk forward op.
//
// scores: (B, N, K, M); points/centers: (B, N, M, O); knn_idx: (B, N, K).
// Returns one tensor of shape (B, O, N).
std::vector<paddle::Tensor> assign_score_withk_forward_cuda(
        const paddle::Tensor &scores,
        const paddle::Tensor &points,
        const paddle::Tensor &centers,
        const paddle::Tensor &knn_idx) {
    auto aggregate = SUM;  // only SUM aggregation is wired up
    auto B = points.shape()[0];
    auto N = points.shape()[1];
    auto M = points.shape()[2];
    auto O = points.shape()[3];
    auto K = scores.shape()[2];
    // Allocate the output with the same dtype as `points`: the kernel is
    // dispatched on points.type(), so a hard-coded FLOAT32 buffer would be
    // written through the wrong type for float64 inputs.
    auto output = paddle::full({B, O, N}, 0, points.type(), paddle::GPUPlace());
    int nthreads = B * N * O;  // one logical thread per (b, n, o)
    int grid = (nthreads + BLOCK - 1) / BLOCK;
    PD_DISPATCH_FLOATING_TYPES(
            points.type(), "assign_score_withk_forward_kernel", ([&] {
                assign_score_withk_forward_kernel<data_t><<<grid, BLOCK, 0, points.stream()>>>(
                        nthreads,
                        B, N, M, K, O, aggregate,
                        points.data<data_t>(),
                        centers.data<data_t>(),
                        scores.data<data_t>(),
                        knn_idx.data<int64_t>(),
                        output.data<data_t>()
                );
            })
    );
    return {output};
}
// GPU entry point of the assign_score_withk backward op.
//
// Returns {scores_grad, points_grad, centers_grad} with the same shapes as
// the corresponding forward inputs. Assumes scores/points/centers share one
// floating dtype (both kernels are dispatched on it).
std::vector<paddle::Tensor> assign_score_withk_backward_cuda(
        const paddle::Tensor &scores,
        const paddle::Tensor &points,
        const paddle::Tensor &centers,
        const paddle::Tensor &knn_idx,
        const paddle::Tensor &output,
        const paddle::Tensor &output_grad
) {
    // Allocate each gradient with its source tensor's dtype: the kernels are
    // dispatched on the input dtypes, so hard-coded FLOAT32 buffers would be
    // written through the wrong type for float64 inputs.
    auto scores_grad = paddle::full(scores.shape(), 0, scores.type(), paddle::GPUPlace());
    auto points_grad = paddle::full(points.shape(), 0, points.type(), paddle::GPUPlace());
    auto centers_grad = paddle::full(centers.shape(), 0, centers.type(), paddle::GPUPlace());
    auto aggregate = SUM;  // only SUM aggregation is wired up
    auto B = points.shape()[0];
    auto N = points.shape()[1];
    auto M = points.shape()[2];
    auto O = points.shape()[3];
    auto K = scores.shape()[2];
    int nthreads_1 = B * M * O;      // one logical thread per (b, m, o)
    int nthreads_2 = B * N * K * M;  // one logical thread per (b, n, k, m)
    int grid1 = (nthreads_1 + BLOCK - 1) / BLOCK;
    int grid2 = (nthreads_2 + BLOCK - 1) / BLOCK;
    PD_DISPATCH_FLOATING_TYPES(
            scores.type(), "assign_score_withk_backward_points_kernel", ([&] {
                assign_score_withk_backward_points_kernel<data_t><<<grid1, BLOCK, 0, scores.stream()>>>(
                        nthreads_1, B, N, M, K, O, aggregate,
                        output_grad.data<data_t>(),
                        scores.data<data_t>(),
                        knn_idx.data<int64_t>(),
                        points_grad.data<data_t>(),
                        centers_grad.data<data_t>()
                );
            })
    );
    PD_DISPATCH_FLOATING_TYPES(
            points.type(), "assign_score_withk_backward_scores_kernel", ([&] {
                assign_score_withk_backward_scores_kernel<data_t><<<grid2, BLOCK, 0, points.stream()>>>(
                        nthreads_2, B, N, M, K, O, aggregate,
                        output_grad.data<data_t>(),
                        points.data<data_t>(),
                        centers.data<data_t>(),
                        knn_idx.data<int64_t>(),
                        scores_grad.data<data_t>()
                );
            })
    );
    return {scores_grad, points_grad, centers_grad};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/voxel/voxelize_op.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
template <typename T, typename T_int>
bool hard_voxelize_cpu_kernel(
const T *points, const float point_cloud_range_x_min,
const float point_cloud_range_y_min, const float point_cloud_range_z_min,
const float voxel_size_x, const float voxel_size_y,
const float voxel_size_z, const int grid_size_x, const int grid_size_y,
const int grid_size_z, const int64_t num_points, const int num_point_dim,
const int max_num_points_in_voxel, const int max_voxels, T *voxels,
T_int *coords, T_int *num_points_per_voxel, T_int *grid_idx_to_voxel_idx,
T_int *num_voxels) {
std::fill(voxels,
voxels + max_voxels * max_num_points_in_voxel * num_point_dim,
static_cast<T>(0));
num_voxels[0] = 0;
int voxel_idx, grid_idx, curr_num_point;
int coord_x, coord_y, coord_z;
for (int point_idx = 0; point_idx < num_points; ++point_idx) {
coord_x = floor(
(points[point_idx * num_point_dim + 0] - point_cloud_range_x_min) /
voxel_size_x);
coord_y = floor(
(points[point_idx * num_point_dim + 1] - point_cloud_range_y_min) /
voxel_size_y);
coord_z = floor(
(points[point_idx * num_point_dim + 2] - point_cloud_range_z_min) /
voxel_size_z);
if (coord_x < 0 || coord_x > grid_size_x || coord_x == grid_size_x) {
continue;
}
if (coord_y < 0 || coord_y > grid_size_y || coord_y == grid_size_y) {
continue;
}
if (coord_z < 0 || coord_z > grid_size_z || coord_z == grid_size_z) {
continue;
}
grid_idx =
coord_z * grid_size_y * grid_size_x + coord_y * grid_size_x + coord_x;
voxel_idx = grid_idx_to_voxel_idx[grid_idx];
if (voxel_idx == -1) {
voxel_idx = num_voxels[0];
if (num_voxels[0] == max_voxels || num_voxels[0] > max_voxels) {
continue;
}
num_voxels[0]++;
grid_idx_to_voxel_idx[grid_idx] = voxel_idx;
coords[voxel_idx * 3 + 0] = coord_z;
coords[voxel_idx * 3 + 1] = coord_y;
coords[voxel_idx * 3 + 2] = coord_x;
}
curr_num_point = num_points_per_voxel[voxel_idx];
if (curr_num_point < max_num_points_in_voxel) {
for (int j = 0; j < num_point_dim; ++j) {
voxels[voxel_idx * max_num_points_in_voxel * num_point_dim +
curr_num_point * num_point_dim + j] =
points[point_idx * num_point_dim + j];
}
num_points_per_voxel[voxel_idx] = curr_num_point + 1;
}
}
return true;
}
// CPU entry point of the hard_voxelize op.
//
// points: (num_points, num_point_dim) with x/y/z in the leading columns.
// Returns {voxels, coords, num_points_per_voxel, num_voxels}:
//   voxels: (max_voxels, max_num_points_in_voxel, num_point_dim)
//   coords: (max_voxels, 3), stored as (z, y, x)
//   num_points_per_voxel: (max_voxels)
//   num_voxels: (1), number of voxels actually produced.
std::vector<paddle::Tensor> hard_voxelize_cpu(
    const paddle::Tensor &points, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const int max_num_points_in_voxel, const int max_voxels) {
  auto num_points = points.shape()[0];
  auto num_point_dim = points.shape()[1];
  const float voxel_size_x = voxel_size[0];
  const float voxel_size_y = voxel_size[1];
  const float voxel_size_z = voxel_size[2];
  const float point_cloud_range_x_min = point_cloud_range[0];
  const float point_cloud_range_y_min = point_cloud_range[1];
  const float point_cloud_range_z_min = point_cloud_range[2];
  // Grid extents derived from range / voxel size.
  int grid_size_x = static_cast<int>(
      round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x));
  int grid_size_y = static_cast<int>(
      round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y));
  int grid_size_z = static_cast<int>(
      round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z));
  // Allocate voxels with the same dtype as `points`: the kernel below is
  // dispatched on points.type(), so a hard-coded FLOAT32 buffer would be
  // accessed through the wrong type for float64 input. The kernel
  // zero-fills it before use.
  auto voxels =
      paddle::empty({max_voxels, max_num_points_in_voxel, num_point_dim},
                    points.type(), paddle::CPUPlace());
  auto coords = paddle::full({max_voxels, 3}, 0, paddle::DataType::INT32,
                             paddle::CPUPlace());
  auto *coords_data = coords.data<int>();
  auto num_points_per_voxel = paddle::full(
      {max_voxels}, 0, paddle::DataType::INT32, paddle::CPUPlace());
  auto *num_points_per_voxel_data = num_points_per_voxel.data<int>();
  std::fill(num_points_per_voxel_data,
            num_points_per_voxel_data + num_points_per_voxel.size(),
            static_cast<int>(0));
  auto num_voxels =
      paddle::full({1}, 0, paddle::DataType::INT32, paddle::CPUPlace());
  auto *num_voxels_data = num_voxels.data<int>();
  // Dense grid -> voxel index map; -1 marks "no voxel opened yet".
  auto grid_idx_to_voxel_idx =
      paddle::full({grid_size_z, grid_size_y, grid_size_x}, -1,
                   paddle::DataType::INT32, paddle::CPUPlace());
  auto *grid_idx_to_voxel_idx_data = grid_idx_to_voxel_idx.data<int>();
  PD_DISPATCH_FLOATING_TYPES(
      points.type(), "hard_voxelize_cpu_kernel", ([&] {
        hard_voxelize_cpu_kernel<data_t, int>(
            points.data<data_t>(), point_cloud_range_x_min,
            point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x,
            voxel_size_y, voxel_size_z, grid_size_x, grid_size_y, grid_size_z,
            num_points, num_point_dim, max_num_points_in_voxel, max_voxels,
            voxels.data<data_t>(), coords_data, num_points_per_voxel_data,
            grid_idx_to_voxel_idx_data, num_voxels_data);
      }));
  return {voxels, coords, num_points_per_voxel, num_voxels};
}
// CUDA implementation (defined in voxelize_op.cu), compiled only with CUDA.
#ifdef PADDLE_WITH_CUDA
std::vector<paddle::Tensor> hard_voxelize_cuda(
    const paddle::Tensor &points, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range, int max_num_points_in_voxel,
    int max_voxels);
#endif
// Device dispatch: route to the CPU or (when compiled in) CUDA backend.
std::vector<paddle::Tensor> hard_voxelize(
    const paddle::Tensor &points, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const int max_num_points_in_voxel, const int max_voxels) {
  if (points.is_cpu()) {
    return hard_voxelize_cpu(points, voxel_size, point_cloud_range,
                             max_num_points_in_voxel, max_voxels);
  }
#ifdef PADDLE_WITH_CUDA
  if (points.is_gpu() || points.is_gpu_pinned()) {
    return hard_voxelize_cuda(points, voxel_size, point_cloud_range,
                              max_num_points_in_voxel, max_voxels);
  }
#endif
  PD_THROW(
      "Unsupported device type for hard_voxelize "
      "operator.");
}
// Shape inference for hard_voxelize; only the feature width depends on the
// input shape (points_shape[1]), everything else is set by the attributes.
std::vector<std::vector<int64_t>> HardInferShape(
    std::vector<int64_t> points_shape, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const int &max_num_points_in_voxel, const int &max_voxels) {
  const int64_t point_dim = points_shape[1];
  std::vector<std::vector<int64_t>> shapes;
  shapes.push_back({max_voxels, max_num_points_in_voxel, point_dim});
  shapes.push_back({max_voxels, 3});
  shapes.push_back({max_voxels});
  shapes.push_back({1});
  return shapes;
}
// Dtype inference: voxels inherit the points dtype; the three index/count
// outputs are always int32.
std::vector<paddle::DataType> HardInferDtype(paddle::DataType points_dtype) {
  const auto idx_dtype = paddle::DataType::INT32;
  return {points_dtype, idx_dtype, idx_dtype, idx_dtype};
}
// Register the custom op: one input point cloud, four outputs (voxels,
// coords, per-voxel counts, total voxel count), configured by the voxel
// size / range / capacity attributes.
PD_BUILD_OP(hard_voxelize)
    .Inputs({"POINTS"})
    .Outputs({"VOXELS", "COORS", "NUM_POINTS_PER_VOXEL", "num_voxels"})
    .SetKernelFn(PD_KERNEL(hard_voxelize))
    .Attrs({"voxel_size: std::vector<float>",
            "point_cloud_range: std::vector<float>",
            "max_num_points_in_voxel: int", "max_voxels: int"})
    .SetInferShapeFn(PD_INFER_SHAPE(HardInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(HardInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/voxel/voxelize_op.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
// Inputs must live on the GPU (or in pinned host memory).
#define CHECK_INPUT_CUDA(x) \
  PD_CHECK(x.is_gpu() || x.is_gpu_pinned(), #x " must be a GPU Tensor.")
// Grid-stride loop helper: each thread handles i, i + stride, ... < n.
#define CUDA_KERNEL_LOOP(i, n)                                  \
  for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)
// One thread per point: bin the point into its voxel-grid cell.
// Via atomics, records:
//   num_points_in_grid[grid_idx]  - running per-cell point count
//   points_to_num_idx[point_idx]  - the point's slot inside its voxel
//   points_to_grid_idx[point_idx] - flat cell index (left at caller's
//                                   initial value if the point is dropped)
//   points_valid[grid_idx]        - smallest accepted point index per cell
//                                   (assumes the caller pre-fills it with a
//                                   value >= num_points — TODO confirm);
//                                   used downstream to elect one
//                                   representative point per cell.
// Points outside the grid, or arriving after the voxel is already full,
// are skipped.
template <typename T, typename T_int>
__global__ void map_point_to_grid_kernel(
    const T *points, const float point_cloud_range_x_min,
    const float point_cloud_range_y_min, const float point_cloud_range_z_min,
    const float voxel_size_x, const float voxel_size_y,
    const float voxel_size_z, const int grid_size_x, const int grid_size_y,
    const int grid_size_z, const int64_t num_points, const int num_point_dim,
    const int max_num_points_in_voxel, T_int *points_to_grid_idx,
    T_int *points_to_num_idx, T_int *num_points_in_grid, int *points_valid) {
  int64_t point_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (point_idx > num_points || point_idx == num_points) {
    return;
  }
  // Voxel-grid coordinate of this point.
  int coord_x =
      floor((points[point_idx * num_point_dim + 0] - point_cloud_range_x_min) /
            voxel_size_x);
  int coord_y =
      floor((points[point_idx * num_point_dim + 1] - point_cloud_range_y_min) /
            voxel_size_y);
  int coord_z =
      floor((points[point_idx * num_point_dim + 2] - point_cloud_range_z_min) /
            voxel_size_z);
  // Discard points that fall outside the grid.
  if (coord_x < 0 || coord_x > grid_size_x || coord_x == grid_size_x) {
    return;
  }
  if (coord_y < 0 || coord_y > grid_size_y || coord_y == grid_size_y) {
    return;
  }
  if (coord_z < 0 || coord_z > grid_size_z || coord_z == grid_size_z) {
    return;
  }
  int grid_idx =
      coord_z * grid_size_y * grid_size_x + coord_y * grid_size_x + coord_x;
  // Claim a slot in this cell; only the first max_num_points_in_voxel
  // claimants keep theirs.
  T_int num = atomicAdd(num_points_in_grid + grid_idx, 1);
  if (num < max_num_points_in_voxel) {
    points_to_num_idx[point_idx] = num;
    points_to_grid_idx[point_idx] = grid_idx;
    atomicMin(points_valid + grid_idx, static_cast<int>(point_idx));
  }
}
// Grid-stride pass over points: set points_flag[i] = 1 for exactly the one
// point per non-empty cell that won the atomicMin election (the point whose
// index equals points_valid[grid_idx]). The `id != num_points` test implies
// points_valid was initialized to num_points for empty cells; points_flag is
// presumably zero-initialized by the caller — TODO confirm.
template <typename T_int>
__global__ void update_points_flag(const int *points_valid,
                                   const T_int *points_to_grid_idx,
                                   const int num_points, int *points_flag) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < num_points; i += gridDim.x * blockDim.x) {
    T_int grid_idx = points_to_grid_idx[i];
    if (grid_idx >= 0) {
      int id = points_valid[grid_idx];
      if (id != num_points && id == i) {
        points_flag[i] = 1;  // i is the representative point of its cell
      }
    }
  }
}
// Assign compact voxel indices to the elected representative points using
// the exclusive prefix sum of points_flag (points_flag_prefix_sum); cells
// whose running index reaches max_voxels are dropped. The thread handling
// the last point also writes the total voxel count, clamped to max_voxels.
template <typename T_int>
__global__ void get_voxel_idx_kernel(const int *points_flag,
                                     const T_int *points_to_grid_idx,
                                     const int *points_flag_prefix_sum,
                                     const int num_points, const int max_voxels,
                                     T_int *num_voxels,
                                     T_int *grid_idx_to_voxel_idx) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  for (int i = tid; i < num_points; i += gridDim.x * blockDim.x) {
    if (points_flag[i] == 1) {
      T_int grid_idx = points_to_grid_idx[i];
      // Exclusive prefix sum == number of voxels opened before this one.
      int num = points_flag_prefix_sum[i];
      if (num < max_voxels) {
        grid_idx_to_voxel_idx[grid_idx] = num;
      }
    }
    if (i == num_points - 1) {
      // Total voxels = prefix sum at the last point + its own flag.
      int num = points_flag_prefix_sum[i] + points_flag[i];
      if (num < max_voxels) {
        num_voxels[0] = num;
      } else {
        num_voxels[0] = max_voxels;
      }
    }
  }
}
// Zero-fill the voxel feature buffer: one element per thread, `num`
// elements total.
template <typename T>
__global__ void init_voxels_kernel(const int64_t num, T *voxels) {
  const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < num) {
    voxels[tid] = static_cast<T>(0);
  }
}
// One thread per point: copy the point's features into its reserved slot of
// its voxel. Points that were dropped earlier (grid/num index still -1), or
// whose cell never received a voxel index, are skipped.
template <typename T, typename T_int>
__global__ void assign_voxels_kernel(
    const T *points, const T_int *points_to_grid_idx,
    const T_int *points_to_num_idx, const T_int *grid_idx_to_voxel_idx,
    const int64_t num_points, const int num_point_dim,
    const int max_num_points_in_voxel, T *voxels) {
  int64_t point_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (point_idx > num_points || point_idx == num_points) {
    return;
  }
  T_int grid_idx = points_to_grid_idx[point_idx];
  T_int num_idx = points_to_num_idx[point_idx];
  if (grid_idx > -1 && num_idx > -1) {
    T_int voxel_idx = grid_idx_to_voxel_idx[grid_idx];
    if (voxel_idx > -1) {
      // voxels[voxel_idx][num_idx][:] = points[point_idx][:]
      for (int64_t i = 0; i < num_point_dim; ++i) {
        voxels[voxel_idx * max_num_points_in_voxel * num_point_dim +
               num_idx * num_point_dim + i] =
            points[point_idx * num_point_dim + i];
      }
    }
  }
}
// One thread per grid cell: for cells that received a voxel index, decode
// the flat grid index back into (z, y, x) coordinates and store the final
// per-voxel point count, clamped to max_num_points_in_voxel (the raw grid
// counter kept counting past the capacity).
template <typename T, typename T_int>
__global__ void assign_coords_kernel(const T_int *grid_idx_to_voxel_idx,
                                     const T_int *num_points_in_grid,
                                     const int num_grids, const int grid_size_x,
                                     const int grid_size_y,
                                     const int grid_size_z,
                                     const int max_num_points_in_voxel,
                                     T *coords, T *num_points_per_voxel) {
  int64_t grid_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (grid_idx > num_grids || grid_idx == num_grids) {
    return;
  }
  T_int voxel_idx = grid_idx_to_voxel_idx[grid_idx];
  if (voxel_idx > -1) {
    // Invert grid_idx = ((z * grid_size_y) + y) * grid_size_x + x.
    T_int coord_z = grid_idx / grid_size_x / grid_size_y;
    T_int coord_y =
        (grid_idx - coord_z * grid_size_x * grid_size_y) / grid_size_x;
    T_int coord_x =
        grid_idx - coord_z * grid_size_x * grid_size_y - coord_y * grid_size_x;
    coords[voxel_idx * 3 + 0] = coord_z;
    coords[voxel_idx * 3 + 1] = coord_y;
    coords[voxel_idx * 3 + 2] = coord_x;
    num_points_per_voxel[voxel_idx] =
        min(num_points_in_grid[grid_idx], max_num_points_in_voxel);
  }
}
// Hard (deterministic) voxelization on GPU.
//
// points:                  (num_points, num_point_dim) GPU tensor
// voxel_size:              voxel extents [x, y, z]
// point_cloud_range:       [x_min, y_min, z_min, x_max, y_max, z_max]
// max_num_points_in_voxel: points kept per voxel (extras are dropped)
// max_voxels:              upper bound on the number of output voxels
//
// Returns {voxels, coords, num_points_per_voxel, num_voxels}:
//   voxels               (max_voxels, max_num_points_in_voxel, num_point_dim)
//   coords               (max_voxels, 3), stored as (z, y, x)
//   num_points_per_voxel (max_voxels,)
//   num_voxels           (1,) actual number of non-empty voxels produced
std::vector<paddle::Tensor> hard_voxelize_cuda(
    const paddle::Tensor &points, const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range, int max_num_points_in_voxel,
    int max_voxels) {
  // check device
  CHECK_INPUT_CUDA(points);
  int64_t num_points = points.shape()[0];
  int64_t num_point_dim = points.shape()[1];
  const float voxel_size_x = voxel_size[0];
  const float voxel_size_y = voxel_size[1];
  const float voxel_size_z = voxel_size[2];
  const float point_cloud_range_x_min = point_cloud_range[0];
  const float point_cloud_range_y_min = point_cloud_range[1];
  const float point_cloud_range_z_min = point_cloud_range[2];
  // Grid resolution per axis, rounded to the nearest integer.
  int grid_size_x = static_cast<int>(
      round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size_x));
  int grid_size_y = static_cast<int>(
      round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size_y));
  int grid_size_z = static_cast<int>(
      round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size_z));
  int num_grids = grid_size_x * grid_size_y * grid_size_z;
  // Output buffers. `voxels` is allocated uninitialized and cleared later by
  // init_voxels_kernel.
  auto voxels =
      paddle::empty({max_voxels, max_num_points_in_voxel, num_point_dim},
                    paddle::DataType::FLOAT32, paddle::GPUPlace());
  auto coords = paddle::full({max_voxels, 3}, 0, paddle::DataType::INT32,
                             paddle::GPUPlace());
  auto *coords_data = coords.data<int>();
  auto num_points_per_voxel = paddle::full(
      {max_voxels}, 0, paddle::DataType::INT32, paddle::GPUPlace());
  auto *num_points_per_voxel_data = num_points_per_voxel.data<int>();
  // Scratch tensors; -1 marks "unassigned" throughout.
  auto points_to_grid_idx = paddle::full(
      {num_points}, -1, paddle::DataType::INT32, paddle::GPUPlace());
  auto *points_to_grid_idx_data = points_to_grid_idx.data<int>();
  auto points_to_num_idx = paddle::full(
      {num_points}, -1, paddle::DataType::INT32, paddle::GPUPlace());
  auto *points_to_num_idx_data = points_to_num_idx.data<int>();
  auto num_points_in_grid =
      paddle::full({grid_size_z, grid_size_y, grid_size_x}, 0,
                   paddle::DataType::INT32, paddle::GPUPlace());
  auto *num_points_in_grid_data = num_points_in_grid.data<int>();
  auto grid_idx_to_voxel_idx =
      paddle::full({grid_size_z, grid_size_y, grid_size_x}, -1,
                   paddle::DataType::INT32, paddle::GPUPlace());
  auto *grid_idx_to_voxel_idx_data = grid_idx_to_voxel_idx.data<int>();
  auto num_voxels =
      paddle::full({1}, 0, paddle::DataType::INT32, paddle::GPUPlace());
  auto *num_voxels_data = num_voxels.data<int>();
  // Initialized to num_points (sentinel meaning "no point claimed this cell
  // yet"); presumably written by map_point_to_grid_kernel — defined elsewhere.
  auto points_valid = paddle::full({grid_size_z * grid_size_y * grid_size_x},
                                   static_cast<int>(num_points),
                                   paddle::DataType::INT32, paddle::GPUPlace());
  int *points_valid_data = points_valid.data<int>();
  auto points_flag = paddle::full({num_points}, 0, paddle::DataType::INT32,
                                  paddle::GPUPlace());
  // 1. Find the grid index for each point, compute the
  // number of points in each grid
  int64_t threads = 512;
  int64_t blocks = (num_points + threads - 1) / threads;
  PD_DISPATCH_FLOATING_TYPES(
      points.type(), "map_point_to_grid_kernel", ([&] {
        map_point_to_grid_kernel<data_t, int>
            <<<blocks, threads, 0, points.stream()>>>(
                points.data<data_t>(), point_cloud_range_x_min,
                point_cloud_range_y_min, point_cloud_range_z_min, voxel_size_x,
                voxel_size_y, voxel_size_z, grid_size_x, grid_size_y,
                grid_size_z, num_points, num_point_dim, max_num_points_in_voxel,
                points_to_grid_idx_data, points_to_num_idx_data,
                num_points_in_grid_data, points_valid_data);
      }));
  // 2. Find the number of non-zero voxels
  int *points_flag_data = points_flag.data<int>();
  threads = 512;
  blocks = (num_points + threads - 1) / threads;
  update_points_flag<int><<<blocks, threads, 0, points.stream()>>>(
      points_valid_data, points_to_grid_idx_data, num_points, points_flag_data);
  // Exclusive positions for selected voxels come from a prefix sum over the
  // per-point flags.
  auto points_flag_prefix_sum =
      paddle::experimental::cumsum(points_flag, 0, false, true, false);
  int *points_flag_prefix_sum_data = points_flag_prefix_sum.data<int>();
  get_voxel_idx_kernel<int><<<blocks, threads, 0, points.stream()>>>(
      points_flag_data, points_to_grid_idx_data, points_flag_prefix_sum_data,
      num_points, max_voxels, num_voxels_data, grid_idx_to_voxel_idx_data);
  // 3. Store points to voxels coords and num_points_per_voxel
  int64_t num = max_voxels * max_num_points_in_voxel * num_point_dim;
  threads = 512;
  blocks = (num + threads - 1) / threads;
  PD_DISPATCH_FLOATING_TYPES(points.type(), "init_voxels_kernel", ([&] {
                               init_voxels_kernel<data_t>
                                   <<<blocks, threads, 0, points.stream()>>>(
                                       num, voxels.data<data_t>());
                             }));
  threads = 512;
  blocks = (num_points + threads - 1) / threads;
  PD_DISPATCH_FLOATING_TYPES(
      points.type(), "assign_voxels_kernel", ([&] {
        assign_voxels_kernel<data_t, int>
            <<<blocks, threads, 0, points.stream()>>>(
                points.data<data_t>(), points_to_grid_idx_data,
                points_to_num_idx_data, grid_idx_to_voxel_idx_data, num_points,
                num_point_dim, max_num_points_in_voxel, voxels.data<data_t>());
      }));
  // 4. Store coords, num_points_per_voxel
  blocks = (num_grids + threads - 1) / threads;
  assign_coords_kernel<int><<<blocks, threads, 0, points.stream()>>>(
      grid_idx_to_voxel_idx_data, num_points_in_grid_data, num_grids,
      grid_size_x, grid_size_y, grid_size_z, max_num_points_in_voxel,
      coords_data, num_points_per_voxel_data);
  return {voxels, coords, num_points_per_voxel, num_voxels};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/ms_deform_attn/ms_deform_attn_cuda_kernel.h
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from
*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#include "paddle/extension.h"
// Default number of threads per CUDA block for the kernels below.
const int CUDA_NUM_THREADS = 1024;
// Number of blocks needed to cover N work items at num_threads per block.
inline int GET_BLOCKS(const int N, const int num_threads) {
  return (N + num_threads - 1) / num_threads;
}
// Grid-stride loop: each thread handles indices i, i + blockDim*gridDim, ...
#define CUDA_KERNEL_LOOP(i, n)                                 \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)
// Bilinearly sample one channel of the value map at fractional location
// (h, w) for attention head m, channel c. The value layout implied by the
// strides is (height, width, nheads, channels), row-major. Corners that fall
// outside [0, height) x [0, width) contribute zero, so border samples degrade
// gracefully instead of reading out of bounds.
template <typename scalar_t>
__device__ scalar_t ms_deform_attn_im2col_bilinear(
    const scalar_t *&bottom_data, const int &height, const int &width,
    const int &nheads, const int &channels, const scalar_t &h,
    const scalar_t &w, const int &m, const int &c) {
  // Integer cell surrounding (h, w) and the fractional offsets inside it.
  const int h_low = floorf(h);
  const int w_low = floorf(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;
  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;
  // Flattened strides for the (H, W, nheads, channels) layout.
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;
  // Fetch the four corners, zeroing any that lie outside the map.
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0) {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1) {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0) {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1) {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
  }
  // Standard bilinear blend of the four corners.
  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// Backward of the bilinear sampling for a single sample point.
// Accumulates into grad_value with atomicAdd (that buffer is shared across
// threads) but writes grad_sampling_loc / grad_attn_weight with PLAIN stores
// — callers pass per-thread scratch slots (see the shared-memory reduction
// kernels below), so those pointers must be thread-private.
template <typename scalar_t>
__device__ void ms_deform_attn_col2im_bilinear(
    const scalar_t *&bottom_data, const int &height, const int &width,
    const int &nheads, const int &channels, const scalar_t &h,
    const scalar_t &w, const int &m, const int &c, const scalar_t &top_grad,
    const scalar_t &attn_weight, scalar_t *&grad_value,
    scalar_t *grad_sampling_loc, scalar_t *grad_attn_weight) {
  // Same corner/offset setup as the forward bilinear sampler.
  const int h_low = floorf(h);
  const int w_low = floorf(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;
  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;
  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  const scalar_t top_grad_value = top_grad * attn_weight;
  scalar_t grad_h_weight = 0, grad_w_weight = 0;
  // For each in-bounds corner: scatter grad into grad_value and accumulate
  // the derivative of the bilinear weights w.r.t. (h, w).
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0) {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
    grad_h_weight -= hw * v1;
    grad_w_weight -= hh * v1;
    atomicAdd(grad_value + ptr1, w1 * top_grad_value);
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1) {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
    grad_h_weight -= lw * v2;
    grad_w_weight += hh * v2;
    atomicAdd(grad_value + ptr2, w2 * top_grad_value);
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0) {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
    grad_h_weight += hw * v3;
    grad_w_weight -= lh * v3;
    atomicAdd(grad_value + ptr3, w3 * top_grad_value);
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1) {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
    grad_h_weight += lw * v4;
    grad_w_weight += lh * v4;
    atomicAdd(grad_value + ptr4, w4 * top_grad_value);
  }
  // Gradients w.r.t. attention weight and (normalized) sampling location.
  // The width/height factors undo the loc -> pixel scaling done by callers.
  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  *grad_attn_weight = top_grad * val;
  *grad_sampling_loc = width * grad_w_weight * top_grad_value;
  *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value;
}
// Same backward computation as ms_deform_attn_col2im_bilinear, but the final
// grad_sampling_loc / grad_attn_weight writes also use atomicAdd — this
// variant is safe when those pointers alias global memory shared by multiple
// threads (no per-thread scratch / block reduction).
template <typename scalar_t>
__device__ void ms_deform_attn_col2im_bilinear_gm(
    const scalar_t *&bottom_data, const int &height, const int &width,
    const int &nheads, const int &channels, const scalar_t &h,
    const scalar_t &w, const int &m, const int &c, const scalar_t &top_grad,
    const scalar_t &attn_weight, scalar_t *&grad_value,
    scalar_t *grad_sampling_loc, scalar_t *grad_attn_weight) {
  const int h_low = floorf(h);
  const int w_low = floorf(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;
  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;
  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  const scalar_t top_grad_value = top_grad * attn_weight;
  scalar_t grad_h_weight = 0, grad_w_weight = 0;
  // Per-corner value-gradient scatter, identical to the non-gm variant.
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0) {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
    grad_h_weight -= hw * v1;
    grad_w_weight -= hh * v1;
    atomicAdd(grad_value + ptr1, w1 * top_grad_value);
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1) {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
    grad_h_weight -= lw * v2;
    grad_w_weight += hh * v2;
    atomicAdd(grad_value + ptr2, w2 * top_grad_value);
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0) {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
    grad_h_weight += hw * v3;
    grad_w_weight -= lh * v3;
    atomicAdd(grad_value + ptr3, w3 * top_grad_value);
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1) {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
    grad_h_weight += lw * v4;
    grad_w_weight += lh * v4;
    atomicAdd(grad_value + ptr4, w4 * top_grad_value);
  }
  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  // Atomic accumulation: several threads may target the same output slot.
  atomicAdd(grad_attn_weight, top_grad * val);
  atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value);
  atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value);
}
// Forward kernel of multi-scale deformable attention.
// One (grid-stride) thread per output element, where the flat index decodes
// to (batch b_col, query, head m_col, channel c_col). Each thread sums
// attention-weighted bilinear samples over all feature levels and sampling
// points and writes the scalar result to data_col.
template <typename scalar_t>
__global__ void ms_deformable_im2col_gpu_kernel(
    const int n, const scalar_t *data_value, const int64_t *data_spatial_shapes,
    const int64_t *data_level_start_index, const scalar_t *data_sampling_loc,
    const scalar_t *data_attn_weight, const int batch_size,
    const int spatial_size, const int num_heads, const int channels,
    const int num_levels, const int num_query, const int num_point,
    scalar_t *data_col) {
  CUDA_KERNEL_LOOP(index, n) {
    // Decode the flat index: fastest-varying is channel, then head, then
    // query, leaving the batch index in _temp.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    _temp /= num_query;
    const int b_col = _temp;
    scalar_t *data_col_ptr = data_col + index;
    // Per-(b, q, head) bases into the sampling-location / weight tensors;
    // locations are interleaved (w, h) pairs, hence the << 1.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    scalar_t col = 0;
    for (int l_col = 0; l_col < num_levels; ++l_col) {
      // Each level's flattened features start at data_level_start_index.
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const scalar_t *data_value_ptr =
          data_value +
          (data_value_ptr_init_offset + level_start_id * qid_stride);
      for (int p_col = 0; p_col < num_point; ++p_col) {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        // Scale the normalized location by the level size; the -0.5 shift
        // matches the bilinear sampler's corner convention.
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Skip samples that fall entirely outside the feature map.
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) {
          col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h,
                                                spatial_w, num_heads, channels,
                                                h_im, w_im, m_col, c_col) *
                 weight;
        }
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
      }
    }
    *data_col_ptr = col;
  }
}
// Backward kernel (col2im), variant v1 with a compile-time block size:
// each thread computes per-sample partial gradients into shared-memory
// scratch; thread 0 then sums all blockSize partials with a serial loop and
// writes grad_sampling_loc / grad_attn_weight with plain stores.
// grad_value is accumulated via atomicAdd inside the bilinear helper.
template <typename scalar_t, unsigned int blockSize>
__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(
    const int n, const scalar_t *grad_col, const scalar_t *data_value,
    const int64_t *data_spatial_shapes, const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight,
    const int batch_size, const int spatial_size, const int num_heads,
    const int channels, const int num_levels, const int num_query,
    const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight) {
  CUDA_KERNEL_LOOP(index, n) {
    // Per-thread scratch: (w, h) location-gradient pair + one weight gradient.
    __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
    __shared__ scalar_t cache_grad_attn_weight[blockSize];
    unsigned int tid = threadIdx.x;
    // Decode flat index -> (batch, query, head, channel); see forward kernel.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col = 0; l_col < num_levels; ++l_col) {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset =
          data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col = 0; p_col < num_point; ++p_col) {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Clear this thread's scratch before the (conditional) contribution.
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im,
              w_im, m_col, c_col, top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1),
              cache_grad_attn_weight + threadIdx.x);
        }
        __syncthreads();
        if (tid == 0) {
          scalar_t _grad_w = cache_grad_sampling_loc[0],
                   _grad_h = cache_grad_sampling_loc[1],
                   _grad_a = cache_grad_attn_weight[0];
          int sid = 2;
          // NOTE(review): this loop variable shadows the outer `tid` (which
          // is 0 in this branch) — harmless but confusing.
          for (unsigned int tid = 1; tid < blockSize; ++tid) {
            _grad_w += cache_grad_sampling_loc[sid];
            _grad_h += cache_grad_sampling_loc[sid + 1];
            _grad_a += cache_grad_attn_weight[tid];
            sid += 2;
          }
          // Plain stores: presumably each block owns its output slots here
          // (cf. the *_multi_blocks variant, which uses atomicAdd).
          *grad_sampling_loc = _grad_w;
          *(grad_sampling_loc + 1) = _grad_h;
          *grad_attn_weight = _grad_a;
        }
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel (col2im), variant v2 with a compile-time block size:
// same per-thread partial-gradient computation as v1, but the block-wide sum
// uses a parallel tree reduction (halving stride each step). blockSize is
// assumed to be a power of two by the `s = blockSize / 2; s >>= 1` schedule.
template <typename scalar_t, unsigned int blockSize>
__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(
    const int n, const scalar_t *grad_col, const scalar_t *data_value,
    const int64_t *data_spatial_shapes, const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight,
    const int batch_size, const int spatial_size, const int num_heads,
    const int channels, const int num_levels, const int num_query,
    const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight) {
  CUDA_KERNEL_LOOP(index, n) {
    // Per-thread scratch: (w, h) location-gradient pair + one weight gradient.
    __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
    __shared__ scalar_t cache_grad_attn_weight[blockSize];
    unsigned int tid = threadIdx.x;
    // Decode flat index -> (batch, query, head, channel); see forward kernel.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col = 0; l_col < num_levels; ++l_col) {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset =
          data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col = 0; p_col < num_point; ++p_col) {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Clear this thread's scratch before the (conditional) contribution.
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im,
              w_im, m_col, c_col, top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1),
              cache_grad_attn_weight + threadIdx.x);
        }
        __syncthreads();
        // Tree reduction over the block's partial gradients.
        for (unsigned int s = blockSize / 2; s > 0; s >>= 1) {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] +=
                cache_grad_sampling_loc[xid2 + 1];
          }
          __syncthreads();
        }
        if (tid == 0) {
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel (col2im) with RUNTIME block size: scratch lives in
// dynamically-sized extern shared memory (launcher must pass
// blockDim.x * 3 * sizeof(scalar_t) as the shared-memory size), and thread 0
// sums the partials with a serial loop over blockDim.x.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(
    const int n, const scalar_t *grad_col, const scalar_t *data_value,
    const int64_t *data_spatial_shapes, const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight,
    const int batch_size, const int spatial_size, const int num_heads,
    const int channels, const int num_levels, const int num_query,
    const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight) {
  CUDA_KERNEL_LOOP(index, n) {
    // Dynamic shared memory carved into a location buffer (2 per thread)
    // followed by a weight buffer (1 per thread).
    extern __shared__ int _s[];
    scalar_t *cache_grad_sampling_loc = reinterpret_cast<scalar_t *>(_s);
    scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decode flat index -> (batch, query, head, channel); see forward kernel.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col = 0; l_col < num_levels; ++l_col) {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset =
          data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col = 0; p_col < num_point; ++p_col) {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Clear this thread's scratch before the (conditional) contribution.
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im,
              w_im, m_col, c_col, top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1),
              cache_grad_attn_weight + threadIdx.x);
        }
        __syncthreads();
        if (tid == 0) {
          scalar_t _grad_w = cache_grad_sampling_loc[0],
                   _grad_h = cache_grad_sampling_loc[1],
                   _grad_a = cache_grad_attn_weight[0];
          int sid = 2;
          // NOTE(review): this loop variable shadows the outer `tid` (which
          // is 0 in this branch) — harmless but confusing.
          for (unsigned int tid = 1; tid < blockDim.x; ++tid) {
            _grad_w += cache_grad_sampling_loc[sid];
            _grad_h += cache_grad_sampling_loc[sid + 1];
            _grad_a += cache_grad_attn_weight[tid];
            sid += 2;
          }
          *grad_sampling_loc = _grad_w;
          *(grad_sampling_loc + 1) = _grad_h;
          *grad_attn_weight = _grad_a;
        }
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel (col2im) with RUNTIME block size and a parallel tree
// reduction. The extra `spre` bookkeeping folds in the leftover element when
// the active width is odd, so blockDim.x need not be a power of two.
// Launcher must pass blockDim.x * 3 * sizeof(scalar_t) of shared memory.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(
    const int n, const scalar_t *grad_col, const scalar_t *data_value,
    const int64_t *data_spatial_shapes, const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight,
    const int batch_size, const int spatial_size, const int num_heads,
    const int channels, const int num_levels, const int num_query,
    const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight) {
  CUDA_KERNEL_LOOP(index, n) {
    // Dynamic shared memory: location buffer (2/thread) then weights (1/thread).
    extern __shared__ int _s[];
    scalar_t *cache_grad_sampling_loc = reinterpret_cast<scalar_t *>(_s);
    scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decode flat index -> (batch, query, head, channel); see forward kernel.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col = 0; l_col < num_levels; ++l_col) {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset =
          data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col = 0; p_col < num_point; ++p_col) {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Clear this thread's scratch before the (conditional) contribution.
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im,
              w_im, m_col, c_col, top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1),
              cache_grad_attn_weight + threadIdx.x);
        }
        __syncthreads();
        // Tree reduction; `spre` tracks the previous active width so the odd
        // leftover element (tid + 2s < spre) is folded in as well.
        for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0;
             s >>= 1, spre >>= 1) {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] +=
                cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre) {
              cache_grad_attn_weight[tid] +=
                  cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] +=
                  cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] +=
                  cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }
        if (tid == 0) {
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel (col2im), tree-reduction variant for the case where SEVERAL
// blocks contribute to the same output slots: identical to shm_reduce_v2
// except the final per-block result is accumulated with atomicAdd instead of
// plain stores. Launcher must pass blockDim.x * 3 * sizeof(scalar_t) of
// shared memory.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(
    const int n, const scalar_t *grad_col, const scalar_t *data_value,
    const int64_t *data_spatial_shapes, const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight,
    const int batch_size, const int spatial_size, const int num_heads,
    const int channels, const int num_levels, const int num_query,
    const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight) {
  CUDA_KERNEL_LOOP(index, n) {
    // Dynamic shared memory: location buffer (2/thread) then weights (1/thread).
    extern __shared__ int _s[];
    scalar_t *cache_grad_sampling_loc = reinterpret_cast<scalar_t *>(_s);
    scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decode flat index -> (batch, query, head, channel); see forward kernel.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col = 0; l_col < num_levels; ++l_col) {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset =
          data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col = 0; p_col < num_point; ++p_col) {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Clear this thread's scratch before the (conditional) contribution.
        *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight + threadIdx.x) = 0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) {
          ms_deform_attn_col2im_bilinear(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im,
              w_im, m_col, c_col, top_grad, weight, grad_value_ptr,
              cache_grad_sampling_loc + (threadIdx.x << 1),
              cache_grad_attn_weight + threadIdx.x);
        }
        __syncthreads();
        // Tree reduction with odd-width fold-in (see shm_reduce_v2).
        for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0;
             s >>= 1, spre >>= 1) {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] +=
                cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre) {
              cache_grad_attn_weight[tid] +=
                  cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] +=
                  cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] +=
                  cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }
        if (tid == 0) {
          // Atomic: other blocks may add into the same output slots.
          atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);
          atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);
          atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);
        }
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Global-memory fallback of the multi-scale deformable attention backward
// ("col2im") kernel: each thread handles one (batch, query, head, channel)
// gradient element and accumulates grad_value / grad_sampling_loc /
// grad_attn_weight via atomics inside ms_deform_attn_col2im_bilinear_gm --
// no shared-memory reduction.  Chosen by the dispatcher when `channels` is
// too large/irregular for the shared-memory variants.
//
//   n                 -- total gradient elements = batch * query * heads * channels
//   grad_col          -- upstream gradient w.r.t. the attention output
//   data_value        -- flattened multi-level value tensor
//   data_spatial_shapes / data_level_start_index -- per-level (H, W) and offsets
//   data_sampling_loc / data_attn_weight -- sampling points and weights
//   grad_value / grad_sampling_loc / grad_attn_weight -- outputs (accumulated)
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_gm(
    const int n, const scalar_t *grad_col, const scalar_t *data_value,
    const int64_t *data_spatial_shapes, const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight,
    const int batch_size, const int spatial_size, const int num_heads,
    const int channels, const int num_levels, const int num_query,
    const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight) {
  CUDA_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (batch, query, head, channel).
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;  // flat (batch, query, head) index
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Cursors into this (batch, query, head)'s sampling locations / weights;
    // advanced over all levels and points in the loops below.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;  // locations are (w, h) pairs
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col = 0; l_col < num_levels; ++l_col) {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      // Base of this level's slice inside value / grad_value.
      const int value_ptr_offset =
          data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col = 0; p_col < num_point; ++p_col) {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        // Scale locations to this level's pixel coordinates (the 0.5 shift
        // matches a pixel-center convention -- same formula as the forward).
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Skip samples whose bilinear footprint is fully outside the map.
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) {
          ms_deform_attn_col2im_bilinear_gm(
              data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im,
              w_im, m_col, c_col, top_grad, weight, grad_value_ptr,
              grad_sampling_loc, grad_attn_weight);
        }
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/ms_deform_attn/ms_deform_attn.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from
*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#include "ms_deform_attn_cuda_kernel.h"
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// Dispatches the multi-scale deformable attention backward ("col2im") pass
// to the most suitable CUDA kernel for the given channel count:
//   * channels <= 1024 and a power of two -> a blocksize-specialized
//     shared-memory reduction kernel (v1 up to 32 channels, v2 from 64);
//   * other channels <= 1024 -> generic shared-memory reduction
//     (v1 below 64 channels, v2 otherwise);
//   * channels > 1024 -> multi-block shared-memory reduction when channels
//     is a multiple of 1024, otherwise the global-memory atomic fallback.
// Gradients are accumulated into grad_value / grad_sampling_loc /
// grad_attn_weight, which the caller must have zero-initialized.
template <typename scalar_t>
void ms_deformable_col2im_cuda(
    cudaStream_t stream, const scalar_t *grad_col, const scalar_t *data_value,
    const int64_t *data_spatial_shapes, const int64_t *data_level_start_index,
    const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight,
    const int batch_size, const int spatial_size, const int num_heads,
    const int channels, const int num_levels, const int num_query,
    const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc,
    scalar_t *grad_attn_weight) {
  // One thread per channel (capped at CUDA_NUM_THREADS); one logical work
  // item per output gradient element.
  const int num_threads =
      (channels > CUDA_NUM_THREADS) ? CUDA_NUM_THREADS : channels;
  const int num_kernels = batch_size * num_query * num_heads * channels;
  const int num_actual_kernels = batch_size * num_query * num_heads * channels;

// Argument list shared by every kernel launch below.
#define MSDA_COL2IM_ARGS                                                    \
  num_kernels, grad_col, data_value, data_spatial_shapes,                   \
      data_level_start_index, data_sampling_loc, data_attn_weight,          \
      batch_size, spatial_size, num_heads, channels, num_levels, num_query, \
      num_point, grad_value, grad_sampling_loc, grad_attn_weight
// Launch a blocksize-specialized shared-memory reduction kernel.
#define MSDA_LAUNCH_BLOCKSIZE(KERNEL, BLOCK)                          \
  KERNEL<scalar_t, BLOCK>                                             \
      <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, \
         stream>>>(MSDA_COL2IM_ARGS)

  if (channels > 1024) {
    if ((channels & 1023) == 0) {
      // channels is a multiple of 1024: multi-block shm reduction.
      ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>
          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
             num_threads * 3 * sizeof(scalar_t), stream>>>(MSDA_COL2IM_ARGS);
    } else {
      // Irregular large channel count: global-memory atomic fallback.
      ms_deformable_col2im_gpu_kernel_gm<scalar_t>
          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0,
             stream>>>(MSDA_COL2IM_ARGS);
    }
  } else {
    switch (channels) {
      // Power-of-two blocksizes 1..32 use the v1 blocksize-aware kernel.
      case 1:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1, 1);
        break;
      case 2:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1, 2);
        break;
      case 4:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1, 4);
        break;
      case 8:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1, 8);
        break;
      case 16:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1, 16);
        break;
      case 32:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1, 32);
        break;
      // Power-of-two blocksizes 64..1024 use the v2 blocksize-aware kernel.
      case 64:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2, 64);
        break;
      case 128:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2, 128);
        break;
      case 256:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2, 256);
        break;
      case 512:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2, 512);
        break;
      case 1024:
        MSDA_LAUNCH_BLOCKSIZE(
            ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2,
            1024);
        break;
      default:
        // Non-power-of-two channel count: generic shm reduction with
        // dynamically sized shared memory (3 scalars per thread).
        if (channels < 64) {
          ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>
              <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                 num_threads * 3 * sizeof(scalar_t), stream>>>(
                  MSDA_COL2IM_ARGS);
        } else {
          ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>
              <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                 num_threads * 3 * sizeof(scalar_t), stream>>>(
                  MSDA_COL2IM_ARGS);
        }
    }
  }
#undef MSDA_LAUNCH_BLOCKSIZE
#undef MSDA_COL2IM_ARGS
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
  }
}
// Forward pass of multi-scale deformable attention on GPU.
//
//   value              -- 4-D tensor read as (batch, spatial_size, num_heads,
//                         channels) judging by the shape accesses below
//   spatial_shapes     -- per-level (H, W) as int64
//   level_start_index  -- per-level start offsets into `value`, int64
//   sampling_locations -- 5th dim gives num_point; per-sample stride implies
//                         layout (batch, num_query, heads, levels, points, 2)
//                         -- TODO confirm against the Python caller
//   attention_weights  -- matching weights
//   im2col_step        -- batch samples processed per kernel launch
//
// Returns one tensor of shape (batch, num_query, num_heads * channels).
std::vector<paddle::Tensor> ms_deform_attn_forward_cuda(
    const paddle::Tensor &value, const paddle::Tensor &spatial_shapes,
    const paddle::Tensor &level_start_index,
    const paddle::Tensor &sampling_locations,
    const paddle::Tensor &attention_weights, const int im2col_step) {
  CHECK_INPUT(value);
  CHECK_INPUT(spatial_shapes);
  CHECK_INPUT(level_start_index);
  CHECK_INPUT(sampling_locations);
  CHECK_INPUT(attention_weights);
  const int batch = value.shape()[0];
  const int spatial_size = value.shape()[1];
  const int num_heads = value.shape()[2];
  const int channels = value.shape()[3];
  const int num_levels = spatial_shapes.shape()[0];
  const int num_query = sampling_locations.shape()[1];
  const int num_point = sampling_locations.shape()[4];
  // Never process more samples per launch than exist in the batch.
  const int im2col_step_ = std::min(batch, im2col_step);
  PD_CHECK(batch % im2col_step_ == 0, "batch(", batch,
           ") must divide im2col_step(", im2col_step_, ")");
  auto output = paddle::full({batch, num_query, num_heads * channels}, 0,
                             value.type(), paddle::GPUPlace());
  // Per-sample strides into the flattened tensors.
  auto per_value_size = spatial_size * num_heads * channels;
  auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
  auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
  auto per_output_size = num_query * num_heads * channels;
  // Process the batch in chunks of im2col_step_ samples.
  for (int n = 0; n < batch / im2col_step_; ++n) {
    const int num_kernels = im2col_step_ * per_output_size;
    const int num_actual_kernels = im2col_step_ * per_output_size;
    const int num_threads = CUDA_NUM_THREADS;
    PD_DISPATCH_FLOATING_TYPES(
        value.type(), "ms_deform_attn_forward_cuda", ([&] {
          ms_deformable_im2col_gpu_kernel<data_t>
              <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0,
                 value.stream()>>>(
                  num_kernels,
                  value.data<data_t>() + n * im2col_step_ * per_value_size,
                  spatial_shapes.data<int64_t>(),
                  level_start_index.data<int64_t>(),
                  sampling_locations.data<data_t>() +
                      n * im2col_step_ * per_sample_loc_size,
                  attention_weights.data<data_t>() +
                      n * im2col_step_ * per_attn_weight_size,
                  im2col_step_, spatial_size, num_heads, channels, num_levels,
                  num_query, num_point,
                  output.data<data_t>() + n * im2col_step_ * per_output_size);
        }));
  }
  return {output};
}
// Backward pass of multi-scale deformable attention on GPU.
//
//   grad_out -- gradient of the forward output, so it has the forward
//               output's layout: (batch, num_query, num_heads * channels)
//   remaining inputs -- same tensors as the forward pass
//
// Returns zero-initialized-then-accumulated gradients for value,
// sampling_locations and attention_weights (in that order).
std::vector<paddle::Tensor> ms_deform_attn_backward_cuda(
    const paddle::Tensor &grad_out, const paddle::Tensor &value,
    const paddle::Tensor &spatial_shapes,
    const paddle::Tensor &level_start_index,
    const paddle::Tensor &sampling_locations,
    const paddle::Tensor &attention_weights, const int im2col_step) {
  CHECK_INPUT(value);
  CHECK_INPUT(spatial_shapes);
  CHECK_INPUT(level_start_index);
  CHECK_INPUT(sampling_locations);
  CHECK_INPUT(attention_weights);
  CHECK_INPUT(grad_out);
  const int batch = value.shape()[0];
  const int spatial_size = value.shape()[1];
  const int num_heads = value.shape()[2];
  const int channels = value.shape()[3];
  const int num_levels = spatial_shapes.shape()[0];
  const int num_query = sampling_locations.shape()[1];
  const int num_point = sampling_locations.shape()[4];
  const int im2col_step_ = std::min(batch, im2col_step);
  // Same precondition as the forward pass: the chunked loop below only
  // covers the whole batch when im2col_step_ divides it.
  PD_CHECK(batch % im2col_step_ == 0, "batch(", batch,
           ") must divide im2col_step(", im2col_step_, ")");
  // Per-sample strides into the flattened tensors.
  auto per_value_size = spatial_size * num_heads * channels;
  auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
  auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
  // BUGFIX: grad_out's per-sample size is num_query * num_heads * channels
  // (the forward output layout), not per_value_size; the old code offset
  // grad_out by per_value_size, mis-addressing it whenever
  // batch > im2col_step_ (n > 0).
  auto per_output_size = num_query * num_heads * channels;
  auto grad_value =
      paddle::full(value.shape(), 0, value.type(), paddle::GPUPlace());
  auto grad_sampling_loc =
      paddle::full(sampling_locations.shape(), 0, sampling_locations.type(),
                   paddle::GPUPlace());
  auto grad_attn_weight =
      paddle::full(attention_weights.shape(), 0, attention_weights.type(),
                   paddle::GPUPlace());
  // Process the batch in chunks of im2col_step_ samples.
  for (int n = 0; n < batch / im2col_step_; ++n) {
    PD_DISPATCH_FLOATING_TYPES(
        value.type(), "ms_deform_attn_backward_cuda", ([&] {
          ms_deformable_col2im_cuda<data_t>(
              value.stream(),
              grad_out.data<data_t>() + n * im2col_step_ * per_output_size,
              value.data<data_t>() + n * im2col_step_ * per_value_size,
              spatial_shapes.data<int64_t>(), level_start_index.data<int64_t>(),
              sampling_locations.data<data_t>() +
                  n * im2col_step_ * per_sample_loc_size,
              attention_weights.data<data_t>() +
                  n * im2col_step_ * per_attn_weight_size,
              im2col_step_, spatial_size, num_heads, channels, num_levels,
              num_query, num_point,
              grad_value.data<data_t>() + n * im2col_step_ * per_value_size,
              grad_sampling_loc.data<data_t>() +
                  n * im2col_step_ * per_sample_loc_size,
              grad_attn_weight.data<data_t>() +
                  n * im2col_step_ * per_attn_weight_size);
        }));
  }
  return {grad_value, grad_sampling_loc, grad_attn_weight};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/ms_deform_attn/ms_deform_attn.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
std::vector<paddle::Tensor> ms_deform_attn_forward_cuda(
const paddle::Tensor &value, const paddle::Tensor &spatial_shapes,
const paddle::Tensor &level_start_index,
const paddle::Tensor &sampling_locations,
const paddle::Tensor &attention_weights, const int im2col_step);
std::vector<paddle::Tensor> ms_deform_attn_backward_cuda(
const paddle::Tensor &grad_out, const paddle::Tensor &value,
const paddle::Tensor &spatial_shapes,
const paddle::Tensor &level_start_index,
const paddle::Tensor &sampling_locations,
const paddle::Tensor &attention_weights, const int im2col_step);
// Device dispatcher for the ms_deform_attn gradient op.  Only a GPU
// implementation exists, so non-GPU inputs are rejected up front.
std::vector<paddle::Tensor> ms_deform_attn_backward(
    const paddle::Tensor &grad_out, const paddle::Tensor &value,
    const paddle::Tensor &sampling_locations,
    const paddle::Tensor &attention_weights,
    const paddle::Tensor &spatial_shapes,
    const paddle::Tensor &level_start_index, const int im2col_step) {
  // Guard clause instead of if/else: fail fast on unsupported placements.
  if (!value.is_gpu()) {
    PD_THROW(
        "Unsupported device type for ms_deform_attn_backward "
        "operator.");
  }
  return ms_deform_attn_backward_cuda(grad_out, value, spatial_shapes,
                                      level_start_index, sampling_locations,
                                      attention_weights, im2col_step);
}
// Device dispatcher for the ms_deform_attn forward op.  Only a GPU
// implementation exists, so non-GPU inputs are rejected up front.
std::vector<paddle::Tensor> ms_deform_attn_forward(
    const paddle::Tensor &value, const paddle::Tensor &sampling_locations,
    const paddle::Tensor &attention_weights,
    const paddle::Tensor &spatial_shapes,
    const paddle::Tensor &level_start_index, const int im2col_step) {
  // Guard clause instead of if/else: fail fast on unsupported placements.
  if (!value.is_gpu()) {
    PD_THROW(
        "Unsupported device type for ms_deform_attn_forward "
        "operator.");
  }
  return ms_deform_attn_forward_cuda(value, spatial_shapes, level_start_index,
                                     sampling_locations, attention_weights,
                                     im2col_step);
}
// Shape inference for the ms_deform_attn op: the output is
// (batch, num_query, num_heads * channels), derived from the value and
// sampling-location shapes.  The remaining shape arguments are required by
// the registration signature but unused.
std::vector<std::vector<int64_t>> MsDeformAttrnInferShape(
    std::vector<int64_t> value_shape,
    std::vector<int64_t> sampling_locations_shape,
    std::vector<int64_t> attention_weights_shape,
    std::vector<int64_t> spatial_shapes_shape,
    std::vector<int64_t> level_start_index_shape) {
  const int64_t batch = value_shape[0];
  const int64_t num_query = sampling_locations_shape[1];
  const int64_t embed_dim = value_shape[2] * value_shape[3];
  return {{batch, num_query, embed_dim}};
}
// Dtype inference for the ms_deform_attn op: the output inherits the dtype
// of `value`; the other dtype arguments exist only to match the op's input
// list and are unused.
std::vector<paddle::DataType> MsDeformAttrnInferDtype(
    paddle::DataType value_dtype, paddle::DataType sampling_locations_dtype,
    paddle::DataType attention_weights_dtype, paddle::DataType spatial_shapes,
    paddle::DataType level_start_index_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(value_dtype);
  return out_dtypes;
}
// build forward op
// Registers the custom `ms_deform_attn` operator with Paddle; the input
// order declared here is the order Python callers must pass tensors in.
PD_BUILD_OP(ms_deform_attn)
    .Inputs({"value", "sampling_locations", "attention_weights",
             "spatial_shapes", "level_start_index"})
    .Attrs({"im2col_step: int"})
    .Outputs({"out"})
    .SetKernelFn(PD_KERNEL(ms_deform_attn_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(MsDeformAttrnInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(MsDeformAttrnInferDtype));
// build backward op
// Gradient op: consumes grad of "out" plus the forward inputs and produces
// gradients for value, sampling_locations and attention_weights.
PD_BUILD_GRAD_OP(ms_deform_attn)
    .Inputs({paddle::Grad("out"), "value", "sampling_locations",
             "attention_weights", "spatial_shapes", "level_start_index"})
    .Attrs({"im2col_step: int"})
    .Outputs({paddle::Grad("value"), paddle::Grad("sampling_locations"),
              paddle::Grad("attention_weights")})
    // .Outputs({paddle::Grad("value")})
    .SetKernelFn(PD_KERNEL(ms_deform_attn_backward));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/centerpoint_postprocess/iou3d_nms_kernel.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8;
const float EPS = 1e-8;
// Minimal 2D point used by the rotated-box (BEV) overlap routines below.
// All members are device-side.
struct Point {
  float x, y;
  __device__ Point() {}
  // NOTE(review): takes doubles but stores floats (narrowing) -- kept as-is.
  __device__ Point(double _x, double _y) { x = _x, y = _y; }
  __device__ void set(float _x, float _y) {
    x = _x;
    y = _y;
  }
  __device__ Point operator+(const Point &b) const {
    return Point(x + b.x, y + b.y);
  }
  __device__ Point operator-(const Point &b) const {
    return Point(x - b.x, y - b.y);
  }
};
// 2D cross product (z-component) of vectors a and b.
__device__ inline float cross(const Point &a, const Point &b) {
  return a.x * b.y - a.y * b.x;
}
// 2D cross product of (p1 - p0) x (p2 - p0); its sign tells which side of
// the directed segment p0->p1 the point p2 lies on.
__device__ inline float cross(const Point &p1, const Point &p2,
                              const Point &p0) {
  return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
// Fast rejection test: returns 1 when the axis-aligned bounding boxes of
// segments (p1, p2) and (q1, q2) overlap, 0 otherwise.
__device__ int check_rect_cross(const Point &p1, const Point &p2,
                                const Point &q1, const Point &q2) {
  int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
            min(q1.x, q2.x) <= max(p1.x, p2.x) &&
            min(p1.y, p2.y) <= max(q1.y, q2.y) &&
            min(q1.y, q2.y) <= max(p1.y, p2.y);
  return ret;
}
// Returns 1 if point p lies inside the rotated box's BEV footprint (with a
// small tolerance MARGIN): rotates p into the box's local frame and compares
// against the half-extents dx/2, dy/2.
__device__ inline int check_in_box2d(const float *box, const Point &p) {
  // params: (7) [x, y, z, dx, dy, dz, heading]
  const float MARGIN = 1e-2;
  float center_x = box[0], center_y = box[1];
  // rotate the point in the opposite direction of box
  float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]);
  float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
  float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;
  return (fabs(rot_x) < box[3] / 2 + MARGIN &&
          fabs(rot_y) < box[4] / 2 + MARGIN);
}
// Intersects segments (p0, p1) and (q0, q1): returns 1 and writes the
// intersection point to *ans when the segments properly cross, 0 otherwise.
__device__ inline int intersection(const Point &p1, const Point &p0,
                                   const Point &q1, const Point &q0,
                                   Point *ans) {
  // fast exclusion
  if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
  // check cross standing
  // Both segments must straddle each other's supporting line (strict
  // signed-area products), so touching endpoints do not count.
  float s1 = cross(q0, p1, p0);
  float s2 = cross(p1, q1, p0);
  float s3 = cross(p0, q1, q0);
  float s4 = cross(q1, p1, q0);
  if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
  // calculate intersection of two lines
  float s5 = cross(q1, p1, p0);
  if (fabs(s5 - s1) > EPS) {
    // Interpolate along (q0, q1) using the signed-area ratio s1 : s5.
    ans->x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
    ans->y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
  } else {
    // Near-degenerate ratio: fall back to solving the two line equations
    // a*x + b*y + c = 0 by Cramer's rule.
    float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
    float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
    float D = a0 * b1 - a1 * b0;
    ans->x = (b0 * c1 - b1 * c0) / D;
    ans->y = (a1 * c0 - a0 * c1) / D;
  }
  return 1;
}
// Rotates *p in place around `center` by the angle whose cosine/sine are
// supplied (counter-clockwise for a positive angle).
__device__ inline void rotate_around_center(const Point &center,
                                            const float angle_cos,
                                            const float angle_sin, Point *p) {
  float new_x = (p->x - center.x) * angle_cos +
                (p->y - center.y) * (-angle_sin) + center.x;
  float new_y =
      (p->x - center.x) * angle_sin + (p->y - center.y) * angle_cos + center.y;
  p->set(new_x, new_y);
}
// Comparator ordering polygon vertices by polar angle around `center`
// (used by the bubble sort in box_overlap).
__device__ inline int point_cmp(const Point &a, const Point &b,
                                const Point &center) {
  return atan2(a.y - center.y, a.x - center.x) >
         atan2(b.y - center.y, b.x - center.x);
}
// Exact BEV intersection area of two rotated boxes.
// Algorithm: build each box's rotated corners, collect (a) every edge-edge
// intersection point and (b) each box's corners that lie inside the other
// box, sort the collected vertices by polar angle around their centroid,
// then integrate the polygon area with the shoelace formula.
__device__ inline float box_overlap(const float *box_a, const float *box_b) {
  // params box_a: [x, y, z, dx, dy, dz, heading]
  // params box_b: [x, y, z, dx, dy, dz, heading]
  float a_angle = box_a[6], b_angle = box_b[6];
  float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2,
        a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
  // Axis-aligned (un-rotated) corner extents of each box.
  float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
  float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
  float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
  float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
  Point center_a(box_a[0], box_a[1]);
  Point center_b(box_b[0], box_b[1]);
#ifdef DEBUG
  printf(
      "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n",
      a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
  printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
         center_b.x, center_b.y);
#endif
  // Corners 0..3; slot 4 later duplicates corner 0 to close the polygon.
  Point box_a_corners[5];
  box_a_corners[0].set(a_x1, a_y1);
  box_a_corners[1].set(a_x2, a_y1);
  box_a_corners[2].set(a_x2, a_y2);
  box_a_corners[3].set(a_x1, a_y2);
  Point box_b_corners[5];
  box_b_corners[0].set(b_x1, b_y1);
  box_b_corners[1].set(b_x2, b_y1);
  box_b_corners[2].set(b_x2, b_y2);
  box_b_corners[3].set(b_x1, b_y2);
  // get oriented corners
  float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
  float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
  for (int k = 0; k < 4; k++) {
#ifdef DEBUG
    printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
           box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
           box_b_corners[k].y);
#endif
    rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners + k);
    rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners + k);
#ifdef DEBUG
    printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x,
           box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
  }
  box_a_corners[4] = box_a_corners[0];
  box_b_corners[4] = box_b_corners[0];
  // get intersection of lines
  // Up to 16 candidate polygon vertices (4x4 possible edge crossings).
  Point cross_points[16];
  Point poly_center;
  int cnt = 0, flag = 0;
  poly_center.set(0, 0);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                          box_b_corners[j + 1], box_b_corners[j],
                          cross_points + cnt);
      if (flag) {
        poly_center = poly_center + cross_points[cnt];
        cnt++;
#ifdef DEBUG
        printf(
            "Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), "
            "b(%.3f, %.3f)->(%.3f, %.3f) \n",
            cross_points[cnt - 1].x, cross_points[cnt - 1].y,
            box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x,
            box_a_corners[i + 1].y, box_b_corners[i].x, box_b_corners[i].y,
            box_b_corners[i + 1].x, box_b_corners[i + 1].y);
#endif
      }
    }
  }
  // check corners
  // Corners of one box contained in the other are also polygon vertices.
  for (int k = 0; k < 4; k++) {
    if (check_in_box2d(box_a, box_b_corners[k])) {
      poly_center = poly_center + box_b_corners[k];
      cross_points[cnt] = box_b_corners[k];
      cnt++;
#ifdef DEBUG
      printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x,
             cross_points[cnt - 1].y);
#endif
    }
    if (check_in_box2d(box_b, box_a_corners[k])) {
      poly_center = poly_center + box_a_corners[k];
      cross_points[cnt] = box_a_corners[k];
      cnt++;
#ifdef DEBUG
      printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x,
             cross_points[cnt - 1].y);
#endif
    }
  }
  // Centroid of the collected vertices.
  // NOTE(review): when cnt == 0 (disjoint boxes) these divisions are 0/0
  // (NaN), but the sort and area loops below are then skipped and the
  // function returns 0, so the NaN never propagates.
  poly_center.x /= cnt;
  poly_center.y /= cnt;
  // sort the points of polygon
  // Bubble sort by polar angle around the centroid (cnt <= 16, so cheap).
  Point temp;
  for (int j = 0; j < cnt - 1; j++) {
    for (int i = 0; i < cnt - j - 1; i++) {
      if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
        temp = cross_points[i];
        cross_points[i] = cross_points[i + 1];
        cross_points[i + 1] = temp;
      }
    }
  }
#ifdef DEBUG
  printf("cnt=%d\n", cnt);
  for (int i = 0; i < cnt; i++) {
    printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
           cross_points[i].y);
  }
#endif
  // get the overlap areas
  // Shoelace formula over the triangle fan anchored at vertex 0.
  float area = 0;
  for (int k = 0; k < cnt - 1; k++) {
    area += cross(cross_points[k] - cross_points[0],
                  cross_points[k + 1] - cross_points[0]);
  }
  return fabs(area) / 2.0;
}
// BEV IoU of two rotated boxes: exact intersection area over the union of
// their footprints (dx * dy).  EPS guards against division by zero.
__device__ inline float iou_bev(const float *box_a, const float *box_b) {
  // params box_a: [x, y, z, dx, dy, dz, heading]
  // params box_b: [x, y, z, dx, dy, dz, heading]
  float sa = box_a[3] * box_a[4];
  float sb = box_b[3] * box_b[4];
  float s_overlap = box_overlap(box_a, box_b);
  return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
// Pairwise rotated-NMS kernel: each block compares one 64-box row tile
// against one 64-box column tile (THREADS_PER_BLOCK_NMS = 64) and records,
// per row box, a 64-bit mask of column boxes it suppresses
// (iou_bev > nms_overlap_thresh).  Boxes are fetched indirectly through
// index[sorted_index[...]], so tiles are visited in the order given by
// sorted_index (presumably descending score -- confirm against the caller).
__global__ void nms_kernel(const int num_bboxes, const int num_bboxes_for_nms,
                           const float nms_overlap_thresh,
                           const int decode_bboxes_dims, const float *bboxes,
                           const int *index, const int64_t *sorted_index,
                           int64_t *mask) {
  // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  // Tile sizes, clipped at the tail of the candidate list.
  const int row_size =
      fminf(num_bboxes_for_nms - row_start * THREADS_PER_BLOCK_NMS,
            THREADS_PER_BLOCK_NMS);
  const int col_size =
      fminf(num_bboxes_for_nms - col_start * THREADS_PER_BLOCK_NMS,
            THREADS_PER_BLOCK_NMS);
  // Stage the column tile in shared memory as 7-float boxes for iou_bev.
  // Note the swap of source dims 3/4 and the yaw remap to (-yaw - pi/2):
  // this converts the stored box encoding to iou_bev's
  // [x, y, z, dx, dy, dz, heading] convention -- TODO confirm against the
  // box coder used upstream.
  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
  if (threadIdx.x < col_size) {
    int box_idx =
        index[sorted_index[THREADS_PER_BLOCK_NMS * col_start + threadIdx.x]];
    block_boxes[threadIdx.x * 7 + 0] = bboxes[box_idx * decode_bboxes_dims];
    block_boxes[threadIdx.x * 7 + 1] = bboxes[box_idx * decode_bboxes_dims + 1];
    block_boxes[threadIdx.x * 7 + 2] = bboxes[box_idx * decode_bboxes_dims + 2];
    block_boxes[threadIdx.x * 7 + 3] = bboxes[box_idx * decode_bboxes_dims + 4];
    block_boxes[threadIdx.x * 7 + 4] = bboxes[box_idx * decode_bboxes_dims + 3];
    block_boxes[threadIdx.x * 7 + 5] = bboxes[box_idx * decode_bboxes_dims + 5];
    block_boxes[threadIdx.x * 7 + 6] =
        -bboxes[box_idx * decode_bboxes_dims + decode_bboxes_dims - 1] -
        3.141592653589793 / 2;
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    // This thread's row box, converted exactly like the column tile above.
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const int act_box_idx = index[sorted_index[cur_box_idx]];
    float cur_box[7];
    cur_box[0] = bboxes[act_box_idx * decode_bboxes_dims];
    cur_box[1] = bboxes[act_box_idx * decode_bboxes_dims + 1];
    cur_box[2] = bboxes[act_box_idx * decode_bboxes_dims + 2];
    cur_box[3] = bboxes[act_box_idx * decode_bboxes_dims + 4];
    cur_box[4] = bboxes[act_box_idx * decode_bboxes_dims + 3];
    cur_box[5] = bboxes[act_box_idx * decode_bboxes_dims + 5];
    cur_box[6] =
        -bboxes[act_box_idx * decode_bboxes_dims + decode_bboxes_dims - 1] -
        3.141592653589793 / 2;
    int i = 0;
    int64_t t = 0;
    int start = 0;
    // On the diagonal tile, only compare against strictly later boxes.
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;  // bit i: column box i is suppressed by this row box
      }
    }
    const int col_blocks = DIVUP(num_bboxes_for_nms, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Host-side launcher for nms_kernel: a 2D grid of 64x64-box tiles
// (THREADS_PER_BLOCK_NMS = 64) covering all pairs among the first
// num_bboxes_for_nms candidates; `mask` receives one 64-bit suppression
// word per (row box, column tile).
void NmsLauncher(const cudaStream_t &stream, const float *bboxes,
                 const int *index, const int64_t *sorted_index,
                 const int num_bboxes, const int num_bboxes_for_nms,
                 const float nms_overlap_thresh, const int decode_bboxes_dims,
                 int64_t *mask) {
  dim3 blocks(DIVUP(num_bboxes_for_nms, THREADS_PER_BLOCK_NMS),
              DIVUP(num_bboxes_for_nms, THREADS_PER_BLOCK_NMS));
  dim3 threads(THREADS_PER_BLOCK_NMS);
  nms_kernel<<<blocks, threads, 0, stream>>>(
      num_bboxes, num_bboxes_for_nms, nms_overlap_thresh, decode_bboxes_dims,
      bboxes, index, sorted_index, mask);
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/centerpoint_postprocess/postprocess.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <paddle/extension.h>
#define CHECK_INPUT_CUDA(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
#define CHECK_INPUT_BATCHSIZE(x) \
PD_CHECK(x.shape()[0] == 1, #x " batch size must be 1.")
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8;
void NmsLauncher(const cudaStream_t &stream, const float *bboxes,
const int *index, const int64_t *sorted_index,
const int num_bboxes, const int num_bboxes_for_nms,
const float nms_overlap_thresh, const int decode_bboxes_dims,
int64_t *mask);
// Decodes CenterPoint head outputs into 3D boxes and builds a keep-mask.
// One thread per heatmap cell (num_bboxes = feat_h * feat_w):
//   * reg/height give the sub-cell offset and z of the box center;
//   * dim gives (dx, dy, dz); rot gives (sin, cos) for the yaw;
//   * vel (only when with_velocity) gives (vx, vy).
// The decoded box is written to bboxes[box_idx * decode_bboxes_dims ...] and
// mask[box_idx] is set when the box scores above score_threshold AND its
// decoded center lies inside the post-center range.  score_idx is filled
// with the identity permutation for the subsequent sort.
__global__ void decode_kernel(
    const float *score, const float *reg, const float *height, const float *dim,
    const float *vel, const float *rot, const float score_threshold,
    const int feat_w, const float down_ratio, const float voxel_size_x,
    const float voxel_size_y, const float point_cloud_range_x_min,
    const float point_cloud_range_y_min, const float post_center_range_x_min,
    const float post_center_range_y_min, const float post_center_range_z_min,
    const float post_center_range_x_max, const float post_center_range_y_max,
    const float post_center_range_z_max, const int num_bboxes,
    const bool with_velocity, const int decode_bboxes_dims, float *bboxes,
    bool *mask, int *score_idx) {
  const int box_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (box_idx >= num_bboxes) {  // simplified from (== num_bboxes || > num_bboxes)
    return;
  }
  // Grid cell of this prediction on the (feat_h, feat_w) heatmap.
  const int xs = box_idx % feat_w;
  const int ys = box_idx / feat_w;
  // Sub-cell regression offsets and center height.
  const float x = reg[box_idx];
  const float y = reg[box_idx + num_bboxes];
  const float z = height[box_idx];
  // Decoded box center in point-cloud coordinates.
  const float cx =
      (x + xs) * down_ratio * voxel_size_x + point_cloud_range_x_min;
  const float cy =
      (y + ys) * down_ratio * voxel_size_y + point_cloud_range_y_min;
  bboxes[box_idx * decode_bboxes_dims] = cx;
  bboxes[box_idx * decode_bboxes_dims + 1] = cy;
  bboxes[box_idx * decode_bboxes_dims + 2] = z;
  bboxes[box_idx * decode_bboxes_dims + 3] = dim[box_idx];
  bboxes[box_idx * decode_bboxes_dims + 4] = dim[box_idx + num_bboxes];
  bboxes[box_idx * decode_bboxes_dims + 5] = dim[box_idx + 2 * num_bboxes];
  if (with_velocity) {
    bboxes[box_idx * decode_bboxes_dims + 6] = vel[box_idx];
    bboxes[box_idx * decode_bboxes_dims + 7] = vel[box_idx + num_bboxes];
    bboxes[box_idx * decode_bboxes_dims + 8] =
        atan2f(rot[box_idx], rot[box_idx + num_bboxes]);
  } else {
    bboxes[box_idx * decode_bboxes_dims + 6] =
        atan2f(rot[box_idx], rot[box_idx + num_bboxes]);
  }
  // BUGFIX: the previous code compared the raw regression offsets (x, y) --
  // small sub-cell values -- against the post_center_range limits, which are
  // point-cloud coordinates, making the x/y part of the filter a no-op.
  // Filter on the decoded center (cx, cy) instead, matching the z check.
  if (score[box_idx] > score_threshold && cx <= post_center_range_x_max &&
      cy <= post_center_range_y_max && z <= post_center_range_z_max &&
      cx >= post_center_range_x_min && cy >= post_center_range_y_min &&
      z >= post_center_range_z_min) {
    mask[box_idx] = true;
  }
  score_idx[box_idx] = box_idx;
}
// Host-side wrapper: launches `decode_kernel` with one thread per feature-map
// cell (num_bboxes = H * W), rounding the grid up to whole blocks. Argument
// order matches the kernel signature one-to-one.
void DecodeLauncher(
    const cudaStream_t &stream, const float *score, const float *reg,
    const float *height, const float *dim, const float *vel, const float *rot,
    const float score_threshold, const int feat_w, const float down_ratio,
    const float voxel_size_x, const float voxel_size_y,
    const float point_cloud_range_x_min, const float point_cloud_range_y_min,
    const float post_center_range_x_min, const float post_center_range_y_min,
    const float post_center_range_z_min, const float post_center_range_x_max,
    const float post_center_range_y_max, const float post_center_range_z_max,
    const int num_bboxes, const bool with_velocity,
    const int decode_bboxes_dims, float *bboxes, bool *mask, int *score_idx) {
  dim3 blocks(DIVUP(num_bboxes, THREADS_PER_BLOCK_NMS));
  dim3 threads(THREADS_PER_BLOCK_NMS);
  decode_kernel<<<blocks, threads, 0, stream>>>(
      score, reg, height, dim, vel, rot, score_threshold, feat_w, down_ratio,
      voxel_size_x, voxel_size_y, point_cloud_range_x_min,
      point_cloud_range_y_min, post_center_range_x_min, post_center_range_y_min,
      post_center_range_z_min, post_center_range_x_max, post_center_range_y_max,
      post_center_range_z_max, num_bboxes, with_velocity, decode_bboxes_dims,
      bboxes, mask, score_idx);
}
// CenterPoint GPU post-processing. For every task head: decode the dense
// predictions into boxes, keep boxes passing the score/center-range filter,
// sort by score, run rotated NMS, and gather the surviving boxes, scores and
// labels. Per-task results are concatenated along dim 0.
// Returns {bboxes (K, decode_bboxes_dims), scores (K,), labels (K,)}.
std::vector<paddle::Tensor> postprocess_gpu(
    const std::vector<paddle::Tensor> &hm,
    const std::vector<paddle::Tensor> &reg,
    const std::vector<paddle::Tensor> &height,
    const std::vector<paddle::Tensor> &dim,
    const std::vector<paddle::Tensor> &vel,
    const std::vector<paddle::Tensor> &rot,
    const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const std::vector<float> &post_center_range,
    const std::vector<int> &num_classes, const int down_ratio,
    const float score_threshold, const float nms_iou_threshold,
    const int nms_pre_max_size, const int nms_post_max_size,
    const bool with_velocity) {
  int num_tasks = hm.size();
  int decode_bboxes_dims = 9;  // [x, y, z, dx, dy, dz, vx, vy, heading]
  if (!with_velocity) {
    decode_bboxes_dims = 7;  // [x, y, z, dx, dy, dz, heading]
  }
  float voxel_size_x = voxel_size[0];
  float voxel_size_y = voxel_size[1];
  float point_cloud_range_x_min = point_cloud_range[0];
  float point_cloud_range_y_min = point_cloud_range[1];
  float post_center_range_x_min = post_center_range[0];
  float post_center_range_y_min = post_center_range[1];
  float post_center_range_z_min = post_center_range[2];
  float post_center_range_x_max = post_center_range[3];
  float post_center_range_y_max = post_center_range[4];
  float post_center_range_z_max = post_center_range[5];
  std::vector<paddle::Tensor> scores;
  std::vector<paddle::Tensor> labels;
  std::vector<paddle::Tensor> bboxes;
  for (int task_id = 0; task_id < num_tasks; ++task_id) {
    // BUG FIX: validate and read the spatial shape of the *current* task's
    // heatmap instead of always indexing task 0.
    CHECK_INPUT_BATCHSIZE(hm[task_id]);
    int feat_h = hm[task_id].shape()[2];
    int feat_w = hm[task_id].shape()[3];
    int num_bboxes = feat_h * feat_w;
    // Per-cell classification score (max over classes) and label (argmax).
    auto sigmoid_hm_per_task = paddle::experimental::sigmoid(hm[task_id]);
    auto label_per_task =
        paddle::experimental::argmax(sigmoid_hm_per_task, 1, true, false, 3);
    auto score_per_task =
        paddle::experimental::max(sigmoid_hm_per_task, {1}, true);
    // Sizes are regressed in log space; exponentiate to recover metric sizes.
    auto exp_dim_per_task = paddle::experimental::exp(dim[task_id]);
    // Decode bboxes and compute the mask of NMS candidates.
    const float *score_ptr = score_per_task.data<float>();
    const float *reg_ptr = reg[task_id].data<float>();
    const float *height_ptr = height[task_id].data<float>();
    const float *exp_dim_per_task_ptr = exp_dim_per_task.data<float>();
    const float *vel_ptr = vel[task_id].data<float>();
    const float *rot_ptr = rot[task_id].data<float>();
    auto decode_bboxes =
        paddle::empty({num_bboxes, decode_bboxes_dims},
                      paddle::DataType::FLOAT32, paddle::GPUPlace());
    float *decode_bboxes_ptr = decode_bboxes.data<float>();
    auto thresh_mask = paddle::full({num_bboxes}, 0, paddle::DataType::BOOL,
                                    paddle::GPUPlace());
    bool *thresh_mask_ptr = thresh_mask.data<bool>();
    auto score_idx = paddle::empty({num_bboxes}, paddle::DataType::INT32,
                                   paddle::GPUPlace());
    int *score_idx_ptr = score_idx.data<int32_t>();
    DecodeLauncher(score_per_task.stream(), score_ptr, reg_ptr, height_ptr,
                   exp_dim_per_task_ptr, vel_ptr, rot_ptr, score_threshold,
                   feat_w, down_ratio, voxel_size_x, voxel_size_y,
                   point_cloud_range_x_min, point_cloud_range_y_min,
                   post_center_range_x_min, post_center_range_y_min,
                   post_center_range_z_min, post_center_range_x_max,
                   post_center_range_y_max, post_center_range_z_max, num_bboxes,
                   with_velocity, decode_bboxes_dims, decode_bboxes_ptr,
                   thresh_mask_ptr, score_idx_ptr);
    // Keep only the candidates that passed the score/range filter.
    auto selected_score_idx =
        paddle::experimental::masked_select(score_idx, thresh_mask);
    auto flattened_selected_score =
        paddle::experimental::reshape(score_per_task, {num_bboxes});
    auto selected_score = paddle::experimental::masked_select(
        flattened_selected_score, thresh_mask);
    int num_selected = selected_score.numel();
    if (num_selected <= 0) {  // simplified from (== 0 || < 0)
      // No candidate survived: emit a single dummy detection with score -1 so
      // the final concat never sees an empty tensor.
      auto fake_out_boxes =
          paddle::full({1, decode_bboxes_dims}, 0., paddle::DataType::FLOAT32,
                       paddle::GPUPlace());
      auto fake_out_score =
          paddle::full({1}, -1., paddle::DataType::FLOAT32, paddle::GPUPlace());
      auto fake_out_label =
          paddle::full({1}, 0, paddle::DataType::INT64, paddle::GPUPlace());
      scores.push_back(fake_out_score);
      labels.push_back(fake_out_label);
      bboxes.push_back(fake_out_boxes);
      continue;
    }
    // Sort surviving scores in descending order and cap at the NMS budget.
    auto sort_out = paddle::experimental::argsort(selected_score, 0, true);
    auto sorted_index = std::get<1>(sort_out);
    int num_bboxes_for_nms =
        num_selected > nms_pre_max_size ? nms_pre_max_size : num_selected;
    // Rotated NMS (in NmsLauncher, rot = -theta - pi / 2).
    const int col_blocks = DIVUP(num_bboxes_for_nms, THREADS_PER_BLOCK_NMS);
    auto nms_mask = paddle::empty({num_bboxes_for_nms * col_blocks},
                                  paddle::DataType::INT64, paddle::GPUPlace());
    int64_t *nms_mask_data = nms_mask.data<int64_t>();
    NmsLauncher(score_per_task.stream(), decode_bboxes.data<float>(),
                selected_score_idx.data<int>(), sorted_index.data<int64_t>(),
                num_selected, num_bboxes_for_nms, nms_iou_threshold,
                decode_bboxes_dims, nms_mask_data);
    const paddle::Tensor nms_mask_cpu_tensor =
        nms_mask.copy_to(paddle::CPUPlace(), true);
    const int64_t *nms_mask_cpu = nms_mask_cpu_tensor.data<int64_t>();
    auto remv_cpu = paddle::full({col_blocks}, 0, paddle::DataType::INT64,
                                 paddle::CPUPlace());
    int64_t *remv_cpu_data = remv_cpu.data<int64_t>();
    int num_to_keep = 0;
    auto keep = paddle::empty({num_bboxes_for_nms}, paddle::DataType::INT32,
                              paddle::CPUPlace());
    int *keep_data = keep.data<int>();
    // Sequential suppression sweep over the per-box bitmasks produced by the
    // NMS kernel (bit j of word b => suppresses box b * 64 + j).
    for (int i = 0; i < num_bboxes_for_nms; i++) {
      int nblock = i / THREADS_PER_BLOCK_NMS;
      int inblock = i % THREADS_PER_BLOCK_NMS;
      if (!(remv_cpu_data[nblock] & (1ULL << inblock))) {
        keep_data[num_to_keep++] = i;
        const int64_t *p = &nms_mask_cpu[0] + i * col_blocks;
        for (int j = nblock; j < col_blocks; j++) {
          remv_cpu_data[j] |= p[j];
        }
      }
    }
    // Cap the final number of detections kept for this task.
    int num_for_gather =
        num_to_keep > nms_post_max_size ? nms_post_max_size : num_to_keep;
    auto keep_gpu = paddle::empty({num_for_gather}, paddle::DataType::INT32,
                                  paddle::GPUPlace());
    int *keep_gpu_ptr = keep_gpu.data<int>();
    cudaMemcpy(keep_gpu_ptr, keep_data, num_for_gather * sizeof(int),
               cudaMemcpyHostToDevice);
    auto gather_sorted_index =
        paddle::experimental::gather(sorted_index, keep_gpu, 0);
    auto gather_index = paddle::experimental::gather(selected_score_idx,
                                                     gather_sorted_index, 0);
    auto gather_score =
        paddle::experimental::gather(selected_score, gather_sorted_index, 0);
    auto flattened_label =
        paddle::experimental::reshape(label_per_task, {num_bboxes});
    auto gather_label =
        paddle::experimental::gather(flattened_label, gather_index, 0);
    auto gather_bbox =
        paddle::experimental::gather(decode_bboxes, gather_index, 0);
    // NOTE(review): num_classes[task_id] is used as the *global label offset*
    // for this task. That is only correct if the vector holds cumulative
    // class offsets rather than per-task class counts — confirm against the
    // Python caller that fills this attribute.
    auto start_label = paddle::full(
        {1}, num_classes[task_id], paddle::DataType::INT64, paddle::GPUPlace());
    auto added_label = paddle::experimental::add(gather_label, start_label);
    scores.push_back(gather_score);
    labels.push_back(added_label);
    bboxes.push_back(gather_bbox);
  }
  auto out_scores = paddle::experimental::concat(scores, 0);
  auto out_labels = paddle::experimental::concat(labels, 0);
  auto out_bboxes = paddle::experimental::concat(bboxes, 0);
  return {out_bboxes, out_scores, out_labels};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/centerpoint_postprocess/postprocess.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <paddle/extension.h>
std::vector<paddle::Tensor> postprocess_gpu(
const std::vector<paddle::Tensor> &hm,
const std::vector<paddle::Tensor> ®,
const std::vector<paddle::Tensor> &height,
const std::vector<paddle::Tensor> &dim,
const std::vector<paddle::Tensor> &vel,
const std::vector<paddle::Tensor> &rot,
const std::vector<float> &voxel_size,
const std::vector<float> &point_cloud_range,
const std::vector<float> &post_center_range,
const std::vector<int> &num_classes, const int down_ratio,
const float score_threshold, const float nms_iou_threshold,
const int nms_pre_max_size, const int nms_post_max_size,
const bool with_velocity);
// Device dispatcher for the CenterPoint post-processing op. Only GPU tensors
// are supported; any other placement raises an error.
std::vector<paddle::Tensor> centerpoint_postprocess(
    const std::vector<paddle::Tensor> &hm,
    const std::vector<paddle::Tensor> &reg,
    const std::vector<paddle::Tensor> &height,
    const std::vector<paddle::Tensor> &dim,
    const std::vector<paddle::Tensor> &vel,
    const std::vector<paddle::Tensor> &rot,
    const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const std::vector<float> &post_center_range,
    const std::vector<int> &num_classes, const int down_ratio,
    const float score_threshold, const float nms_iou_threshold,
    const int nms_pre_max_size, const int nms_post_max_size,
    const bool with_velocity) {
  // Guard clause: reject non-GPU inputs up front.
  if (!hm[0].is_gpu()) {
    PD_THROW(
        "Unsupported device type for centerpoint postprocess "
        "operator.");
  }
  return postprocess_gpu(hm, reg, height, dim, vel, rot, voxel_size,
                         point_cloud_range, post_center_range, num_classes,
                         down_ratio, score_threshold, nms_iou_threshold,
                         nms_pre_max_size, nms_post_max_size, with_velocity);
}
// Static shape inference for the op's three outputs. The number of final
// detections is data-dependent, so the first dimension is dynamic (-1).
std::vector<std::vector<int64_t>> PostProcessInferShape(
    const std::vector<std::vector<int64_t>> &hm_shape,
    const std::vector<std::vector<int64_t>> &reg_shape,
    const std::vector<std::vector<int64_t>> &height_shape,
    const std::vector<std::vector<int64_t>> &dim_shape,
    const std::vector<std::vector<int64_t>> &vel_shape,
    const std::vector<std::vector<int64_t>> &rot_shape,
    const std::vector<float> &voxel_size,
    const std::vector<float> &point_cloud_range,
    const std::vector<float> &post_center_range,
    const std::vector<int> &num_classes, const int down_ratio,
    const float score_threshold, const float nms_iou_threshold,
    const int nms_pre_max_size, const int nms_post_max_size,
    const bool with_velocity) {
  // Boxes carry [x, y, z, dx, dy, dz, vx, vy, heading] when velocity is
  // predicted, otherwise [x, y, z, dx, dy, dz, heading].
  const int64_t box_dim = with_velocity ? 9 : 7;
  return {{-1, box_dim}, {-1}, {-1}};
}
// Static dtype inference for the op's outputs: bboxes follow the regression
// head's dtype, scores follow the heatmap's dtype, labels are always INT64.
std::vector<paddle::DataType> PostProcessInferDtype(
    const std::vector<paddle::DataType> &hm_dtype,
    const std::vector<paddle::DataType> &reg_dtype,
    const std::vector<paddle::DataType> &height_dtype,
    const std::vector<paddle::DataType> &dim_dtype,
    const std::vector<paddle::DataType> &vel_dtype,
    const std::vector<paddle::DataType> &rot_dtype) {
  return {reg_dtype[0], hm_dtype[0], paddle::DataType::INT64};
}
// Registers the `centerpoint_postprocess` custom operator with Paddle.
// The vector inputs are the per-task head outputs, and the attributes mirror
// the trailing arguments of `centerpoint_postprocess` above.
PD_BUILD_OP(centerpoint_postprocess)
    .Inputs({paddle::Vec("HM"), paddle::Vec("REG"), paddle::Vec("HEIGHT"),
             paddle::Vec("DIM"), paddle::Vec("VEL"), paddle::Vec("ROT")})
    .Outputs({"BBOXES", "SCORES", "LABELS"})
    .SetKernelFn(PD_KERNEL(centerpoint_postprocess))
    .Attrs({"voxel_size: std::vector<float>",
            "point_cloud_range: std::vector<float>",
            "post_center_range: std::vector<float>",
            "num_classes: std::vector<int>", "down_ratio: int",
            "score_threshold: float", "nms_iou_threshold: float",
            "nms_pre_max_size: int", "nms_post_max_size: int",
            "with_velocity: bool"})
    .SetInferShapeFn(PD_INFER_SHAPE(PostProcessInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(PostProcessInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/iou3d_nms/iou3d_cpu.cpp
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
3D Rotated IoU Calculation (CPU)
Written by Shaoshuai Shi
All Rights Reserved 2020.
*/
#include "iou3d_cpu.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <paddle/extension.h>
#include <stdio.h>
#include <vector>
// Scalar helpers used by the geometry routines below. The comparison is
// written as `b < a` (equivalent to the original `a > b`) so NaN handling is
// unchanged. EPS is the tolerance for near-degenerate float comparisons.
inline float min(float a, float b) { return b < a ? b : a; }
inline float max(float a, float b) { return b < a ? a : b; }
const float EPS = 1e-8;
// 2D point used by the BEV polygon-clipping routines below. The __device__
// qualifiers let the identical definition compile in the companion CUDA
// sources; in this CPU translation unit they presumably come from the
// included CUDA headers — verify if this file is ever built without CUDA.
struct Point {
  float x, y;
  __device__ Point() {}
  __device__ Point(double _x, double _y) { x = _x, y = _y; }
  __device__ void set(float _x, float _y) {
    x = _x;
    y = _y;
  }
  __device__ Point operator+(const Point &b) const {
    return Point(x + b.x, y + b.y);
  }
  __device__ Point operator-(const Point &b) const {
    return Point(x - b.x, y - b.y);
  }
};
// 2D cross product a.x * b.y - a.y * b.x (signed parallelogram area).
inline float cross(const Point &a, const Point &b) {
  return a.x * b.y - a.y * b.x;
}
// Cross product of (p1 - p0) x (p2 - p0); positive when p0->p1->p2 turns left.
inline float cross(const Point &p1, const Point &p2, const Point &p0) {
  return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
// Fast rejection test: returns 1 when the axis-aligned bounding boxes of
// segments (p1, p2) and (q1, q2) overlap, 0 otherwise.
inline int check_rect_cross(const Point &p1, const Point &p2, const Point &q1,
                            const Point &q2) {
  int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
            min(q1.x, q2.x) <= max(p1.x, p2.x) &&
            min(p1.y, p2.y) <= max(q1.y, q2.y) &&
            min(q1.y, q2.y) <= max(p1.y, p2.y);
  return ret;
}
// Returns 1 when point p lies inside (with a small MARGIN of tolerance) the
// rotated BEV rectangle of `box` = [x, y, z, dx, dy, dz, heading].
inline int check_in_box2d(const float *box, const Point &p) {
  // params: (7) [x, y, z, dx, dy, dz, heading]
  const float MARGIN = 1e-2;
  float center_x = box[0], center_y = box[1];
  float angle_cos = cos(-box[6]),
        angle_sin =
            sin(-box[6]);  // rotate the point in the opposite direction of box
  float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
  float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;
  // In the box's local frame, containment is an axis-aligned half-size test.
  return (fabs(rot_x) < box[3] / 2 + MARGIN &&
          fabs(rot_y) < box[4] / 2 + MARGIN);
}
// Computes the intersection of segments (p0, p1) and (q0, q1). Returns 1 and
// writes the intersection point to `ans` when the segments properly cross;
// returns 0 otherwise.
inline int intersection(const Point &p1, const Point &p0, const Point &q1,
                        const Point &q0, Point &ans) {
  // fast exclusion
  if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
  // check cross standing: both endpoints of each segment must straddle the
  // other segment's supporting line (strict sign products).
  float s1 = cross(q0, p1, p0);
  float s2 = cross(p1, q1, p0);
  float s3 = cross(p0, q1, q0);
  float s4 = cross(q1, p1, q0);
  if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
  // calculate intersection of two lines
  float s5 = cross(q1, p1, p0);
  if (fabs(s5 - s1) > EPS) {
    // Interpolate along (q0, q1) using the signed areas s1, s5.
    ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
    ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
  } else {
    // Near-degenerate case: solve the two implicit line equations
    // a*x + b*y + c = 0 directly via Cramer's rule.
    float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
    float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
    float D = a0 * b1 - a1 * b0;
    ans.x = (b0 * c1 - b1 * c0) / D;
    ans.y = (a1 * c0 - a0 * c1) / D;
  }
  return 1;
}
// Rotates p in place around `center` by the angle whose cosine and sine are
// given (standard 2D rotation applied to the offset from the center).
inline void rotate_around_center(const Point &center, const float angle_cos,
                                 const float angle_sin, Point &p) {
  float new_x =
      (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x;
  float new_y =
      (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
  p.set(new_x, new_y);
}
// Angle-ordering predicate used by the vertex sort in box_overlap: returns 1
// when a has a larger polar angle around `center` than b, 0 otherwise.
inline int point_cmp(const Point &a, const Point &b, const Point &center) {
  return atan2(a.y - center.y, a.x - center.x) >
         atan2(b.y - center.y, b.x - center.x);
}
// Computes the BEV intersection area of two rotated boxes by explicit convex
// polygon clipping: build the rotated corners of both boxes, collect all
// edge-edge intersection points plus each box's corners contained in the
// other, sort the collected vertices by polar angle around their centroid,
// then apply the shoelace formula.
inline float box_overlap(const float *box_a, const float *box_b) {
  // params: box_a (7) [x, y, z, dx, dy, dz, heading]
  // params: box_b (7) [x, y, z, dx, dy, dz, heading]
  // float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 =
  // box_a[3], a_angle = box_a[4];
  // float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 =
  // box_b[3], b_angle = box_b[4];
  float a_angle = box_a[6], b_angle = box_b[6];
  float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2,
        a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
  // Axis-aligned corner coordinates before applying each box's heading.
  float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
  float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
  float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
  float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
  Point center_a(box_a[0], box_a[1]);
  Point center_b(box_b[0], box_b[1]);
  // Fifth slot duplicates the first corner so edges can be walked as [k, k+1].
  Point box_a_corners[5];
  box_a_corners[0].set(a_x1, a_y1);
  box_a_corners[1].set(a_x2, a_y1);
  box_a_corners[2].set(a_x2, a_y2);
  box_a_corners[3].set(a_x1, a_y2);
  Point box_b_corners[5];
  box_b_corners[0].set(b_x1, b_y1);
  box_b_corners[1].set(b_x2, b_y1);
  box_b_corners[2].set(b_x2, b_y2);
  box_b_corners[3].set(b_x1, b_y2);
  // get oriented corners
  float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
  float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
  for (int k = 0; k < 4; k++) {
    rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
    rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
  }
  box_a_corners[4] = box_a_corners[0];
  box_b_corners[4] = box_b_corners[0];
  // get intersection of lines
  Point cross_points[16];
  Point poly_center;
  int cnt = 0, flag = 0;
  poly_center.set(0, 0);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                          box_b_corners[j + 1], box_b_corners[j],
                          cross_points[cnt]);
      if (flag) {
        poly_center = poly_center + cross_points[cnt];
        cnt++;
      }
    }
  }
  // check corners: corners of one box lying inside the other are vertices of
  // the intersection polygon too.
  for (int k = 0; k < 4; k++) {
    if (check_in_box2d(box_a, box_b_corners[k])) {
      poly_center = poly_center + box_b_corners[k];
      cross_points[cnt] = box_b_corners[k];
      cnt++;
    }
    if (check_in_box2d(box_b, box_a_corners[k])) {
      poly_center = poly_center + box_a_corners[k];
      cross_points[cnt] = box_a_corners[k];
      cnt++;
    }
  }
  // NOTE: when cnt == 0 this division yields NaN, but poly_center is then
  // never used (the loops below are empty) and the function returns 0.
  poly_center.x /= cnt;
  poly_center.y /= cnt;
  // sort the points of polygon by polar angle around the centroid
  // (bubble sort with the point_cmp predicate).
  Point temp;
  for (int j = 0; j < cnt - 1; j++) {
    for (int i = 0; i < cnt - j - 1; i++) {
      if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
        temp = cross_points[i];
        cross_points[i] = cross_points[i + 1];
        cross_points[i + 1] = temp;
      }
    }
  }
  // get the overlap areas via the shoelace formula, fanned from vertex 0.
  float area = 0;
  for (int k = 0; k < cnt - 1; k++) {
    area += cross(cross_points[k] - cross_points[0],
                  cross_points[k + 1] - cross_points[0]);
  }
  return fabs(area) / 2.0;
}
// BEV IoU of two rotated boxes [x, y, z, dx, dy, dz, heading]:
// intersection area over union area; EPS guards against division by zero
// when both boxes are degenerate.
inline float iou_bev(const float *box_a, const float *box_b) {
  float sa = box_a[3] * box_a[4];
  float sb = box_b[3] * box_b[4];
  float s_overlap = box_overlap(box_a, box_b);
  return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
// CPU pairwise rotated BEV IoU.
// boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading]
// boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading]
// Returns a single (N, M) float tensor of IoU values.
std::vector<paddle::Tensor> boxes_iou_bev_cpu(
    const paddle::Tensor &boxes_a_tensor,
    const paddle::Tensor &boxes_b_tensor) {
  const int rows = boxes_a_tensor.shape()[0];
  const int cols = boxes_b_tensor.shape()[0];
  auto ans_iou_tensor = paddle::empty({rows, cols}, paddle::DataType::FLOAT32,
                                      paddle::CPUPlace());
  float *ans_iou = ans_iou_tensor.data<float>();
  const float *boxes_a = boxes_a_tensor.data<float>();
  const float *boxes_b = boxes_b_tensor.data<float>();
  for (int row = 0; row < rows; row++) {
    // Hoist the row box and output row pointers out of the inner loop.
    const float *box_a = boxes_a + row * 7;
    float *out_row = ans_iou + row * cols;
    for (int col = 0; col < cols; col++) {
      out_row[col] = iou_bev(box_a, boxes_b + col * 7);
    }
  }
  return {ans_iou_tensor};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/iou3d_nms/iou3d_nms.cpp
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include "iou3d_nms.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <paddle/extension.h>
#include <vector>
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8;
void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a,
const float *boxes_a, const int num_b,
const float *boxes_b, float *ans_overlap);
void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a,
const float *boxes_a, const int num_b,
const float *boxes_b, float *ans_iou);
void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask,
int boxes_num, float nms_overlap_thresh);
void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes,
int64_t *mask, int boxes_num, float nms_overlap_thresh);
// GPU pairwise BEV overlap areas.
// boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
// boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
// Returns a single (N, M) float tensor of overlap areas.
std::vector<paddle::Tensor> boxes_overlap_bev_gpu(
    const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b) {
  const int rows = boxes_a.shape()[0];
  const int cols = boxes_b.shape()[0];
  auto ans_overlap = paddle::empty({rows, cols}, paddle::DataType::FLOAT32,
                                   paddle::GPUPlace());
  BoxesOverlapLauncher(boxes_a.stream(), rows, boxes_a.data<float>(), cols,
                       boxes_b.data<float>(), ans_overlap.data<float>());
  return {ans_overlap};
}
// GPU pairwise rotated BEV IoU.
// boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading]
// boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading]
// Returns a single (N, M) float tensor of IoU values.
std::vector<paddle::Tensor> boxes_iou_bev_gpu(
    const paddle::Tensor &boxes_a_tensor,
    const paddle::Tensor &boxes_b_tensor) {
  const int rows = boxes_a_tensor.shape()[0];
  const int cols = boxes_b_tensor.shape()[0];
  auto ans_iou_tensor = paddle::empty({rows, cols}, paddle::DataType::FLOAT32,
                                      paddle::GPUPlace());
  BoxesIouBevLauncher(boxes_a_tensor.stream(), rows,
                      boxes_a_tensor.data<float>(), cols,
                      boxes_b_tensor.data<float>(),
                      ans_iou_tensor.data<float>());
  return {ans_iou_tensor};
}
// Rotated NMS on GPU.
// boxes: (N, 7) [x, y, z, dx, dy, dz, heading], assumed sorted by score.
// Returns {keep: (N,) int32 CPU tensor whose first num_to_keep entries are
// the kept indices, num_to_keep: (1,) int32 CPU tensor}.
std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes,
                                    float nms_overlap_thresh) {
  auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32,
                            paddle::CPUPlace());
  auto num_to_keep_tensor =
      paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace());
  int *num_to_keep_data = num_to_keep_tensor.data<int>();
  int boxes_num = boxes.shape()[0];
  const float *boxes_data = boxes.data<float>();
  int *keep_data = keep.data<int>();
  // Each box owns `col_blocks` 64-bit words; bit j of word b marks that the
  // box suppresses box (b * 64 + j).
  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64,
                            paddle::GPUPlace());
  int64_t *mask_data = mask.data<int64_t>();
  NmsLauncher(boxes.stream(), boxes_data, mask_data, boxes_num,
              nms_overlap_thresh);
  // Blocking copy of the suppression matrix back to the host.
  const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true);
  const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>();
  // BUG FIX: use std::vector instead of a variable-length array
  // (`int64_t remv_cpu[col_blocks]`): VLAs are not standard C++ and can
  // overflow the stack for large box counts.
  std::vector<int64_t> remv_cpu(col_blocks, 0);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / THREADS_PER_BLOCK_NMS;
    int inblock = i % THREADS_PER_BLOCK_NMS;
    if (!(remv_cpu[nblock] & (1ULL << inblock))) {
      // Box i survives; fold its suppression row into the running mask.
      keep_data[num_to_keep++] = i;
      const int64_t *p = &mask_cpu[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv_cpu[j] |= p[j];
      }
    }
  }
  num_to_keep_data[0] = num_to_keep;
  if (cudaSuccess != cudaGetLastError()) printf("Error!\n");
  return {keep, num_to_keep_tensor};
}
// Axis-aligned ("normal") NMS on GPU, same host-side sweep as nms_gpu but
// dispatching to NmsNormalLauncher.
// boxes: (N, 7) [x, y, z, dx, dy, dz, heading], assumed sorted by score.
// Returns {keep: (N,) int32 CPU tensor whose first num_to_keep entries are
// the kept indices, num_to_keep: (1,) int32 CPU tensor}.
std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes,
                                           float nms_overlap_thresh) {
  auto keep = paddle::empty({boxes.shape()[0]}, paddle::DataType::INT32,
                            paddle::CPUPlace());
  auto num_to_keep_tensor =
      paddle::empty({1}, paddle::DataType::INT32, paddle::CPUPlace());
  int *num_to_keep_data = num_to_keep_tensor.data<int>();
  int boxes_num = boxes.shape()[0];
  const float *boxes_data = boxes.data<float>();
  int *keep_data = keep.data<int>();
  // Each box owns `col_blocks` 64-bit words; bit j of word b marks that the
  // box suppresses box (b * 64 + j).
  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  auto mask = paddle::empty({boxes_num * col_blocks}, paddle::DataType::INT64,
                            paddle::GPUPlace());
  int64_t *mask_data = mask.data<int64_t>();
  NmsNormalLauncher(boxes.stream(), boxes_data, mask_data, boxes_num,
                    nms_overlap_thresh);
  // Blocking copy of the suppression matrix back to the host.
  const paddle::Tensor mask_cpu_tensor = mask.copy_to(paddle::CPUPlace(), true);
  const int64_t *mask_cpu = mask_cpu_tensor.data<int64_t>();
  // BUG FIX: use std::vector instead of a variable-length array
  // (`int64_t remv_cpu[col_blocks]`): VLAs are not standard C++ and can
  // overflow the stack for large box counts. Mirrors the fix in nms_gpu.
  std::vector<int64_t> remv_cpu(col_blocks, 0);
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / THREADS_PER_BLOCK_NMS;
    int inblock = i % THREADS_PER_BLOCK_NMS;
    if (!(remv_cpu[nblock] & (1ULL << inblock))) {
      // Box i survives; fold its suppression row into the running mask.
      keep_data[num_to_keep++] = i;
      const int64_t *p = &mask_cpu[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv_cpu[j] |= p[j];
      }
    }
  }
  num_to_keep_data[0] = num_to_keep;
  if (cudaSuccess != cudaGetLastError()) {
    printf("Error!\n");
  }
  return {keep, num_to_keep_tensor};
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/iou3d_nms/iou3d_nms_kernel.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(int64_t) * 8;
const float EPS = 1e-8;
// 2D point used by the rotated-IoU device routines below.
struct Point {
  float x, y;
  __device__ Point() {}
  __device__ Point(double _x, double _y) { x = _x, y = _y; }
  __device__ void set(float _x, float _y) {
    x = _x;
    y = _y;
  }
  __device__ Point operator+(const Point &b) const {
    return Point(x + b.x, y + b.y);
  }
  __device__ Point operator-(const Point &b) const {
    return Point(x - b.x, y - b.y);
  }
};
// 2D cross product a.x * b.y - a.y * b.x (signed parallelogram area).
__device__ inline float cross(const Point &a, const Point &b) {
  return a.x * b.y - a.y * b.x;
}
// Cross product of (p1 - p0) x (p2 - p0); positive when p0->p1->p2 turns left.
__device__ inline float cross(const Point &p1, const Point &p2,
                              const Point &p0) {
  return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
// Fast rejection test: returns 1 when the axis-aligned bounding boxes of
// segments (p1, p2) and (q1, q2) overlap, 0 otherwise.
__device__ int check_rect_cross(const Point &p1, const Point &p2,
                                const Point &q1, const Point &q2) {
  int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
            min(q1.x, q2.x) <= max(p1.x, p2.x) &&
            min(p1.y, p2.y) <= max(q1.y, q2.y) &&
            min(q1.y, q2.y) <= max(p1.y, p2.y);
  return ret;
}
// Returns 1 when point p lies inside (with a small MARGIN of tolerance) the
// rotated BEV rectangle of `box` = [x, y, z, dx, dy, dz, heading].
__device__ inline int check_in_box2d(const float *box, const Point &p) {
  // params: (7) [x, y, z, dx, dy, dz, heading]
  const float MARGIN = 1e-2;
  float center_x = box[0], center_y = box[1];
  float angle_cos = cos(-box[6]),
        angle_sin =
            sin(-box[6]);  // rotate the point in the opposite direction of box
  float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
  float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;
  // In the box's local frame, containment is an axis-aligned half-size test.
  return (fabs(rot_x) < box[3] / 2 + MARGIN &&
          fabs(rot_y) < box[4] / 2 + MARGIN);
}
// Computes the intersection of segments (p0, p1) and (q0, q1). Returns 1 and
// writes the intersection point to `ans` when the segments properly cross;
// returns 0 otherwise.
__device__ inline int intersection(const Point &p1, const Point &p0,
                                   const Point &q1, const Point &q0,
                                   Point &ans) {
  // fast exclusion
  if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
  // check cross standing: both endpoints of each segment must straddle the
  // other segment's supporting line (strict sign products).
  float s1 = cross(q0, p1, p0);
  float s2 = cross(p1, q1, p0);
  float s3 = cross(p0, q1, q0);
  float s4 = cross(q1, p1, q0);
  if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
  // calculate intersection of two lines
  float s5 = cross(q1, p1, p0);
  if (fabs(s5 - s1) > EPS) {
    // Interpolate along (q0, q1) using the signed areas s1, s5.
    ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
    ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
  } else {
    // Near-degenerate case: solve the two implicit line equations
    // a*x + b*y + c = 0 directly via Cramer's rule.
    float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
    float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
    float D = a0 * b1 - a1 * b0;
    ans.x = (b0 * c1 - b1 * c0) / D;
    ans.y = (a1 * c0 - a0 * c1) / D;
  }
  return 1;
}
// Rotates p in place around `center` using the precomputed cos/sin of the
// rotation angle.
__device__ inline void rotate_around_center(const Point &center,
                                            const float angle_cos,
                                            const float angle_sin, Point &p) {
  float new_x =
      (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x;
  float new_y =
      (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
  p.set(new_x, new_y);
}
// Comparator for ordering polygon vertices by polar angle around `center`;
// used by the bubble sort in box_overlap.
__device__ inline int point_cmp(const Point &a, const Point &b,
                                const Point &center) {
  return atan2(a.y - center.y, a.x - center.x) >
         atan2(b.y - center.y, b.x - center.x);
}
// Exact BEV intersection area of two rotated boxes.
// The intersection polygon is assembled from (a) crossings between the two
// rectangles' edges and (b) corners of one box lying inside the other; its
// vertices are sorted by angle around their centroid, then the area is
// accumulated with the shoelace formula.
__device__ inline float box_overlap(const float *box_a, const float *box_b) {
  // params box_a: [x, y, z, dx, dy, dz, heading]
  // params box_b: [x, y, z, dx, dy, dz, heading]
  float a_angle = box_a[6], b_angle = box_b[6];
  float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2,
        a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
  float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
  float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
  float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
  float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
  Point center_a(box_a[0], box_a[1]);
  Point center_b(box_b[0], box_b[1]);
#ifdef DEBUG
  printf(
      "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n",
      a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
  printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
         center_b.x, center_b.y);
#endif
  // Axis-aligned corners first; slot [4] duplicates [0] to close the loop.
  Point box_a_corners[5];
  box_a_corners[0].set(a_x1, a_y1);
  box_a_corners[1].set(a_x2, a_y1);
  box_a_corners[2].set(a_x2, a_y2);
  box_a_corners[3].set(a_x1, a_y2);
  Point box_b_corners[5];
  box_b_corners[0].set(b_x1, b_y1);
  box_b_corners[1].set(b_x2, b_y1);
  box_b_corners[2].set(b_x2, b_y2);
  box_b_corners[3].set(b_x1, b_y2);
  // get oriented corners
  float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
  float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
  for (int k = 0; k < 4; k++) {
#ifdef DEBUG
    printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
           box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
           box_b_corners[k].y);
#endif
    rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
    rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
    printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x,
           box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
  }
  box_a_corners[4] = box_a_corners[0];
  box_b_corners[4] = box_b_corners[0];
  // get intersection of lines
  Point cross_points[16];
  Point poly_center;
  int cnt = 0, flag = 0;
  poly_center.set(0, 0);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                          box_b_corners[j + 1], box_b_corners[j],
                          cross_points[cnt]);
      if (flag) {
        poly_center = poly_center + cross_points[cnt];
        cnt++;
#ifdef DEBUG
        // Bug fix: the b-edge endpoints were previously printed with index
        // `i` instead of `j`, reporting the wrong edge in debug output.
        printf(
            "Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, "
            "%.3f)->(%.3f, %.3f) \n",
            cross_points[cnt - 1].x, cross_points[cnt - 1].y,
            box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x,
            box_a_corners[i + 1].y, box_b_corners[j].x, box_b_corners[j].y,
            box_b_corners[j + 1].x, box_b_corners[j + 1].y);
#endif
      }
    }
  }
  // check corners
  for (int k = 0; k < 4; k++) {
    if (check_in_box2d(box_a, box_b_corners[k])) {
      poly_center = poly_center + box_b_corners[k];
      cross_points[cnt] = box_b_corners[k];
      cnt++;
#ifdef DEBUG
      printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x,
             cross_points[cnt - 1].y);
#endif
    }
    if (check_in_box2d(box_b, box_a_corners[k])) {
      poly_center = poly_center + box_a_corners[k];
      cross_points[cnt] = box_a_corners[k];
      cnt++;
#ifdef DEBUG
      printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x,
             cross_points[cnt - 1].y);
#endif
    }
  }
  // Fewer than 3 vertices means a degenerate (zero-area) intersection; the
  // shoelace sum below would return 0 anyway, but bailing out here avoids a
  // float division by zero (cnt == 0) in the centroid computation.
  if (cnt < 3) {
    return 0.0f;
  }
  poly_center.x /= cnt;
  poly_center.y /= cnt;
  // sort the points of polygon (bubble sort by angle around the centroid)
  Point temp;
  for (int j = 0; j < cnt - 1; j++) {
    for (int i = 0; i < cnt - j - 1; i++) {
      if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
        temp = cross_points[i];
        cross_points[i] = cross_points[i + 1];
        cross_points[i + 1] = temp;
      }
    }
  }
#ifdef DEBUG
  printf("cnt=%d\n", cnt);
  for (int i = 0; i < cnt; i++) {
    printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
           cross_points[i].y);
  }
#endif
  // get the overlap areas (shoelace formula fanned out from vertex 0)
  float area = 0;
  for (int k = 0; k < cnt - 1; k++) {
    area += cross(cross_points[k] - cross_points[0],
                  cross_points[k + 1] - cross_points[0]);
  }
  return fabs(area) / 2.0;
}
// Rotated BEV IoU of two boxes; EPS in the denominator guards against
// division by zero for degenerate boxes.
__device__ inline float iou_bev(const float *box_a, const float *box_b) {
  // params box_a: [x, y, z, dx, dy, dz, heading]
  // params box_b: [x, y, z, dx, dy, dz, heading]
  float sa = box_a[3] * box_a[4];
  float sb = box_b[3] * box_b[4];
  float s_overlap = box_overlap(box_a, box_b);
  return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
// One thread per (a, b) pair: writes the rotated-BEV intersection area into
// the row-major (num_a, num_b) matrix ans_overlap.
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_overlap) {
  // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
  // Guard threads past the matrix edge in the last blocks.
  if (a_idx >= num_a || b_idx >= num_b) {
    return;
  }
  const float *cur_box_a = boxes_a + a_idx * 7;
  const float *cur_box_b = boxes_b + b_idx * 7;
  float s_overlap = box_overlap(cur_box_a, cur_box_b);
  ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}
// One thread per (a, b) pair: writes rotated-BEV IoU into the row-major
// (num_a, num_b) matrix ans_iou.
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_iou) {
  // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
  // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
  // Guard threads past the matrix edge in the last blocks.
  if (a_idx >= num_a || b_idx >= num_b) {
    return;
  }
  const float *cur_box_a = boxes_a + a_idx * 7;
  const float *cur_box_b = boxes_b + b_idx * 7;
  float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
  ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
// Tiled NMS suppression-mask kernel using rotated BEV IoU. Each block
// compares a tile of "row" boxes against a tile of "col" boxes; bit i of
// mask[row_box][col_tile] is set when the row box overlaps the i-th box of
// the column tile above the threshold.
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
                           const float *boxes, int64_t *mask) {
  // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  // Stage the column tile in shared memory; each thread copies one box.
  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 7 + 0] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
    block_boxes[threadIdx.x * 7 + 1] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
    block_boxes[threadIdx.x * 7 + 2] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
    block_boxes[threadIdx.x * 7 + 3] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
    block_boxes[threadIdx.x * 7 + 4] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
    block_boxes[threadIdx.x * 7 + 5] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
    block_boxes[threadIdx.x * 7 + 6] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 7;
    int i = 0;
    int64_t t = 0;
    int start = 0;
    // On the diagonal tile, skip self and lower-indexed boxes of this tile.
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Axis-aligned BEV IoU (heading is ignored); EPS guards the denominator.
__device__ inline float iou_normal(float const *const a, float const *const b) {
  // params: a: [x, y, z, dx, dy, dz, heading]
  // params: b: [x, y, z, dx, dy, dz, heading]
  float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2),
        right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2);
  float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2),
        bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2);
  // Clamp to 0 so disjoint boxes yield zero intersection.
  float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
  float interS = width * height;
  float Sa = a[3] * a[4];
  float Sb = b[3] * b[4];
  return interS / fmaxf(Sa + Sb - interS, EPS);
}
// Same tiled suppression-mask scheme as nms_kernel, but using axis-aligned
// BEV IoU (iou_normal) instead of rotated IoU.
__global__ void nms_normal_kernel(const int boxes_num,
                                  const float nms_overlap_thresh,
                                  const float *boxes, int64_t *mask) {
  // params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  // Stage the column tile in shared memory; each thread copies one box.
  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 7 + 0] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
    block_boxes[threadIdx.x * 7 + 1] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
    block_boxes[threadIdx.x * 7 + 2] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
    block_boxes[threadIdx.x * 7 + 3] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
    block_boxes[threadIdx.x * 7 + 4] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
    block_boxes[threadIdx.x * 7 + 5] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
    block_boxes[threadIdx.x * 7 + 6] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 7;
    int i = 0;
    int64_t t = 0;
    int start = 0;
    // On the diagonal tile, skip self and lower-indexed boxes of this tile.
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Host-side launcher for the pairwise rotated-overlap kernel. Fills the
// row-major (num_a, num_b) matrix ans_overlap on the given stream.
void BoxesOverlapLauncher(const cudaStream_t &stream, const int num_a,
                          const float *boxes_a, const int num_b,
                          const float *boxes_b, float *ans_overlap) {
  // blockIdx.x walks boxes_b (columns); blockIdx.y walks boxes_a (rows).
  dim3 grid_dim(DIVUP(num_b, THREADS_PER_BLOCK),
                DIVUP(num_a, THREADS_PER_BLOCK));
  dim3 block_dim(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
  boxes_overlap_kernel<<<grid_dim, block_dim, 0, stream>>>(
      num_a, boxes_a, num_b, boxes_b, ans_overlap);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Host-side launcher for the pairwise rotated-IoU kernel. Fills the
// row-major (num_a, num_b) matrix ans_iou on the given stream.
void BoxesIouBevLauncher(const cudaStream_t &stream, const int num_a,
                         const float *boxes_a, const int num_b,
                         const float *boxes_b, float *ans_iou) {
  // blockIdx.x walks boxes_b (columns); blockIdx.y walks boxes_a (rows).
  dim3 grid_dim(DIVUP(num_b, THREADS_PER_BLOCK),
                DIVUP(num_a, THREADS_PER_BLOCK));
  dim3 block_dim(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
  boxes_iou_bev_kernel<<<grid_dim, block_dim, 0, stream>>>(
      num_a, boxes_a, num_b, boxes_b, ans_iou);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Host-side launcher for rotated-IoU NMS. Writes per-box suppression
// bitmasks into `mask` (one 64-bit word per column tile).
void NmsLauncher(const cudaStream_t &stream, const float *boxes, int64_t *mask,
                 int boxes_num, float nms_overlap_thresh) {
  const int num_tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  dim3 grid_dim(num_tiles, num_tiles);
  dim3 block_dim(THREADS_PER_BLOCK_NMS);
  nms_kernel<<<grid_dim, block_dim, 0, stream>>>(boxes_num, nms_overlap_thresh,
                                                 boxes, mask);
}
// Host-side launcher for axis-aligned-IoU NMS; mirrors NmsLauncher but
// dispatches the heading-ignoring kernel.
void NmsNormalLauncher(const cudaStream_t &stream, const float *boxes,
                       int64_t *mask, int boxes_num, float nms_overlap_thresh) {
  const int num_tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  dim3 grid_dim(num_tiles, num_tiles);
  dim3 block_dim(THREADS_PER_BLOCK_NMS);
  nms_normal_kernel<<<grid_dim, block_dim, 0, stream>>>(
      boxes_num, nms_overlap_thresh, boxes, mask);
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/iou3d_nms/iou3d_nms.h
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef IOU3D_NMS_H
#define IOU3D_NMS_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <paddle/extension.h>
#include <vector>
// Pairwise BEV intersection areas of rotated boxes (N, 7)
// [x, y, z, dx, dy, dz, heading]; returns an (N, M) area matrix.
std::vector<paddle::Tensor> boxes_overlap_bev_gpu(
    const paddle::Tensor &boxes_a, const paddle::Tensor &boxes_b);
// Pairwise rotated BEV IoU; returns an (N, M) IoU matrix.
std::vector<paddle::Tensor> boxes_iou_bev_gpu(
    const paddle::Tensor &boxes_a_tensor, const paddle::Tensor &boxes_b_tensor);
// NMS using rotated BEV IoU; outputs kept indices and their count.
std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes,
                                    float nms_overlap_thresh);
// NMS using axis-aligned (heading-ignored) BEV IoU.
std::vector<paddle::Tensor> nms_normal_gpu(const paddle::Tensor &boxes,
                                           float nms_overlap_thresh);
#endif
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/iou3d_nms/iou3d_cpu.h
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef IOU3D_CPU_H
#define IOU3D_CPU_H
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <paddle/extension.h>
#include <vector>
// CPU fallback: pairwise BEV IoU of rotated boxes; returns an (N, M) matrix.
std::vector<paddle::Tensor> boxes_iou_bev_cpu(
    const paddle::Tensor& boxes_a_tensor, const paddle::Tensor& boxes_b_tensor);
#endif
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/iou3d_nms/iou3d_nms_api.cpp
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <paddle/extension.h>
#include <vector>
#include "iou3d_cpu.h"
#include "iou3d_nms.h"
// The CPU BEV-IoU output inherits the dtype of the first input tensor.
std::vector<paddle::DataType> BoxesIouBevCpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  std::vector<paddle::DataType> out_dtypes{boxes_a_dtype};
  return out_dtypes;
}
// Output is an (N, M) matrix: one IoU per (box_a, box_b) pair.
std::vector<std::vector<int64_t>> BoxesIouBevCpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  const int64_t num_a = boxes_a_shape[0];
  const int64_t num_b = boxes_b_shape[0];
  return {{num_a, num_b}};
}
// Both NMS outputs (kept indices, count) are int64 tensors.
std::vector<paddle::DataType> NmsInferDtype(paddle::DataType boxes_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(paddle::DataType::INT64);
  out_dtypes.push_back(paddle::DataType::INT64);
  return out_dtypes;
}
// "keep" has one slot per input box; "num_to_keep" is a single element.
std::vector<std::vector<int64_t>> NmsInferShape(
    std::vector<int64_t> boxes_shape) {
  const int64_t num_boxes = boxes_shape[0];
  return {{num_boxes}, {1}};
}
// Both normal-NMS outputs (kept indices, count) are int64 tensors.
std::vector<paddle::DataType> NmsNormalInferDtype(
    paddle::DataType boxes_dtype) {
  std::vector<paddle::DataType> out_dtypes;
  out_dtypes.push_back(paddle::DataType::INT64);
  out_dtypes.push_back(paddle::DataType::INT64);
  return out_dtypes;
}
// "keep" has one slot per input box; "num_to_keep" is a single element.
std::vector<std::vector<int64_t>> NmsNormalInferShape(
    std::vector<int64_t> boxes_shape) {
  const int64_t num_boxes = boxes_shape[0];
  return {{num_boxes}, {1}};
}
// The GPU BEV-IoU output inherits the dtype of the first input tensor.
std::vector<paddle::DataType> BoxesIouBevGpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  std::vector<paddle::DataType> out_dtypes{boxes_a_dtype};
  return out_dtypes;
}
// Output is an (N, M) matrix: one IoU per (box_a, box_b) pair.
std::vector<std::vector<int64_t>> BoxesIouBevGpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  const int64_t num_a = boxes_a_shape[0];
  const int64_t num_b = boxes_b_shape[0];
  return {{num_a, num_b}};
}
// The overlap-area output inherits the dtype of the first input tensor.
std::vector<paddle::DataType> BoxesOverlapBevGpuInferDtype(
    paddle::DataType boxes_a_dtype, paddle::DataType boxes_b_dtype) {
  std::vector<paddle::DataType> out_dtypes{boxes_a_dtype};
  return out_dtypes;
}
// Output is an (N, M) matrix: one overlap area per (box_a, box_b) pair.
std::vector<std::vector<int64_t>> BoxesOverlapBevGpuInferShape(
    std::vector<int64_t> boxes_a_shape, std::vector<int64_t> boxes_b_shape) {
  const int64_t num_a = boxes_a_shape[0];
  const int64_t num_b = boxes_b_shape[0];
  return {{num_a, num_b}};
}
// Paddle custom-op registrations for the iou3d/NMS kernels.
// NOTE(review): several Inputs entries carry a leading space in the name
// (" boxes_b_tensor", " boxes_b") -- the Python callers presumably use the
// same padded name; confirm before normalizing, since the string is part of
// the op's registered interface.
PD_BUILD_OP(boxes_iou_bev_cpu)
    .Inputs({"boxes_a_tensor", " boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_cpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevCpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevCpuInferShape));
PD_BUILD_OP(boxes_iou_bev_gpu)
    .Inputs({"boxes_a_tensor", " boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevGpuInferShape));
PD_BUILD_OP(boxes_overlap_bev_gpu)
    .Inputs({"boxes_a", " boxes_b"})
    .Outputs({"ans_overlap"})
    .SetKernelFn(PD_KERNEL(boxes_overlap_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesOverlapBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesOverlapBevGpuInferShape));
PD_BUILD_OP(nms_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetKernelFn(PD_KERNEL(nms_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(NmsInferShape));
PD_BUILD_OP(nms_normal_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetInferShapeFn(PD_INFER_SHAPE(NmsNormalInferShape))
    .SetKernelFn(PD_KERNEL(nms_normal_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsNormalInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/roiaware_pool3d/box_utils.cc
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "paddle/extension.h"
#define CHECK_INPUT(x) PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")
// cuda kernel declaration
void points_in_boxes_cuda_launcher(const int batch_size, const int boxes_num,
const int pts_num, const float *boxes,
const float *pts, int *box_idx_of_points);
// op forward
// op forward: assign each LiDAR point to the first 3D box containing it.
std::vector<paddle::Tensor> points_in_boxes_cuda_forward(
    const paddle::Tensor &pts_tensor, const paddle::Tensor &boxes_tensor) {
  // boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
  // pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate
  // output: boxes_idx_of_points (B, npoints); -1 marks "in no box"
  CHECK_INPUT(boxes_tensor);
  CHECK_INPUT(pts_tensor);

  const auto &boxes_shape = boxes_tensor.shape();
  const int batch_size = boxes_shape[0];
  const int boxes_num = boxes_shape[1];
  const int pts_num = pts_tensor.shape()[1];

  // Every point starts unassigned (-1); the kernel writes the first hit.
  auto box_idx_of_points_tensor = paddle::full(
      {batch_size, pts_num}, -1, paddle::DataType::INT32, paddle::GPUPlace());

  points_in_boxes_cuda_launcher(batch_size, boxes_num, pts_num,
                                boxes_tensor.data<float>(),
                                pts_tensor.data<float>(),
                                box_idx_of_points_tensor.data<int>());
  return {box_idx_of_points_tensor};
}
// shape infer
// shape infer: one output slot per point, i.e. (batch, npoints).
std::vector<std::vector<int64_t>> PtInBoxInferShape(
    std::vector<int64_t> pts_shape, std::vector<int64_t> boxes_shape) {
  const int64_t batch = boxes_shape[0];
  const int64_t npoints = pts_shape[1];
  return {{batch, npoints}};
}
// dtype infer
// dtype infer: box indices are int32 regardless of input dtypes.
std::vector<paddle::DataType> PtInBoxInferDtype(paddle::DataType pts_dtype,
                                                paddle::DataType boxes_dtype) {
  return std::vector<paddle::DataType>{paddle::DataType::INT32};
}
// build op forward
// build op forward: register the CUDA points-in-boxes op with Paddle.
PD_BUILD_OP(points_in_boxes_gpu)
    .Inputs({"pts_tensor", "boxes_tensor"})
    .Outputs({"box_idx_of_points"})
    .SetKernelFn(PD_KERNEL(points_in_boxes_cuda_forward))
    .SetInferShapeFn(PD_INFER_SHAPE(PtInBoxInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(PtInBoxInferDtype));
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/ops/roiaware_pool3d/box_utils_gpu.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
RoI-aware point cloud feature pooling
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <math.h>
#include "paddle/extension.h"
#define THREADS_PER_BLOCK 512
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Rotates the already-center-shifted offset (shift_x, shift_y) by -rot_angle
// into the box's local frame, writing the result to local_x / local_y.
__device__ inline void lidar_to_local_coords(float shift_x, float shift_y,
                                             float rot_angle, float &local_x,
                                             float &local_y) {
  float cosa = cos(-rot_angle), sina = sin(-rot_angle);
  local_x = shift_x * cosa + shift_y * (-sina);
  local_y = shift_x * sina + shift_y * cosa;
}
// Returns 1 when `pt` lies inside the rotated 3D box `box3d` (with a small
// planar MARGIN), 0 otherwise. Also writes the point's box-frame planar
// coordinates into local_x / local_y for the caller.
__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d,
                                        float &local_x, float &local_y) {
  // pt: (x, y, z)
  // box3d: [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
  const float MARGIN = 1e-5;
  float x = pt[0], y = pt[1], z = pt[2];
  float cx = box3d[0], cy = box3d[1], cz = box3d[2];
  float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6];
  // Cheap vertical rejection before the planar rotation.
  if (fabsf(z - cz) > dz / 2.0) return 0;
  lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y);
  // Fix: the flag is boolean; keep it as int instead of the original
  // `float in_flag`, which round-tripped int -> float -> int for no reason.
  int in_flag =
      (fabs(local_x) < dx / 2.0 + MARGIN) & (fabs(local_y) < dy / 2.0 + MARGIN);
  return in_flag;
}
// One thread per point (grid row = batch item): scans the batch's boxes in
// order and records the index of the FIRST box containing the point.
__global__ void points_in_boxes_cuda_kernel(
    const int batch_size, const int boxes_num, const int pts_num,
    const float *boxes, const float *pts, int *box_idx_of_points) {
  // boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
  // pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate
  // output:
  //      boxes_idx_of_points: (B, npoints), default -1
  int bs_idx = blockIdx.y;
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (bs_idx >= batch_size || pt_idx >= pts_num) return;
  // Advance the raw pointers to this batch item (and this point).
  boxes += bs_idx * boxes_num * 7;
  pts += bs_idx * pts_num * 3 + pt_idx * 3;
  box_idx_of_points += bs_idx * pts_num + pt_idx;
  float local_x = 0, local_y = 0;
  int cur_in_flag = 0;
  for (int k = 0; k < boxes_num; k++) {
    cur_in_flag = check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y);
    if (cur_in_flag) {
      box_idx_of_points[0] = k;
      break;
    }
  }
}
// Host-side launcher: one thread per point, one grid row per batch item.
// Aborts the process on a launch error.
void points_in_boxes_cuda_launcher(const int batch_size, const int boxes_num,
                                   const int pts_num, const float *boxes,
                                   const float *pts, int *box_idx_of_points) {
  // boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
  // pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate
  // output: boxes_idx_of_points (B, npoints), default -1
  dim3 grid_dim(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size);
  dim3 block_dim(THREADS_PER_BLOCK);
  points_in_boxes_cuda_kernel<<<grid_dim, block_dim>>>(
      batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points);

  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/slim/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import yaml
def get_qat_config(qat_config_path: str) -> dict:
    """Load a QAT (quantization-aware training) slim config from a yaml file.

    Args:
        qat_config_path: Path to the yaml configuration file.

    Returns:
        The parsed configuration dict.

    Raises:
        ValueError: If the config's ``slim_type`` is not ``"QAT"``.
    """
    with codecs.open(qat_config_path, 'r', 'utf-8') as f:
        slim_dic = yaml.load(f, Loader=yaml.FullLoader)

    slim_type = slim_dic['slim_type']
    if slim_type != "QAT":
        # Bug fix: the placeholder was never filled, so the message printed
        # a literal "{}" instead of the unsupported slim type.
        raise ValueError(
            "slim method `{}` is not supported yet".format(slim_type))

    return slim_dic
def update_dic(dic, another_dic):
    """Recursively merge ``another_dic`` into ``dic`` in place.

    Nested dicts present in both arguments are merged key by key; any other
    value from ``another_dic`` overwrites the one in ``dic``. Returns ``dic``.
    """
    for key, value in another_dic.items():
        if key in dic and isinstance(dic.get(key), dict) and isinstance(
                value, dict):
            update_dic(dic[key], value)
        else:
            dic[key] = value
    return dic
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/slim/quant.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle3d.utils.logger import logger
class QAT:
    """Quantization-aware-training wrapper around ``paddleslim.QAT``.

    Calling the instance on a model quantizes it in place and returns it;
    ``save_quantized_model`` then exports the quantized network.
    """

    def __init__(self, quant_config, verbose: bool = False):
        # verbose=True logs the model before and after quantization.
        self.quant_config = quant_config
        self.verbose = verbose

    def __call__(self, model):
        self._maybe_log("model before quant", model)
        # lazy import: paddleslim is only required when quantization runs
        import paddleslim
        self.quanter = paddleslim.QAT(config=self.quant_config)
        self.quanter.quantize(model)
        self._maybe_log("model after quant", model)
        return model

    def _maybe_log(self, message, model):
        # Helper: emit the message and the model only in verbose mode.
        if self.verbose:
            logger.info(message)
            logger.info(model)

    def save_quantized_model(self, model, path, input_spec, **kwargs):
        """Export the quantized model via the underlying paddleslim quanter."""
        self.quanter.save_quantized_model(
            model=model, path=path, input_spec=input_spec, **kwargs)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/__init__.py
|
from collections.abc import Iterable
from typing import List, Union
def kitti_eval(gt_annos: List[dict],
               dt_annos: List[dict],
               current_classes: Union[None, int, List[int]] = (0, 1, 2),
               metric_types=("bbox", "bev", "3d"),
               recall_type='R40',
               z_axis=1,
               z_center=1.0):
    """Evaluate KITTI-format detections against ground-truth annotations.

    Normalizes ``current_classes`` to a list and forwards everything to the
    official KITTI evaluation implementation.
    """
    # Deferred import keeps the evaluator out of module import time.
    from paddle3d.thirdparty.kitti_object_eval_python.eval import \
        get_official_eval_result

    classes = current_classes
    if not isinstance(classes, Iterable):
        classes = [classes]
    return get_official_eval_result(
        gt_annos,
        dt_annos,
        current_classes=classes,
        metric_types=metric_types,
        recall_type=recall_type,
        z_axis=z_axis,
        z_center=z_center)
def apollo_eval(gt_annos: List[dict],
                dt_annos: List[dict],
                current_classes: Union[None, int, List[int]] = (0, 1, 2),
                metric_types=("bbox", "bev", "3d"),
                recall_type='R40',
                z_axis=1,
                z_center=1.0):
    """Evaluate Apollo-format detections against ground-truth annotations.

    Normalizes ``current_classes`` to a list and forwards everything to the
    Apollo evaluation implementation.
    """
    # Deferred import keeps the evaluator out of module import time.
    from paddle3d.thirdparty.apollo_object_eval_python.apollo_eval import \
        get_official_eval_result

    classes = current_classes
    if not isinstance(classes, Iterable):
        classes = [classes]
    return get_official_eval_result(
        gt_annos,
        dt_annos,
        current_classes=classes,
        metric_types=metric_types,
        recall_type=recall_type,
        z_axis=z_axis,
        z_center=z_center)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/kitti_object_eval_python/LICENSE
|
MIT License
Copyright (c) 2018
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/kitti_object_eval_python/README.md
|
# Note
This code is from [traveller59/kitti-object-eval-python](https://github.com/traveller59/kitti-object-eval-python)
# kitti-object-eval-python
Fast KITTI object detection evaluation in Python (finishes in less than 10 seconds); supports 2D/BEV/3D/AOS metrics and coco-style AP. If you use the command-line interface, numba needs some time to compile jit functions.
_WARNING_: The "coco" isn't official metrics. Only "AP(Average Precision)" is.
## Dependencies
Only support python 3.6+, need `numpy`, `skimage`, `numba`, `fire`, `scipy`. If you have Anaconda, just install `cudatoolkit` in anaconda. Otherwise, please reference to this [page](https://github.com/numba/numba#custom-python-environments) to set up llvm and cuda for numba.
* Install by conda:
```
conda install -c numba cudatoolkit=x.x (8.0, 9.0, 10.0, depend on your environment)
```
## Usage
* commandline interface:
```
python evaluate.py evaluate --label_path=/path/to/your_gt_label_folder --result_path=/path/to/your_result_folder --label_split_file=/path/to/val.txt --current_class=0 --coco=False
```
* python interface:
```Python
import kitti_common as kitti
from eval import get_official_eval_result, get_coco_eval_result
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
det_path = "/path/to/your_result_folder"
dt_annos = kitti.get_label_annos(det_path)
gt_path = "/path/to/your_gt_label_folder"
gt_split_file = "/path/to/val.txt" # from https://xiaozhichen.github.io/files/mv3d/imagesets.tar.gz
val_image_ids = _read_imageset_file(gt_split_file)
gt_annos = kitti.get_label_annos(gt_path, val_image_ids)
print(get_official_eval_result(gt_annos, dt_annos, 0)) # 6s in my computer
print(get_coco_eval_result(gt_annos, dt_annos, 0)) # 18s in my computer
```
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/kitti_object_eval_python/kitti_common.py
|
import concurrent.futures as futures
import os
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def get_image_index_str(img_idx):
    """Render a KITTI frame index as its zero-padded 6-digit file stem."""
    return f"{img_idx:06d}"
def get_kitti_info_path(idx,
                        prefix,
                        info_type='image_2',
                        file_tail='.png',
                        training=True,
                        relative_path=True):
    """Compose the path of a single KITTI data file and verify it exists.

    Args:
        idx: integer frame index (zero-padded to 6 digits).
        prefix: dataset root directory.
        info_type: sub-folder, e.g. 'image_2', 'label_2', 'calib'.
        file_tail: file extension including the dot.
        training: use the 'training' split instead of 'testing'.
        relative_path: return the path relative to ``prefix``.

    Raises:
        ValueError: when the composed file does not exist.
    """
    split = 'training' if training else 'testing'
    fname = "{:06d}".format(idx) + file_tail
    rel = pathlib.Path(split) / info_type / fname
    root = pathlib.Path(prefix)
    if not (root / rel).exists():
        raise ValueError("file not exist: {}".format(rel))
    return str(rel) if relative_path else str(root / rel)
def get_image_path(idx, prefix, training=True, relative_path=True):
    """Path of the left color image (image_2/*.png) for frame ``idx``."""
    return get_kitti_info_path(idx, prefix, info_type='image_2',
                               file_tail='.png', training=training,
                               relative_path=relative_path)
def get_label_path(idx, prefix, training=True, relative_path=True):
    """Path of the label file (label_2/*.txt) for frame ``idx``."""
    return get_kitti_info_path(idx, prefix, info_type='label_2',
                               file_tail='.txt', training=training,
                               relative_path=relative_path)
def get_velodyne_path(idx, prefix, training=True, relative_path=True):
    """Path of the lidar point cloud (velodyne/*.bin) for frame ``idx``."""
    return get_kitti_info_path(idx, prefix, info_type='velodyne',
                               file_tail='.bin', training=training,
                               relative_path=relative_path)
def get_calib_path(idx, prefix, training=True, relative_path=True):
    """Path of the calibration file (calib/*.txt) for frame ``idx``."""
    return get_kitti_info_path(idx, prefix, info_type='calib',
                               file_tail='.txt', training=training,
                               relative_path=relative_path)
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_kitti_image_info(path,
                         training=True,
                         label_info=True,
                         velodyne=False,
                         calib=False,
                         image_ids=7481,
                         extend_matrix=True,
                         num_worker=8,
                         relative_path=True,
                         with_imageshape=True):
    """Collect per-frame KITTI metadata (paths, image shape, labels, calib).

    Args:
        path: KITTI root directory containing training/ and testing/.
        training: read from the training split if True, else testing.
        label_info: parse label_2 files into each info dict under 'annos'.
        velodyne: record the velodyne .bin path per frame.
        calib: parse the calib file (P0-P3, R0_rect, Tr_* matrices).
        image_ids: list of frame indices, or an int N meaning range(N).
        extend_matrix: pad 3x4 / 3x3 calibration matrices to 4x4.
        num_worker: thread-pool size used to process frames concurrently.
        relative_path: store paths relative to ``path`` instead of absolute.
        with_imageshape: read each image to record its (H, W) shape.

    Returns:
        list of per-frame dicts keyed like 'img_path', 'calib/P2', 'annos'.
    """
    # image_infos = []
    root_path = pathlib.Path(path)
    if not isinstance(image_ids, list):
        image_ids = list(range(image_ids))
    def map_func(idx):
        # Build the info dict for a single frame; runs in a worker thread.
        image_info = {'image_idx': idx}
        annotations = None
        if velodyne:
            image_info['velodyne_path'] = get_velodyne_path(
                idx, path, training, relative_path)
        image_info['img_path'] = get_image_path(idx, path, training,
                                                relative_path)
        if with_imageshape:
            img_path = image_info['img_path']
            if relative_path:
                img_path = str(root_path / img_path)
            image_info['img_shape'] = np.array(
                io.imread(img_path).shape[:2], dtype=np.int32)
        if label_info:
            label_path = get_label_path(idx, path, training, relative_path)
            if relative_path:
                label_path = str(root_path / label_path)
            annotations = get_label_anno(label_path)
        if calib:
            # Each calib line has the form "<name>: v0 v1 ... v11"; the
            # slice [1:13] skips the name token.
            calib_path = get_calib_path(
                idx, path, training, relative_path=False)
            with open(calib_path, 'r') as f:
                lines = f.readlines()
            P0 = np.array(
                [float(info) for info in lines[0].split(' ')[1:13]]).reshape(
                    [3, 4])
            P1 = np.array(
                [float(info) for info in lines[1].split(' ')[1:13]]).reshape(
                    [3, 4])
            P2 = np.array(
                [float(info) for info in lines[2].split(' ')[1:13]]).reshape(
                    [3, 4])
            P3 = np.array(
                [float(info) for info in lines[3].split(' ')[1:13]]).reshape(
                    [3, 4])
            if extend_matrix:
                P0 = _extend_matrix(P0)
                P1 = _extend_matrix(P1)
                P2 = _extend_matrix(P2)
                P3 = _extend_matrix(P3)
            image_info['calib/P0'] = P0
            image_info['calib/P1'] = P1
            image_info['calib/P2'] = P2
            image_info['calib/P3'] = P3
            R0_rect = np.array([
                float(info) for info in lines[4].split(' ')[1:10]
            ]).reshape([3, 3])
            if extend_matrix:
                # Embed the 3x3 rectification matrix into a 4x4 identity-like
                # homogeneous transform.
                rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
                rect_4x4[3, 3] = 1.
                rect_4x4[:3, :3] = R0_rect
            else:
                rect_4x4 = R0_rect
            image_info['calib/R0_rect'] = rect_4x4
            Tr_velo_to_cam = np.array([
                float(info) for info in lines[5].split(' ')[1:13]
            ]).reshape([3, 4])
            Tr_imu_to_velo = np.array([
                float(info) for info in lines[6].split(' ')[1:13]
            ]).reshape([3, 4])
            if extend_matrix:
                Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
                Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
            image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
            image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
        if annotations is not None:
            image_info['annos'] = annotations
            # Adds the 'difficulty' array in place.
            add_difficulty_to_annos(image_info)
        return image_info
    with futures.ThreadPoolExecutor(num_worker) as executor:
        image_infos = executor.map(map_func, image_ids)
    return list(image_infos)
def filter_kitti_anno(image_anno,
                      used_classes,
                      used_difficulty=None,
                      dontcare_iou=None):
    """Filter one image's annotations by class, difficulty and DontCare IoU.

    Filtering happens in three order-dependent stages over progressively
    smaller arrays: (1) keep only `used_classes`; (2) optionally keep only
    `used_difficulty` levels; (3) optionally drop boxes whose IoU with a
    remaining 'DontCare' region exceeds `dontcare_iou`.  Each stage's index
    list is computed against the already-filtered arrays, so the stage
    order must not change.
    """
    if not isinstance(used_classes, (list, tuple)):
        used_classes = [used_classes]
    img_filtered_annotations = {}
    relevant_annotation_indices = [
        i for i, x in enumerate(image_anno['name']) if x in used_classes
    ]
    for key in image_anno.keys():
        img_filtered_annotations[key] = (
            image_anno[key][relevant_annotation_indices])
    if used_difficulty is not None:
        relevant_annotation_indices = [
            i for i, x in enumerate(img_filtered_annotations['difficulty'])
            if x in used_difficulty
        ]
        for key in image_anno.keys():
            img_filtered_annotations[key] = (
                img_filtered_annotations[key][relevant_annotation_indices])
    if 'DontCare' in used_classes and dontcare_iou is not None:
        dont_care_indices = [
            i for i, x in enumerate(img_filtered_annotations['name'])
            if x == 'DontCare'
        ]
        # bounding box format [y_min, x_min, y_max, x_max]
        all_boxes = img_filtered_annotations['bbox']
        ious = iou(all_boxes, all_boxes[dont_care_indices])
        # Remove all bounding boxes that overlap with a dontcare region.
        if ious.size > 0:
            boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou
            for key in image_anno.keys():
                img_filtered_annotations[key] = (img_filtered_annotations[key][
                    np.logical_not(boxes_to_remove)])
    return img_filtered_annotations
def filter_annos_low_score(image_annos, thresh):
    """Drop detections whose score is below ``thresh`` from every frame."""
    filtered = []
    for anno in image_annos:
        keep = [i for i, score in enumerate(anno['score']) if score >= thresh]
        filtered.append({key: values[keep] for key, values in anno.items()})
    return filtered
def kitti_result_line(result_dict, precision=4):
    """Serialize one detection into a KITTI label-format line.

    Fields missing from ``result_dict`` fall back to KITTI "invalid"
    sentinels; fields whose sentinel is None (name, bbox, score) must be
    supplied by the caller, otherwise ValueError is raised.
    """
    prec_float = "{" + ":.{}f".format(precision) + "}"
    all_field_default = OrderedDict([
        ('name', None),
        ('truncated', -1),
        ('occluded', -1),
        ('alpha', -10),
        ('bbox', None),
        ('dimensions', [-1, -1, -1]),
        ('location', [-1000, -1000, -1000]),
        ('rotation_y', -10),
        ('score', None),
    ])
    values = OrderedDict((field, None) for field in all_field_default)
    for field, supplied in result_dict.items():
        if all_field_default[field] is None and supplied is None:
            raise ValueError("you must specify a value for {}".format(field))
        values[field] = supplied
    pieces = []
    for field, supplied in values.items():
        if field == 'name':
            pieces.append(supplied)
        elif field in ('truncated', 'alpha', 'rotation_y', 'score'):
            if supplied is None:
                pieces.append(str(all_field_default[field]))
            else:
                pieces.append(prec_float.format(supplied))
        elif field == 'occluded':
            if supplied is None:
                pieces.append(str(all_field_default[field]))
            else:
                pieces.append('{}'.format(supplied))
        elif field in ('bbox', 'dimensions', 'location'):
            if supplied is None:
                pieces.extend(str(v) for v in all_field_default[field])
            else:
                pieces.extend(prec_float.format(v) for v in supplied)
        else:
            raise ValueError("unknown key. supported key:{}".format(
                values.keys()))
    return ' '.join(pieces)
def add_difficulty_to_annos(info):
    """Assign a KITTI difficulty level to every box in ``info['annos']``.

    Levels: 0 = easy, 1 = moderate, 2 = hard, -1 = ignored.  A box
    qualifies for a level when its 2D height, occlusion and truncation
    are all within that level's official thresholds.

    Args:
        info: frame info dict whose 'annos' entry holds 'dimensions',
            'bbox', 'occluded' and 'truncated' arrays.

    Returns:
        list of per-box difficulty codes; also stored as the int32 array
        ``annos['difficulty']``.
    """
    min_height = [40, 25,
                  25]  # minimum height for evaluated groundtruth/detections
    max_occlusion = [
        0, 1, 2
    ]  # maximum occlusion level of the groundtruth used for evaluation
    max_trunc = [
        0.15, 0.3, 0.5
    ]  # maximum truncation level of the groundtruth used for evaluation
    annos = info['annos']
    dims = annos['dimensions']  # lhw format
    bbox = annos['bbox']
    height = bbox[:, 3] - bbox[:, 1]
    occlusion = annos['occluded']
    truncation = annos['truncated']
    diff = []
    # FIX: np.bool was deprecated and removed in NumPy >= 1.24; the builtin
    # bool is the documented replacement and gives an identical dtype.
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
    for i, (h, o, t) in enumerate(zip(height, occlusion, truncation)):
        if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
            easy_mask[i] = False
        if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
            moderate_mask[i] = False
        if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
            hard_mask[i] = False
    is_easy = easy_mask
    # xor isolates boxes that qualify for exactly this level and no easier one
    is_moderate = np.logical_xor(easy_mask, moderate_mask)
    is_hard = np.logical_xor(hard_mask, moderate_mask)
    for i in range(len(dims)):
        if is_easy[i]:
            diff.append(0)
        elif is_moderate[i]:
            diff.append(1)
        elif is_hard[i]:
            diff.append(2)
        else:
            diff.append(-1)
    annos["difficulty"] = np.array(diff, np.int32)
    return diff
def get_label_anno(label_path):
    """Parse one KITTI label .txt file into a dict of numpy arrays.

    Dimensions are converted from the file's hwl order to the standard
    lhw (camera) order.  A 'score' entry is filled with zeros when the
    file has no 16th (score) column.
    """
    with open(label_path, 'r') as f:
        rows = [line.strip().split(' ') for line in f.readlines()]
    annotations = {
        'name': np.array([row[0] for row in rows]),
        'truncated': np.array([float(row[1]) for row in rows]),
        'occluded': np.array([int(row[2]) for row in rows]),
        'alpha': np.array([float(row[3]) for row in rows]),
        'bbox': np.array([[float(v) for v in row[4:8]]
                          for row in rows]).reshape(-1, 4),
        # hwl (file order) -> lhw (camera convention)
        'dimensions': np.array([[float(v) for v in row[8:11]]
                                for row in rows]).reshape(-1, 3)[:, [2, 0, 1]],
        'location': np.array([[float(v) for v in row[11:14]]
                              for row in rows]).reshape(-1, 3),
        'rotation_y': np.array([float(row[14]) for row in rows]).reshape(-1),
    }
    if rows and len(rows[0]) == 16:  # detection files carry a score column
        annotations['score'] = np.array([float(row[15]) for row in rows])
    else:
        annotations['score'] = np.zeros([len(annotations['bbox'])])
    return annotations
def get_label_annos(label_folder, image_ids=None):
    """Load annotations for a folder of KITTI label files.

    When ``image_ids`` is None every 6-digit .txt file in the folder is
    used (sorted); an int is expanded to ``range(int)``.
    """
    folder = pathlib.Path(label_folder)
    if image_ids is None:
        pattern = re.compile(r'^\d{6}.txt$')
        matching = [p for p in folder.glob('*.txt') if pattern.match(p.name)]
        image_ids = sorted(int(p.stem) for p in matching)
    if not isinstance(image_ids, list):
        image_ids = list(range(image_ids))
    return [
        get_label_anno(folder / (get_image_index_str(idx) + '.txt'))
        for idx in image_ids
    ]
def area(boxes, add1=False):
    """Compute the area of each box in an [N, 4] array.

    Boxes are [y_min, x_min, y_max, x_max]; with ``add1`` the legacy
    inclusive-pixel convention (+1 per side) is used.

    Returns:
        numpy array of shape [N] with the box areas.
    """
    heights = boxes[:, 2] - boxes[:, 0]
    widths = boxes[:, 3] - boxes[:, 1]
    if add1:
        heights = heights + 1.0
        widths = widths + 1.0
    return heights * widths
def intersection(boxes1, boxes2, add1=False):
    """Compute pairwise intersection areas between two box collections.

    Args:
        boxes1: [N, 4] array of [y_min, x_min, y_max, x_max] boxes.
        boxes2: [M, 4] array of boxes.
        add1: use the legacy inclusive-pixel (+1) convention.

    Returns:
        [N, M] array of pairwise intersection areas.
    """
    y_min1, x_min1, y_max1, x_max1 = np.split(boxes1, 4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = np.split(boxes2, 4, axis=1)
    pad = 1.0 if add1 else 0.0
    heights = (np.minimum(y_max1, y_max2.T) + pad
               - np.maximum(y_min1, y_min2.T))
    widths = (np.minimum(x_max1, x_max2.T) + pad
              - np.maximum(x_min1, x_min2.T))
    # negative extents mean the boxes are disjoint along that axis
    return np.clip(heights, 0, None) * np.clip(widths, 0, None)
def iou(boxes1, boxes2, add1=False):
    """Compute pairwise intersection-over-union between box collections.

    Args:
        boxes1: [N, 4] array of boxes.
        boxes2: [M, 4] array of boxes.
        add1: use the legacy inclusive-pixel (+1) convention.

    Returns:
        [N, M] array of pairwise IoU scores.
    """
    inter = intersection(boxes1, boxes2, add1)
    union = (area(boxes1, add1)[:, np.newaxis]
             + area(boxes2, add1)[np.newaxis, :] - inter)
    return inter / union
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/kitti_object_eval_python/evaluate.py
|
import time
import fire
import kitti_common as kitti
from eval import get_official_eval_result, get_coco_eval_result
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def evaluate(label_path,
             result_path,
             label_split_file,
             current_class=0,
             coco=False,
             score_thresh=-1):
    """Run KITTI evaluation and print the result.

    Compares detections in ``result_path`` against ground truth in
    ``label_path``, restricted to the frames listed in
    ``label_split_file``.  Detections below ``score_thresh`` are dropped
    first (when the threshold is positive); ``coco`` selects the
    coco-style report instead of the official one.
    """
    dt_annos = kitti.get_label_annos(result_path)
    if score_thresh > 0:
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
    gt_annos = kitti.get_label_annos(label_path,
                                     _read_imageset_file(label_split_file))
    report = get_coco_eval_result if coco else get_official_eval_result
    print(report(gt_annos, dt_annos, current_class))
# CLI entry point: python-fire exposes the module-level functions
# (e.g. ``evaluate``) as sub-commands.
if __name__ == '__main__':
    fire.Fire()
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/kitti_object_eval_python/eval.py
|
import io as sysio
import time
from collections import OrderedDict
import numba
import numpy as np
from scipy.interpolate import interp1d
from .rotate_iou import rotate_iou_gpu_eval
def get_mAP(prec):
    """11-point interpolated AP in percent: average the precision at
    every 4th of the 41 recall sample points."""
    return sum(prec[i] for i in range(0, len(prec), 4)) / 11 * 100
@numba.jit
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
    """Pick up to ``num_sample_pts`` score thresholds sampling recall
    roughly uniformly in [0, 1].

    NOTE: numba-jitted; the imperative style is kept deliberately so the
    jit compilation stays valid.

    Args:
        scores: scores of matched true-positive detections
            (sorted in place, then traversed descending).
        num_gt: total number of valid ground-truth boxes.
        num_sample_pts: number of recall sample points (41 -> R40/R11).
    """
    scores.sort()
    scores = scores[::-1]
    current_recall = 0
    thresholds = []
    for i, score in enumerate(scores):
        l_recall = (i + 1) / num_gt
        if i < (len(scores) - 1):
            r_recall = (i + 2) / num_gt
        else:
            r_recall = l_recall
        # skip while the next detection brings recall closer to the target
        if (((r_recall - current_recall) < (current_recall - l_recall))
                and (i < (len(scores) - 1))):
            continue
        # recall = l_recall
        thresholds.append(score)
        current_recall += 1 / (num_sample_pts - 1.0)
    # print(len(thresholds), len(scores), num_gt)
    return thresholds
def clean_data(gt_anno, dt_anno, current_class, difficulty):
    """Split one frame's boxes into valid / ignored / don't-care sets.

    Ignore codes: 0 = evaluate, 1 = ignore without penalty (similar
    class, or harder than ``difficulty``), -1 = discard entirely.

    Returns:
        (num_valid_gt, ignored_gt, ignored_dt, dc_bboxes)
    """
    CLASS_NAMES = [
        'car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'car',
        'tractor', 'trailer'
    ]
    MIN_HEIGHT = [40, 25, 25]
    MAX_OCCLUSION = [0, 1, 2]
    MAX_TRUNCATION = [0.15, 0.3, 0.5]
    dc_bboxes, ignored_gt, ignored_dt = [], [], []
    current_cls_name = CLASS_NAMES[current_class].lower()
    num_valid_gt = 0
    for i in range(len(gt_anno["name"])):
        gt_name = gt_anno["name"][i].lower()
        box = gt_anno["bbox"][i]
        box_height = box[3] - box[1]
        # 1: exact class match, 0: neighboring class (ignored, no penalty)
        if gt_name == current_cls_name:
            valid_class = 1
        elif current_cls_name == 'pedestrian' and gt_name == 'person_sitting':
            valid_class = 0
        elif current_cls_name == 'car' and gt_name == 'van':
            valid_class = 0
        else:
            valid_class = -1
        too_hard = (gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty]
                    or gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty]
                    or box_height <= MIN_HEIGHT[difficulty])
        if valid_class == 1 and not too_hard:
            ignored_gt.append(0)
            num_valid_gt += 1
        elif valid_class == 0 or (too_hard and valid_class == 1):
            ignored_gt.append(1)
        else:
            ignored_gt.append(-1)
        if gt_anno["name"][i] == "DontCare":
            dc_bboxes.append(gt_anno["bbox"][i])
    for i in range(len(dt_anno["name"])):
        class_matches = dt_anno["name"][i].lower() == current_cls_name
        det_height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
        if det_height < MIN_HEIGHT[difficulty]:
            # too small to be reliably evaluated at this difficulty
            ignored_dt.append(1)
        elif class_matches:
            ignored_dt.append(0)
        else:
            ignored_dt.append(-1)
    return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
@numba.jit(nopython=True)
def image_box_overlap(boxes, query_boxes, criterion=-1):
    """2D box overlap between [N, 4] ``boxes`` and [K, 4] ``query_boxes``.

    ``criterion`` selects the denominator: -1 = union (IoU), 0 = area of
    ``boxes``, 1 = area of ``query_boxes``, anything else = raw
    intersection area.  numba nopython-jitted.
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    for k in range(K):
        qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
                     (query_boxes[k, 3] - query_boxes[k, 1]))
        for n in range(N):
            # intersection width; non-positive means the boxes are disjoint
            iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
                boxes[n, 0], query_boxes[k, 0]))
            if iw > 0:
                ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
                    boxes[n, 1], query_boxes[k, 1]))
                if ih > 0:
                    if criterion == -1:
                        ua = ((boxes[n, 2] - boxes[n, 0]) *
                              (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
                    elif criterion == 0:
                        ua = ((boxes[n, 2] - boxes[n, 0]) *
                              (boxes[n, 3] - boxes[n, 1]))
                    elif criterion == 1:
                        ua = qbox_area
                    else:
                        ua = 1.0
                    overlaps[n, k] = iw * ih / ua
    return overlaps
def bev_box_overlap(boxes, qboxes, criterion=-1):
    """Rotated bird's-eye-view overlap between two sets of 5-DoF boxes,
    delegating to the GPU rotated-IoU kernel."""
    return rotate_iou_gpu_eval(boxes, qboxes, criterion)
@numba.jit(nopython=True, parallel=True)
def d3_box_overlap_kernel(boxes,
                          qboxes,
                          rinc,
                          criterion=-1,
                          z_axis=1,
                          z_center=1.0):
    """Refine rotated-BEV intersection areas in ``rinc`` into 3D overlaps.

    On entry rinc[i, j] holds the BEV intersection area of boxes[i] and
    qboxes[j]; on exit it holds the 3D overlap ratio (IoU for
    criterion == -1).  Boxes are 7-DoF [xyz, dims, ry].
    numba nopython-jitted; modified in place.

    z_axis: the z (height) axis.
    z_center: unified z (height) center of box.
    """
    N, K = boxes.shape[0], qboxes.shape[0]
    for i in range(N):
        for j in range(K):
            if rinc[i, j] > 0:
                # height-interval overlap; NOTE(review): min_z/max_z naming
                # follows the camera-axis convention (axis pointing down
                # with z_center=1.0) — confirm before renaming.
                min_z = min(
                    boxes[i, z_axis] + boxes[i, z_axis + 3] * (1 - z_center),
                    qboxes[j, z_axis] + qboxes[j, z_axis + 3] * (1 - z_center))
                max_z = max(
                    boxes[i, z_axis] - boxes[i, z_axis + 3] * z_center,
                    qboxes[j, z_axis] - qboxes[j, z_axis + 3] * z_center)
                iw = min_z - max_z
                if iw > 0:
                    area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
                    area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
                    inc = iw * rinc[i, j]
                    if criterion == -1:
                        ua = (area1 + area2 - inc)
                    elif criterion == 0:
                        ua = area1
                    elif criterion == 1:
                        ua = area2
                    else:
                        ua = 1.0
                    rinc[i, j] = inc / ua
                else:
                    rinc[i, j] = 0.0
def d3_box_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0):
    """3D IoU of 7-DoF boxes: rotated BEV intersection refined by the
    height overlap along ``z_axis`` (kitti camera format uses z_axis=1).
    """
    # drop the height center and height extent columns to get BEV boxes
    bev_axes = [axis for axis in range(7) if axis not in (z_axis, z_axis + 3)]
    rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2)
    d3_box_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center)
    return rinc
@numba.jit(nopython=True)
def compute_statistics_jit(overlaps,
                           gt_datas,
                           dt_datas,
                           ignored_gt,
                           ignored_det,
                           dc_bboxes,
                           metric,
                           min_overlap,
                           thresh=0,
                           compute_fp=False,
                           compute_aos=False):
    """Match detections to ground truth for one image at one threshold.

    numba nopython-jitted; keep the imperative style.

    Args:
        overlaps: [num_dt, num_gt] overlap matrix for this image.
        gt_datas: [num_gt, 5] array (bbox, alpha).
        dt_datas: [num_dt, 6] array (bbox, alpha, score).
        ignored_gt / ignored_det: per-box codes from clean_data
            (0 evaluate, 1 ignore, -1 discard).
        dc_bboxes: don't-care regions (used only for metric 0 / bbox).
        metric: 0 bbox, 1 bev, 2 3d.
        min_overlap: overlap required for a match.
        thresh: score threshold below which detections are ignored
            (only when compute_fp).
        compute_fp: also count false positives (second pass of the eval).
        compute_aos: accumulate orientation similarity for AOS.

    Returns:
        (tp, fp, fn, similarity, thresholds) where ``thresholds`` holds
        the scores of matched true positives and ``similarity`` is the
        summed AOS similarity (or -1 when unused).
    """
    det_size = dt_datas.shape[0]
    gt_size = gt_datas.shape[0]
    dt_scores = dt_datas[:, -1]
    dt_alphas = dt_datas[:, 4]
    gt_alphas = gt_datas[:, 4]
    dt_bboxes = dt_datas[:, :4]
    # gt_bboxes = gt_datas[:, :4]
    assigned_detection = [False] * det_size
    ignored_threshold = [False] * det_size
    if compute_fp:
        for i in range(det_size):
            if (dt_scores[i] < thresh):
                ignored_threshold[i] = True
    NO_DETECTION = -10000000
    tp, fp, fn, similarity = 0, 0, 0, 0
    # thresholds = [0.0]
    # delta = [0.0]
    thresholds = np.zeros((gt_size, ))
    thresh_idx = 0
    delta = np.zeros((gt_size, ))
    delta_idx = 0
    # greedy assignment: each ground truth picks its best detection
    for i in range(gt_size):
        if ignored_gt[i] == -1:
            continue
        det_idx = -1
        valid_detection = NO_DETECTION
        max_overlap = 0
        assigned_ignored_det = False
        for j in range(det_size):
            if (ignored_det[j] == -1):
                continue
            if (assigned_detection[j]):
                continue
            if (ignored_threshold[j]):
                continue
            overlap = overlaps[j, i]
            dt_score = dt_scores[j]
            if (not compute_fp and (overlap > min_overlap)
                    and dt_score > valid_detection):
                # first pass: highest-scoring detection above min_overlap
                det_idx = j
                valid_detection = dt_score
            elif (compute_fp and (overlap > min_overlap)
                  and (overlap > max_overlap or assigned_ignored_det)
                  and ignored_det[j] == 0):
                # second pass: prefer the largest-overlap evaluated detection
                max_overlap = overlap
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = False
            elif (compute_fp and (overlap > min_overlap)
                  and (valid_detection == NO_DETECTION)
                  and ignored_det[j] == 1):
                # fall back to an ignored detection so it is not counted as fp
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = True
        if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
            fn += 1
        elif ((valid_detection != NO_DETECTION)
              and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
            assigned_detection[det_idx] = True
        elif valid_detection != NO_DETECTION:
            # only a tp add a threshold.
            tp += 1
            # thresholds.append(dt_scores[det_idx])
            thresholds[thresh_idx] = dt_scores[det_idx]
            thresh_idx += 1
            if compute_aos:
                # delta.append(gt_alphas[i] - dt_alphas[det_idx])
                delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
                delta_idx += 1
            assigned_detection[det_idx] = True
    if compute_fp:
        # unassigned, evaluated detections above threshold are false positives
        for i in range(det_size):
            if (not (assigned_detection[i] or ignored_det[i] == -1
                     or ignored_det[i] == 1 or ignored_threshold[i])):
                fp += 1
        nstuff = 0
        if metric == 0:
            # for bbox eval, detections inside don't-care regions are excused
            overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
            for i in range(dc_bboxes.shape[0]):
                for j in range(det_size):
                    if (assigned_detection[j]):
                        continue
                    if (ignored_det[j] == -1 or ignored_det[j] == 1):
                        continue
                    if (ignored_threshold[j]):
                        continue
                    if overlaps_dt_dc[j, i] > min_overlap:
                        assigned_detection[j] = True
                        nstuff += 1
        fp -= nstuff
        if compute_aos:
            tmp = np.zeros((fp + delta_idx, ))
            # tmp = [0] * fp
            for i in range(delta_idx):
                tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
                # tmp.append((1.0 + np.cos(delta[i])) / 2.0)
            # assert len(tmp) == fp + tp
            # assert len(delta) == tp
            if tp > 0 or fp > 0:
                similarity = np.sum(tmp)
            else:
                similarity = -1
    return tp, fp, fn, similarity, thresholds[:thresh_idx]
def get_split_parts(num, num_part):
    """Split ``num`` examples into chunk sizes for batched processing.

    Args:
        num: total number of examples.
        num_part: desired number of (roughly equal) parts.

    Returns:
        list of chunk sizes summing to ``num``.
    """
    same_part = num // num_part
    remain_num = num % num_part
    # FIX: when num < num_part the old code returned num_part zero-sized
    # chunks (plus the remainder), and empty chunks break the downstream
    # np.concatenate over per-part annotation lists.  Behavior for
    # num >= num_part is unchanged.
    if same_part == 0:
        return [num]
    if remain_num == 0:
        return [same_part] * num_part
    return [same_part] * num_part + [remain_num]
@numba.jit(nopython=True)
def fused_compute_statistics(overlaps,
                             pr,
                             gt_nums,
                             dt_nums,
                             dc_nums,
                             gt_datas,
                             dt_datas,
                             dontcares,
                             ignored_gts,
                             ignored_dets,
                             metric,
                             min_overlap,
                             thresholds,
                             compute_aos=False):
    """Accumulate PR-table entries into ``pr`` for a packed part of images.

    ``overlaps`` and the data arrays concatenate several images; the
    per-image slices are recovered from gt_nums/dt_nums/dc_nums.  For
    every image and every score threshold the per-image statistics are
    added into pr[t] = [tp, fp, fn, similarity].  numba nopython-jitted;
    ``pr`` is modified in place.
    """
    gt_num = 0
    dt_num = 0
    dc_num = 0
    for i in range(gt_nums.shape[0]):
        for t, thresh in enumerate(thresholds):
            # slice this image's overlap block out of the packed matrix
            overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:gt_num +
                               gt_nums[i]]
            gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
            dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
            ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
            ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
            dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
            tp, fp, fn, similarity, _ = compute_statistics_jit(
                overlap,
                gt_data,
                dt_data,
                ignored_gt,
                ignored_det,
                dontcare,
                metric,
                min_overlap=min_overlap,
                thresh=thresh,
                compute_fp=True,
                compute_aos=compute_aos)
            pr[t, 0] += tp
            pr[t, 1] += fp
            pr[t, 2] += fn
            if similarity != -1:
                pr[t, 3] += similarity
        gt_num += gt_nums[i]
        dt_num += dt_nums[i]
        dc_num += dc_nums[i]
def calculate_iou_partly(dt_annos,
                         gt_annos,
                         metric,
                         num_parts=50,
                         z_axis=1,
                         z_center=1.0):
    """fast iou algorithm. this function can be used independently to
    do result analysis.
    Args:
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        num_parts: int. a parameter for fast calculate algorithm
        z_axis: height axis. kitti camera use 1, lidar use 2.

    Returns:
        overlaps: per-image [num_dt, num_gt] overlap matrices.
        parted_overlaps: one packed overlap matrix per part.
        total_dt_num / total_gt_num: per-image box counts.
    """
    assert len(dt_annos) == len(gt_annos)
    total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
    total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
    num_examples = len(gt_annos)
    split_parts = get_split_parts(num_examples, num_parts)
    parted_overlaps = []
    example_idx = 0
    # BEV uses the two location axes orthogonal to the height axis
    bev_axes = list(range(3))
    bev_axes.pop(z_axis)
    # First pass: one packed overlap matrix per part (batching keeps the
    # rotated-IoU kernel calls large and fast).
    for num_part in split_parts:
        gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        if metric == 0:
            gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
            dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
            overlap_part = image_box_overlap(dt_boxes, gt_boxes)
        elif metric == 1:
            loc = np.concatenate(
                [a["location"][:, bev_axes] for a in gt_annos_part], 0)
            dims = np.concatenate(
                [a["dimensions"][:, bev_axes] for a in gt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
            gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            loc = np.concatenate(
                [a["location"][:, bev_axes] for a in dt_annos_part], 0)
            dims = np.concatenate(
                [a["dimensions"][:, bev_axes] for a in dt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
            dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            overlap_part = bev_box_overlap(dt_boxes,
                                           gt_boxes).astype(np.float64)
        elif metric == 2:
            loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
            dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
            gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
            dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
            dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            overlap_part = d3_box_overlap(
                dt_boxes, gt_boxes, z_axis=z_axis,
                z_center=z_center).astype(np.float64)
        else:
            raise ValueError("unknown metric")
        parted_overlaps.append(overlap_part)
        example_idx += num_part
    overlaps = []
    example_idx = 0
    # Second pass: slice each packed matrix back into per-image blocks.
    for j, num_part in enumerate(split_parts):
        # gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        # dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        gt_num_idx, dt_num_idx = 0, 0
        for i in range(num_part):
            gt_box_num = total_gt_num[example_idx + i]
            dt_box_num = total_dt_num[example_idx + i]
            overlaps.append(
                parted_overlaps[j][dt_num_idx:dt_num_idx +
                                   dt_box_num, gt_num_idx:gt_num_idx +
                                   gt_box_num])
            gt_num_idx += gt_box_num
            dt_num_idx += dt_box_num
        example_idx += num_part
    return overlaps, parted_overlaps, total_dt_num, total_gt_num
def _prepare_data(gt_annos, dt_annos, current_class, difficulty):
    """Run clean_data over every frame and pack per-frame arrays for the
    statistics kernels.

    Returns:
        (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
         dontcares, total_dc_num, total_num_valid_gt) where the data
        arrays hold bbox + alpha (+ score for detections).
    """
    gt_datas_list = []
    dt_datas_list = []
    total_dc_num = []
    ignored_gts, ignored_dets, dontcares = [], [], []
    total_num_valid_gt = 0
    for gt_anno, dt_anno in zip(gt_annos, dt_annos):
        num_valid_gt, ignored_gt, ignored_det, dc_bboxes = clean_data(
            gt_anno, dt_anno, current_class, difficulty)
        ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
        ignored_dets.append(np.array(ignored_det, dtype=np.int64))
        if dc_bboxes:
            dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
        else:
            dc_bboxes = np.zeros((0, 4)).astype(np.float64)
        total_dc_num.append(dc_bboxes.shape[0])
        dontcares.append(dc_bboxes)
        total_num_valid_gt += num_valid_gt
        gt_datas_list.append(
            np.concatenate(
                [gt_anno["bbox"], gt_anno["alpha"][..., np.newaxis]], 1))
        dt_datas_list.append(
            np.concatenate([
                dt_anno["bbox"], dt_anno["alpha"][..., np.newaxis],
                dt_anno["score"][..., np.newaxis]
            ], 1))
    total_dc_num = np.stack(total_dc_num, axis=0)
    return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
            total_dc_num, total_num_valid_gt)
def eval_class(gt_annos,
               dt_annos,
               current_classes,
               difficultys,
               metric,
               min_overlaps,
               compute_aos=False,
               z_axis=1,
               z_center=1.0,
               num_parts=50):
    """Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
    Args:
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        current_class: int, 0: car, 1: pedestrian, 2: cyclist
        difficulty: int. eval difficulty, 0: easy, 1: normal, 2: hard
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        min_overlap: float, min overlap. official:
            [[0.7, 0.5, 0.5], [0.7, 0.5, 0.5], [0.7, 0.5, 0.5]]
            format: [metric, class]. choose one from matrix above.
        num_parts: int. a parameter for fast calculate algorithm
    Returns:
        dict of recall, precision and aos
    """
    assert len(gt_annos) == len(dt_annos)
    num_examples = len(gt_annos)
    split_parts = get_split_parts(num_examples, num_parts)
    rets = calculate_iou_partly(
        dt_annos, gt_annos, metric, num_parts, z_axis=z_axis, z_center=z_center)
    overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
    N_SAMPLE_PTS = 41
    num_minoverlap = len(min_overlaps)
    num_class = len(current_classes)
    num_difficulty = len(difficultys)
    precision = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    # NOTE: `recall` is allocated but never filled; the populated version
    # is commented out of the returned dict below.
    recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    all_thresholds = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    for m, current_class in enumerate(current_classes):
        for l, difficulty in enumerate(difficultys):
            rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
            (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
             total_dc_num, total_num_valid_gt) = rets
            for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
                # Pass 1: collect the scores of all true positives, then
                # derive the global recall-sampling thresholds from them.
                thresholdss = []
                for i in range(len(gt_annos)):
                    rets = compute_statistics_jit(
                        overlaps[i],
                        gt_datas_list[i],
                        dt_datas_list[i],
                        ignored_gts[i],
                        ignored_dets[i],
                        dontcares[i],
                        metric,
                        min_overlap=min_overlap,
                        thresh=0.0,
                        compute_fp=False)
                    tp, fp, fn, similarity, thresholds = rets
                    thresholdss += thresholds.tolist()
                thresholdss = np.array(thresholdss)
                thresholds = get_thresholds(thresholdss, total_num_valid_gt)
                thresholds = np.array(thresholds)
                all_thresholds[m, l, k, :len(thresholds)] = thresholds
                # pr rows accumulate [tp, fp, fn, similarity] per threshold
                pr = np.zeros([len(thresholds), 4])
                idx = 0
                # Pass 2: full statistics (with fp/aos) per packed part.
                for j, num_part in enumerate(split_parts):
                    gt_datas_part = np.concatenate(
                        gt_datas_list[idx:idx + num_part], 0)
                    dt_datas_part = np.concatenate(
                        dt_datas_list[idx:idx + num_part], 0)
                    dc_datas_part = np.concatenate(
                        dontcares[idx:idx + num_part], 0)
                    ignored_dets_part = np.concatenate(
                        ignored_dets[idx:idx + num_part], 0)
                    ignored_gts_part = np.concatenate(
                        ignored_gts[idx:idx + num_part], 0)
                    fused_compute_statistics(
                        parted_overlaps[j],
                        pr,
                        total_gt_num[idx:idx + num_part],
                        total_dt_num[idx:idx + num_part],
                        total_dc_num[idx:idx + num_part],
                        gt_datas_part,
                        dt_datas_part,
                        dc_datas_part,
                        ignored_gts_part,
                        ignored_dets_part,
                        metric,
                        min_overlap=min_overlap,
                        thresholds=thresholds,
                        compute_aos=compute_aos)
                    idx += num_part
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
                    if compute_aos:
                        aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
                # interpolated precision: running maximum from the right
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = np.max(
                        precision[m, l, k, i:], axis=-1)
                    if compute_aos:
                        aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
    ret_dict = {
        # "recall": recall, # [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]
        "precision": precision,
        "orientation": aos,
        "thresholds": all_thresholds,
        "min_overlaps": min_overlaps,
    }
    return ret_dict
def get_mAP_v2(prec):
    """Vectorized 11-point interpolated AP in percent: average the
    precision at every 4th sample along the last axis."""
    return prec[..., ::4].sum(axis=-1) / 11 * 100
def get_mAP_r40(prec):
    """R40 AP in percent: average the precision over recall sample
    points 1..40 (the recall-0 sample is skipped)."""
    tail = prec[:, 1:]
    return tail.sum(axis=1) / 40 * 100
def do_eval_v2(gt_annos,
               dt_annos,
               current_classes,
               min_overlaps,
               compute_aos=False,
               difficultys=(0, 1, 2),
               z_axis=1,
               z_center=1.0):
    """Evaluate bbox/bev/3d mAP (and optionally AOS) for the given
    classes and difficulties.

    ``min_overlaps`` has layout [num_minoverlap, metric, num_class];
    AOS is only meaningful for the 2D bbox metric.
    """
    def _run(metric, with_aos=False):
        # metric: 0 bbox, 1 bev, 2 3d
        return eval_class(
            gt_annos,
            dt_annos,
            current_classes,
            difficultys,
            metric,
            min_overlaps,
            with_aos,
            z_axis=z_axis,
            z_center=z_center)

    bbox_ret = _run(0, compute_aos)
    # each mAP has shape [num_class, num_diff, num_minoverlap]
    mAP_bbox = get_mAP_v2(bbox_ret["precision"])
    mAP_aos = get_mAP_v2(bbox_ret["orientation"]) if compute_aos else None
    mAP_bev = get_mAP_v2(_run(1)["precision"])
    mAP_3d = get_mAP_v2(_run(2)["precision"])
    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def do_eval_v3(gt_annos,
               dt_annos,
               current_classes,
               min_overlaps,
               compute_aos=False,
               difficultys=(0, 1, 2),
               z_axis=1,
               z_center=1.0,
               metric_types=("bbox", "bev", "3d")):
    """Evaluate and return the raw eval_class dict for each requested
    metric type ("bbox", "bev", "3d")."""
    # min_overlaps layout: [num_minoverlap, metric, num_class]
    types_map = {"bbox": 0, "bev": 1, "3d": 2}
    return {
        metric_type: eval_class(
            gt_annos,
            dt_annos,
            current_classes,
            difficultys,
            types_map[metric_type],
            min_overlaps,
            compute_aos,
            z_axis=z_axis,
            z_center=z_center)
        for metric_type in metric_types
    }
def do_coco_style_eval(gt_annos,
                       dt_annos,
                       current_classes,
                       overlap_ranges,
                       compute_aos,
                       z_axis=1,
                       z_center=1.0):
    """Coco-style AP: evaluate over 10 linearly spaced overlap
    thresholds per (metric, class) and average the resulting mAPs.

    ``overlap_ranges`` has layout [3 (start/stop/num), metric, class].
    """
    num_metrics, num_classes = overlap_ranges.shape[1:]
    min_overlaps = np.zeros([10, num_metrics, num_classes])
    for metric_idx in range(num_metrics):
        for cls_idx in range(num_classes):
            min_overlaps[:, metric_idx, cls_idx] = np.linspace(
                *overlap_ranges[:, metric_idx, cls_idx])
    mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval_v2(
        gt_annos,
        dt_annos,
        current_classes,
        min_overlaps,
        compute_aos,
        z_axis=z_axis,
        z_center=z_center)
    # average over the overlap-threshold axis:
    # [num_class, num_diff, num_minoverlap] -> [num_class, num_diff]
    mAP_bbox = mAP_bbox.mean(-1)
    mAP_bev = mAP_bev.mean(-1)
    mAP_3d = mAP_3d.mean(-1)
    if mAP_aos is not None:
        mAP_aos = mAP_aos.mean(-1)
    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def print_str(value, *arg, sstream=None):
    """Render *value* (and extra args) exactly like print() and return the text."""
    buffer = sstream if sstream is not None else sysio.StringIO()
    # A caller-supplied stream may hold old output; clear it before writing.
    buffer.truncate(0)
    buffer.seek(0)
    print(value, *arg, file=buffer)
    return buffer.getvalue()
def get_official_eval_result(gt_annos,
                             dt_annos,
                             current_classes,
                             difficultys=[0, 1, 2],
                             z_axis=1,
                             z_center=1.0,
                             metric_types=("bbox", "bev", "3d"),
                             recall_type='R40'):
    """Official KITTI-style evaluation.

    gt_annos and dt_annos must contains following keys:
    [bbox, location, dimensions, rotation_y, score]

    Returns:
        OrderedDict: class name -> min-overlap tuple -> metric name ->
        AP array over the requested difficulties.
    """
    # IoU thresholds, one column per class id: a strict ("moderate") set and
    # a relaxed ("easy") set, stacked into shape [2, 3, 8] below.
    overlap_mod = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
                            [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
                            [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7]])
    overlap_easy = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.5, 0.5, 0.5],
                             [0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5],
                             [0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5]])
    min_overlaps = np.stack([overlap_mod, overlap_easy], axis=0)  # [2, 3, 8]
    class_to_name = {
        0: 'Car',
        1: 'Pedestrian',
        2: 'Cyclist',
        3: 'Van',
        4: 'Person_sitting',
        5: 'car',
        6: 'tractor',
        7: 'trailer',
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    # Accept class names or integer ids; normalize to a list of ids.
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    current_classes_int = []
    for curcls in current_classes:
        if isinstance(curcls, str):
            current_classes_int.append(name_to_class[curcls])
        else:
            current_classes_int.append(curcls)
    current_classes = current_classes_int
    min_overlaps = min_overlaps[:, :, current_classes]
    # AOS can only be computed when the detections carry a valid alpha
    # angle; -10 marks "no orientation available".
    compute_aos = False
    for anno in dt_annos:
        if anno['alpha'].shape[0] != 0:
            if anno['alpha'][0] != -10:
                compute_aos = True
            break
    metrics = do_eval_v3(
        gt_annos,
        dt_annos,
        current_classes,
        min_overlaps,
        compute_aos,
        difficultys,
        z_axis=z_axis,
        z_center=z_center,
        metric_types=metric_types)
    res = OrderedDict()
    if compute_aos and "bbox" in metric_types:
        metric_types = list(metric_types) + ["aos"]
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        curcls = class_to_name[curcls]
        res[curcls] = OrderedDict()
        for i in range(min_overlaps.shape[0]):
            overlap = tuple(min_overlaps[i, :, j].tolist())
            res[curcls][overlap] = OrderedDict()
            for metric_type in metric_types:
                # AOS is derived from the bbox matching ("orientation");
                # every other metric reads its own precision table.
                if metric_type == "aos":
                    if recall_type == 'R40':
                        res[curcls][overlap][metric_type] = get_mAP_r40(
                            metrics["bbox"]["orientation"][j, :, i])
                    elif recall_type == 'R11':
                        res[curcls][overlap][metric_type] = get_mAP_v2(
                            metrics["bbox"]["orientation"][j, :, i])
                else:
                    if recall_type == 'R40':
                        res[curcls][overlap][metric_type] = get_mAP_r40(
                            metrics[metric_type]["precision"][j, :, i])
                    elif recall_type == 'R11':
                        res[curcls][overlap][metric_type] = get_mAP_v2(
                            metrics[metric_type]["precision"][j, :, i])
    return res
def get_coco_eval_result(gt_annos,
                         dt_annos,
                         current_classes,
                         z_axis=1,
                         z_center=1.0):
    """COCO-style evaluation report: AP averaged over an IoU sweep.

    Returns:
        str: formatted result text, one section per evaluated class.
    """
    class_to_name = {
        0: 'Car',
        1: 'Pedestrian',
        2: 'Cyclist',
        3: 'Van',
        4: 'Person_sitting',
        5: 'car',
        6: 'tractor',
        7: 'trailer',
    }
    # (start, stop, num) of the IoU threshold sweep per class id.
    # NOTE: a shadowed duplicate table (with 0.05-step triples) that was
    # immediately overwritten has been removed; only this one was ever used.
    class_to_range = {
        0: [0.5, 0.95, 10],
        1: [0.25, 0.7, 10],
        2: [0.25, 0.7, 10],
        3: [0.5, 0.95, 10],
        4: [0.25, 0.7, 10],
        5: [0.5, 0.95, 10],
        6: [0.5, 0.95, 10],
        7: [0.5, 0.95, 10],
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    # Accept class names or integer ids; normalize to a list of ids.
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    current_classes_int = []
    for curcls in current_classes:
        if isinstance(curcls, str):
            current_classes_int.append(name_to_class[curcls])
        else:
            current_classes_int.append(curcls)
    current_classes = current_classes_int
    overlap_ranges = np.zeros([3, 3, len(current_classes)])
    for i, curcls in enumerate(current_classes):
        # Same (start, stop, num) triple for all three metrics.
        overlap_ranges[:, :, i] = np.array(
            class_to_range[curcls])[:, np.newaxis]
    result = ''
    # AOS can only be computed when detections carry a valid alpha angle.
    compute_aos = False
    for anno in dt_annos:
        if anno['alpha'].shape[0] != 0:
            if anno['alpha'][0] != -10:
                compute_aos = True
            break
    mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
        gt_annos,
        dt_annos,
        current_classes,
        overlap_ranges,
        compute_aos,
        z_axis=z_axis,
        z_center=z_center)
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        # Reorder (start, stop, num) -> (start, num, stop), then replace the
        # middle entry with the step size so the header reads
        # "AP@start:step:stop".
        o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
        o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
        result += print_str((f"{class_to_name[curcls]} "
                             "coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range)))
        result += print_str((f"bbox AP:{mAPbbox[j, 0]:.2f}, "
                             f"{mAPbbox[j, 1]:.2f}, "
                             f"{mAPbbox[j, 2]:.2f}"))
        result += print_str((f"bev AP:{mAPbev[j, 0]:.2f}, "
                             f"{mAPbev[j, 1]:.2f}, "
                             f"{mAPbev[j, 2]:.2f}"))
        result += print_str((f"3d AP:{mAP3d[j, 0]:.2f}, "
                             f"{mAP3d[j, 1]:.2f}, "
                             f"{mAP3d[j, 2]:.2f}"))
        if compute_aos:
            result += print_str((f"aos AP:{mAPaos[j, 0]:.2f}, "
                                 f"{mAPaos[j, 1]:.2f}, "
                                 f"{mAPaos[j, 2]:.2f}"))
    return result
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/kitti_object_eval_python/rotate_iou.py
|
#####################
# Based on https://github.com/hongzhenwang/RRPN-revise
# Licensed under The MIT License
# Author: yanyan, scrin@foxmail.com
#####################
import math
import numba
import numpy as np
from numba import cuda
@numba.jit(nopython=True)
def div_up(m, n):
    # Ceiling integer division: the number of size-n chunks needed to cover m.
    return m // n + (m % n > 0)
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def trangle_area(a, b, c):
    # Signed area of triangle (a, b, c) via the 2-D cross product of
    # (a - c) and (b - c); the sign encodes the winding order.
    return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) *
            (b[0] - c[0])) / 2.0
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
    # Area of a convex polygon stored as a flat [x0, y0, x1, y1, ...] buffer,
    # computed as a triangle fan rooted at the first vertex. Vertices must
    # already be in angular order (see sort_vertex_in_convex_polygon).
    area_val = 0.0
    for i in range(num_of_inter - 2):
        area_val += abs(
            trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4],
                         int_pts[2 * i + 4:2 * i + 6]))
    return area_val
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def sort_vertex_in_convex_polygon(int_pts, num_of_inter):
    # Reorder the polygon vertices in place into angular order around their
    # centroid so the triangle-fan decomposition in area() is valid.
    # A monotonic pseudo-angle is used instead of atan2 to stay cheap.
    if num_of_inter > 0:
        center = cuda.local.array((2, ), dtype=numba.float32)
        center[:] = 0.0
        # Centroid of the intersection polygon.
        for i in range(num_of_inter):
            center[0] += int_pts[2 * i]
            center[1] += int_pts[2 * i + 1]
        center[0] /= num_of_inter
        center[1] /= num_of_inter
        v = cuda.local.array((2, ), dtype=numba.float32)
        vs = cuda.local.array((16, ), dtype=numba.float32)
        for i in range(num_of_inter):
            v[0] = int_pts[2 * i] - center[0]
            v[1] = int_pts[2 * i + 1] - center[1]
            d = math.sqrt(v[0] * v[0] + v[1] * v[1])
            v[0] = v[0] / d
            v[1] = v[1] / d
            # Fold directions below the x-axis so the scalar key increases
            # monotonically over the full turn.
            if v[1] < 0:
                v[0] = -2 - v[0]
            vs[i] = v[0]
        j = 0
        temp = 0
        # Insertion sort of the (key, point) pairs by pseudo-angle.
        for i in range(1, num_of_inter):
            if vs[i - 1] > vs[i]:
                temp = vs[i]
                tx = int_pts[2 * i]
                ty = int_pts[2 * i + 1]
                j = i
                while j > 0 and vs[j - 1] > temp:
                    vs[j] = vs[j - 1]
                    int_pts[j * 2] = int_pts[j * 2 - 2]
                    int_pts[j * 2 + 1] = int_pts[j * 2 - 1]
                    j -= 1
                vs[j] = temp
                int_pts[j * 2] = tx
                int_pts[j * 2 + 1] = ty
@cuda.jit(
    '(float32[:], float32[:], int32, int32, float32[:])',
    device=True,
    inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
    # Intersect edge i of quad pts1 (segment A->B, wrapping at corner 4)
    # with edge j of quad pts2 (segment C->D). On a proper crossing the
    # intersection point is written to temp_pts and True is returned.
    A = cuda.local.array((2, ), dtype=numba.float32)
    B = cuda.local.array((2, ), dtype=numba.float32)
    C = cuda.local.array((2, ), dtype=numba.float32)
    D = cuda.local.array((2, ), dtype=numba.float32)
    A[0] = pts1[2 * i]
    A[1] = pts1[2 * i + 1]
    B[0] = pts1[2 * ((i + 1) % 4)]
    B[1] = pts1[2 * ((i + 1) % 4) + 1]
    C[0] = pts2[2 * j]
    C[1] = pts2[2 * j + 1]
    D[0] = pts2[2 * ((j + 1) % 4)]
    D[1] = pts2[2 * ((j + 1) % 4) + 1]
    BA0 = B[0] - A[0]
    BA1 = B[1] - A[1]
    DA0 = D[0] - A[0]
    CA0 = C[0] - A[0]
    DA1 = D[1] - A[1]
    CA1 = C[1] - A[1]
    # Orientation tests: the segments cross iff C and D lie on opposite
    # sides of line AB, and A and B lie on opposite sides of line CD.
    acd = DA1 * CA0 > CA1 * DA0
    bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0])
    if acd != bcd:
        abc = CA1 * BA0 > BA1 * CA0
        abd = DA1 * BA0 > BA1 * DA0
        if abc != abd:
            # Cramer's rule on the two implicit line equations gives the
            # intersection point (Dx/DH, Dy/DH).
            DC0 = D[0] - C[0]
            DC1 = D[1] - C[1]
            ABBA = A[0] * B[1] - B[0] * A[1]
            CDDC = C[0] * D[1] - D[0] * C[1]
            DH = BA1 * DC0 - BA0 * DC1
            Dx = ABBA * DC0 - BA0 * CDDC
            Dy = ABBA * DC1 - BA1 * CDDC
            temp_pts[0] = Dx / DH
            temp_pts[1] = Dy / DH
            return True
    return False
@cuda.jit(
    '(float32[:], float32[:], int32, int32, float32[:])',
    device=True,
    inline=True)
def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts):
    # Alternative edge-edge intersection using signed triangle areas
    # (same contract as line_segment_intersection; kept as a variant).
    a = cuda.local.array((2, ), dtype=numba.float32)
    b = cuda.local.array((2, ), dtype=numba.float32)
    c = cuda.local.array((2, ), dtype=numba.float32)
    d = cuda.local.array((2, ), dtype=numba.float32)
    a[0] = pts1[2 * i]
    a[1] = pts1[2 * i + 1]
    b[0] = pts1[2 * ((i + 1) % 4)]
    b[1] = pts1[2 * ((i + 1) % 4) + 1]
    c[0] = pts2[2 * j]
    c[1] = pts2[2 * j + 1]
    d[0] = pts2[2 * ((j + 1) % 4)]
    d[1] = pts2[2 * ((j + 1) % 4) + 1]
    # c and d must lie on opposite sides of segment ab ...
    area_abc = trangle_area(a, b, c)
    area_abd = trangle_area(a, b, d)
    if area_abc * area_abd >= 0:
        return False
    # ... and a and b on opposite sides of segment cd.
    area_cda = trangle_area(c, d, a)
    area_cdb = area_cda + area_abc - area_abd
    if area_cda * area_cdb >= 0:
        return False
    # Parametric position of the crossing along ab.
    t = area_cda / (area_abd - area_abc)
    dx = t * (b[0] - a[0])
    dy = t * (b[1] - a[1])
    temp_pts[0] = a[0] + dx
    temp_pts[1] = a[1] + dy
    return True
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
    # Point-in-parallelogram test via projections of AP onto the edge
    # vectors AB and AD: inside iff 0 <= AP.AB <= AB.AB and
    # 0 <= AP.AD <= AD.AD. Valid for the rectangular corners produced by
    # rbbox_to_corners.
    ab0 = corners[2] - corners[0]
    ab1 = corners[3] - corners[1]
    ad0 = corners[6] - corners[0]
    ad1 = corners[7] - corners[1]
    ap0 = pt_x - corners[0]
    ap1 = pt_y - corners[1]
    abab = ab0 * ab0 + ab1 * ab1
    abap = ab0 * ap0 + ab1 * ap1
    adad = ad0 * ad0 + ad1 * ad1
    adap = ad0 * ap0 + ad1 * ap1
    return abab >= abap and abap >= 0 and adad >= adap and adap >= 0
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def quadrilateral_intersection(pts1, pts2, int_pts):
    # Collect the vertices of the intersection polygon of two quads:
    # corners of either quad lying inside the other, plus every edge-edge
    # crossing. The vertices in int_pts are UNORDERED; call
    # sort_vertex_in_convex_polygon before computing the area.
    num_of_inter = 0
    for i in range(4):
        if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2):
            int_pts[num_of_inter * 2] = pts1[2 * i]
            int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]
            num_of_inter += 1
        if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1):
            int_pts[num_of_inter * 2] = pts2[2 * i]
            int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]
            num_of_inter += 1
    temp_pts = cuda.local.array((2, ), dtype=numba.float32)
    for i in range(4):
        for j in range(4):
            has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)
            if has_pts:
                int_pts[num_of_inter * 2] = temp_pts[0]
                int_pts[num_of_inter * 2 + 1] = temp_pts[1]
                num_of_inter += 1
    return num_of_inter
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
    # Expand a rotated box [center_x, center_y, x_size, y_size, angle] into
    # its 4 corner coordinates, written flat as [x0, y0, ..., x3, y3].
    # generate clockwise corners and rotate it clockwise
    angle = rbbox[4]
    a_cos = math.cos(angle)
    a_sin = math.sin(angle)
    center_x = rbbox[0]
    center_y = rbbox[1]
    x_d = rbbox[2]
    y_d = rbbox[3]
    corners_x = cuda.local.array((4, ), dtype=numba.float32)
    corners_y = cuda.local.array((4, ), dtype=numba.float32)
    # Axis-aligned corners around the origin before rotation.
    corners_x[0] = -x_d / 2
    corners_x[1] = -x_d / 2
    corners_x[2] = x_d / 2
    corners_x[3] = x_d / 2
    corners_y[0] = -y_d / 2
    corners_y[1] = y_d / 2
    corners_y[2] = y_d / 2
    corners_y[3] = -y_d / 2
    # Rotate each corner and translate to the box center.
    for i in range(4):
        corners[2 *
                i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x
        corners[2 * i
                + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def inter(rbbox1, rbbox2):
    # Intersection area of two rotated boxes: expand to corners, clip the
    # quads against each other, order the resulting vertices, then compute
    # the polygon area.
    corners1 = cuda.local.array((8, ), dtype=numba.float32)
    corners2 = cuda.local.array((8, ), dtype=numba.float32)
    intersection_corners = cuda.local.array((16, ), dtype=numba.float32)
    rbbox_to_corners(corners1, rbbox1)
    rbbox_to_corners(corners2, rbbox2)
    num_intersection = quadrilateral_intersection(corners1, corners2,
                                                  intersection_corners)
    sort_vertex_in_convex_polygon(intersection_corners, num_intersection)
    # print(intersection_corners.reshape([-1, 2])[:num_intersection])
    return area(intersection_corners, num_intersection)
@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True)
def devRotateIoUEval(rbox1, rbox2, criterion=-1):
    # Rotated-box overlap for one pair.
    # criterion -1: intersection over union; 0: intersection / area(rbox1);
    # 1: intersection / area(rbox2); any other value: raw intersection area.
    area1 = rbox1[2] * rbox1[3]
    area2 = rbox2[2] * rbox2[3]
    area_inter = inter(rbox1, rbox2)
    if criterion == -1:
        return area_inter / (area1 + area2 - area_inter)
    elif criterion == 0:
        return area_inter / area1
    elif criterion == 1:
        return area_inter / area2
    else:
        return area_inter
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1):
    # Each CUDA block computes one 64x64 tile of the N x K overlap matrix:
    # both tiles of boxes are staged into shared memory once, then each
    # thread fills one row of the tile.
    threadsPerBlock = 8 * 8
    row_start = cuda.blockIdx.x
    col_start = cuda.blockIdx.y
    tx = cuda.threadIdx.x
    # Clamp the tile size at the matrix edges.
    row_size = min(N - row_start * threadsPerBlock, threadsPerBlock)
    col_size = min(K - col_start * threadsPerBlock, threadsPerBlock)
    block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
    block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
    dev_query_box_idx = threadsPerBlock * col_start + tx
    dev_box_idx = threadsPerBlock * row_start + tx
    # Each thread copies one 5-float box per tile into shared memory.
    if (tx < col_size):
        block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0]
        block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1]
        block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2]
        block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3]
        block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4]
    if (tx < row_size):
        block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0]
        block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1]
        block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2]
        block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3]
        block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4]
    # Wait until both tiles are fully staged before reading them.
    cuda.syncthreads()
    if tx < row_size:
        for i in range(col_size):
            offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i
            # NOTE: the query box is passed first, so criterion 0/1 select
            # the query/box area respectively (see devRotateIoUEval).
            dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5],
                                               block_boxes[tx * 5:tx * 5 + 5], criterion)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
    """rotated box iou running in gpu. 500x faster than cpu version
    (take 5ms in one example with numba.cuda code).
    convert from [this project](
    https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
    Args:
        boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
            angles(clockwise when positive)
        query_boxes (float tensor: [K, 5]): [description]
        device_id (int, optional): Defaults to 0. [description]
    Returns:
        [type]: [description]
    """
    box_dtype = boxes.dtype
    # The kernel works on float32; convert up front.
    boxes = boxes.astype(np.float32)
    query_boxes = query_boxes.astype(np.float32)
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    iou = np.zeros((N, K), dtype=np.float32)
    # Nothing to compute (and no empty grid launch) when either side is empty.
    if N == 0 or K == 0:
        return iou
    threadsPerBlock = 8 * 8
    cuda.select_device(device_id)
    blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
    stream = cuda.stream()
    # Stage the flattened boxes on the device, run the tiled kernel, and
    # copy the result back; auto_synchronize joins the stream on exit.
    with stream.auto_synchronize():
        boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
        query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
        iou_dev = cuda.to_device(iou.reshape([-1]), stream)
        rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](
            N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
        iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
    return iou.astype(boxes.dtype)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/apollo_object_eval_python/apollo_eval.py
|
import io as sysio
import time
from collections import OrderedDict
import numba
import numpy as np
from scipy.interpolate import interp1d
from ..kitti_object_eval_python.rotate_iou import rotate_iou_gpu_eval
def get_mAP(prec):
    """11-point interpolated AP in percent: average the precision values at
    every 4th recall sample point (indices 0, 4, ..., 40 of a 41-point
    precision curve)."""
    sampled = [prec[idx] for idx in range(0, len(prec), 4)]
    return sum(sampled) / 11 * 100
@numba.jit
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
    # Choose score thresholds that sample the recall axis approximately
    # uniformly at num_sample_pts points.
    # NOTE: sorts `scores` in place before reversing to descending order.
    scores.sort()
    scores = scores[::-1]
    current_recall = 0
    thresholds = []
    for i, score in enumerate(scores):
        # Recall reached by accepting this score (l_recall) vs. the next
        # one (r_recall).
        l_recall = (i + 1) / num_gt
        if i < (len(scores) - 1):
            r_recall = (i + 2) / num_gt
        else:
            r_recall = l_recall
        # Skip this score if the next one lands closer to the target recall.
        if (((r_recall - current_recall) < (current_recall - l_recall))
                and (i < (len(scores) - 1))):
            continue
        # recall = l_recall
        thresholds.append(score)
        current_recall += 1 / (num_sample_pts - 1.0)
    # print(len(thresholds), len(scores), num_gt)
    return thresholds
def clean_data(gt_anno, dt_anno, current_class, difficulty):
    """Classify ground-truth and detection boxes of one frame for evaluation.

    Each box is marked 0 (counted), 1 (ignored, e.g. below the minimum size
    for this difficulty) or -1 (different class, excluded entirely).

    Args:
        gt_anno / dt_anno: per-frame annotation dicts with at least the
            "name" and "dimensions" keys ("bbox" is read for DontCare boxes).
        current_class: index into CLASS_NAMES of the class under evaluation.
        difficulty: 0/1/2; selects the per-class minimum-size threshold.

    Returns:
        (num_valid_gt, num_valid_dt, ignored_gt, ignored_dt, dc_bboxes)
    """
    CLASS_NAMES = [
        'smallmot', 'bigmot', 'nonmot', 'pedestrian', 'trafficcone'
    ]  # lower
    # Minimum box dimension per class (rows) and difficulty (columns).
    MIN_SIZE = [[0.5, 0.3, 0.1],
                [0.5, 0.3, 0.1],
                [0.5, 0.2, 0.05],
                [0.5, 0.2, 0.05],
                [0.3, 0.1, 0.01]]
    dc_bboxes, ignored_gt, ignored_dt = [], [], []
    current_cls_name = CLASS_NAMES[current_class].lower()
    num_gt = len(gt_anno["name"])
    num_dt = len(dt_anno["name"])
    num_valid_gt = 0
    num_valid_dt = 0
    for i in range(num_gt):
        gt_name = gt_anno["name"][i].lower()
        valid_class = 1 if gt_name == current_cls_name else -1
        # Boxes smaller than the difficulty-dependent threshold in any
        # dimension are ignored rather than counted against recall; unknown
        # class names fall back to the first class's threshold.
        if gt_name not in CLASS_NAMES:
            gt_size_threshold = MIN_SIZE[0][difficulty]
        else:
            gt_size_threshold = MIN_SIZE[CLASS_NAMES.index(gt_name)][difficulty]
        ignore = ((gt_anno["dimensions"][i][0] <= gt_size_threshold)
                  or (gt_anno["dimensions"][i][1] <= gt_size_threshold)
                  or (gt_anno["dimensions"][i][2] <= gt_size_threshold))
        if valid_class == 1 and not ignore:
            ignored_gt.append(0)
            num_valid_gt += 1
        elif valid_class == 0 or (ignore and valid_class == 1):
            ignored_gt.append(1)
        else:
            ignored_gt.append(-1)
        if gt_anno["name"][i] == "DontCare":
            dc_bboxes.append(gt_anno["bbox"][i])
    for i in range(num_dt):
        dt_name = dt_anno["name"][i].lower()
        valid_class = 1 if dt_name == current_cls_name else -1
        # BUG FIX: CLASS_NAMES.index(dt_name) used to raise ValueError for
        # detections with an unknown class name; fall back to the first
        # class's threshold, mirroring the ground-truth handling above.
        if dt_name not in CLASS_NAMES:
            dt_size_threshold = MIN_SIZE[0][difficulty]
        else:
            dt_size_threshold = MIN_SIZE[CLASS_NAMES.index(dt_name)][difficulty]
        if ((dt_anno["dimensions"][i][0] <= dt_size_threshold)
                or (dt_anno["dimensions"][i][1] <= dt_size_threshold)
                or (dt_anno["dimensions"][i][2] <= dt_size_threshold)):
            ignored_dt.append(1)
        elif valid_class == 1:
            ignored_dt.append(0)
            num_valid_dt += 1
        else:
            ignored_dt.append(-1)
    return num_valid_gt, num_valid_dt, ignored_gt, ignored_dt, dc_bboxes
@numba.jit(nopython=True)
def image_box_overlap(boxes, query_boxes, criterion=-1):
    # Pairwise overlap of axis-aligned 2-D boxes in [x1, y1, x2, y2] form.
    # criterion -1: intersection over union; 0: intersection over the area
    # of `boxes`; 1: intersection over the area of `query_boxes`;
    # any other value: raw intersection area.
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    for k in range(K):
        qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
                     (query_boxes[k, 3] - query_boxes[k, 1]))
        for n in range(N):
            # Intersection width/height; non-positive means no overlap.
            iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
                boxes[n, 0], query_boxes[k, 0]))
            if iw > 0:
                ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
                    boxes[n, 1], query_boxes[k, 1]))
                if ih > 0:
                    if criterion == -1:
                        ua = ((boxes[n, 2] - boxes[n, 0]) *
                              (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
                    elif criterion == 0:
                        ua = ((boxes[n, 2] - boxes[n, 0]) *
                              (boxes[n, 3] - boxes[n, 1]))
                    elif criterion == 1:
                        ua = qbox_area
                    else:
                        ua = 1.0
                    overlaps[n, k] = iw * ih / ua
    return overlaps
def bev_box_overlap(boxes, qboxes, criterion=-1):
    """Rotated-box overlap between bird's-eye-view boxes (GPU-accelerated)."""
    return rotate_iou_gpu_eval(boxes, qboxes, criterion)
@numba.jit(nopython=True, parallel=True)
def d3_box_overlap_kernel(boxes,
                          qboxes,
                          rinc,
                          criterion=-1,
                          z_axis=1,
                          z_center=1.0):
    """
    z_axis: the z (height) axis.
    z_center: unified z (height) center of box.
    """
    # Refine the BEV rotated intersections in `rinc` (modified in place)
    # into 3-D overlaps by multiplying with the height intersection.
    N, K = boxes.shape[0], qboxes.shape[0]
    for i in range(N):
        for j in range(K):
            # Only pairs that already intersect in the BEV plane matter.
            if rinc[i, j] > 0:
                # Upper and lower bound of the common vertical extent;
                # z_center is the fraction of the height below the box's
                # z coordinate.
                min_z = min(
                    boxes[i, z_axis] + boxes[i, z_axis + 3] * (1 - z_center),
                    qboxes[j, z_axis] + qboxes[j, z_axis + 3] * (1 - z_center))
                max_z = max(
                    boxes[i, z_axis] - boxes[i, z_axis + 3] * z_center,
                    qboxes[j, z_axis] - qboxes[j, z_axis + 3] * z_center)
                iw = min_z - max_z
                if iw > 0:
                    # Box volumes from the three size components.
                    area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
                    area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
                    inc = iw * rinc[i, j]
                    if criterion == -1:
                        ua = (area1 + area2 - inc)
                    elif criterion == 0:
                        ua = area1
                    elif criterion == 1:
                        ua = area2
                    else:
                        ua = 1.0
                    rinc[i, j] = inc / ua
                else:
                    rinc[i, j] = 0.0
def d3_box_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0):
    """3-D overlap of 7-DoF boxes; kitti camera format uses z_axis=1.

    First computes the rotated intersection in the ground plane, then
    d3_box_overlap_kernel folds in the height intersection in place.
    """
    # In-plane columns: every box column except the height axis and its
    # matching size dimension.
    bev_axes = [axis for axis in range(7)
                if axis != z_axis and axis != z_axis + 3]
    rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2)
    d3_box_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center)
    return rinc
@numba.jit(nopython=True)
def compute_statistics_jit(overlaps,
                           gt_datas,
                           dt_datas,
                           ignored_gt,
                           ignored_det,
                           dc_bboxes,
                           metric,
                           min_overlap,
                           thresh=0,
                           compute_fp=False,
                           compute_aos=False):
    # Match detections to ground truths for one frame and count tp/fp/fn
    # (plus orientation similarity for AOS). `overlaps` is indexed
    # [det, gt]; ignored_* use 0 = counted, 1 = ignored, -1 = other class.
    # Also returns the scores of matched detections so the caller can
    # derive recall-sampling thresholds.
    det_size = dt_datas.shape[0]
    gt_size = gt_datas.shape[0]
    dt_scores = dt_datas[:, -1]
    dt_alphas = dt_datas[:, 4]
    gt_alphas = gt_datas[:, 4]
    dt_bboxes = dt_datas[:, :4]
    # gt_bboxes = gt_datas[:, :4]
    assigned_detection = [False] * det_size
    ignored_threshold = [False] * det_size
    if compute_fp:
        # Detections below the current score threshold are invisible.
        for i in range(det_size):
            if (dt_scores[i] < thresh):
                ignored_threshold[i] = True
    NO_DETECTION = -10000000
    tp, fp, fn, similarity = 0, 0, 0, 0
    # thresholds = [0.0]
    # delta = [0.0]
    thresholds = np.zeros((gt_size, ))
    thresh_idx = 0
    delta = np.zeros((gt_size, ))
    delta_idx = 0
    # Greedy assignment: each ground truth selects its best detection.
    for i in range(gt_size):
        if ignored_gt[i] == -1:
            continue
        det_idx = -1
        valid_detection = NO_DETECTION
        max_overlap = 0
        assigned_ignored_det = False
        for j in range(det_size):
            if (ignored_det[j] == -1):
                continue
            if (assigned_detection[j]):
                continue
            if (ignored_threshold[j]):
                continue
            overlap = overlaps[j, i]
            dt_score = dt_scores[j]
            if (not compute_fp and (overlap > min_overlap)
                    and dt_score > valid_detection):
                # Recall pass: prefer the highest-scoring match.
                det_idx = j
                valid_detection = dt_score
            elif (compute_fp and (overlap > min_overlap)
                  and (overlap > max_overlap or assigned_ignored_det)
                  and ignored_det[j] == 0):
                # Precision pass: prefer the highest-overlap counted match.
                max_overlap = overlap
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = False
            elif (compute_fp and (overlap > min_overlap)
                  and (valid_detection == NO_DETECTION)
                  and ignored_det[j] == 1):
                # Fall back to an ignored detection if nothing else matched.
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = True
        if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
            fn += 1
        elif ((valid_detection != NO_DETECTION)
              and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
            # Matches involving ignored boxes neither count as TP nor FN.
            assigned_detection[det_idx] = True
        elif valid_detection != NO_DETECTION:
            # only a tp add a threshold.
            tp += 1
            # thresholds.append(dt_scores[det_idx])
            thresholds[thresh_idx] = dt_scores[det_idx]
            thresh_idx += 1
            if compute_aos:
                # delta.append(gt_alphas[i] - dt_alphas[det_idx])
                delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
                delta_idx += 1
            assigned_detection[det_idx] = True
    if compute_fp:
        # Unmatched, counted detections above threshold are false positives.
        for i in range(det_size):
            if (not (assigned_detection[i] or ignored_det[i] == -1
                     or ignored_det[i] == 1 or ignored_threshold[i])):
                fp += 1
        nstuff = 0
        if metric == 0:
            # Detections overlapping a DontCare region do not count as FP.
            overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
            for i in range(dc_bboxes.shape[0]):
                for j in range(det_size):
                    if (assigned_detection[j]):
                        continue
                    if (ignored_det[j] == -1 or ignored_det[j] == 1):
                        continue
                    if (ignored_threshold[j]):
                        continue
                    if overlaps_dt_dc[j, i] > min_overlap:
                        assigned_detection[j] = True
                        nstuff += 1
        fp -= nstuff
        if compute_aos:
            # AOS similarity: (1 + cos(angle error)) / 2 per TP, 0 per FP.
            tmp = np.zeros((fp + delta_idx, ))
            # tmp = [0] * fp
            for i in range(delta_idx):
                tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
                # tmp.append((1.0 + np.cos(delta[i])) / 2.0)
            # assert len(tmp) == fp + tp
            # assert len(delta) == tp
            if tp > 0 or fp > 0:
                similarity = np.sum(tmp)
            else:
                similarity = -1
    return tp, fp, fn, similarity, thresholds[:thresh_idx]
def get_split_parts(num, num_part):
    """Split `num` examples into `num_part` equal chunks, plus one final
    chunk carrying the remainder when the division is not exact."""
    chunk, leftover = divmod(num, num_part)
    if leftover == 0:
        return [chunk] * num_part
    return [chunk] * num_part + [leftover]
@numba.jit(nopython=True)
def fused_compute_statistics(overlaps,
                             pr,
                             gt_nums,
                             dt_nums,
                             dc_nums,
                             gt_datas,
                             dt_datas,
                             dontcares,
                             ignored_gts,
                             ignored_dets,
                             metric,
                             min_overlap,
                             thresholds,
                             compute_aos=False):
    # Accumulate tp/fp/fn/similarity into `pr` (one row per score
    # threshold) over a batch of frames whose per-frame arrays were
    # concatenated; gt_nums/dt_nums/dc_nums give the per-frame counts used
    # to slice them back apart.
    gt_num = 0
    dt_num = 0
    dc_num = 0
    for i in range(gt_nums.shape[0]):
        for t, thresh in enumerate(thresholds):
            # Per-frame views into the concatenated arrays.
            overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:gt_num +
                               gt_nums[i]]
            gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
            dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
            ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
            ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
            dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
            tp, fp, fn, similarity, _ = compute_statistics_jit(
                overlap,
                gt_data,
                dt_data,
                ignored_gt,
                ignored_det,
                dontcare,
                metric,
                min_overlap=min_overlap,
                thresh=thresh,
                compute_fp=True,
                compute_aos=compute_aos)
            # pr columns: tp, fp, fn, accumulated orientation similarity.
            pr[t, 0] += tp
            pr[t, 1] += fp
            pr[t, 2] += fn
            if similarity != -1:
                pr[t, 3] += similarity
        gt_num += gt_nums[i]
        dt_num += dt_nums[i]
        dc_num += dc_nums[i]
def calculate_iou_partly(dt_annos,
                         gt_annos,
                         metric,
                         num_parts=50,
                         z_axis=1,
                         z_center=1.0):
    """fast iou algorithm. this function can be used independently to
    do result analysis.
    Args:
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        num_parts: int. a parameter for fast calculate algorithm
        z_axis: height axis. kitti camera use 1, lidar use 2.
    """
    assert len(dt_annos) == len(gt_annos)
    total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
    total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
    num_examples = len(gt_annos)
    # Process the frames in parts: one big overlap matrix per part instead
    # of many tiny per-frame calls.
    split_parts = get_split_parts(num_examples, num_parts)
    parted_overlaps = []
    example_idx = 0
    # The two in-plane location axes (height axis removed), used for BEV.
    bev_axes = list(range(3))
    bev_axes.pop(z_axis)
    for num_part in split_parts:
        gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        if metric == 0: # not support
            gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
            dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
            overlap_part = image_box_overlap(dt_boxes, gt_boxes)
        elif metric == 1:
            # BEV: boxes are [x, y, l, w, ry] in the ground plane.
            loc = np.concatenate(
                [a["location"][:, bev_axes] for a in gt_annos_part], 0)
            dims = np.concatenate(
                [a["dimensions"][:, bev_axes] for a in gt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
            gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            loc = np.concatenate(
                [a["location"][:, bev_axes] for a in dt_annos_part], 0)
            dims = np.concatenate(
                [a["dimensions"][:, bev_axes] for a in dt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
            dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            overlap_part = bev_box_overlap(dt_boxes,
                                           gt_boxes).astype(np.float64)
        elif metric == 2:
            # 3-D: full 7-DoF boxes [x, y, z, dims(3), ry].
            loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
            dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
            gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
            dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
            dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            overlap_part = d3_box_overlap(
                dt_boxes, gt_boxes, z_axis=z_axis,
                z_center=z_center).astype(np.float64)
        else:
            raise ValueError("unknown metric")
        parted_overlaps.append(overlap_part)
        example_idx += num_part
    # Slice each part's big overlap matrix back into one matrix per frame.
    overlaps = []
    example_idx = 0
    for j, num_part in enumerate(split_parts):
        # gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        # dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        gt_num_idx, dt_num_idx = 0, 0
        for i in range(num_part):
            gt_box_num = total_gt_num[example_idx + i]
            dt_box_num = total_dt_num[example_idx + i]
            overlaps.append(
                parted_overlaps[j][dt_num_idx:dt_num_idx +
                                   dt_box_num, gt_num_idx:gt_num_idx +
                                   gt_box_num])
            gt_num_idx += gt_box_num
            dt_num_idx += dt_box_num
        example_idx += num_part
    return overlaps, parted_overlaps, total_dt_num, total_gt_num
def _prepare_data(gt_annos, dt_annos, current_class, difficulty):
    """Run clean_data on every frame and gather the per-frame inputs that
    eval_class needs (data arrays, ignore masks, DontCare boxes, counts)."""
    gt_datas_list = []
    dt_datas_list = []
    total_dc_num = []
    ignored_gts, ignored_dets, dontcares = [], [], []
    total_num_valid_gt = 0
    total_num_valid_dt = 0
    for frame_idx in range(len(gt_annos)):
        (num_valid_gt, num_valid_dt, ignored_gt, ignored_det,
         dc_bboxes) = clean_data(gt_annos[frame_idx], dt_annos[frame_idx],
                                 current_class, difficulty)
        ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
        ignored_dets.append(np.array(ignored_det, dtype=np.int64))
        # DontCare boxes become an empty (0, 4) array when absent so that
        # downstream concatenation always works.
        if dc_bboxes:
            dc_boxes_arr = np.stack(dc_bboxes, 0).astype(np.float64)
        else:
            dc_boxes_arr = np.zeros((0, 4)).astype(np.float64)
        total_dc_num.append(dc_boxes_arr.shape[0])
        dontcares.append(dc_boxes_arr)
        total_num_valid_gt += num_valid_gt
        total_num_valid_dt += num_valid_dt
        # Per-box rows: [bbox(4), alpha] for gt; [bbox(4), alpha, score] for dt.
        gt_datas_list.append(
            np.concatenate([
                gt_annos[frame_idx]["bbox"],
                gt_annos[frame_idx]["alpha"][..., np.newaxis]
            ], 1))
        dt_datas_list.append(
            np.concatenate([
                dt_annos[frame_idx]["bbox"],
                dt_annos[frame_idx]["alpha"][..., np.newaxis],
                dt_annos[frame_idx]["score"][..., np.newaxis]
            ], 1))
    total_dc_num = np.stack(total_dc_num, axis=0)
    return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
            total_dc_num, total_num_valid_gt)
def eval_class(gt_annos,
               dt_annos,
               current_classes,
               difficultys,
               metric,
               min_overlaps,
               compute_aos=False,
               z_axis=1,
               z_center=1.0,
               num_parts=50):
    """Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
    Args:
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        current_class: int, 0: car, 1: pedestrian, 2: cyclist
        difficulty: int. eval difficulty, 0: easy, 1: normal, 2: hard
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        min_overlap: float, min overlap. official:
            [[0.7, 0.5, 0.5], [0.7, 0.5, 0.5], [0.7, 0.5, 0.5]]
            format: [metric, class]. choose one from matrix above.
        num_parts: int. a parameter for fast calculate algorithm
    Returns:
        dict of recall, precision and aos
    """
    assert len(gt_annos) == len(dt_annos)
    num_examples = len(gt_annos)
    split_parts = get_split_parts(num_examples, num_parts)
    # Overlaps are computed once, up front, for all frames.
    rets = calculate_iou_partly(
        dt_annos, gt_annos, metric, num_parts, z_axis=z_axis, z_center=z_center)
    overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
    N_SAMPLE_PTS = 41  # number of recall sample points on the PR curve
    num_minoverlap = len(min_overlaps)
    num_class = len(current_classes)
    num_difficulty = len(difficultys)
    precision = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    all_thresholds = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    # Evaluate every (class, difficulty, min_overlap) combination.
    for m, current_class in enumerate(current_classes):
        for l, difficulty in enumerate(difficultys):
            rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
            (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
             total_dc_num, total_num_valid_gt) = rets
            for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
                # First pass (recall only): collect the scores of matched
                # detections to derive score thresholds that sample the
                # recall axis uniformly.
                thresholdss = []
                for i in range(len(gt_annos)):
                    rets = compute_statistics_jit(
                        overlaps[i],
                        gt_datas_list[i],
                        dt_datas_list[i],
                        ignored_gts[i],
                        ignored_dets[i],
                        dontcares[i],
                        metric,
                        min_overlap=min_overlap,
                        thresh=0.0,
                        compute_fp=False)
                    tp, fp, fn, similarity, thresholds = rets
                    thresholdss += thresholds.tolist()
                thresholdss = np.array(thresholdss)
                thresholds = get_thresholds(thresholdss, total_num_valid_gt)
                thresholds = np.array(thresholds)
                all_thresholds[m, l, k, :len(thresholds)] = thresholds
                # pr rows: one per threshold; columns: tp, fp, fn, similarity.
                pr = np.zeros([len(thresholds), 4])
                idx = 0
                # Second pass: accumulate tp/fp/fn at every threshold,
                # batched by part for speed.
                for j, num_part in enumerate(split_parts):
                    gt_datas_part = np.concatenate(
                        gt_datas_list[idx:idx + num_part], 0)
                    dt_datas_part = np.concatenate(
                        dt_datas_list[idx:idx + num_part], 0)
                    dc_datas_part = np.concatenate(
                        dontcares[idx:idx + num_part], 0)
                    ignored_dets_part = np.concatenate(
                        ignored_dets[idx:idx + num_part], 0)
                    ignored_gts_part = np.concatenate(
                        ignored_gts[idx:idx + num_part], 0)
                    fused_compute_statistics(
                        parted_overlaps[j],
                        pr,
                        total_gt_num[idx:idx + num_part],
                        total_dt_num[idx:idx + num_part],
                        total_dc_num[idx:idx + num_part],
                        gt_datas_part,
                        dt_datas_part,
                        dc_datas_part,
                        ignored_gts_part,
                        ignored_dets_part,
                        metric,
                        min_overlap=min_overlap,
                        thresholds=thresholds,
                        compute_aos=compute_aos)
                    idx += num_part
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
                    if compute_aos:
                        aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
                # Make precision monotonically non-increasing along the
                # recall axis (standard AP interpolation).
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = np.max(
                        precision[m, l, k, i:], axis=-1)
                    if compute_aos:
                        aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
    ret_dict = {
        # "recall": recall, # [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]
        "precision": precision,
        "orientation": aos,
        "thresholds": all_thresholds,
        "min_overlaps": min_overlaps,
    }
    return ret_dict
def get_mAP_v2(prec):
    """11-point interpolated mAP, in percent.

    Takes every 4th precision entry along the last axis (11 samples on
    KITTI's 41-point recall grid) and averages them.
    """
    n_samples = prec.shape[-1]
    total = sum(prec[..., idx] for idx in range(0, n_samples, 4))
    return total / 11 * 100
def get_mAP_r40(prec):
    """R40 mAP, in percent: mean precision over the last 40 of 41 recall
    positions (the recall-0 sample is dropped)."""
    tail = prec[:, 1:]
    return tail.sum(axis=1) / 40 * 100
def do_eval_v2(gt_annos,
               dt_annos,
               current_classes,
               min_overlaps,
               compute_aos=False,
               difficultys=(0, 1, 2),
               z_axis=1,
               z_center=1.0):
    """Run bbox/bev/3d evaluation and reduce precisions to 11-point mAPs.

    min_overlaps: [num_minoverlap, metric, num_class].
    Returns (mAP_bbox, mAP_bev, mAP_3d, mAP_aos); mAP_aos is None unless
    compute_aos is set. Each mAP has shape
    [num_class, num_diff, num_minoverlap].
    """

    def _run(metric, with_aos=False):
        # eval_class result: [num_class, num_diff, num_minoverlap, num_sample_points]
        return eval_class(
            gt_annos,
            dt_annos,
            current_classes,
            difficultys,
            metric,
            min_overlaps,
            with_aos,
            z_axis=z_axis,
            z_center=z_center)

    ret = _run(0, compute_aos)  # metric 0: 2D bbox (orientation piggybacks)
    mAP_bbox = get_mAP_v2(ret["precision"])
    mAP_aos = get_mAP_v2(ret["orientation"]) if compute_aos else None
    mAP_bev = get_mAP_v2(_run(1)["precision"])  # metric 1: BEV
    mAP_3d = get_mAP_v2(_run(2)["precision"])   # metric 2: 3D
    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def do_eval_v3(gt_annos,
               dt_annos,
               current_classes,
               min_overlaps,
               compute_aos=False,
               difficultys=(0, 1, 2),
               z_axis=1,
               z_center=1.0,
               metric_types=("bbox", "bev", "3d")):
    """Evaluate each requested metric type and return the raw results.

    min_overlaps: [num_minoverlap, metric, num_class].
    Returns {metric_type: eval_class result dict}, in metric_types order.
    """
    metric_ids = {"bbox": 0, "bev": 1, "3d": 2}
    return {
        name: eval_class(
            gt_annos,
            dt_annos,
            current_classes,
            difficultys,
            metric_ids[name],
            min_overlaps,
            compute_aos,
            z_axis=z_axis,
            z_center=z_center,
            num_parts=100)
        for name in metric_types
    }
def do_coco_style_eval(gt_annos,
                       dt_annos,
                       current_classes,
                       overlap_ranges,
                       compute_aos,
                       z_axis=1,
                       z_center=1.0):
    """COCO-style evaluation: mAP averaged over a sweep of overlap thresholds.

    Args:
        overlap_ranges: [3, metric, num_class]; each (start, stop, num)
            column is expanded with np.linspace into the threshold sweep.

    Returns:
        (mAP_bbox, mAP_bev, mAP_3d, mAP_aos), each averaged over the
        threshold axis; mAP_aos is None when compute_aos is False.
    """
    min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
    for i in range(overlap_ranges.shape[1]):
        for j in range(overlap_ranges.shape[2]):
            start, stop, num = overlap_ranges[:, i, j]
            # int(num): np.linspace requires an integer sample count; the
            # original unpacked the raw float64 slice entry, which raises
            # TypeError on NumPy >= 1.18.
            min_overlaps[:, i, j] = np.linspace(start, stop, int(num))
    mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval_v2(
        gt_annos,
        dt_annos,
        current_classes,
        min_overlaps,
        compute_aos,
        z_axis=z_axis,
        z_center=z_center)
    # each mAP: [num_class, num_diff, num_minoverlap] -> average the sweep
    mAP_bbox = mAP_bbox.mean(-1)
    mAP_bev = mAP_bev.mean(-1)
    mAP_3d = mAP_3d.mean(-1)
    if mAP_aos is not None:
        mAP_aos = mAP_aos.mean(-1)
    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def print_str(value, *arg, sstream=None):
    """Render ``print(value, *arg)`` into a string (trailing newline kept).

    A caller-supplied stream is cleared and reused; otherwise a fresh
    StringIO is allocated.
    """
    buf = sysio.StringIO() if sstream is None else sstream
    buf.truncate(0)
    buf.seek(0)
    print(value, *arg, file=buf)
    return buf.getvalue()
def get_official_eval_result(gt_annos,
                             dt_annos,
                             current_classes,
                             difficultys=[0, 1, 2],
                             z_axis=1,
                             z_center=1.0,
                             metric_types=("bbox", "bev", "3d"),
                             recall_type='R40'):
    """Compute official-style eval results for the Apollo class set.

    gt_annos and dt_annos must contain the keys
    [bbox, location, dimensions, rotation_y, score].

    Returns:
        OrderedDict mapping class name -> overlap-threshold tuple ->
        metric type -> mAP array (one value per difficulty). "aos" is
        appended to the metric types when any detection carries a valid
        alpha and "bbox" was evaluated.
    """
    # Per-metric-row minimum overlaps for the 5 classes
    # (smallMot, bigMot, nonMot, pedestrian, TrafficCone).
    overlap_mod = np.array([[0.7, 0.7, 0.5, 0.5, 0.4],
                            [0.7, 0.7, 0.5, 0.5, 0.4],
                            [0.7, 0.7, 0.5, 0.5, 0.4]])
    overlap_easy = np.array([[0.7, 0.7, 0.5, 0.5, 0.4],
                             [0.5, 0.5, 0.25, 0.25, 0.2],
                             [0.5, 0.5, 0.25, 0.25, 0.2]])
    # shape: [2 (threshold sets), 3 (metrics), 5 (classes)]
    # (the old inline comment said [2, 3, 4], which was wrong)
    min_overlaps = np.stack([overlap_mod, overlap_easy], axis=0)
    class_to_name = {
        0: 'smallMot',
        1: 'bigMot',
        2: 'nonMot',
        3: 'pedestrian',
        4: 'TrafficCone'
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    # normalize class names to integer ids
    current_classes = [
        name_to_class[c] if isinstance(c, str) else c for c in current_classes
    ]
    min_overlaps = min_overlaps[:, :, current_classes]
    # alpha == -10 marks an invalid orientation; enable AOS only when the
    # first detection entry that has boxes carries a real alpha.
    compute_aos = False
    for anno in dt_annos:
        if anno['alpha'].shape[0] != 0:
            if anno['alpha'][0] != -10:
                compute_aos = True
            break
    metrics = do_eval_v3(
        gt_annos,
        dt_annos,
        current_classes,
        min_overlaps,
        compute_aos,
        difficultys,
        z_axis=z_axis,
        z_center=z_center,
        metric_types=metric_types)
    res = OrderedDict()
    if compute_aos and "bbox" in metric_types:
        metric_types = list(metric_types) + ["aos"]
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        curcls = class_to_name[curcls]
        res[curcls] = OrderedDict()
        for i in range(min_overlaps.shape[0]):
            overlap = tuple(min_overlaps[i, :, j].tolist())
            res[curcls][overlap] = OrderedDict()
            for metric_type in metric_types:
                # AOS is derived from the bbox orientation table; every
                # other metric uses its own precision table.
                source = (metrics["bbox"]["orientation"]
                          if metric_type == "aos" else
                          metrics[metric_type]["precision"])
                if recall_type == 'R40':
                    res[curcls][overlap][metric_type] = get_mAP_r40(
                        source[j, :, i])
                elif recall_type == 'R11':
                    res[curcls][overlap][metric_type] = get_mAP_v2(
                        source[j, :, i])
    return res
def get_coco_eval_result(gt_annos,
                         dt_annos,
                         current_classes,
                         z_axis=1,
                         z_center=1.0):
    """COCO-style KITTI evaluation over a sweep of overlap thresholds.

    Returns a printable result string with bbox/bev/3d (and, when valid
    alphas are present, aos) APs averaged over the threshold sweep, per
    difficulty.
    """
    class_to_name = {
        0: 'Car',
        1: 'Pedestrian',
        2: 'Cyclist',
        3: 'Van',
        4: 'Person_sitting',
        5: 'car',
        6: 'tractor',
        7: 'trailer',
    }
    # (start, stop, num) np.linspace parameters of the overlap sweep per
    # class id. NOTE: an identical-keyed dict with step-style ranges used
    # to be defined immediately before this one and was overwritten without
    # ever being read; the dead copy has been removed.
    class_to_range = {
        0: [0.5, 0.95, 10],
        1: [0.25, 0.7, 10],
        2: [0.25, 0.7, 10],
        3: [0.5, 0.95, 10],
        4: [0.25, 0.7, 10],
        5: [0.5, 0.95, 10],
        6: [0.5, 0.95, 10],
        7: [0.5, 0.95, 10],
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    # normalize class names to integer ids
    current_classes_int = []
    for curcls in current_classes:
        if isinstance(curcls, str):
            current_classes_int.append(name_to_class[curcls])
        else:
            current_classes_int.append(curcls)
    current_classes = current_classes_int
    # overlap_ranges: [3 (start, stop, num), metric, num_class]
    overlap_ranges = np.zeros([3, 3, len(current_classes)])
    for i, curcls in enumerate(current_classes):
        overlap_ranges[:, :, i] = np.array(
            class_to_range[curcls])[:, np.newaxis]
    result = ''
    # alpha == -10 marks an invalid orientation; enable AOS only when the
    # first detection entry that has boxes carries a real alpha.
    compute_aos = False
    for anno in dt_annos:
        if anno['alpha'].shape[0] != 0:
            if anno['alpha'][0] != -10:
                compute_aos = True
            break
    mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
        gt_annos,
        dt_annos,
        current_classes,
        overlap_ranges,
        compute_aos,
        z_axis=z_axis,
        z_center=z_center)
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        # Reorder (start, stop, num) -> (start, num, stop), then replace
        # the middle entry by the step so the header reads start:step:stop.
        o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
        o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
        result += print_str((f"{class_to_name[curcls]} "
                             "coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range)))
        result += print_str((f"bbox AP:{mAPbbox[j, 0]:.2f}, "
                             f"{mAPbbox[j, 1]:.2f}, "
                             f"{mAPbbox[j, 2]:.2f}"))
        result += print_str((f"bev AP:{mAPbev[j, 0]:.2f}, "
                             f"{mAPbev[j, 1]:.2f}, "
                             f"{mAPbev[j, 2]:.2f}"))
        result += print_str((f"3d AP:{mAP3d[j, 0]:.2f}, "
                             f"{mAP3d[j, 1]:.2f}, "
                             f"{mAP3d[j, 2]:.2f}"))
        if compute_aos:
            result += print_str((f"aos AP:{mAPaos[j, 0]:.2f}, "
                                 f"{mAPaos[j, 1]:.2f}, "
                                 f"{mAPaos[j, 2]:.2f}"))
    return result
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/evaluate_panoptic.py
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import argparse
import os
import yaml
import sys
import numpy as np
import time
import json
from auxiliary.eval_np import PanopticEval
# possible splits
splits = ["train", "valid", "test"]
if __name__ == '__main__':
    # ---- command-line interface -------------------------------------------
    parser = argparse.ArgumentParser("./evaluate_panoptic.py")
    parser.add_argument(
        '--dataset',
        '-d',
        type=str,
        required=True,
        help='Dataset dir. No Default',
    )
    parser.add_argument(
        '--predictions',
        '-p',
        type=str,
        required=None,
        help='Prediction dir. Same organization as dataset, but predictions in'
        'each sequences "prediction" directory. No Default. If no option is set'
        ' we look for the labels in the same directory as dataset')
    parser.add_argument(
        '--split',
        '-s',
        type=str,
        required=False,
        choices=["train", "valid", "test"],
        default="valid",
        help='Split to evaluate on. One of ' + str(splits) + '. Defaults to %(default)s',
    )
    parser.add_argument(
        '--data_cfg',
        '-dc',
        type=str,
        required=False,
        default="config/semantic-kitti.yaml",
        help='Dataset config file. Defaults to %(default)s',
    )
    parser.add_argument(
        '--limit',
        '-l',
        type=int,
        required=False,
        default=None,
        help='Limit to the first "--limit" points of each scan. Useful for'
        ' evaluating single scan from aggregated pointcloud.'
        ' Defaults to %(default)s',
    )
    parser.add_argument(
        '--min_inst_points',
        type=int,
        required=False,
        default=50,
        help='Lower bound for the number of points to be considered instance',
    )
    parser.add_argument(
        '--output',
        type=str,
        required=False,
        default=None,
        help='Output directory for scores.txt and detailed_results.html.',
    )
    start_time = time.time()
    FLAGS, unparsed = parser.parse_known_args()
    # fill in real predictions dir
    if FLAGS.predictions is None:
        FLAGS.predictions = FLAGS.dataset
    # print summary of what we will do
    print("*" * 80)
    print("INTERFACE:")
    print("Data: ", FLAGS.dataset)
    print("Predictions: ", FLAGS.predictions)
    print("Split: ", FLAGS.split)
    print("Config: ", FLAGS.data_cfg)
    print("Limit: ", FLAGS.limit)
    print("Min instance points: ", FLAGS.min_inst_points)
    print("Output directory", FLAGS.output)
    print("*" * 80)
    # assert split
    assert (FLAGS.split in splits)
    # open data config file
    # NOTE(review): the config file handle is never closed explicitly;
    # acceptable for a one-shot script but a `with` block would be cleaner.
    DATA = yaml.safe_load(open(FLAGS.data_cfg, 'r'))
    # get number of interest classes, and the label mappings
    # class
    class_remap = DATA["learning_map"]
    class_inv_remap = DATA["learning_map_inv"]
    class_ignore = DATA["learning_ignore"]
    nr_classes = len(class_inv_remap)
    class_strings = DATA["labels"]
    # make lookup table for mapping raw label ids -> training (xentropy) ids
    # class
    maxkey = max(class_remap.keys())
    # +100 hack making lut bigger just in case there are unknown labels
    class_lut = np.zeros((maxkey + 100), dtype=np.int32)
    class_lut[list(class_remap.keys())] = list(class_remap.values())
    # class
    ignore_class = [cl for cl, ignored in class_ignore.items() if ignored]
    print("Ignoring classes: ", ignore_class)
    # create evaluator
    class_evaluator = PanopticEval(nr_classes, None, ignore_class, min_points=FLAGS.min_inst_points)
    # get test set
    test_sequences = DATA["split"][FLAGS.split]
    # get label paths (ground truth ".label" files, sorted per sequence)
    label_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        label_paths = os.path.join(FLAGS.dataset, "sequences", sequence, "labels")
        # populate the label names
        seq_label_names = sorted([os.path.join(label_paths, fn) for fn in os.listdir(label_paths) if fn.endswith(".label")])
        label_names.extend(seq_label_names)
    # print(label_names)
    # get predictions paths (same layout, "predictions" subfolder)
    pred_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        pred_paths = os.path.join(FLAGS.predictions, "sequences", sequence, "predictions")
        # populate the label names
        seq_pred_names = sorted([os.path.join(pred_paths, fn) for fn in os.listdir(pred_paths) if fn.endswith(".label")])
        pred_names.extend(seq_pred_names)
    # print(pred_names)
    # check that I have the same number of files
    assert (len(label_names) == len(pred_names))
    print("Evaluating sequences: ", end="", flush=True)
    # open each file, get the tensor, and make the iou comparison
    # progress-reporting state for the evaluation loop below
    complete = len(label_names)
    count = 0
    percent = 10
for label_file, pred_file in zip(label_names, pred_names):
count = count + 1
if 100 * count / complete > percent:
print("{}% ".format(percent), end="", flush=True)
percent = percent + 10
# print("evaluating label ", label_file, "with", pred_file)
# open label
label = np.fromfile(label_file, dtype=np.uint32)
u_label_sem_class = class_lut[label & 0xFFFF] # remap to xentropy format
u_label_inst = label # unique instance ids.
if FLAGS.limit is not None:
u_label_sem_class = u_label_sem_class[:FLAGS.limit]
u_label_sem_cat = u_label_sem_cat[:FLAGS.limit]
u_label_inst = u_label_inst[:FLAGS.limit]
label = np.fromfile(pred_file, dtype=np.uint32)
u_pred_sem_class = class_lut[label & 0xFFFF] # remap to xentropy format
u_pred_inst = label # unique instance ids.
if FLAGS.limit is not None:
u_pred_sem_class = u_pred_sem_class[:FLAGS.limit]
u_pred_sem_cat = u_pred_sem_cat[:FLAGS.limit]
u_pred_inst = u_pred_inst[:FLAGS.limit]
class_evaluator.addBatch(u_pred_sem_class, u_pred_inst, u_label_sem_class, u_label_inst)
    print("100%")
    complete_time = time.time() - start_time
    # when I am done, print the evaluation
    class_PQ, class_SQ, class_RQ, class_all_PQ, class_all_SQ, class_all_RQ = class_evaluator.getPQ()
    class_IoU, class_all_IoU = class_evaluator.getSemIoU()
    # now make a nice dictionary
    output_dict = {}
    # make python variables (unwrap numpy scalars/arrays for serialization)
    class_PQ = class_PQ.item()
    class_SQ = class_SQ.item()
    class_RQ = class_RQ.item()
    class_all_PQ = class_all_PQ.flatten().tolist()
    class_all_SQ = class_all_SQ.flatten().tolist()
    class_all_RQ = class_all_RQ.flatten().tolist()
    class_IoU = class_IoU.item()
    class_all_IoU = class_all_IoU.flatten().tolist()
    # fill in with the raw values
    # output_dict["raw"] = {}
    # output_dict["raw"]["class_PQ"] = class_PQ
    # output_dict["raw"]["class_SQ"] = class_SQ
    # output_dict["raw"]["class_RQ"] = class_RQ
    # output_dict["raw"]["class_all_PQ"] = class_all_PQ
    # output_dict["raw"]["class_all_SQ"] = class_all_SQ
    # output_dict["raw"]["class_all_RQ"] = class_all_RQ
    # output_dict["raw"]["class_IoU"] = class_IoU
    # output_dict["raw"]["class_all_IoU"] = class_all_IoU
    # SemanticKITTI panoptic split: countable objects vs. amorphous regions
    things = ['car', 'truck', 'bicycle', 'motorcycle', 'other-vehicle', 'person', 'bicyclist', 'motorcyclist']
    stuff = [
        'road', 'sidewalk', 'parking', 'other-ground', 'building', 'vegetation', 'trunk', 'terrain', 'fence', 'pole',
        'traffic-sign'
    ]
    all_classes = things + stuff
    # class
    output_dict["all"] = {}
    output_dict["all"]["PQ"] = class_PQ
    output_dict["all"]["SQ"] = class_SQ
    output_dict["all"]["RQ"] = class_RQ
    output_dict["all"]["IoU"] = class_IoU
    classwise_tables = {}
    for idx, (pq, rq, sq, iou) in enumerate(zip(class_all_PQ, class_all_RQ, class_all_SQ, class_all_IoU)):
        class_str = class_strings[class_inv_remap[idx]]
        output_dict[class_str] = {}
        output_dict[class_str]["PQ"] = pq
        output_dict[class_str]["SQ"] = sq
        output_dict[class_str]["RQ"] = rq
        output_dict[class_str]["IoU"] = iou
    # aggregate metrics; PQ_dagger replaces PQ by IoU for stuff classes
    PQ_all = np.mean([float(output_dict[c]["PQ"]) for c in all_classes])
    PQ_dagger = np.mean([float(output_dict[c]["PQ"]) for c in things] + [float(output_dict[c]["IoU"]) for c in stuff])
    RQ_all = np.mean([float(output_dict[c]["RQ"]) for c in all_classes])
    SQ_all = np.mean([float(output_dict[c]["SQ"]) for c in all_classes])
    PQ_things = np.mean([float(output_dict[c]["PQ"]) for c in things])
    RQ_things = np.mean([float(output_dict[c]["RQ"]) for c in things])
    SQ_things = np.mean([float(output_dict[c]["SQ"]) for c in things])
    PQ_stuff = np.mean([float(output_dict[c]["PQ"]) for c in stuff])
    RQ_stuff = np.mean([float(output_dict[c]["RQ"]) for c in stuff])
    SQ_stuff = np.mean([float(output_dict[c]["SQ"]) for c in stuff])
    mIoU = output_dict["all"]["IoU"]
    # flat key/value summary in the format the codalab server expects
    codalab_output = {}
    codalab_output["pq_mean"] = float(PQ_all)
    codalab_output["pq_dagger"] = float(PQ_dagger)
    codalab_output["sq_mean"] = float(SQ_all)
    codalab_output["rq_mean"] = float(RQ_all)
    codalab_output["iou_mean"] = float(mIoU)
    codalab_output["pq_stuff"] = float(PQ_stuff)
    codalab_output["rq_stuff"] = float(RQ_stuff)
    codalab_output["sq_stuff"] = float(SQ_stuff)
    codalab_output["pq_things"] = float(PQ_things)
    codalab_output["rq_things"] = float(RQ_things)
    codalab_output["sq_things"] = float(SQ_things)
    print("Completed in {} s".format(complete_time))
    if FLAGS.output is not None:
        # per-class rows for the HTML table below
        table = []
        for cl in all_classes:
            entry = output_dict[cl]
            table.append({
                "class": cl,
                "pq": "{:.3}".format(entry["PQ"]),
                "sq": "{:.3}".format(entry["SQ"]),
                "rq": "{:.3}".format(entry["RQ"]),
                "iou": "{:.3}".format(entry["IoU"])
            })
        print("Generating output files.")
        # save to yaml
        output_filename = os.path.join(FLAGS.output, 'scores.txt')
        with open(output_filename, 'w') as outfile:
            yaml.dump(codalab_output, outfile, default_flow_style=False)
        ## producing a detailed result page.
        output_filename = os.path.join(FLAGS.output, "detailed_results.html")
        with open(output_filename, "w") as html_file:
            html_file.write("""
<!doctype html>
<html lang="en" style="scroll-behavior: smooth;">
<head>
<script src='https://cdnjs.cloudflare.com/ajax/libs/tabulator/4.4.3/js/tabulator.min.js'></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/tabulator/4.4.3/css/bulma/tabulator_bulma.min.css">
</head>
<body>
<div id="classwise_results"></div>
<script>
let table_data = """ + json.dumps(table) + """
table = new Tabulator("#classwise_results", {
layout: "fitData",
data: table_data,
columns: [{title: "Class", field:"class", width:200},
{title: "PQ", field:"pq", width:100, align: "center"},
{title: "SQ", field:"sq", width:100, align: "center"},
{title: "RQ", field:"rq", width:100, align: "center"},
{title: "IoU", field:"iou", width:100, align: "center"}]
});
</script>
</body>
</html>""")
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/evaluate_semantics.py
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import argparse
import os
import yaml
import sys
import numpy as np
# possible splits
splits = ["train", "valid", "test"]
# possible backends
backends = ["numpy", "torch"]
if __name__ == '__main__':
    # ---- command-line interface -------------------------------------------
    parser = argparse.ArgumentParser("./evaluate_semantics.py")
    parser.add_argument(
        '--dataset', '-d',
        type=str,
        required=True,
        help='Dataset dir. No Default',
    )
    parser.add_argument(
        '--predictions', '-p',
        type=str,
        required=None,
        help='Prediction dir. Same organization as dataset, but predictions in'
        'each sequences "prediction" directory. No Default. If no option is set'
        ' we look for the labels in the same directory as dataset'
    )
    parser.add_argument(
        '--split', '-s',
        type=str,
        required=False,
        choices=["train", "valid", "test"],
        default="valid",
        help='Split to evaluate on. One of ' +
        str(splits) + '. Defaults to %(default)s',
    )
    parser.add_argument(
        '--backend', '-b',
        type=str,
        required=False,
        choices= ["numpy", "torch"],
        default="numpy",
        help='Backend for evaluation. One of ' +
        str(backends) + ' Defaults to %(default)s',
    )
    parser.add_argument(
        '--datacfg', '-dc',
        type=str,
        required=False,
        default="config/semantic-kitti.yaml",
        help='Dataset config file. Defaults to %(default)s',
    )
    parser.add_argument(
        '--limit', '-l',
        type=int,
        required=False,
        default=None,
        help='Limit to the first "--limit" points of each scan. Useful for'
        ' evaluating single scan from aggregated pointcloud.'
        ' Defaults to %(default)s',
    )
    parser.add_argument(
        '--codalab',
        dest='codalab',
        type=str,
        default=None,
        help='Exports "scores.txt" to given output directory for codalab'
        'Defaults to %(default)s',
    )
    FLAGS, unparsed = parser.parse_known_args()
    # fill in real predictions dir
    if FLAGS.predictions is None:
        FLAGS.predictions = FLAGS.dataset
    # print summary of what we will do
    print("*" * 80)
    print("INTERFACE:")
    print("Data: ", FLAGS.dataset)
    print("Predictions: ", FLAGS.predictions)
    print("Backend: ", FLAGS.backend)
    print("Split: ", FLAGS.split)
    print("Config: ", FLAGS.datacfg)
    print("Limit: ", FLAGS.limit)
    print("Codalab: ", FLAGS.codalab)
    print("*" * 80)
    # assert split
    assert(FLAGS.split in splits)
    # assert backend
    assert(FLAGS.backend in backends)
    print("Opening data config file %s" % FLAGS.datacfg)
    DATA = yaml.safe_load(open(FLAGS.datacfg, 'r'))
    # get number of interest classes, and the label mappings
    class_strings = DATA["labels"]
    class_remap = DATA["learning_map"]
    class_inv_remap = DATA["learning_map_inv"]
    class_ignore = DATA["learning_ignore"]
    nr_classes = len(class_inv_remap)
    # make lookup table for mapping raw label ids -> training (xentropy) ids
    maxkey = max(class_remap.keys())
    # +100 hack making lut bigger just in case there are unknown labels
    remap_lut = np.zeros((maxkey + 100), dtype=np.int32)
    remap_lut[list(class_remap.keys())] = list(class_remap.values())
    # print(remap_lut)
    # create evaluator
    ignore = []
    for cl, ign in class_ignore.items():
        if ign:
            x_cl = int(cl)
            ignore.append(x_cl)
            print("Ignoring xentropy class ", x_cl, " in IoU evaluation")
    # create evaluator
    if FLAGS.backend == "torch":
        from auxiliary.torch_ioueval import iouEval
        evaluator = iouEval(nr_classes, ignore)
    elif FLAGS.backend == "numpy":
        from auxiliary.np_ioueval import iouEval
        evaluator = iouEval(nr_classes, ignore)
    else:
        # unreachable in practice: argparse restricts --backend via choices
        print("Backend for evaluator should be one of ", str(backends))
        quit()
    evaluator.reset()
    # get test set
    test_sequences = DATA["split"][FLAGS.split]
    # get label paths (ground truth ".label" files, sorted per sequence)
    label_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        label_paths = os.path.join(FLAGS.dataset, "sequences",
                                   str(sequence), "labels")
        # populate the label names
        seq_label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
            os.path.expanduser(label_paths)) for f in fn if ".label" in f]
        seq_label_names.sort()
        label_names.extend(seq_label_names)
    # print(label_names)
    # get predictions paths (same layout, "predictions" subfolder)
    pred_names = []
    for sequence in test_sequences:
        sequence = '{0:02d}'.format(int(sequence))
        pred_paths = os.path.join(FLAGS.predictions, "sequences",
                                  sequence, "predictions")
        # populate the label names
        seq_pred_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
            os.path.expanduser(pred_paths)) for f in fn if ".label" in f]
        seq_pred_names.sort()
        pred_names.extend(seq_pred_names)
    # print(pred_names)
    # check that I have the same number of files
    # print("labels: ", len(label_names))
    # print("predictions: ", len(pred_names))
    assert(len(label_names) == len(pred_names))
    progress = 10
    count = 0
    print("Evaluating sequences: ", end="", flush=True)
    # open each file, get the tensor, and make the iou comparison
    for label_file, pred_file in zip(label_names, pred_names):
        count += 1
        if 100 * count / len(label_names) > progress:
            print("{:d}% ".format(progress), end="", flush=True)
            progress += 10
        # print("evaluating label ", label_file)
        # open label
        # NOTE(review): labels are read as int32 here but as uint32 in
        # evaluate_panoptic.py; the & 0xFFFF mask makes the lower 16
        # semantic bits agree either way.
        label = np.fromfile(label_file, dtype=np.int32)
        label = label.reshape((-1))  # reshape to vector
        label = label & 0xFFFF  # get lower half for semantics
        if FLAGS.limit is not None:
            label = label[:FLAGS.limit]  # limit to desired length
        label = remap_lut[label]  # remap to xentropy format
        # open prediction
        pred = np.fromfile(pred_file, dtype=np.int32)
        pred = pred.reshape((-1))  # reshape to vector
        pred = pred & 0xFFFF  # get lower half for semantics
        if FLAGS.limit is not None:
            pred = pred[:FLAGS.limit]  # limit to desired length
        pred = remap_lut[pred]  # remap to xentropy format
        # add single scan to evaluation
        evaluator.addBatch(pred, label)
    # when I am done, print the evaluation
    m_accuracy = evaluator.getacc()
    m_jaccard, class_jaccard = evaluator.getIoU()
    print('Validation set:\n'
          'Acc avg {m_accuracy:.3f}\n'
          'IoU avg {m_jaccard:.3f}'.format(m_accuracy=m_accuracy,
                                           m_jaccard=m_jaccard))
    # print also classwise
    for i, jacc in enumerate(class_jaccard):
        if i not in ignore:
            print('IoU class {i:} [{class_str:}] = {jacc:.3f}'.format(
                i=i, class_str=class_strings[class_inv_remap[i]], jacc=jacc))
    # print for spreadsheet (comma-separated IoUs, then mIoU and accuracy)
    print("*" * 80)
    print("below can be copied straight for paper table")
    for i, jacc in enumerate(class_jaccard):
        if i not in ignore:
            sys.stdout.write('{jacc:.3f}'.format(jacc=jacc.item()))
            sys.stdout.write(",")
    sys.stdout.write('{jacc:.3f}'.format(jacc=m_jaccard.item()))
    sys.stdout.write(",")
    sys.stdout.write('{acc:.3f}'.format(acc=m_accuracy.item()))
    sys.stdout.write('\n')
    sys.stdout.flush()
    # if codalab is necessary, then do it
    if FLAGS.codalab is not None:
        results = {}
        results["accuracy_mean"] = float(m_accuracy)
        results["iou_mean"] = float(m_jaccard)
        for i, jacc in enumerate(class_jaccard):
            if i not in ignore:
                results["iou_"+class_strings[class_inv_remap[i]]] = float(jacc)
        # save to file
        output_filename = os.path.join(FLAGS.codalab, 'scores.txt')
        with open(output_filename, 'w') as yaml_file:
            yaml.dump(results, yaml_file, default_flow_style=False)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/generate_sequential.py
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import argparse
import os
import yaml
import numpy as np
from collections import deque
import shutil
from numpy.linalg import inv
import struct
import time
def parse_calibration(filename):
    """Read a KITTI calibration file.

    Each line has the form ``key: v0 v1 ... v11`` (a flattened 3x4 matrix),
    which is expanded to a homogeneous 4x4 matrix with bottom row
    [0, 0, 0, 1].

    Returns
    -------
    dict
        Calibration matrices as 4x4 numpy arrays, keyed by the line key.
    """
    calib = {}
    # 'with' guarantees the handle is closed even if a line fails to parse
    # (the original left the file open on exceptions).
    with open(filename) as calib_file:
        for line in calib_file:
            key, content = line.strip().split(":")
            values = [float(v) for v in content.strip().split()]
            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0
            calib[key] = pose
    return calib
def parse_poses(filename, calibration):
    """Read per-scan poses and transform them into the sensor frame.

    Each line holds a flattened 3x4 pose matrix P; every pose is remapped
    as ``Tr^-1 @ P @ Tr`` using the "Tr" matrix from `calibration`.

    Returns
    -------
    list
        List of poses as 4x4 numpy arrays.
    """
    poses = []
    Tr = calibration["Tr"]
    Tr_inv = inv(Tr)
    # 'with' closes the file deterministically (the original never closed it).
    with open(filename) as pose_file:
        for line in pose_file:
            values = [float(v) for v in line.strip().split()]
            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0
            poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr)))
    return poses
if __name__ == '__main__':
    start_time = time.time()
    # ---- command-line interface -------------------------------------------
    parser = argparse.ArgumentParser("./generate_sequential.py")
    parser.add_argument(
        '--dataset',
        '-d',
        type=str,
        required=True,
        help='dataset folder containing all sequences in a folder called "sequences".',
    )
    parser.add_argument(
        '--output',
        '-o',
        type=str,
        required=True,
        help='output folder for generated sequence scans.',
    )
    parser.add_argument(
        '--sequence_length',
        '-s',
        type=int,
        required=True,
        help='length of sequence, i.e., how many scans are concatenated.',
    )
    FLAGS, unparsed = parser.parse_known_args()
    # print summary of what we will do
    print("*" * 80)
    print(" dataset folder: ", FLAGS.dataset)
    print(" output folder: ", FLAGS.output)
    print("sequence length: ", FLAGS.sequence_length)
    print("*" * 80)
    sequences_dir = os.path.join(FLAGS.dataset, "sequences")
    sequence_folders = [
        f for f in sorted(os.listdir(sequences_dir))
        if os.path.isdir(os.path.join(sequences_dir, f))
    ]
    for folder in sequence_folders:
        input_folder = os.path.join(sequences_dir, folder)
        output_folder = os.path.join(FLAGS.output, "sequences", folder)
        velodyne_folder = os.path.join(output_folder, "velodyne")
        labels_folder = os.path.join(output_folder, "labels")
        # ask before clobbering an existing output; create whatever is missing
        if os.path.exists(output_folder) or os.path.exists(
                velodyne_folder) or os.path.exists(labels_folder):
            print("Output folder '{}' already exists!".format(output_folder))
            answer = input("Overwrite? [y/N] ")
            if answer != "y":
                print("Aborted.")
                exit(1)
            if not os.path.exists(velodyne_folder):
                os.makedirs(velodyne_folder)
            if not os.path.exists(labels_folder):
                os.makedirs(labels_folder)
        else:
            os.makedirs(velodyne_folder)
            os.makedirs(labels_folder)
        shutil.copy(os.path.join(input_folder, "poses.txt"), output_folder)
        shutil.copy(os.path.join(input_folder, "calib.txt"), output_folder)
        scan_files = [
            f for f in sorted(os.listdir(os.path.join(input_folder, "velodyne")))
            if f.endswith(".bin")
        ]
        # history holds the last (sequence_length - 1) scans, newest first
        history = deque()
        calibration = parse_calibration(os.path.join(input_folder, "calib.txt"))
        poses = parse_poses(os.path.join(input_folder, "poses.txt"), calibration)
        progress = 10
        print("Processing {} ".format(folder), end="", flush=True)
        for i, f in enumerate(scan_files):
            # read scan and labels, get pose
            scan_filename = os.path.join(input_folder, "velodyne", f)
            scan = np.fromfile(scan_filename, dtype=np.float32)
            scan = scan.reshape((-1, 4))  # (N, 4): x, y, z, remission
            label_filename = os.path.join(input_folder, "labels", os.path.splitext(f)[0] + ".label")
            labels = np.fromfile(label_filename, dtype=np.uint32)
            labels = labels.reshape((-1))
            # convert points to homogenous coordinates (x, y, z, 1)
            points = np.ones((scan.shape))
            points[:, 0:3] = scan[:, 0:3]
            remissions = scan[:, 3]
            pose = poses[i]
            # prepare single numpy array for all points that can be written at once.
            num_concat_points = points.shape[0]
            num_concat_points += sum([past["points"].shape[0] for past in history])
            concated_points = np.zeros((num_concat_points * 4), dtype=np.float32)
            concated_labels = np.zeros((num_concat_points), dtype=np.uint32)
            start = 0
            # current scan goes first, verbatim
            concated_points[4 * start:4 * (start + points.shape[0])] = scan.reshape((-1))
            concated_labels[start:start + points.shape[0]] = labels
            start += points.shape[0]
            for past in history:
                # transform past points into the current scan's frame,
                # then restore the remission in the 4th column
                diff = np.matmul(inv(pose), past["pose"])
                tpoints = np.matmul(diff, past["points"].T).T
                tpoints[:, 3] = past["remissions"]
                tpoints = tpoints.reshape((-1))
                concated_points[4 * start:4 * (start + past["points"].shape[0])] = tpoints
                concated_labels[start:start + past["labels"].shape[0]] = past["labels"]
                start += past["points"].shape[0]
            # write scan and labels in one pass.
            concated_points.tofile(os.path.join(velodyne_folder, f))
            concated_labels.tofile(os.path.join(labels_folder, os.path.splitext(f)[0] + ".label"))
            # append current data to history queue.
            history.appendleft({
                "points": points,
                "labels": labels,
                "remissions": remissions,
                "pose": pose.copy()
            })
            # cap the history so current + past = sequence_length scans
            if len(history) >= FLAGS.sequence_length:
                history.pop()
            if 100.0 * i / len(scan_files) >= progress:
                print(".", end="", flush=True)
                progress = progress + 10
        print("finished.")
    # total wall-clock time over all sequences
    print("execution time: {}".format(time.time() - start_time))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/evaluate_mos.py
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
# developed by Xieyuanli Chen
import argparse
import os
import yaml
import sys
import numpy as np
# possible splits
splits = ["train", "valid", "test"]
# possible backends
backends = ["numpy", "torch"]
if __name__ == '__main__':
    # ---- command-line interface -------------------------------------------
    parser = argparse.ArgumentParser("./evaluate_mos.py")
    parser.add_argument(
        '--dataset', '-d',
        type=str,
        required=True,
        help='Dataset dir. No Default',
    )
    parser.add_argument(
        '--predictions', '-p',
        type=str,
        required=None,
        help='Prediction dir. Same organization as dataset, but predictions in'
        'each sequences "prediction" directory. No Default. If no option is set'
        ' we look for the labels in the same directory as dataset'
    )
    parser.add_argument(
        '--split', '-s',
        type=str,
        required=False,
        choices=["train", "valid", "test"],
        default="valid",
        help='Split to evaluate on. One of ' +
        str(splits) + '. Defaults to %(default)s',
    )
    parser.add_argument(
        '--backend', '-b',
        type=str,
        required=False,
        choices= ["numpy", "torch"],
        default="numpy",
        help='Backend for evaluation. One of ' +
        str(backends) + ' Defaults to %(default)s',
    )
    parser.add_argument(
        '--datacfg', '-dc',
        type=str,
        required=False,
        default="config/semantic-kitti-mos.yaml",
        help='Dataset config file. Defaults to %(default)s',
    )
    parser.add_argument(
        '--limit', '-l',
        type=int,
        required=False,
        default=None,
        help='Limit to the first "--limit" points of each scan. Useful for'
        ' evaluating single scan from aggregated pointcloud.'
        ' Defaults to %(default)s',
    )
    parser.add_argument(
        '--codalab',
        dest='codalab',
        type=str,
        default=None,
        help='Exports "scores.txt" to given output directory for codalab'
        'Defaults to %(default)s',
    )
    FLAGS, unparsed = parser.parse_known_args()
    # fill in real predictions dir
    if FLAGS.predictions is None:
        FLAGS.predictions = FLAGS.dataset
    # print summary of what we will do
    print("*" * 80)
    print("INTERFACE:")
    print("Data: ", FLAGS.dataset)
    print("Predictions: ", FLAGS.predictions)
    print("Backend: ", FLAGS.backend)
    print("Split: ", FLAGS.split)
    print("Config: ", FLAGS.datacfg)
    print("Limit: ", FLAGS.limit)
    print("Codalab: ", FLAGS.codalab)
    print("*" * 80)
    # assert split
    assert(FLAGS.split in splits)
    # assert backend
    assert(FLAGS.backend in backends)
    print("Opening data config file %s" % FLAGS.datacfg)
    DATA = yaml.safe_load(open(FLAGS.datacfg, 'r'))
    # get number of interest classes, and the label mappings
    class_strings = DATA["labels"]
    class_remap = DATA["learning_map"]
    class_inv_remap = DATA["learning_map_inv"]
    class_ignore = DATA["learning_ignore"]
    nr_classes = len(class_inv_remap)
    # make lookup table for mapping raw label ids -> training (xentropy) ids
    maxkey = max(class_remap.keys())
    # +100 hack making lut bigger just in case there are unknown labels
    remap_lut = np.zeros((maxkey + 100), dtype=np.int32)
    remap_lut[list(class_remap.keys())] = list(class_remap.values())
    # print(remap_lut)
    # create evaluator
    ignore = []
    for cl, ign in class_ignore.items():
        if ign:
            x_cl = int(cl)
            ignore.append(x_cl)
            print("Ignoring xentropy class ", x_cl, " in IoU evaluation")
# create evaluator
if FLAGS.backend == "torch":
from auxiliary.torch_ioueval import iouEval
evaluator = iouEval(nr_classes, ignore)
if FLAGS.backend == "numpy":
from auxiliary.np_ioueval import iouEval
evaluator = iouEval(nr_classes, ignore)
else:
print("Backend for evaluator should be one of ", str(backends))
quit()
evaluator.reset()
# get test set
test_sequences = DATA["split"][FLAGS.split]
# get label paths
label_names = []
for sequence in test_sequences:
sequence = '{0:02d}'.format(int(sequence))
label_paths = os.path.join(FLAGS.dataset, "sequences",
str(sequence), "labels")
# populate the label names
seq_label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(label_paths)) for f in fn if ".label" in f]
seq_label_names.sort()
label_names.extend(seq_label_names)
# print(label_names)
# get predictions paths
pred_names = []
for sequence in test_sequences:
sequence = '{0:02d}'.format(int(sequence))
pred_paths = os.path.join(FLAGS.predictions, "sequences",
sequence, "predictions")
# populate the label names
seq_pred_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(pred_paths)) for f in fn if ".label" in f]
seq_pred_names.sort()
pred_names.extend(seq_pred_names)
# print(pred_names)
# check that I have the same number of files
print("labels: ", len(label_names))
print("predictions: ", len(pred_names))
# assert(len(label_names) == len(pred_names))
progress = 10
count = 0
print("Evaluating sequences: ", end="", flush=True)
# open each file, get the tensor, and make the iou comparison
for label_file, pred_file in zip(label_names[:], pred_names[:]):
count += 1
if 100 * count / len(label_names) > progress:
print("{:d}% ".format(progress), end="", flush=True)
progress += 10
# print("evaluating label ", label_file)
# open label
label = np.fromfile(label_file, dtype=np.int32)
label = label.reshape((-1)) # reshape to vector
label = label & 0xFFFF # get lower half for semantics
if FLAGS.limit is not None:
label = label[:FLAGS.limit] # limit to desired length
label = remap_lut[label] # remap to xentropy format
# open prediction
pred = np.fromfile(pred_file, dtype=np.int32)
pred = pred.reshape((-1)) # reshape to vector
pred = pred & 0xFFFF # get lower half for semantics
if FLAGS.limit is not None:
pred = pred[:FLAGS.limit] # limit to desired length
pred = remap_lut[pred] # remap to xentropy format
# add single scan to evaluation
evaluator.addBatch(pred, label)
# when I am done, print the evaluation
m_accuracy = evaluator.getacc()
m_jaccard, class_jaccard = evaluator.getIoU()
# print for spreadsheet
print("*" * 80)
print("below can be copied straight for paper table")
for i, jacc in enumerate(class_jaccard):
if i not in ignore:
if int(class_inv_remap[i]) > 250:
sys.stdout.write('iou_moving: {jacc:.3f}'.format(jacc=jacc.item()))
sys.stdout.write('\n')
sys.stdout.flush()
# if codalab is necessary, then do it
# for moving object detection, we only care about the results of moving objects
if FLAGS.codalab is not None:
results = {}
for i, jacc in enumerate(class_jaccard):
if i not in ignore:
if int(class_inv_remap[i]) > 250:
results["iou_moving"] = float(jacc)
# save to file
output_filename = os.path.join(FLAGS.codalab, 'scores.txt')
with open(output_filename, 'w') as yaml_file:
yaml.dump(results, yaml_file, default_flow_style=False)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/LICENSE
|
The MIT License
Copyright (c) 2019, University of Bonn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/requirements.txt
|
matplotlib>=2.2.3
vispy>=0.5.3
torch>=1.1.0
numpy>=1.14.0
PyYAML>=5.1.1
imgui[glfw]>=1.0.0
glfw>=1.8.3
PyOpenGL>=3.1.0
pyqt5>=5.8.1.1
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/visualize_mos.py
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
# developed by Xieyuanli Chen
import argparse
import os
import yaml
from auxiliary.laserscan import LaserScan, SemLaserScan
from auxiliary.laserscanvis import LaserScanVis
if __name__ == '__main__':
parser = argparse.ArgumentParser("./visualize.py")
parser.add_argument(
'--dataset', '-d',
type=str,
required=True,
help='Dataset to visualize. No Default',
)
parser.add_argument(
'--config', '-c',
type=str,
required=False,
default="config/semantic-kitti-mos.yaml",
help='Dataset config file. Defaults to %(default)s',
)
parser.add_argument(
'--sequence', '-s',
type=str,
default="00",
required=False,
help='Sequence to visualize. Defaults to %(default)s',
)
parser.add_argument(
'--predictions', '-p',
type=str,
default=None,
required=False,
help='Alternate location for labels, to use predictions folder. '
'Must point to directory containing the predictions in the proper format '
' (see readme)'
'Defaults to %(default)s',
)
parser.add_argument(
'--ignore_semantics', '-i',
dest='ignore_semantics',
default=False,
action='store_true',
help='Ignore semantics. Visualizes uncolored pointclouds.'
'Defaults to %(default)s',
)
parser.add_argument(
'--do_instances', '-di',
dest='do_instances',
default=False,
action='store_true',
help='Visualize instances too. Defaults to %(default)s',
)
parser.add_argument(
'--offset',
type=int,
default=0,
required=False,
help='Sequence to start. Defaults to %(default)s',
)
parser.add_argument(
'--ignore_safety',
dest='ignore_safety',
default=False,
action='store_true',
help='Normally you want the number of labels and ptcls to be the same,'
', but if you are not done inferring this is not the case, so this disables'
' that safety.'
'Defaults to %(default)s',
)
FLAGS, unparsed = parser.parse_known_args()
# print summary of what we will do
print("*" * 80)
print("INTERFACE:")
print("Dataset", FLAGS.dataset)
print("Config", FLAGS.config)
print("Sequence", FLAGS.sequence)
print("Predictions", FLAGS.predictions)
print("ignore_semantics", FLAGS.ignore_semantics)
print("do_instances", FLAGS.do_instances)
print("ignore_safety", FLAGS.ignore_safety)
print("offset", FLAGS.offset)
print("*" * 80)
# open config file
try:
print("Opening config file %s" % FLAGS.config)
CFG = yaml.safe_load(open(FLAGS.config, 'r'))
except Exception as e:
print(e)
print("Error opening yaml file.")
quit()
# fix sequence name
FLAGS.sequence = '{0:02d}'.format(int(FLAGS.sequence))
# does sequence folder exist?
scan_paths = os.path.join(FLAGS.dataset, "sequences",
FLAGS.sequence, "velodyne")
if os.path.isdir(scan_paths):
print("Sequence folder exists! Using sequence from %s" % scan_paths)
else:
print("Sequence folder doesn't exist! Exiting...")
quit()
# populate the pointclouds
scan_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(scan_paths)) for f in fn]
scan_names.sort()
# does sequence folder exist?
if not FLAGS.ignore_semantics:
if FLAGS.predictions is not None:
label_paths = os.path.join(FLAGS.predictions, "sequences",
FLAGS.sequence, "predictions")
else:
label_paths = os.path.join(FLAGS.dataset, "sequences",
FLAGS.sequence, "labels")
if os.path.isdir(label_paths):
print("Labels folder exists! Using labels from %s" % label_paths)
else:
print(label_paths)
print("Labels folder doesn't exist! Exiting...")
quit()
# populate the pointclouds
label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(label_paths)) for f in fn]
label_names.sort()
# check that there are same amount of labels and scans
if not FLAGS.ignore_safety:
assert(len(label_names) == len(scan_names))
# create a scan
if FLAGS.ignore_semantics:
scan = LaserScan(project=True) # project all opened scans to spheric proj
else:
color_dict = CFG["color_map"]
nclasses = len(color_dict)
scan = SemLaserScan(nclasses, color_dict, project=True)
# create a visualizer
semantics = not FLAGS.ignore_semantics
instances = FLAGS.do_instances
if not semantics:
label_names = None
vis = LaserScanVis(scan=scan,
scan_names=scan_names,
label_names=label_names,
offset=FLAGS.offset,
semantics=semantics, instances=instances and semantics)
# print instructions
print("To navigate:")
print("\tb: back (previous scan)")
print("\tn: next (next scan)")
print("\tq: quit (exit program)")
# run the visualizer
vis.run()
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/Dockerfile
|
# This file is covered by the LICENSE file in the root of this project.
# Use an official ubuntu runtime as a parent image
FROM ubuntu:16.04
# Install all system pre-reqs
# common pre-reqs
RUN apt update
RUN apt upgrade -y
RUN apt install apt-utils \
build-essential \
curl \
git \
cmake \
unzip \
autoconf \
autogen \
libtool \
mlocate \
zlib1g-dev \
python \
python3-dev \
python3-pip \
python3-wheel \
python3-tk \
wget \
libpng-dev \
libfreetype6-dev \
vim \
meld \
sudo \
libav-tools \
python3-pyqt5.qtopengl \
x11-apps \
-y
RUN updatedb
# # Install any python pre-reqs from requirements.txt
RUN pip3 install -U pip
RUN pip3 install scipy==0.19.1 \
numpy==1.14.0 \
torch==0.4.1 \
opencv_python==3.4.0.12 \
vispy==0.5.3 \
tensorflow==1.11.0 \
PyYAML==3.13 \
enum34==1.1.6 \
matplotlib==3.0.3
ENV PYTHONPATH /home/developer/api
# graphical interface stuff
# uid and gid
ARG uid=1000
ARG gid=1000
# echo to make sure that they are the ones from my setup
RUN echo "$uid:$gid"
# Graphical interface stuff
RUN mkdir -p /home/developer && \
cp /etc/skel/.bashrc /home/developer/.bashrc && \
echo "developer:x:${uid}:${gid}:Developer,,,:/home/developer:/bin/bash" >> /etc/passwd && \
echo "developer:x:${uid}:" >> /etc/group && \
echo "developer ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/developer && \
chmod 0440 /etc/sudoers.d/developer && \
chown ${uid}:${gid} -R /home/developer
# opengl things
ENV DEBIAN_FRONTEND "noninteractive"
# Install all needed deps
RUN apt install -y xvfb pkg-config \
llvm-3.9-dev \
xorg-server-source \
python-dev \
x11proto-gl-dev \
libxext-dev \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxcb-xfixes0-dev \
libdrm-dev \
libx11-dev;
# compile the mesa llvmpipe driver from source.
RUN mkdir -p /var/tmp/build; \
cd /var/tmp/build; \
wget "https://mesa.freedesktop.org/archive/mesa-18.0.1.tar.gz"; \
tar xfv mesa-18.0.1.tar.gz; \
rm mesa-18.0.1.tar.gz; \
cd mesa-18.0.1; \
./configure --enable-glx=gallium-xlib --with-gallium-drivers=swrast,swr --disable-dri --disable-gbm --disable-egl --enable-gallium-osmesa --enable-llvm --prefix=/usr/local/ --with-llvm-prefix=/usr/lib/llvm-3.9/; \
make -j3; \
make install; \
cd .. ; \
rm -rf mesa-18.0.1;
# install mesa stuff for testing
RUN sudo apt install -y glew-utils libglew-dev freeglut3-dev \
&& wget "ftp://ftp.freedesktop.org/pub/mesa/demos/mesa-demos-8.4.0.tar.gz" \
&& tar xfv mesa-demos-8.4.0.tar.gz \
&& rm mesa-demos-8.4.0.tar.gz \
&& cd mesa-demos-8.4.0 \
&& ./configure --prefix=/usr/local \
&& make -j3 \
&& make install \
&& cd .. \
&& rm -rf mesa-demos-8.4.0
# clean the cache
RUN apt update && \
apt autoremove --purge -y && \
apt clean -y
ENV XVFB_WHD="1920x1080x24"\
DISPLAY=":99" \
LIBGL_ALWAYS_SOFTWARE="1" \
GALLIUM_DRIVER="swr" \
LP_NO_RAST="false" \
LP_DEBUG="" \
LP_PERF="" \
LP_NUM_THREADS=""
# Set the working directory to the api location
WORKDIR /home/developer/api
# make user and home
USER developer
ENV HOME /home/developer
# Copy the current directory contents into the container at ~/api
ADD . /home/developer/api
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/.pep8
|
[pep8]
max-line-length = 120
indent-size = 2
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/validate_submission.py
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import zipfile
import argparse
import os
import numpy as np
class ValidationException(Exception):
pass
def unpack(compressed):
  ''' Expand a bit-packed voxel grid into one uint8 voxel per bit.

  Each input byte encodes 8 voxels, most-significant bit first, so the
  returned array has 8x as many entries as `compressed`, each 0 or 1.
  Expects a uint8 array (callers read the .bin files with dtype=np.uint8).
  '''
  # np.unpackbits performs exactly the MSB-first shift-and-mask expansion
  # the previous eight strided assignments did, in a single C-level pass.
  return np.unpackbits(compressed)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Validate a submission zip file needed to evaluate on CodaLab competitions.\n\nThe verification tool checks:\n 1. correct folder structure,\n 2. existence of label files for each scan,\n 3. count of labels for each scan.\nInvalid labels are ignored by the evaluation script, therefore we don't check\nfor invalid labels.", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"zipfile",
type=str,
help='zip file that should be validated.',
)
parser.add_argument(
'dataset',
type=str,
help='directory containing the folder "sequences" containing folders "11", ..., "21" with the input data ("velodyne" or "voxels") folder.'
)
parser.add_argument(
"--task",
type=str,
choices=["segmentation", "completion", "panoptic"],
default="segmentation",
help='task for which the zip file should be validated.'
)
FLAGS, _ = parser.parse_known_args()
checkmark = "\u2713"
float_bytes = 4
uint32_bytes = 4
uint16_bytes = 2
try:
print('Validating zip archive "{}".\n'.format(FLAGS.zipfile))
print( " ============ {:^10} ============ ".format(FLAGS.task))
print(" 1. Checking filename.............. ", end="", flush=True)
if not FLAGS.zipfile.endswith('.zip'):
raise ValidationException('Competition bundle must end with ".zip"')
print(checkmark)
with zipfile.ZipFile(FLAGS.zipfile) as zipfile:
if FLAGS.task == "segmentation" or FLAGS.task == "panoptic":
print(" 2. Checking directory structure... ", end="", flush=True)
directories = [folder.filename for folder in zipfile.infolist() if folder.filename.endswith("/")]
if "sequences/" not in directories:
raise ValidationException('Directory "sequences" missing inside zip file.')
for sequence in range(11, 22):
sequence_directory = "sequences/{}/".format(sequence)
if sequence_directory not in directories:
raise ValidationException('Directory "{}" missing inside zip file.'.format(sequence_directory))
predictions_directory = sequence_directory + "predictions/"
if predictions_directory not in directories:
raise ValidationException('Directory "{}" missing inside zip file.'.format(predictions_directory))
print(checkmark)
print(' 3. Checking file sizes............ ', end='', flush=True)
prediction_files = {info.filename: info for info in zipfile.infolist() if not info.filename.endswith("/")}
for sequence in range(11, 22):
sequence_directory = 'sequences/{}'.format(sequence)
velodyne_directory = os.path.join(FLAGS.dataset, 'sequences/{}/velodyne/'.format(sequence))
velodyne_files = sorted([os.path.join(velodyne_directory, file) for file in os.listdir(velodyne_directory)])
label_files = sorted([os.path.join(sequence_directory, "predictions", os.path.splitext(filename)[0] + ".label")
for filename in os.listdir(velodyne_directory)])
for velodyne_file, label_file in zip(velodyne_files, label_files):
num_points = os.path.getsize(velodyne_file) / (4 * float_bytes)
if label_file not in prediction_files:
raise ValidationException('"' + label_file + '" is missing inside zip.')
num_labels = prediction_files[label_file].file_size / uint32_bytes
if num_labels != num_points:
raise ValidationException('label file "' + label_file +
"' should have {} labels, but found {} labels!".format(int(num_points), int(num_labels)))
print(checkmark)
elif FLAGS.task == "completion":
print(" 2. Checking directory structure... ", end="", flush=True)
directories = [folder.filename for folder in zipfile.infolist() if folder.filename.endswith("/")]
if "sequences/" not in directories:
raise ValidationException('Directory "sequences" missing inside zip file.')
for sequence in range(11, 22):
sequence_directory = "sequences/{}/".format(sequence)
if sequence_directory not in directories:
raise ValidationException('Directory "{}" missing inside zip file.'.format(sequence_directory))
predictions_directory = sequence_directory + "predictions/"
if predictions_directory not in directories:
raise ValidationException('Directory "{}" missing inside zip file.'.format(predictions_directory))
print(checkmark)
print(' 3. Checking file sizes', end='', flush=True)
prediction_files = {str(info.filename): info for info in zipfile.infolist() if not info.filename.endswith("/")}
# description.txt is optional and one should not get an error.
if "description.txt" in prediction_files: del prediction_files["description.txt"]
necessary_files = []
for sequence in range(11, 22):
sequence_directory = 'sequences/{}'.format(sequence)
voxel_directory = os.path.join(FLAGS.dataset, 'sequences/{}/voxels/'.format(sequence))
voxel_files = sorted([os.path.join(voxel_directory, file) for file in os.listdir(voxel_directory) if file.endswith(".bin")])
label_files = sorted([os.path.join(sequence_directory, "predictions", os.path.splitext(filename)[0] + ".label")
for filename in os.listdir(voxel_directory)])
necessary_files.extend(label_files)
for voxel_file, label_file in zip(voxel_files, label_files):
input_voxels = unpack(np.fromfile(voxel_file, dtype=np.uint8))
num_voxels = input_voxels.shape[0] # fixed volume (= 256 * 256 * 32)!
if label_file not in prediction_files:
raise ValidationException('"' + label_file + '" is missing inside zip.')
num_labels = prediction_files[label_file].file_size / uint16_bytes # expecting uint16 for labels.
if num_labels != num_voxels:
raise ValidationException('label file "' + label_file +
"' should have {} labels, but found {} labels!".format(int(num_voxels), int(num_labels)))
print(".", end="", flush=True)
print(". ", end="", flush=True)
print(checkmark)
print(' 4. Checking for unneeded files', end='', flush=True)
if len(necessary_files) != len(prediction_files.keys()):
filelist = sorted([f for f in prediction_files.keys() if f not in necessary_files])
ell = ""
if len(filelist) > 10: ell = ", ..."
raise ValidationException("Zip contains unneeded predictions, e.g., {}".format(",".join(filelist[:10]) + ell))
print(".... " + checkmark)
else:
raise NotImplementedError("Unknown task.")
except ValidationException as ex:
print("\n\n " + "\u001b[1;31m>>> Error: " + str(ex) + "\u001b[0m")
exit(1)
print("\n\u001b[1;32mEverything ready for submission!\u001b[0m \U0001F389")
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/thirdparty/semantic_kitti_api/visualize.py
|
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import argparse
import os
import yaml
from auxiliary.laserscan import LaserScan, SemLaserScan
from auxiliary.laserscanvis import LaserScanVis
if __name__ == '__main__':
parser = argparse.ArgumentParser("./visualize.py")
parser.add_argument(
'--dataset', '-d',
type=str,
required=True,
help='Dataset to visualize. No Default',
)
parser.add_argument(
'--config', '-c',
type=str,
required=False,
default="config/semantic-kitti.yaml",
help='Dataset config file. Defaults to %(default)s',
)
parser.add_argument(
'--sequence', '-s',
type=str,
default="00",
required=False,
help='Sequence to visualize. Defaults to %(default)s',
)
parser.add_argument(
'--predictions', '-p',
type=str,
default=None,
required=False,
help='Alternate location for labels, to use predictions folder. '
'Must point to directory containing the predictions in the proper format '
' (see readme)'
'Defaults to %(default)s',
)
parser.add_argument(
'--ignore_semantics', '-i',
dest='ignore_semantics',
default=False,
action='store_true',
help='Ignore semantics. Visualizes uncolored pointclouds.'
'Defaults to %(default)s',
)
parser.add_argument(
'--do_instances', '-di',
dest='do_instances',
default=False,
action='store_true',
help='Visualize instances too. Defaults to %(default)s',
)
parser.add_argument(
'--offset',
type=int,
default=0,
required=False,
help='Sequence to start. Defaults to %(default)s',
)
parser.add_argument(
'--ignore_safety',
dest='ignore_safety',
default=False,
action='store_true',
help='Normally you want the number of labels and ptcls to be the same,'
', but if you are not done inferring this is not the case, so this disables'
' that safety.'
'Defaults to %(default)s',
)
FLAGS, unparsed = parser.parse_known_args()
# print summary of what we will do
print("*" * 80)
print("INTERFACE:")
print("Dataset", FLAGS.dataset)
print("Config", FLAGS.config)
print("Sequence", FLAGS.sequence)
print("Predictions", FLAGS.predictions)
print("ignore_semantics", FLAGS.ignore_semantics)
print("do_instances", FLAGS.do_instances)
print("ignore_safety", FLAGS.ignore_safety)
print("offset", FLAGS.offset)
print("*" * 80)
# open config file
try:
print("Opening config file %s" % FLAGS.config)
CFG = yaml.safe_load(open(FLAGS.config, 'r'))
except Exception as e:
print(e)
print("Error opening yaml file.")
quit()
# fix sequence name
FLAGS.sequence = '{0:02d}'.format(int(FLAGS.sequence))
# does sequence folder exist?
scan_paths = os.path.join(FLAGS.dataset, "sequences",
FLAGS.sequence, "velodyne")
if os.path.isdir(scan_paths):
print("Sequence folder exists! Using sequence from %s" % scan_paths)
else:
print("Sequence folder doesn't exist! Exiting...")
quit()
# populate the pointclouds
scan_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(scan_paths)) for f in fn]
scan_names.sort()
# does sequence folder exist?
if not FLAGS.ignore_semantics:
if FLAGS.predictions is not None:
label_paths = os.path.join(FLAGS.predictions, "sequences",
FLAGS.sequence, "predictions")
else:
label_paths = os.path.join(FLAGS.dataset, "sequences",
FLAGS.sequence, "labels")
if os.path.isdir(label_paths):
print("Labels folder exists! Using labels from %s" % label_paths)
else:
print("Labels folder doesn't exist! Exiting...")
quit()
# populate the pointclouds
label_names = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(label_paths)) for f in fn]
label_names.sort()
# check that there are same amount of labels and scans
if not FLAGS.ignore_safety:
assert(len(label_names) == len(scan_names))
# create a scan
if FLAGS.ignore_semantics:
scan = LaserScan(project=True) # project all opened scans to spheric proj
else:
color_dict = CFG["color_map"]
nclasses = len(color_dict)
scan = SemLaserScan(nclasses, color_dict, project=True)
# create a visualizer
semantics = not FLAGS.ignore_semantics
instances = FLAGS.do_instances
if not semantics:
label_names = None
vis = LaserScanVis(scan=scan,
scan_names=scan_names,
label_names=label_names,
offset=FLAGS.offset,
semantics=semantics, instances=instances and semantics)
# print instructions
print("To navigate:")
print("\tb: back (previous scan)")
print("\tn: next (next scan)")
print("\tq: quit (exit program)")
# run the visualizer
vis.run()
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.