repo_id
stringlengths 19
138
| file_path
stringlengths 32
200
| content
stringlengths 1
12.9M
| __index_level_0__
int64 0
0
|
|---|---|---|---|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .anchor_head import *
from .petr_head import PETRHead
from .point_head import PointHeadSimple
from .target_assigner import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/anchor_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Normal
from paddle3d.apis import manager
from paddle3d.models.layers import reset_parameters
from paddle3d.models.losses import (SigmoidFocalClassificationLoss,
WeightedCrossEntropyLoss,
WeightedSmoothL1Loss)
from paddle3d.utils.box_coder import ResidualCoder
from .target_assigner.anchor_generator import AnchorGenerator
from .target_assigner.axis_aligned_target_assigner import \
AxisAlignedTargetAssigner
__all__ = ['AnchorHeadSingle']
@manager.HEADS.add_component
class AnchorHeadSingle(nn.Layer):
    """Single-scale anchor-based detection head (SECOND/PointPillars style).

    Predicts per-anchor class scores, box regression residuals and direction
    bins from a 2D BEV feature map. Anchors are generated once at construction
    time from the voxel grid implied by ``point_cloud_range`` / ``voxel_size``.

    Args:
        model_cfg (dict): Head config; must provide 'dir_offset' and
            'dir_limit_offset' used when decoding direction bins.
        input_channels (int): Channels of the input BEV feature map.
        class_names (list[str]): Detection class names.
        voxel_size (list[float]): Voxel size (x, y, z).
        point_cloud_range (list[float]): [x_min, y_min, z_min, x_max, y_max, z_max].
        anchor_target_cfg (dict): Config for the axis-aligned target assigner.
        predict_boxes_when_training (bool): Also decode boxes in train mode.
        anchor_generator_cfg (list[dict]): Per-class anchor generation settings.
        num_dir_bins (int): Number of direction classification bins.
        loss_weights (dict): 'cls_weight', 'loc_weight', 'dir_weight' and
            per-dimension 'code_weights'.
    """

    def __init__(self, model_cfg, input_channels, class_names, voxel_size,
                 point_cloud_range, anchor_target_cfg,
                 predict_boxes_when_training, anchor_generator_cfg,
                 num_dir_bins, loss_weights):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = len(class_names)
        self.class_names = class_names
        self.predict_boxes_when_training = predict_boxes_when_training
        self.anchor_generator_cfg = anchor_generator_cfg
        self.num_dir_bins = num_dir_bins
        self.loss_weights = loss_weights
        self.box_coder = ResidualCoder(num_dir_bins=num_dir_bins)
        # Full-resolution BEV grid size implied by range and voxel size.
        point_cloud_range = np.asarray(point_cloud_range)
        grid_size = (point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        self.anchors_list, self.num_anchors_per_location = self.generate_anchors(
            grid_size=grid_size,
            point_cloud_range=point_cloud_range,
            anchor_ndim=self.box_coder.code_size)
        # Merge the per-class anchor tensors along the scale axis (-3).
        self.anchors = paddle.concat(self.anchors_list, axis=-3)
        self.num_anchors_per_location = sum(self.num_anchors_per_location)
        # 1x1 conv heads: classification, box regression, direction bins.
        self.conv_cls = nn.Conv2D(
            input_channels,
            self.num_anchors_per_location * self.num_class,
            kernel_size=1)
        self.conv_box = nn.Conv2D(
            input_channels,
            self.num_anchors_per_location * self.box_coder.code_size,
            kernel_size=1)
        self.target_assigner = AxisAlignedTargetAssigner(
            anchor_generator_cfg,
            anchor_target_cfg,
            class_names=self.class_names,
            box_coder=self.box_coder)
        self.conv_dir_cls = nn.Conv2D(
            input_channels,
            self.num_anchors_per_location * num_dir_bins,
            kernel_size=1)
        # Populated in forward() and consumed by the loss functions.
        self.forward_ret_dict = {}
        self.reg_loss_func = WeightedSmoothL1Loss(
            code_weights=loss_weights["code_weights"])
        self.cls_loss_func = SigmoidFocalClassificationLoss(
            alpha=0.25, gamma=2.0)
        self.dir_loss_func = WeightedCrossEntropyLoss()
        self.init_weight()

    def init_weight(self):
        """Reset conv parameters, then apply focal-loss friendly inits.

        The classification bias starts at -log((1 - p) / p) with prior
        p = 0.01 and the box-head weights are drawn from N(0, 0.001).
        """
        for sublayer in self.sublayers():
            if isinstance(sublayer, nn.Conv2D):
                reset_parameters(sublayer)
        # Focal-loss prior: initial foreground probability ~= 0.01.
        bias_shape = self.conv_cls.bias.shape
        temp_value = paddle.ones(bias_shape) * -paddle.log(
            paddle.to_tensor((1.0 - 0.01) / 0.01))
        self.conv_cls.bias.set_value(temp_value)
        weight_shape = self.conv_box.weight.shape
        self.conv_box.weight.set_value(
            paddle.normal(mean=0.0, std=0.001, shape=weight_shape))

    def generate_anchors(self, grid_size, point_cloud_range, anchor_ndim=7):
        """Build anchors for every entry of ``anchor_generator_cfg``.

        Args:
            grid_size (np.ndarray): Full-resolution BEV grid size.
            point_cloud_range (np.ndarray): [x_min, ..., z_max].
            anchor_ndim (int): Anchor box dimensionality; extra dims beyond
                the canonical 7 (e.g. velocity) are zero-padded.

        Returns:
            tuple: (list of anchor tensors, list of anchors-per-location counts).
        """
        anchor_generator = AnchorGenerator(
            anchor_range=point_cloud_range,
            anchor_generator_config=self.anchor_generator_cfg)
        feature_map_size = [
            grid_size[:2] // config['feature_map_stride']
            for config in self.anchor_generator_cfg
        ]
        anchors_list, num_anchors_per_location_list = anchor_generator.generate_anchors(
            feature_map_size)
        if anchor_ndim != 7:
            for idx, anchors in enumerate(anchors_list):
                # BUGFIX: was `anchors.zeros(...)` -- paddle tensors expose no
                # `.zeros` method, so any code size > 7 raised AttributeError.
                # Use the module-level factory instead.
                pad_zeros = paddle.zeros(
                    [*anchors.shape[0:-1], anchor_ndim - 7])
                new_anchors = paddle.concat((anchors, pad_zeros), axis=-1)
                anchors_list[idx] = new_anchors
        return anchors_list, num_anchors_per_location_list

    def generate_predicted_boxes(self,
                                 batch_size,
                                 cls_preds,
                                 box_preds,
                                 dir_cls_preds=None):
        """Decode raw head outputs into per-batch boxes and class scores.

        Args:
            batch_size: Number of samples in the batch.
            cls_preds: (N, H, W, C1) or list of such tensors.
            box_preds: (N, H, W, C2) or list of such tensors.
            dir_cls_preds: (N, H, W, C3) direction logits, optional.

        Returns:
            batch_cls_preds: (B, num_boxes, num_classes)
            batch_box_preds: (B, num_boxes, 7+C)
        """
        anchors = self.anchors
        # Index 5 is the last axis of the 6-D anchor tensor (the box dims).
        num_anchors = paddle.shape(
            anchors.reshape([-1, paddle.shape(anchors)[5]]))[0]
        batch_anchors = anchors.reshape([1, -1, paddle.shape(anchors)[5]]).tile(
            [batch_size, 1, 1])
        batch_cls_preds = cls_preds.reshape([batch_size, num_anchors, -1]) \
            if not isinstance(cls_preds, list) else cls_preds
        batch_box_preds = box_preds.reshape([batch_size, num_anchors, -1]) if not isinstance(box_preds, list) \
            else paddle.concat(box_preds, axis=1).reshape([batch_size, num_anchors, -1])
        batch_box_preds = self.box_coder.decode_paddle(batch_box_preds,
                                                       batch_anchors)
        if dir_cls_preds is not None:
            dir_offset = self.model_cfg['dir_offset']
            dir_limit_offset = self.model_cfg['dir_limit_offset']
            dir_cls_preds = dir_cls_preds.reshape([batch_size, num_anchors, -1]) if not isinstance(dir_cls_preds, list) \
                else paddle.concat(dir_cls_preds, axis=1).reshape([batch_size, num_anchors, -1])
            dir_labels = paddle.argmax(dir_cls_preds, axis=-1)
            # Snap heading into its bin, then add back the bin offset.
            period = (2 * np.pi / self.num_dir_bins)
            dir_rot = self.limit_period(batch_box_preds[..., 6] - dir_offset,
                                        dir_limit_offset, period)
            batch_box_preds[
                ..., 6] = dir_rot + dir_offset + period * dir_labels.cast(
                    batch_box_preds.dtype)
        return batch_cls_preds, batch_box_preds

    def limit_period(self, val, offset=0.5, period=np.pi):
        """Wrap ``val`` into [-offset * period, (1 - offset) * period)."""
        ans = val - paddle.floor(val / period + offset) * period
        return ans

    def forward(self, data_dict):
        """Run the head; attaches predictions (and train targets) to data_dict."""
        spatial_features_2d = data_dict['spatial_features_2d']
        cls_preds = self.conv_cls(spatial_features_2d)
        box_preds = self.conv_box(spatial_features_2d)
        cls_preds = cls_preds.transpose([0, 2, 3, 1])  # [N, H, W, C]
        box_preds = box_preds.transpose([0, 2, 3, 1])  # [N, H, W, C]
        self.forward_ret_dict['cls_preds'] = cls_preds
        self.forward_ret_dict['box_preds'] = box_preds
        dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
        dir_cls_preds = dir_cls_preds.transpose([0, 2, 3, 1])
        self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
        if self.training:
            targets_dict = self.target_assigner.assign_targets(
                self.anchors_list, data_dict['gt_boxes'])
            self.forward_ret_dict.update(targets_dict)
        if not self.training or self.predict_boxes_when_training:
            # Exported graphs run with a fixed batch size of 1.
            if getattr(self, 'in_export_mode', False):
                batch_size = 1
            else:
                batch_size = data_dict['batch_size']
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_size,
                cls_preds=cls_preds,
                box_preds=box_preds,
                dir_cls_preds=dir_cls_preds)
            data_dict['batch_cls_preds'] = batch_cls_preds
            data_dict['batch_box_preds'] = batch_box_preds
            data_dict['cls_preds_normalized'] = False
        return data_dict

    def get_loss(self):
        """Total RPN loss = classification + (localization [+ direction])."""
        cls_loss, tb_dict = self.get_cls_layer_loss()
        box_loss, tb_dict_box = self.get_box_reg_layer_loss()
        tb_dict.update(tb_dict_box)
        rpn_loss = cls_loss + box_loss
        tb_dict['rpn_loss'] = rpn_loss.item()
        return rpn_loss, tb_dict

    def get_cls_layer_loss(self):
        """Sigmoid focal classification loss over all anchors."""
        cls_preds = self.forward_ret_dict['cls_preds']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        batch_size = int(cls_preds.shape[0])
        cared = box_cls_labels >= 0  # [N, num_anchors]; -1 = ignored
        positives = box_cls_labels > 0
        negatives = box_cls_labels == 0
        negative_cls_weights = negatives * 1.0
        cls_weights = (negative_cls_weights + 1.0 * positives)
        # Cast for consistency with get_box_reg_layer_loss.
        # NOTE(review): reg_weights is not used below; kept for parity with
        # the reference implementation.
        reg_weights = positives.cast("float32")
        if self.num_class == 1:
            # class agnostic
            box_cls_labels[positives] = 1
        # Normalize per sample by the number of positive anchors.
        pos_normalizer = positives.sum(1, keepdim=True)
        reg_weights /= paddle.clip(pos_normalizer, min=1.0)
        cls_weights /= paddle.clip(pos_normalizer, min=1.0)
        # Zero out ignored anchors before one-hot encoding.
        cls_targets = box_cls_labels * cared.cast(box_cls_labels.dtype)
        one_hot_targets = []
        for b in range(batch_size):
            one_hot_targets.append(
                F.one_hot(cls_targets[b], num_classes=self.num_class + 1))
        one_hot_targets = paddle.stack(one_hot_targets)
        cls_preds = cls_preds.reshape([batch_size, -1, self.num_class])
        # Drop the background column (class 0).
        one_hot_targets = one_hot_targets[..., 1:]
        one_hot_targets.stop_gradient = True
        cls_loss_src = self.cls_loss_func(
            cls_preds, one_hot_targets, weights=cls_weights)  # [N, M]
        cls_loss = cls_loss_src.sum() / batch_size
        cls_loss = cls_loss * self.loss_weights['cls_weight']
        tb_dict = {'rpn_loss_cls': cls_loss.item()}
        return cls_loss, tb_dict

    def get_box_reg_layer_loss(self):
        """Smooth-L1 localization loss plus optional direction-bin loss."""
        box_preds = self.forward_ret_dict['box_preds']
        box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
        box_reg_targets = self.forward_ret_dict['box_reg_targets']
        box_cls_labels = self.forward_ret_dict['box_cls_labels']
        batch_size = int(box_preds.shape[0])
        positives = box_cls_labels > 0
        reg_weights = positives.cast("float32")
        pos_normalizer = positives.sum(1, keepdim=True)
        reg_weights /= paddle.clip(pos_normalizer, min=1.0)
        anchors = self.anchors
        anchors = anchors.reshape([1, -1,
                                   anchors.shape[-1]]).tile([batch_size, 1, 1])
        box_preds = box_preds.reshape([
            batch_size, -1, box_preds.shape[-1] // self.num_anchors_per_location
        ])
        # Encode heading as sin(a - b) so the loss is rotation-periodic.
        box_preds_sin, reg_targets_sin = self.add_sin_difference(
            box_preds, box_reg_targets)
        loc_loss_src = self.reg_loss_func(box_preds_sin, reg_targets_sin, \
            weights=reg_weights)  # [N, M]
        loc_loss = loc_loss_src.sum() / batch_size
        loc_loss = loc_loss * self.loss_weights['loc_weight']
        box_loss = loc_loss
        tb_dict = {'rpn_loss_loc': loc_loss.item()}
        if box_dir_cls_preds is not None:
            dir_targets = self.get_direction_target(
                anchors,
                box_reg_targets,
                dir_offset=self.model_cfg['dir_offset'],
                num_bins=self.num_dir_bins)
            dir_logits = box_dir_cls_preds.reshape(
                [batch_size, -1, self.num_dir_bins])
            weights = positives.cast("float32")
            weights /= paddle.clip(weights.sum(-1, keepdim=True), min=1.0)
            dir_targets.stop_gradient = True
            dir_loss = self.dir_loss_func(
                dir_logits, dir_targets, weights=weights)
            dir_loss = dir_loss.sum() / batch_size
            dir_loss = dir_loss * self.loss_weights['dir_weight']
            box_loss += dir_loss
            tb_dict['rpn_loss_dir'] = dir_loss.item()
        return box_loss, tb_dict

    def add_sin_difference(self, boxes1, boxes2, dim=6):
        """Replace the heading dim with sin/cos cross terms.

        sin(a - b) = sin(a)cos(b) - cos(a)sin(b); storing sin(a)cos(b) in
        boxes1 and cos(a)sin(b) in boxes2 lets an element-wise loss compute
        the periodic angle difference.
        """
        assert dim != -1
        rad_pred_encoding = paddle.sin(boxes1[..., dim:dim + 1]) * paddle.cos(
            boxes2[..., dim:dim + 1])
        rad_tg_encoding = paddle.cos(boxes1[..., dim:dim + 1]) * paddle.sin(
            boxes2[..., dim:dim + 1])
        boxes1 = paddle.concat(
            [boxes1[..., :dim], rad_pred_encoding, boxes1[..., dim + 1:]],
            axis=-1)
        boxes2 = paddle.concat(
            [boxes2[..., :dim], rad_tg_encoding, boxes2[..., dim + 1:]],
            axis=-1)
        return boxes1, boxes2

    def get_direction_target(self,
                             anchors,
                             reg_targets,
                             one_hot=True,
                             dir_offset=0,
                             num_bins=2):
        """Quantize ground-truth headings into ``num_bins`` direction bins.

        Returns one-hot targets of shape (B, num_anchors, num_bins) when
        ``one_hot`` is set, otherwise integer bin indices.
        """
        batch_size = reg_targets.shape[0]
        anchors = anchors.reshape([batch_size, -1, anchors.shape[-1]])
        # Recover the absolute gt heading from the residual + anchor heading.
        rot_gt = reg_targets[..., 6] + anchors[..., 6]
        offset_rot = self.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
        dir_cls_targets = paddle.floor(
            offset_rot / (2 * np.pi / num_bins)).cast("int64")
        dir_cls_targets = paddle.clip(dir_cls_targets, min=0, max=num_bins - 1)
        if one_hot:
            dir_targets = []
            for b in range(batch_size):
                dir_targets.append(
                    F.one_hot(dir_cls_targets[b], num_classes=num_bins))
            dir_cls_targets = paddle.stack(dir_targets)
        return dir_cls_targets
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/petr_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
# Copyright (c) 2021 Wang, Yue
# ------------------------------------------------------------------------
# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import math
from functools import partial
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.heads.dense_heads.target_assigner.hungarian_assigner import (
HungarianAssigner3D, nan_to_num, normalize_bbox)
from paddle3d.models.layers import param_init
from paddle3d.models.layers.layer_libs import NormedLinear, inverse_sigmoid
from paddle3d.models.losses.focal_loss import FocalLoss, WeightedFocalLoss
from paddle3d.models.losses.weight_loss import WeightedL1Loss
from .samplers.pseudo_sampler import PseudoSampler
def reduce_mean(tensor):
    """Average *tensor* across all distributed workers.

    When distributed training is not initialized the input is returned
    unchanged; otherwise a copy is pre-scaled by 1/world_size and
    all-reduced so the result is the mean over all workers.
    """
    if not paddle.distributed.is_initialized():
        return tensor
    averaged = tensor.clone()
    world_size = paddle.distributed.get_world_size()
    paddle.distributed.all_reduce(averaged.scale_(1. / world_size))
    return averaged
def multi_apply(func, *args, **kwargs):
    """Map *func* over zipped *args* and transpose the results.

    Each positional argument is an iterable; ``func`` is invoked once per
    aligned element group, with any keyword arguments bound up front. If
    ``func`` returns k values per call, the result is a tuple of k lists.
    """
    bound = partial(func, **kwargs) if kwargs else func
    per_call_results = map(bound, *args)
    return tuple(map(list, zip(*per_call_results)))
def pos2posemb3d(pos, num_pos_feats=128, temperature=10000):
    """Encode normalized 3D points as sinusoidal positional embeddings.

    Each coordinate is scaled by 2*pi, divided by a geometric frequency
    ladder, and expanded into interleaved sin/cos channels. The y, x and z
    embeddings are concatenated along the last axis.
    """
    scaled_pos = pos * (2 * math.pi)
    freq = paddle.arange(num_pos_feats, dtype='int32')
    freq = temperature**(2 * (freq // 2) / num_pos_feats)

    def _interleaved_sincos(component):
        # component: (..., num_pos_feats) after broadcasting against freq.
        waves = component[..., None] / freq
        return paddle.stack((waves[..., 0::2].sin(), waves[..., 1::2].cos()),
                            axis=-1).flatten(-2)

    emb_x = _interleaved_sincos(scaled_pos[..., 0])
    emb_y = _interleaved_sincos(scaled_pos[..., 1])
    emb_z = _interleaved_sincos(scaled_pos[..., 2])
    return paddle.concat((emb_y, emb_x, emb_z), axis=-1)
class SELayer(nn.Layer):
    """Squeeze-and-excitation style gating layer.

    A gate is computed from ``x_se`` via two 1x1 convolutions separated by
    an activation; ``x`` is then scaled element-wise by the gated response.
    """

    def __init__(self, channels, act_layer=nn.ReLU, gate_layer=nn.Sigmoid):
        super().__init__()
        self.conv_reduce = nn.Conv2D(channels, channels, 1, bias_attr=True)
        self.act1 = act_layer()
        self.conv_expand = nn.Conv2D(channels, channels, 1, bias_attr=True)
        self.gate = gate_layer()

    def forward(self, x, x_se):
        attn = self.conv_reduce(x_se)
        attn = self.conv_expand(self.act1(attn))
        return x * self.gate(attn)
class RegLayer(nn.Layer):
    """Grouped regression head.

    A shared MLP trunk feeds one small task head per entry of
    ``group_reg_dims`` (xy, z, size, rot, velo by default); the per-task
    outputs are concatenated along the last axis.
    """

    def __init__(
            self,
            embed_dims=256,
            shared_reg_fcs=2,
            group_reg_dims=(2, 1, 3, 2, 2),  # xy, z, size, rot, velo
            act_layer=nn.ReLU,
            drop=0.0):
        super().__init__()
        trunk = []
        for _ in range(shared_reg_fcs):
            trunk.append(nn.Linear(embed_dims, embed_dims))
            trunk.append(act_layer())
            trunk.append(nn.Dropout(drop))
        self.reg_branch = nn.Sequential(*trunk)
        self.task_heads = nn.LayerList([
            nn.Sequential(
                nn.Linear(embed_dims, embed_dims), act_layer(),
                nn.Linear(embed_dims, reg_dim)) for reg_dim in group_reg_dims
        ])

    def forward(self, x):
        shared = self.reg_branch(x)
        # Each task head consumes its own copy of the shared features.
        per_task = [head(shared.clone()) for head in self.task_heads]
        return paddle.concat(per_task, -1)
@manager.HEADS.add_component
class PETRHead(nn.Layer):
"""Implements the DETR transformer head.
See `paper: End-to-End Object Detection with Transformers
<https://arxiv.org/pdf/2005.12872>`_ for details.
"""
    def __init__(
            self,
            num_classes,
            in_channels,
            num_query=100,
            num_reg_fcs=2,
            transformer=None,
            sync_cls_avg_factor=False,
            positional_encoding=None,
            code_weights=None,
            bbox_coder=None,
            loss_cls=None,
            loss_bbox=None,
            loss_iou=None,
            assigner=None,
            with_position=True,
            with_multiview=False,
            depth_step=0.8,
            depth_num=64,
            LID=False,
            depth_start=1,
            position_level=0,
            position_range=[-65, -65, -8.0, 65, 65, 8.0],
            group_reg_dims=(2, 1, 3, 2, 2),  # xy, z, size, rot, velo
            scalar=5,
            noise_scale=0.4,
            noise_trans=0.0,
            dn_weight=1.0,
            split=0.5,
            init_cfg=None,
            normedlinear=False,
            with_fpe=False,
            with_time=False,
            with_multi=False,
            with_denoise=False,
            **kwargs):
        """Build the PETR head.

        NOTE: plain-Python attributes are deliberately assigned *before*
        ``super().__init__()``; parameters and sublayers are only created
        afterwards. ``loss_iou``, ``assigner`` and ``init_cfg`` are accepted
        for config compatibility but not stored here.
        """
        # Box code size: 10 = (cx, cy, cz, w, l, h, sin, cos, vx, vy) unless
        # overridden via kwargs.
        if 'code_size' in kwargs:
            self.code_size = kwargs['code_size']
        else:
            self.code_size = 10
        if code_weights is not None:
            self.code_weights = code_weights
        else:
            # Default: velocity dims (last two) are down-weighted.
            self.code_weights = [
                1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2
            ]
        self.code_weights = self.code_weights[:self.code_size]
        self.bg_cls_weight = 0
        self.sync_cls_avg_factor = sync_cls_avg_factor
        # NOTE(review): the assigner pc_range is hard-coded here rather than
        # taken from the ``assigner`` argument -- confirm this is intended.
        self.assigner = HungarianAssigner3D(
            pc_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0])
        self.sampler = PseudoSampler()
        self.num_query = num_query
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.num_reg_fcs = num_reg_fcs
        self.fp16_enabled = False
        self.embed_dims = 256
        # Depth discretization for the 3D position embedding frustum.
        self.depth_step = depth_step
        self.depth_num = depth_num
        self.position_dim = 3 * self.depth_num
        self.position_range = position_range
        self.LID = LID  # linear-increasing depth discretization
        self.depth_start = depth_start
        self.position_level = position_level
        self.with_position = with_position
        self.with_multiview = with_multiview
        # Number of decoder layers supervised (one prediction set each).
        self.num_pred = 6
        self.normedlinear = normedlinear
        self.with_fpe = with_fpe
        self.with_time = with_time
        self.with_multi = with_multi
        self.group_reg_dims = group_reg_dims
        # Denoising (DN) query settings.
        self.scalar = scalar
        self.bbox_noise_scale = noise_scale
        self.bbox_noise_trans = noise_trans
        self.dn_weight = dn_weight
        self.split = split
        self.with_denoise = with_denoise
        super(PETRHead, self).__init__()
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.loss_cls = loss_cls
        self.loss_bbox = loss_bbox
        self.cls_out_channels = num_classes
        self.positional_encoding = positional_encoding
        # Freeze the code weights as a non-trainable parameter.
        initializer = paddle.nn.initializer.Assign(self.code_weights)
        self.code_weights = self.create_parameter(
            [len(self.code_weights)], default_initializer=initializer)
        self.code_weights.stop_gradient = True
        self.bbox_coder = bbox_coder
        self.pc_range = self.bbox_coder.point_cloud_range
        self._init_layers()
        self.transformer = transformer
        self.pd_eps = paddle.to_tensor(np.finfo('float32').eps)
def _init_layers(self):
"""Initialize layers of the transformer head."""
if self.with_position:
self.input_proj = nn.Conv2D(
self.in_channels, self.embed_dims, kernel_size=1)
else:
self.input_proj = nn.Conv2D(
self.in_channels, self.embed_dims, kernel_size=1)
cls_branch = []
for _ in range(self.num_reg_fcs):
cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
cls_branch.append(nn.LayerNorm(self.embed_dims))
cls_branch.append(nn.ReLU())
if self.normedlinear:
cls_branch.append(
NormedLinear(self.embed_dims, self.cls_out_channels))
else:
cls_branch.append(nn.Linear(self.embed_dims, self.cls_out_channels))
fc_cls = nn.Sequential(*cls_branch)
if self.with_multi:
reg_branch = RegLayer(self.embed_dims, self.num_reg_fcs,
self.group_reg_dims)
else:
reg_branch = []
for _ in range(self.num_reg_fcs):
reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
reg_branch.append(nn.ReLU())
reg_branch.append(nn.Linear(self.embed_dims, self.code_size))
reg_branch = nn.Sequential(*reg_branch)
self.cls_branches = nn.LayerList(
[copy.deepcopy(fc_cls) for _ in range(self.num_pred)])
self.reg_branches = nn.LayerList(
[copy.deepcopy(reg_branch) for _ in range(self.num_pred)])
if self.with_multiview:
self.adapt_pos3d = nn.Sequential(
nn.Conv2D(
self.embed_dims * 3 // 2,
self.embed_dims * 4,
kernel_size=1,
stride=1,
padding=0),
nn.ReLU(),
nn.Conv2D(
self.embed_dims * 4,
self.embed_dims,
kernel_size=1,
stride=1,
padding=0),
)
else:
self.adapt_pos3d = nn.Sequential(
nn.Conv2D(
self.embed_dims,
self.embed_dims,
kernel_size=1,
stride=1,
padding=0),
nn.ReLU(),
nn.Conv2D(
self.embed_dims,
self.embed_dims,
kernel_size=1,
stride=1,
padding=0),
)
if self.with_position:
self.position_encoder = nn.Sequential(
nn.Conv2D(
self.position_dim,
self.embed_dims * 4,
kernel_size=1,
stride=1,
padding=0),
nn.ReLU(),
nn.Conv2D(
self.embed_dims * 4,
self.embed_dims,
kernel_size=1,
stride=1,
padding=0),
)
self.reference_points = nn.Embedding(self.num_query, 3)
self.query_embedding = nn.Sequential(
nn.Linear(self.embed_dims * 3 // 2, self.embed_dims),
nn.ReLU(),
nn.Linear(self.embed_dims, self.embed_dims),
)
if self.with_fpe:
self.fpe = SELayer(self.embed_dims)
def init_weights(self):
"""Initialize weights of the transformer head."""
# The initialization for transformer is important
self.input_proj.apply(param_init.reset_parameters)
self.cls_branches.apply(param_init.reset_parameters)
self.reg_branches.apply(param_init.reset_parameters)
self.adapt_pos3d.apply(param_init.reset_parameters)
if self.with_position:
self.position_encoder.apply(param_init.reset_parameters)
if self.with_fpe:
self.fpe.apply(param_init.reset_parameters)
self.transformer.init_weights()
param_init.uniform_init(self.reference_points.weight, 0, 1)
if self.loss_cls.use_sigmoid:
bias_val = param_init.init_bias_by_prob(0.01)
for m in self.cls_branches:
param_init.constant_init(m[-1].bias, value=bias_val)
    def position_embeding(self, img_feats, img_metas, masks=None):
        """Build the 3D position embedding for each camera feature map.

        A camera frustum of pixel x depth sample points is back-projected to
        lidar space with the per-camera img2lidar matrices, normalized by
        ``position_range`` and encoded by ``self.position_encoder``.

        Returns:
            tuple: ((B, N, embed_dims, H, W) position embedding,
                    (B, N, H, W) bool mask of mostly-out-of-range locations).
        """
        eps = 1e-5
        # In export mode image shape comes pre-flattened in the meta dict.
        if getattr(self, 'in_export_mode', False):
            pad_h, pad_w = img_metas['image_shape']
        else:
            pad_h, pad_w, _ = img_metas[0]['pad_shape'][0]
        B, N, C, H, W = img_feats[self.position_level].shape
        # Feature-map pixel centers mapped back to padded-image coordinates.
        coords_h = paddle.arange(H, dtype='float32') * pad_h / H
        coords_w = paddle.arange(W, dtype='float32') * pad_w / W
        if self.LID:
            # Linear-increasing discretization: bins grow with depth.
            index = paddle.arange(
                start=0, end=self.depth_num, step=1, dtype='float32')
            index_1 = index + 1
            bin_size = (self.position_range[3] - self.depth_start) / (
                self.depth_num * (1 + self.depth_num))
            coords_d = self.depth_start + bin_size * index * index_1
        else:
            # Uniform depth bins.
            index = paddle.arange(
                start=0, end=self.depth_num, step=1, dtype='float32')
            bin_size = (
                self.position_range[3] - self.depth_start) / self.depth_num
            coords_d = self.depth_start + bin_size * index
        D = coords_d.shape[0]
        # W, H, D, 3
        coords = paddle.stack(paddle.meshgrid(
            [coords_w, coords_h, coords_d])).transpose([1, 2, 3, 0])
        # Homogeneous coordinates; x/y are scaled by depth for unprojection.
        coords = paddle.concat((coords, paddle.ones_like(coords[..., :1])), -1)
        coords[..., :2] = coords[..., :2] * paddle.maximum(
            coords[..., 2:3],
            paddle.ones_like(coords[..., 2:3]) * eps)
        if not getattr(self, 'in_export_mode', False):
            # Invert each camera's lidar2img to get img2lidar, per sample.
            img2lidars = []
            for img_meta in img_metas:
                img2lidar = []
                for i in range(len(img_meta['lidar2img'])):
                    img2lidar.append(np.linalg.inv(img_meta['lidar2img'][i]))
                img2lidars.append(np.asarray(img2lidar))
            img2lidars = np.asarray(img2lidars)
            # (B, N, 4, 4)
            img2lidars = paddle.to_tensor(img2lidars).astype(coords.dtype)
        else:
            img2lidars = img_metas['img2lidars']
        # Broadcast frustum points and transforms to (B, N, W, H, D, 4, ...).
        coords = coords.reshape([1, 1, W, H, D, 4]).tile(
            [B, N, 1, 1, 1, 1]).reshape([B, N, W, H, D, 4, 1])
        img2lidars = img2lidars.reshape([B, N, 1, 1, 1, 16]).tile(
            [1, 1, W, H, D, 1]).reshape([B, N, W, H, D, 4, 4])
        # Back-project to lidar space and drop the homogeneous coordinate.
        coords3d = paddle.matmul(img2lidars, coords)
        coords3d = coords3d.reshape(coords3d.shape[:-1])[..., :3]
        # Normalize each axis into [0, 1] over the configured position range.
        coords3d[..., 0:1] = (coords3d[..., 0:1] - self.position_range[0]) / (
            self.position_range[3] - self.position_range[0])
        coords3d[..., 1:2] = (coords3d[..., 1:2] - self.position_range[1]) / (
            self.position_range[4] - self.position_range[1])
        coords3d[..., 2:3] = (coords3d[..., 2:3] - self.position_range[2]) / (
            self.position_range[5] - self.position_range[2])
        # A location is masked when more than half its depth samples fall
        # outside the normalized range.
        coords_mask = (coords3d > 1.0) | (coords3d < 0.0)
        coords_mask = coords_mask.astype('float32').flatten(-2).sum(-1) > (
            D * 0.5)
        coords_mask = masks | coords_mask.transpose([0, 1, 3, 2])
        # (B*N, depth_num*3, H, W) for the conv position encoder.
        coords3d = coords3d.transpose([0, 1, 4, 5, 3, 2]).reshape(
            [B * N, self.depth_num * 3, H, W])
        coords3d = inverse_sigmoid(coords3d)
        coords_position_embeding = self.position_encoder(coords3d)
        return coords_position_embeding.reshape([B, N, self.embed_dims, H,
                                                 W]), coords_mask
    def prepare_for_dn(self, batch_size, reference_points, img_metas):
        """Prepare denoising (DN) queries from noised ground-truth boxes.

        During training, GT centers are noised and prepended to the learnable
        reference points; a block attention mask keeps matching queries from
        attending to DN queries and DN groups from seeing each other.

        Returns:
            tuple: (padded reference points (B, pad+num_query, 3),
                    attention mask or None, DN bookkeeping dict or None).
        """
        if self.training:

            def get_gravity_center(bboxes):
                # Shift bottom-center z up by half the box height.
                bottom_center = bboxes[:, :3]
                gravity_center = np.zeros_like(bottom_center)
                gravity_center[:, :2] = bottom_center[:, :2]
                gravity_center[:, 2] = bottom_center[:, 2] + bboxes[:, 5] * 0.5
                return gravity_center

            # Per-sample GT boxes with gravity centers substituted in.
            targets = [
                paddle.concat(
                    (paddle.to_tensor(
                        get_gravity_center(img_meta['gt_bboxes_3d'])),
                     paddle.to_tensor(img_meta['gt_bboxes_3d'][:, 3:])),
                    axis=1) for img_meta in img_metas
            ]
            labels = [img_meta['gt_labels_3d'] for img_meta in img_metas]
            known = [(paddle.ones_like(t)) for t in labels]
            know_idx = known
            unmask_bbox = unmask_label = paddle.concat(known)
            known_num = [t.shape[0] for t in targets]
            # Flatten GT across the batch, remembering each box's sample idx.
            labels = paddle.concat([t for t in labels])
            boxes = paddle.concat([t for t in targets])
            batch_idx = paddle.concat(
                [paddle.full((t.shape[0], ), i) for i, t in enumerate(targets)])
            known_indice = paddle.nonzero(unmask_label + unmask_bbox)
            known_indice = known_indice.reshape([-1])
            # add noise
            # NOTE(review): ``groups`` is computed but unused below -- the
            # code tiles by ``self.scalar`` instead; confirm intent.
            groups = min(self.scalar, self.num_query // max(known_num))
            # Replicate every GT ``self.scalar`` times (one copy per group).
            known_indice = known_indice.tile([self.scalar, 1]).reshape([-1])
            known_labels = labels.tile([self.scalar,
                                        1]).reshape([-1]).astype('int64')
            known_bid = batch_idx.tile([self.scalar, 1]).reshape([-1])
            known_bboxs = boxes.tile([self.scalar, 1])
            known_bbox_center = known_bboxs[:, :3].clone()
            known_bbox_scale = known_bboxs[:, 3:6].clone()
            if self.bbox_noise_scale > 0:
                # Jitter centers within a box-size-scaled neighborhood.
                diff = known_bbox_scale / 2 + self.bbox_noise_trans
                rand_prob = paddle.rand(known_bbox_center.shape) * 2 - 1.0
                known_bbox_center += paddle.multiply(
                    rand_prob, diff) * self.bbox_noise_scale
                # Normalize noised centers into [0, 1] over the pc range.
                known_bbox_center[..., 0:1] = (
                    known_bbox_center[..., 0:1] - self.pc_range[0]) / (
                        self.pc_range[3] - self.pc_range[0])
                known_bbox_center[..., 1:2] = (
                    known_bbox_center[..., 1:2] - self.pc_range[1]) / (
                        self.pc_range[4] - self.pc_range[1])
                known_bbox_center[..., 2:3] = (
                    known_bbox_center[..., 2:3] - self.pc_range[2]) / (
                        self.pc_range[5] - self.pc_range[2])
                known_bbox_center = known_bbox_center.clip(min=0.0, max=1.0)
                # Heavily-noised copies become negatives (background label).
                mask = paddle.norm(rand_prob, 2, 1) > self.split
                known_labels[mask] = self.num_classes
            single_pad = int(max(known_num))
            pad_size = int(single_pad * self.scalar)
            padding_bbox = paddle.zeros([pad_size, 3])
            # DN slots come first, then the regular learnable queries.
            padded_reference_points = paddle.concat(
                [padding_bbox, reference_points], axis=0).unsqueeze(0).tile(
                    [batch_size, 1, 1])
            if len(known_num):
                # Scatter each noised center into its (sample, slot) position.
                map_known_indice = paddle.concat(
                    [paddle.to_tensor(list(range(num))) for num in known_num])
                map_known_indice = paddle.concat([
                    map_known_indice + single_pad * i
                    for i in range(self.scalar)
                ]).astype('int64')
            if len(known_bid):
                padded_reference_points[(known_bid.astype('int64'),
                                         map_known_indice)] = known_bbox_center
            tgt_size = pad_size + self.num_query
            attn_mask = paddle.ones([tgt_size, tgt_size]) < 0
            # match query cannot see the reconstruct
            attn_mask[pad_size:, :pad_size] = True
            # reconstruct cannot see each other
            for i in range(self.scalar):
                if i == 0:
                    attn_mask[single_pad * i:single_pad * (i + 1), single_pad *
                              (i + 1):pad_size] = True
                if i == self.scalar - 1:
                    attn_mask[single_pad * i:single_pad * (i + 1), :single_pad *
                              i] = True
                else:
                    attn_mask[single_pad * i:single_pad * (i + 1), single_pad *
                              (i + 1):pad_size] = True
                    attn_mask[single_pad * i:single_pad * (i + 1), :single_pad *
                              i] = True
            mask_dict = {
                'known_indice':
                paddle.to_tensor(known_indice, dtype='int64'),
                'batch_idx':
                paddle.to_tensor(batch_idx, dtype='int64'),
                'map_known_indice':
                paddle.to_tensor(map_known_indice, dtype='int64'),
                'known_lbs_bboxes': (known_labels, known_bboxs),
                'know_idx':
                know_idx,
                'pad_size':
                pad_size
            }
        else:
            # Inference: no denoising queries, no attention mask.
            padded_reference_points = reference_points.unsqueeze(0).tile(
                [batch_size, 1, 1])
            attn_mask = None
            mask_dict = None
        return padded_reference_points, attn_mask, mask_dict
    def forward(self, mlvl_feats, img_metas):
        """Forward function.
        Args:
            mlvl_feats (tuple[Tensor]): Features from the upstream
                network, each is a 5D-tensor with shape
                (B, N, C, H, W).
        Returns:
            all_cls_scores (Tensor): Outputs from the classification head, \
                shape [nb_dec, bs, num_query, cls_out_channels]. Note \
                cls_out_channels should includes background.
            all_bbox_preds (Tensor): Sigmoid outputs from the regression \
                head with normalized coordinate format (cx, cy, w, l, cz, h, theta, vx, vy). \
                Shape [nb_dec, bs, num_query, 9].
        """
        x = mlvl_feats[self.position_level]
        batch_size, num_cams = x.shape[0], x.shape[1]
        input_img_h, input_img_w, _ = img_metas[0]['pad_shape'][0]
        # Padding mask: 1 where the padded image has no real content.
        masks = paddle.ones((batch_size, num_cams, input_img_h, input_img_w))
        for img_id in range(batch_size):
            for cam_id in range(num_cams):
                img_h, img_w, _ = img_metas[img_id]['img_shape'][cam_id]
                masks[img_id, cam_id, :img_h, :img_w] = 0
        # Project features to embed_dims, folding cameras into the batch.
        x = self.input_proj(x.flatten(0, 1))
        x = x.reshape([batch_size, num_cams, *x.shape[-3:]])
        # interpolate masks to have the same spatial shape with x
        masks = F.interpolate(masks, size=x.shape[-2:]).cast('bool')
        if self.with_position:
            # 3D frustum position embedding, optionally feature-guided (FPE).
            coords_position_embeding, _ = self.position_embeding(
                mlvl_feats, img_metas, masks)
            if self.with_fpe:
                coords_position_embeding = self.fpe(
                    coords_position_embeding.flatten(0, 1),
                    x.flatten(0, 1)).reshape(x.shape)
            pos_embed = coords_position_embeding
            if self.with_multiview:
                # Add an adapted 2D sine embedding over all views at once.
                sin_embed = self.positional_encoding(masks)
                sin_embed = self.adapt_pos3d(sin_embed.flatten(0, 1)).reshape(
                    x.shape)
                pos_embed = pos_embed + sin_embed
            else:
                # Per-camera 2D sine embeddings, stacked along the view axis.
                pos_embeds = []
                for i in range(num_cams):
                    xy_embed = self.positional_encoding(masks[:, i, :, :])
                    pos_embeds.append(xy_embed.unsqueeze(1))
                sin_embed = paddle.concat(pos_embeds, 1)
                sin_embed = self.adapt_pos3d(sin_embed.flatten(0, 1)).reshape(
                    x.shape)
                pos_embed = pos_embed + sin_embed
        else:
            # 2D-only positional embedding.
            if self.with_multiview:
                pos_embed = self.positional_encoding(masks)
                pos_embed = self.adapt_pos3d(pos_embed.flatten(0, 1)).reshape(
                    x.shape)
            else:
                pos_embeds = []
                for i in range(num_cams):
                    pos_embed = self.positional_encoding(masks[:, i, :, :])
                    pos_embeds.append(pos_embed.unsqueeze(1))
                pos_embed = paddle.concat(pos_embeds, 1)
        reference_points = self.reference_points.weight
        if self.with_denoise:
            # Denoising path: DN queries prepended, block attention mask.
            reference_points, attn_mask, mask_dict = self.prepare_for_dn(
                batch_size, reference_points, img_metas)
            query_embeds = self.query_embedding(pos2posemb3d(reference_points))
            outs_dec, _ = self.transformer(x, masks, query_embeds, pos_embed,
                                           attn_mask, self.reg_branches)
        else:
            mask_dict = None
            query_embeds = self.query_embedding(pos2posemb3d(reference_points))
            reference_points = reference_points.unsqueeze(0).tile(
                [batch_size, 1, 1])
            outs_dec, _ = self.transformer(x, masks, query_embeds, pos_embed,
                                           self.reg_branches)
        outs_dec = nan_to_num(outs_dec)
        if self.with_time:
            # Mean frame-time delta, used to scale velocity predictions.
            time_stamps = []
            for img_meta in img_metas:
                time_stamps.append(np.asarray(img_meta['timestamp']))
            time_stamp = paddle.to_tensor(time_stamps, dtype=x.dtype)
            time_stamp = time_stamp.reshape([batch_size, -1, 6])
            mean_time_stamp = (
                time_stamp[:, 1, :] - time_stamp[:, 0, :]).mean(-1)
        outputs_classes = []
        outputs_coords = []
        for lvl in range(outs_dec.shape[0]):
            # Predict residuals w.r.t. the (inverse-sigmoid) reference points.
            reference = inverse_sigmoid(reference_points.clone())
            assert reference.shape[-1] == 3
            outputs_class = self.cls_branches[lvl](outs_dec[lvl])
            tmp = self.reg_branches[lvl](outs_dec[lvl])
            tmp[..., 0:2] += reference[..., 0:2]
            tmp[..., 0:2] = F.sigmoid(tmp[..., 0:2])
            tmp[..., 4:5] += reference[..., 2:3]
            tmp[..., 4:5] = F.sigmoid(tmp[..., 4:5])
            if self.with_time:
                tmp[..., 8:] = tmp[..., 8:] / mean_time_stamp[:, None, None]
            outputs_coord = tmp
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        all_cls_scores = paddle.stack(outputs_classes)
        all_bbox_preds = paddle.stack(outputs_coords)
        # De-normalize centers back to metric lidar coordinates.
        all_bbox_preds[..., 0:1] = (
            all_bbox_preds[..., 0:1] * (self.pc_range[3] - self.pc_range[0]) +
            self.pc_range[0])
        all_bbox_preds[..., 1:2] = (
            all_bbox_preds[..., 1:2] * (self.pc_range[4] - self.pc_range[1]) +
            self.pc_range[1])
        all_bbox_preds[..., 4:5] = (
            all_bbox_preds[..., 4:5] * (self.pc_range[5] - self.pc_range[2]) +
            self.pc_range[2])
        if mask_dict and mask_dict['pad_size'] > 0:
            # Split DN outputs (first pad_size queries) from matching queries.
            output_known_class = all_cls_scores[:, :, :mask_dict['pad_size'], :]
            output_known_coord = all_bbox_preds[:, :, :mask_dict['pad_size'], :]
            outputs_class = all_cls_scores[:, :, mask_dict['pad_size']:, :]
            outputs_coord = all_bbox_preds[:, :, mask_dict['pad_size']:, :]
            mask_dict['output_known_lbs_bboxes'] = (output_known_class,
                                                   output_known_coord)
            outs = {
                'all_cls_scores': outputs_class,
                'all_bbox_preds': outputs_coord,
                'enc_cls_scores': None,
                'enc_bbox_preds': None,
                'dn_mask_dict': mask_dict,
            }
        else:
            outs = {
                'all_cls_scores': all_cls_scores,
                'all_bbox_preds': all_bbox_preds,
                'enc_cls_scores': None,
                'enc_bbox_preds': None,
                'dn_mask_dict': None,
            }
        return outs
def prepare_for_loss(self, mask_dict):
    """Gather the denoising (dn) predictions and targets for the dn loss.

    Args:
        mask_dict (dict): Denoising bookkeeping produced during forward.
            Keys used here:
            - 'output_known_lbs_bboxes': (cls, coord) predictions for the
              padded (known/noised) queries, stacked over decoder layers.
            - 'known_lbs_bboxes': (labels, boxes) ground truth of the
              noised queries.
            - 'map_known_indice', 'known_indice', 'batch_idx': index
              tensors locating each known query inside the padded
              query dimension.

    Returns:
        tuple: ``(known_labels, known_bboxs, output_known_class,
        output_known_coord, num_tgt)`` where ``num_tgt`` is the number
        of known (noised) targets.
    """
    output_known_class, output_known_coord = mask_dict[
        'output_known_lbs_bboxes']
    known_labels, known_bboxs = mask_dict['known_lbs_bboxes']
    map_known_indice = mask_dict['map_known_indice'].astype('int64')
    known_indice = mask_dict['known_indice'].astype('int64')
    batch_idx = mask_dict['batch_idx'].astype('int64')
    # Batch index of every known (noised) query.
    bid = batch_idx[known_indice]
    if len(output_known_class) > 0:
        # [num_dec, bs, pad_size, C] -> [bs, pad_size, num_dec, C],
        # pick the (batch, query) pairs of the known targets, then move
        # the decoder-layer axis back to the front: [num_dec, num_known, C].
        output_known_class = output_known_class.transpose(
            [1, 2, 0, 3])[(bid, map_known_indice)].transpose([1, 0, 2])
        output_known_coord = output_known_coord.transpose(
            [1, 2, 0, 3])[(bid, map_known_indice)].transpose([1, 0, 2])
    num_tgt = known_indice.numel()
    return known_labels, known_bboxs, output_known_class, output_known_coord, num_tgt
def dn_loss_single(self,
                   cls_scores,
                   bbox_preds,
                   known_bboxs,
                   known_labels,
                   num_total_pos=None):
    """Denoising (dn) loss for the outputs of a single decoder layer.

    Args:
        cls_scores (Tensor): Classification logits of the known (noised)
            queries; reshaped to [-1, cls_out_channels] internally.
        bbox_preds (Tensor): Box predictions of the known queries;
            reshaped to [-1, code_size] internally.
        known_bboxs (Tensor): Ground-truth boxes of the noised queries.
        known_labels (Tensor): Ground-truth labels of the noised queries.
        num_total_pos (int, optional): Number of known targets used to
            normalize both losses.

    Returns:
        tuple[Tensor, Tensor]: dn classification and regression losses,
        each scaled by ``self.dn_weight``.
    """
    # classification loss
    cls_scores = cls_scores.reshape([-1, self.cls_out_channels])
    # construct weighted avg_factor to match with the official DETR repo
    cls_avg_factor = num_total_pos * 3.14159 / 6 * self.split * self.split * self.split  ### positive rate
    if self.sync_cls_avg_factor:
        # Average the factor across devices for consistent normalization.
        cls_avg_factor = reduce_mean(
            paddle.to_tensor([cls_avg_factor], dtype=cls_scores.dtype))
    bbox_weights = paddle.ones_like(bbox_preds)
    label_weights = paddle.ones_like(known_labels)
    cls_avg_factor = max(cls_avg_factor, 1)
    loss_cls = self.loss_cls(cls_scores, known_labels.astype('int64'),
                             label_weights) / (cls_avg_factor + self.pd_eps)
    # Compute the average number of gt boxes accross all gpus, for
    # normalization purposes
    num_total_pos = paddle.to_tensor([num_total_pos], dtype=loss_cls.dtype)
    num_total_pos = paddle.clip(reduce_mean(num_total_pos), min=1).item()
    # regression L1 loss
    bbox_preds = bbox_preds.reshape([-1, bbox_preds.shape[-1]])
    normalized_bbox_targets = normalize_bbox(known_bboxs, self.pc_range)
    # Drop rows with non-finite targets from the regression loss.
    isnotnan = paddle.isfinite(normalized_bbox_targets).all(axis=-1)
    bbox_weights = bbox_weights * self.code_weights
    bbox_weights[:, 6:
                 8] = 0  ###dn alaways reduce the mAOE, which is useless when training for a long time.
    loss_bbox = self.loss_bbox(
        bbox_preds[isnotnan], normalized_bbox_targets[isnotnan],
        bbox_weights[isnotnan]) / (num_total_pos + self.pd_eps)
    # Guard against NaN/Inf losses poisoning the backward pass.
    loss_cls = nan_to_num(loss_cls)
    loss_bbox = nan_to_num(loss_bbox)
    return self.dn_weight * loss_cls, self.dn_weight * loss_bbox
def export_forward(self, mlvl_feats, img_metas, time_stamp=None):
    """Forward function used for model export/deployment.

    Unlike the training forward, the image shape is read directly from
    ``img_metas['image_shape']`` and the (optional) ``time_stamp`` tensor
    is passed in explicitly instead of being collected from per-image
    metadata.

    Args:
        mlvl_feats (tuple[Tensor]): Features from the upstream
            network, each is a 5D-tensor with shape
            (B, N, C, H, W).
        img_metas (dict): Export meta info; only ``'image_shape'`` is
            read here.
        time_stamp (Tensor, optional): Frame timestamps, reshaped to
            [bs, -1, 6]; only used when ``self.with_time`` is True.
    Returns:
        all_cls_scores (Tensor): Outputs from the classification head, \
            shape [nb_dec, bs, num_query, cls_out_channels]. Note \
            cls_out_channels should includes background.
        all_bbox_preds (Tensor): Sigmoid outputs from the regression \
            head with normalized coordinate format (cx, cy, w, l, cz, h, theta, vx, vy). \
            Shape [nb_dec, bs, num_query, 9].
    """
    x = mlvl_feats[self.position_level]
    batch_size, num_cams = x.shape[0], x.shape[1]
    input_img_h, input_img_w = img_metas['image_shape']
    masks = paddle.zeros([batch_size, num_cams, input_img_h, input_img_w])
    # Fold cameras into the batch dim for the shared input projection.
    x = self.input_proj(x.flatten(0, 1))
    x = x.reshape([batch_size, num_cams, *x.shape[-3:]])
    # interpolate masks to have the same spatial shape with x
    masks = F.interpolate(masks, size=x.shape[-2:]).cast('bool')
    if self.with_position:
        # 3D position-aware embedding built from camera geometry.
        coords_position_embeding, _ = self.position_embeding(
            mlvl_feats, img_metas, masks)
        if self.with_fpe:
            # Feature-guided position encoder refinement.
            coords_position_embeding = self.fpe(
                coords_position_embeding.flatten(0, 1),
                x.flatten(0, 1)).reshape(x.shape)
        pos_embed = coords_position_embeding
        if self.with_multiview:
            sin_embed = self.positional_encoding(masks)
            sin_embed = self.adapt_pos3d(sin_embed.flatten(0, 1)).reshape(
                x.shape)
            pos_embed = pos_embed + sin_embed
        else:
            # Per-camera 2D positional encoding, concatenated over cameras.
            pos_embeds = []
            for i in range(num_cams):
                xy_embed = self.positional_encoding(masks[:, i, :, :])
                pos_embeds.append(xy_embed.unsqueeze(1))
            sin_embed = paddle.concat(pos_embeds, 1)
            sin_embed = self.adapt_pos3d(sin_embed.flatten(0, 1)).reshape(
                x.shape)
            pos_embed = pos_embed + sin_embed
    else:
        if self.with_multiview:
            pos_embed = self.positional_encoding(masks)
            pos_embed = self.adapt_pos3d(pos_embed.flatten(0, 1)).reshape(
                x.shape)
        else:
            pos_embeds = []
            for i in range(num_cams):
                pos_embed = self.positional_encoding(masks[:, i, :, :])
                pos_embeds.append(pos_embed.unsqueeze(1))
            pos_embed = paddle.concat(pos_embeds, 1)
    # Learnable 3D reference points shared across the batch.
    reference_points = self.reference_points.weight
    query_embeds = self.query_embedding(pos2posemb3d(reference_points))
    reference_points = reference_points.unsqueeze(0).tile(
        [batch_size, 1, 1])
    outs_dec, _ = self.transformer(x, masks, query_embeds, pos_embed,
                                   self.reg_branches)
    outs_dec = nan_to_num(outs_dec)
    if self.with_time:
        # Mean time delta between the two frame groups; used to turn the
        # predicted displacement into a velocity.
        time_stamp = time_stamp.reshape([batch_size, -1, 6])
        mean_time_stamp = (
            time_stamp[:, 1, :] - time_stamp[:, 0, :]).mean(-1)
    outputs_classes = []
    outputs_coords = []
    for lvl in range(outs_dec.shape[0]):
        reference = inverse_sigmoid(reference_points.clone())
        assert reference.shape[-1] == 3
        outputs_class = self.cls_branches[lvl](outs_dec[lvl])
        tmp = self.reg_branches[lvl](outs_dec[lvl])
        # Predict center offsets relative to the reference points:
        # (cx, cy) at channels 0:2 and cz at channel 4:5, kept in [0, 1].
        tmp[..., 0:2] += reference[..., 0:2]
        tmp[..., 0:2] = F.sigmoid(tmp[..., 0:2])
        tmp[..., 4:5] += reference[..., 2:3]
        tmp[..., 4:5] = F.sigmoid(tmp[..., 4:5])
        if self.with_time:
            tmp[..., 8:] = tmp[..., 8:] / mean_time_stamp[:, None, None]
        outputs_coord = tmp
        outputs_classes.append(outputs_class)
        outputs_coords.append(outputs_coord)
    all_cls_scores = paddle.stack(outputs_classes)
    all_bbox_preds = paddle.stack(outputs_coords)
    # De-normalize centers from [0, 1] back to the point-cloud range.
    all_bbox_preds[..., 0:1] = (
        all_bbox_preds[..., 0:1] * (self.pc_range[3] - self.pc_range[0]) +
        self.pc_range[0])
    all_bbox_preds[..., 1:2] = (
        all_bbox_preds[..., 1:2] * (self.pc_range[4] - self.pc_range[1]) +
        self.pc_range[1])
    all_bbox_preds[..., 4:5] = (
        all_bbox_preds[..., 4:5] * (self.pc_range[5] - self.pc_range[2]) +
        self.pc_range[2])
    outs = {
        'all_cls_scores': all_cls_scores,
        'all_bbox_preds': all_bbox_preds,
        # 'enc_cls_scores': None,
        # 'enc_bbox_preds': None,
    }
    return outs
def _get_target_single(self,
                       cls_score,
                       bbox_pred,
                       gt_labels,
                       gt_bboxes,
                       gt_bboxes_ignore=None):
    """Compute regression and classification targets for one image.
    Outputs from a single decoder layer of a single feature level are used.
    Args:
        cls_score (Tensor): Box score logits from a single decoder layer
            for one image. Shape [num_query, cls_out_channels].
        bbox_pred (Tensor): Sigmoid outputs from a single decoder layer
            for one image, with normalized coordinate (cx, cy, w, h) and
            shape [num_query, 4].
        gt_bboxes (Tensor): Ground truth bboxes for one image with
            shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
        gt_labels (Tensor): Ground truth class indices for one image
            with shape (num_gts, ).
        gt_bboxes_ignore (Tensor, optional): Bounding boxes
            which can be ignored. Default None.
    Returns:
        tuple[Tensor]: a tuple containing the following for one image.
            - labels (Tensor): Labels of each image.
            - label_weights (Tensor]): Label weights of each image.
            - bbox_targets (Tensor): BBox targets of each image.
            - bbox_weights (Tensor): BBox weights of each image.
            - pos_inds (Tensor): Sampled positive indices for each image.
            - neg_inds (Tensor): Sampled negative indices for each image.
    """
    num_bboxes = bbox_pred.shape[0]
    # assigner and sampler
    assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes,
                                         gt_labels, gt_bboxes_ignore)
    sampling_result = self.sampler.sample(assign_result, bbox_pred,
                                          gt_bboxes)
    pos_inds = sampling_result.pos_inds
    neg_inds = sampling_result.neg_inds
    # label targets: every query defaults to the background class index.
    labels = paddle.full((num_bboxes, ), self.num_classes, dtype='int64')
    labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
    label_weights = paddle.ones([num_bboxes])
    # bbox targets: only positive queries contribute to the box loss.
    code_size = gt_bboxes.shape[1]
    bbox_targets = paddle.zeros_like(bbox_pred)[..., :code_size]
    bbox_weights = paddle.zeros_like(bbox_pred)
    bbox_weights[pos_inds] = 1.0
    # DETR-style 2D gt boxes (4 values): reshape to the 3D code layout.
    # NOTE: fixed a port bug — paddle's Tensor.reshape(shape, name=None)
    # takes the target shape as a single list/tuple, so passing two
    # positional ints routed the second one into the `name` argument.
    if sampling_result.pos_gt_bboxes.shape[1] == 4:
        bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes.reshape(
            [sampling_result.pos_gt_bboxes.shape[0], self.code_size - 1])
    else:
        bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes
    return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
            neg_inds)
def get_targets(self,
                cls_scores_list,
                bbox_preds_list,
                gt_bboxes_list,
                gt_labels_list,
                gt_bboxes_ignore_list=None):
    """Compute classification and regression targets for a whole batch.

    Applies ``_get_target_single`` to every image and aggregates the
    per-image results.

    Args:
        cls_scores_list (list[Tensor]): Per-image classification logits
            from one decoder layer, each [num_query, cls_out_channels].
        bbox_preds_list (list[Tensor]): Per-image box predictions from
            the same decoder layer.
        gt_bboxes_list (list[Tensor]): Per-image ground-truth boxes.
        gt_labels_list (list[Tensor]): Per-image ground-truth labels.
        gt_bboxes_ignore_list (None): Must be None; ignoring boxes is
            not supported.

    Returns:
        tuple: per-image labels, label weights, bbox targets and bbox
        weights, plus the total numbers of positive and negative
        samples across the batch.
    """
    assert gt_bboxes_ignore_list is None, \
        'Only supports for gt_bboxes_ignore setting to None.'
    num_imgs = len(cls_scores_list)
    # Replicate the (None) ignore entry once per image for multi_apply.
    ignore_list = [gt_bboxes_ignore_list] * num_imgs
    (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
     pos_inds_list, neg_inds_list) = multi_apply(
         self._get_target_single, cls_scores_list, bbox_preds_list,
         gt_labels_list, gt_bboxes_list, ignore_list)
    num_total_pos = sum(inds.numel() for inds in pos_inds_list)
    num_total_neg = sum(inds.numel() for inds in neg_inds_list)
    return (labels_list, label_weights_list, bbox_targets_list,
            bbox_weights_list, num_total_pos, num_total_neg)
def loss_single(self,
                cls_scores,
                bbox_preds,
                gt_bboxes_list,
                gt_labels_list,
                gt_bboxes_ignore_list=None):
    """Loss function for outputs from a single decoder layer of a single
    feature level.
    Args:
        cls_scores (Tensor): Box score logits from a single decoder layer
            for all images. Shape [bs, num_query, cls_out_channels].
        bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
            for all images, with normalized coordinate (cx, cy, w, h) and
            shape [bs, num_query, 4].
        gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
            with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
        gt_labels_list (list[Tensor]): Ground truth class indices for each
            image with shape (num_gts, ).
        gt_bboxes_ignore_list (list[Tensor], optional): Bounding
            boxes which can be ignored for each image. Default None.
    Returns:
        dict[str, Tensor]: A dictionary of loss components for outputs from
            a single decoder layer.
    """
    num_imgs = cls_scores.shape[0]
    # Split the batch into per-image tensors for target assignment.
    cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
    bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
    cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
                                       gt_bboxes_list, gt_labels_list,
                                       gt_bboxes_ignore_list)
    (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
     num_total_pos, num_total_neg) = cls_reg_targets
    # Flatten per-image targets back into one batch-wide tensor each.
    labels = paddle.concat(labels_list, 0)
    label_weights = paddle.concat(label_weights_list, 0)
    bbox_targets = paddle.concat(bbox_targets_list, 0)
    bbox_weights = paddle.concat(bbox_weights_list, 0)
    # classification loss
    cls_scores = cls_scores.reshape([-1, self.cls_out_channels])
    # construct weighted avg_factor to match with the official DETR repo
    cls_avg_factor = num_total_pos * 1.0 + \
        num_total_neg * self.bg_cls_weight
    if self.sync_cls_avg_factor:
        # Average the factor across devices for consistent normalization.
        cls_avg_factor = reduce_mean(
            paddle.to_tensor([cls_avg_factor], dtype=cls_scores.dtype))
    cls_avg_factor = max(cls_avg_factor, 1)
    loss_cls = self.loss_cls(cls_scores, labels,
                             label_weights) / (cls_avg_factor + self.pd_eps)
    # Compute the average number of gt boxes accross all gpus, for
    # normalization purposes
    num_total_pos = paddle.to_tensor([num_total_pos], dtype=loss_cls.dtype)
    num_total_pos = paddle.clip(reduce_mean(num_total_pos), min=1).item()
    # regression L1 loss
    bbox_preds = bbox_preds.reshape([-1, bbox_preds.shape[-1]])
    normalized_bbox_targets = normalize_bbox(bbox_targets, self.pc_range)
    # paddle.all
    # Drop rows with non-finite targets from the regression loss.
    isnotnan = paddle.isfinite(normalized_bbox_targets).all(axis=-1)
    bbox_weights = bbox_weights * self.code_weights
    loss_bbox = self.loss_bbox(
        bbox_preds[isnotnan], normalized_bbox_targets[isnotnan],
        bbox_weights[isnotnan]) / (num_total_pos + self.pd_eps)
    # Guard against NaN/Inf losses poisoning the backward pass.
    loss_cls = nan_to_num(loss_cls)
    loss_bbox = nan_to_num(loss_bbox)
    return loss_cls, loss_bbox
def loss(self,
         gt_bboxes_list,
         gt_labels_list,
         preds_dicts,
         gt_bboxes_ignore=None):
    """Loss function.
    Args:
        gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
            with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
        gt_labels_list (list[Tensor]): Ground truth class indices for each
            image with shape (num_gts, ).
        preds_dicts:
            all_cls_scores (Tensor): Classification score of all
                decoder layers, has shape
                [nb_dec, bs, num_query, cls_out_channels].
            all_bbox_preds (Tensor): Sigmoid regression
                outputs of all decode layers. Each is a 4D-tensor with
                normalized coordinate format (cx, cy, w, h) and shape
                [nb_dec, bs, num_query, 4].
            enc_cls_scores (Tensor): Classification scores of
                points on encode feature map , has shape
                (N, h*w, num_classes). Only be passed when as_two_stage is
                True, otherwise is None.
            enc_bbox_preds (Tensor): Regression results of each points
                on the encode feature map, has shape (N, h*w, 4). Only be
                passed when as_two_stage is True, otherwise is None.
        gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
            which can be ignored for each image. Default None.
    Returns:
        dict[str, Tensor]: A dictionary of loss components.
    """
    assert gt_bboxes_ignore is None, \
        f'{self.__class__.__name__} only supports ' \
        f'for gt_bboxes_ignore setting to None.'
    all_cls_scores = preds_dicts['all_cls_scores']
    all_bbox_preds = preds_dicts['all_bbox_preds']
    enc_cls_scores = preds_dicts['enc_cls_scores']
    enc_bbox_preds = preds_dicts['enc_bbox_preds']
    num_dec_layers = len(all_cls_scores)

    def get_gravity_center(bboxes):
        # Convert a bottom-center (x, y, z) to the box gravity center by
        # lifting z by half the box height (bboxes[:, 5]).
        bottom_center = bboxes[:, :3]
        gravity_center = np.zeros_like(bottom_center)
        gravity_center[:, :2] = bottom_center[:, :2]
        gravity_center[:, 2] = bottom_center[:, 2] + bboxes[:, 5] * 0.5
        return gravity_center

    # Re-express gt boxes with gravity centers, keeping the remaining
    # box parameters (dims, yaw, ...) unchanged.
    gt_bboxes_list = [
        paddle.concat((paddle.to_tensor(get_gravity_center(gt_bboxes)),
                       paddle.to_tensor(gt_bboxes[:, 3:])),
                      axis=1) for gt_bboxes in gt_bboxes_list
    ]
    # Replicate targets once per decoder layer for multi_apply.
    all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
    all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
    all_gt_bboxes_ignore_list = [
        gt_bboxes_ignore for _ in range(num_dec_layers)
    ]
    losses_cls, losses_bbox = multi_apply(
        self.loss_single, all_cls_scores, all_bbox_preds,
        all_gt_bboxes_list, all_gt_labels_list, all_gt_bboxes_ignore_list)
    loss_dict = dict()
    # loss of proposal generated from encode feature map.
    if enc_cls_scores is not None:
        binary_labels_list = [
            paddle.zeros_like(gt_labels_list[i])
            for i in range(len(all_gt_labels_list))
        ]
        enc_loss_cls, enc_losses_bbox = \
            self.loss_single(enc_cls_scores, enc_bbox_preds,
                             gt_bboxes_list, binary_labels_list, gt_bboxes_ignore)
        loss_dict['enc_loss_cls'] = enc_loss_cls
        loss_dict['enc_loss_bbox'] = enc_losses_bbox
    # Denoising (dn) auxiliary losses, if dn training is enabled.
    if preds_dicts['dn_mask_dict'] is not None:
        known_labels, known_bboxs, output_known_class, output_known_coord, num_tgt = self.prepare_for_loss(
            preds_dicts['dn_mask_dict'])
        all_known_bboxs_list = [known_bboxs for _ in range(num_dec_layers)]
        all_known_labels_list = [
            known_labels for _ in range(num_dec_layers)
        ]
        all_num_tgts_list = [num_tgt for _ in range(num_dec_layers)]
        dn_losses_cls, dn_losses_bbox = multi_apply(
            self.dn_loss_single, output_known_class, output_known_coord,
            all_known_bboxs_list, all_known_labels_list, all_num_tgts_list)
        loss_dict['dn_loss_cls'] = dn_losses_cls[-1]
        loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1]
        num_dec_layer = 0
        for loss_cls_i, loss_bbox_i in zip(dn_losses_cls[:-1],
                                           dn_losses_bbox[:-1]):
            loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i
            loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i
            num_dec_layer += 1
    # loss from the last decoder layer
    loss_dict['loss_cls'] = losses_cls[-1]
    loss_dict['loss_bbox'] = losses_bbox[-1]
    # loss from other decoder layers
    num_dec_layer = 0
    for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1], losses_bbox[:-1]):
        loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
        loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
        num_dec_layer += 1
    return loss_dict
def get_bboxes(self, preds_dicts, img_metas, rescale=False):
    """Generate bboxes from bbox head predictions.

    Args:
        preds_dicts (tuple[list[dict]]): Prediction results.
        img_metas (list[dict]): Point cloud and image's meta info.

    Returns:
        list[list]: One ``[bboxes, scores, labels]`` triple per sample.
    """
    decoded = self.bbox_coder.decode(preds_dicts)
    ret_list = []
    for preds in decoded:
        boxes = preds['bboxes']
        # Shift the z coordinate from gravity center to bottom center.
        boxes[:, 2] = boxes[:, 2] - boxes[:, 5] * 0.5
        ret_list.append([boxes, preds['scores'], preds['labels']])
    return ret_list
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/point_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/models/dense_heads/point_head_simple.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers import param_init
from paddle3d.models.losses import SigmoidFocalClassificationLoss
from paddle3d.ops import roiaware_pool3d
from paddle3d.models.common import enlarge_box3d
@manager.HEADS.add_component
class PointHeadSimple(nn.Layer):
    """
    A simple point-based segmentation head, which is used for PV-RCNN keypoint segmentation.
    Reference Paper: https://arxiv.org/abs/1912.13192
    PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection
    """

    def __init__(self, num_class, input_channels, model_cfg, **kwargs):
        """
        Args:
            num_class (int): Number of foreground classes.
            input_channels (int): Channel number of the input point features.
            model_cfg (dict): Head config; keys used: 'class_agnostic',
                'loss_config', 'cls_fc', 'target_config'.
        """
        super().__init__()
        self.model_cfg = model_cfg
        # Class-agnostic segmentation collapses all classes into one.
        self.num_class = num_class if not self.model_cfg['class_agnostic'] else 1
        self.build_losses(self.model_cfg['loss_config'])
        self.cls_layers = self.make_fc_layers(
            fc_cfg=self.model_cfg['cls_fc'],
            input_channels=input_channels,
            output_channels=self.num_class)
        self.init_weights()

    def init_weights(self):
        """Initialize Linear weights and reset BatchNorm1D scale/bias."""
        for layer in self.sublayers():
            if isinstance(layer, (nn.Linear)):
                param_init.reset_parameters(layer)
            if isinstance(layer, nn.BatchNorm1D):
                param_init.constant_init(layer.weight, value=1)
                param_init.constant_init(layer.bias, value=0)

    def assign_targets(self, input_dict):
        """
        Args:
            input_dict:
                point_features: (N1 + N2 + N3 + ..., C)
                batch_size:
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                gt_boxes (optional): (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_part_labels: (N1 + N2 + N3 + ..., 3)
        """
        point_coords = input_dict['point_coords']
        gt_boxes = input_dict['gt_boxes']
        assert len(
            gt_boxes.shape) == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        assert len(point_coords.shape) in [
            2
        ], 'points.shape=%s' % str(point_coords.shape)
        batch_size = gt_boxes.shape[0]
        # Enlarge gt boxes so points in the margin around a box are
        # marked as ignored rather than background.
        extend_gt_boxes = enlarge_box3d(
            gt_boxes.reshape([-1, gt_boxes.shape[-1]]),
            extra_width=self.model_cfg['target_config']
            ['gt_extra_width']).reshape([batch_size, -1, gt_boxes.shape[-1]])
        targets_dict = self.assign_stack_targets(
            points=point_coords,
            gt_boxes=gt_boxes,
            extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True,
            use_ball_constraint=False,
            ret_part_labels=False)
        return targets_dict

    def get_loss(self, tb_dict=None):
        """Return the total point loss and a log dict (classification only)."""
        tb_dict = {} if tb_dict is None else tb_dict
        point_loss_cls, tb_dict_1 = self.get_cls_layer_loss()
        point_loss = point_loss_cls
        tb_dict.update(tb_dict_1)
        return point_loss, tb_dict

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                point_features: (N1 + N2 + N3 + ..., C) or (B, N, C)
                point_features_before_fusion: (N1 + N2 + N3 + ..., C)
                point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                point_labels (optional): (N1 + N2 + N3 + ...)
                gt_boxes (optional): (B, M, 8)
        Returns:
            batch_dict:
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        """
        if self.model_cfg.get('use_point_features_before_fusion', False):
            point_features = batch_dict['point_features_before_fusion']
        else:
            point_features = batch_dict['point_features']
        point_cls_preds = self.cls_layers(
            point_features)  # (total_points, num_class)
        ret_dict = {
            'point_cls_preds': point_cls_preds,
        }
        # Keep the best per-point foreground probability for downstream use.
        point_cls_scores = F.sigmoid(point_cls_preds)
        batch_dict['point_cls_scores'] = point_cls_scores.max(axis=-1)
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
        # Cached for get_cls_layer_loss().
        self.forward_ret_dict = ret_dict
        return batch_dict

    def build_losses(self, losses_cfg):
        """Register the focal classification loss as a sublayer."""
        self.add_sublayer('cls_loss_func',
                          SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0))

    @staticmethod
    def make_fc_layers(fc_cfg, input_channels, output_channels):
        """Build an MLP of Linear+BN+ReLU blocks ending in a plain Linear."""
        fc_layers = []
        c_in = input_channels
        for k in range(0, fc_cfg.__len__()):
            fc_layers.extend([
                nn.Linear(c_in, fc_cfg[k], bias_attr=False),
                nn.BatchNorm1D(fc_cfg[k]),
                nn.ReLU(),
            ])
            c_in = fc_cfg[k]
        fc_layers.append(nn.Linear(c_in, output_channels, bias_attr=True))
        return nn.Sequential(*fc_layers)

    def assign_stack_targets(self,
                             points,
                             gt_boxes,
                             extend_gt_boxes=None,
                             ret_box_labels=False,
                             ret_part_labels=False,
                             set_ignore_flag=True,
                             use_ball_constraint=False,
                             central_radius=2.0):
        """
        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: [B, M, 8]
            ret_box_labels:
            ret_part_labels:
            set_ignore_flag:
            use_ball_constraint:
            central_radius:
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored
            point_box_labels: (N1 + N2 + N3 + ..., code_size)
        """
        assert len(
            points.shape
        ) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)
        assert len(gt_boxes.shape
                   ) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(
                       gt_boxes.shape)
        assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3 and extend_gt_boxes.shape[2] == 8, \
            'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)
        assert set_ignore_flag != use_ball_constraint, 'Choose one only!'
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = paddle.zeros((points.shape[0], ), dtype='int64')
        for k in range(batch_size):
            # Process points belonging to sample k only.
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = paddle.zeros((bs_mask.sum(), ),
                                                   dtype='int64')
            # Index of the gt box containing each point (-1 if none).
            box_idxs_of_pts = roiaware_pool3d.points_in_boxes_gpu(
                points_single.unsqueeze(axis=0),
                gt_boxes[k:k + 1, :, 0:7]).astype('int64').squeeze(axis=0)
            box_fg_flag = (box_idxs_of_pts >= 0)
            if set_ignore_flag:
                # Points inside the enlarged box but outside the real box
                # get the ignore label (-1).
                extend_box_idxs_of_pts = roiaware_pool3d.points_in_boxes_gpu(
                    points_single.unsqueeze(axis=0),
                    extend_gt_boxes[k:k + 1, :, 0:7]).astype('int64').squeeze(
                        axis=0)
                fg_flag = box_fg_flag
                ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            elif use_ball_constraint:
                # Foreground only if the point is near the box center.
                box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
                box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2
                ball_flag = (paddle.linalg.norm(
                    (box_centers - points_single), axis=1) < central_radius)
                fg_flag = box_fg_flag & ball_flag
            else:
                raise NotImplementedError
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            # Last gt column carries the class id (1-based).
            point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 \
                else gt_box_of_fg_points[:, -1].astype('int64')
            point_cls_labels[bs_mask] = point_cls_labels_single
            if ret_box_labels and gt_box_of_fg_points.shape[0] > 0:
                raise NotImplementedError
            if ret_part_labels:
                raise NotImplementedError
        targets_dict = {
            'point_cls_labels': point_cls_labels,
        }
        return targets_dict

    def get_cls_layer_loss(self, tb_dict=None):
        """Focal classification loss over all points, normalized by the
        number of positive points."""
        point_cls_labels = self.forward_ret_dict['point_cls_labels'].reshape(
            [-1])
        point_cls_preds = self.forward_ret_dict['point_cls_preds'].reshape(
            [-1, self.num_class])
        positives = (point_cls_labels > 0)
        # Ignored points (label -1) get zero weight.
        negative_cls_weights = (point_cls_labels == 0) * 1.0
        cls_weights = (negative_cls_weights + 1.0 * positives).astype('float32')
        pos_normalizer = positives.sum(axis=0).astype('float32')
        cls_weights /= paddle.clip(pos_normalizer, min=1.0)
        # Clamp -1 labels to 0 before one-hot; the background column is
        # dropped right after.
        one_hot_targets = F.one_hot(
            point_cls_labels * (point_cls_labels >= 0).astype(
                point_cls_labels.dtype),
            num_classes=self.num_class + 1)
        one_hot_targets = one_hot_targets[:, 1:]
        one_hot_targets.stop_gradient = True
        cls_loss_src = self.cls_loss_func(
            point_cls_preds, one_hot_targets, weights=cls_weights)
        point_loss_cls = cls_loss_src.sum()
        loss_weights_dict = self.model_cfg['loss_config']['loss_weights']
        point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']
        if tb_dict is None:
            tb_dict = {}
        tb_dict.update({
            'point_loss_cls': point_loss_cls,
            'point_pos_num': pos_normalizer
        })
        return point_loss_cls, tb_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/target_assigner/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .anchor_generator import *
from .axis_aligned_target_assigner import *
from .hungarian_assigner import HungarianAssigner3D
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/target_assigner/axis_aligned_target_assigner.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from paddle3d.utils.box import boxes3d_nearest_bev_iou
class AxisAlignedTargetAssigner(object):
"""
This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py#L8
"""
def __init__(self, anchor_generator_cfg, anchor_target_cfg, class_names,
             box_coder):
    """
    Args:
        anchor_generator_cfg (list[dict]): One config per anchor class;
            each provides 'class_name', 'matched_threshold' and
            'unmatched_threshold'.
        anchor_target_cfg (dict): Uses 'pos_fraction', 'sample_size'
            and 'norm_by_num_examples'.
        class_names (list[str]): Ordered detection class names.
        box_coder: Box encoder/decoder used for regression targets.
    """
    super().__init__()
    self.anchor_generator_cfg = anchor_generator_cfg
    self.anchor_target_cfg = anchor_target_cfg
    self.box_coder = box_coder
    self.class_names = np.array(class_names)
    self.anchor_class_names = [
        cfg['class_name'] for cfg in anchor_generator_cfg
    ]
    # A negative pos_fraction disables positive/negative re-sampling.
    pos_fraction = anchor_target_cfg['pos_fraction']
    self.pos_fraction = pos_fraction if pos_fraction >= 0 else None
    self.sample_size = anchor_target_cfg['sample_size']
    self.norm_by_num_examples = anchor_target_cfg['norm_by_num_examples']
    # Per-class IoU thresholds for matched / unmatched anchors.
    self.matched_thresholds = {
        cfg['class_name']: cfg['matched_threshold']
        for cfg in anchor_generator_cfg
    }
    self.unmatched_thresholds = {
        cfg['class_name']: cfg['unmatched_threshold']
        for cfg in anchor_generator_cfg
    }
def assign_targets(self, all_anchors, gt_boxes_with_classes):
    """Assign classification/regression targets for a batch of anchors.

    Args:
        all_anchors: [(N, 7), ...] one anchor tensor per anchor class.
        gt_boxes_with_classes: (B, M, 8) gt boxes with the class id in
            the last column; rows of all zeros are padding.
    Returns:
        dict: 'box_cls_labels' (B, num_anchors),
        'box_reg_targets' (B, num_anchors, code_size) and
        'reg_weights' (B, num_anchors).
    """
    bbox_targets = []
    cls_labels = []
    reg_weights = []
    batch_size = gt_boxes_with_classes.shape[0]
    # Split class ids (last column) from the box parameters.
    gt_classes = gt_boxes_with_classes[:, :, -1]
    gt_boxes = gt_boxes_with_classes[:, :, :-1]
    for k in range(batch_size):
        cur_gt = gt_boxes[k]
        cnt = cur_gt.__len__() - 1
        # Strip the zero-padded gt rows from the tail.
        while cnt > 0 and cur_gt[cnt].sum() == 0:
            cnt -= 1
        cur_gt = cur_gt[:cnt + 1]
        cur_gt_classes = gt_classes[k][:cnt + 1].cast("int32")
        target_list = []
        # Assign each anchor class only against gts of the same class.
        for anchor_class_name, anchors in zip(self.anchor_class_names,
                                              all_anchors):
            if cur_gt_classes.shape[0] > 1:
                mask = paddle.to_tensor(
                    self.class_names[cur_gt_classes.cpu().numpy() -
                                     1] == anchor_class_name)
            else:
                mask = paddle.to_tensor([
                    self.class_names[c - 1] == anchor_class_name
                    for c in cur_gt_classes
                ],
                                        dtype=paddle.bool)
            feature_map_size = anchors.shape[:3]
            anchors = anchors.reshape([-1, anchors.shape[-1]])
            selected_classes = cur_gt_classes[mask]
            single_target = self.assign_targets_single(
                anchors,
                cur_gt[mask],
                gt_classes=selected_classes,
                matched_threshold=self.
                matched_thresholds[anchor_class_name],
                unmatched_threshold=self.
                unmatched_thresholds[anchor_class_name])
            target_list.append(single_target)
        # Reshape per-class targets onto the feature map grid, then
        # concatenate over anchor classes and flatten.
        target_dict = {
            'box_cls_labels': [
                t['box_cls_labels'].reshape([*feature_map_size, -1])
                for t in target_list
            ],
            'box_reg_targets': [
                t['box_reg_targets'].reshape(
                    [*feature_map_size, -1, self.box_coder.code_size])
                for t in target_list
            ],
            'reg_weights': [
                t['reg_weights'].reshape([*feature_map_size, -1])
                for t in target_list
            ]
        }
        target_dict['box_reg_targets'] = paddle.concat(
            target_dict['box_reg_targets'],
            axis=-2).reshape([-1, self.box_coder.code_size])
        target_dict['box_cls_labels'] = paddle.concat(
            target_dict['box_cls_labels'], axis=-1).reshape([-1])
        target_dict['reg_weights'] = paddle.concat(
            target_dict['reg_weights'], axis=-1).reshape([-1])
        bbox_targets.append(target_dict['box_reg_targets'])
        cls_labels.append(target_dict['box_cls_labels'])
        reg_weights.append(target_dict['reg_weights'])
    # Stack per-sample results into batch tensors.
    bbox_targets = paddle.stack(bbox_targets, axis=0)
    cls_labels = paddle.stack(cls_labels, axis=0)
    reg_weights = paddle.stack(reg_weights, axis=0)
    all_targets_dict = {
        'box_cls_labels': cls_labels,
        'box_reg_targets': bbox_targets,
        'reg_weights': reg_weights
    }
    return all_targets_dict
    def assign_targets_single(self,
                              anchors,
                              gt_boxes,
                              gt_classes,
                              matched_threshold=0.6,
                              unmatched_threshold=0.45):
        """Assign classification/regression targets to anchors of one class.

        Anchors are matched to ground-truth boxes by nearest-BEV IoU using the
        classic two-way rule: every gt claims its best-overlapping anchor, and
        every anchor whose best overlap exceeds ``matched_threshold`` becomes a
        positive for that gt. Anchors below ``unmatched_threshold`` become
        background; the rest stay ignored (-1).

        Args:
            anchors (Tensor): (num_anchors, 7+) anchor boxes.
            gt_boxes (Tensor): (num_gt, 7+) ground-truth boxes.
            gt_classes (Tensor): (num_gt,) 1-based class ids of the gt boxes.
            matched_threshold (float): IoU at/above which an anchor is positive.
            unmatched_threshold (float): IoU below which an anchor is background.

        Returns:
            dict: 'box_cls_labels' (num_anchors,), 'box_reg_targets'
            (num_anchors, code_size) and 'reg_weights' (num_anchors,).
        """
        num_anchors = anchors.shape[0]
        num_gt = gt_boxes.shape[0]
        # -1 = ignored; 0 = background; >0 = (1-based) assigned class id.
        labels = paddle.ones((num_anchors, ), dtype="int32") * -1
        gt_ids = paddle.ones((num_anchors, ), dtype="int32") * -1
        if len(gt_boxes) > 0 and anchors.shape[0] > 0:
            # (num_anchors, num_gt) pairwise BEV IoU.
            anchor_by_gt_overlap = boxes3d_nearest_bev_iou(
                anchors[:, 0:7], gt_boxes[:, 0:7])
            # argmax is taken on CPU/numpy, presumably to avoid a paddle
            # argmax limitation — TODO confirm this round-trip is still needed.
            anchor_to_gt_argmax = paddle.to_tensor(
                anchor_by_gt_overlap.cpu().numpy().argmax(axis=1))
            anchor_to_gt_max = anchor_by_gt_overlap[paddle.arange(num_anchors),
                                                    anchor_to_gt_argmax]
            gt_to_anchor_argmax = paddle.to_tensor(
                anchor_by_gt_overlap.cpu().numpy().argmax(axis=0))
            gt_to_anchor_max = anchor_by_gt_overlap[gt_to_anchor_argmax,
                                                    paddle.arange(num_gt)]
            # A gt with zero overlap everywhere must not force-match an anchor;
            # set its max to -1 so the equality test below never fires.
            empty_gt_mask = gt_to_anchor_max == 0
            gt_to_anchor_max[empty_gt_mask] = -1
            # Force-match: anchors that realize some gt's maximum overlap.
            anchors_with_max_overlap = (
                anchor_by_gt_overlap == gt_to_anchor_max).nonzero()[:, 0]
            if anchors_with_max_overlap.shape[0] > 0:
                gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
                labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
                gt_ids[anchors_with_max_overlap] = gt_inds_force.cast("int32")
            # Threshold-match: anchors whose best overlap clears the bar.
            pos_inds = paddle.where(anchor_to_gt_max >= matched_threshold)[0]
            if pos_inds.shape[0] > 0:
                gt_inds_over_thresh = anchor_to_gt_argmax[pos_inds]
                labels[pos_inds] = gt_classes[gt_inds_over_thresh]
                gt_ids[pos_inds] = gt_inds_over_thresh.cast("int32")
            bg_inds = (anchor_to_gt_max < unmatched_threshold).nonzero()[:, 0]
        else:
            # No gt or no anchors: everything is a background candidate.
            bg_inds = paddle.arange(num_anchors)
        fg_inds = (labels > 0).nonzero()
        if self.pos_fraction is not None and fg_inds.numel() > 0:
            # Optional subsampling to a fixed foreground/background budget.
            # TODO(qianhui): zero shape
            fg_inds = fg_inds[:, 0]
            num_fg = int(self.pos_fraction * self.sample_size)
            if len(fg_inds) > num_fg:
                # Randomly drop surplus positives back to "ignored".
                num_disabled = len(fg_inds) - num_fg
                disable_inds = paddle.randperm(len(fg_inds))[:num_disabled]
                labels[disable_inds] = -1
                fg_inds = (labels > 0).nonzero()[:, 0]
            num_bg = self.sample_size - (labels > 0).sum()
            if len(bg_inds) > num_bg:
                # Sample (with replacement) the backgrounds to keep.
                enable_inds = bg_inds[paddle.randint(
                    0, len(bg_inds), size=(num_bg, ))]
                labels[enable_inds] = 0
            # bg_inds = paddle.nonzero(labels == 0)[:, 0]
        else:
            if len(gt_boxes) == 0 or anchors.shape[0] == 0:
                labels[:] = 0
            else:
                if fg_inds.numel() > 0:
                    fg_inds = fg_inds[:, 0]
                # Scatter zeros into the background slots (equivalent of
                # `labels[bg_inds] = 0` without fancy-index assignment).
                bg_inds = bg_inds.cast('int32')
                updates = paddle.zeros(bg_inds.shape, dtype='int32')
                labels = paddle.scatter(
                    labels.astype('int32'), index=bg_inds, updates=updates)
                # labels[bg_inds] = 0
        # labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
        # Encode regression targets only for foreground anchors.
        bbox_targets = paddle.zeros(
            shape=[num_anchors, self.box_coder.code_size], dtype=anchors.dtype)
        if gt_boxes.shape[0] > 0 and anchors.shape[0] > 0 and len(fg_inds) > 0:
            fg_gt_boxes = paddle.gather(
                gt_boxes, index=anchor_to_gt_argmax[fg_inds], axis=0)
            fg_anchors = paddle.gather(anchors, index=fg_inds, axis=0)
            bbox_targets[fg_inds, :] = self.box_coder.encode_paddle(
                fg_gt_boxes, fg_anchors)
        reg_weights = paddle.zeros(shape=[num_anchors], dtype=anchors.dtype)
        if self.norm_by_num_examples:
            # Normalize foreground weights by the number of sampled examples.
            num_examples = (labels >= 0).sum()
            num_examples = num_examples if num_examples > 1.0 else 1.0
            reg_weights[labels > 0] = 1.0 / num_examples
        else:
            reg_weights[labels > 0] = 1.0
        ret_dict = {
            'box_cls_labels': labels,
            'box_reg_targets': bbox_targets,
            'reg_weights': reg_weights,
        }
        return ret_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/target_assigner/anchor_generator.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
class AnchorGenerator(object):
    """Generate dense 3D anchors over a BEV grid, one anchor set per class.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/models/dense_heads/target_assigner/anchor_generator.py#L4
    """

    def __init__(self, anchor_range, anchor_generator_config):
        """
        Args:
            anchor_range (list[float]): point-cloud range
                [x_min, y_min, z_min, x_max, y_max, z_max].
            anchor_generator_config (list[dict]): per-class config with keys
                'anchor_sizes', 'anchor_rotations', 'anchor_bottom_heights'
                and optional 'align_center'.
        """
        super().__init__()
        self.anchor_generator_cfg = anchor_generator_config
        self.anchor_range = anchor_range
        # Per-class anchor parameters, kept as parallel lists.
        self.anchor_sizes = [
            config['anchor_sizes'] for config in anchor_generator_config
        ]
        self.anchor_rotations = [
            config['anchor_rotations'] for config in anchor_generator_config
        ]
        self.anchor_heights = [
            config['anchor_bottom_heights']
            for config in anchor_generator_config
        ]
        self.align_center = [
            config.get('align_center', False)
            for config in anchor_generator_config
        ]
        assert len(self.anchor_sizes) == len(self.anchor_rotations) == len(
            self.anchor_heights)
        self.num_of_anchor_sets = len(self.anchor_sizes)

    def generate_anchors(self, grid_sizes):
        """Build one anchor tensor per class for the given BEV grid sizes.

        Args:
            grid_sizes (list): (nx, ny) feature-map size per anchor set.

        Returns:
            tuple(list[Tensor], list[int]): anchors shaped
            [z, y, x, num_size, num_rot, 7] per class (boxes are
            (x, y, z_center, dx, dy, dz, rot)), and the number of anchors per
            BEV location for each class.
        """
        assert len(grid_sizes) == self.num_of_anchor_sets
        all_anchors = []
        num_anchors_per_location = []
        for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip(
                grid_sizes, self.anchor_sizes, self.anchor_rotations,
                self.anchor_heights, self.align_center):
            num_anchors_per_location.append(
                len(anchor_rotation) * len(anchor_size) * len(anchor_height))
            if align_center:
                # Anchors sit at cell centers.
                x_stride = (
                    self.anchor_range[3] - self.anchor_range[0]) / grid_size[0]
                y_stride = (
                    self.anchor_range[4] - self.anchor_range[1]) / grid_size[1]
                x_offset, y_offset = x_stride / 2, y_stride / 2
            else:
                # Anchors sit on cell corners, spanning the full range.
                x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (
                    grid_size[0] - 1)
                y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (
                    grid_size[1] - 1)
                x_offset, y_offset = 0, 0
            # 1e-5 keeps the range end inclusive despite float rounding.
            x_shifts = paddle.arange(
                self.anchor_range[0] + x_offset,
                self.anchor_range[3] + 1e-5,
                step=x_stride,
                dtype='float32',
            )
            y_shifts = paddle.arange(
                self.anchor_range[1] + y_offset,
                self.anchor_range[4] + 1e-5,
                step=y_stride,
                dtype='float32',
            )
            z_shifts = paddle.to_tensor(anchor_height, dtype='float32')
            num_anchor_size, num_anchor_rotation = anchor_size.__len__(
            ), anchor_rotation.__len__()
            anchor_rotation = paddle.to_tensor(anchor_rotation, dtype='float32')
            anchor_size = paddle.to_tensor(anchor_size, dtype='float32')
            # Cartesian product of the three shift axes.
            x_shifts, y_shifts, z_shifts = paddle.meshgrid(
                [x_shifts, y_shifts, z_shifts])  # [x_grid, y_grid, z_grid]
            anchors = paddle.stack((x_shifts, y_shifts, z_shifts),
                                   axis=-1)  # [x, y, z, 3]
            # Broadcast centers against every anchor size...
            anchors = anchors.unsqueeze([3]).tile(
                [1, 1, 1, paddle.shape(anchor_size)[0], 1])
            anchor_size = anchor_size.reshape([1, 1, 1, -1, 3]).tile(
                [*paddle.shape(anchors)[0:3], 1, 1])
            anchors = paddle.concat((anchors, anchor_size), axis=-1)
            # ...and then against every rotation.
            anchors = anchors.unsqueeze([4]).tile(
                [1, 1, 1, 1, num_anchor_rotation, 1])
            anchor_rotation = anchor_rotation.reshape([1, 1, 1, 1, -1, 1]).tile(
                [*paddle.shape(anchors)[0:3], num_anchor_size, 1, 1])
            anchors = paddle.concat((anchors, anchor_rotation),
                                    axis=-1)  # [x, y, z, num_size, num_rot, 7]
            # Reorder spatial axes from (x, y, z, ...) to (z, y, x, ...).
            anchors = anchors.transpose([2, 1, 0, 3, 4, 5])
            #anchors = anchors.view(-1, anchors.shape[-1])
            anchors[..., 2] += anchors[..., 5] / 2  # shift to box centers
            all_anchors.append(anchors)
        return all_anchors, num_anchors_per_location
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/target_assigner/hungarian_assigner.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
# Copyright (c) 2021 Wang, Yue
# ------------------------------------------------------------------------
# Modified from mmdetection (https://github.com/open-mmlab/mmdetection)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import numpy as np
import paddle
from paddle3d.models.heads.dense_heads.match_costs import (BBox3DL1Cost,
FocalLossCost,
IoUCost)
from paddle3d.sample import _EasyDict
try:
from scipy.optimize import linear_sum_assignment
except ImportError:
linear_sum_assignment = None
def nan_to_num(x, nan=0.0, posinf=None, neginf=None, name=None):
    """Replace NaN, +inf and -inf entries of ``x`` with finite values.

    Mirrors ``torch.nan_to_num``: NaNs become ``nan``, positive infinities
    become ``posinf`` (dtype max when None) and negative infinities become
    ``neginf`` (dtype min when None). Only float32/float64 inputs are allowed.

    Args:
        x (Tensor): float32 or float64 input tensor.
        nan (float): replacement for NaN entries.
        posinf (float|None): replacement for +inf; dtype max when None.
        neginf (float|None): replacement for -inf; dtype min when None.
        name (str|None): unused, kept for API compatibility.

    Returns:
        paddle.Tensor: tensor of the same shape/dtype with finite entries.
    """
    assert x.dtype in [paddle.float32, paddle.float64]
    # NOTE(tiancaishaonvjituizi): it seems that paddle handles the dtype of
    # python float number incorrectly, so we explicitly construct tensors here.
    np_dtype = np.float32 if x.dtype == paddle.float32 else np.float64
    pos_sentinel = paddle.full_like(x, float("+inf"))
    neg_sentinel = paddle.full_like(x, float("-inf"))
    nan_fill = paddle.full_like(x, nan)
    pos_fill = paddle.full_like(
        x, np.finfo(np_dtype).max if posinf is None else posinf)
    neg_fill = paddle.full_like(
        x, np.finfo(np_dtype).min if neginf is None else neginf)
    out = paddle.where(paddle.isnan(x), nan_fill, x)
    out = paddle.where(out == pos_sentinel, pos_fill, out)
    out = paddle.where(out == neg_sentinel, neg_fill, out)
    return out
def normalize_bbox(bboxes, pc_range):
    """Convert 3D boxes to the normalized regression encoding.

    Reorders (cx, cy, cz, w, l, h, rot[, vx, vy]) into
    (cx, cy, log w, log l, cz, log h, sin rot, cos rot[, vx, vy]).

    Args:
        bboxes (Tensor): (..., 7) or (..., 9) boxes; dims 3:6 must be positive.
        pc_range: unused, kept for API compatibility.

    Returns:
        paddle.Tensor: (..., 8) or (..., 10) normalized boxes.
    """
    yaw = bboxes[..., 6:7]
    components = [
        bboxes[..., 0:1],        # cx
        bboxes[..., 1:2],        # cy
        bboxes[..., 3:4].log(),  # log w
        bboxes[..., 4:5].log(),  # log l
        bboxes[..., 2:3],        # cz
        bboxes[..., 5:6].log(),  # log h
        yaw.sin(),
        yaw.cos(),
    ]
    if bboxes.shape[-1] > 7:
        # Velocity channels pass through untouched.
        components.append(bboxes[..., 7:8])  # vx
        components.append(bboxes[..., 8:9])  # vy
    return paddle.concat(components, axis=-1)
class HungarianAssigner3D(object):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
    classification cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost_weight (int | float, optional): scale factor for the focal
            classification cost. Default 2.0.
        reg_cost_weight (int | float, optional): scale factor for the L1
            regression cost. Default 0.25.
        iou_cost_weight (int | float, optional): scale factor for the IoU
            cost. Default 0.0 (the IoU term is effectively disabled).
        pc_range (list[float], optional): point-cloud range passed to
            ``normalize_bbox``.
    """

    def __init__(self,
                 cls_cost_weight=2.,
                 reg_cost_weight=0.25,
                 iou_cost_weight=0.0,
                 pc_range=None):
        self.cls_cost = FocalLossCost(weight=cls_cost_weight)
        self.reg_cost = BBox3DL1Cost(weight=reg_cost_weight)
        self.iou_cost = IoUCost(weight=iou_cost_weight)
        self.pc_range = pc_range

    def assign(self,
               bbox_pred,
               cls_pred,
               gt_bboxes,
               gt_labels,
               gt_bboxes_ignore=None,
               eps=1e-7):
        """Computes one-to-one matching based on the weighted costs.

        Args:
            bbox_pred (Tensor): (num_query, >=8) normalized box predictions.
            cls_pred (Tensor): (num_query, num_class) classification logits.
            gt_bboxes (Tensor): (num_gt, 7 or 9) ground-truth boxes.
            gt_labels (Tensor): (num_gt,) ground-truth class indices.
            gt_bboxes_ignore: must be None (ignoring boxes is unsupported).
            eps (float): unused numerical epsilon kept for API compatibility.

        Returns:
            _EasyDict: with ``num_gts``, ``gt_inds`` (0 = background,
            i+1 = matched to gt i) and ``labels`` (matched class or -1).
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        num_gts, num_bboxes = gt_bboxes.shape[0], bbox_pred.shape[0]
        # 1. assign -1 by default
        assigned_gt_inds = paddle.full((num_bboxes, ), -1, dtype='int64')
        assigned_labels = paddle.full((num_bboxes, ), -1, dtype='int64')
        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            return _EasyDict(
                num_gts=num_gts,
                gt_inds=assigned_gt_inds,
                labels=assigned_labels)
        # 2. compute the weighted costs
        # classification and bboxcost.
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        # regression L1 cost
        normalized_gt_bboxes = normalize_bbox(gt_bboxes, self.pc_range)
        reg_cost = self.reg_cost(bbox_pred[:, :8], normalized_gt_bboxes[:, :8])
        # weighted sum of above two costs (the IoU cost is not applied here)
        cost = cls_cost + reg_cost
        # 3. do Hungarian matching on CPU using linear_sum_assignment
        cost = cost.detach()
        if linear_sum_assignment is None:
            raise ImportError('Please run "pip install scipy" '
                              'to install scipy first.')
        # Sanitize non-finite costs so scipy's solver cannot fail on them.
        cost = nan_to_num(cost, nan=100.0, posinf=100.0, neginf=-100.0).numpy()
        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = paddle.to_tensor(matched_row_inds)
        matched_col_inds = paddle.to_tensor(matched_col_inds)
        # 4. assign backgrounds and foregrounds
        # assign all indices to backgrounds first
        assigned_gt_inds[:] = 0
        # assign foregrounds based on matching results (1-based gt indices)
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return _EasyDict(
            num_gts=num_gts, gt_inds=assigned_gt_inds, labels=assigned_labels)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/match_costs/match_cost.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from mmdetection (https://github.com/open-mmlab/mmdetection)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import paddle
import paddle.nn.functional as F
def fp16_clamp(x, min=None, max=None):
    # Clamp helper that works around limited float16 support for clip on CPU:
    # CPU fp16 tensors are upcast to float32, clipped, then cast back.
    # NOTE(review): `x.is_cuda` and `x.float()` look like torch-style APIs kept
    # from the original mmdetection port — confirm they exist on this paddle
    # version (paddle conventionally uses `x.place` and `x.astype`).
    if not x.is_cuda and x.dtype == paddle.float16:
        return x.float().clip(min, max).astype(paddle.float16)
    return x.clip(min, max)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
    """Calculate overlap between two sets of 2D boxes.

    Args:
        bboxes1 (Tensor): boxes in (x1, y1, x2, y2) format, shape
            (..., rows, 4), or empty.
        bboxes2 (Tensor): boxes in (x1, y1, x2, y2) format, shape
            (..., cols, 4), or empty.
        mode (str): 'iou' (intersection over union), 'iof' (intersection
            over foreground, i.e. area of bboxes1) or 'giou' (generalized IoU).
        is_aligned (bool): if True, compare boxes pairwise (rows must equal
            cols); otherwise compute the full rows x cols matrix.
        eps (float): lower bound on denominators to avoid division by zero.

    Returns:
        paddle.Tensor: overlaps of shape (..., rows) when aligned, else
        (..., rows, cols).
    """
    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
    # Either the boxes are empty or the length of boxes' last dimension is 4.
    # Fixed torch-isms from the mmdetection port: paddle tensors have no
    # callable `.size(dim)`; use `.shape` indexing instead.
    assert (bboxes1.shape[-1] == 4 or bboxes1.shape[0] == 0)
    assert (bboxes2.shape[-1] == 4 or bboxes2.shape[0] == 0)
    # Batch dim must be the same
    # Batch dim: (B1, B2, ... Bn)
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]
    rows = bboxes1.shape[-2]
    cols = bboxes2.shape[-2]
    if is_aligned:
        assert rows == cols
    if rows * cols == 0:
        # Degenerate case: return an empty result of the right shape.
        # (`Tensor.new(...)` is a torch API with no paddle equivalent, and
        # `batch_shape` is a list, so concatenate lists rather than tuples.)
        if is_aligned:
            return paddle.zeros(
                list(batch_shape) + [rows], dtype=bboxes1.dtype)
        else:
            return paddle.zeros(
                list(batch_shape) + [rows, cols], dtype=bboxes1.dtype)
    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
        bboxes1[..., 3] - bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
        bboxes2[..., 3] - bboxes2[..., 1])
    if is_aligned:
        # Elementwise max/min must be paddle.maximum/minimum; paddle.max/min
        # are reductions and do not accept a second tensor.
        lt = paddle.maximum(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]
        rb = paddle.minimum(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]
        wh = fp16_clamp(rb - lt, min=0)
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1 + area2 - overlap
        else:
            union = area1
        if mode == 'giou':
            enclosed_lt = paddle.minimum(bboxes1[..., :2], bboxes2[..., :2])
            enclosed_rb = paddle.maximum(bboxes1[..., 2:], bboxes2[..., 2:])
    else:
        lt = paddle.maximum(bboxes1[..., :, None, :2],
                            bboxes2[..., None, :, :2])  # [B, rows, cols, 2]
        rb = paddle.minimum(bboxes1[..., :, None, 2:],
                            bboxes2[..., None, :, 2:])  # [B, rows, cols, 2]
        wh = fp16_clamp(rb - lt, min=0)
        overlap = wh[..., 0] * wh[..., 1]
        if mode in ['iou', 'giou']:
            union = area1[..., None] + area2[..., None, :] - overlap
        else:
            union = area1[..., None]
        if mode == 'giou':
            enclosed_lt = paddle.minimum(bboxes1[..., :, None, :2],
                                         bboxes2[..., None, :, :2])
            enclosed_rb = paddle.maximum(bboxes1[..., :, None, 2:],
                                         bboxes2[..., None, :, 2:])
    # Clamp denominators from below (`Tensor.new_tensor` is a torch API).
    eps = paddle.to_tensor([eps], dtype=union.dtype)
    union = paddle.maximum(union, eps)
    ious = overlap / union
    if mode in ['iou', 'iof']:
        return ious
    # calculate gious: penalize by the empty area of the enclosing box.
    enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = paddle.maximum(enclose_area, eps)
    gious = ious - (enclose_area - union) / enclose_area
    return gious
def pairwise_dist(A, B):
    """Return the pairwise L1 (Manhattan) distance between rows of A and B.

    Broadcasts A as (..., n, 1, d) against B as (..., 1, m, d) and sums the
    absolute coordinate differences, yielding an (..., n, m) matrix.
    """
    rows = A.unsqueeze(-2)
    cols = B.unsqueeze(-3)
    return (rows - cols).abs().sum(-1)
def bbox_cxcywh_to_xyxy(bbox):
    """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).

    Args:
        bbox (Tensor): Shape (n, 4) for bboxes.

    Returns:
        Tensor: Converted bboxes.
    """
    cx, cy, w, h = bbox.split((1, 1, 1, 1), axis=-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return paddle.concat(corners, axis=-1)
class BBox3DL1Cost(object):
    """Weighted L1 matching cost between predicted and ground-truth 3D boxes.

    Args:
        weight (int | float, optional): loss_weight
    """

    def __init__(self, weight=1.):
        self.weight = weight

    def __call__(self, bbox_pred, gt_bboxes):
        """Return the weighted pairwise L1 cost matrix.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            gt_bboxes (Tensor): Ground truth boxes with normalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].

        Returns:
            paddle.Tensor: bbox_cost value with weight
        """
        # p = 1 (Manhattan distance between every pred/gt pair).
        raw_cost = pairwise_dist(bbox_pred, gt_bboxes)
        return self.weight * raw_cost
class ClassificationCost:
    """Softmax classification matching cost.

    Args:
        weight (int | float, optional): loss_weight
    """

    def __init__(self, weight=1.):
        self.weight = weight

    def __call__(self, cls_pred, gt_labels):
        """Return the weighted classification cost matrix.

        Args:
            cls_pred (Tensor): Predicted classification logits, shape
                (num_query, num_class).
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).

        Returns:
            paddle.Tensor: cls_cost value with weight
        """
        # Following the official DETR repo: instead of the NLL used by the
        # loss, approximate the cost as 1 - p[gt_label]. The constant 1 does
        # not change the matching, so only -p[gt_label] is kept.
        probs = cls_pred.softmax(-1)
        raw_cost = -probs[:, gt_labels]
        return self.weight * raw_cost
class IoUCost:
    """IoU-based matching cost.

    Args:
        iou_mode (str, optional): iou mode such as 'iou' | 'giou'
        weight (int | float, optional): loss weight
    """

    def __init__(self, iou_mode='giou', weight=1.):
        self.weight = weight
        self.iou_mode = iou_mode

    def __call__(self, bboxes, gt_bboxes):
        """Return the weighted negative-overlap cost matrix.

        Args:
            bboxes (Tensor): Predicted boxes with unnormalized coordinates
                (x1, y1, x2, y2). Shape (num_query, 4).
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape (num_gt, 4).

        Returns:
            paddle.Tensor: iou_cost value with weight
        """
        # overlaps: [num_bboxes, num_gt]; higher overlap => lower cost.
        # (The constant 1 of "1 - IoU" does not change the matching.)
        pairwise_ious = bbox_overlaps(
            bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
        return self.weight * (-pairwise_ious)
class FocalLossCost:
    """FocalLossCost.

    Matching cost derived from the (binary) focal loss, used by Hungarian
    matchers. Supports dense logits or binary mask inputs.

    Args:
        weight (int | float, optional): loss_weight
        alpha (int | float, optional): focal_loss alpha
        gamma (int | float, optional): focal_loss gamma
        eps (float, optional): default 1e-12
        binary_input (bool, optional): Whether the input is binary,
            default False.
    """

    def __init__(self,
                 weight=1.,
                 alpha=0.25,
                 gamma=2,
                 eps=1e-12,
                 binary_input=False):
        self.weight = weight
        self.alpha = alpha
        self.gamma = gamma
        self.eps = eps
        self.binary_input = binary_input

    def _focal_loss_cost(self, cls_pred, gt_labels):
        """
        Args:
            cls_pred (Tensor): Predicted classification logits, shape
                (num_query, num_class).
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).

        Returns:
            paddle.Tensor: cls_cost value with weight
        """
        cls_pred = F.sigmoid(cls_pred)
        # Per-class focal terms for predicting negative / positive.
        neg_cost = -(1 - cls_pred + self.eps).log() * (
            1 - self.alpha) * cls_pred.pow(self.gamma)
        pos_cost = -(cls_pred + self.eps).log() * self.alpha * (
            1 - cls_pred).pow(self.gamma)
        # Gather the gt-label columns: cost = pos_term - neg_term, shape
        # (num_query, num_gt).
        cls_cost = paddle.gather(pos_cost, gt_labels, 1) - paddle.gather(
            neg_cost, gt_labels, 1)
        return cls_cost * self.weight

    def _mask_focal_loss_cost(self, cls_pred, gt_labels):
        """
        Args:
            cls_pred (Tensor): Predicted classfication logits
                in shape (num_query, d1, ..., dn), dtype=float32.
            gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn),
                dtype='int64'. Labels should be binary.

        Returns:
            Tensor: Focal cost matrix with weight in shape\
                (num_query, num_gt).
        """
        # NOTE(review): `.flatten(1)` and `.float()` are torch-style calls
        # carried over from mmdetection — confirm they behave as intended on
        # this paddle version (paddle's flatten uses start_axis; `.astype`
        # is the conventional cast).
        cls_pred = cls_pred.flatten(1)
        gt_labels = gt_labels.flatten(1).float()
        n = cls_pred.shape[1]
        cls_pred = cls_pred.sigmoid()
        neg_cost = -(1 - cls_pred + self.eps).log() * (
            1 - self.alpha) * cls_pred.pow(self.gamma)
        pos_cost = -(cls_pred + self.eps).log() * self.alpha * (
            1 - cls_pred).pow(self.gamma)
        # Pair every query with every gt mask; normalize by mask size n.
        cls_cost = paddle.einsum('nc,mc->nm', pos_cost, gt_labels) + \
            paddle.einsum('nc,mc->nm', neg_cost, (1 - gt_labels))
        return cls_cost / n * self.weight

    def __call__(self, cls_pred, gt_labels):
        """
        Args:
            cls_pred (Tensor): Predicted classfication logits.
            gt_labels (Tensor)): Labels.

        Returns:
            Tensor: Focal cost matrix with weight in shape\
                (num_query, num_gt).
        """
        if self.binary_input:
            return self._mask_focal_loss_cost(cls_pred, gt_labels)
        else:
            return self._focal_loss_cost(cls_pred, gt_labels)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/match_costs/__init__.py
|
from .match_cost import BBox3DL1Cost, FocalLossCost, IoUCost
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/samplers/__init__.py
|
from .pseudo_sampler import PseudoSampler
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/dense_heads/samplers/pseudo_sampler.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
# Copyright (c) 2021 Wang, Yue
# ------------------------------------------------------------------------
# Modified from mmdetection (https://github.com/open-mmlab/mmdetection)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import paddle
class SamplingResult(object):
    """Bbox sampling result.

    Bundles the outcome of a sampler: indices of the sampled positive and
    negative predictions, their boxes, and the ground-truth boxes/labels
    matched to the positives.
    """

    def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
                 gt_flags):
        # Indices of sampled foreground / background predictions.
        self.pos_inds = pos_inds
        self.neg_inds = neg_inds
        self.pos_bboxes = bboxes[pos_inds]
        self.neg_bboxes = bboxes[neg_inds]
        # Flags marking positives that are gt boxes injected as proposals.
        self.pos_is_gt = gt_flags[pos_inds]
        self.num_gts = gt_bboxes.shape[0]
        # assign_result.gt_inds is 1-based (0 means background); shift to
        # 0-based gt indices for the positives.
        self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
        if gt_bboxes.numel() == 0:
            # hack for index error case
            assert self.pos_assigned_gt_inds.numel() == 0
            self.pos_gt_bboxes = paddle.empty_like(gt_bboxes).reshape([-1, 4])
        else:
            if len(gt_bboxes.shape) < 2:
                gt_bboxes = gt_bboxes.reshape([-1, 4])
            # Gt box matched to each sampled positive.
            self.pos_gt_bboxes = paddle.gather(
                gt_bboxes, self.pos_assigned_gt_inds.astype('int64'), axis=0)
        if assign_result.labels is not None:
            self.pos_gt_labels = assign_result.labels[pos_inds]
        else:
            self.pos_gt_labels = None
class PseudoSampler(object):
    """A pseudo sampler that does not do sampling actually.

    Every prediction the assigner marked as foreground/background is kept
    as-is; no subsampling is performed.
    """

    def __init__(self, **kwargs):
        pass

    def _sample_pos(self, **kwargs):
        """Sample positive samples (unsupported for the pseudo sampler)."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples (unsupported for the pseudo sampler)."""
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs):
        """Directly returns the positive and negative indices of samples.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            bboxes (paddle.Tensor): Bounding boxes
            gt_bboxes (paddle.Tensor): Ground truth boxes

        Returns:
            :obj:`SamplingResult`: sampler results
        """
        # gt_inds > 0 marks foreground, == 0 marks background (1-based ids).
        fg_mask = assign_result.gt_inds > 0
        bg_mask = assign_result.gt_inds == 0
        pos_inds = paddle.nonzero(fg_mask, as_tuple=False).squeeze(-1).unique()
        neg_inds = paddle.nonzero(bg_mask, as_tuple=False).squeeze(-1).unique()
        # No gt boxes are injected as proposals, so every flag stays 0.
        gt_flags = paddle.zeros([bboxes.shape[0]], dtype='int32')
        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                              assign_result, gt_flags)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/fcos_heads/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .fcos2d_head import *
from .fcos3d_head import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/fcos_heads/fcos2d_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from paddle.nn import functional as F
import paddle.distributed as dist
from paddle3d.models.losses import IOULoss, sigmoid_focal_loss
from paddle3d.models.layers import LayerListDial, Scale, FrozenBatchNorm2d, param_init
from paddle3d.apis import manager
__all__ = ["FCOS2DHead", "FCOS2DLoss", "FCOS2DInference"]
@manager.HEADS.add_component
class FCOS2DHead(nn.Layer):
    """FCOS 2D head producing class logits, 2D box regression and centerness.

    This code is based on https://github.com/TRI-ML/dd3d/blob/main/tridet/modeling/dd3d/fcos2d.py#L30

    Args:
        in_strides (list[int]): stride of each input FPN level.
        in_channels (list[int]): channels of each input level; all must match.
        num_classes (int): number of object classes.
        use_scale (bool): apply a learnable per-level scale to box regression.
        box2d_scale_init_factor (float): multiplier on the stride used to
            initialize each level's scale.
        version (str): tower layout, "v1" (conv with bias + norm) or
            "v2" (bias-free conv + per-level norm).
        num_cls_convs (int): number of convs in the classification tower.
        num_box_convs (int): number of convs in the box tower.
        use_deformable (bool): not supported; must be False.
        norm (str): "BN", or "FrozenBN" (v2 only).
    """

    def __init__(self,
                 in_strides,
                 in_channels,
                 num_classes=5,
                 use_scale=True,
                 box2d_scale_init_factor=1.0,
                 version="v2",
                 num_cls_convs=4,
                 num_box_convs=4,
                 use_deformable=False,
                 norm="BN"):
        super().__init__()
        self.in_strides = in_strides
        self.num_levels = len(in_strides)
        self.num_classes = num_classes
        self.use_scale = use_scale
        self.box2d_scale_init_factor = box2d_scale_init_factor
        self.version = version
        assert len(
            set(in_channels)) == 1, "Each level must have the same channel!"
        in_channels = in_channels[0]
        if use_deformable:
            raise ValueError("Not supported yet.")
        head_configs = {'cls': num_cls_convs, 'box2d': num_box_convs}
        for head_name, num_convs in head_configs.items():
            tower = []
            if self.version == "v1":
                for _ in range(num_convs):
                    conv_func = nn.Conv2D
                    # BUGFIX: paddle's Conv2D has no `bias` keyword (that is
                    # the torch API and raised a TypeError here); the default
                    # bias_attr already creates a bias term, matching the
                    # original intent of bias=True.
                    tower.append(
                        conv_func(
                            in_channels,
                            in_channels,
                            kernel_size=3,
                            stride=1,
                            padding=1))
                    if norm == "BN":
                        # One BN per FPN level, dispatched by LayerListDial.
                        tower.append(
                            LayerListDial([
                                nn.BatchNorm2D(
                                    in_channels,
                                    weight_attr=ParamAttr(
                                        regularizer=L2Decay(0.0)))
                                for _ in range(self.num_levels)
                            ]))
                    else:
                        raise NotImplementedError()
                    tower.append(nn.ReLU())
            elif self.version == "v2":
                for _ in range(num_convs):
                    # Each FPN level has its own batchnorm layer.
                    # "BN" is converted to "SyncBN" in distributed training
                    if norm == "BN":
                        norm_layer = LayerListDial([
                            nn.BatchNorm2D(
                                in_channels,
                                weight_attr=ParamAttr(regularizer=L2Decay(0.0)))
                            for _ in range(self.num_levels)
                        ])
                    elif norm == "FrozenBN":
                        norm_layer = LayerListDial([
                            FrozenBatchNorm2d(in_channels)
                            for _ in range(self.num_levels)
                        ])
                    else:
                        raise NotImplementedError()
                    tower.append(
                        nn.Conv2D(
                            in_channels,
                            in_channels,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            bias_attr=False))
                    tower.append(norm_layer)
                    tower.append(nn.ReLU())
            else:
                raise ValueError(f"Invalid FCOS2D version: {self.version}")
            self.add_sublayer(f'{head_name}_tower', nn.Sequential(*tower))
        # Final predictors: per-class logits, (l, t, r, b) box offsets and a
        # single-channel centerness map.
        self.cls_logits = nn.Conv2D(
            in_channels, self.num_classes, kernel_size=3, stride=1, padding=1)
        self.box2d_reg = nn.Conv2D(
            in_channels, 4, kernel_size=3, stride=1, padding=1)
        self.centerness = nn.Conv2D(
            in_channels, 1, kernel_size=3, stride=1, padding=1)
        if self.use_scale:
            if self.version == "v1":
                self.scales_reg = nn.LayerList([
                    Scale(init_value=stride * self.box2d_scale_init_factor)
                    for stride in self.in_strides
                ])
            else:
                self.scales_box2d_reg = nn.LayerList([
                    Scale(init_value=stride * self.box2d_scale_init_factor)
                    for stride in self.in_strides
                ])
        self.init_weights()

    def init_weights(self):
        """Kaiming-initialize tower convs and prediction convs."""
        for tower in [self.cls_tower, self.box2d_tower]:
            for l in tower.sublayers():
                if isinstance(l, nn.Conv2D):
                    param_init.kaiming_normal_init(
                        l.weight, mode='fan_out', nonlinearity='relu')
                    if l.bias is not None:
                        param_init.constant_init(l.bias, value=0.0)
        predictors = [self.cls_logits, self.box2d_reg, self.centerness]
        for layers in predictors:
            for l in layers.sublayers():
                if isinstance(l, nn.Conv2D):
                    param_init.kaiming_uniform_init(l.weight, a=1)
                    if l.bias is not None:  # depth head may not have bias.
                        param_init.constant_init(l.bias, value=0.0)

    def forward(self, x):
        """Run the head on a list of FPN feature maps.

        Args:
            x (list[Tensor]): one NCHW feature map per FPN level.

        Returns:
            tuple: (logits, box2d_reg, centerness, extra_output) — per-level
            prediction lists plus the classification tower activations.
        """
        logits = []
        box2d_reg = []
        centerness = []
        extra_output = {"cls_tower_out": []}
        for l, feature in enumerate(x):
            cls_tower_out = self.cls_tower(feature)
            bbox_tower_out = self.box2d_tower(feature)
            # 2D box
            logits.append(self.cls_logits(cls_tower_out))
            centerness.append(self.centerness(bbox_tower_out))
            box_reg = self.box2d_reg(bbox_tower_out)
            if self.use_scale:
                # TODO: to optimize the runtime, apply this scaling in inference (and loss compute) only on FG pixels?
                if self.version == "v1":
                    box_reg = self.scales_reg[l](box_reg)
                else:
                    box_reg = self.scales_box2d_reg[l](box_reg)
            # Note that we use relu, as in the improved FCOS, instead of exp.
            box2d_reg.append(F.relu(box_reg))
            extra_output['cls_tower_out'].append(cls_tower_out)
        return logits, box2d_reg, centerness, extra_output
def reduce_sum(tensor):
    """Sum a tensor across all distributed workers.

    Returns the input unchanged when running on a single process; otherwise
    returns a clone holding the element-wise sum over all ranks.
    """
    world_size = dist.get_world_size()
    if world_size <= 1:
        return tensor
    reduced = tensor.clone()
    dist.all_reduce(reduced)
    return reduced
def compute_ctrness_targets(reg_targets):
    """Compute FCOS centerness targets from (l, t, r, b) regression targets.

    Centerness = sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))),
    i.e. 1.0 at the box center and decaying toward the edges.

    Args:
        reg_targets (Tensor): (N, 4) left/top/right/bottom distances.

    Returns:
        paddle.Tensor: (N,) centerness targets (empty when N == 0).
    """
    if len(reg_targets) == 0:
        # BUGFIX: `Tensor.new_zeros` is a torch API that paddle tensors do not
        # provide; build the empty result with paddle.zeros instead.
        return paddle.zeros([len(reg_targets)], dtype=reg_targets.dtype)
    left_right = reg_targets[:, 0::2]
    top_bottom = reg_targets[:, 1::2]
    ctrness = (left_right.min(axis=-1) / left_right.max(axis=-1)) * \
        (top_bottom.min(axis=-1) / top_bottom.max(axis=-1))
    return paddle.sqrt(ctrness)
@manager.LOSSES.add_component
class FCOS2DLoss(nn.Layer):
    """
    This code is based on https://github.com/TRI-ML/dd3d/blob/main/tridet/modeling/dd3d/fcos2d.py#L159

    Computes the three FCOS 2D losses: focal classification loss, IoU-based
    2D box regression loss, and centerness loss.
    """

    def __init__(self,
                 alpha=0.25,
                 gamma=2.0,
                 loc_loss_type='giou',
                 num_classes=5):
        # alpha / gamma: focal-loss balancing and focusing parameters.
        # loc_loss_type: IoU variant used for the 2D box regression loss.
        super().__init__()
        self.focal_loss_alpha = alpha
        self.focal_loss_gamma = gamma
        self.box2d_reg_loss_fn = IOULoss(loc_loss_type)
        self.num_classes = num_classes

    def forward(self, logits, box2d_reg, centerness, targets):
        """Compute the 2D losses from per-level prediction maps.

        Args:
            logits: per-level classification maps.
            box2d_reg: per-level (l, t, r, b) regression maps.
            centerness: per-level centerness maps.
            targets: dict with flattened 'labels', 'box2d_reg_targets' and
                foreground indices 'pos_inds'.

        Returns:
            Tuple of (loss dict, extra-info dict). The extra info carries the
            centerness targets and shared loss denominator for the 3D losses.
        """
        labels = targets['labels']
        box2d_reg_targets = targets['box2d_reg_targets']
        pos_inds = targets["pos_inds"]

        if len(labels) != box2d_reg_targets.shape[0]:
            raise ValueError(
                f"The size of 'labels' and 'box2d_reg_targets' does not match: a={len(labels)}, b={box2d_reg_targets.shape[0]}"
            )

        # Flatten predictions
        logits = paddle.concat([
            x.transpose([0, 2, 3, 1]).reshape([-1, self.num_classes])
            for x in logits
        ],
                               axis=0)
        box2d_reg_pred = paddle.concat(
            [x.transpose([0, 2, 3, 1]).reshape([-1, 4]) for x in box2d_reg],
            axis=0)
        centerness_pred = paddle.concat(
            [x.transpose([0, 2, 3, 1]).reshape([-1]) for x in centerness],
            axis=0)

        # Classification loss
        # Average the foreground count across workers so the normalization is
        # consistent under distributed training.
        num_pos_local = pos_inds.numel()
        num_gpus = dist.get_world_size()
        total_num_pos = reduce_sum(paddle.to_tensor([num_pos_local])).item()
        num_pos_avg = max(total_num_pos / num_gpus, 1.0)

        # prepare one_hot
        cls_target = paddle.zeros_like(logits)
        if num_pos_local > 0:
            cls_target[pos_inds, labels[pos_inds]] = 1

        loss_cls = sigmoid_focal_loss(
            logits,
            cls_target,
            alpha=self.focal_loss_alpha,
            gamma=self.focal_loss_gamma,
            reduction="sum",
        ) / num_pos_avg

        if pos_inds.numel() == 0:
            # No foreground: keep the other terms in the graph with zero value
            # so gradients stay well defined.
            losses = {
                "loss_cls": loss_cls,
                "loss_box2d_reg": box2d_reg_pred.sum() * 0.,
                "loss_centerness": centerness_pred.sum() * 0.,
            }
            return losses, {}

        # NOTE: The rest of losses only consider foreground pixels.
        if num_pos_local == 1:
            # Keep a leading axis when indexing with a single index.
            box2d_reg_pred = box2d_reg_pred[pos_inds].unsqueeze(0)
            box2d_reg_targets = box2d_reg_targets[pos_inds].unsqueeze(0)
        else:
            box2d_reg_pred = box2d_reg_pred[pos_inds]
            box2d_reg_targets = box2d_reg_targets[pos_inds]
        centerness_pred = centerness_pred[pos_inds]

        # Compute centerness targets here using 2D regression targets of foreground pixels.
        centerness_targets = compute_ctrness_targets(box2d_reg_targets)

        # Denominator for all foreground losses.
        ctrness_targets_sum = centerness_targets.sum()
        loss_denom = max(
            reduce_sum(ctrness_targets_sum).item() / num_gpus, 1e-6)

        # 2D box regression loss
        loss_box2d_reg = self.box2d_reg_loss_fn(
            box2d_reg_pred, box2d_reg_targets, centerness_targets) / loss_denom

        # Centerness loss
        loss_centerness = F.binary_cross_entropy_with_logits(
            centerness_pred, centerness_targets, reduction="sum") / num_pos_avg

        loss_dict = {
            "loss_cls": loss_cls,
            "loss_box2d_reg": loss_box2d_reg,
            "loss_centerness": loss_centerness
        }
        extra_info = {
            "loss_denom": loss_denom,
            "centerness_targets": centerness_targets
        }

        return loss_dict, extra_info
@manager.MODELS.add_component
class FCOS2DInference():
    """
    This code is based on https://github.com/TRI-ML/dd3d/blob/main/tridet/modeling/dd3d/fcos2d.py#L242

    Post-processing for FCOS2D predictions: per-level candidate selection and
    pre-NMS top-k, followed by multi-class NMS and a global per-image cap.
    """

    def __init__(self,
                 thresh_with_ctr=True,
                 pre_nms_thresh=0.05,
                 pre_nms_topk=1000,
                 post_nms_topk=100,
                 nms_thresh=0.75,
                 num_classes=5):
        """
        Args:
            thresh_with_ctr (bool): if True, multiply classification scores by
                centerness scores before applying the candidate threshold.
            pre_nms_thresh (float): score threshold for candidate selection.
            pre_nms_topk (int): max candidates per image per level before NMS.
            post_nms_topk (int): max detections per image after NMS.
            nms_thresh (float): IoU threshold used by NMS; <= 0 disables NMS.
            num_classes (int): number of foreground classes.
        """
        self.thresh_with_ctr = thresh_with_ctr
        self.pre_nms_thresh = pre_nms_thresh
        self.pre_nms_topk = pre_nms_topk
        self.post_nms_topk = post_nms_topk
        self.nms_thresh = nms_thresh
        self.num_classes = num_classes

    def __call__(self, logits, box2d_reg, centerness, locations):
        """Decode per-level prediction maps into per-image instance dicts.

        Returns:
            pred_instances: List[List[dict]], shape = (L, B).
            extra_info: per-level dicts with candidate bookkeeping used by the
                3D inference stage.
        """
        pred_instances = []  # List[List[dict]], shape = (L, B)
        extra_info = []
        for lvl, (logits_lvl, box2d_reg_lvl, centerness_lvl, locations_lvl) in \
                enumerate(zip(logits, box2d_reg, centerness, locations)):
            instances_per_lvl, extra_info_per_lvl = self.forward_for_single_feature_map(
                logits_lvl, box2d_reg_lvl, centerness_lvl,
                locations_lvl)  # List of dict; one for each image.
            for instances_per_im in instances_per_lvl:
                # Record which FPN level each detection came from.
                instances_per_im['fpn_levels'] = paddle.ones(
                    [instances_per_im['pred_boxes'].shape[0]], dtype='float64')
                if instances_per_im['pred_boxes'].shape[0] != 0:
                    instances_per_im['fpn_levels'] *= lvl
            pred_instances.append(instances_per_lvl)
            extra_info.append(extra_info_per_lvl)
        return pred_instances, extra_info

    def forward_for_single_feature_map(self, logits, box2d_reg, centerness,
                                       locations):
        """Select candidate detections for one feature level.

        Args:
            logits: (N, C, H, W) classification map.
            box2d_reg: (N, 4, H, W) (l, t, r, b) regression map.
            centerness: (N, 1, H, W) centerness map.
            locations: (H*W, 2) pixel coordinates of the level's locations.

        Returns:
            Tuple of (per-image instance dicts, bookkeeping dict for the 3D
            stage: foreground/class indices and top-k selection indices).
        """
        N, C, _, __ = logits.shape

        # put in the same format as locations
        scores = F.sigmoid(logits.transpose([0, 2, 3, 1]).reshape([N, -1, C]))
        box2d_reg = box2d_reg.transpose([0, 2, 3, 1]).reshape([N, -1, 4])
        centerness = F.sigmoid(
            centerness.transpose([0, 2, 3, 1]).reshape([N, -1]))

        # if self.thresh_with_ctr is True, we multiply the classification
        # scores with centerness scores before applying the threshold.
        if self.thresh_with_ctr:
            scores = scores * centerness[:, :, None]

        candidate_mask = scores > self.pre_nms_thresh
        pre_nms_topk = candidate_mask.reshape([N, -1]).sum(1)
        pre_nms_topk = pre_nms_topk.clip(max=self.pre_nms_topk)

        if not self.thresh_with_ctr:
            scores = scores * centerness[:, :, None]

        results = []
        all_fg_inds_per_im, all_topk_indices, all_class_inds_per_im = [], [], []
        for i in range(N):
            scores_per_im = scores[i]
            candidate_mask_per_im = candidate_mask[i]
            scores_per_im = scores_per_im[candidate_mask_per_im]

            # nonzero of the (loc, class) mask: column 0 = location index,
            # column 1 = class index.
            candidate_inds_per_im = candidate_mask_per_im.nonzero(
                as_tuple=False)
            fg_inds_per_im = candidate_inds_per_im[:, 0]
            class_inds_per_im = candidate_inds_per_im[:, 1]

            # Cache info here.
            all_fg_inds_per_im.append(fg_inds_per_im)
            all_class_inds_per_im.append(class_inds_per_im)

            if fg_inds_per_im.shape[0] == 0:
                box2d_reg_per_im = paddle.zeros([0, 4])
                locations_per_im = paddle.zeros([0, 2])
            else:
                box2d_reg_per_im = box2d_reg[i][fg_inds_per_im]
                locations_per_im = locations[fg_inds_per_im]

            # Keep only the top-k highest-scoring candidates if there are
            # more than the per-level budget.
            pre_nms_topk_per_im = pre_nms_topk[i]
            if candidate_mask_per_im.sum().item() > pre_nms_topk_per_im.item():
                scores_per_im, topk_indices = \
                    scores_per_im.topk(pre_nms_topk_per_im, sorted=False)
                class_inds_per_im = class_inds_per_im[topk_indices]
                box2d_reg_per_im = box2d_reg_per_im[topk_indices]
                locations_per_im = locations_per_im[topk_indices]
            else:
                topk_indices = None
            all_topk_indices.append(topk_indices)

            # Decode (l, t, r, b) distances into absolute (x1, y1, x2, y2).
            if locations_per_im.shape[0] == 0:
                detections = paddle.zeros([0, 4])
            elif len(locations_per_im.shape) == 1:
                # Single candidate: restore the leading axis before decoding.
                locations_per_im = locations_per_im.unsqueeze(0)
                box2d_reg_per_im = box2d_reg_per_im.unsqueeze(0)
                detections = paddle.stack([
                    locations_per_im[:, 0] - box2d_reg_per_im[:, 0],
                    locations_per_im[:, 1] - box2d_reg_per_im[:, 1],
                    locations_per_im[:, 0] + box2d_reg_per_im[:, 2],
                    locations_per_im[:, 1] + box2d_reg_per_im[:, 3],
                ],
                                          axis=1)
            else:
                detections = paddle.stack([
                    locations_per_im[:, 0] - box2d_reg_per_im[:, 0],
                    locations_per_im[:, 1] - box2d_reg_per_im[:, 1],
                    locations_per_im[:, 0] + box2d_reg_per_im[:, 2],
                    locations_per_im[:, 1] + box2d_reg_per_im[:, 3],
                ],
                                          axis=1)

            instances = {}
            instances['pred_boxes'] = detections
            if scores_per_im.shape[0] == 0:
                instances['scores'] = scores_per_im
            else:
                # sqrt undoes the score * centerness product scale.
                instances['scores'] = paddle.sqrt(scores_per_im)
            instances['pred_classes'] = class_inds_per_im
            instances['locations'] = locations_per_im

            results.append(instances)

        extra_info = {
            "fg_inds_per_im": all_fg_inds_per_im,
            "class_inds_per_im": all_class_inds_per_im,
            "topk_indices": all_topk_indices
        }
        return results, extra_info

    def nms_and_top_k(self, instances_per_im, score_key_for_nms="scores"):
        """Apply multi-class NMS, then cap the number of detections per image.

        Args:
            instances_per_im: list of per-image instance dicts; each must hold
                'pred_boxes', 'pred_classes', 'scores', 'scores_3d' and
                'pred_boxes3d'.
            score_key_for_nms (str): key of the score used to rank boxes.

        Returns:
            List of filtered per-image instance dicts.
        """
        results = []
        for instances in instances_per_im:
            if self.nms_thresh > 0:
                # Multiclass NMS.
                if instances['pred_boxes'].shape[0] == 0:
                    results.append(instances)
                    continue
                keep = paddle.vision.ops.nms(
                    boxes=instances['pred_boxes'],
                    iou_threshold=self.nms_thresh,
                    scores=instances[score_key_for_nms],
                    category_idxs=instances['pred_classes'],
                    # Generalized: derive the category ids from num_classes
                    # instead of the previously hard-coded [0, 1, 2, 3, 4].
                    categories=list(range(self.num_classes)))
                if keep.shape[0] == 0:
                    # Nothing survived NMS: emit empty fields and move on.
                    instances['pred_boxes'] = paddle.zeros([0, 4])
                    instances['pred_classes'] = paddle.zeros([0])
                    instances['scores'] = paddle.zeros([0])
                    instances['scores_3d'] = paddle.zeros([0])
                    instances['pred_boxes3d'] = paddle.zeros([0, 10])
                    results.append(instances)
                    continue
                instances['pred_boxes'] = instances['pred_boxes'][keep]
                instances['pred_classes'] = instances['pred_classes'][keep]
                instances['scores'] = instances['scores'][keep]
                instances['scores_3d'] = instances['scores_3d'][keep]
                instances['pred_boxes3d'] = instances['pred_boxes3d'][keep]
                if len(instances['pred_boxes'].shape) == 1:
                    # A single kept box loses its leading axis; restore it.
                    instances['pred_boxes'] = instances[
                        'pred_boxes'].unsqueeze(0)
                    instances['pred_boxes3d'] = instances[
                        'pred_boxes3d'].unsqueeze(0)

            num_detections = instances['pred_boxes3d'].shape[0]

            # Limit to max_per_image detections **over all classes**
            if num_detections > self.post_nms_topk > 0:
                scores = instances['scores']
                # kthvalue gives the score threshold that keeps exactly
                # post_nms_topk detections.
                image_thresh, _ = paddle.kthvalue(
                    scores, num_detections - self.post_nms_topk + 1)
                keep = scores >= image_thresh.item()
                keep = paddle.nonzero(keep).squeeze(1)
                instances['pred_boxes'] = instances['pred_boxes'][keep]
                instances['pred_classes'] = instances['pred_classes'][keep]
                instances['scores'] = instances['scores'][keep]
                instances['scores_3d'] = instances['scores_3d'][keep]
                instances['pred_boxes3d'] = instances['pred_boxes3d'][keep]
                if len(instances['pred_boxes'].shape) == 1:
                    # Bug fix: the original called `.unsqueeze(0)` here without
                    # assigning the result, which is a no-op; assign it back so
                    # the single remaining box keeps its leading axis.
                    instances['pred_boxes'] = instances[
                        'pred_boxes'].unsqueeze(0)
                    instances['pred_boxes3d'] = instances[
                        'pred_boxes3d'].unsqueeze(0)

            results.append(instances)
        return results
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/fcos_heads/fcos3d_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
from paddle import ParamAttr
from paddle.regularizer import L2Decay
import paddle.nn.functional as F
from paddle3d.models.losses import DisentangledBox3DLoss, unproject_points2d
from paddle3d.models.layers import LayerListDial, Offset, Scale, FrozenBatchNorm2d, param_init
from paddle3d.apis import manager
from paddle3d.utils.logger import logger
from paddle3d.utils.transform import matrix_to_quaternion, quaternion_to_matrix
__all__ = ["FCOS3DHead", "FCOS3DLoss", "FCOS3DInference"]
PI = 3.14159265358979323846
EPS = 1e-7
def allocentric_to_egocentric(quat, proj_ctr, inv_intrinsics):
    """Convert allocentric orientation quaternions to egocentric ones.

    Args:
        quat (paddle.Tensor with shape (N, 4)): Batch of (allocentric) quaternions.
        proj_ctr (paddle.Tensor with shape (N, 2)): Projected centers. xy coordinates.
        inv_intrinsics (paddle.Tensor with shape (N, 3, 3)): Inverted intrinsics.
    """
    rot_obj_to_local = quaternion_to_matrix(quat)

    # The viewing ray through the projected center defines the local z-axis.
    ray = unproject_points2d(proj_ctr, inv_intrinsics)
    z_axis = ray / paddle.linalg.norm(ray, axis=1, keepdim=True)

    # Gram-Schmidt: local_y = global_y - (global_y . local_z) * local_z.
    y_axis = paddle.to_tensor([[0., 1., 0.]]) - z_axis[:, 1:2] * z_axis
    y_axis = y_axis / paddle.linalg.norm(y_axis, axis=1, keepdim=True)
    x_axis = paddle.cross(y_axis, z_axis, axis=1)

    # local -> global, then obj -> global.
    rot_local_to_global = paddle.stack([x_axis, y_axis, z_axis], axis=-1)
    rot_obj_to_global = paddle.bmm(rot_local_to_global, rot_obj_to_local)

    egocentric_quat = matrix_to_quaternion(rot_obj_to_global)

    # Guard against numerical drift away from unit norm.
    quat_norm = paddle.linalg.norm(egocentric_quat, axis=1, keepdim=True)
    if not paddle.allclose(quat_norm, paddle.ones_like(quat_norm), atol=1e-3):
        logger.warning(
            f"Some of the input quaternions are not unit norm: min={quat_norm.min()}, max={quat_norm.max()}; therefore normalizing."
        )
        egocentric_quat = egocentric_quat / quat_norm.clip(min=EPS)
    return egocentric_quat
def predictions_to_boxes3d(quat,
                           proj_ctr,
                           depth,
                           size,
                           locations,
                           inv_intrinsics,
                           canon_box_sizes,
                           min_depth,
                           max_depth,
                           scale_depth_by_focal_lengths_factor,
                           scale_depth_by_focal_lengths=True,
                           quat_is_allocentric=True,
                           depth_is_distance=False):
    """Decode raw 3D-head outputs into 10-DoF boxes (quat, proj_ctr, depth, size)."""
    # Normalize the quaternion (clip first to avoid division by zero), then
    # re-normalize to be numerically safe.
    unit_quat = quat / paddle.linalg.norm(
        quat, axis=1, keepdim=True).clip(min=EPS)
    unit_quat = unit_quat / paddle.linalg.norm(unit_quat, axis=1, keepdim=True)

    if scale_depth_by_focal_lengths:
        pixel_size = paddle.linalg.norm(
            paddle.stack([inv_intrinsics[:, 0, 0], inv_intrinsics[:, 1, 1]],
                         axis=-1),
            axis=-1)
        depth = depth / (pixel_size * scale_depth_by_focal_lengths_factor)

    if depth_is_distance:
        # Convert ray distance into z-depth using the unprojected ray length.
        depth = depth / paddle.linalg.norm(
            unproject_points2d(locations, inv_intrinsics), axis=1).clip(min=EPS)

    depth = depth.reshape([-1, 1]).clip(min_depth, max_depth)

    # Predicted center offsets are relative to the feature locations.
    proj_ctr = proj_ctr + locations

    if quat_is_allocentric:
        unit_quat = allocentric_to_egocentric(unit_quat, proj_ctr,
                                              inv_intrinsics)

    size = (size.tanh() + 1.) * canon_box_sizes  # max size = 2 * canon_size

    return paddle.concat([unit_quat, proj_ctr, depth, size], -1)
@manager.HEADS.add_component
class FCOS3DHead(nn.Layer):
    """
    This code is based on https://github.com/TRI-ML/dd3d/blob/main/tridet/modeling/dd3d/fcos3d.py#L55

    3D prediction head: a shared conv tower followed by per-branch conv
    predictors for quaternion, projected center offset, depth, size and
    3D confidence.
    """

    def __init__(self,
                 in_strides,
                 in_channels,
                 num_classes=5,
                 use_scale=True,
                 depth_scale_init_factor=0.3,
                 proj_ctr_scale_init_factor=1.0,
                 use_per_level_predictors=False,
                 mean_depth_per_level=[32.594, 15.178, 8.424, 5.004, 4.662],
                 std_depth_per_level=[14.682, 7.139, 4.345, 2.399, 2.587],
                 num_convs=4,
                 use_deformable=False,
                 norm='FrozenBN',
                 class_agnostic_box3d=False,
                 per_level_predictors=False):
        # NOTE(review): `use_per_level_predictors` (read in forward) and
        # `per_level_predictors` (used to size the predictor LayerLists) look
        # like they should be the same flag — confirm configs always pass them
        # consistently, otherwise forward may index past the predictor lists.
        super().__init__()
        self.in_strides = in_strides
        self.num_levels = len(in_strides)
        self.num_classes = num_classes
        self.use_scale = use_scale
        self.depth_scale_init_factor = depth_scale_init_factor
        self.proj_ctr_scale_init_factor = proj_ctr_scale_init_factor
        self.use_per_level_predictors = use_per_level_predictors

        # Per-level depth statistics; used below to initialize the depth
        # Scale/Offset layers.
        self.register_buffer("mean_depth_per_level",
                             paddle.to_tensor(mean_depth_per_level))
        self.register_buffer("std_depth_per_level",
                             paddle.to_tensor(std_depth_per_level))

        assert len(
            set(in_channels)) == 1, "Each level must have the same channel!"
        in_channels = in_channels[0]

        if use_deformable:
            raise ValueError("Not supported yet.")

        # Shared conv tower (conv -> norm -> relu, repeated num_convs times).
        # LayerListDial gives each FPN level its own norm statistics.
        box3d_tower = []
        for i in range(num_convs):
            if norm == "BN":
                norm_layer = LayerListDial([
                    nn.BatchNorm2D(
                        in_channels,
                        weight_attr=ParamAttr(regularizer=L2Decay(0.0)))
                    for _ in range(self.num_levels)
                ])
            elif norm == "FrozenBN":
                norm_layer = LayerListDial([
                    FrozenBatchNorm2d(in_channels)
                    for _ in range(self.num_levels)
                ])
            else:
                raise NotImplementedError()
            box3d_tower.append(
                nn.Conv2D(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias_attr=False))
            box3d_tower.append(norm_layer)
            box3d_tower.append(nn.ReLU())
        self.add_sublayer('box3d_tower', nn.Sequential(*box3d_tower))

        # With class-agnostic boxes a single set of outputs is predicted;
        # otherwise one set per class.
        num_classes = self.num_classes if not class_agnostic_box3d else 1
        num_levels = self.num_levels if per_level_predictors else 1

        # 3D box branches.
        self.box3d_quat = nn.LayerList([
            nn.Conv2D(
                in_channels,
                4 * num_classes,
                kernel_size=3,
                stride=1,
                padding=1) for _ in range(num_levels)
        ])
        self.box3d_ctr = nn.LayerList([
            nn.Conv2D(
                in_channels,
                2 * num_classes,
                kernel_size=3,
                stride=1,
                padding=1) for _ in range(num_levels)
        ])
        # No bias when Scale/Offset layers handle the depth affine transform.
        self.box3d_depth = nn.LayerList([
            nn.Conv2D(
                in_channels,
                1 * num_classes,
                kernel_size=3,
                stride=1,
                padding=1,
                bias_attr=(not self.use_scale)) for _ in range(num_levels)
        ])
        self.box3d_size = nn.LayerList([
            nn.Conv2D(
                in_channels,
                3 * num_classes,
                kernel_size=3,
                stride=1,
                padding=1) for _ in range(num_levels)
        ])
        self.box3d_conf = nn.LayerList([
            nn.Conv2D(
                in_channels,
                1 * num_classes,
                kernel_size=3,
                stride=1,
                padding=1) for _ in range(num_levels)
        ])

        if self.use_scale:
            self.scales_proj_ctr = nn.LayerList([
                Scale(init_value=stride * self.proj_ctr_scale_init_factor)
                for stride in self.in_strides
            ])
            # (pre-)compute (mean, std) of depth for each level, and determine the init value here.
            self.scales_size = nn.LayerList(
                [Scale(init_value=1.0) for _ in range(self.num_levels)])
            self.scales_conf = nn.LayerList(
                [Scale(init_value=1.0) for _ in range(self.num_levels)])
            self.scales_depth = nn.LayerList([
                Scale(init_value=sigma * self.depth_scale_init_factor)
                for sigma in self.std_depth_per_level
            ])
            self.offsets_depth = nn.LayerList(
                [Offset(init_value=b) for b in self.mean_depth_per_level])

        self._init_weights()

    def _init_weights(self):
        """Kaiming-initialize the tower and the five prediction branches."""
        for l in self.box3d_tower.sublayers():
            if isinstance(l, nn.Conv2D):
                param_init.kaiming_normal_init(
                    l.weight, mode='fan_out', nonlinearity='relu')
                if l.bias is not None:
                    param_init.constant_init(l.bias, value=0.0)

        predictors = [
            self.box3d_quat, self.box3d_ctr, self.box3d_depth, self.box3d_size,
            self.box3d_conf
        ]
        for layers in predictors:
            for l in layers.sublayers():
                if isinstance(l, nn.Conv2D):
                    param_init.kaiming_uniform_init(l.weight, a=1)
                    if l.bias is not None:  # depth head may not have bias.
                        param_init.constant_init(l.bias, value=0.0)

    def forward(self, x):
        """Run the 3D branches over per-level features.

        Args:
            x: list of per-level feature maps.

        Returns:
            Tuple of per-level lists (quat, ctr, depth, size, conf) plus a
            dense_depth placeholder (always None here).
        """
        box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf = [], [], [], [], []
        dense_depth = None
        for l, features in enumerate(x):
            box3d_tower_out = self.box3d_tower(features)
            # Shared predictor (index 0) unless per-level predictors are used.
            _l = l if self.use_per_level_predictors else 0
            # 3D box
            quat = self.box3d_quat[_l](box3d_tower_out)
            proj_ctr = self.box3d_ctr[_l](box3d_tower_out)
            depth = self.box3d_depth[_l](box3d_tower_out)
            size3d = self.box3d_size[_l](box3d_tower_out)
            conf3d = self.box3d_conf[_l](box3d_tower_out)

            if self.use_scale:
                # TODO: to optimize the runtime, apply this scaling in inference (and loss compute) only on FG pixels?
                proj_ctr = self.scales_proj_ctr[l](proj_ctr)
                size3d = self.scales_size[l](size3d)
                conf3d = self.scales_conf[l](conf3d)
                # Depth is an affine transform of the raw output using the
                # per-level (mean, std) statistics.
                depth = self.offsets_depth[l](self.scales_depth[l](depth))

            box3d_quat.append(quat)
            box3d_ctr.append(proj_ctr)
            box3d_depth.append(depth)
            box3d_size.append(size3d)
            box3d_conf.append(conf3d)

        return box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, dense_depth
@manager.LOSSES.add_component
class FCOS3DLoss(nn.Layer):
    """
    This code is based on https://github.com/TRI-ML/dd3d/blob/main/tridet/modeling/dd3d/fcos3d.py#L191

    Computes the disentangled 3D box loss and the 3D confidence loss on
    foreground locations.
    """

    def __init__(
            self,
            canon_box_sizes=[
                [1.61876949, 3.89154523, 1.52969237],  # Car
                [0.62806586, 0.82038497, 1.76784787],  # Pedestrian
                [0.56898187, 1.77149234, 1.7237099],  # Cyclist
                [1.9134491, 5.15499603, 2.18998422],  # Van
                [2.61168401, 9.22692319, 3.36492722],  # Truck
                [0.5390196, 1.08098042, 1.28392158],  # Person_sitting
                [2.36044838, 15.56991038, 3.5289238],  # Tram
                [1.24489164, 2.51495357, 1.61402478],  # Misc
            ],  # (width, length, height)
            min_depth=0.1,
            max_depth=80.0,
            predict_allocentric_rot=True,
            scale_depth_by_focal_lengths=True,
            scale_depth_by_focal_lengths_factor=500.0,
            predict_distance=False,
            smooth_l1_loss_beta=0.05,
            max_loss_per_group=20.0,
            box3d_loss_weight=2.0,
            conf3d_loss_weight=1.0,
            conf_3d_temperature=1.0,
            num_classes=5,
            class_agnostic=False):
        super().__init__()
        self.box3d_reg_loss_fn = DisentangledBox3DLoss(smooth_l1_loss_beta,
                                                       max_loss_per_group)
        self.canon_box_sizes = canon_box_sizes
        self.min_depth = min_depth
        self.max_depth = max_depth
        self.predict_allocentric_rot = predict_allocentric_rot
        self.scale_depth_by_focal_lengths = scale_depth_by_focal_lengths
        self.scale_depth_by_focal_lengths_factor = scale_depth_by_focal_lengths_factor
        self.predict_distance = predict_distance
        self.box3d_loss_weight = box3d_loss_weight
        self.conf3d_loss_weight = conf3d_loss_weight
        self.conf_3d_temperature = conf_3d_temperature

        self.class_agnostic = class_agnostic
        self.num_classes = num_classes

    def forward(self, box3d_quat, box3d_ctr, box3d_depth, box3d_size,
                box3d_conf, dense_depth, inv_intrinsics, fcos2d_info, targets):
        """Compute 3D losses from per-level 3D predictions.

        Args:
            box3d_quat/ctr/depth/size/conf: per-level prediction maps.
            dense_depth: unused here (kept for interface compatibility).
            inv_intrinsics: per-image inverted camera intrinsics.
            fcos2d_info: extra info from FCOS2DLoss ('centerness_targets',
                'loss_denom') shared as weighting/normalization.
            targets: dict with flattened 'labels', 'box3d_targets',
                'pos_inds', 'locations' and 'im_inds'.

        Returns:
            Dict of losses: loss_conf3d plus the disentangled box3d terms.
        """
        labels = targets['labels']
        box3d_targets = targets['box3d_targets']
        pos_inds = targets["pos_inds"]

        if pos_inds.numel() == 0:
            # No foreground: keep all terms in the graph with zero value.
            losses = {
                "loss_box3d_quat": box3d_quat[0].sum() * 0.,
                "loss_box3d_proj_ctr": box3d_ctr[0].sum() * 0.,
                "loss_box3d_depth": box3d_depth[0].sum() * 0.,
                "loss_box3d_size": box3d_size[0].sum() * 0.,
                "loss_conf3d": box3d_conf[0].sum() * 0.
            }
            return losses

        if len(labels) != len(box3d_targets):
            raise ValueError(
                f"The size of 'labels' and 'box3d_targets' does not match: a={len(labels)}, b={len(box3d_targets)}"
            )

        num_classes = self.num_classes if not self.class_agnostic else 1

        # Flatten per-level maps to (num_locations, dims, num_classes).
        box3d_quat_pred = paddle.concat([
            x.transpose([0, 2, 3, 1]).reshape([-1, 4, num_classes])
            for x in box3d_quat
        ],
                                        axis=0)
        box3d_ctr_pred = paddle.concat([
            x.transpose([0, 2, 3, 1]).reshape([-1, 2, num_classes])
            for x in box3d_ctr
        ],
                                       axis=0)
        box3d_depth_pred = paddle.concat([
            x.transpose([0, 2, 3, 1]).reshape([-1, num_classes])
            for x in box3d_depth
        ],
                                         axis=0)
        box3d_size_pred = paddle.concat([
            x.transpose([0, 2, 3, 1]).reshape([-1, 3, num_classes])
            for x in box3d_size
        ],
                                        axis=0)
        box3d_conf_pred = paddle.concat([
            x.transpose([0, 2, 3, 1]).reshape([-1, num_classes])
            for x in box3d_conf
        ],
                                        axis=0)

        # 3D box disentangled loss
        if pos_inds.numel() == 1:
            # Keep a leading axis when indexing with a single index.
            box3d_targets = box3d_targets[pos_inds].unsqueeze(0)
            box3d_quat_pred = box3d_quat_pred[pos_inds].unsqueeze(0)
            box3d_ctr_pred = box3d_ctr_pred[pos_inds].unsqueeze(0)
            box3d_depth_pred = box3d_depth_pred[pos_inds].unsqueeze(0)
            box3d_size_pred = box3d_size_pred[pos_inds].unsqueeze(0)
            box3d_conf_pred = box3d_conf_pred[pos_inds].unsqueeze(0)
        else:
            box3d_targets = box3d_targets[pos_inds]
            box3d_quat_pred = box3d_quat_pred[pos_inds]
            box3d_ctr_pred = box3d_ctr_pred[pos_inds]
            box3d_depth_pred = box3d_depth_pred[pos_inds]
            box3d_size_pred = box3d_size_pred[pos_inds]
            box3d_conf_pred = box3d_conf_pred[pos_inds]

        if self.class_agnostic:
            box3d_quat_pred = box3d_quat_pred.squeeze(-1)
            box3d_ctr_pred = box3d_ctr_pred.squeeze(-1)
            box3d_depth_pred = box3d_depth_pred.squeeze(-1)
            box3d_size_pred = box3d_size_pred.squeeze(-1)
            box3d_conf_pred = box3d_conf_pred.squeeze(-1)
        else:
            # Select, per foreground location, the prediction slice that
            # corresponds to its ground-truth class.
            I = labels[pos_inds][..., None, None]
            box3d_quat_pred = paddle.take_along_axis(
                box3d_quat_pred, indices=I.tile([1, 4, 1]), axis=2).squeeze(-1)
            box3d_ctr_pred = paddle.take_along_axis(
                box3d_ctr_pred, indices=I.tile([1, 2, 1]), axis=2).squeeze(-1)
            box3d_depth_pred = paddle.take_along_axis(
                box3d_depth_pred, indices=I.squeeze(-1), axis=1).squeeze(-1)
            box3d_size_pred = paddle.take_along_axis(
                box3d_size_pred, indices=I.tile([1, 3, 1]), axis=2).squeeze(-1)
            box3d_conf_pred = paddle.take_along_axis(
                box3d_conf_pred, indices=I.squeeze(-1), axis=1).squeeze(-1)

        canon_box_sizes = paddle.to_tensor(
            self.canon_box_sizes)[labels[pos_inds]]
        locations = targets["locations"][pos_inds]
        im_inds = targets["im_inds"][pos_inds]
        inv_intrinsics = inv_intrinsics[im_inds]
        if im_inds.numel() == 1:
            inv_intrinsics = inv_intrinsics.unsqueeze(0)

        box3d_pred = predictions_to_boxes3d(
            box3d_quat_pred,
            box3d_ctr_pred,
            box3d_depth_pred,
            box3d_size_pred,
            locations,
            inv_intrinsics,
            canon_box_sizes,
            self.min_depth,
            self.max_depth,
            scale_depth_by_focal_lengths_factor=self.
            scale_depth_by_focal_lengths_factor,
            scale_depth_by_focal_lengths=self.scale_depth_by_focal_lengths,
            quat_is_allocentric=self.predict_allocentric_rot,
            depth_is_distance=self.predict_distance)

        # Reuse the 2D centerness targets / denominator for weighting.
        centerness_targets = fcos2d_info["centerness_targets"]
        loss_denom = fcos2d_info["loss_denom"]
        losses_box3d, box3d_l1_error = self.box3d_reg_loss_fn(
            box3d_pred, box3d_targets, locations, inv_intrinsics,
            centerness_targets)

        losses_box3d = {
            k: self.box3d_loss_weight * v / loss_denom
            for k, v in losses_box3d.items()
        }

        # 3D confidence target: exp(-L1 error / temperature); supervised with BCE.
        conf_3d_targets = paddle.exp(
            -1. / self.conf_3d_temperature * box3d_l1_error)
        loss_conf3d = F.binary_cross_entropy_with_logits(
            box3d_conf_pred, conf_3d_targets, reduction='none')
        loss_conf3d = self.conf3d_loss_weight * (
            loss_conf3d * centerness_targets).sum() / loss_denom

        losses = {"loss_conf3d": loss_conf3d, **losses_box3d}

        return losses
@manager.MODELS.add_component
class FCOS3DInference(nn.Layer):
    """
    This code is based on https://github.com/TRI-ML/dd3d/blob/main/tridet/modeling/dd3d/fcos3d.py#L302

    Decodes the 3D branch outputs for the candidates selected by the 2D
    inference stage, modifying the per-image instance dicts in place.
    """

    def __init__(
            self,
            canon_box_sizes=[
                [1.61876949, 3.89154523, 1.52969237],  # Car
                [0.62806586, 0.82038497, 1.76784787],  # Pedestrian
                [0.56898187, 1.77149234, 1.7237099],  # Cyclist
                [1.9134491, 5.15499603, 2.18998422],  # Van
                [2.61168401, 9.22692319, 3.36492722],  # Truck
                [0.5390196, 1.08098042, 1.28392158],  # Person_sitting
                [2.36044838, 15.56991038, 3.5289238],  # Tram
                [1.24489164, 2.51495357, 1.61402478],  # Misc
            ],  # (width, length, height)
            min_depth=0.1,
            max_depth=80.0,
            predict_allocentric_rot=True,
            scale_depth_by_focal_lengths=True,
            scale_depth_by_focal_lengths_factor=500.0,
            predict_distance=False,
            num_classes=5,
            class_agnostic=False):
        super().__init__()
        self.num_classes = num_classes
        self.class_agnostic = class_agnostic
        self.predict_distance = predict_distance
        self.canon_box_sizes = canon_box_sizes
        self.min_depth = min_depth
        self.max_depth = max_depth
        self.predict_allocentric_rot = predict_allocentric_rot
        self.scale_depth_by_focal_lengths_factor = scale_depth_by_focal_lengths_factor
        self.scale_depth_by_focal_lengths = scale_depth_by_focal_lengths

    def forward(self, box3d_quat, box3d_ctr, box3d_depth, box3d_size,
                box3d_conf, inv_intrinsics, pred_instances, fcos2d_info):
        """Attach decoded 3D boxes and 3D scores to the 2D instances.

        Args:
            box3d_*: per-level 3D prediction maps.
            inv_intrinsics: per-image inverted camera intrinsics.
            pred_instances: List[List[dict]], shape = (L, B); updated in place.
            fcos2d_info: per-level candidate bookkeeping from FCOS2DInference.
        """
        # pred_instances: # List[List[Instances]], shape = (L, B)
        for lvl, (box3d_quat_lvl, box3d_ctr_lvl, box3d_depth_lvl, box3d_size_lvl, box3d_conf_lvl) in \
                enumerate(zip(box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf)):
            # In-place modification: update per-level pred_instances.
            self.forward_for_single_feature_map(
                box3d_quat_lvl, box3d_ctr_lvl, box3d_depth_lvl, box3d_size_lvl,
                box3d_conf_lvl, inv_intrinsics, pred_instances[lvl],
                fcos2d_info[lvl])  # List of Instances; one for each image.

    def forward_for_single_feature_map(self, box3d_quat, box3d_ctr, box3d_depth,
                                       box3d_size, box3d_conf, inv_intrinsics,
                                       pred_instances, fcos2d_info):
        """Decode one level's 3D outputs for each image's selected candidates."""
        N = box3d_quat.shape[0]

        num_classes = self.num_classes if not self.class_agnostic else 1

        # Flatten to (N, num_locations, dims, num_classes).
        box3d_quat = box3d_quat.transpose([0, 2, 3,
                                           1]).reshape([N, -1, 4, num_classes])
        box3d_ctr = box3d_ctr.transpose([0, 2, 3,
                                         1]).reshape([N, -1, 2, num_classes])
        box3d_depth = box3d_depth.transpose([0, 2, 3,
                                             1]).reshape([N, -1, num_classes])
        box3d_size = box3d_size.transpose([0, 2, 3,
                                           1]).reshape([N, -1, 3, num_classes])
        box3d_conf = F.sigmoid(
            box3d_conf.transpose([0, 2, 3, 1]).reshape([N, -1, num_classes]))

        for i in range(N):
            # Candidate bookkeeping produced by the 2D inference stage.
            fg_inds_per_im = fcos2d_info['fg_inds_per_im'][i]
            class_inds_per_im = fcos2d_info['class_inds_per_im'][i]
            topk_indices = fcos2d_info['topk_indices'][i]

            if fg_inds_per_im.shape[0] == 0:
                box3d_conf_per_im = paddle.zeros([0, num_classes])
                pred_instances[i]['pred_boxes3d'] = paddle.zeros([0, 10])
            else:
                if fg_inds_per_im.shape[0] == 1:
                    # Keep a leading axis when indexing with a single index.
                    box3d_quat_per_im = box3d_quat[i][fg_inds_per_im].unsqueeze(
                        0)
                    box3d_ctr_per_im = box3d_ctr[i][fg_inds_per_im].unsqueeze(0)
                    box3d_depth_per_im = box3d_depth[i][
                        fg_inds_per_im].unsqueeze(0)
                    box3d_size_per_im = box3d_size[i][fg_inds_per_im].unsqueeze(
                        0)
                    box3d_conf_per_im = box3d_conf[i][fg_inds_per_im].unsqueeze(
                        0)
                else:
                    box3d_quat_per_im = box3d_quat[i][fg_inds_per_im]
                    box3d_ctr_per_im = box3d_ctr[i][fg_inds_per_im]
                    box3d_depth_per_im = box3d_depth[i][fg_inds_per_im]
                    box3d_size_per_im = box3d_size[i][fg_inds_per_im]
                    box3d_conf_per_im = box3d_conf[i][fg_inds_per_im]

                if self.class_agnostic:
                    box3d_quat_per_im = box3d_quat_per_im.squeeze(-1)
                    box3d_ctr_per_im = box3d_ctr_per_im.squeeze(-1)
                    box3d_depth_per_im = box3d_depth_per_im.squeeze(-1)
                    box3d_size_per_im = box3d_size_per_im.squeeze(-1)
                    box3d_conf_per_im = box3d_conf_per_im.squeeze(-1)
                else:
                    # Select the prediction slice of each candidate's class.
                    I = class_inds_per_im[..., None, None]
                    box3d_quat_per_im = paddle.take_along_axis(
                        box3d_quat_per_im, indices=I.tile([1, 4, 1]),
                        axis=2).squeeze(-1)
                    box3d_ctr_per_im = paddle.take_along_axis(
                        box3d_ctr_per_im, indices=I.tile([1, 2, 1]),
                        axis=2).squeeze(-1)
                    box3d_depth_per_im = paddle.take_along_axis(
                        box3d_depth_per_im, indices=I.squeeze(-1),
                        axis=1).squeeze(-1)
                    box3d_size_per_im = paddle.take_along_axis(
                        box3d_size_per_im, indices=I.tile([1, 3, 1]),
                        axis=2).squeeze(-1)
                    box3d_conf_per_im = paddle.take_along_axis(
                        box3d_conf_per_im, indices=I.squeeze(-1),
                        axis=1).squeeze(-1)

                # Apply the same top-k selection as the 2D stage, if any.
                if topk_indices is not None:
                    box3d_quat_per_im = box3d_quat_per_im[topk_indices]
                    box3d_ctr_per_im = box3d_ctr_per_im[topk_indices]
                    box3d_depth_per_im = box3d_depth_per_im[topk_indices]
                    box3d_size_per_im = box3d_size_per_im[topk_indices]
                    box3d_conf_per_im = box3d_conf_per_im[topk_indices]

                canon_box_sizes = paddle.to_tensor(
                    self.canon_box_sizes)[pred_instances[i]['pred_classes']]
                inv_K = inv_intrinsics[i][None, ...].expand(
                    [len(box3d_quat_per_im), 3, 3])
                locations = pred_instances[i]['locations']
                pred_boxes3d = predictions_to_boxes3d(
                    box3d_quat_per_im,
                    box3d_ctr_per_im,
                    box3d_depth_per_im,
                    box3d_size_per_im,
                    locations,
                    inv_K,
                    canon_box_sizes,
                    self.min_depth,
                    self.max_depth,
                    scale_depth_by_focal_lengths_factor=self.
                    scale_depth_by_focal_lengths_factor,
                    scale_depth_by_focal_lengths=self.
                    scale_depth_by_focal_lengths,
                    quat_is_allocentric=self.predict_allocentric_rot,
                    depth_is_distance=self.predict_distance)
                pred_instances[i]['pred_boxes3d'] = pred_boxes3d

            # scores_per_im = pred_instances[i].scores.square()
            # NOTE: Before refactoring, the squared score was used. Is raw 2D score better?
            scores_per_im = pred_instances[i]['scores']
            if scores_per_im.shape[0] == 0:
                scores_3d_per_im = paddle.zeros(scores_per_im.shape)
            else:
                # Combine 2D score and 3D confidence into the 3D score.
                scores_3d_per_im = scores_per_im * box3d_conf_per_im

            # In-place modification: add fields to instances.
            pred_instances[i]['scores_3d'] = scores_3d_per_im
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/class_heads/ocrnet_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models import layers
from paddle3d.models.layers import param_init, reset_parameters
@manager.HEADS.add_component
class OCRHead(nn.Layer):
    """
    The Object contextual representation head.

    Args:
        num_classes(int): The unique number of target classes.
        in_channels(tuple): The number of input channels.
        ocr_mid_channels(int, optional): The number of middle channels in OCRHead. Default: 512.
        ocr_key_channels(int, optional): The number of key channels in ObjectAttentionBlock. Default: 256.
    """

    def __init__(self,
                 num_classes,
                 backbone_indices,
                 in_channels,
                 ocr_mid_channels=512,
                 ocr_key_channels=256,
                 align_corners=False,
                 pretrained=None):
        super().__init__()
        self.num_classes = num_classes
        self.backbone_indices = backbone_indices
        self.in_channels = in_channels
        self.align_corners = align_corners
        self.pretrained = pretrained

        self.spatial_gather = SpatialGatherBlock(ocr_mid_channels, num_classes)
        self.spatial_ocr = SpatialOCRModule(ocr_mid_channels, ocr_key_channels,
                                            ocr_mid_channels)

        # With multiple input feature maps the last two serve as the
        # (shallow, deep) pair; with a single one it serves as both.
        self.indices = [-2, -1] if len(in_channels) > 1 else [-1, -1]

        self.conv3x3_ocr = layers.ConvBNReLU(
            in_channels[self.indices[1]], ocr_mid_channels, 3, padding=1)
        self.cls_head = nn.Conv2D(ocr_mid_channels, self.num_classes, 1)
        self.aux_head = nn.Sequential(
            layers.ConvBNReLU(in_channels[self.indices[0]],
                              in_channels[self.indices[0]], 1),
            nn.Conv2D(in_channels[self.indices[0]], self.num_classes, 1))

        self.init_weight()

    def forward(self, feat_list, img_shape):
        """Return the OCR segmentation logit for the selected backbone features."""
        selected = [feat_list[i] for i in self.backbone_indices]
        feat_shallow = selected[self.indices[0]]
        feat_deep = selected[self.indices[1]]

        # Coarse (auxiliary) prediction acts as the soft object regions.
        soft_regions = self.aux_head(feat_shallow)
        pixels = self.conv3x3_ocr(feat_deep)

        # Aggregate pixel features into region descriptors, then refine the
        # pixel features with object-contextual attention.
        object_regions = self.spatial_gather(pixels, soft_regions)
        ocr = self.spatial_ocr(pixels, object_regions)

        # Only the main logit is returned.
        return self.cls_head(ocr)

    def init_weight(self):
        """Initialize the parameters of model parts."""
        for sublayer in self.sublayers():
            if isinstance(sublayer, nn.Conv2D):
                reset_parameters(sublayer)
class SpatialGatherBlock(nn.Layer):
    """Aggregation layer computing one descriptor per soft object region.

    Given pixel features and per-class soft region maps, produces a weighted
    average of pixel features for every region.
    """

    def __init__(self, pixels_channels, regions_channels):
        super().__init__()
        self.pixels_channels = pixels_channels
        self.regions_channels = regions_channels

    def forward(self, pixels, regions):
        """Return region descriptors of shape (n, c, k, 1)."""
        # Flatten pixels: (n, c, h, w) -> (n, h*w, c).
        flat_pixels = paddle.transpose(
            paddle.reshape(pixels, (0, self.pixels_channels, -1)), (0, 2, 1))
        # Flatten regions and normalize over the spatial axis:
        # (n, k, h, w) -> (n, k, h*w).
        flat_regions = F.softmax(
            paddle.reshape(regions, (0, self.regions_channels, -1)), axis=2)
        # Weighted sum per region: (n, k, c) -> (n, c, k) -> (n, c, k, 1).
        region_feats = paddle.bmm(flat_regions, flat_pixels)
        region_feats = paddle.transpose(region_feats, (0, 2, 1))
        return paddle.unsqueeze(region_feats, axis=-1)
class SpatialOCRModule(nn.Layer):
    """Augment each pixel with its attended object context.

    Runs pixel-to-region attention, concatenates the resulting context with
    the original pixel features, and fuses them with a 1x1 conv + dropout.
    """

    def __init__(self,
                 in_channels,
                 key_channels,
                 out_channels,
                 dropout_rate=0.1):
        super().__init__()
        self.attention_block = ObjectAttentionBlock(in_channels, key_channels)
        self.conv1x1 = nn.Sequential(
            layers.ConvBNReLU(2 * in_channels, out_channels, 1),
            nn.Dropout2D(dropout_rate))

    def forward(self, pixels, regions):
        """Return fused pixel+context features."""
        context = self.attention_block(pixels, regions)
        fused = paddle.concat([context, pixels], axis=1)
        return self.conv1x1(fused)
class ObjectAttentionBlock(nn.Layer):
    """Scaled dot-product attention from pixel queries to region keys/values."""

    def __init__(self, in_channels, key_channels):
        super().__init__()
        self.in_channels = in_channels
        self.key_channels = key_channels
        self.f_pixel = nn.Sequential(
            layers.ConvBNReLU(in_channels, key_channels, 1),
            layers.ConvBNReLU(key_channels, key_channels, 1))
        self.f_object = nn.Sequential(
            layers.ConvBNReLU(in_channels, key_channels, 1),
            layers.ConvBNReLU(key_channels, key_channels, 1))
        self.f_down = layers.ConvBNReLU(in_channels, key_channels, 1)
        self.f_up = layers.ConvBNReLU(key_channels, in_channels, 1)

    def forward(self, x, proxy):
        """Return per-pixel context features with the same spatial size as x."""
        in_shape = paddle.shape(x)
        # Queries from pixels: (n, c1, h1, w1) -> (n, h1*w1, key_channels).
        q = paddle.reshape(self.f_pixel(x), (0, self.key_channels, -1))
        q = paddle.transpose(q, (0, 2, 1))
        # Keys from region proxies: (n, c2, h2, w2) -> (n, key_channels, h2*w2).
        k = paddle.reshape(self.f_object(proxy), (0, self.key_channels, -1))
        # Values from region proxies: (n, c2, h2, w2) -> (n, h2*w2, key_channels).
        v = paddle.reshape(self.f_down(proxy), (0, self.key_channels, -1))
        v = paddle.transpose(v, (0, 2, 1))
        # Similarity map (n, h1*w1, h2*w2), scaled by 1/sqrt(key_channels).
        attn = paddle.bmm(q, k)
        attn = (self.key_channels**-.5) * attn
        attn = F.softmax(attn, axis=-1)
        # Context: (n, h1*w1, key_channels) -> (n, key_channels, h1, w1).
        ctx = paddle.bmm(attn, v)
        ctx = paddle.transpose(ctx, (0, 2, 1))
        ctx = paddle.reshape(ctx,
                             (0, self.key_channels, in_shape[2], in_shape[3]))
        return self.f_up(ctx)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/class_heads/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .deeplabv3_head import *
from .ocrnet_head import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/class_heads/deeplabv3_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers import (ASPPModule, ConvBNReLU,
SeparableConvBNReLU, reset_parameters)
@manager.HEADS.add_component
class DeepLabV3Head(nn.Layer):
    """
    The DeepLabV3Head implementation based on PaddlePaddle.

    ASPP over one backbone feature map, a 3x3 conv-bn-relu, then a 1x1
    classifier producing per-class logits.

    Args:
        Please Refer to DeepLabV3PHead above.
    """

    def __init__(self,
                 num_classes,
                 backbone_indices,
                 backbone_channels,
                 aspp_ratios,
                 aspp_out_channels,
                 align_corners,
                 pretrained=None):
        super().__init__()
        self.backbone_indices = backbone_indices
        self.aspp = ASPPModule(
            aspp_ratios,
            backbone_channels,
            aspp_out_channels,
            align_corners,
            use_sep_conv=False,
            image_pooling=True,
            bias_attr=False)
        self.conv_bn_relu = ConvBNReLU(
            in_channels=aspp_out_channels,
            out_channels=aspp_out_channels,
            kernel_size=3,
            padding=1,
            bias_attr=False)
        self.cls = nn.Conv2D(
            in_channels=aspp_out_channels,
            out_channels=num_classes,
            kernel_size=1)
        self.init_weight()

    def forward(self, feat_list, x):
        """Classify the selected backbone feature; ``x`` is unused."""
        feat = feat_list[self.backbone_indices[0]]
        return self.cls(self.conv_bn_relu(self.aspp(feat)))

    def init_weight(self):
        """Reset the parameters of every Conv2D sublayer."""
        for sub in self.sublayers():
            if isinstance(sub, nn.Conv2D):
                reset_parameters(sub)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .pvrcnn_head import *
from .target_assigner import *
from .voxelrcnn_head import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads/roi_head_base.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/models/roi_heads/roi_head_template.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.models.common import class_agnostic_nms, rotate_points_along_z
from paddle3d.models.heads.roi_heads.target_assigner.proposal_target_layer import \
ProposalTargetLayer
from paddle3d.models.losses import WeightedSmoothL1Loss, get_corner_loss_lidar
from paddle3d.utils import box_coder as box_coder_utils
class RoIHeadBase(nn.Layer):
    """Base class for second-stage RoI refinement heads (PV-RCNN, Voxel R-CNN).

    Provides proposal generation via NMS, RoI target assignment with
    canonical-coordinate transformation, classification / regression losses,
    box decoding, and RoI grid-point generation. Subclasses implement the
    feature pooling and the ``forward`` pass.

    Args:
        num_class (int): Number of foreground classes.
        model_cfg (dict): Head configuration; must contain ``target_config``
            and ``loss_config``.
    """
    def __init__(self, num_class, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_class = num_class
        # Instantiate the box coder class named in the config, forwarding any
        # extra kwargs from ``box_coder_config``.
        self.box_coder = getattr(
            box_coder_utils, self.model_cfg["target_config"]["box_coder"])(
                **self.model_cfg["target_config"].get('box_coder_config', {}))
        self.proposal_target_layer = ProposalTargetLayer(
            roi_sampler_cfg=self.model_cfg["target_config"])
        self.build_losses(self.model_cfg["loss_config"])
        # Filled by the subclass during training; consumed by ``get_loss``.
        self.forward_ret_dict = None
    def build_losses(self, losses_cfg):
        """Register the weighted smooth-L1 regression loss as a sublayer."""
        self.add_sublayer(
            'reg_loss_func',
            WeightedSmoothL1Loss(
                code_weights=losses_cfg["loss_weights"]['code_weights']))
    def make_fc_layers(self, input_channels, output_channels, fc_list):
        """Build a stack of Conv1D(k=1)-BN-ReLU blocks ending in a plain
        Conv1D projection to ``output_channels``.

        Args:
            input_channels (int): Channels of the input feature.
            output_channels (int): Channels of the final projection.
            fc_list (list[int]): Hidden widths of the intermediate blocks.
        Returns:
            nn.Sequential: The assembled layer stack.
        """
        fc_layers = []
        pre_channel = input_channels
        for k in range(0, fc_list.__len__()):
            fc_layers.extend([
                nn.Conv1D(
                    pre_channel, fc_list[k], kernel_size=1, bias_attr=False),
                nn.BatchNorm1D(fc_list[k]),
                nn.ReLU()
            ])
            pre_channel = fc_list[k]
            # Dropout is inserted only after the first block (k == 0).
            if self.model_cfg["dp_ratio"] >= 0 and k == 0:
                fc_layers.append(nn.Dropout(self.model_cfg["dp_ratio"]))
        fc_layers.append(
            nn.Conv1D(
                pre_channel, output_channels, kernel_size=1, bias_attr=True))
        fc_layers = nn.Sequential(*fc_layers)
        return fc_layers
    @paddle.no_grad()
    def proposal_layer(self, batch_dict, nms_config):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
            nms_config:
        Returns:
            batch_dict:
                rois: (B, num_rois, 7+C)
                roi_scores: (B, num_rois)
                roi_labels: (B, num_rois)
        """
        # If proposals already exist (e.g. precomputed targets), do nothing.
        if batch_dict.get('rois', None) is not None:
            return batch_dict
        batch_size = batch_dict['batch_size']
        batch_box_preds = batch_dict['batch_box_preds']
        batch_cls_preds = batch_dict['batch_cls_preds']
        # Pre-allocate fixed-size outputs; slots beyond the NMS survivors
        # stay zero.
        rois = paddle.zeros((batch_size, nms_config["nms_post_maxsize"],
                             batch_box_preds.shape[-1]))
        roi_scores = paddle.zeros((batch_size, nms_config["nms_post_maxsize"]))
        roi_labels = paddle.zeros((batch_size, nms_config["nms_post_maxsize"]),
                                  dtype='int64')
        for index in range(batch_size):
            if batch_dict.get('batch_index', None) is not None:
                # Flattened predictions: select this sample's rows by mask.
                assert batch_cls_preds.shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                # Batched predictions: plain integer indexing.
                assert batch_dict['batch_cls_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_box_preds[batch_mask]
            cls_preds = batch_cls_preds[batch_mask]
            cur_roi_scores = paddle.max(cls_preds, axis=1)
            cur_roi_labels = paddle.argmax(cls_preds, axis=1)
            if nms_config['multi_class_nms']:
                raise NotImplementedError
            else:
                selected_score, selected_label, selected_box = class_agnostic_nms(
                    box_scores=cur_roi_scores,
                    box_preds=box_preds,
                    label_preds=cur_roi_labels,
                    nms_config=nms_config)
                rois[index, :selected_label.shape[0], :] = selected_box
                roi_scores[index, :selected_label.shape[0]] = selected_score
                roi_labels[index, :selected_label.shape[0]] = selected_label
        batch_dict['rois'] = rois
        batch_dict['roi_scores'] = roi_scores
        # Shift labels to 1-based so 0 can denote an empty slot.
        batch_dict['roi_labels'] = roi_labels + 1
        batch_dict.pop('batch_index', None)
        return batch_dict
    def assign_targets(self, batch_dict):
        """Sample RoIs, match them to ground truth, and express the matched
        boxes in each RoI's canonical (center-origin, yaw-aligned) frame.

        Returns:
            dict: Target dict with ``gt_of_rois`` in local coordinates and
            ``gt_of_rois_src`` keeping the original LiDAR-frame copy.
        """
        batch_size = batch_dict['batch_size']
        with paddle.no_grad():
            targets_dict = self.proposal_target_layer.forward(batch_dict)
        rois = targets_dict['rois']  # (B, N, 7 + C)
        gt_of_rois = targets_dict['gt_of_rois']  # (B, N, 7 + C + 1)
        targets_dict['gt_of_rois_src'] = gt_of_rois.clone()
        # canonical transformation
        roi_center = rois[:, :, 0:3]
        roi_ry = rois[:, :, 6] % (2 * np.pi)
        #index = paddle.to_tensor([0, 1, 2], dtype='int32')
        #selected_rois = paddle.index_select(gt_of_rois, index=index, axis=-1)
        #gt_of_rois[:, :, 0:3] = gt_of_rois[:, :, 0:3] - roi_center
        selected_rois = gt_of_rois[:, :, 0:3] - roi_center
        gt_of_rois[:, :, 0:3] = selected_rois
        gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry
        # transfer LiDAR coords to local coords
        gt_of_rois = rotate_points_along_z(
            points=gt_of_rois.reshape([-1, 1, gt_of_rois.shape[-1]]),
            angle=-roi_ry.reshape([-1])).reshape(
                [batch_size, -1, gt_of_rois.shape[-1]])
        # flip orientation if rois have opposite orientation
        heading_label = gt_of_rois[:, :, 6] % (2 * np.pi)  # 0 ~ 2pi
        opposite_flag = (heading_label > np.pi * 0.5) & (heading_label <
                                                         np.pi * 1.5)
        if opposite_flag.numel() > 0:
            heading_label[opposite_flag] = (
                heading_label[opposite_flag] + np.pi) % (
                    2 * np.pi)  # (0 ~ pi/2, 3pi/2 ~ 2pi)
        flag = heading_label > np.pi
        if flag.numel() > 0:
            heading_label[
                flag] = heading_label[flag] - np.pi * 2  # (-pi/2, pi/2)
        heading_label = paddle.clip(
            heading_label, min=-np.pi / 2, max=np.pi / 2)
        gt_of_rois[:, :, 6] = heading_label
        targets_dict['gt_of_rois'] = gt_of_rois
        return targets_dict
    def get_box_reg_layer_loss(self, forward_ret_dict):
        """Smooth-L1 regression loss over foreground RoIs, optionally with a
        corner-loss regularization term.

        Returns:
            tuple: (rcnn_loss_reg, tb_dict) where tb_dict holds scalar logs.
        """
        loss_cfgs = self.model_cfg["loss_config"]
        code_size = self.box_coder.code_size
        reg_valid_mask = forward_ret_dict['reg_valid_mask'].reshape([-1])
        gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size]
        gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:
                                                            code_size].reshape(
                                                                [-1, code_size])
        rcnn_reg = forward_ret_dict['rcnn_reg']  # (rcnn_batch_size, C)
        roi_boxes3d = forward_ret_dict['rois']
        rcnn_batch_size = gt_boxes3d_ct.reshape([-1, code_size]).shape[0]
        fg_mask = (reg_valid_mask > 0)
        fg_sum = fg_mask.astype('int32').sum()
        tb_dict = {}
        if loss_cfgs['reg_loss'] == 'smooth-l1':
            # Anchors live in the canonical frame: zero center, zero yaw.
            rois_anchor = roi_boxes3d.clone().reshape([-1, code_size])
            rois_anchor[:, 0:3] = 0
            rois_anchor[:, 6] = 0
            reg_targets = self.box_coder.encode_paddle(
                gt_boxes3d_ct.reshape([rcnn_batch_size, code_size]),
                rois_anchor)
            #reg_targets.stop_gradient = True
            rcnn_loss_reg = self.reg_loss_func(
                rcnn_reg.reshape([rcnn_batch_size, -1]).unsqueeze(axis=0),
                reg_targets.unsqueeze(axis=0),
            )  # [B, M, 7]
            # Average over foreground RoIs only (guard against fg_sum == 0).
            rcnn_loss_reg = (rcnn_loss_reg.reshape([rcnn_batch_size, -1]) *
                             fg_mask.unsqueeze(axis=-1).astype('float32')
                             ).sum() / max(fg_sum, 1)
            rcnn_loss_reg = rcnn_loss_reg * loss_cfgs['loss_weights'][
                'rcnn_reg_weight']
            tb_dict['rcnn_loss_reg'] = rcnn_loss_reg
            if loss_cfgs["corner_loss_regularization"] and fg_sum > 0:
                # TODO: NEED to BE CHECK
                # Decode foreground predictions back to LiDAR coordinates
                # before comparing box corners against the source GT.
                fg_rcnn_reg = rcnn_reg.reshape([rcnn_batch_size, -1])[fg_mask]
                fg_roi_boxes3d = roi_boxes3d.reshape([-1, code_size])[fg_mask]
                fg_roi_boxes3d = fg_roi_boxes3d.reshape([1, -1, code_size])
                batch_anchors = fg_roi_boxes3d.clone()
                roi_ry = fg_roi_boxes3d[:, :, 6].reshape([-1])
                roi_xyz = fg_roi_boxes3d[:, :, 0:3].reshape([-1, 3])
                batch_anchors[:, :, 0:3] = 0
                rcnn_boxes3d = self.box_coder.decode_paddle(
                    fg_rcnn_reg.reshape([batch_anchors.shape[0], -1,
                                         code_size]),
                    batch_anchors).reshape([-1, code_size])
                rcnn_boxes3d = rotate_points_along_z(
                    rcnn_boxes3d.unsqueeze(axis=1), roi_ry).squeeze(axis=1)
                rcnn_boxes3d[:, 0:3] += roi_xyz
                #gt_of_rois_src.stop_gradient = True
                loss_corner = get_corner_loss_lidar(
                    rcnn_boxes3d[:, 0:7], gt_of_rois_src[fg_mask][:, 0:7])
                loss_corner = loss_corner.mean()
                loss_corner = loss_corner * loss_cfgs["loss_weights"][
                    'rcnn_corner_weight']
                rcnn_loss_reg += loss_corner
                tb_dict['rcnn_loss_corner'] = loss_corner
        else:
            raise NotImplementedError
        return rcnn_loss_reg, tb_dict
    def get_box_cls_layer_loss(self, forward_ret_dict):
        """Classification loss over sampled RoIs; labels < 0 are ignored.

        Returns:
            tuple: (rcnn_loss_cls, tb_dict).
        """
        loss_cfgs = self.model_cfg["loss_config"]
        rcnn_cls = forward_ret_dict['rcnn_cls']
        rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].reshape([-1])
        if loss_cfgs['cls_loss'] == 'BinaryCrossEntropy':
            rcnn_cls_flat = rcnn_cls.reshape([-1])
            #rcnn_cls_labels.stop_gradient = True
            batch_loss_cls = F.binary_cross_entropy(
                F.sigmoid(rcnn_cls_flat),
                rcnn_cls_labels.astype('float32'),
                reduction='none')
            cls_valid_mask = (rcnn_cls_labels >= 0).astype('float32')
            rcnn_loss_cls = (
                batch_loss_cls * cls_valid_mask).sum() / paddle.clip(
                    cls_valid_mask.sum(), min=1.0)
        elif loss_cfgs['cls_loss'] == 'CrossEntropy':
            #rcnn_cls_labels.stop_gradient = True
            batch_loss_cls = F.cross_entropy(
                rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
            cls_valid_mask = (rcnn_cls_labels >= 0).astype('float32')
            rcnn_loss_cls = (
                batch_loss_cls * cls_valid_mask).sum() / paddle.clip(
                    cls_valid_mask.sum(), min=1.0)
        else:
            raise NotImplementedError
        rcnn_loss_cls = rcnn_loss_cls * loss_cfgs["loss_weights"][
            'rcnn_cls_weight']
        tb_dict = {'rcnn_loss_cls': rcnn_loss_cls}
        return rcnn_loss_cls, tb_dict
    def get_loss(self, tb_dict=None):
        """Sum the classification and regression losses.

        Returns:
            tuple: (rcnn_loss, tb_dict) with per-term scalars merged in.
        """
        tb_dict = {} if tb_dict is None else tb_dict
        rcnn_loss = 0
        rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(
            self.forward_ret_dict)
        rcnn_loss += rcnn_loss_cls
        tb_dict.update(cls_tb_dict)
        rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(
            self.forward_ret_dict)
        rcnn_loss += rcnn_loss_reg
        tb_dict.update(reg_tb_dict)
        tb_dict['rcnn_loss'] = rcnn_loss
        return rcnn_loss, tb_dict
    def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds):
        """
        Args:
            batch_size:
            rois: (B, N, 7)
            cls_preds: (BN, num_class)
            box_preds: (BN, code_size)
        Returns:
            Tuple of (batch_cls_preds, batch_box_preds); boxes are decoded
            from the canonical RoI frame back into LiDAR coordinates.
        """
        code_size = self.box_coder.code_size
        # batch_cls_preds: (B, N, num_class or 1)
        batch_cls_preds = cls_preds.reshape(
            [batch_size, -1, cls_preds.shape[-1]])
        batch_box_preds = box_preds.reshape([batch_size, -1, code_size])
        roi_ry = rois[:, :, 6].reshape([-1])
        roi_xyz = rois[:, :, 0:3].reshape([-1, 3])
        # Decode against zero-centered RoIs, then rotate and translate back.
        local_rois = rois.clone()
        local_rois[:, :, 0:3] = 0
        batch_box_preds = self.box_coder.decode_paddle(
            batch_box_preds, local_rois).reshape([-1, code_size])
        batch_box_preds = rotate_points_along_z(
            batch_box_preds.unsqueeze(axis=1), roi_ry).squeeze(axis=1)
        batch_box_preds[:, 0:3] += roi_xyz
        batch_box_preds = batch_box_preds.reshape([batch_size, -1, code_size])
        return batch_cls_preds, batch_box_preds
    def get_global_grid_points_of_roi(self, rois, grid_size):
        """Return grid-point coordinates inside each RoI, in both global
        (LiDAR) and RoI-local frames."""
        rois = rois.reshape([-1, rois.shape[-1]])
        batch_size_rcnn = rois.shape[0]
        local_roi_grid_points = self.get_dense_grid_points(
            rois, batch_size_rcnn, grid_size)  # (B, 6x6x6, 3)
        # Rotate local grid points by the RoI yaw, then shift to the RoI center.
        global_roi_grid_points = rotate_points_along_z(
            local_roi_grid_points.clone(), rois[:, 6]).squeeze(axis=1)
        global_center = rois[:, 0:3].clone()
        global_roi_grid_points += global_center.unsqueeze(axis=1)
        return global_roi_grid_points, local_roi_grid_points
    @staticmethod
    def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
        """Return a regular grid of cell-center points within each RoI,
        centered on the RoI origin (shape (B, grid_size^3, 3))."""
        # nonzero() on an all-ones tensor enumerates every grid index.
        faked_features = paddle.ones((grid_size, grid_size, grid_size))
        dense_idx = faked_features.nonzero()  # (N, 3) [x_idx, y_idx, z_idx]
        dense_idx = dense_idx.tile([batch_size_rcnn, 1,
                                    1]).astype('float32')  # (B, 6x6x6, 3)
        local_roi_size = rois.reshape([batch_size_rcnn, -1])[:, 3:6]
        roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(axis=1) \
            - (local_roi_size.unsqueeze(axis=1) / 2)  # (B, 6x6x6, 3)
        return roi_grid_points
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads/pvrcnn_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/models/roi_heads/pvrcnn_head.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import math
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.common import pointnet2_stack as pointnet2_stack_modules
from paddle3d.models.heads.roi_heads.roi_head_base import RoIHeadBase
from paddle3d.models.layers import (constant_init, kaiming_normal_init,
xavier_normal_init)
@manager.HEADS.add_component
class PVRCNNHead(RoIHeadBase):
    """PV-RCNN second-stage head: pools keypoint features onto RoI grid
    points and refines each proposal's class score and box.

    Args:
        input_channels (int): Channels of the pooled keypoint features.
        model_cfg (dict): Head configuration (``roi_grid_pool``, ``shared_fc``,
            ``cls_fc``, ``reg_fc``, ``nms_config``, ``dp_ratio``, ...).
        num_class (int): Number of foreground classes. Default: 1.
    """
    def __init__(self, input_channels, model_cfg, num_class=1, **kwargs):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.roi_grid_pool_layer, num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
            input_channels=input_channels,
            config=self.model_cfg["roi_grid_pool"])
        grid_size = self.model_cfg["roi_grid_pool"]["grid_size"]
        # Flattened feature size of one RoI: grid_size^3 cells x channels.
        pre_channel = grid_size * grid_size * grid_size * num_c_out
        self.pre_channel = pre_channel
        shared_fc_list = []
        for k in range(0, self.model_cfg["shared_fc"].__len__()):
            shared_fc_list.extend([
                nn.Conv1D(
                    pre_channel,
                    self.model_cfg["shared_fc"][k],
                    kernel_size=1,
                    bias_attr=False),
                nn.BatchNorm1D(self.model_cfg["shared_fc"][k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg["shared_fc"][k]
            # Dropout between blocks, but never after the last one.
            if k != self.model_cfg["shared_fc"].__len__(
            ) - 1 and self.model_cfg["dp_ratio"] > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg["dp_ratio"]))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)
        self.cls_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.num_class,
            fc_list=self.model_cfg["cls_fc"])
        self.reg_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg["reg_fc"])
        self.init_weights(weight_init='xavier')
    def init_weights(self, weight_init='xavier'):
        """Initialize conv weights per ``weight_init`` scheme and give the
        final regression layer a small-variance normal init."""
        if weight_init not in ['kaiming', 'xavier', 'normal']:
            raise NotImplementedError
        for m in self.sublayers():
            if isinstance(m, nn.Conv2D) or isinstance(m, nn.Conv1D):
                if weight_init == 'normal':
                    m.weight.set_value(
                        paddle.normal(mean=0, std=0.001, shape=m.weight.shape))
                elif weight_init == 'kaiming':
                    kaiming_normal_init(
                        m.weight, reverse=isinstance(m, nn.Linear))
                elif weight_init == 'xavier':
                    xavier_normal_init(
                        m.weight, reverse=isinstance(m, nn.Linear))
                if m.bias is not None:
                    constant_init(m.bias, value=0)
            elif isinstance(m, nn.BatchNorm1D):
                constant_init(m.weight, value=1)
                constant_init(m.bias, value=0)
        self.reg_layers[-1].weight.set_value(
            paddle.normal(
                mean=0, std=0.001, shape=self.reg_layers[-1].weight.shape))
    def roi_grid_pool(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        Returns:
            Tensor of pooled features, shape (BxN, grid_size^3, C).
        """
        batch_size = batch_dict['batch_size']
        rois = batch_dict['rois']
        point_coords = batch_dict['point_coords']
        point_features = batch_dict['point_features']
        # Weight keypoint features by their foreground confidence.
        point_features = point_features * batch_dict[
            'point_cls_scores'].reshape([-1, 1])
        global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
            rois, grid_size=self.model_cfg["roi_grid_pool"]
            ["grid_size"])  # (BxN, 6x6x6, 3)
        global_roi_grid_points = global_roi_grid_points.reshape(
            [batch_size, -1, 3])  # (B, Nx6x6x6, 3)
        xyz = point_coords[:, 1:4]
        # Per-sample point counts, required by the stack-batched pool layer.
        xyz_batch_cnt = paddle.zeros((batch_size, ), dtype='int32')
        batch_idx = point_coords[:, 0]
        for k in range(batch_size):
            xyz_batch_cnt[k] = (batch_idx == k).sum().astype(
                xyz_batch_cnt.dtype)
        new_xyz = global_roi_grid_points.reshape([-1, 3])
        new_xyz_batch_cnt = paddle.full((batch_size, ),
                                        global_roi_grid_points.shape[1],
                                        dtype='int32')
        pooled_points, pooled_features = self.roi_grid_pool_layer(
            xyz=xyz,
            xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz,
            new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=point_features,
        )  # (M1 + M2 ..., C)
        pooled_features = pooled_features.reshape([
            -1, self.model_cfg["roi_grid_pool"]["grid_size"]**3,
            pooled_features.shape[-1]
        ])  # (BxN, 6x6x6, C)
        return pooled_features
    def forward(self, batch_dict):
        """
        :param input_data: input dict
        :return: batch_dict with refined predictions (eval) or with
            ``self.forward_ret_dict`` populated for loss computation (train).
        """
        targets_dict = self.proposal_layer(
            batch_dict,
            nms_config=self.model_cfg["nms_config"]
            ['train' if self.training else 'test'])
        if self.training:
            targets_dict = batch_dict.get('roi_targets_dict', None)
            if targets_dict is None:
                targets_dict = self.assign_targets(batch_dict)
                batch_dict['rois'] = targets_dict['rois']
                batch_dict['roi_labels'] = targets_dict['roi_labels']
        # RoI aware pooling
        pooled_features = self.roi_grid_pool(batch_dict)  # (BxN, 6x6x6, C)
        grid_size = self.model_cfg["roi_grid_pool"]["grid_size"]
        pooled_features = pooled_features.transpose([0, 2, 1])
        shared_features = self.shared_fc_layer(
            pooled_features.reshape([-1, self.pre_channel, 1]))
        rcnn_cls = self.cls_layers(shared_features).transpose(
            [0, 2, 1]).squeeze(axis=1)  # (B, 1 or 2)
        rcnn_reg = self.reg_layers(shared_features).transpose(
            [0, 2, 1]).squeeze(axis=1)  # (B, C)
        if not self.training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'],
                rois=batch_dict['rois'],
                cls_preds=rcnn_cls,
                box_preds=rcnn_reg)
            batch_dict['batch_cls_preds'] = batch_cls_preds
            batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg
            self.forward_ret_dict = targets_dict
        return batch_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads/voxelrcnn_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/models/roi_heads/voxelrcnn_head.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
from paddle3d.models.common import generate_voxel2pinds, get_voxel_centers
from paddle3d.models.common.pointnet2_stack import \
voxel_pool_modules as voxelpool_stack_modules
from paddle3d.models.heads.roi_heads.roi_head_base import RoIHeadBase
from paddle3d.models.layers import constant_init, xavier_normal_init
@manager.HEADS.add_component
class VoxelRCNNHead(RoIHeadBase):
    """Voxel R-CNN second-stage head: pools multi-scale sparse voxel features
    onto RoI grid points and refines each proposal.

    Args:
        input_channels (dict): Channels per feature source name.
        model_cfg (dict): Head configuration (``roi_grid_pool``, ``shared_fc``,
            ``cls_fc``, ``reg_fc``, ``nms_config``, ``dp_ratio``, ...).
        point_cloud_range (list): [x_min, y_min, z_min, x_max, y_max, z_max].
        voxel_size (list): Voxel size [vx, vy, vz].
        num_class (int): Number of foreground classes. Default: 1.
    """
    def __init__(self,
                 input_channels,
                 model_cfg,
                 point_cloud_range,
                 voxel_size,
                 num_class=1,
                 **kwargs):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        self.pool_cfg = model_cfg["roi_grid_pool"]
        LAYER_cfg = self.pool_cfg["pool_layers"]
        self.point_cloud_range = point_cloud_range
        self.voxel_size = voxel_size
        c_out = 0
        # One voxel SA pooling layer per configured feature source.
        self.roi_grid_pool_layers = nn.LayerList()
        for src_name in self.pool_cfg["features_source"]:
            mlps = LAYER_cfg[src_name]["mlps"]
            for k in range(len(mlps)):
                mlps[k] = [input_channels[src_name]] + mlps[k]
            pool_layer = voxelpool_stack_modules.NeighborVoxelSAModuleMSG(
                query_ranges=LAYER_cfg[src_name]["query_ranges"],
                nsamples=LAYER_cfg[src_name]["nsample"],
                radii=LAYER_cfg[src_name]["pool_radius"],
                mlps=mlps,
                pool_method=LAYER_cfg[src_name]["pool_method"],
            )
            self.roi_grid_pool_layers.append(pool_layer)
            c_out += sum([x[-1] for x in mlps])
        GRID_SIZE = self.model_cfg["roi_grid_pool"]["grid_size"]
        # c_out = sum([x[-1] for x in mlps])
        # Flattened feature size of one RoI: grid_size^3 cells x channels.
        pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out
        shared_fc_list = []
        for k in range(0, self.model_cfg["shared_fc"].__len__()):
            shared_fc_list.extend([
                nn.Linear(
                    pre_channel,
                    self.model_cfg["shared_fc"][k],
                    bias_attr=False),
                nn.BatchNorm1D(self.model_cfg["shared_fc"][k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg["shared_fc"][k]
            # Dropout between blocks, but never after the last one.
            if k != self.model_cfg["shared_fc"].__len__(
            ) - 1 and self.model_cfg["dp_ratio"] > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg["dp_ratio"]))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)
        cls_fc_list = []
        for k in range(0, self.model_cfg['cls_fc'].__len__()):
            cls_fc_list.extend([
                nn.Linear(
                    pre_channel, self.model_cfg['cls_fc'][k], bias_attr=False),
                nn.BatchNorm1D(self.model_cfg['cls_fc'][k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg["cls_fc"][k]
            if k != self.model_cfg["cls_fc"].__len__(
            ) - 1 and self.model_cfg["dp_ratio"] > 0:
                cls_fc_list.append(nn.Dropout(self.model_cfg["dp_ratio"]))
        self.cls_fc_layers = nn.Sequential(*cls_fc_list)
        self.cls_pred_layer = nn.Linear(
            pre_channel, self.num_class, bias_attr=True)
        reg_fc_list = []
        for k in range(0, self.model_cfg['reg_fc'].__len__()):
            reg_fc_list.extend([
                nn.Linear(
                    pre_channel, self.model_cfg['reg_fc'][k], bias_attr=False),
                nn.BatchNorm1D(self.model_cfg['reg_fc'][k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg['reg_fc'][k]
            if k != self.model_cfg['reg_fc'].__len__(
            ) - 1 and self.model_cfg["dp_ratio"] > 0:
                reg_fc_list.append(nn.Dropout(self.model_cfg["dp_ratio"]))
        self.reg_fc_layers = nn.Sequential(*reg_fc_list)
        self.reg_pred_layer = nn.Linear(
            pre_channel,
            self.box_coder.code_size * self.num_class,
            bias_attr=True)
        self.init_weights()
    def init_weights(self):
        """Xavier-init the FC stacks; small-variance normal init for the two
        prediction layers with zero bias."""
        for module_list in [
                self.shared_fc_layer, self.cls_fc_layers, self.reg_fc_layers
        ]:
            for m in module_list.sublayers():
                if isinstance(m, nn.Linear):
                    xavier_normal_init(m.weight, reverse=True)
                    if m.bias is not None:
                        constant_init(m.bias, value=0)
                elif isinstance(m, nn.BatchNorm1D):
                    constant_init(m.weight, value=1)
                    constant_init(m.bias, value=0)
        self.cls_pred_layer.weight.set_value(
            paddle.normal(
                mean=0, std=0.01, shape=self.cls_pred_layer.weight.shape))
        constant_init(self.cls_pred_layer.bias, value=0)
        self.reg_pred_layer.weight.set_value(
            paddle.normal(
                mean=0, std=0.001, shape=self.reg_pred_layer.weight.shape))
        constant_init(self.reg_pred_layer.bias, value=0)
    def roi_grid_pool(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        Returns:
            Tensor of multi-scale pooled features, (BxN, grid_size^3, C_total).
        """
        rois = batch_dict['rois']
        batch_size = batch_dict['batch_size']
        with_vf_transform = batch_dict.get('with_voxel_feature_transform',
                                           False)
        roi_grid_xyz, _ = self.get_global_grid_points_of_roi(
            rois, grid_size=self.pool_cfg["grid_size"])  # (BxN, 6x6x6, 3)
        # roi_grid_xyz: (B, Nx6x6x6, 3)
        roi_grid_xyz = roi_grid_xyz.reshape([batch_size, -1, 3])
        # compute the voxel coordinates of grid points
        roi_grid_coords_x = paddle.floor(
            (roi_grid_xyz[:, :, 0:1] - self.point_cloud_range[0]) /
            self.voxel_size[0])
        roi_grid_coords_y = paddle.floor(
            (roi_grid_xyz[:, :, 1:2] - self.point_cloud_range[1]) /
            self.voxel_size[1])
        roi_grid_coords_z = paddle.floor(
            (roi_grid_xyz[:, :, 2:3] - self.point_cloud_range[2]) /
            self.voxel_size[2])
        # roi_grid_coords: (B, Nx6x6x6, 3)
        roi_grid_coords = paddle.concat(
            [roi_grid_coords_x, roi_grid_coords_y, roi_grid_coords_z], axis=-1)
        batch_idx = paddle.zeros((batch_size, roi_grid_coords.shape[1], 1))
        for bs_idx in range(batch_size):
            batch_idx[bs_idx, :, 0] = bs_idx
        # roi_grid_coords: (B, Nx6x6x6, 4)
        # roi_grid_coords = paddle.concat([batch_idx, roi_grid_coords], axis=-1)
        # roi_grid_coords = roi_grid_coords.int()
        roi_grid_batch_cnt = paddle.full([
            batch_size,
        ],
                                         roi_grid_coords.shape[1],
                                         dtype='int32')
        pooled_features_list = []
        for k, src_name in enumerate(self.pool_cfg["features_source"]):
            pool_layer = self.roi_grid_pool_layers[k]
            cur_stride = batch_dict['multi_scale_3d_strides'][src_name]
            cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name]
            if with_vf_transform:
                cur_sp_tensors = batch_dict['multi_scale_3d_features_post'][
                    src_name]
            else:
                cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name]
            # compute voxel center xyz and batch_cnt
            cur_coords = cur_sp_tensors.indices().transpose([1, 0])
            cur_voxel_xyz = get_voxel_centers(
                cur_coords[:, 1:4],
                downsample_strides=cur_stride,
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range)
            cur_voxel_xyz_batch_cnt = paddle.zeros([
                batch_size,
            ],
                                                   dtype='int32')
            for bs_idx in range(batch_size):
                cur_voxel_xyz_batch_cnt[bs_idx] = (
                    cur_coords[:, 0] == bs_idx).sum().astype(
                        cur_voxel_xyz_batch_cnt.dtype)
            # get voxel2point tensor
            v2p_ind_tensor = generate_voxel2pinds(cur_sp_tensors.shape,
                                                  cur_coords)
            # compute the grid coordinates in this scale, in [batch_idx, x y z] order
            cur_roi_grid_coords = paddle.floor(roi_grid_coords / cur_stride)
            cur_roi_grid_coords = paddle.concat(
                [batch_idx, cur_roi_grid_coords], axis=-1)
            cur_roi_grid_coords = cur_roi_grid_coords.astype('int32')
            # voxel neighbor aggregation
            pooled_features = pool_layer(
                xyz=cur_voxel_xyz,
                xyz_batch_cnt=cur_voxel_xyz_batch_cnt,
                new_xyz=roi_grid_xyz.reshape([-1, 3]),
                new_xyz_batch_cnt=roi_grid_batch_cnt,
                new_coords=cur_roi_grid_coords.reshape([-1, 4]),
                features=cur_sp_tensors.values(),
                voxel2point_indices=v2p_ind_tensor)
            pooled_features = pooled_features.reshape(
                [-1, self.pool_cfg["grid_size"]**3,
                 pooled_features.shape[-1]])  # (BxN, 6x6x6, C)
            pooled_features_list.append(pooled_features)
        ms_pooled_features = paddle.concat(pooled_features_list, axis=-1)
        return ms_pooled_features
    def forward(self, batch_dict):
        """
        :param input_data: input dict
        :return: batch_dict with refined predictions (eval) or with
            ``self.forward_ret_dict`` populated for loss computation (train).
        """
        targets_dict = self.proposal_layer(
            batch_dict,
            nms_config=self.model_cfg['nms_config']
            ['train' if self.training else 'test'])
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            batch_dict['rois'] = targets_dict['rois']
            batch_dict['roi_labels'] = targets_dict['roi_labels']
        # RoI aware pooling
        pooled_features = self.roi_grid_pool(batch_dict)  # (BxN, 6x6x6, C)
        # Box Refinement
        pooled_features = pooled_features.reshape(
            [pooled_features.shape[0], -1])
        shared_features = self.shared_fc_layer(pooled_features)
        rcnn_cls = self.cls_pred_layer(self.cls_fc_layers(shared_features))
        rcnn_reg = self.reg_pred_layer(self.reg_fc_layers(shared_features))
        if not self.training:
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'],
                rois=batch_dict['rois'],
                cls_preds=rcnn_cls,
                box_preds=rcnn_reg)
            batch_dict['batch_cls_preds'] = batch_cls_preds
            batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg
            self.forward_ret_dict = targets_dict
        return batch_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads/target_assigner/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .iou3d_nms_utils import *
from .proposal_target_layer import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads/target_assigner/iou3d_nms_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
3D IoU Calculation and Rotated NMS
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
"""
import paddle
from paddle3d.ops import iou3d_nms_cuda
def boxes_iou3d_gpu(boxes_a, boxes_b):
    """Compute the pairwise 3D IoU matrix between two sets of boxes (GPU).

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        ans_iou: (N, M) IoU of every box in ``boxes_a`` with every box in
        ``boxes_b``.
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 7

    # Vertical extents, shaped (N, 1) and (1, M) so comparisons broadcast.
    half_h_a = boxes_a[:, 5] / 2
    half_h_b = boxes_b[:, 5] / 2
    a_top = (boxes_a[:, 2] + half_h_a).reshape([-1, 1])
    a_bottom = (boxes_a[:, 2] - half_h_a).reshape([-1, 1])
    b_top = (boxes_b[:, 2] + half_h_b).reshape([1, -1])
    b_bottom = (boxes_b[:, 2] - half_h_b).reshape([1, -1])

    # Bird's-eye-view overlap area comes from the CUDA kernel.
    overlaps_bev = iou3d_nms_cuda.boxes_overlap_bev_gpu(boxes_a, boxes_b)

    # Height overlap: intersection length of the two vertical intervals.
    overlaps_h = paddle.clip(
        paddle.minimum(a_top, b_top) - paddle.maximum(a_bottom, b_bottom),
        min=0)

    # 3D intersection volume and IoU (epsilon guards the division).
    overlaps_3d = overlaps_bev * overlaps_h
    vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).reshape([-1, 1])
    vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).reshape([1, -1])
    return overlaps_3d / paddle.clip(vol_a + vol_b - overlaps_3d, min=1e-6)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/heads/roi_heads/target_assigner/proposal_target_layer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/models/roi_heads/target_assigner/proposal_target_layer.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import numpy as np
import paddle
import paddle.nn as nn
from . import iou3d_nms_utils
class ProposalTargetLayer(nn.Layer):
    """Sample RoIs from first-stage proposals and build RCNN training targets.

    Ported from OpenPCDet's ProposalTargetLayer (Apache-2.0).
    """

    def __init__(self, roi_sampler_cfg):
        """
        Args:
            roi_sampler_cfg (dict): sampling configuration; keys used here
                include roi_per_image, fg_ratio, hard_bg_ratio,
                reg_fg_thresh, cls_fg_thresh, cls_bg_thresh,
                cls_bg_thresh_lo, cls_score_type and the optional
                sample_roi_by_each_class flag.
        """
        super().__init__()
        self.roi_sampler_cfg = roi_sampler_cfg

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            targets_dict:
                rois: (B, M, 7 + C)
                gt_of_rois: (B, M, 7 + C + 1)
                gt_iou_of_rois: (B, M)
                roi_scores: (B, M)
                roi_labels: (B, M)
                reg_valid_mask: (B, M)
                rcnn_cls_labels: (B, M)
        """
        batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
            batch_dict=batch_dict)

        # Regression is only supervised on confident foreground RoIs.
        reg_valid_mask = (batch_roi_ious >
                          self.roi_sampler_cfg["reg_fg_thresh"]).astype('int64')

        # Classification labels.
        if self.roi_sampler_cfg["cls_score_type"] == 'cls':
            # Hard 0/1 labels; RoIs whose IoU falls between the bg and fg
            # thresholds are marked -1 and ignored by the loss.
            batch_cls_labels = (
                batch_roi_ious >
                self.roi_sampler_cfg["cls_fg_thresh"]).astype('int64')
            ignore_mask = (batch_roi_ious > self.roi_sampler_cfg["cls_bg_thresh"]) & \
                          (batch_roi_ious < self.roi_sampler_cfg["cls_fg_thresh"])
            batch_cls_labels[ignore_mask > 0] = -1
        elif self.roi_sampler_cfg["cls_score_type"] == 'roi_iou':
            # Soft labels: IoU linearly rescaled to [0, 1] between the bg
            # and fg thresholds.
            iou_bg_thresh = self.roi_sampler_cfg["cls_bg_thresh"]
            iou_fg_thresh = self.roi_sampler_cfg["cls_fg_thresh"]
            fg_mask = batch_roi_ious > iou_fg_thresh
            bg_mask = batch_roi_ious < iou_bg_thresh
            interval_mask = (fg_mask == 0) & (bg_mask == 0)
            batch_cls_labels = (fg_mask > 0).astype('float32')
            batch_cls_labels[interval_mask] = \
                (batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
        else:
            raise NotImplementedError

        targets_dict = {
            'rois': batch_rois,
            'gt_of_rois': batch_gt_of_rois,
            'gt_iou_of_rois': batch_roi_ious,
            'roi_scores': batch_roi_scores,
            'roi_labels': batch_roi_labels,
            'reg_valid_mask': reg_valid_mask,
            'rcnn_cls_labels': batch_cls_labels
        }
        return targets_dict

    def sample_rois_for_rcnn(self, batch_dict):
        """Match each RoI to its best GT box by 3D IoU, then subsample a
        fixed-size fg/bg set per batch element.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            Tuple of per-batch tensors: sampled rois, their assigned GT
            boxes, IoUs, scores and labels (all sized roi_per_image).
        """
        batch_size = batch_dict['batch_size']
        rois = batch_dict['rois']
        roi_scores = batch_dict['roi_scores']
        roi_labels = batch_dict['roi_labels']
        gt_boxes = batch_dict['gt_boxes']
        code_size = rois.shape[-1]
        batch_rois = paddle.zeros(
            (batch_size, self.roi_sampler_cfg["roi_per_image"], code_size),
            dtype=rois.dtype)
        batch_gt_of_rois = paddle.zeros(
            (batch_size, self.roi_sampler_cfg["roi_per_image"], code_size + 1),
            dtype=gt_boxes.dtype)
        batch_roi_ious = paddle.zeros(
            (batch_size, self.roi_sampler_cfg["roi_per_image"]),
            dtype=rois.dtype)
        batch_roi_scores = paddle.zeros(
            (batch_size, self.roi_sampler_cfg["roi_per_image"]),
            dtype=roi_scores.dtype)
        batch_roi_labels = paddle.zeros(
            (batch_size, self.roi_sampler_cfg["roi_per_image"]),
            dtype=roi_labels.dtype)
        for index in range(batch_size):
            cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
                rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
            # Strip the zero-padded GT rows at the tail of this sample.
            k = cur_gt.__len__() - 1
            while k >= 0 and cur_gt[k].sum() == 0:
                k -= 1
            cur_gt = cur_gt[:k + 1]
            # Keep a single all-zero GT row so IoU matching stays well-formed.
            cur_gt = paddle.zeros(
                (1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt
            if self.roi_sampler_cfg.get('sample_roi_by_each_class', False):
                max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
                    rois=cur_roi,
                    roi_labels=cur_roi_labels,
                    gt_boxes=cur_gt[:, 0:7],
                    gt_labels=cur_gt[:, -1].astype('int64'))
            else:
                iou3d = iou3d_nms_utils.boxes_iou3d_gpu(
                    cur_roi, cur_gt[:, 0:7])  # (M, N)
                max_overlaps = paddle.max(iou3d, axis=1)
                gt_assignment = paddle.argmax(iou3d, axis=1)
            sampled_inds = self.subsample_rois(max_overlaps=max_overlaps)
            batch_rois[index] = cur_roi[sampled_inds]
            batch_roi_labels[index] = cur_roi_labels[sampled_inds]
            batch_roi_ious[index] = max_overlaps[sampled_inds]
            batch_roi_scores[index] = cur_roi_scores[sampled_inds]
            batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
        return batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels

    def subsample_rois(self, max_overlaps):
        """Select roi_per_image indices split into fg / hard-bg / easy-bg.

        Args:
            max_overlaps: (num_rois,) best GT IoU per RoI.
        Returns:
            (roi_per_image,) int64 indices into the RoI set.
        """
        fg_rois_per_image = int(
            np.round(self.roi_sampler_cfg["fg_ratio"] *
                     self.roi_sampler_cfg["roi_per_image"]))
        fg_thresh = min(self.roi_sampler_cfg["reg_fg_thresh"],
                        self.roi_sampler_cfg["cls_fg_thresh"])
        fg_inds = (max_overlaps >= fg_thresh).nonzero()
        easy_bg_inds = (max_overlaps <
                        self.roi_sampler_cfg["cls_bg_thresh_lo"]).nonzero()
        hard_bg_inds = ((max_overlaps < self.roi_sampler_cfg["reg_fg_thresh"]) &
                        (max_overlaps >=
                         self.roi_sampler_cfg["cls_bg_thresh_lo"])).nonzero()
        fg_num_rois = fg_inds.numel().item()
        bg_num_rois = hard_bg_inds.numel().item() + easy_bg_inds.numel().item()
        if fg_num_rois > 0 and bg_num_rois > 0:
            fg_inds = fg_inds.reshape([-1])
            # Sample foreground without replacement via a random permutation.
            fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
            rand_num = paddle.to_tensor(
                np.random.permutation(fg_num_rois),
                dtype=max_overlaps.dtype).astype('int64')
            fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
            # Fill the remainder with background samples.
            bg_rois_per_this_image = self.roi_sampler_cfg[
                "roi_per_image"] - fg_rois_per_this_image
            bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds,
                                          bg_rois_per_this_image,
                                          self.roi_sampler_cfg["hard_bg_ratio"])
            sampled_inds = paddle.concat((fg_inds, bg_inds), axis=0)
            return sampled_inds
        elif fg_num_rois > 0 and bg_num_rois == 0:
            fg_inds = fg_inds.reshape([-1])
            # Only foreground exists: sample roi_per_image fg indices with
            # replacement. BUGFIX: the original called the nonexistent
            # paddle.from_numpy(...)/Tensor.type_as(...); use
            # paddle.to_tensor exactly as the branch above does.
            rand_num = paddle.to_tensor(
                np.floor(
                    np.random.rand(self.roi_sampler_cfg["roi_per_image"]) *
                    fg_num_rois),
                dtype=max_overlaps.dtype).astype('int64')
            fg_inds = fg_inds[rand_num]
            bg_inds = fg_inds[fg_inds < 0]  # yields an empty tensor
            sampled_inds = paddle.concat((fg_inds, bg_inds), axis=0)
            return sampled_inds
        elif bg_num_rois > 0 and fg_num_rois == 0:
            # Only background exists: fill the whole quota with bg samples.
            bg_rois_per_this_image = self.roi_sampler_cfg["roi_per_image"]
            bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds,
                                          bg_rois_per_this_image,
                                          self.roi_sampler_cfg["hard_bg_ratio"])
            return bg_inds
        else:
            print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min(),
                                                    max_overlaps.max()))
            print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
            raise NotImplementedError

    @staticmethod
    def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image,
                       hard_bg_ratio):
        """Sample background indices, mixing hard and easy negatives
        (with replacement) according to ``hard_bg_ratio``."""
        if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
            hard_bg_inds = hard_bg_inds.reshape([-1])
            easy_bg_inds = easy_bg_inds.reshape([-1])
            hard_bg_rois_num = min(
                int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
            easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
            # Sample hard bg with replacement.
            rand_idx = paddle.randint(
                low=0, high=hard_bg_inds.numel(),
                shape=(hard_bg_rois_num, )).astype('int64')
            hard_bg_inds = hard_bg_inds[rand_idx]
            # Sample easy bg with replacement.
            rand_idx = paddle.randint(
                low=0, high=easy_bg_inds.numel(),
                shape=(easy_bg_rois_num, )).astype('int64')
            easy_bg_inds = easy_bg_inds[rand_idx]
            bg_inds = paddle.concat([hard_bg_inds, easy_bg_inds], axis=0)
        elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
            hard_bg_inds = hard_bg_inds.reshape([-1])
            hard_bg_rois_num = bg_rois_per_this_image
            rand_idx = paddle.randint(
                low=0, high=hard_bg_inds.numel(),
                shape=(hard_bg_rois_num, )).astype('int64')
            bg_inds = hard_bg_inds[rand_idx]
        elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
            easy_bg_inds = easy_bg_inds.reshape([-1])
            easy_bg_rois_num = bg_rois_per_this_image
            rand_idx = paddle.randint(
                low=0, high=easy_bg_inds.numel(),
                shape=(easy_bg_rois_num, )).astype('int64')
            bg_inds = easy_bg_inds[rand_idx]
        else:
            raise NotImplementedError
        return bg_inds

    @staticmethod
    def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
        """Per-class IoU matching: each RoI is only compared against GT
        boxes carrying the same class label.

        Args:
            rois: (N, 7)
            roi_labels: (N)
            gt_boxes: (M, 7)
            gt_labels: (M)
        Returns:
            max_overlaps: (N) best same-class IoU per RoI.
            gt_assignment: (N) index into ``gt_boxes`` of the best match.
        """
        max_overlaps = paddle.zeros([rois.shape[0]])
        gt_assignment = paddle.zeros([roi_labels.shape[0]], gt_labels.dtype)
        # NOTE(review): range() over tensor min/max relies on paddle 0-d
        # tensors supporting __index__ — confirm on the target version.
        for k in range(gt_labels.min(), gt_labels.max() + 1):
            roi_mask = (roi_labels == k)
            gt_mask = (gt_labels == k)
            if roi_mask.sum() > 0 and gt_mask.sum() > 0:
                cur_roi = rois[roi_mask]
                cur_gt = gt_boxes[gt_mask]
                original_gt_assignment = gt_mask.nonzero().reshape([-1])
                iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi,
                                                        cur_gt)  # (M, N)
                cur_max_overlaps = paddle.max(iou3d, axis=1)
                cur_gt_assignment = paddle.argmax(iou3d, axis=1)
                max_overlaps[roi_mask] = cur_max_overlaps
                gt_assignment[roi_mask] = original_gt_assignment[
                    cur_gt_assignment]
        return max_overlaps, gt_assignment
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/optimizers/lr_schedulers.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L432
Ths copyright of mmcv is as follows:
Apache-2.0 license [see LICENSE for details].
This code is based on https://github.com/TRAILab/CaDDN/blob/master/tools/train_utils/optimization/learning_schedules_fastai.py#L60
Ths copyright of CaDDN is as follows:
Apache-2.0 license [see LICENSE for details].
"""
from functools import partial
import paddle
from paddle.optimizer.lr import LRScheduler
from paddle3d.apis import manager
from .utils import annealing_cos
@manager.LR_SCHEDULERS.add_component
class OneCycleWarmupDecayLr(LRScheduler):
    """One-cycle LR schedule: cosine warmup to a peak, then cosine decay.

    The phase table is built lazily in ``before_run`` once the total
    iteration budget is known; ``get_lr`` then anneals within the phase
    containing the current iteration.
    """

    def __init__(self,
                 base_learning_rate,
                 lr_ratio_peak=10,
                 lr_ratio_trough=1e-4,
                 step_ratio_peak=0.4):
        self.base_learning_rate = base_learning_rate
        self.lr_ratio_peak = lr_ratio_peak
        self.lr_ratio_trough = lr_ratio_trough
        self.step_ratio_peak = step_ratio_peak
        # Populated by before_run: [start_iter, end_iter, ratio_from, ratio_to]
        self.lr_phases = []
        self.anneal_func = annealing_cos

    def before_run(self, max_iters):
        """Build the warmup and decay phases from the iteration budget."""
        peak_iter = int(self.step_ratio_peak * max_iters)
        self.lr_phases.append([0, peak_iter, 1, self.lr_ratio_peak])
        self.lr_phases.append(
            [peak_iter, max_iters, self.lr_ratio_peak, self.lr_ratio_trough])

    def get_lr(self, curr_iter):
        """Return the cosine-annealed LR for ``curr_iter``."""
        for start_iter, end_iter, ratio_from, ratio_to in self.lr_phases:
            if start_iter <= curr_iter < end_iter:
                frac = (curr_iter - start_iter) / (end_iter - start_iter)
                return self.anneal_func(
                    self.base_learning_rate * ratio_from,
                    self.base_learning_rate * ratio_to, frac)
class LRSchedulerCycle(LRScheduler):
    """Base scheduler holding phase tables for both LR and momentum.

    Each input phase is a ``(start_ratio, anneal_func)`` pair; start ratios
    are fractions of ``total_step`` and must begin at 0 and increase. The
    stored tables contain ``(start_step, end_step, anneal_func)`` tuples.
    """

    def __init__(self, total_step, lr_phases, mom_phases):
        """
        Args:
            total_step (int): total number of optimization steps.
            lr_phases: sequence of (start_ratio, func) for the LR.
            mom_phases: sequence of (start_ratio, func) for the momentum.
        """
        self.total_step = total_step
        # The two tables were previously built by duplicated loops; they
        # share one helper now.
        self.lr_phases = self._build_phases(lr_phases, total_step)
        self.mom_phases = self._build_phases(mom_phases, total_step)
        super().__init__()

    @staticmethod
    def _build_phases(phases, total_step):
        """Convert (start_ratio, func) pairs into (start, end, func) steps.

        Fixes the original assertion, which compared an already-scaled step
        count against the next raw ratio and would spuriously fail for
        three or more phases.
        """
        built = []
        prev_start = None
        for i, (start, lambda_func) in enumerate(phases):
            if prev_start is not None:
                # Phase start ratios must be strictly increasing.
                assert prev_start < start
            if isinstance(lambda_func, str):
                # Configs may pass the anneal function as source text.
                # NOTE(review): eval on untrusted config strings is unsafe.
                lambda_func = eval(lambda_func)
            end = (int(phases[i + 1][0] * total_step)
                   if i < len(phases) - 1 else total_step)
            built.append((int(start * total_step), end, lambda_func))
            prev_start = start
        # The first phase must cover step 0.
        assert built[0][0] == 0
        return built
@manager.OPTIMIZERS.add_component
class OneCycle(LRSchedulerCycle):
    """One-cycle LR + momentum schedule (fastai style)."""

    def __init__(self, total_step, lr_max, moms, div_factor, pct_start):
        """
        Args:
            total_step (int): total number of optimization steps.
            lr_max (float): peak learning rate.
            moms (list[float]): [mom_high, mom_low] momentum bounds.
            div_factor (float): initial LR is lr_max / div_factor.
            pct_start (float): fraction of steps spent ramping up.
        """
        self.lr_max = lr_max
        self.moms = moms
        # BUGFIX: copy instead of aliasing — set_mom() mutates
        # last_moms[0] in place, which previously overwrote the caller's
        # list and self.moms as well.
        self.last_moms = list(moms)
        self.div_factor = div_factor
        self.pct_start = pct_start
        low_lr = self.lr_max / self.div_factor
        # Ramp low -> lr_max, then anneal lr_max -> low/1e4.
        lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)),
                     (self.pct_start,
                      partial(annealing_cos, self.lr_max, low_lr / 1e4)))
        # Momentum moves opposite to LR: high -> low, then back up.
        mom_phases = ((0, partial(annealing_cos, *self.moms)),
                      (self.pct_start, partial(annealing_cos,
                                               *self.moms[::-1])))
        self.learning_rate = low_lr
        super().__init__(total_step, lr_phases, mom_phases)

    def get_lr(self):
        """Evaluate the LR of the phase containing the current step."""
        lr = self.last_lr
        for start, end, func in self.lr_phases:
            if self.last_epoch >= start:
                lr = func((self.last_epoch - start) / (end - start))
        return lr

    def set_mom(self):
        """Refresh last_moms[0] from the momentum phase table."""
        mom = self.last_moms[0]
        for start, end, func in self.mom_phases:
            if self.last_epoch >= start:
                mom = func((self.last_epoch - start) / (end - start))
        self.last_moms[0] = mom

    def step(self, epoch=None):
        """Advance one step; ``epoch`` is kept for API compatibility."""
        super().step()
        self.set_mom()

    def get_mom(self):
        """Return the current [momentum, momentum_other] pair."""
        return self.last_moms
@manager.LR_SCHEDULERS.add_component
class CosineAnnealingDecayByEpoch(paddle.optimizer.lr.CosineAnnealingDecay):
    """Cosine annealing evaluated per epoch instead of per iteration."""

    # Patched externally by the training loop before scheduling starts.
    iters_per_epoch = 1
    warmup_iters = 0

    def get_lr(self):
        """Anneal from base_lr to eta_min over T_max epochs."""
        if self.last_epoch == 0:
            return self.base_lr
        cur_epoch = (self.last_epoch +
                     self.warmup_iters) // self.iters_per_epoch
        return annealing_cos(self.base_lr, self.eta_min,
                             cur_epoch / self.T_max)

    def _get_closed_form_lr(self):
        """The per-step formula is already closed-form."""
        return self.get_lr()
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/optimizers/momentum_schedulers.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/momentum_updater.py#L268
Ths copyright of mmcv is as follows:
Apache-2.0 license [see LICENSE for details].
"""
from paddle3d.apis import manager
from .utils import annealing_cos
@manager.OPTIMIZERS.add_component
class OneCycleDecayWarmupMomentum(object):
    """One-cycle momentum: cosine decay to a trough, then cosine recovery."""

    def __init__(self,
                 momentum_peak=0.95,
                 momentum_trough=0.85,
                 step_ratio_peak=0.4):
        self.momentum_peak = momentum_peak
        self.momentum_trough = momentum_trough
        self.step_ratio_peak = step_ratio_peak
        # Populated by before_run once max_iters is known:
        # [start_iter, end_iter, momentum_from, momentum_to]
        self.momentum_phases = []

    def before_run(self, max_iters):
        """Split training into a decay phase and a recovery phase."""
        turn_iter = int(self.step_ratio_peak * max_iters)
        self.momentum_phases.append(
            [0, turn_iter, self.momentum_peak, self.momentum_trough])
        self.momentum_phases.append(
            [turn_iter, max_iters, self.momentum_trough, self.momentum_peak])

    def get_momentum(self, curr_iter):
        """Return the cosine-annealed momentum for ``curr_iter``."""
        for start_iter, end_iter, m_from, m_to in self.momentum_phases:
            if start_iter <= curr_iter < end_iter:
                frac = (curr_iter - start_iter) / (end_iter - start_iter)
                return annealing_cos(m_from, m_to, frac)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/optimizers/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .lr_schedulers import (CosineAnnealingDecayByEpoch, OneCycle,
OneCycleWarmupDecayLr)
from .momentum_schedulers import OneCycleDecayWarmupMomentum
from .optimizers import AdamWOnecycle, OneCycleAdam
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/optimizers/optimizers.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OptimizerWrapper."""
from collections import defaultdict
import paddle
from paddle.optimizer import Adam, AdamW
from paddle3d.apis import manager
from .lr_schedulers import OneCycle
@manager.OPTIMIZERS.add_component
class OneCycleAdam(object):
    """Adam wrapper driven by one-cycle LR and beta1 (momentum) schedules.

    LR and beta1 are set manually every iteration from the scheduler
    objects; gradient clipping and decoupled weight decay are also applied
    manually (see clip_grad/regularize), which is why grad_clip is detached
    from the inner optimizer in __init__.
    """

    def __init__(self,
                 learning_rate,
                 beta1,
                 beta2=0.999,
                 epsilon=1e-08,
                 parameters=None,
                 weight_decay=None,
                 grad_clip=None,
                 name=None,
                 lazy_mode=False):
        # learning_rate and beta1 are deliberately NOT passed to Adam:
        # they are scheduler objects, applied per-iteration in before_iter.
        self.optimizer = paddle.optimizer.Adam(
            beta2=beta2,
            epsilon=epsilon,
            parameters=parameters,
            grad_clip=grad_clip,
            name=name,
            lazy_mode=lazy_mode)
        self.weight_decay = weight_decay
        self._learning_rate = learning_rate
        self.beta1 = beta1
        # Take ownership of clipping: stash the clipper and disable it on
        # the inner optimizer so clip_grad() controls when it runs.
        self._grad_clip = self.optimizer._grad_clip
        self.optimizer._grad_clip = None

    def _set_beta1(self, beta1, pow):
        """Set the scheduled beta1 and rewrite its power accumulator.

        ``pow`` is the 1-based iteration count, so the accumulator holds
        beta1 ** iteration as Adam's bias correction expects.
        """
        # currently support Adam and AdamW only
        update_beta1 = beta1**pow
        # NOTE(review): touches paddle optimizer internals (_beta1,
        # _accumulators); verify against the pinned paddle version.
        self.optimizer._beta1 = beta1
        if 'beta1_pow_acc' in self.optimizer._accumulators:
            for k, v in self.optimizer._accumulators['beta1_pow_acc'].items():
                self.optimizer._accumulators['beta1_pow_acc'][k] = v.fill_(
                    update_beta1)

    def before_run(self, max_iters):
        """Let both schedulers build their phase tables."""
        if self._learning_rate is not None:
            self._learning_rate.before_run(max_iters)
        if self.beta1 is not None:
            self.beta1.before_run(max_iters)

    def before_iter(self, curr_iter):
        """Apply the scheduled LR and beta1 for this iteration."""
        lr = self._learning_rate.get_lr(curr_iter=curr_iter)
        self.optimizer.set_lr(lr)
        beta1 = self.beta1.get_momentum(curr_iter=curr_iter)
        self._set_beta1(beta1, pow=curr_iter + 1)

    def regularize(self):
        """Apply decoupled weight decay by scaling parameters in place."""
        scale_value = 1 - self.optimizer.get_lr() * self.weight_decay
        if not isinstance(self.optimizer._param_groups[0], dict):
            for i, param in enumerate(self.optimizer._param_groups):
                if param.stop_gradient:
                    continue
                param.set_value(param * scale_value)
        else:
            # optimize parameters in groups
            for param_group in self.optimizer._param_groups:
                # NOTE(review): params_grads appears unused in this branch.
                params_grads = defaultdict(lambda: list())
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    param.set_value(param * scale_value)

    def clip_grad(self):
        """Collect (param, grad) pairs and run the stashed clipper."""
        if not isinstance(self.optimizer._param_groups[0], dict):
            params_grads = []
            for param in self.optimizer._param_groups:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    grad_var = param._grad_ivar()
                    params_grads.append((param, grad_var))
        else:
            # optimize parameters in groups
            # NOTE(review): params_grads is rebound each loop turn, so only
            # the last param group reaches the clipper below — confirm
            # whether grouped params are used with this wrapper.
            for idx, param_group in enumerate(self.optimizer._param_groups):
                params_grads = defaultdict(lambda: list())
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        grad_var = param._grad_ivar()
                        params_grads['params'].append((param, grad_var))
                params_grads.update(
                    {k: v
                     for k, v in param_group.items() if k != 'params'})
        self._grad_clip(params_grads)

    def after_iter(self):
        """Clip, decay, step, and clear gradients — in that order."""
        self.clip_grad()
        self.regularize()
        self.optimizer.step()
        self.optimizer.clear_grad()

    def set_state_dict(self, optimizer):
        """Restore the inner optimizer's state."""
        self.optimizer.set_state_dict(optimizer)

    def get_lr(self):
        """Return the inner optimizer's current learning rate."""
        return self.optimizer.get_lr()

    def state_dict(self):
        """Return the inner optimizer's state for checkpointing."""
        return self.optimizer.state_dict()
@manager.OPTIMIZERS.add_component
class AdamWOnecycle(AdamW):
    """AdamW whose beta1 follows a OneCycle momentum schedule.

    Args:
        learning_rate: scalar LR or an LRScheduler (typically OneCycle).
        beta1, beta2: Adam moment coefficients.
        clip_grad_by_norm (float, optional): global-norm clip threshold.
        parameters: parameters to optimize.
        **optim_args: forwarded to paddle.optimizer.AdamW.
    """

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 clip_grad_by_norm=None,
                 parameters=None,
                 **optim_args):
        # BUGFIX: grad_clip was previously assigned only inside the if,
        # but referenced unconditionally below — a NameError whenever
        # clip_grad_by_norm was left at its default.
        grad_clip = None
        if clip_grad_by_norm is not None:
            grad_clip = paddle.nn.ClipGradByNorm(clip_norm=clip_grad_by_norm)
        self.learning_rate = learning_rate
        super().__init__(
            learning_rate=learning_rate,
            parameters=parameters,
            beta1=beta1,
            beta2=beta2,
            grad_clip=grad_clip,
            **optim_args)

    def step(self):
        """Sync beta1 from the OneCycle schedule, then take an AdamW step."""
        if isinstance(self._learning_rate, OneCycle):
            self._beta1 = self._learning_rate.get_mom()[0]
        super().step()
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/optimizers/utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import cos, pi
def annealing_cos(start, end, factor, weight=1):
    """Cosine-anneal from ``start`` to ``end``.

    ``factor`` runs from 0 (returns ``start``) to 1 (returns ``end``);
    ``weight`` scales the cosine contribution around ``end``.
    """
    blend = 0.5 * (cos(pi * factor) + 1)
    return end + weight * (start - end) * blend
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common/model_nms_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle3d.ops import iou3d_nms_cuda
def class_agnostic_nms(box_scores,
                       box_preds,
                       label_preds,
                       nms_config,
                       score_thresh=None):
    """Class-agnostic rotated NMS over predicted 3D boxes.

    Args:
        box_scores: (N,) confidence per box.
        box_preds: (N, 7) boxes [x, y, z, dx, dy, dz, heading].
        label_preds: (N,) predicted class labels.
        nms_config (dict): requires 'nms_pre_maxsize', 'nms_thresh' and
            'nms_post_maxsize'.
        score_thresh (float, optional): when given, boxes below it are
            dropped before NMS; if nothing survives, one placeholder
            detection with score/label -1 is returned instead.

    Returns:
        (selected_scores, selected_labels, selected_boxes)
    """

    def nms(box_scores, box_preds, label_preds, nms_config):
        # Keep only the top nms_pre_maxsize boxes by score before NMS.
        order = box_scores.argsort(0, descending=True)
        order = order[:nms_config['nms_pre_maxsize']]
        box_preds = paddle.gather(box_preds, index=order)
        box_scores = paddle.gather(box_scores, index=order)
        label_preds = paddle.gather(label_preds, index=order)
        # When order is one-value tensor,
        # boxes[order] loses a dimension, so we add a reshape
        keep, num_out = iou3d_nms_cuda.nms_gpu(box_preds,
                                               nms_config['nms_thresh'])
        # keep holds the surviving indices; only the first num_out entries
        # are valid, capped at nms_post_maxsize.
        selected = keep[0:num_out]
        selected = selected[:nms_config['nms_post_maxsize']]
        selected_score = paddle.gather(box_scores, index=selected)
        selected_box = paddle.gather(box_preds, index=selected)
        selected_label = paddle.gather(label_preds, index=selected)
        return selected_score, selected_label, selected_box

    if score_thresh is not None:
        scores_mask = box_scores >= score_thresh

        def box_empty(box_scores, box_preds, label_preds):
            # Placeholder output so downstream code always gets one row.
            fake_score = paddle.to_tensor([-1.0], dtype=box_scores.dtype)
            fake_label = paddle.to_tensor([-1.0], dtype=label_preds.dtype)
            fake_box = paddle.to_tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
                                        dtype=box_preds.dtype)
            return fake_score, fake_label, fake_box

        def box_not_empty(scores_mask, box_scores, box_preds, label_preds,
                          nms_config):
            # Filter to boxes above the score threshold, then run NMS.
            nonzero_index = paddle.nonzero(scores_mask)
            box_scores = paddle.gather(box_scores, index=nonzero_index)
            box_preds = paddle.gather(box_preds, index=nonzero_index)
            label_preds = paddle.gather(label_preds, index=nonzero_index)
            return nms(box_scores, box_preds, label_preds, nms_config)

        # Static-graph conditional: both branches must exist at trace time,
        # so the empty case is handled with paddle.static.nn.cond rather
        # than a Python if.
        return paddle.static.nn.cond(
            paddle.logical_not(scores_mask.any()), lambda: box_empty(
                box_scores, box_preds, label_preds), lambda: box_not_empty(
                    scores_mask, box_scores, box_preds, label_preds, nms_config)
        )
    else:
        return nms(box_scores, box_preds, label_preds, nms_config)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .box_utils import *
from .model_nms_utils import *
from .pointnet2_stack import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common/box_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
def rotate_points_along_z(points, angle):
    """Rotate batched points around the z-axis.

    Based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/utils/common_utils.py#L35

    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y

    Returns:
        (B, N, 3 + C) rotated points; extra channels pass through untouched.
    """
    cos_a = paddle.cos(angle)
    sin_a = paddle.sin(angle)
    zero = paddle.zeros((points.shape[0], ), dtype='float32')
    one = paddle.ones((points.shape[0], ), dtype='float32')
    # One row-major 3x3 rotation matrix per batch element.
    rot = paddle.stack(
        (cos_a, sin_a, zero, -sin_a, cos_a, zero, zero, zero, one),
        axis=1).reshape([-1, 3, 3])
    xyz_rot = paddle.matmul(points[:, :, 0:3], rot)
    return paddle.concat((xyz_rot, points[:, :, 3:]), axis=-1)
def boxes_to_corners_3d(boxes3d):
    """Expand center-form boxes into their 8 corner points.

        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/utils/box_utils.py#L28

    Args:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the
            box center.

    Returns:
        (N, 8, 3) corner coordinates, ordered per the diagram above.
    """
    # Corner signs of a unit cube (see diagram), scaled to half-extents.
    sign_template = paddle.to_tensor((
        [1, 1, -1],
        [1, -1, -1],
        [-1, -1, -1],
        [-1, 1, -1],
        [1, 1, 1],
        [1, -1, 1],
        [-1, -1, 1],
        [-1, 1, 1],
    )) / 2
    corners = boxes3d[:, None, 3:6].tile([1, 8, 1]) * sign_template[None, :, :]
    # Rotate each box's corners by its heading, then translate to center.
    corners = rotate_points_along_z(
        corners.reshape([-1, 8, 3]), boxes3d[:, 6]).reshape([-1, 8, 3])
    return corners + boxes3d[:, None, 0:3]
def get_voxel_centers(voxel_coords, downsample_strides, voxel_size,
                      point_cloud_range):
    """Compute metric-space centers of voxels given their grid coordinates.

    Based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/utils/common_utils.py#L66

    Args:
        voxel_coords: (N, 3) integer voxel coordinates in (z, y, x) order.
        downsample_strides: stride factor applied to the base voxel size.
        voxel_size: base (x, y, z) voxel dimensions.
        point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max].

    Returns:
        (N, 3) float tensor of voxel center coordinates in (x, y, z) order.
    """
    assert voxel_coords.shape[1] == 3
    # Voxel coords are stored (z, y, x); flip to (x, y, z) before scaling.
    zyx_to_xyz = paddle.to_tensor([2, 1, 0], dtype='int32')
    centers = paddle.index_select(
        voxel_coords, zyx_to_xyz, axis=-1).astype('float32')
    effective_voxel_size = paddle.to_tensor(voxel_size).astype(
        'float32') * downsample_strides
    range_min = paddle.to_tensor(point_cloud_range[0:3]).astype('float32')
    # +0.5 moves from the voxel's min corner to its center.
    return (centers + 0.5) * effective_voxel_size + range_min
def generate_voxel2pinds(sparse_tensor_shape, sparse_tensor_indices):
    """Build a dense (B, Z, Y, X) map from voxel position to its point index.

    Args:
        sparse_tensor_shape: sparse tensor shape (batch, *spatial, channels);
            the trailing channel dim is dropped.
        sparse_tensor_indices: (N, ndim) integer coordinates of active voxels.

    Returns:
        Dense int tensor where active voxels hold their row index in
        ``sparse_tensor_indices`` and empty voxels hold -1.
    """
    dense_shape = [sparse_tensor_shape[0]] + list(sparse_tensor_shape[1:-1])
    # Scatter 1-based indices so the default-filled zeros become -1 after
    # the final subtraction, marking empty voxels.
    one_based = paddle.arange(
        sparse_tensor_indices.shape[0], dtype='int32') + 1
    return paddle.scatter_nd(
        index=sparse_tensor_indices,
        updates=one_based,
        shape=dense_shape) - 1
def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)):
    """Return a copy of the boxes with their extents enlarged.

    Args:
        boxes3d: (N, 7+) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is
            the box center; only dx/dy/dz are modified.
        extra_width: (extra_x, extra_y, extra_z) amounts added to the extents.

    Returns:
        Enlarged boxes; the input tensor is left untouched.
    """
    enlarged = boxes3d.clone()
    padding = paddle.to_tensor(
        extra_width, dtype=enlarged.dtype).reshape([1, -1])
    enlarged[:, 3:6] += padding
    return enlarged
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common/pointnet2_stack/voxel_query_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/pointnet2/pointnet2_stack/voxel_query_utils.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
from typing import List
import paddle
import paddle.nn as nn
from paddle3d.ops import pointnet2_ops
def voxel_query(max_range: List[int], radius: float, nsample: int, xyz: paddle.Tensor, \
        new_xyz: paddle.Tensor, new_coords: paddle.Tensor, point_indices: paddle.Tensor):
    """Query up to ``nsample`` neighbors of each keypoint via a voxel search.

    Args:
        max_range: (z_range, y_range, x_range) max voxel offsets searched
            around each keypoint's voxel.
        radius: float, search radius of the balls.
        nsample: int, maximum number of features in the balls.
        xyz: (N1 + N2, 3) xyz coordinates of the stacked source points.
        new_xyz: (M1 + M2, 3) xyz coordinates of the keypoints.
        new_coords: (M1 + M2, 4), [batch_id, z, y, x] cooridnates of keypoints.
        point_indices: (batch_size, Z, Y, X) 4-D tensor recording the point indices of voxels.

    Returns:
        idx: (M1 + M2, nsample) tensor with the indicies of the features that form the query balls.
        empty_ball_mask: (M1 + M2,) bool mask of keypoints whose ball is empty.
    """
    z_range, y_range, x_range = max_range
    idx = pointnet2_ops.voxel_query_wrapper(new_xyz, xyz, new_coords, point_indices, \
        radius, nsample, z_range, y_range, x_range)

    # The custom op marks empty balls with -1 in the first slot; zero those
    # rows so downstream gathers stay in-bounds, and report the mask instead.
    empty_ball_mask = (idx[:, 0] == -1)
    idx[empty_ball_mask] = 0

    return idx, empty_ball_mask
class VoxelQueryAndGrouping(nn.Layer):
    """Voxel-based ball query followed by feature grouping on stacked batches."""

    def __init__(self, max_range: int, radius: float, nsample: int):
        """
        Args:
            max_range: (z, y, x) max voxel offsets searched around each keypoint
            radius: float, radius of ball
            nsample: int, maximum number of features to gather in the ball
        """
        super().__init__()
        self.max_range, self.radius, self.nsample = max_range, radius, nsample

    def forward(self, new_coords: paddle.Tensor, xyz: paddle.Tensor,
                xyz_batch_cnt: paddle.Tensor, new_xyz: paddle.Tensor,
                new_xyz_batch_cnt: paddle.Tensor, features: paddle.Tensor,
                voxel2point_indices: paddle.Tensor):
        """
        Args:
            new_coords: (M1 + M2 ..., 3) centers voxel indices of the ball query
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            features: (N1 + N2 ..., C) tensor of features to group
            voxel2point_indices: (B, Z, Y, X) tensor of points indices of voxels

        Returns:
            grouped_features: (M1 + M2, C, nsample) grouped features
            grouped_xyz: (M1 + M2, 3, nsample) grouped coordinates
            empty_ball_mask: (M1 + M2,) mask of queries with no neighbor
        """
        # Bug fix: the diagnostic previously printed new_xyz_batch_cnt while
        # labelling it xyz_batch_cnt; report the tensor it actually checks.
        assert xyz.shape[0] == xyz_batch_cnt.sum(
        ), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
        assert new_coords.shape[0] == new_xyz_batch_cnt.sum(), \
            'new_coords: %s, new_xyz_batch_cnt: %s' % (str(new_coords.shape), str(new_xyz_batch_cnt))
        batch_size = xyz_batch_cnt.shape[0]

        # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
        idx1, empty_ball_mask1 = voxel_query(self.max_range, self.radius,
                                             self.nsample, xyz, new_xyz,
                                             new_coords, voxel2point_indices)

        # voxel_query returns indices into the whole stacked point tensor;
        # convert them to per-sample offsets for grouping_operation_stack.
        idx1 = idx1.reshape([batch_size, -1, self.nsample])
        count = 0
        for bs_idx in range(batch_size):
            idx1[bs_idx] -= count
            count += xyz_batch_cnt[bs_idx]
        idx1 = idx1.reshape([-1, self.nsample])
        idx1[empty_ball_mask1] = 0

        idx = idx1
        empty_ball_mask = empty_ball_mask1

        grouped_xyz = pointnet2_ops.grouping_operation_stack(
            xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)
        # grouped_features: (M1 + M2, C, nsample)
        grouped_features = pointnet2_ops.grouping_operation_stack(
            features, xyz_batch_cnt, idx, new_xyz_batch_cnt)

        return grouped_features, grouped_xyz, empty_ball_mask
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common/pointnet2_stack/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .pointnet2_modules import *
from .pointnet2_utils import *
from .voxel_pool_modules import *
from .voxel_query_utils import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common/pointnet2_stack/pointnet2_utils.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.ops import pointnet2_ops
class QueryAndGroup(nn.Layer):
    """Ball query + feature grouping for stacked (variable-size) batches."""

    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        Args:
            radius: float, radius of ball
            nsample: int, maximum number of features to gather in the ball
            use_xyz: whether to concatenate relative xyz to grouped features
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz

    def forward(self,
                xyz: paddle.Tensor,
                xyz_batch_cnt: paddle.Tensor,
                new_xyz: paddle.Tensor,
                new_xyz_batch_cnt: paddle.Tensor,
                features: paddle.Tensor = None):
        """
        Args:
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            features: (N1 + N2 ..., C) tensor of features to group

        Returns:
            new_features: (M1 + M2, C, nsample) tensor
            idx: (M1 + M2, nsample) neighbor indices
        """
        # Bug fix: the diagnostic previously printed new_xyz_batch_cnt while
        # labelling it xyz_batch_cnt; report the tensor it actually checks.
        assert xyz.shape[0] == xyz_batch_cnt.sum(
        ), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
        assert new_xyz.shape[0] == new_xyz_batch_cnt.sum(), \
            'new_xyz: %s, new_xyz_batch_cnt: %s' % (str(new_xyz.shape), str(new_xyz_batch_cnt))

        # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
        idx = pointnet2_ops.ball_query_stack(new_xyz, new_xyz_batch_cnt, xyz,
                                             xyz_batch_cnt, self.radius,
                                             self.nsample)
        # Empty balls are marked with -1 in the first slot; zero them so the
        # grouping op stays in-bounds, then blank out their outputs below.
        empty_ball_mask = (idx[:, 0] == -1)
        idx[empty_ball_mask] = 0

        grouped_xyz = pointnet2_ops.grouping_operation_stack(
            xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)  # (M1 + M2, 3, nsample)
        # Convert to coordinates relative to each query center.
        grouped_xyz -= new_xyz.unsqueeze(-1)
        grouped_xyz[empty_ball_mask] = 0

        if features is not None:
            grouped_features = pointnet2_ops.grouping_operation_stack(
                features, xyz_batch_cnt, idx,
                new_xyz_batch_cnt)  # (M1 + M2, C, nsample)
            grouped_features[empty_ball_mask] = 0
            if self.use_xyz:
                new_features = paddle.concat(
                    [grouped_xyz, grouped_features],
                    axis=1)  # (M1 + M2 ..., C + 3, nsample)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz

        return new_features, idx
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common/pointnet2_stack/pointnet2_modules.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
from typing import List
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.models.layers import constant_init, kaiming_normal_init
from . import pointnet2_utils
def build_local_aggregation_module(input_channels, config):
    """Create a local feature-aggregation layer from a config dict.

    Args:
        input_channels: channel count of the per-point input features,
            prepended to every MLP spec.
        config: dict with 'name' (only 'StackSAModuleMSG' is implemented),
            'mlps', 'pool_radius' and 'nsample' entries.

    Returns:
        (cur_layer, num_c_out): the aggregation layer and its total output
        channel count (sum of the last MLP widths across scales).
    """
    local_aggregation_name = config.get('name', 'StackSAModuleMSG')

    if local_aggregation_name == 'StackSAModuleMSG':
        # Bug fix: build fresh spec lists instead of writing the expanded
        # specs back into config["mlps"], which mutated the caller's config
        # and would corrupt it on a second call with the same dict.
        mlps = [[input_channels] + list(spec) for spec in config["mlps"]]
        cur_layer = StackSAModuleMSG(
            radii=config["pool_radius"],
            nsamples=config["nsample"],
            mlps=mlps,
            use_xyz=True,
            pool_method='max_pool',
        )
        num_c_out = sum([x[-1] for x in mlps])
    elif local_aggregation_name == 'VectorPoolAggregationModuleMSG':
        raise NotImplementedError
    else:
        raise NotImplementedError
    return cur_layer, num_c_out
class StackSAModuleMSG(nn.Layer):
    """Multi-scale-grouping set-abstraction module for stacked batches.

    For each scale it ball-queries neighbors, runs a shared MLP over the
    grouped features and pools across the neighborhood; the per-scale
    outputs are concatenated along the channel axis.
    """

    def __init__(self,
                 *,
                 radii: List[float],
                 nsamples: List[int],
                 mlps: List[List[int]],
                 use_xyz: bool = True,
                 pool_method='max_pool'):
        """
        Args:
            radii: list of float, list of radii to group with
            nsamples: list of int, number of samples in each ball query
            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
            use_xyz: if True, relative xyz is concatenated to the grouped
                features (each MLP input width is widened by 3 in place)
            pool_method: max_pool / avg_pool
        """
        super(StackSAModuleMSG, self).__init__()
        assert len(radii) == len(nsamples) == len(mlps)

        self.groupers = nn.LayerList()
        self.mlps = nn.LayerList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz))
            mlp_spec = mlps[i]
            if use_xyz:
                # NOTE: widens the caller's spec list in place by the 3 xyz channels.
                mlp_spec[0] += 3
            shared_mlps = []
            for k in range(len(mlp_spec) - 1):
                shared_mlps.extend([
                    nn.Conv2D(
                        mlp_spec[k],
                        mlp_spec[k + 1],
                        kernel_size=1,
                        bias_attr=False),
                    nn.BatchNorm2D(mlp_spec[k + 1]),
                    nn.ReLU()
                ])
            self.mlps.append(nn.Sequential(*shared_mlps))
        self.pool_method = pool_method

        self.init_weights()

    def init_weights(self):
        """Kaiming-init conv weights; BN weights to 1 and all biases to 0."""
        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                kaiming_normal_init(m.weight)
                if m.bias is not None:
                    constant_init(m.bias, value=0)
            if isinstance(m, nn.BatchNorm2D):
                constant_init(m.weight, value=1.0)
                constant_init(m.bias, value=0)

    def forward(self,
                xyz,
                xyz_batch_cnt,
                new_xyz,
                new_xyz_batch_cnt,
                features=None,
                empty_voxel_set_zeros=True):
        """
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3)
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :param empty_voxel_set_zeros: unused here; kept for interface compatibility
        :return:
            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []
        for k in range(len(self.groupers)):
            new_features, ball_idxs = self.groupers[k](
                xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt,
                features)  # (M1 + M2, C, nsample)
            # Add a leading batch dim so Conv2D treats (M1+M2) as width.
            new_features = new_features.transpose([1, 0, 2]).unsqueeze(
                axis=0)  # (1, C, M1 + M2 ..., nsample)
            new_features = self.mlps[k](
                new_features)  # (1, C, M1 + M2 ..., nsample)

            # Pool over the nsample neighbors of each query point.
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features,
                    kernel_size=[1, new_features.shape[3]]).squeeze(
                        axis=-1)  # (1, C, M1 + M2 ...)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features,
                    kernel_size=[1, new_features.shape[3]]).squeeze(
                        axis=-1)  # (1, C, M1 + M2 ...)
            else:
                raise NotImplementedError
            new_features = new_features.squeeze(axis=0).transpose(
                [1, 0])  # (M1 + M2 ..., C)
            new_features_list.append(new_features)

        # Concatenate the per-scale descriptors along channels.
        new_features = paddle.concat(
            new_features_list, axis=1)  # (M1 + M2 ..., C)

        return new_xyz, new_features
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/common/pointnet2_stack/voxel_pool_modules.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/pointnet2/pointnet2_stack/voxel_pool_modules.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
from typing import List
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.models.common.pointnet2_stack import voxel_query_utils
from paddle3d.models.layers import constant_init, kaiming_normal_init
class NeighborVoxelSAModuleMSG(nn.Layer):
    """Voxel-neighborhood set-abstraction module with multi-scale grouping.

    For each scale: project input features with a 1x1 conv, group neighboring
    voxel features via voxel query, add a learned positional encoding of the
    relative offsets, pool over the neighborhood and project again.
    """

    def __init__(self,
                 *,
                 query_ranges: List[List[int]],
                 radii: List[float],
                 nsamples: List[int],
                 mlps: List[List[int]],
                 use_xyz: bool = True,
                 pool_method='max_pool'):
        """
        Args:
            query_ranges: list of int, list of neighbor ranges to group with
            radii: list of float, search radius per scale
            nsamples: list of int, number of samples in each ball query
            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
            use_xyz: unused here; kept for interface compatibility
            pool_method: max_pool / avg_pool
        """
        super().__init__()
        assert len(query_ranges) == len(nsamples) == len(mlps)

        self.groupers = nn.LayerList()
        self.mlps_in = nn.LayerList()    # per-scale input feature projection
        self.mlps_pos = nn.LayerList()   # per-scale positional encoding of offsets
        self.mlps_out = nn.LayerList()   # per-scale output projection
        for i in range(len(query_ranges)):
            max_range = query_ranges[i]
            nsample = nsamples[i]
            radius = radii[i]
            self.groupers.append(
                voxel_query_utils.VoxelQueryAndGrouping(max_range, radius,
                                                        nsample))
            mlp_spec = mlps[i]

            cur_mlp_in = nn.Sequential(
                nn.Conv1D(
                    mlp_spec[0], mlp_spec[1], kernel_size=1, bias_attr=False),
                nn.BatchNorm1D(mlp_spec[1]))

            cur_mlp_pos = nn.Sequential(
                nn.Conv2D(3, mlp_spec[1], kernel_size=1, bias_attr=False),
                nn.BatchNorm2D(mlp_spec[1]))

            cur_mlp_out = nn.Sequential(
                nn.Conv1D(
                    mlp_spec[1], mlp_spec[2], kernel_size=1, bias_attr=False),
                nn.BatchNorm1D(mlp_spec[2]), nn.ReLU())

            self.mlps_in.append(cur_mlp_in)
            self.mlps_pos.append(cur_mlp_pos)
            self.mlps_out.append(cur_mlp_out)

        self.relu = nn.ReLU()
        self.pool_method = pool_method

        self.init_weights()

    def init_weights(self):
        """Kaiming-init conv weights; BN weights to 1 and all biases to 0."""
        for m in self.sublayers():
            if isinstance(m, nn.Conv2D) or isinstance(m, nn.Conv1D):
                kaiming_normal_init(m.weight)
                if m.bias is not None:
                    constant_init(m.bias, value=0)
            if isinstance(m, nn.BatchNorm2D) or isinstance(m, nn.BatchNorm1D):
                constant_init(m.weight, value=1.0)
                constant_init(m.bias, value=0)

    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, \
                new_coords, features, voxel2point_indices):
        """
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3)
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param new_coords: (M1 + M2 ..., 4) voxel coords of the query centers,
            [batch_idx, x, y, z] on input (reordered below)
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :param voxel2point_indices: (B, Z, Y, X) tensor of point indices
        :return:
            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        # change the order to [batch_idx, z, y, x]
        index = paddle.to_tensor([0, 3, 2, 1], dtype='int32')
        new_coords = paddle.index_select(new_coords, index, axis=-1)
        new_features_list = []

        for k in range(len(self.groupers)):
            # features_in: (1, C, M1+M2) — project input features first so
            # grouping gathers the already-projected channels.
            features_in = features.transpose([1, 0]).unsqueeze(0)
            features_in = self.mlps_in[k](features_in)
            # features_in: (1, M1+M2, C)
            features_in = features_in.transpose([0, 2, 1])
            # features_in: (M1+M2, C)
            features_in = features_in.reshape([-1, features_in.shape[-1]])

            # grouped_features: (M1+M2, C, nsample)
            # grouped_xyz: (M1+M2, 3, nsample)
            grouped_features, grouped_xyz, empty_ball_mask = self.groupers[k](
                new_coords, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt,
                features_in, voxel2point_indices)
            grouped_features[empty_ball_mask] = 0

            # grouped_features: (1, C, M1+M2, nsample)
            grouped_features = grouped_features.transpose([1, 0,
                                                           2]).unsqueeze(axis=0)

            # grouped_xyz: (M1+M2, 3, nsample) — offsets relative to centers.
            grouped_xyz = grouped_xyz - new_xyz.unsqueeze(-1)
            grouped_xyz[empty_ball_mask] = 0
            # grouped_xyz: (1, 3, M1+M2, nsample)
            grouped_xyz = grouped_xyz.transpose([1, 0, 2]).unsqueeze(0)

            # grouped_xyz: (1, C, M1+M2, nsample) — positional encoding added
            # to the grouped features before the nonlinearity.
            position_features = self.mlps_pos[k](grouped_xyz)
            new_features = grouped_features + position_features
            new_features = self.relu(new_features)

            # Pool over the nsample neighbors of each query voxel.
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features,
                    kernel_size=[1, new_features.shape[3]]).squeeze(
                        axis=-1)  # (1, C, M1 + M2 ...)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features,
                    kernel_size=[1, new_features.shape[3]]).squeeze(
                        axis=-1)  # (1, C, M1 + M2 ...)
            else:
                raise NotImplementedError

            new_features = self.mlps_out[k](new_features)
            new_features = new_features.squeeze(axis=0).transpose(
                [1, 0])  # (M1 + M2 ..., C)
            new_features_list.append(new_features)

        # (M1 + M2 ..., C) — concatenate per-scale outputs along channels.
        new_features = paddle.concat(new_features_list, axis=1)

        return new_features
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/decoders.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/bevformer/modules/decoder.py
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.transformers.transformer import inverse_sigmoid
@manager.TRANSFORMER_DECODERS.add_component
class DetectionTransformerDecoder(nn.Layer):
    """Implements the decoder in DETR3D transformer.

    Stacks ``num_layers`` decoder layers and, when ``reg_branches`` is given,
    iteratively refines the 3D reference points after each layer.

    Args:
        transformerlayers (dict | list[dict]): config(s) for the decoder
            layers; a single dict is replicated ``num_layers`` times.
        num_layers (int): number of decoder layers.
        return_intermediate (bool): Whether to return intermediate outputs.
    """

    def __init__(self,
                 transformerlayers=None,
                 num_layers=None,
                 return_intermediate=False):
        super(DetectionTransformerDecoder, self).__init__()
        if isinstance(transformerlayers, dict):
            transformerlayers = [
                copy.deepcopy(transformerlayers) for _ in range(num_layers)
            ]
        else:
            assert isinstance(transformerlayers, list) and \
                len(transformerlayers) == num_layers
        self.num_layers = num_layers
        self.layers = nn.LayerList()
        for i in range(num_layers):
            # 'type_name' selects the registered layer class; the remaining
            # keys are passed through as constructor kwargs.
            layer_name = transformerlayers[i].pop('type_name')
            decoder_layer = manager.TRANSFORMER_DECODER_LAYERS.components_dict[
                layer_name]
            params = transformerlayers[i]
            self.layers.append(decoder_layer(**params))
        self.embed_dims = self.layers[0].embed_dims
        self.pre_norm = self.layers[0].pre_norm
        self.return_intermediate = return_intermediate
        self.fp16_enabled = False

    def forward(self,
                query,
                key,
                value,
                query_pos,
                reference_points,
                reg_branches=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `Detr3DTransformerDecoder`.

        Args:
            query (Tensor): Input query with shape
                `(num_query, bs, embed_dims)`.
            reference_points (Tensor): The reference
                points of offset. has shape
                (bs, num_query, 4) when as_two_stage,
                otherwise has shape ((bs, num_query, 2).
            reg_branch: (obj:`nn.ModuleList`): Used for
                refining the regression results. Only would
                be passed when with_box_refine is True,
                otherwise would be passed a `None`.

        Returns:
            Tensor: Results with shape [1, num_query, bs, embed_dims] when
                return_intermediate is `False`, otherwise it has shape
                [num_layers, num_query, bs, embed_dims].
        """
        output = query
        intermediate = []
        intermediate_reference_points = []
        for lid, layer in enumerate(self.layers):
            # Only the (x, y) part of the reference points conditions the
            # cross-attention; a level axis is added for the sampling op.
            reference_points_input = reference_points[..., :2].unsqueeze(
                [2])  # BS NUM_QUERY NUM_LEVEL 2
            output = layer(
                output,
                key,
                value,
                query_pos,
                reference_points=reference_points_input,
                key_padding_mask=key_padding_mask,
                **kwargs)
            output = output.transpose([1, 0, 2])

            if reg_branches is not None:
                # Iterative refinement: predict offsets in inverse-sigmoid
                # space and re-sigmoid; detach so gradients stay per-layer.
                tmp = reg_branches[lid](output)

                assert reference_points.shape[-1] == 3

                new_reference_points = paddle.zeros_like(reference_points)
                new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(
                    reference_points[..., :2])
                # z comes from regression channel 4 (channels 2:4 are sizes).
                new_reference_points[..., 2:
                                     3] = tmp[..., 4:5] + inverse_sigmoid(
                                         reference_points[..., 2:3])
                reference_points = F.sigmoid(new_reference_points).detach()

            output = output.transpose([1, 0, 2])
            if self.return_intermediate:
                intermediate.append(output)
                intermediate_reference_points.append(reference_points)

        if self.return_intermediate:
            return paddle.stack(intermediate), paddle.stack(
                intermediate_reference_points)

        return output, reference_points
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/decoder_layers.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/utils/transformer.py#L408
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
from paddle3d.models.layers.param_init import constant_init
from paddle3d.models.transformers.encoder_layers import FFN, build_norm_layer
from paddle3d.utils import logger
@manager.TRANSFORMER_DECODER_LAYERS.add_component
class DetrTransformerDecoderLayer(nn.Layer):
    """Implements decoder layer in DETR transformer.

    Args:
        attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )):
            Configs for self_attention or cross_attention, the order
            should be consistent with it in `operation_order`. If it is
            a dict, it would be expand to the number of attention in
            `operation_order`.
        feedforward_channels (int): The hidden dimension for FFNs.
        ffn_dropout (float): Probability of an element to be zeroed
            in ffn. Default 0.0.
        operation_order (tuple[str]): The execution order of operation
            in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
            Default:None
        ffn_cfgs (dict): Config for the FFN sublayers.
        act_cfg (dict): The activation config for FFNs. Default: `LN`
        norm_cfg (dict): Config dict for normalization layer.
            Default: `LN`.
        ffn_num_fcs (int): The number of fully-connected layers in FFNs.
            Default:2.
        batch_first (bool): Whether inputs are (bs, num_queries, dims).
    """

    def __init__(self,
                 attn_cfgs,
                 feedforward_channels,
                 ffn_dropout=0.0,
                 operation_order=None,
                 ffn_cfgs=dict(
                     embed_dims=256,
                     feedforward_channels=1024,
                     num_fcs=2,
                     ffn_drop=0.,
                     act_cfg=dict(type_name='ReLU'),
                 ),
                 act_cfg=dict(type_name='ReLU'),
                 norm_cfg=dict(type_name='LayerNorm'),
                 ffn_num_fcs=2,
                 batch_first=False,
                 **kwargs):
        super(DetrTransformerDecoderLayer, self).__init__()

        self.batch_first = batch_first

        assert set(operation_order) & set(
            ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
            set(operation_order), f'The operation_order of' \
            f' {self.__class__.__name__} should ' \
            f'contains all four operation type ' \
            f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"

        num_attn = operation_order.count('self_attn') + operation_order.count(
            'cross_attn')
        if isinstance(attn_cfgs, dict):
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert num_attn == len(attn_cfgs), f'The length ' \
                f'of attn_cfg {num_attn} is ' \
                f'not consistent with the number of attention' \
                f'in operation_order {operation_order}.'

        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        self.pre_norm = operation_order[0] == 'norm'
        self.attentions = nn.LayerList()

        index = 0
        for operation_name in operation_order:
            if operation_name in ['self_attn', 'cross_attn']:
                if 'batch_first' in attn_cfgs[index]:
                    assert self.batch_first == attn_cfgs[index]['batch_first']
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                layer_name = attn_cfgs[index].pop('type_name')
                attention_layer = manager.ATTENTIONS.components_dict[layer_name]
                params = attn_cfgs[index]
                attention = attention_layer(**params)
                # Some custom attentions used as `self_attn`
                # or `cross_attn` can have different behavior.
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1

        self.embed_dims = self.attentions[0].embed_dims

        self.ffns = nn.LayerList()
        num_ffns = operation_order.count('ffn')
        # Bug fix: deepcopy before mutating — ffn_cfgs defaults to a mutable
        # dict shared across all instances of this class.
        ffn_cfgs = copy.deepcopy(ffn_cfgs)
        ffn_cfgs['ffn_drop'] = ffn_dropout
        ffn_cfgs['num_fcs'] = ffn_num_fcs
        ffn_cfgs['feedforward_channels'] = feedforward_channels
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert len(ffn_cfgs) == num_ffns
        for ffn_index in range(num_ffns):
            if 'embed_dims' not in ffn_cfgs[ffn_index]:
                # Bug fix: was `ffn_cfgs['embed_dims'] = ...`, which would
                # raise TypeError since ffn_cfgs is a list at this point.
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
            ffn_params = ffn_cfgs[ffn_index]
            self.ffns.append(FFN(**ffn_params))

        self.norms = nn.LayerList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

        # This layer only supports the canonical 6-op decoder order.
        assert len(operation_order) == 6
        assert set(operation_order) == set(
            ['self_attn', 'norm', 'cross_attn', 'ffn'])

        self.init_weights()

    @paddle.no_grad()
    def init_weights(self):
        """Initialize LayerNorm sublayers to identity (weight 1, bias 0)."""
        for layer in self.norms:
            if isinstance(layer, nn.LayerNorm):
                constant_init(layer.weight, value=1)
                constant_init(layer.bias, value=0)

    def forward(self,
                query,
                key=None,
                value=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `TransformerDecoderLayer`.

        **kwargs contains some specific arguments of attentions.

        Args:
            query (Tensor): The input query with shape
                [num_queries, bs, embed_dims] if
                self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
            value (Tensor): The value tensor with same shape as `key`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor] | None): 2D Tensor used in
                calculation of corresponding attention. The length of
                it should equal to the number of `attention` in
                `operation_order`. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in `self_attn` layer.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.

        Returns:
            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
        """
        norm_index = 0
        attn_index = 0
        ffn_index = 0
        identity = query
        if attn_masks is None:
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, paddle.Tensor):
            attn_masks = [
                copy.deepcopy(attn_masks) for _ in range(self.num_attn)
            ]
            logger.warning(f'Use same attn_mask in all attentions in '
                           f'{self.__class__.__name__} ')
        else:
            assert len(attn_masks) == self.num_attn, f'The length of ' \
                f'attn_masks {len(attn_masks)} must be equal ' \
                f'to the number of attention in ' \
                f'operation_order {self.num_attn}'

        for layer in self.operation_order:
            if layer == 'self_attn':
                temp_key = temp_value = query
                query = self.attentions[attn_index](
                    query,
                    temp_key,
                    temp_value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=query_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=query_key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query
            elif layer == 'norm':
                query = self.norms[norm_index](query)
                norm_index += 1
            elif layer == 'cross_attn':
                query = self.attentions[attn_index](
                    query,
                    key,
                    value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=key_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=key_padding_mask,
                    **kwargs)
                attn_index += 1
                identity = query
            elif layer == 'ffn':
                query = self.ffns[ffn_index](
                    query, identity if self.pre_norm else None)
                ffn_index += 1

        return query
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/encoder_layers.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/bevformer/modules/encoder.py
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
from paddle3d.models.layers.param_init import (constant_init, reset_parameters,
xavier_uniform_init)
from paddle import ParamAttr
from paddle.nn.initializer import Constant
from paddle3d.utils.logger import logger
def build_norm_layer(cfg, num_features, postfix='', init_val=1):
    """Build a normalization layer from a config dict.

    Args:
        cfg (dict): Config with key ``type_name`` naming a ``paddle.nn``
            normalization class (e.g. ``'BatchNorm1D'``). Optional keys:
            ``requires_grad`` (bool, default True) and any extra kwargs the
            layer accepts (e.g. ``epsilon``).
        num_features (int): Number of input channels/features.
        postfix (int | str): Appended to the abbreviation to form the name.
        init_val (float): Constant used to initialize the scale (weight).

    Returns:
        tuple[str, nn.Layer]: The generated name (e.g. ``'bn'``) and the
        constructed normalization layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type_name' not in cfg:
        raise KeyError('the cfg dict must contain the key "type_name"')
    # Work on a copy so the caller's config is never mutated.
    cfg_ = copy.deepcopy(cfg)

    layer_type = cfg_.pop('type_name')
    norm_layer = getattr(nn, layer_type)

    abbr = 'bn'
    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop('requires_grad', True)
    # Fix: respect a caller-provided epsilon instead of always overriding it.
    cfg_.setdefault('epsilon', 1e-5)
    cfg_['weight_attr'] = ParamAttr(initializer=Constant(value=init_val))
    cfg_['bias_attr'] = ParamAttr(initializer=Constant(value=0))
    layer = norm_layer(num_features, **cfg_)
    if not requires_grad:
        # Freeze the affine parameters when requested.
        for param in layer.parameters():
            param.trainable = requires_grad
    return name, layer
class FFN(nn.Layer):
    """Implements feed-forward networks (FFNs) with identity connection.

    Args:
        embed_dims (int): The feature dimension. Same as
            `MultiheadAttention`. Defaults: 256.
        feedforward_channels (int): The hidden dimension of FFNs.
            Defaults: 1024.
        num_fcs (int, optional): The number of fully-connected layers in
            FFNs. Default: 2.
        act_cfg (dict, optional): The activation config for FFNs.
            Default: dict(type_name='ReLU')
        ffn_drop (float, optional): Probability of an element to be
            zeroed in FFN. Default 0.0.
        dropout_layer (dict, optional): Config of an extra dropout layer
            applied to the FFN output (key ``type_name`` names a
            ``paddle.nn`` class). Default: None (identity).
        add_identity (bool, optional): Whether to add the
            identity connection. Default: `True`.
    """

    def __init__(self,
                 embed_dims=256,
                 feedforward_channels=1024,
                 num_fcs=2,
                 act_cfg=dict(type_name='ReLU'),
                 ffn_drop=0.,
                 dropout_layer=None,
                 add_identity=True,
                 **kwargs):
        super(FFN, self).__init__()
        assert num_fcs >= 2, 'num_fcs should be no less ' \
            f'than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        # Fix: copy before popping 'type_name'. Popping from the argument
        # mutates the shared mutable default dict, so a second FFN() built
        # with the default act_cfg would raise KeyError.
        act_cfg = copy.deepcopy(act_cfg)
        self.act_cfg = act_cfg
        self.activate = getattr(nn, act_cfg.pop('type_name'))()

        # Stack: (num_fcs - 1) x [Linear -> activation -> Dropout], then a
        # final Linear projecting back to embed_dims followed by Dropout.
        layers = []
        in_channels = embed_dims
        for _ in range(num_fcs - 1):
            layers.append(
                nn.Sequential(
                    nn.Linear(in_channels, feedforward_channels), self.activate,
                    nn.Dropout(ffn_drop)))
            in_channels = feedforward_channels
        layers.append(nn.Linear(feedforward_channels, embed_dims))
        layers.append(nn.Dropout(ffn_drop))
        self.layers = nn.Sequential(*layers)

        if dropout_layer:
            dropout_layer_ = copy.deepcopy(dropout_layer)
            dropout_layer_name = dropout_layer_.pop("type_name")
            self.dropout_layer = getattr(nn,
                                         dropout_layer_name)(**dropout_layer_)
        else:
            self.dropout_layer = nn.Identity()
        self.add_identity = add_identity
        self.init_weights()

    @paddle.no_grad()
    def init_weights(self):
        """Initialize all Linear sublayers (xavier-uniform weights)."""
        for layer in self.sublayers():
            if isinstance(layer, nn.Linear):
                reset_parameters(layer)
                xavier_uniform_init(layer.weight, reverse=True)

    def forward(self, x, identity=None):
        """Forward function for `FFN`.

        Adds `identity` (or `x` when identity is None) to the FFN output
        unless ``add_identity`` is False.
        """
        out = self.layers(x)
        if not self.add_identity:
            return self.dropout_layer(out)
        if identity is None:
            identity = x
        return identity + self.dropout_layer(out)
@manager.TRANSFORMER_ENCODER_LAYERS.add_component
class BEVFormerLayer(nn.Layer):
    """One BEVFormer encoder layer: temporal self-attention over the BEV
    query, spatial cross-attention into multi-camera features, and an FFN,
    executed in the order given by ``operation_order``.

    Args:
        attn_cfgs (list[dict] | dict): Configs for self_attention or
            cross_attention; the order should be consistent with
            `operation_order`. If it is a dict, it is expanded to the
            number of attention ops in `operation_order`.
        feedforward_channels (int): The hidden dimension for FFNs.
        ffn_dropout (float): Probability of an element to be zeroed
            in ffn. Default 0.0.
        operation_order (tuple[str]): The execution order of operations
            in the layer. Must contain exactly six entries drawn from
            ('self_attn', 'norm', 'cross_attn', 'ffn').
        act_cfg (dict): The activation config for FFNs. Default: ReLU.
        norm_cfg (dict): Config dict for normalization layer.
            Default: LayerNorm.
        ffn_num_fcs (int): The number of fully-connected layers in FFNs.
            Default: 2.
        batch_first (bool): Whether inputs are (bs, num_queries, dims).
            Default: True.
    """

    def __init__(self,
                 attn_cfgs,
                 feedforward_channels,
                 ffn_dropout=0.0,
                 operation_order=None,
                 act_cfg=dict(type_name='ReLU'),
                 norm_cfg=dict(type_name='LayerNorm'),
                 ffn_num_fcs=2,
                 batch_first=True,
                 **kwargs):
        super(BEVFormerLayer, self).__init__()
        self.batch_first = batch_first
        # operation_order may only contain the four known operation names.
        assert set(operation_order) & set(
            ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
            set(operation_order), f'The operation_order of' \
            f' {self.__class__.__name__} should ' \
            f'contains all four operation type ' \
            f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"
        num_attn = operation_order.count('self_attn') + operation_order.count(
            'cross_attn')
        # A single dict config is broadcast to every attention op.
        if isinstance(attn_cfgs, dict):
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert num_attn == len(attn_cfgs), f'The length ' \
                f'of attn_cfg {num_attn} is ' \
                f'not consistent with the number of attention' \
                f'in operation_order {operation_order}.'

        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        # Pre-norm style when the op sequence starts with 'norm'.
        self.pre_norm = operation_order[0] == 'norm'
        self.attentions = nn.LayerList()

        index = 0
        for operation_name in operation_order:
            if operation_name in ['self_attn', 'cross_attn']:
                if 'batch_first' in attn_cfgs[index]:
                    assert self.batch_first == attn_cfgs[index]['batch_first']
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                # Instantiate the attention through the ATTENTIONS registry.
                layer_name = attn_cfgs[index].pop('type_name')
                attention_layer = manager.ATTENTIONS.components_dict[layer_name]
                params = attn_cfgs[index]
                attention = attention_layer(**params)
                # Some custom attentions used as `self_attn`
                # or `cross_attn` can have different behavior.
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1

        self.embed_dims = self.attentions[0].embed_dims
        self.ffns = nn.LayerList()
        ffn_cfgs = dict(
            embed_dims=256,
            feedforward_channels=feedforward_channels,
            num_fcs=ffn_num_fcs,
            ffn_drop=ffn_dropout,
            act_cfg=act_cfg,
        )
        num_ffns = operation_order.count('ffn')
        # Each FFN gets its own deep-copied config (FFN pops 'type_name'
        # from act_cfg internally).
        ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert len(ffn_cfgs) == num_ffns
        for ffn_index in range(num_ffns):
            if 'embed_dims' not in ffn_cfgs[ffn_index]:
                ffn_cfgs['embed_dims'] = self.embed_dims
            else:
                assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
            ffn_params = ffn_cfgs[ffn_index]
            self.ffns.append(FFN(**ffn_params))

        self.norms = nn.LayerList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

        self.fp16_enabled = False
        # This implementation is specialized to the fixed 6-op BEVFormer
        # layer pattern (self_attn, norm, cross_attn, norm, ffn, norm).
        assert len(operation_order) == 6
        assert set(operation_order) == set(
            ['self_attn', 'norm', 'cross_attn', 'ffn'])
        self.init_weights()

    @paddle.no_grad()
    def init_weights(self):
        # LayerNorms start as identity transforms (weight=1, bias=0).
        for layer in self.norms:
            if isinstance(layer, nn.LayerNorm):
                constant_init(layer.weight, value=1)
                constant_init(layer.bias, value=0)

    def forward(self,
                query,
                key=None,
                value=None,
                bev_pos=None,
                query_pos=None,
                key_pos=None,
                attn_masks=None,
                query_key_padding_mask=None,
                key_padding_mask=None,
                ref_2d=None,
                ref_3d=None,
                bev_h=None,
                bev_w=None,
                reference_points_cam=None,
                mask=None,
                spatial_shapes=None,
                level_start_index=None,
                prev_bev=None,
                **kwargs):
        """Forward function for `TransformerDecoderLayer`.
        **kwargs contains some specific arguments of attentions.
        Args:
            query (Tensor): The input query with shape
                [num_queries, bs, embed_dims] if
                self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
            value (Tensor): The value tensor with same shape as `key`.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`.
                Default: None.
            attn_masks (List[Tensor] | None): 2D Tensor used in
                calculation of corresponding attention. The length of
                it should equal to the number of `attention` in
                `operation_order`. Default: None.
            query_key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_queries]. Only used in `self_attn` layer.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_keys]. Default: None.
        Returns:
            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
        """
        norm_index = 0
        attn_index = 0
        ffn_index = 0
        # `identity` tracks the residual input for the next attention/FFN.
        identity = query
        if attn_masks is None:
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, paddle.Tensor):
            # A single mask is replicated for every attention op.
            attn_masks = [
                copy.deepcopy(attn_masks) for _ in range(self.num_attn)
            ]
            logger.warning(f'Use same attn_mask in all attentions in '
                           f'{self.__class__.__name__} ')
        else:
            assert len(attn_masks) == self.num_attn, f'The length of ' \
                f'attn_masks {len(attn_masks)} must be equal ' \
                f'to the number of attention in ' \
                f'operation_order {self.num_attn}'
        for layer in self.operation_order:
            # temporal self attention: keys/values come from the previous
            # frame's BEV features (prev_bev).
            if layer == 'self_attn':
                query = self.attentions[attn_index](
                    query,
                    prev_bev,
                    prev_bev,
                    identity if self.pre_norm else None,
                    query_pos=bev_pos,
                    key_pos=bev_pos,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=query_key_padding_mask,
                    reference_points=ref_2d,
                    spatial_shapes=paddle.to_tensor([[bev_h, bev_w]],
                                                    dtype=paddle.int64),
                    level_start_index=paddle.to_tensor([0], dtype=paddle.int64),
                    **kwargs)
                attn_index += 1
                identity = query
            elif layer == 'norm':
                query = self.norms[norm_index](query)
                norm_index += 1
            # spatial cross attention: attend into multi-camera image
            # features using the projected 3D reference points.
            elif layer == 'cross_attn':
                query = self.attentions[attn_index](
                    query,
                    key,
                    value,
                    identity if self.pre_norm else None,
                    query_pos=query_pos,
                    key_pos=key_pos,
                    reference_points=ref_3d,
                    reference_points_cam=reference_points_cam,
                    mask=mask,
                    attn_mask=attn_masks[attn_index],
                    key_padding_mask=key_padding_mask,
                    spatial_shapes=spatial_shapes,
                    level_start_index=level_start_index,
                    **kwargs)
                attn_index += 1
                identity = query
            elif layer == 'ffn':
                query = self.ffns[ffn_index](
                    query, identity if self.pre_norm else None)
                ffn_index += 1
        return query
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/__init__.py
|
from .assigners import *
from .attentions import *
from .decoder_layers import *
from .decoders import *
from .encoder_layers import *
from .encoders import *
from .match_costs import *
from .positional_encoding import *
from .samplers import *
from .transformer import *
from .utils import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/encoders.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/bevformer/modules/encoder.py
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
@manager.TRANSFORMER_ENCODERS.add_component
class BEVFormerEncoder(nn.Layer):
    """BEVFormer encoder: a stack of BEVFormerLayer blocks that refine a
    BEV query using temporal self-attention and spatial cross-attention.

    Args:
        transformerlayers (dict | list[dict]): Config(s) for the encoder
            layers; a single dict is replicated ``num_layers`` times.
        num_layers (int): Number of stacked encoder layers.
        point_cloud_range (list[float]): [x_min, y_min, z_min, x_max,
            y_max, z_max] used to denormalize 3D reference points.
        num_points_in_pillar (int): Points sampled per BEV pillar along z.
        return_intermediate (bool): Whether to return intermediate outputs
            of every layer instead of only the last one.
    """

    def __init__(self,
                 transformerlayers,
                 num_layers,
                 point_cloud_range=None,
                 num_points_in_pillar=4,
                 return_intermediate=False,
                 dataset_type='nuscenes',
                 **kwargs):
        super(BEVFormerEncoder, self).__init__()
        # A single layer config is replicated for every encoder layer.
        if isinstance(transformerlayers, dict):
            transformerlayers = [
                copy.deepcopy(transformerlayers) for _ in range(num_layers)
            ]
        else:
            assert isinstance(transformerlayers, list) and \
                len(transformerlayers) == num_layers
        self.return_intermediate = return_intermediate
        self.num_points_in_pillar = num_points_in_pillar
        self.point_cloud_range = point_cloud_range
        self.layers = nn.LayerList()
        for i in range(num_layers):
            # Layers are built through the TRANSFORMER_ENCODER_LAYERS registry.
            layer_name = transformerlayers[i].pop('type_name')
            encoder_layer = manager.TRANSFORMER_ENCODER_LAYERS.components_dict[
                layer_name]
            params = transformerlayers[i]
            self.layers.append(encoder_layer(**params))

    @staticmethod
    def get_reference_points(H,
                             W,
                             Z=8,
                             num_points_in_pillar=4,
                             dim='3d',
                             bs=1,
                             dtype=paddle.float32):
        """Get the reference points used in SCA and TSA.
        Args:
            H, W: spatial shape of bev.
            Z: hight of pillar.
            D: sample D points uniformly from each pillar.
            device (obj:`device`): The device where
                reference_points should be.
        Returns:
            Tensor: reference points used in decoder, has \
                shape (bs, num_keys, num_levels, 2).
        """
        # reference points in 3D space, used in spatial cross-attention (SCA)
        if dim == '3d':
            # Pillar centers normalized to [0, 1] along each axis:
            # zs varies over pillar samples, xs over columns, ys over rows.
            zs = paddle.linspace(
                0.5, Z - 0.5, num_points_in_pillar,
                dtype=paddle.float32).cast(dtype).reshape([-1, 1, 1]).expand(
                    [num_points_in_pillar, H, W]) / Z
            xs = paddle.linspace(
                0.5, W - 0.5, W, dtype=paddle.float32).reshape([
                    1, 1, W
                ]).cast(dtype).expand([num_points_in_pillar, H, W]) / W
            ys = paddle.linspace(
                0.5, H - 0.5, H, dtype=paddle.float32).reshape([
                    1, H, 1
                ]).cast(dtype).expand([num_points_in_pillar, H, W]) / H
            ref_3d = paddle.stack((xs, ys, zs), -1)
            ref_3d = ref_3d.transpose([0, 3, 1,
                                       2]).flatten(2).transpose([0, 2, 1])
            ref_3d = ref_3d[None].tile([bs, 1, 1, 1])
            return ref_3d

        # reference points on 2D bev plane, used in temporal self-attention (TSA).
        elif dim == '2d':
            ref_y, ref_x = paddle.meshgrid(
                paddle.linspace(0.5, H - 0.5, H, dtype=paddle.float32),
                paddle.linspace(0.5, W - 0.5, W, dtype=paddle.float32))
            ref_y = ref_y.cast(dtype).reshape([-1])[None] / H
            ref_x = ref_x.cast(dtype).reshape([-1])[None] / W
            ref_2d = paddle.stack((ref_x, ref_y), -1)
            ref_2d = ref_2d.tile([bs, 1, 1]).unsqueeze(2)
            return ref_2d

    # This function must use fp32!!!
    def point_sampling(self, reference_points, point_cloud_range, img_metas):
        """Project normalized 3D reference points into every camera image.

        Returns:
            reference_points_cam: normalized image-plane coordinates with
                shape (num_cam, bs, num_query, D, 2).
            bev_mask: boolean mask (num_cam, bs, num_query, D) marking
                points that project inside the image with positive depth.
        """
        reference_points = reference_points.cast(paddle.float32)

        # In export mode lidar2img is already a stacked tensor.
        if not getattr(self, 'export_model', False):
            lidar2img = []
            for img_meta in img_metas:
                lidar2img.append(paddle.stack(img_meta['lidar2img']))
            lidar2img = paddle.stack(lidar2img)  # (B, N, 4, 4)
        else:
            lidar2img = img_metas[0]['lidar2img']
        lidar2img = lidar2img.cast(paddle.float32)
        reference_points = reference_points.clone()

        # Denormalize [0, 1] reference points into metric lidar coordinates.
        reference_points[..., 0:1] = reference_points[..., 0:1] * \
            (point_cloud_range[3] - point_cloud_range[0]) + point_cloud_range[0]
        reference_points[..., 1:2] = reference_points[..., 1:2] * \
            (point_cloud_range[4] - point_cloud_range[1]) + point_cloud_range[1]
        reference_points[..., 2:3] = reference_points[..., 2:3] * \
            (point_cloud_range[5] - point_cloud_range[2]) + point_cloud_range[2]

        # Homogeneous coordinates for the 4x4 projection.
        reference_points = paddle.concat(
            (reference_points, paddle.ones_like(reference_points[..., :1])), -1)

        reference_points = reference_points.transpose([1, 0, 2, 3])
        D, B, num_query = reference_points.shape[:3]
        num_cam = lidar2img.shape[1]

        # Broadcast points over cameras and matrices over queries.
        reference_points = reference_points.reshape(
            [D, B, 1, num_query, 4]).tile([1, 1, num_cam, 1, 1]).unsqueeze(-1)

        lidar2img = lidar2img.reshape([1, B, num_cam, 1, 4,
                                       4]).tile([D, 1, 1, num_query, 1, 1])

        reference_points_cam = paddle.matmul(
            lidar2img.cast(paddle.float32),
            reference_points.cast(paddle.float32)).squeeze(-1)
        eps = 1e-5

        # Points behind the camera (non-positive depth) are masked out.
        bev_mask = (reference_points_cam[..., 2:3] > eps)
        reference_points_cam = reference_points_cam[..., 0:2] / paddle.maximum(
            reference_points_cam[..., 2:3],
            paddle.ones_like(reference_points_cam[..., 2:3]) * eps)

        # Normalize pixel coordinates by image width/height.
        reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1]
        reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0]

        # Keep only points that land inside the image.
        bev_mask = (bev_mask & (reference_points_cam[..., 1:2] > 0.0)
                    & (reference_points_cam[..., 1:2] < 1.0)
                    & (reference_points_cam[..., 0:1] < 1.0)
                    & (reference_points_cam[..., 0:1] > 0.0))
        reference_points_cam = reference_points_cam.transpose([2, 1, 3, 0, 4])
        bev_mask = bev_mask.transpose([2, 1, 3, 0, 4]).squeeze(-1)

        return reference_points_cam, bev_mask

    def forward(self,
                bev_query,
                key,
                value,
                *args,
                bev_h=None,
                bev_w=None,
                bev_pos=None,
                spatial_shapes=None,
                level_start_index=None,
                valid_ratios=None,
                prev_bev=None,
                shift=0.,
                **kwargs):
        """Forward function for `TransformerDecoder`.
        Args:
            bev_query (Tensor): Input BEV query with shape
                `(num_query, bs, embed_dims)`.
            key & value (Tensor): Input multi-cameta features with shape
                (num_cam, num_value, bs, embed_dims)
            reference_points (Tensor): The reference
                points of offset. has shape
                (bs, num_query, 4) when as_two_stage,
                otherwise has shape ((bs, num_query, 2).
            valid_ratios (Tensor): The radios of valid
                points on the feature map, has shape
                (bs, num_levels, 2)
        Returns:
            Tensor: Results with shape [1, num_query, bs, embed_dims] when
                return_intermediate is `False`, otherwise it has shape
                [num_layers, num_query, bs, embed_dims].
        """
        output = bev_query
        intermediate = []

        ref_3d = self.get_reference_points(
            bev_h,
            bev_w,
            self.point_cloud_range[5] - self.point_cloud_range[2],
            self.num_points_in_pillar,
            dim='3d',
            bs=bev_query.shape[1],
            dtype=bev_query.dtype)
        ref_2d = self.get_reference_points(
            bev_h,
            bev_w,
            dim='2d',
            bs=bev_query.shape[1],
            dtype=bev_query.dtype)

        reference_points_cam, bev_mask = self.point_sampling(
            ref_3d, self.point_cloud_range, kwargs['img_metas'])

        # bug: this code should be 'shift_ref_2d = ref_2d.clone()', we keep this bug for reproducing our results in paper.
        # TODO(qianhui): fix this clone bugs: paddle equal means clone but torch not
        #shift_ref_2d = ref_2d
        #shift_ref_2d += shift[:, None, None, :]
        # Ego-motion compensation: shift the 2D reference grid in place.
        ref_2d += shift[:, None, None, :]

        # (num_query, bs, embed_dims) -> (bs, num_query, embed_dims)
        bev_query = bev_query.transpose([1, 0, 2])
        bev_pos = bev_pos.transpose([1, 0, 2])
        bs, len_bev, num_bev_level, _ = ref_2d.shape
        '''
        if prev_bev is not None:
            prev_bev = prev_bev.transpose([1, 0, 2])
            prev_bev = paddle.stack(
                [prev_bev, bev_query], 1).reshape([bs*2, len_bev, -1])
            # TODO(qianhui): fix this clone bugs: paddle equal means clone but torch not
            #hybird_ref_2d = paddle.stack([shift_ref_2d, ref_2d], 1).reshape(
            hybird_ref_2d = paddle.stack([ref_2d, ref_2d], 1).reshape(
                [bs*2, len_bev, num_bev_level, 2])
        else:
            hybird_ref_2d = paddle.stack([ref_2d, ref_2d], 1).reshape(
                [bs*2, len_bev, num_bev_level, 2])
        '''
        # Branch-free replacement for the commented block above
        # (export-friendly): an all-zero prev_bev is treated as "no
        # history" and substituted by the current bev_query.
        prev_bev = prev_bev.transpose([1, 0, 2])
        valid_prev_bev = prev_bev.cast('bool').any().cast('int32')
        prev_bev = prev_bev * valid_prev_bev + bev_query * (1 - valid_prev_bev)
        prev_bev = paddle.stack([prev_bev, bev_query],
                                1).reshape([bs * 2, len_bev, -1])
        hybird_ref_2d = paddle.stack([ref_2d, ref_2d], 1).reshape(
            [bs * 2, len_bev, num_bev_level, 2])

        # Each layer's output becomes the next layer's BEV query.
        for lid, layer in enumerate(self.layers):
            output = layer(
                bev_query,
                key,
                value,
                *args,
                bev_pos=bev_pos,
                ref_2d=hybird_ref_2d,
                ref_3d=ref_3d,
                bev_h=bev_h,
                bev_w=bev_w,
                spatial_shapes=spatial_shapes,
                level_start_index=level_start_index,
                reference_points_cam=reference_points_cam,
                bev_mask=bev_mask,
                prev_bev=prev_bev,
                **kwargs)
            bev_query = output
            if self.return_intermediate:
                intermediate.append(output)

        if self.return_intermediate:
            return paddle.stack(intermediate)

        return output
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/utils.py
|
from typing import List, Optional, Tuple, Union
import numpy as np
import paddle
import paddle.nn.functional as F
def get_dimensions(img: paddle.Tensor) -> List[int]:
    """Return the dimensions of an image as [channels, height, width].

    A 2D tensor is treated as a single-channel image.

    Args:
        img (Tensor): The image to inspect.

    Returns:
        List[int]: [channels, height, width].
    """
    if img.ndim == 2:
        num_channels = 1
    else:
        num_channels = img.shape[-3]
    h, w = img.shape[-2:]
    return [num_channels, h, w]
def _get_inverse_affine_matrix(center: List[float],
                               angle: float,
                               translate: List[float],
                               scale: float,
                               shear: List[float],
                               inverted: bool = True) -> List[float]:
    """Compute the (inverse) affine transformation matrix entries.

    This function refers to https://github.com/pypaddle/vision/blob/main/paddlevision/transforms/functional.py#L992

    Returns a flat 6-element list [a, b, tx, c, d, ty] describing the 2x3
    affine matrix. NOTE(review): angle/shear pass through the tensor-based
    ``radians`` helper, so the returned rotation entries are paddle tensors
    (the translation entries mix tensors and python floats) — confirm
    callers only feed this to ``paddle.to_tensor``.
    """
    # Helper method to compute inverse matrix for affine transformation

    # Pillow requires inverse affine transformation matrix:
    # Affine matrix is : M = T * C * RotateScaleShear * C^-1
    #
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RotateScaleShear is rotation with scale and shear matrix
    #
    #       RotateScaleShear(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a + sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                      , 1 ]
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]             [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1

    # Degrees -> radians as a tensor op (keeps the math traceable/exportable,
    # unlike math.radians which only accepts python scalars).
    def radians(angle):
        pi = paddle.to_tensor(np.pi)
        degree = pi / 180. * angle
        return degree

    # rot = math.radians(angle)
    # sx = math.radians(shear[0])
    # sy = math.radians(shear[1])
    rot = radians(angle)
    sx = radians(shear[0])
    sy = radians(shear[1])

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    # a = math.cos(rot - sy) / math.cos(sy)
    # b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    # c = math.sin(rot - sy) / math.cos(sy)
    # d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)
    a = paddle.cos(rot - sy) / paddle.cos(sy)
    b = -paddle.cos(rot - sy) * paddle.tan(sx) / paddle.cos(sy) - paddle.sin(
        rot)
    c = paddle.sin(rot - sy) / paddle.cos(sy)
    d = -paddle.sin(rot - sy) * paddle.tan(sx) / paddle.cos(sy) + paddle.cos(
        rot)

    if inverted:
        # Inverted rotation matrix with scale and shear
        # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
        matrix = [d, -b, 0.0, -c, a, 0.0]
        matrix = [x / scale for x in matrix]
        # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
        matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
        matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
        # Apply center translation: C * RSS^-1 * C^-1 * T^-1
        matrix[2] += cx
        matrix[5] += cy
    else:
        matrix = [a, b, 0.0, c, d, 0.0]
        matrix = [x * scale for x in matrix]
        # Apply inverse of center translation: RSS * C^-1
        matrix[2] += matrix[0] * (-cx) + matrix[1] * (-cy)
        matrix[5] += matrix[3] * (-cx) + matrix[4] * (-cy)
        # Apply translation and center : T * C * RSS * C^-1
        matrix[2] += cx + tx
        matrix[5] += cy + ty

    return matrix
def _gen_affine_grid(
        theta: paddle.Tensor,
        w: int,
        h: int,
        ow: int,
        oh: int,
) -> paddle.Tensor:
    """Build a normalized sampling grid of shape [1, oh, ow, 2] for grid_sample.

    Mirrors pytorch's AffineGridGenerator
    (https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/AffineGridGenerator.cpp#L18)
    with two differences:
    1) grid values are normalized *after* applying theta;
    2) normalization may use a different image size (w, h), which covers the
       "extend" option of PIL.Image.rotate.
    """
    half = 0.5
    base = paddle.empty([1, oh, ow, 3], dtype=theta.dtype)
    # Pixel-center x coordinates along the output width.
    base[..., 0] = paddle.linspace(-ow * 0.5 + half, ow * 0.5 + half - 1, num=ow)
    # Pixel-center y coordinates along the output height (column vector).
    base[..., 1] = paddle.linspace(
        -oh * 0.5 + half, oh * 0.5 + half - 1, num=oh).unsqueeze([-1])
    base[..., 2] = 1
    # Apply theta and normalize to [-1, 1] by half the source size.
    norm = paddle.to_tensor([0.5 * w, 0.5 * h], dtype=theta.dtype)
    theta_t = theta.transpose([0, 2, 1]) / norm
    grid = base.reshape([1, oh * ow, 3]).bmm(theta_t)
    return grid.reshape([1, oh, ow, 2])
def _cast_squeeze_in(img: paddle.Tensor, req_dtypes: List[paddle.dtype]
                     ) -> Tuple[paddle.Tensor, bool, bool, paddle.dtype]:
    """Promote img to NCHW and to the first requested dtype when needed.

    Returns the (possibly converted) image plus the flags and original dtype
    required by ``_cast_squeeze_out`` to undo the conversion.
    """
    # Ensure a batch dimension (NCHW).
    need_squeeze = img.ndim < 4
    if need_squeeze:
        img = img.unsqueeze(axis=0)

    out_dtype = img.dtype
    need_cast = out_dtype not in req_dtypes
    if need_cast:
        img = img.cast(req_dtypes[0])
    return img, need_cast, need_squeeze, out_dtype
def _cast_squeeze_out(img: paddle.Tensor, need_cast: bool, need_squeeze: bool,
                      out_dtype: paddle.dtype) -> paddle.Tensor:
    """Undo the batching/dtype promotion performed by ``_cast_squeeze_in``."""
    if need_squeeze:
        img = img.squeeze(axis=0)
    if not need_cast:
        return img
    integer_dtypes = (paddle.uint8, paddle.int8, paddle.int16, paddle.int32,
                      paddle.int64)
    if out_dtype in integer_dtypes:
        # it is better to round before cast
        img = paddle.round(img)
    return img.cast(out_dtype)
def _apply_grid_transform(
        img: paddle.Tensor, grid: paddle.Tensor, mode: str,
        fill: Optional[Union[int, float, List[float]]]) -> paddle.Tensor:
    """Sample ``img`` at the locations given by ``grid`` via F.grid_sample.

    The image is temporarily promoted to NCHW and the grid's dtype, then
    restored afterwards.

    Args:
        img (Tensor): Image to transform (CHW or NCHW).
        grid (Tensor): Sampling grid of shape [1, H, W, 2].
        mode (str): Interpolation mode for grid_sample (e.g. "nearest").
        fill: Accepted for torchvision API parity but currently ignored —
            out-of-bounds samples always use zero padding.

    Returns:
        Tensor: The transformed image, same dtype/rank as the input.
    """
    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(
        img, [grid.dtype])

    if img.shape[0] > 1:
        # Apply same grid to a batch of images
        grid = grid.expand(
            [img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3]])

    img = F.grid_sample(
        img, grid, mode=mode, padding_mode="zeros", align_corners=False)

    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img
def _rotate(img: paddle.Tensor,
            matrix: List[float],
            interpolation: str = "nearest") -> paddle.Tensor:
    """Apply the 2x3 affine ``matrix`` to ``img``; output keeps the input size."""
    oh, ow = img.shape[-2], img.shape[-1]
    # grid_sample needs a floating dtype for the sampling grid.
    dtype = img.dtype if paddle.is_floating_point(img) else paddle.float32
    theta = paddle.to_tensor(matrix, dtype=dtype).reshape([1, 2, 3])
    # grid will be generated on the same device as theta and img
    grid = _gen_affine_grid(theta, w=ow, h=oh, ow=ow, oh=oh)
    return _apply_grid_transform(img, grid, interpolation, fill=None)
def rotate(img: paddle.Tensor,
           angle: float,
           interpolation: str = "nearest",
           center: Optional[List[int]] = None):
    """Rotate ``img`` by ``angle`` degrees about ``center`` (image center by default)."""
    center_f = [0.0, 0.0]
    if center is not None:
        _, height, width = get_dimensions(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [
            1.0 * (coord - size * 0.5)
            for coord, size in zip(center, [width, height])
        ]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0,
                                        [0.0, 0.0])
    return _rotate(img, matrix=matrix, interpolation=interpolation)
def masked_fill(x, mask, value):
    """Return a copy of ``x`` where positions with ``mask`` True are set to ``value``."""
    fill = paddle.full(x.shape, value, x.dtype)
    return paddle.where(mask, fill, x)
def nan_to_num(x, nan=0.0, posinf=None, neginf=None, name=None):
    """Replace NaN, +inf and -inf entries of ``x``.

    NaN becomes ``nan`` (default 0.0); +inf/-inf become ``posinf``/``neginf``,
    defaulting to the largest/smallest finite value of the dtype.
    """
    assert x.dtype in [paddle.float32, paddle.float64]
    finfo = (np.finfo(np.float32)
             if x.dtype == paddle.float32 else np.finfo(np.float64))

    # NOTE(tiancaishaonvjituizi): it seems that paddle handles the dtype of python float number
    # incorrectly, so we have to explicitly contruct tensors here
    posinf_marker = paddle.full_like(x, float("+inf"))
    neginf_marker = paddle.full_like(x, float("-inf"))
    nan_fill = paddle.full_like(x, nan)
    posinf_fill = paddle.full_like(x,
                                   finfo.max if posinf is None else posinf)
    neginf_fill = paddle.full_like(x,
                                   finfo.min if neginf is None else neginf)

    x = paddle.where(paddle.isnan(x), nan_fill, x)
    x = paddle.where(x == posinf_marker, posinf_fill, x)
    x = paddle.where(x == neginf_marker, neginf_fill, x)
    return x
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/transformer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/bevformer/modules/transformer.py
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers.param_init import (
constant_init, normal_init, reset_parameters, xavier_uniform_init)
from paddle3d.models.transformers.utils import rotate
def inverse_sigmoid(x, eps=1e-5):
    """Inverse function of sigmoid (logit).

    Args:
        x (Tensor): Values to invert; clipped into [0, 1] first.
        eps (float): Lower bound that avoids log(0) / division by zero.
            Defaults to 1e-5.

    Returns:
        Tensor: log(x / (1 - x)) computed on the clipped input, same shape
        as the input.
    """
    clipped = x.clip(min=0, max=1)
    numerator = clipped.clip(min=eps)
    denominator = (1 - clipped).clip(min=eps)
    return paddle.log(numerator / denominator)
@manager.TRANSFORMERS.add_component
class PerceptionTransformer(nn.Layer):
"""Implements the Detr3D transformer.
Args:
as_two_stage (bool): Generate query from encoder features.
Default: False.
num_feature_levels (int): Number of feature maps from FPN:
Default: 4.
two_stage_num_proposals (int): Number of proposals when set
`as_two_stage` as True. Default: 300.
"""
    def __init__(self,
                 num_feature_levels=4,
                 num_cams=6,
                 encoder=None,
                 decoder=None,
                 embed_dims=256,
                 rotate_prev_bev=True,
                 use_shift=True,
                 use_can_bus=True,
                 can_bus_norm=True,
                 use_cams_embeds=True,
                 rotate_center=[100, 100],
                 **kwargs):
        """Store configuration flags, then build sub-layers and initialize
        their weights.

        Args:
            num_feature_levels (int): Number of FPN feature levels. Default 4.
            num_cams (int): Number of cameras. Default 6.
            encoder / decoder (nn.Layer): Pre-built encoder/decoder modules.
            embed_dims (int): Embedding dimension. Default 256.
            rotate_prev_bev (bool): Rotate the previous BEV by ego motion.
            use_shift (bool): Apply ego-motion shift to BEV queries.
            use_can_bus (bool): Inject CAN-bus signals via an MLP.
            can_bus_norm (bool): Append LayerNorm to the CAN-bus MLP.
            use_cams_embeds (bool): Add per-camera embeddings.
            rotate_center (list[int]): Pixel center used when rotating the
                previous BEV.
        """
        super(PerceptionTransformer, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.embed_dims = embed_dims
        self.num_feature_levels = num_feature_levels
        self.num_cams = num_cams
        self.fp16_enabled = False

        self.rotate_prev_bev = rotate_prev_bev
        self.use_shift = use_shift
        self.use_can_bus = use_can_bus
        self.can_bus_norm = can_bus_norm
        self.use_cams_embeds = use_cams_embeds

        # init_layers() must run before init_weights(): the latter touches
        # the parameters the former creates.
        self.init_layers()
        self.rotate_center = rotate_center
        self.init_weights()
def init_layers(self):
"""Initialize layers of the Detr3DTransformer."""
level_embeds = self.create_parameter((self.num_feature_levels,
self.embed_dims))
self.add_parameter('level_embeds', level_embeds)
cams_embeds = self.create_parameter((self.num_cams, self.embed_dims))
self.add_parameter('cams_embeds', cams_embeds)
self.reference_points = nn.Linear(self.embed_dims, 3)
self.can_bus_mlp = nn.Sequential(
nn.Linear(18, self.embed_dims // 2),
nn.ReLU(),
nn.Linear(self.embed_dims // 2, self.embed_dims),
nn.ReLU(),
)
if self.can_bus_norm:
self.can_bus_mlp.add_sublayer('norm', nn.LayerNorm(self.embed_dims))
@paddle.no_grad()
def init_weights(self):
"""Initialize the transformer weights."""
normal_init(self.level_embeds)
normal_init(self.cams_embeds)
xavier_uniform_init(self.reference_points.weight, reverse=True)
constant_init(self.reference_points.bias, value=0)
for layer in self.can_bus_mlp:
if isinstance(layer, nn.Linear):
reset_parameters(layer)
# xavier_uniform_init(layer.weight, reverse=True)
elif isinstance(layer, nn.LayerNorm):
constant_init(layer.weight, value=1)
constant_init(layer.bias, value=0)
def get_bev_features(self,
mlvl_feats,
bev_queries,
bev_h,
bev_w,
grid_length=[0.512, 0.512],
bev_pos=None,
prev_bev=None,
**kwargs):
"""
obtain bev features.
"""
bs = mlvl_feats[0].shape[0]
bev_queries = bev_queries.unsqueeze(1).tile([1, bs, 1])
bev_pos = bev_pos.flatten(2).transpose([2, 0, 1])
'''
# obtain rotation angle and shift with ego motion
delta_x = np.array([each['can_bus'][0]
for each in kwargs['img_metas']])
delta_y = np.array([each['can_bus'][1]
for each in kwargs['img_metas']])
ego_angle = np.array(
[each['can_bus'][-2] / np.pi * 180 for each in kwargs['img_metas']])
grid_length_y = grid_length[0]
grid_length_x = grid_length[1]
translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2)
translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180
bev_angle = ego_angle - translation_angle
shift_y = translation_length * \
np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h
shift_x = translation_length * \
np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w
shift_y = shift_y * self.use_shift
shift_x = shift_x * self.use_shift
shift = paddle.to_tensor(
[shift_x, shift_y]).transpose([1, 0]) # xy, bs -> bs, xy
'''
pi_tensor = paddle.to_tensor(np.pi / 180)
delta_x = paddle.concat(
[each['can_bus'][0] for each in kwargs['img_metas']])
delta_y = paddle.concat(
[each['can_bus'][1] for each in kwargs['img_metas']])
#np.save('delta_x.npy', delta_x.numpy())
#np.save('delta_y.npy', delta_y.numpy())
ego_angle = paddle.concat(
[each['can_bus'][-2] / pi_tensor for each in kwargs['img_metas']])
#np.save('ego_angle.npy', ego_angle)
grid_length_y = grid_length[0]
grid_length_x = grid_length[1]
translation_length = paddle.sqrt(delta_x**2 + delta_y**2)
#np.save('translation_length.npy', translation_length.numpy())
translation_angle = paddle.atan2(delta_y, delta_x) / pi_tensor
# translation_angle = paddle.angle(delta_y, delta_x) / pi_tensor
#np.save('translation_angle.npy', translation_angle.numpy())
bev_angle = ego_angle - translation_angle
shift_y = translation_length * \
paddle.cos(bev_angle * pi_tensor) / grid_length_y / bev_h
shift_x = translation_length * \
paddle.sin(bev_angle * pi_tensor) / grid_length_x / bev_w
#np.save('shift_x.npy', shift_x.numpy())
#np.save('shift_y.npy', shift_y.numpy())
shift_y = shift_y * self.use_shift
shift_x = shift_x * self.use_shift
shift = paddle.stack([shift_x,
shift_y]).transpose([1, 0]) # xy, bs -> bs, xy
shift = shift.cast(bev_queries.dtype)
'''
if prev_bev is not None:
if prev_bev.shape[1] == bev_h * bev_w:
prev_bev = prev_bev.transpose([1, 0, 2])
if self.rotate_prev_bev:
for i in range(bs):
rotation_angle = kwargs['img_metas'][i]['can_bus'][-1]
tmp_prev_bev = prev_bev[:, i].reshape([
bev_h, bev_w, -1]).transpose([2, 0, 1])
tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle,
center=self.rotate_center)
# #np.save('tmp_prev_bev.npy', tmp_prev_bev.numpy())
tmp_prev_bev = tmp_prev_bev.transpose([1, 2, 0]).reshape([
bev_h * bev_w, 1, -1])
prev_bev[:, i] = tmp_prev_bev[:, 0]
'''
if prev_bev is not None:
if self.rotate_prev_bev:
for i in range(bs):
valid_prev_bev = prev_bev[:, i].cast('bool').any().cast(
'int32')
rotation_angle = kwargs['img_metas'][i]['can_bus'][-1]
tmp_prev_bev = prev_bev[:, i].reshape(
[bev_h, bev_w, -1]).transpose([2, 0, 1])
tmp_prev_bev = rotate(
tmp_prev_bev, rotation_angle, center=self.rotate_center)
# #np.save('tmp_prev_bev.npy', tmp_prev_bev.numpy())
tmp_prev_bev = tmp_prev_bev.transpose([1, 2, 0]).reshape(
[bev_h * bev_w, 1, -1])
prev_bev[:,
i] = tmp_prev_bev[:,
0] * valid_prev_bev + prev_bev[:, i] * (
1 - valid_prev_bev)
# add can bus signals
# can_bus = paddle.to_tensor(
# [each['can_bus'] for each in kwargs['img_metas']], dtype=bev_queries.dtype) # [:, :]
can_bus = paddle.stack([
each['can_bus'] for each in kwargs['img_metas']
]).cast(bev_queries.dtype)
can_bus = self.can_bus_mlp(can_bus)[None, :, :]
bev_queries = bev_queries + can_bus * self.use_can_bus
feat_flatten = []
spatial_shapes = []
for lvl, feat in enumerate(mlvl_feats):
bs, num_cam, c, h, w = feat.shape
spatial_shape = (h, w)
feat = feat.flatten(3).transpose([1, 0, 3, 2])
if self.use_cams_embeds:
feat = feat + self.cams_embeds[:, None, None, :].cast(
feat.dtype)
feat = feat + self.level_embeds[None, None, lvl:lvl + 1, :].cast(
feat.dtype)
spatial_shapes.append(spatial_shape)
feat_flatten.append(feat)
feat_flatten = paddle.concat(feat_flatten, 2)
spatial_shapes = paddle.to_tensor(spatial_shapes, dtype=paddle.int64)
level_start_index = paddle.concat((paddle.zeros(
(1, ), dtype=paddle.int64), spatial_shapes.prod(1).cumsum(0)[:-1]))
feat_flatten = feat_flatten.transpose(
[0, 2, 1, 3]) # (num_cam, H*W, bs, embed_dims)
# #np.save('bev_queries.npy', bev_queries.numpy())
# #np.save('feat_flatten.npy', feat_flatten.numpy())
# #np.save('bev_pos.npy', bev_pos.numpy())
# #np.save('prev_bev.npy', prev_bev.numpy())
# #np.save('spatial_shapes.npy', spatial_shapes.numpy())
# #np.save('level_start_index.npy', level_start_index.numpy())
# #np.save('shift.npy', shift.numpy())
bev_embed = self.encoder(
bev_queries,
feat_flatten,
feat_flatten,
bev_h=bev_h,
bev_w=bev_w,
bev_pos=bev_pos,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
prev_bev=prev_bev,
shift=shift,
**kwargs)
# #np.save('bev_embed.npy', bev_embed.numpy())
# exit()
return bev_embed
def forward(self,
mlvl_feats,
bev_queries,
object_query_embed,
bev_h,
bev_w,
grid_length=[0.512, 0.512],
bev_pos=None,
reg_branches=None,
cls_branches=None,
prev_bev=None,
**kwargs):
"""Forward function for `Detr3DTransformer`.
Args:
mlvl_feats (list(Tensor)): Input queries from
different level. Each element has shape
[bs, num_cams, embed_dims, h, w].
bev_queries (Tensor): (bev_h*bev_w, c)
bev_pos (Tensor): (bs, embed_dims, bev_h, bev_w)
object_query_embed (Tensor): The query embedding for decoder,
with shape [num_query, c].
reg_branches (obj:`nn.ModuleList`): Regression heads for
feature maps from each decoder layer. Only would
be passed when `with_box_refine` is True. Default to None.
Returns:
tuple[Tensor]: results of decoder containing the following tensor.
- bev_embed: BEV features
- inter_states: Outputs from decoder. If
return_intermediate_dec is True output has shape \
(num_dec_layers, bs, num_query, embed_dims), else has \
shape (1, bs, num_query, embed_dims).
- init_reference_out: The initial value of reference \
points, has shape (bs, num_queries, 4).
- inter_references_out: The internal value of reference \
points in decoder, has shape \
(num_dec_layers, bs,num_query, embed_dims)
- enc_outputs_class: The classification score of \
proposals generated from \
encoder's feature maps, has shape \
(batch, h*w, num_classes). \
Only would be returned when `as_two_stage` is True, \
otherwise None.
- enc_outputs_coord_unact: The regression results \
generated from encoder's feature maps., has shape \
(batch, h*w, 4). Only would \
be returned when `as_two_stage` is True, \
otherwise None.
"""
bev_embed = self.get_bev_features(
mlvl_feats,
bev_queries,
bev_h,
bev_w,
grid_length=grid_length,
bev_pos=bev_pos,
prev_bev=prev_bev,
**kwargs) # bev_embed shape: bs, bev_h*bev_w, embed_dims
bs = mlvl_feats[0].shape[0]
query_pos, query = paddle.split(
object_query_embed, [self.embed_dims, self.embed_dims], axis=1)
query_pos = query_pos.unsqueeze(0).expand([bs, -1, -1])
query = query.unsqueeze(0).expand([bs, -1, -1])
reference_points = self.reference_points(query_pos)
reference_points = F.sigmoid(reference_points)
init_reference_out = reference_points
query = query.transpose([1, 0, 2])
query_pos = query_pos.transpose([1, 0, 2])
bev_embed = bev_embed.transpose([1, 0, 2])
inter_states, inter_references = self.decoder(
query=query,
key=None,
value=bev_embed,
query_pos=query_pos,
reference_points=reference_points,
reg_branches=reg_branches,
cls_branches=cls_branches,
spatial_shapes=paddle.to_tensor([[bev_h, bev_w]],
dtype=paddle.int64),
level_start_index=paddle.to_tensor([0], dtype=paddle.int64),
**kwargs)
inter_references_out = inter_references
return bev_embed, inter_states, init_reference_out, inter_references_out
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/positional_encoding.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/utils/positional_encoding.py
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn.initializer import Uniform
from paddle3d.apis import manager
@manager.POSITIONAL_ENCODING.add_component
class LearnedPositionalEncoding(nn.Layer):
    """Position embedding with learnable embedding weights.

    Args:
        num_feats (int): The feature dimension for each position
            along x-axis or y-axis. The final returned dimension for
            each position is 2 times of this value.
        row_num_embed (int, optional): The dictionary size of row embeddings.
            Default 50.
        col_num_embed (int, optional): The dictionary size of col embeddings.
            Default 50.
    """

    def __init__(self, num_feats, row_num_embed=50, col_num_embed=50):
        super(LearnedPositionalEncoding, self).__init__()
        # Independent learnable tables for row (y) and column (x) positions,
        # both uniformly initialized in [0, 1).
        self.row_embed = nn.Embedding(
            row_num_embed,
            num_feats,
            weight_attr=ParamAttr(initializer=Uniform(0, 1)))
        self.col_embed = nn.Embedding(
            col_num_embed,
            num_feats,
            weight_attr=ParamAttr(initializer=Uniform(0, 1)))
        self.num_feats = num_feats
        self.row_num_embed = row_num_embed
        self.col_num_embed = col_num_embed

    def forward(self, mask):
        """Forward function for `LearnedPositionalEncoding`.

        Args:
            mask (Tensor): ByteTensor mask. Non-zero values representing
                ignored positions, while zero values means valid positions
                for this image. Shape [bs, h, w].

        Returns:
            pos (Tensor): Returned position embedding with shape
                [bs, num_feats*2, h, w].
        """
        h, w = mask.shape[-2:]
        # (w, num_feats) column codes and (h, num_feats) row codes.
        col_codes = self.col_embed(paddle.arange(w))
        row_codes = self.row_embed(paddle.arange(h))
        # Broadcast both over the grid, then concat along the feature axis:
        # (h, w, 2*num_feats).
        grid = paddle.concat(
            (col_codes.unsqueeze(0).tile([h, 1, 1]),
             row_codes.unsqueeze(1).tile([1, w, 1])),
            axis=-1)
        # To channel-first, add batch dim and repeat per sample:
        # (bs, 2*num_feats, h, w).
        pos = grid.transpose([2, 0, 1]).unsqueeze(0)
        return pos.tile([mask.shape[0], 1, 1, 1])

    def __repr__(self):
        """str: a string that describes the module"""
        return (f'{self.__class__.__name__}'
                f'(num_feats={self.num_feats}, '
                f'row_num_embed={self.row_num_embed}, '
                f'col_num_embed={self.col_num_embed})')
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/match_costs/match_cost.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/match_costs/match_cost.py
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import paddle
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.utils.box import bbox_overlaps
from paddle3d.utils.transform import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
@manager.MATCH_COSTS.add_component
class BBoxL1Cost:
    """BBoxL1Cost.

    Args:
        weight (int | float, optional): loss_weight
        box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN
    """

    def __init__(self, weight=1., box_format='xyxy'):
        self.weight = weight
        assert box_format in ['xyxy', 'xywh']
        self.box_format = box_format

    def __call__(self, bbox_pred, gt_bboxes):
        """Compute the pairwise L1 matching cost.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            gt_bboxes (Tensor): Ground truth boxes with normalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].

        Returns:
            paddle.Tensor: bbox_cost value with weight,
                shape [num_query, num_gt].
        """
        if self.box_format == 'xywh':
            gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
        elif self.box_format == 'xyxy':
            bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
        # BUGFIX: paddle.dist reduces to a single scalar p-norm of the
        # difference, but the matcher needs a [num_query, num_gt] cost
        # matrix (the reference implementation uses torch.cdist(..., p=1)).
        # Compute the pairwise L1 distance via broadcasting instead.
        bbox_cost = paddle.abs(
            bbox_pred.unsqueeze(-2) - gt_bboxes.unsqueeze(-3)).sum(-1)
        return bbox_cost * self.weight
@manager.MATCH_COSTS.add_component
class FocalLossCost:
    """Matching cost based on the focal-loss formulation.

    Args:
        weight (int | float, optional): scale applied to the cost.
        alpha (int | float, optional): focal-loss alpha.
        gamma (int | float, optional): focal-loss gamma.
        eps (float, optional): numerical-stability epsilon, default 1e-12.
    """

    def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12):
        self.weight = weight
        self.alpha = alpha
        self.gamma = gamma
        self.eps = eps

    def __call__(self, cls_pred, gt_labels):
        """Compute the focal-loss matching cost.

        Args:
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).

        Returns:
            paddle.Tensor: cls_cost value with weight,
                shape [num_query, num_gt].
        """
        prob = F.sigmoid(cls_pred)
        # Per-class costs of predicting negative vs. positive, following
        # the focal-loss weighting.
        neg_cost = -(1 - prob + self.eps).log() * (
            1 - self.alpha) * prob.pow(self.gamma)
        pos_cost = -(prob + self.eps).log() * self.alpha * (
            1 - prob).pow(self.gamma)
        # Pick the columns of the ground-truth classes: [num_query, num_gt].
        cls_cost = paddle.gather(pos_cost, gt_labels, 1) - paddle.gather(
            neg_cost, gt_labels, 1)
        return cls_cost * self.weight
@manager.MATCH_COSTS.add_component
class ClassificationCost:
    """Softmax-based classification matching cost.

    Args:
        weight (int | float, optional): scale applied to the cost.
    """

    def __init__(self, weight=1.):
        self.weight = weight

    def __call__(self, cls_pred, gt_labels):
        """Compute the classification matching cost.

        Args:
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).

        Returns:
            paddle.Tensor: cls_cost value with weight.
        """
        # As in the official DETR repo, the NLL is approximated by
        # 1 - score[gt_label]; the constant 1 does not affect the matching
        # and is dropped.
        scores = F.softmax(cls_pred, -1)
        return -scores[:, gt_labels] * self.weight
@manager.MATCH_COSTS.add_component
class IoUCost:
    """IoU-based matching cost.

    Args:
        iou_mode (str, optional): iou mode such as 'iou' | 'giou'.
        weight (int | float, optional): loss weight.
    """

    def __init__(self, iou_mode='giou', weight=1.):
        self.weight = weight
        self.iou_mode = iou_mode

    def __call__(self, bboxes, gt_bboxes):
        """Compute the (negated) IoU matching cost.

        Args:
            bboxes (Tensor): Predicted boxes with unnormalized coordinates
                (x1, y1, x2, y2). Shape [num_query, 4].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].

        Returns:
            paddle.Tensor: iou_cost value with weight.
        """
        # Pairwise overlaps: [num_query, num_gt]. Higher IoU should mean
        # lower cost, so the sign is flipped; the constant 1 that would make
        # it a proper loss does not affect matching and is dropped.
        pairwise_iou = bbox_overlaps(
            bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
        return -pairwise_iou * self.weight
def pairwise_dist(A, B):
    """Return the pairwise L1 distance between rows of ``A`` and ``B``.

    Args:
        A (Tensor): shape [..., m, d].
        B (Tensor): shape [..., n, d].

    Returns:
        Tensor: shape [..., m, n], L1 distance of every A-row/B-row pair.
    """
    # Broadcast A against B along fresh axes, then reduce the feature dim.
    return (A.unsqueeze(-2) - B.unsqueeze(-3)).abs().sum(-1)
@manager.MATCH_COSTS.add_component
class BBox3DL1Cost:
    """Pairwise L1 matching cost for 3D boxes.

    Args:
        weight (int | float, optional): scale applied to the cost.
    """

    def __init__(self, weight=1.):
        self.weight = weight

    def __call__(self, bbox_pred, gt_bboxes):
        """Compute the pairwise L1 cost between predictions and gt boxes.

        Args:
            bbox_pred (Tensor): Predicted (normalized) box parameters,
                shape [num_query, dim].
            gt_bboxes (Tensor): Ground-truth box parameters,
                shape [num_gt, dim].

        Returns:
            paddle.Tensor: cost matrix of shape [num_query, num_gt],
                scaled by ``self.weight``.
        """
        return pairwise_dist(bbox_pred, gt_bboxes) * self.weight
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/match_costs/__init__.py
|
from .match_cost import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/attentions/multihead_attention.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# This code is based on https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/bricks/transformer.py#L407
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
from paddle3d.models.layers.param_init import (constant_init,
xavier_uniform_init)
from paddle3d.utils.logger import logger
@manager.ATTENTIONS.add_component
class MultiheadAttention(nn.Layer):
    """A wrapper for ``paddle.nn.MultiheadAttention``.

    This module implements MultiheadAttention with identity connection,
    and positional encoding is also passed as input.

    Args:
        embed_dims (int): The embedding dimension.
        num_heads (int): Parallel attention heads.
        attn_drop (float): A Dropout layer on attn_output_weights.
            Default: 0.0.
        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
            Default: 0.0.
        dropout_layer (obj:`ConfigDict`): The dropout_layer used
            when adding the shortcut.
        batch_first (bool): When it is True, Key, Query and Value are shape of
            (batch, n, embed_dim), otherwise (n, batch, embed_dim).
            Default to False.
    """

    def __init__(self,
                 embed_dims,
                 num_heads,
                 attn_drop=0.,
                 proj_drop=0.,
                 dropout_layer=dict(type_name='Dropout', p=0.),
                 batch_first=False,
                 **kwargs):
        super(MultiheadAttention, self).__init__()
        # Backwards compatibility: a legacy `dropout` kwarg maps onto both
        # attn_drop and the shortcut dropout layer.
        if 'dropout' in kwargs:
            logger.warning('The arguments `dropout` in MultiheadAttention '
                           'has been deprecated, now you can separately '
                           'set `attn_drop`(float), proj_drop(float), '
                           'and `dropout_layer`(dict) ')
            attn_drop = kwargs['dropout']
            dropout_layer['p'] = kwargs.pop('dropout')
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.batch_first = batch_first
        self.attn = nn.MultiHeadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)
        self.proj_drop = nn.Dropout(proj_drop)
        # Dropout applied to the attention output before the residual add;
        # built by name from the config dict (e.g. {'type_name': 'Dropout'}).
        if dropout_layer:
            dropout_layer_ = copy.deepcopy(dropout_layer)
            dropout_layer_name = dropout_layer_.pop("type_name")
            self.dropout_layer = getattr(nn,
                                         dropout_layer_name)(**dropout_layer_)
        else:
            self.dropout_layer = nn.Identity()
        self.init_weights()

    @paddle.no_grad()
    def init_weights(self):
        """Xavier-initialize all linear sublayers of the attention module."""
        for layer in self.attn.sublayers():
            if isinstance(layer, nn.Linear):
                xavier_uniform_init(layer.weight, reverse=True)
                constant_init(layer.bias, value=0)

    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_pos=None,
                attn_mask=None,
                key_padding_mask=None,
                **kwargs):
        """Forward function for `MultiheadAttention`.

        **kwargs allow passing a more general data flow when combining
        with other operations in `transformerlayer`.

        Args:
            query (Tensor): The input query with shape [num_queries, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_queries embed_dims].
            key (Tensor): The key tensor with shape [num_keys, bs,
                embed_dims] if self.batch_first is False, else
                [bs, num_keys, embed_dims] .
                If None, the ``query`` will be used. Defaults to None.
            value (Tensor): The value tensor with same shape as `key`.
                Same in `nn.MultiheadAttention.forward`. Defaults to None.
                If None, the `key` will be used.
            identity (Tensor): This tensor, with the same shape as x,
                will be used for the identity link.
                If None, `x` will be used. Defaults to None.
            query_pos (Tensor): The positional encoding for query, with
                the same shape as `x`. If not None, it will
                be added to `x` before forward function. Defaults to None.
            key_pos (Tensor): The positional encoding for `key`, with the
                same shape as `key`. Defaults to None. If not None, it will
                be added to `key` before forward function. If None, and
                `query_pos` has the same shape as `key`, then `query_pos`
                will be used for `key_pos`. Defaults to None.
            attn_mask (Tensor): ByteTensor mask with shape [num_queries,
                num_keys]. Same in `nn.MultiheadAttention.forward`.
                Defaults to None.
            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
                Defaults to None.

        Returns:
            Tensor: forwarded results with shape
                [num_queries, bs, embed_dims]
                if self.batch_first is False, else
                [bs, num_queries embed_dims].
        """
        # Self-attention defaults: key/value fall back to query/key, and the
        # residual path defaults to the (un-positional-encoded) query.
        if key is None:
            key = query
        if value is None:
            value = key
        if identity is None:
            identity = query
        if key_pos is None:
            if query_pos is not None:
                # use query_pos if key_pos is not available
                if query_pos.shape == key.shape:
                    key_pos = query_pos
                else:
                    logger.warning(f'position encoding of key is'
                                   f'missing in {self.__class__.__name__}.')
        if query_pos is not None:
            query = query + query_pos
        if key_pos is not None:
            key = key + key_pos
        # Because the dataflow('key', 'query', 'value') of
        # ``paddle.nn.MultiheadAttention`` is (batch, num_query, embed_dims)
        # We should adjust the shape of dataflow from
        # num_query_first (num_query ,batch, embed_dims) to batch_first
        # (batch, num_query, embed_dims), and recover ``attn_output``
        # from num_query_first to batch_first.
        if not self.batch_first:
            query = query.transpose([1, 0, 2])
            key = key.transpose([1, 0, 2])
            value = value.transpose([1, 0, 2])

        if key_padding_mask is None:
            out = self.attn(
                query=query, key=key, value=value, attn_mask=attn_mask)
        else:
            # paddle's MultiHeadAttention has no key_padding_mask argument;
            # the (inverted) padding mask is folded into attn_mask instead.
            # Supplying both masks at once is not supported.
            if attn_mask is None:
                attn_mask = ~key_padding_mask
                out = self.attn(
                    query=query, key=key, value=value, attn_mask=attn_mask)
            else:
                raise ValueError('key_padding_mask is not None')

        if not self.batch_first:
            out = out.transpose([1, 0, 2])

        return identity + self.dropout_layer(self.proj_drop(out))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/attentions/__init__.py
|
from . import (multihead_attention, spatial_cross_attention,
temporal_self_attention)
from .multihead_attention import *
from .spatial_cross_attention import *
from .temporal_self_attention import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/attentions/temporal_self_attention.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from BEVFormer (https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers.param_init import (constant_init,
xavier_uniform_init)
from paddle3d.models.transformers.utils import masked_fill
from paddle3d.ops import ms_deform_attn
from paddle3d.utils import logger
@manager.ATTENTIONS.add_component
class TemporalSelfAttention(nn.Layer):
    """An attention module used in BEVFormer based on Deformable-Detr.

    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
    <https://arxiv.org/pdf/2010.04159.pdf>`_.

    Args:
        embed_dims (int): The embedding dimension of Attention.
            Default: 256.
        num_heads (int): Parallel attention heads. Default: 64.
        num_levels (int): The number of feature map used in
            Attention. Default: 4.
        num_points (int): The number of sampling points for
            each query in each head. Default: 4.
        im2col_step (int): The step used in image_to_column.
            Default: 64.
        dropout (float): A Dropout layer on `inp_identity`.
            Default: 0.1.
        batch_first (bool): Key, Query and Value are shape of
            (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to True.
        norm_cfg (dict): Config dict for normalization layer.
            Default: None.
        num_bev_queue (int): In this version, we only use one history BEV and one currenct BEV.
            the length of BEV queue is 2.
    """

    def __init__(self,
                 embed_dims=256,
                 num_heads=8,
                 num_levels=4,
                 num_points=4,
                 num_bev_queue=2,
                 im2col_step=64,
                 dropout=0.1,
                 batch_first=True,
                 norm_cfg=None):
        super(TemporalSelfAttention, self).__init__()
        if embed_dims % num_heads != 0:
            raise ValueError(f'embed_dims must be divisible by num_heads, '
                             f'but got {embed_dims} and {num_heads}')
        dim_per_head = embed_dims // num_heads
        self.norm_cfg = norm_cfg
        self.dropout = nn.Dropout(dropout)
        self.batch_first = batch_first

        # you'd better set dim_per_head to a power of 2
        # which is more efficient in the CUDA implementation
        def _is_power_of_2(n):
            if (not isinstance(n, int)) or (n < 0):
                raise ValueError(
                    'invalid input for _is_power_of_2: {} (type: {})'.format(
                        n, type(n)))
            return (n & (n - 1) == 0) and n != 0

        if not _is_power_of_2(dim_per_head):
            logger.warning(
                "You'd better set embed_dims in "
                'MultiScaleDeformAttention to make '
                'the dimension of each attention head a power of 2 '
                'which is more efficient in our CUDA implementation.')

        self.im2col_step = im2col_step
        self.embed_dims = embed_dims
        self.num_levels = num_levels
        self.num_heads = num_heads
        self.num_points = num_points
        self.num_bev_queue = num_bev_queue
        # Offsets and weights are predicted from the concatenation of the
        # current query and the history BEV, hence the
        # ``embed_dims * num_bev_queue`` input width.
        self.sampling_offsets = nn.Linear(
            embed_dims * self.num_bev_queue,
            num_bev_queue * num_heads * num_levels * num_points * 2)
        self.attention_weights = nn.Linear(
            embed_dims * self.num_bev_queue,
            num_bev_queue * num_heads * num_levels * num_points)
        self.value_proj = nn.Linear(embed_dims, embed_dims)
        self.output_proj = nn.Linear(embed_dims, embed_dims)
        self.init_weights()

    @paddle.no_grad()
    def init_weights(self):
        """Default initialization for Parameters of Module."""
        # Zero weights plus a hand-crafted bias: initial sampling offsets
        # point to evenly spaced directions around each query, with radius
        # growing per sampling point (i + 1).
        constant_init(self.sampling_offsets.weight, value=0.)
        constant_init(self.sampling_offsets.bias, value=0.)
        thetas = paddle.arange(
            self.num_heads,
            dtype=paddle.float32) * (2.0 * math.pi / self.num_heads)
        grid_init = paddle.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)).reshape(
            [self.num_heads, 1, 1, 2]).tile(
                [1, self.num_levels * self.num_bev_queue, self.num_points, 1])
        for i in range(self.num_points):
            grid_init[:, :, i, :] *= i + 1

        self.sampling_offsets.bias.set_value(grid_init.reshape([-1]))
        constant_init(self.attention_weights.weight, value=0)
        constant_init(self.attention_weights.bias, value=0)
        xavier_uniform_init(self.value_proj.weight, reverse=True)
        constant_init(self.value_proj.bias, value=0)
        xavier_uniform_init(self.output_proj.weight, reverse=True)
        constant_init(self.output_proj.bias, value=0)

    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_padding_mask=None,
                reference_points=None,
                spatial_shapes=None,
                level_start_index=None,
                flag='decoder',
                **kwargs):
        """Forward Function of MultiScaleDeformAttention.

        Args:
            query (Tensor): Query of Transformer with shape
                (num_query, bs, embed_dims).
            key (Tensor): The key tensor with shape
                `(num_key, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_key, bs, embed_dims)`. Here it stacks
                [previous BEV, current BEV] along the batch dim.
            identity (Tensor): The tensor used for addition, with the
                same shape as `query`. Default None. If None,
                `query` will be used.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            reference_points (Tensor): The normalized reference
                points with shape (bs, num_query, num_levels, 2),
                all elements is range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area.
                or (N, Length_{query}, num_levels, 4), add
                additional two dimensions is (w, h) to
                form reference boxes.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_key].
            spatial_shapes (Tensor): Spatial shape of features in
                different levels. With shape (num_levels, 2),
                last dimension represents (h, w).
            level_start_index (Tensor): The start index of each level.
                A tensor has shape ``(num_levels, )`` and can be represented
                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        if value is None:
            # No history BEV available: attend to the current BEV twice so
            # the queue still has length num_bev_queue (== 2).
            assert self.batch_first
            bs, len_bev, c = query.shape
            value = paddle.stack([query, query],
                                 1).reshape([bs * 2, len_bev, c])

        if identity is None:
            identity = query
        if query_pos is not None:
            query = query + query_pos
        if not self.batch_first:
            # change to (bs, num_query, embed_dims)
            query = query.transpose([1, 0, 2])
            value = value.transpose([1, 0, 2])
        bs, num_query, embed_dims = query.shape
        _, num_value, _ = value.shape
        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
        assert self.num_bev_queue == 2

        # Concatenate the history BEV (first half of value) onto the query so
        # offsets/weights are conditioned on both frames.
        query = paddle.concat([value[:bs], query], -1)
        value = self.value_proj(value)

        if key_padding_mask is not None:
            value = masked_fill(value, key_padding_mask[..., None], 0.0)

        # Split value per head: (bs*queue, num_value, heads, head_dim).
        value = value.reshape(
            [bs * self.num_bev_queue, num_value, self.num_heads, -1])

        sampling_offsets = self.sampling_offsets(query)
        sampling_offsets = sampling_offsets.reshape([
            bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels,
            self.num_points, 2
        ])
        attention_weights = self.attention_weights(query).reshape([
            bs, num_query, self.num_heads, self.num_bev_queue,
            self.num_levels * self.num_points
        ])
        # Softmax over all (level, point) sampling locations jointly.
        attention_weights = F.softmax(attention_weights, -1)

        attention_weights = attention_weights.reshape([
            bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels,
            self.num_points
        ])

        # Fold the BEV-queue axis into the batch axis to match value's layout.
        attention_weights = attention_weights.transpose([0, 3, 1, 2, 4, 5])\
            .reshape([bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points])
        sampling_offsets = sampling_offsets.transpose([0, 3, 1, 2, 4, 5, 6])\
            .reshape([bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points, 2])

        if reference_points.shape[-1] == 2:
            # Offsets are in pixels; normalize by each level's (w, h).
            offset_normalizer = paddle.stack(
                [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            sampling_locations = reference_points[:, :, None, :, None, :] \
                + sampling_offsets \
                / offset_normalizer[None, None, None, :, None, :]

        elif reference_points.shape[-1] == 4:
            # Reference boxes: scale offsets by half the box size.
            sampling_locations = reference_points[:, :, None, :, None, :2] \
                + sampling_offsets / self.num_points \
                * reference_points[:, :, None, :, None, 2:] \
                * 0.5
        else:
            raise ValueError(
                f'Last dim of reference_points must be'
                f' 2 or 4, but get {reference_points.shape[-1]} instead.')

        # Custom CUDA op expects float32 inputs.
        value = value.cast(paddle.float32)
        sampling_locations = sampling_locations.cast(paddle.float32)
        output = ms_deform_attn.ms_deform_attn(
            value, sampling_locations, attention_weights, spatial_shapes,
            level_start_index, self.im2col_step)

        # output shape (bs*num_bev_queue, num_query, embed_dims)
        # (bs*num_bev_queue, num_query, embed_dims)-> (num_query, embed_dims, bs*num_bev_queue)
        output = output.transpose([1, 2, 0])

        # fuse history value and current value
        # (num_query, embed_dims, bs*num_bev_queue)-> (num_query, embed_dims, bs, num_bev_queue)
        output = output.reshape([num_query, embed_dims, bs, self.num_bev_queue])
        output = output.mean(-1)

        # (num_query, embed_dims, bs)-> (bs, num_query, embed_dims)
        output = output.transpose([2, 0, 1])

        output = self.output_proj(output)

        if not self.batch_first:
            output = output.transpose([1, 0, 2])

        return self.dropout(output) + identity
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/attentions/spatial_cross_attention.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from BEVFormer (https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py)
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import copy
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers.param_init import (constant_init,
xavier_uniform_init)
from paddle3d.models.transformers.utils import masked_fill
from paddle3d.ops import ms_deform_attn
from paddle3d.utils.logger import logger
@manager.ATTENTIONS.add_component
class SpatialCrossAttention(nn.Layer):
    """An attention module used in BEVFormer.

    Each BEV query only attends to the image features of the cameras whose
    ``bev_mask`` marks it as visible; per-camera queries are re-batched into
    a compact tensor before deformable attention and scattered back after,
    which greatly reduces GPU memory versus dense cross attention.

    Args:
        embed_dims (int): The embedding dimension of Attention.
            Default: 256.
        num_cams (int): The number of cameras
        dropout (float): A Dropout layer on `inp_residual`.
            Default: 0..
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        deformable_attention: (dict): The config for the deformable attention used in SCA.
    """
    def __init__(self,
                 embed_dims=256,
                 num_cams=6,
                 pc_range=None,
                 dropout=0.1,
                 batch_first=False,
                 deformable_attention=dict(
                     type_name='MSDeformableAttention3D',
                     embed_dims=256,
                     num_levels=4),
                 **kwargs):
        super(SpatialCrossAttention, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.pc_range = pc_range
        self.fp16_enabled = False
        # Build the inner deformable-attention layer from the registry so the
        # concrete implementation is selected by the config's `type_name`.
        deformable_attention_ = copy.deepcopy(deformable_attention)
        layer_name = deformable_attention_.pop('type_name')
        attention_layer = manager.ATTENTIONS.components_dict[layer_name]
        self.deformable_attention = attention_layer(**deformable_attention_)
        self.embed_dims = embed_dims
        self.num_cams = num_cams
        self.output_proj = nn.Linear(embed_dims, embed_dims)
        self.batch_first = batch_first
        self.init_weight()
    @paddle.no_grad()
    def init_weight(self):
        """Default initialization for Parameters of Module."""
        xavier_uniform_init(self.output_proj.weight, reverse=True)
        constant_init(self.output_proj.bias, value=0)
    def forward(self,
                query,
                key,
                value,
                residual=None,
                query_pos=None,
                key_padding_mask=None,
                reference_points=None,
                spatial_shapes=None,
                reference_points_cam=None,
                bev_mask=None,
                level_start_index=None,
                flag='encoder',
                **kwargs):
        """Forward Function of Detr3DCrossAtten.
        Args:
            query (Tensor): Query of Transformer with shape
                (num_query, bs, embed_dims).
            key (Tensor): The key tensor with shape
                `(num_key, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_key, bs, embed_dims)`. (B, N, C, H, W)
            residual (Tensor): The tensor used for addition, with the
                same shape as `x`. Default None. If None, `x` will be used.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`. Default
                None.
            reference_points (Tensor): The normalized reference
                points with shape (bs, num_query, 4),
                all elements is range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area.
                or (N, Length_{query}, num_levels, 4), add
                additional two dimensions is (w, h) to
                form reference boxes.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_key].
            spatial_shapes (Tensor): Spatial shape of features in
                different level. With shape (num_levels, 2),
                last dimension represent (h, w).
            level_start_index (Tensor): The start index of each level.
                A tensor has shape (num_levels) and can be represented
                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        # Cast all floating inputs to float32 before the custom deformable
        # attention op is invoked downstream.
        query = query.cast(paddle.float32)
        key = key.cast(paddle.float32)
        value = value.cast(paddle.float32)
        if query_pos is not None:
            query_pos = query_pos.cast(paddle.float32)
        if reference_points_cam is not None:
            reference_points_cam = reference_points_cam.cast(paddle.float32)
        if key is None:
            key = query
        if value is None:
            value = key
        if residual is None:
            inp_residual = query
        # Accumulator for per-camera attention results over all BEV queries.
        slots = paddle.zeros_like(query)
        if query_pos is not None:
            query = query + query_pos
        bs, num_query, _ = query.shape
        # D: number of height (Z) anchors per BEV query.
        D = reference_points_cam.shape[3]
        indexes = []
        # For each camera, collect the BEV query indices hit by at least one
        # reference point. NOTE(review): indices are computed from batch
        # element 0 only (mask_per_img[0]) and reused for the whole batch.
        for i, mask_per_img in enumerate(bev_mask):
            index_query_per_img = mask_per_img[0].sum(-1).nonzero().squeeze(-1)
            indexes.append(index_query_per_img)
        # Maximum number of hit queries over all cameras; used as the padded
        # length of the rebatched tensors.
        max_len = paddle.max(bev_mask.any(-1).sum(-1))
        #max_len = 2500
        #max_len = max([len(each) for each in indexes])
        # each camera only interacts with its corresponding BEV queries. This step can greatly save GPU memory.
        queries_rebatch = paddle.zeros(
            [bs, self.num_cams, max_len, self.embed_dims], dtype=query.dtype)
        reference_points_rebatch = paddle.zeros(
            [bs, self.num_cams, max_len, D, 2],
            dtype=reference_points_cam.dtype)
        for j in range(bs):
            for i, reference_points_per_img in enumerate(reference_points_cam):
                index_query_per_img = indexes[i]
                #queries_rebatch[j, i, :len(index_query_per_img)] = query[j, index_query_per_img]
                queries_rebatch[j, i, :len(index_query_per_img
                                           )] = paddle.gather(
                                               query[j], index_query_per_img)
                #reference_points_rebatch[j, i, :len(index_query_per_img)] = reference_points_per_img[j, index_query_per_img]
                reference_points_rebatch[j, i, :len(
                    index_query_per_img)] = paddle.gather(
                        reference_points_per_img[j], index_query_per_img)
        # Fold the camera dimension into the batch dimension so a single
        # deformable-attention call handles every (batch, camera) pair.
        num_cams, l, bs, embed_dims = key.shape
        key = key.transpose([2, 0, 1, 3]).reshape(
            [bs * self.num_cams, l, self.embed_dims])
        value = value.transpose([2, 0, 1, 3]).reshape(
            [bs * self.num_cams, l, self.embed_dims])
        queries = self.deformable_attention(
            query=queries_rebatch.reshape(
                [bs * self.num_cams, max_len, self.embed_dims]),
            key=key,
            value=value,
            reference_points=reference_points_rebatch.reshape(
                [bs * self.num_cams, max_len, D, 2]),
            spatial_shapes=spatial_shapes,
            level_start_index=level_start_index).reshape(
                [bs, self.num_cams, max_len, self.embed_dims])
        # Scatter the per-camera results back to the full BEV query grid,
        # summing contributions from every camera that saw each query.
        for j in range(bs):
            for i, index_query_per_img in enumerate(indexes):
                #slots[j, index_query_per_img] += queries[j, i, :len(index_query_per_img)]
                bs_idx = paddle.full_like(
                    index_query_per_img, j, dtype=index_query_per_img.dtype)
                scatter_index = paddle.stack(
                    [bs_idx, index_query_per_img]).transpose([1, 0])
                slots = paddle.scatter_nd_add(
                    slots, scatter_index,
                    queries[j, i, :len(index_query_per_img)])
        # Average each BEV query over the number of (camera, height) hits;
        # clip to 1 to avoid division by zero for unseen queries.
        count = bev_mask.sum(-1) > 0
        count = count.transpose([1, 2, 0]).sum(-1)
        count = paddle.clip(count, min=1.0)
        slots = slots / count[..., None]
        slots = self.output_proj(slots)
        return self.dropout(slots) + inp_residual
@manager.ATTENTIONS.add_component
class MSDeformableAttention3D(nn.Layer):
    """An attention module used in BEVFormer based on Deformable-Detr.
    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
    <https://arxiv.org/pdf/2010.04159.pdf>`_.
    Args:
        embed_dims (int): The embedding dimension of Attention.
            Default: 256.
        num_heads (int): Parallel attention heads. Default: 64.
        num_levels (int): The number of feature map used in
            Attention. Default: 4.
        num_points (int): The number of sampling points for
            each query in each head. Default: 4.
        im2col_step (int): The step used in image_to_column.
            Default: 64.
        dropout (float): A Dropout layer on `inp_identity`.
            Default: 0.1.
        batch_first (bool): Key, Query and Value are shape of
            (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to False.
        norm_cfg (dict): Config dict for normalization layer.
            Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """
    def __init__(self,
                 embed_dims=256,
                 num_heads=8,
                 num_levels=4,
                 num_points=8,
                 im2col_step=64,
                 dropout=0.1,
                 batch_first=True):
        super(MSDeformableAttention3D, self).__init__()
        if embed_dims % num_heads != 0:
            raise ValueError(f'embed_dims must be divisible by num_heads, '
                             f'but got {embed_dims} and {num_heads}')
        dim_per_head = embed_dims // num_heads
        self.batch_first = batch_first
        # This variant has no output projection of its own; the caller
        # (SpatialCrossAttention) projects the aggregated result.
        self.output_proj = None
        self.fp16_enabled = False
        # you'd better set dim_per_head to a power of 2
        # which is more efficient in the CUDA implementation
        def _is_power_of_2(n):
            if (not isinstance(n, int)) or (n < 0):
                raise ValueError(
                    'invalid input for _is_power_of_2: {} (type: {})'.format(
                        n, type(n)))
            return (n & (n - 1) == 0) and n != 0
        if not _is_power_of_2(dim_per_head):
            logger.warning(
                "You'd better set embed_dims in "
                'MultiScaleDeformAttention to make '
                'the dimension of each attention head a power of 2 '
                'which is more efficient in our CUDA implementation.')
        self.im2col_step = im2col_step
        self.embed_dims = embed_dims
        self.num_levels = num_levels
        self.num_heads = num_heads
        self.num_points = num_points
        # Per query: 2D offset and scalar weight for every (head, level, point).
        self.sampling_offsets = nn.Linear(
            embed_dims, num_heads * num_levels * num_points * 2)
        self.attention_weights = nn.Linear(embed_dims,
                                           num_heads * num_levels * num_points)
        self.value_proj = nn.Linear(embed_dims, embed_dims)
        self.init_weights()
    @paddle.no_grad()
    def init_weights(self):
        """Default initialization for Parameters of Module."""
        constant_init(self.sampling_offsets.weight, value=0.)
        constant_init(self.sampling_offsets.bias, value=0.)
        # Bias the initial sampling offsets so each head points in a distinct
        # direction on the unit circle, scaled by the point index.
        thetas = paddle.arange(
            self.num_heads,
            dtype=paddle.float32) * (2.0 * math.pi / self.num_heads)
        grid_init = paddle.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)).reshape(
            [self.num_heads, 1, 1,
             2]).tile([1, self.num_levels, self.num_points, 1])
        for i in range(self.num_points):
            grid_init[:, :, i, :] *= i + 1
        self.sampling_offsets.bias.set_value(grid_init.reshape([-1]))
        constant_init(self.attention_weights.weight, value=0)
        constant_init(self.attention_weights.bias, value=0)
        xavier_uniform_init(self.value_proj.weight, reverse=True)
        constant_init(self.value_proj.bias, value=0)
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_padding_mask=None,
                reference_points=None,
                spatial_shapes=None,
                level_start_index=None,
                **kwargs):
        """Forward Function of MultiScaleDeformAttention.
        Args:
            query (Tensor): Query of Transformer with shape
                ( bs, num_query, embed_dims).
            key (Tensor): The key tensor with shape
                `(bs, num_key, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(bs, num_key, embed_dims)`.
            identity (Tensor): The tensor used for addition, with the
                same shape as `query`. Default None. If None,
                `query` will be used.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`. Default
                None.
            reference_points (Tensor): The normalized reference
                points with shape (bs, num_query, num_levels, 2),
                all elements is range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area.
                or (N, Length_{query}, num_levels, 4), add
                additional two dimensions is (w, h) to
                form reference boxes.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_key].
            spatial_shapes (Tensor): Spatial shape of features in
                different levels. With shape (num_levels, 2),
                last dimension represents (h, w).
            level_start_index (Tensor): The start index of each level.
                A tensor has shape ``(num_levels, )`` and can be represented
                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        if value is None:
            value = query
        if identity is None:
            identity = query
        if query_pos is not None:
            query = query + query_pos
        if not self.batch_first:
            # change to (bs, num_query ,embed_dims)
            query = query.transpose([1, 0, 2])
            value = value.transpose([1, 0, 2])
        bs, num_query, _ = query.shape
        bs, num_value, _ = value.shape
        # The flattened value length must match the sum of all level sizes.
        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
        value = self.value_proj(value)
        if key_padding_mask is not None:
            # Zero out padded key positions so they contribute nothing.
            value = masked_fill(value, key_padding_mask[..., None], 0.0)
        value = value.reshape([bs, num_value, self.num_heads, -1])
        # (bs, num_query, num_heads, num_levels, num_points, 2)
        sampling_offsets = self.sampling_offsets(query).reshape([
            bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
        ])
        # Softmax over all (level, point) pairs jointly.
        attention_weights = self.attention_weights(query).reshape(
            [bs, num_query, self.num_heads, self.num_levels * self.num_points])
        attention_weights = F.softmax(attention_weights, -1)
        attention_weights = attention_weights.reshape(
            [bs, num_query, self.num_heads, self.num_levels, self.num_points])
        if reference_points.shape[-1] == 2:
            """
            For each BEV query, it owns `num_Z_anchors` in 3D space that having different heights.
            After proejcting, each BEV query has `num_Z_anchors` reference points in each 2D image.
            For each referent point, we sample `num_points` sampling points.
            For `num_Z_anchors` reference points, it has overall `num_points * num_Z_anchors` sampling points.
            """
            # Normalize pixel offsets to [0, 1] coordinates per level (w, h).
            offset_normalizer = paddle.stack(
                [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            bs, num_query, num_Z_anchors, xy = reference_points.shape
            reference_points = reference_points.reshape(
                [bs, num_query, 1, 1, 1, num_Z_anchors, xy])
            sampling_offsets = sampling_offsets / \
                offset_normalizer.reshape([1, 1, 1, offset_normalizer.shape[0], 1, offset_normalizer.shape[1]])
            bs, num_query, num_heads, num_levels, num_all_points, xy = sampling_offsets.shape
            # Split the per-head points evenly across the Z anchors, then add
            # the offsets to the corresponding anchor's reference point.
            sampling_offsets = sampling_offsets.reshape([
                bs, num_query, num_heads, num_levels,
                num_all_points // num_Z_anchors, num_Z_anchors, xy
            ])
            sampling_locations = reference_points + sampling_offsets
            bs, num_query, num_heads, num_levels, num_points, num_Z_anchors, xy = sampling_locations.shape
            assert num_all_points == num_points * num_Z_anchors
            sampling_locations = sampling_locations.reshape(
                [bs, num_query, num_heads, num_levels, num_all_points, xy])
        elif reference_points.shape[-1] == 4:
            # 4-dim reference boxes are not supported by this variant.
            assert False
        else:
            raise ValueError(
                f'Last dim of reference_points must be'
                f' 2 or 4, but get {reference_points.shape[-1]} instead.')
        # sampling_locations.stop_gradient = True
        # attention_weights.stop_gradient = True
        output = ms_deform_attn.ms_deform_attn(
            value, sampling_locations, attention_weights, spatial_shapes,
            level_start_index, self.im2col_step)
        if not self.batch_first:
            output = output.transpose([1, 0, 2])
        return output
@manager.ATTENTIONS.add_component
class CustomMSDeformableAttention(nn.Layer):
    """An attention module used in Deformable-Detr.
    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
    <https://arxiv.org/pdf/2010.04159.pdf>`_.
    Args:
        embed_dims (int): The embedding dimension of Attention.
            Default: 256.
        num_heads (int): Parallel attention heads. Default: 64.
        num_levels (int): The number of feature map used in
            Attention. Default: 4.
        num_points (int): The number of sampling points for
            each query in each head. Default: 4.
        im2col_step (int): The step used in image_to_column.
            Default: 64.
        dropout (float): A Dropout layer on `inp_identity`.
            Default: 0.1.
        batch_first (bool): Key, Query and Value are shape of
            (batch, n, embed_dim)
            or (n, batch, embed_dim). Default to False.
        norm_cfg (dict): Config dict for normalization layer.
            Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
    """
    def __init__(self,
                 embed_dims=256,
                 num_heads=8,
                 num_levels=4,
                 num_points=4,
                 im2col_step=64,
                 dropout=0.1,
                 batch_first=False,
                 norm_cfg=None):
        super(CustomMSDeformableAttention, self).__init__()
        if embed_dims % num_heads != 0:
            raise ValueError(f'embed_dims must be divisible by num_heads, '
                             f'but got {embed_dims} and {num_heads}')
        dim_per_head = embed_dims // num_heads
        self.norm_cfg = norm_cfg
        self.dropout = nn.Dropout(dropout)
        self.batch_first = batch_first
        self.fp16_enabled = False
        # you'd better set dim_per_head to a power of 2
        # which is more efficient in the CUDA implementation
        def _is_power_of_2(n):
            if (not isinstance(n, int)) or (n < 0):
                raise ValueError(
                    'invalid input for _is_power_of_2: {} (type: {})'.format(
                        n, type(n)))
            return (n & (n - 1) == 0) and n != 0
        if not _is_power_of_2(dim_per_head):
            # BUGFIX: `logger.warnings` does not exist; the sibling classes in
            # this file use `logger.warning`.
            logger.warning(
                "You'd better set embed_dims in "
                'MultiScaleDeformAttention to make '
                'the dimension of each attention head a power of 2 '
                'which is more efficient in our CUDA implementation.')
        self.im2col_step = im2col_step
        self.embed_dims = embed_dims
        self.num_levels = num_levels
        self.num_heads = num_heads
        self.num_points = num_points
        # Per query: a 2D offset and a scalar weight for every
        # (head, level, point) combination.
        self.sampling_offsets = nn.Linear(
            embed_dims, num_heads * num_levels * num_points * 2)
        self.attention_weights = nn.Linear(embed_dims,
                                           num_heads * num_levels * num_points)
        self.value_proj = nn.Linear(embed_dims, embed_dims)
        self.output_proj = nn.Linear(embed_dims, embed_dims)
        self.init_weights()
    @paddle.no_grad()
    def init_weights(self):
        """Default initialization for Parameters of Module."""
        constant_init(self.sampling_offsets.weight, value=0.)
        constant_init(self.sampling_offsets.bias, value=0.)
        # Bias the initial sampling offsets so each head looks in a distinct
        # direction on the unit circle, scaled by the point index.
        thetas = paddle.arange(
            self.num_heads,
            dtype=paddle.float32) * (2.0 * math.pi / self.num_heads)
        grid_init = paddle.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)).reshape(
            [self.num_heads, 1, 1,
             2]).tile([1, self.num_levels, self.num_points, 1])
        for i in range(self.num_points):
            grid_init[:, :, i, :] *= i + 1
        self.sampling_offsets.bias.set_value(grid_init.reshape([-1]))
        constant_init(self.attention_weights.weight, value=0)
        constant_init(self.attention_weights.bias, value=0)
        xavier_uniform_init(self.value_proj.weight, reverse=True)
        constant_init(self.value_proj.bias, value=0)
        xavier_uniform_init(self.output_proj.weight, reverse=True)
        constant_init(self.output_proj.bias, value=0)
    def forward(self,
                query,
                key=None,
                value=None,
                identity=None,
                query_pos=None,
                key_padding_mask=None,
                reference_points=None,
                spatial_shapes=None,
                level_start_index=None,
                flag='decoder',
                **kwargs):
        """Forward Function of MultiScaleDeformAttention.
        Args:
            query (Tensor): Query of Transformer with shape
                (num_query, bs, embed_dims).
            key (Tensor): The key tensor with shape
                `(num_key, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_key, bs, embed_dims)`.
            identity (Tensor): The tensor used for addition, with the
                same shape as `query`. Default None. If None,
                `query` will be used.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            key_pos (Tensor): The positional encoding for `key`. Default
                None.
            reference_points (Tensor): The normalized reference
                points with shape (bs, num_query, num_levels, 2),
                all elements is range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area.
                or (N, Length_{query}, num_levels, 4), add
                additional two dimensions is (w, h) to
                form reference boxes.
            key_padding_mask (Tensor): ByteTensor for `query`, with
                shape [bs, num_key].
            spatial_shapes (Tensor): Spatial shape of features in
                different levels. With shape (num_levels, 2),
                last dimension represents (h, w).
            level_start_index (Tensor): The start index of each level.
                A tensor has shape ``(num_levels, )`` and can be represented
                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        if value is None:
            value = query
        if identity is None:
            identity = query
        if query_pos is not None:
            query = query + query_pos
        if not self.batch_first:
            # change to (bs, num_query, embed_dims)
            query = query.transpose([1, 0, 2])
            value = value.transpose([1, 0, 2])
        bs, num_query, _ = query.shape
        bs, num_value, _ = value.shape
        # The flattened value length must match the sum of all level sizes.
        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
        value = self.value_proj(value)
        if key_padding_mask is not None:
            # BUGFIX: `masked_fill` was called without the tensor to fill,
            # which dropped the projected values entirely. Zero out padded
            # key positions in `value`, matching the other attention
            # variants in this file.
            value = masked_fill(value, key_padding_mask[..., None], 0.0)
        value = value.reshape([bs, num_value, self.num_heads, -1])
        sampling_offsets = self.sampling_offsets(query).reshape([
            bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
        ])
        # Softmax over all (level, point) pairs jointly.
        attention_weights = self.attention_weights(query).reshape(
            [bs, num_query, self.num_heads, self.num_levels * self.num_points])
        attention_weights = F.softmax(attention_weights, -1)
        attention_weights = attention_weights.reshape(
            [bs, num_query, self.num_heads, self.num_levels, self.num_points])
        if reference_points.shape[-1] == 2:
            # Normalize pixel offsets to [0, 1] coordinates per level (w, h)
            # before adding them to the reference points.
            offset_normalizer = paddle.stack(
                [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            bs, num_query, num_Z_anchors, xy = reference_points.shape
            sampling_locations = reference_points.reshape([bs, num_query, 1, num_Z_anchors, 1, xy]) \
                + sampling_offsets \
                / offset_normalizer.reshape([1, 1, 1, offset_normalizer.shape[0], 1, offset_normalizer.shape[1]])
        elif reference_points.shape[-1] == 4:
            # NOTE(review): upstream Deformable-DETR scales offsets by the box
            # size (reference_points[..., 2:]), whereas this scales by the box
            # center ([..., :2]). Behavior preserved here — confirm whether
            # this branch is ever exercised with 4-dim reference points.
            unsqueeze_reference_points = paddle.unsqueeze(
                reference_points, axis=[2, 4])[..., :2]
            sampling_locations = unsqueeze_reference_points \
                + sampling_offsets / self.num_points \
                * unsqueeze_reference_points \
                * 0.5
        else:
            raise ValueError(
                f'Last dim of reference_points must be'
                f' 2 or 4, but get {reference_points.shape[-1]} instead.')
        # The custom op runs in float32.
        value = value.cast(paddle.float32)
        sampling_locations = sampling_locations.cast(paddle.float32)
        output = ms_deform_attn.ms_deform_attn(
            value, sampling_locations, attention_weights, spatial_shapes,
            level_start_index, self.im2col_step)
        output = self.output_proj(output)
        if not self.batch_first:
            # (num_query, bs ,embed_dims)
            output = output.transpose([1, 0, 2])
        return self.dropout(output) + identity
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/assigners/hungarian_assigner_3d.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# This code is based on https://github.com/fundamentalvision/BEVFormer/blob/master/projects/mmdet3d_plugin/core/bbox/assigners/hungarian_assigner_3d.py
# Ths copyright of fundamentalvision/BEVFormer is as follows:
# Apache-2.0 license [see LICENSE for details].
# ------------------------------------------------------------------------
import numpy as np
import paddle
from scipy.optimize import linear_sum_assignment
from paddle3d.apis import manager
from paddle3d.utils.box import normalize_bbox
def ensure_rng(rng=None):
    """Coerce *rng* into a numpy random number generator.

    Behaviour, adapted from kwarray [1]_:
      * ``None``  -> the global numpy random state,
      * ``int``   -> a fresh ``RandomState`` seeded with that value,
      * anything else is returned unchanged.

    Args:
        rng (int | numpy.random.RandomState | None):
            if None, then defaults to the global rng. Otherwise this can be an
            integer or a RandomState class
    Returns:
        (numpy.random.RandomState) : rng -
            a numpy random number generator
    References:
        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
    """
    if rng is None:
        return np.random.mtrand._rand
    if isinstance(rng, int):
        return np.random.RandomState(rng)
    return rng
class AssignResult:
    """Stores assignments between predicted and truth boxes.
    Attributes:
        num_gts (int): the number of truth boxes considered when computing this
            assignment
        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1 means
            ignore.
        max_overlaps (FloatTensor): the iou between the predicted box and its
            assigned truth box.
        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.
    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = paddle.to_tensor([0, .5, .9, 0])
        >>> gt_inds = paddle.to_tensor([-1, 1, 2, 0])
        >>> labels = paddle.to_tensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
                      labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = paddle.to_tensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
                      labels.shape=(7,))>
    """
    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels
        # Interface for possible user-defined properties
        self._extra_properties = {}
    @property
    def num_preds(self):
        """int: the number of predictions in this assignment"""
        return len(self.gt_inds)
    def set_extra_property(self, key, value):
        """Set user-defined new property.

        Raises an AssertionError if `key` collides with a built-in info key
        or an already-set extra property.
        """
        assert key not in self.info
        self._extra_properties[key] = value
    def get_extra_property(self, key):
        """Get user-defined property, or None if it was never set."""
        return self._extra_properties.get(key, None)
    @property
    def info(self):
        """dict: a dictionary of info about the object"""
        basic_info = {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }
        basic_info.update(self._extra_properties)
        return basic_info
    def __nice__(self):
        """str: a "nice" summary string describing this assign result"""
        parts = []
        parts.append(f'num_gts={self.num_gts!r}')
        if self.gt_inds is None:
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if self.max_overlaps is None:
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append('max_overlaps.shape='
                         f'{tuple(self.max_overlaps.shape)!r}')
        if self.labels is None:
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)
    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.
        Args:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            num_classes (int): number of categories to draw labels from.
                Default 3.
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            rng (None | int | numpy.random.RandomState): seed or state
        Returns:
            :obj:`AssignResult`: Randomly generated assign results.
        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        rng = ensure_rng(kwargs.get('rng', None))
        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        # BUGFIX: previously read the 'p_use_label' key, which made
        # `num_classes` impossible to configure and silently clamped it to
        # the p_use_label value.
        num_classes = kwargs.get('num_classes', 3)
        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)
        if num_gts == 0:
            # Degenerate case: nothing to assign to.
            max_overlaps = paddle.zeros([num_preds], dtype=paddle.float32)
            gt_inds = paddle.zeros([num_preds], dtype=paddle.int64)
            if p_use_label is True or p_use_label < rng.rand():
                labels = paddle.zeros([num_preds], dtype=paddle.int64)
            else:
                labels = None
        else:
            # Create an overlap for each predicted box
            max_overlaps = paddle.to_tensor(rng.rand(num_preds))
            # Construct gt_inds for each predicted box
            is_assigned = rng.rand(num_preds) < p_assigned
            # maximum number of assignments constraints
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()
            is_assigned = paddle.to_tensor(is_assigned)
            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True
            is_ignore = paddle.to_tensor(
                rng.rand(num_preds) < p_ignore) & is_assigned
            gt_inds = paddle.zeros([num_preds], dtype=paddle.int64)
            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = paddle.to_tensor(true_idxs)
            gt_inds[is_assigned] = true_idxs[:n_assigned]
            gt_inds = paddle.to_tensor(
                rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0
            if p_use_label is True or p_use_label < rng.rand():
                if num_classes == 0:
                    labels = paddle.zeros([num_preds], dtype=paddle.int64)
                else:
                    labels = paddle.to_tensor(
                        # remind that we set FG labels to [0, num_class-1]
                        # since mmdet v2.0
                        # BG cat_id: num_class
                        rng.randint(0, num_classes, size=num_preds))
                    labels[~is_assigned] = 0
            else:
                labels = None
        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self
    def add_gt_(self, gt_labels):
        """Add ground truth as assigned results.

        Prepends one self-assignment (1-based index, overlap 1.0) per gt box.
        Args:
            gt_labels (paddle.Tensor): Labels of gt boxes
        """
        self_inds = paddle.arange(1, len(gt_labels) + 1, dtype=paddle.int64)
        self.gt_inds = paddle.concat([self_inds, self.gt_inds])
        self.max_overlaps = paddle.concat(
            [paddle.ones((len(gt_labels), )), self.max_overlaps])
        if self.labels is not None:
            self.labels = paddle.concat([gt_labels, self.labels])
@manager.BBOX_ASSIGNERS.add_component
class HungarianAssigner3D(object):
    """Computes one-to-one matching between predictions and ground truth.
    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
    classification cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt
    Args:
        cls_weight (int | float, optional): The scale factor for classification
            cost. Default 1.0.
        bbox_weight (int | float, optional): The scale factor for regression
            L1 cost. Default 1.0.
        iou_weight (int | float, optional): The scale factor for regression
            iou cost. Default 1.0.
        iou_calculator (dict | optional): The config for the iou calculation.
            Default type `BboxOverlaps2D`.
        iou_mode (str | optional): "iou" (intersection over union), "iof"
            (intersection over foreground), or "giou" (generalized
            intersection over union). Default "giou".
    """
    def __init__(self,
                 cls_cost=dict(type='ClassificationCost', weight=1.),
                 reg_cost=dict(type='BBoxL1Cost', weight=1.0),
                 iou_cost=dict(type='IoUCost', weight=0.0),
                 pc_range=None):
        # NOTE(review): the defaults are config dicts, but `assign` calls
        # these attributes directly (`self.cls_cost(...)`), so callers are
        # presumably expected to pass already-built callable cost objects —
        # TODO confirm against the component builder.
        self.cls_cost = cls_cost
        self.reg_cost = reg_cost
        self.iou_cost = iou_cost
        self.pc_range = pc_range
    def assign(self,
               bbox_pred,
               cls_pred,
               gt_bboxes,
               gt_labels,
               gt_bboxes_ignore=None,
               eps=1e-7):
        """Computes one-to-one matching based on the weighted costs.
        This method assign each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and positive number is the index (1-based)
        of assigned gt.
        The assignment is done in the following steps, the order matters.
        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.
        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Default 1e-7.
        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        num_gts, num_bboxes = gt_bboxes.shape[0], bbox_pred.shape[0]
        # 1. assign -1 by default
        assigned_gt_inds = paddle.full((num_bboxes, ), -1, dtype=paddle.int64)
        assigned_labels = paddle.full((num_bboxes, ), -1, dtype=paddle.int64)
        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)
        # 2. compute the weighted costs
        # classification and bboxcost.
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        # regression L1 cost
        # Note: gt boxes are normalized to the point-cloud range; only the
        # first 8 regression dims take part in the cost.
        normalized_gt_bboxes = normalize_bbox(gt_bboxes, self.pc_range)
        reg_cost = self.reg_cost(bbox_pred[:, :8], normalized_gt_bboxes[:, :8])
        # weighted sum of above two costs
        # (self.iou_cost is not used by this 3D assigner)
        cost = cls_cost + reg_cost
        # 3. do Hungarian matching on CPU using linear_sum_assignment
        cost = cost.detach()
        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = paddle.to_tensor(matched_row_inds)
        matched_col_inds = paddle.to_tensor(matched_col_inds)
        # 4. assign backgrounds and foregrounds
        # assign all indices to backgrounds first
        assigned_gt_inds[:] = 0
        # assign foregrounds based on matching results
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(
            num_gts, assigned_gt_inds, None, labels=assigned_labels)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/assigners/__init__.py
|
from .hungarian_assigner_3d import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/samplers/__init__.py
|
from .pseudo_sampler import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/transformers/samplers/pseudo_sampler.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
# ------------------------------------------------------------------------
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
import paddle
from paddle3d.apis import manager
from paddle3d.models.transformers.assigners.hungarian_assigner_3d import (
AssignResult, ensure_rng)
def random_boxes(num=1, scale=1, rng=None):
    """Generate ``num`` random boxes in (x1, y1, x2, y2) format.

    Simplified version of ``kwimage.Boxes.random``: four uniform samples
    per box are ordered into top-left / bottom-right corners and scaled.

    Args:
        num (int): Number of boxes to generate. Defaults to 1.
        scale (int | float): Multiplier applied to the unit coordinates.
        rng (None | int | numpy.random.RandomState): Seed or random state.

    Returns:
        Tensor: Boxes of shape (num, 4) in x1, y1, x2, y2 format.
    """
    rand_state = ensure_rng(rng)
    raw = rand_state.rand(num, 4).astype(np.float32)
    # Sorting each coordinate pair guarantees x1 <= x2 and y1 <= y2.
    xs = np.sort(raw[:, [0, 2]], axis=1)
    ys = np.sort(raw[:, [1, 3]], axis=1)
    corners = np.stack(
        [xs[:, 0], ys[:, 0], xs[:, 1], ys[:, 1]], axis=1) * scale
    return paddle.to_tensor(corners)
class SamplingResult:
    """Container for the outcome of bbox sampling.

    Holds the sampled positive/negative proposal indices together with the
    matched ground-truth boxes and labels, as produced by
    ``BaseSampler.sample``.
    """
    def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
                 gt_flags):
        # Indices (into `bboxes`) of the sampled positives / negatives.
        self.pos_inds = pos_inds
        self.neg_inds = neg_inds
        self.pos_bboxes = bboxes[pos_inds]
        self.neg_bboxes = bboxes[neg_inds]
        # 1 where the sampled proposal was injected from ground truth, else 0.
        self.pos_is_gt = gt_flags[pos_inds]
        self.num_gts = gt_bboxes.shape[0]
        # AssignResult.gt_inds is 1-based (0 means background); shift to
        # 0-based indices into gt_bboxes.
        self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
        if gt_bboxes.numel() == 0:
            # hack for index error case
            assert self.pos_assigned_gt_inds.numel() == 0
            self.pos_gt_bboxes = paddle.empty_like(gt_bboxes).reshape([-1, 4])
        else:
            if len(gt_bboxes.shape) < 2:
                gt_bboxes = gt_bboxes.reshape([-1, 4])
            #self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
            # paddle.gather row-selects the matched gt box for each positive.
            self.pos_gt_bboxes = paddle.gather(gt_bboxes,
                                               self.pos_assigned_gt_inds)
        if assign_result.labels is not None:
            self.pos_gt_labels = assign_result.labels[pos_inds]
        else:
            self.pos_gt_labels = None
    @property
    def bboxes(self):
        """paddle.Tensor: concatenated positive and negative boxes"""
        return paddle.concat([self.pos_bboxes, self.neg_bboxes])
    def to(self, device):
        """Device-transfer hook kept for mmdet API compatibility.

        The loop below rebinds every attribute to itself, so under paddle
        this is effectively a no-op; `device` is ignored.
        """
        _dict = self.__dict__
        for key, value in _dict.items():
            _dict[key] = value
        return self
    def __nice__(self):
        """Return a pretty multi-line summary of :attr:`info` (tensor
        shapes are shown for the box fields instead of their values)."""
        data = self.info.copy()
        data['pos_bboxes'] = data.pop('pos_bboxes').shape
        data['neg_bboxes'] = data.pop('neg_bboxes').shape
        parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
        body = ' ' + ',\n '.join(parts)
        return '{\n' + body + '\n}'
    def __repr__(self):
        """str: the string of the module"""
        try:
            nice = self.__nice__()
            classname = self.__class__.__name__
            return f'<{classname}({nice}) at {hex(id(self))}>'
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
    def __str__(self):
        """str: the string of the module"""
        try:
            classname = self.__class__.__name__
            nice = self.__nice__()
            return f'<{classname}({nice})>'
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
    @property
    def info(self):
        """Returns a dictionary of info about the object."""
        return {
            'pos_inds': self.pos_inds,
            'neg_inds': self.neg_inds,
            'pos_bboxes': self.pos_bboxes,
            'neg_bboxes': self.neg_bboxes,
            'pos_is_gt': self.pos_is_gt,
            'num_gts': self.num_gts,
            'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
        }
    @classmethod
    def random(cls, rng=None, **kwargs):
        """Create a random SamplingResult for testing/demo purposes.

        Args:
            rng (None | int | numpy.random.RandomState): seed or state.
            kwargs (keyword arguments):
                - num_preds: number of predicted boxes
                - num_gts: number of true boxes
                - p_ignore (float): probability of a predicted box assigned to \
                    an ignored truth.
                - p_assigned (float): probability of a predicted box not being \
                    assigned.
                - p_use_label (float | bool): with labels or not.
        Returns:
            :obj:`SamplingResult`: Randomly generated sampling result.
        """
        rng = ensure_rng(rng)
        # make probabalistic?
        num = 32
        pos_fraction = 0.5
        neg_pos_ub = -1
        assign_result = AssignResult.random(rng=rng, **kwargs)
        # Note we could just compute an assignment
        bboxes = random_boxes(assign_result.num_preds, rng=rng)
        gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
        if rng.rand() > 0.2:
            # sometimes algorithms squeeze their data, be robust to that
            gt_bboxes = gt_bboxes.squeeze()
            bboxes = bboxes.squeeze()
        if assign_result.labels is None:
            gt_labels = None
        else:
            # NOTE(review): random label generation is unimplemented, so this
            # branch also yields gt_labels = None for now.
            gt_labels = None  # todo
        if gt_labels is None:
            add_gt_as_proposals = False
        else:
            add_gt_as_proposals = True  # make probabalistic?
        sampler = RandomSampler(
            num,
            pos_fraction,
            neg_pos_ub=neg_pos_ub,
            add_gt_as_proposals=add_gt_as_proposals,
            rng=rng)
        self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
        return self
class BaseSampler(metaclass=ABCMeta):
    """Base class of samplers.

    Args:
        num (int): Total number of proposals to sample.
        pos_fraction (float): Fraction of ``num`` reserved for positives.
        neg_pos_ub (int): Upper bound on the negative:positive ratio;
            -1 disables the bound.
        add_gt_as_proposals (bool): Whether to prepend ground-truth boxes
            to the proposal set before sampling.
    """
    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        self.num = num
        self.pos_fraction = pos_fraction
        self.neg_pos_ub = neg_pos_ub
        self.add_gt_as_proposals = add_gt_as_proposals
        # By default this sampler performs both positive and negative
        # sampling itself; subclasses may swap in dedicated samplers.
        self.pos_sampler = self
        self.neg_sampler = self
    @abstractmethod
    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Sample positive samples."""
        pass
    @abstractmethod
    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Sample negative samples."""
        pass
    def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None,
               **kwargs):
        """Sample positive and negative bboxes.
        This is a simple implementation of bbox sampling given candidates,
        assigning results and ground truth bboxes.
        Args:
            assign_result (:obj:`AssignResult`): Bbox assigning results.
            bboxes (Tensor): Boxes to be sampled from.
            gt_bboxes (Tensor): Ground truth bboxes.
            gt_labels (Tensor, optional): Class labels of ground truth bboxes.
        Returns:
            :obj:`SamplingResult`: Sampling result.
        Example:
            >>> from mmdet.core.bbox import RandomSampler
            >>> from mmdet.core.bbox import AssignResult
            >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes
            >>> rng = ensure_rng(None)
            >>> assign_result = AssignResult.random(rng=rng)
            >>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
            >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
            >>> gt_labels = None
            >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
            >>>                      add_gt_as_proposals=False)
            >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)
        """
        if len(bboxes.shape) < 2:
            bboxes = bboxes[None, :]
        bboxes = bboxes[:, :4]
        # gt_flags marks which proposals originate from ground truth (1) as
        # opposed to predictions (0).
        gt_flags = paddle.zeros((bboxes.shape[0], ), dtype=paddle.int32)
        if self.add_gt_as_proposals and len(gt_bboxes) > 0:
            if gt_labels is None:
                raise ValueError(
                    'gt_labels must be given when add_gt_as_proposals is True')
            bboxes = paddle.concat([gt_bboxes, bboxes], axis=0)
            assign_result.add_gt_(gt_labels)
            # NOTE(review): paddle.ones generally expects a shape sequence;
            # passing a bare int here may error on some paddle releases —
            # confirm against the targeted paddle version.
            gt_ones = paddle.ones(gt_bboxes.shape[0], dtype=paddle.int32)
            gt_flags = paddle.concat([gt_ones, gt_flags])
        # Sample positives first; negatives fill the remaining budget.
        num_expected_pos = int(self.num * self.pos_fraction)
        pos_inds = self.pos_sampler._sample_pos(
            assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
        # We found that sampled indices have duplicated items occasionally.
        # (may be a bug of Pypaddle)
        pos_inds = pos_inds.unique()
        num_sampled_pos = pos_inds.numel()
        num_expected_neg = self.num - num_sampled_pos
        if self.neg_pos_ub >= 0:
            # Cap the number of negatives relative to sampled positives.
            _pos = max(1, num_sampled_pos)
            neg_upper_bound = int(self.neg_pos_ub * _pos)
            if num_expected_neg > neg_upper_bound:
                num_expected_neg = neg_upper_bound
        neg_inds = self.neg_sampler._sample_neg(
            assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
        neg_inds = neg_inds.unique()
        sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                                         assign_result, gt_flags)
        return sampling_result
@manager.BBOX_SAMPLERS.add_component
class RandomSampler(BaseSampler):
    """Random sampler.

    Draws up to ``num * pos_fraction`` positive and the remaining negative
    proposals uniformly at random from the assignment result.

    Args:
        num (int): Number of samples
        pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
            positive samples. Defaults to -1 (no bound).
        add_gt_as_proposals (bool, optional): Whether to add ground truth
            boxes as proposals. Defaults to True.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                            add_gt_as_proposals)
        # Dedicated RandomState (seeded via the optional `rng` kwarg) so
        # sampling can be made reproducible.
        self.rng = ensure_rng(kwargs.get('rng', None))

    def random_choice(self, gallery, num):
        """Random select some elements from the gallery.

        If `gallery` is a Tensor, the returned indices will be a Tensor;
        If `gallery` is a ndarray or list, the returned indices will be a
        ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert len(gallery) >= num
        is_tensor = isinstance(gallery, paddle.Tensor)
        if not is_tensor:
            gallery = paddle.to_tensor(gallery, dtype=paddle.int64)
        # BUGFIX: the previous code called `np.random.randperm`, which does
        # not exist in numpy (randperm is a torch/paddle API) and raised
        # AttributeError at runtime. Use the sampler's own RandomState so the
        # permutation also honors the `rng` seed passed to __init__.
        perm = paddle.to_tensor(
            self.rng.permutation(int(gallery.numel()))[:num])
        rand_inds = gallery[perm]
        if not is_tensor:
            rand_inds = rand_inds.numpy()
        return rand_inds

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample some positive samples."""
        pos_inds = paddle.nonzero(assign_result.gt_inds > 0)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            # Fewer positives than requested: keep them all.
            return pos_inds
        else:
            return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Randomly sample some negative samples."""
        neg_inds = paddle.nonzero(assign_result.gt_inds == 0)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            return self.random_choice(neg_inds, num_expected)
@manager.BBOX_SAMPLERS.add_component
class PseudoSampler(BaseSampler):
    """A pseudo sampler that performs no actual sub-sampling: every assigned
    prediction becomes a positive and every background one a negative."""

    def __init__(self, **kwargs):
        # Intentionally skip BaseSampler.__init__: no sampling config needed.
        pass

    def _sample_pos(self, **kwargs):
        """Sample positive samples."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples."""
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
        """Directly return the positive and negative indices of samples.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            bboxes (paddle.Tensor): Bounding boxes
            gt_bboxes (paddle.Tensor): Ground truth boxes

        Returns:
            :obj:`SamplingResult`: sampler results
        """
        gt_inds = assign_result.gt_inds
        pos_inds = paddle.nonzero(
            gt_inds > 0, as_tuple=False).squeeze(-1).unique()
        neg_inds = paddle.nonzero(
            gt_inds == 0, as_tuple=False).squeeze(-1).unique()
        # No gt boxes are injected as proposals, so all flags stay zero.
        zero_flags = paddle.zeros((bboxes.shape[0], ), dtype=paddle.int32)
        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                              assign_result, zero_flags)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/voxel_encoders/pillar_encoder.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/tianweiy/CenterPoint/blob/master/det3d/models/readers/pillar_encoder.py
Ths copyright of tianweiy/CenterPoint is as follows:
MIT License [see LICENSE for details].
https://github.com/tianweiy/CenterPoint/blob/master/det3d/models/readers/pillar_encoder.py fork from SECOND.
Code written by Alex Lang and Oscar Beijbom, 2018.
Licensed under MIT License [see LICENSE].
"""
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Uniform
from paddle3d.apis import manager
from .voxel_encoder import get_paddings_indicator
__all__ = ['PillarFeatureNet', 'build_linear_layer', 'build_norm_layer']
def build_linear_layer(in_channels, out_channels, bias=True):
    """Create an ``nn.Linear`` whose weight (and bias, when enabled) is
    initialized from U(-1/sqrt(in_channels), 1/sqrt(in_channels)).

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        bias (bool): Whether the layer has a learnable bias.

    Returns:
        nn.Linear: The configured linear layer.
    """
    limit = 1 / math.sqrt(in_channels)

    def _uniform_attr():
        return ParamAttr(initializer=Uniform(-limit, limit))

    return nn.Linear(
        in_channels,
        out_channels,
        weight_attr=_uniform_attr(),
        bias_attr=_uniform_attr() if bias else False)
def build_norm_layer(cfg, num_features, weight_attr=True, bias_attr=True):
    """Instantiate a normalization layer from a config dict.

    Args:
        cfg (dict): Must contain 'type' (a ``paddle.nn`` layer name, e.g.
            'BatchNorm1D'), 'momentum' and 'eps'. The momentum is converted
            to paddle's convention via ``1 - cfg['momentum']``.
        num_features (int): Number of channels to normalize.
        weight_attr (bool): If True, the scale is initialized to 1;
            otherwise the scale parameter is disabled.
        bias_attr (bool): If True, the shift is initialized to 0;
            otherwise the bias parameter is disabled.

    Returns:
        nn.Layer: The constructed normalization layer.
    """
    layer_cls = getattr(nn, cfg['type'])
    scale = ParamAttr(initializer=Constant(value=1)) if weight_attr else False
    shift = ParamAttr(initializer=Constant(value=0)) if bias_attr else False
    return layer_cls(
        num_features,
        momentum=1 - cfg['momentum'],
        epsilon=cfg['eps'],
        weight_attr=scale,
        bias_attr=shift)
class PFNLayer(nn.Layer):
    """Pillar Feature Net layer: Linear + BatchNorm1D + ReLU followed by a
    max-pool over the points of each pillar.

    Args:
        in_channels (int): Number of input point-feature channels.
        out_channels (int): Requested output channels. Non-last layers emit
            out_channels // 2 per-point features and concatenate the pooled
            pillar feature back on, restoring out_channels in total.
        max_num_points_in_voxel (int): Padded point capacity of each pillar.
        norm_cfg (dict): Config passed to ``build_norm_layer``.
        last_layer (bool): If True, only the pooled pillar feature is
            returned.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 max_num_points_in_voxel=20,
                 norm_cfg=dict(type='BatchNorm1D', eps=1e-3, momentum=0.01),
                 last_layer=False):
        super(PFNLayer, self).__init__()
        self.name = 'PFNLayer'
        self.last_vfe = last_layer
        if not self.last_vfe:
            # Half of the channels carry per-point features; the other half
            # is filled by the pooled pillar context in forward().
            out_channels = out_channels // 2
        self.units = out_channels
        self.norm = build_norm_layer(norm_cfg, self.units)
        self.linear = build_linear_layer(in_channels, self.units, bias=False)
        self.max_num_points_in_voxel = max_num_points_in_voxel
    def forward(self, inputs, num_voxels=None):
        """Encode pillar points and pool them.

        Args:
            inputs (Tensor): Pillar point features, shape (N, M, C_in)
                with M == max_num_points_in_voxel.
            num_voxels (Tensor, optional): Unused; kept for API symmetry.

        Returns:
            Tensor: (N, 1, units) if this is the last layer, otherwise
                (N, M, 2 * units) with the pooled feature broadcast back
                onto every point.
        """
        x = self.linear(inputs)
        # BatchNorm1D normalizes the channel axis, so transpose to
        # (N, C, M) and back.
        x = self.norm(x.transpose(perm=[0, 2, 1])).transpose(perm=[0, 2, 1])
        x = F.relu(x)
        # x_max = paddle.max(x, axis=1, keepdim=True)
        # TODO(luoqianhui): remove the following complicated max operation
        # paddle.max mistakenly backwards gradient to all elements when they are same,
        # to align with paddle implement, we recombine paddle apis to backwards gradient
        # to the last one.
        # Note: when the input elements are same, paddle max and argmax treat the last one
        # as the maximum value, but paddle argmax, numpy max and argmax treat the first one.
        max_idx = paddle.argmax(x, axis=1)
        data = x.transpose([0, 2,
                            1]).reshape([-1, self.max_num_points_in_voxel])
        index = max_idx.reshape([-1, 1])
        # index_sample picks the per-channel argmax element so the gradient
        # flows only to that single element.
        sample = paddle.index_sample(data, index)
        x_max = sample.reshape([-1, self.units, 1]).transpose([0, 2, 1])
        if self.last_vfe:
            return x_max
        else:
            # Broadcast the pooled feature to every point and concatenate.
            x_repeat = x_max.tile([1, self.max_num_points_in_voxel, 1])
            x_concatenated = paddle.concat([x, x_repeat], axis=2)
            return x_concatenated
@manager.VOXEL_ENCODERS.add_component
class PillarFeatureNet(nn.Layer):
    """Pillar Feature Net: decorates raw points with cluster- and
    pillar-center offsets, then encodes each pillar with stacked PFNLayers.

    Args:
        in_channels (int): Number of raw point features. Defaults to 4.
        feat_channels (tuple): Output channels of each PFNLayer.
        with_distance (bool): If True, append each point's Euclidean norm
            as an extra feature.
        max_num_points_in_voxel (int): Padded point capacity per pillar.
        voxel_size (tuple): Pillar size (x, y, z).
        point_cloud_range (tuple): Cropping range (x_min, y_min, z_min,
            x_max, y_max, z_max).
        legacy (bool): If True (default), compute the pillar-center offsets
            in place on the input feature slice (original behaviour);
            if False, use a freshly allocated tensor.
    """
    def __init__(self,
                 in_channels=4,
                 feat_channels=(64, ),
                 with_distance=False,
                 max_num_points_in_voxel=20,
                 voxel_size=(0.2, 0.2, 4),
                 point_cloud_range=(0, -40, -3, 70.4, 40, 1),
                 legacy=True):
        super(PillarFeatureNet, self).__init__()
        self.legacy = legacy
        self.in_channels = in_channels
        # with cluster center
        in_channels += 3
        # with voxel center
        in_channels += 2
        if with_distance:
            in_channels += 1
        self.with_distance = with_distance
        # Create PillarFeatureNet layers
        feat_channels = [in_channels] + list(feat_channels)
        pfn_layers = []
        norm_cfg = dict(type='BatchNorm1D', eps=1e-3, momentum=0.01)
        for i in range(len(feat_channels) - 1):
            in_filters = feat_channels[i]
            out_filters = feat_channels[i + 1]
            # Only the final PFNLayer returns the pooled pillar feature.
            if i < len(feat_channels) - 2:
                last_layer = False
            else:
                last_layer = True
            pfn_layers.append(
                PFNLayer(
                    in_filters,
                    out_filters,
                    max_num_points_in_voxel=max_num_points_in_voxel,
                    norm_cfg=norm_cfg,
                    last_layer=last_layer))
        self.pfn_layers = nn.LayerList(pfn_layers)
        # Need pillar (voxel) size and x/y offset in order to calculate offset
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.x_offset = self.vx / 2 + point_cloud_range[0]
        self.y_offset = self.vy / 2 + point_cloud_range[1]
        self.point_cloud_range = point_cloud_range
        self.max_num_points_in_voxel = max_num_points_in_voxel
    def forward(self, features, num_points_per_voxel, coors):
        """Forward function.
        Args:
            features (paddle.Tensor): Point features or raw points in shape
                (N, M, C).
            num_points_per_voxel (paddle.Tensor): Number of points in each pillar.
            coors (paddle.Tensor): Coordinates of each voxel; column 2 is the
                y index and column 3 the x index (see usage below).
        Returns:
            paddle.Tensor: Features of pillars.
        """
        features_ls = [features]
        # Find distance of x, y, and z from cluster center
        features_sum = paddle.sum(features[:, :, :3], axis=1, keepdim=True)
        points_mean = features_sum / paddle.cast(
            num_points_per_voxel, features.dtype).reshape([-1, 1, 1])
        f_cluster = features[:, :, :3] - points_mean
        features_ls.append(f_cluster)
        # Find distance of x, y, and z from pillar center
        dtype = features.dtype
        if not self.legacy:
            f_center = paddle.zeros_like(features[:, :, :2])
            f_center[:, :, 0] = features[:, :, 0] - (coors[:, 3].reshape(
                [-1, 1]).astype(dtype) * self.vx + self.x_offset)
            f_center[:, :, 1] = features[:, :, 1] - (coors[:, 2].reshape(
                [-1, 1]).astype(dtype) * self.vy + self.y_offset)
        else:
            # NOTE(review): this branch writes through a slice of `features`,
            # mutating the caller's tensor in place — presumably intentional
            # legacy behaviour; confirm before reusing `features` upstream.
            f_center = features[:, :, :2]
            f_center[:, :, 0] = f_center[:, :, 0] - (coors[:, 3].reshape(
                [-1, 1]).astype(features.dtype) * self.vx + self.x_offset)
            f_center[:, :, 1] = f_center[:, :, 1] - (coors[:, 2].reshape(
                [-1, 1]).astype(features.dtype) * self.vy + self.y_offset)
        features_ls.append(f_center)
        if self.with_distance:
            points_dist = paddle.linalg.norm(
                features[:, :, :3], 2, 2, keepdim=True)
            features_ls.append(points_dist)
        features = paddle.concat(features_ls, axis=-1)
        # The feature decorations were calculated without regard to whether pillar was empty. Need to ensure that
        # empty pillars remain set to zeros.
        mask = get_paddings_indicator(num_points_per_voxel,
                                      self.max_num_points_in_voxel)
        mask = paddle.reshape(
            mask, [-1, self.max_num_points_in_voxel, 1]).astype(features.dtype)
        features = features * mask
        for pfn in self.pfn_layers:
            features = pfn(features, num_points_per_voxel)
        # The last PFNLayer returns (N, 1, C); squeeze drops the point axis.
        return features.squeeze()
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/voxel_encoders/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import pillar_encoder, voxel_encoder
from .pillar_encoder import *
from .voxel_encoder import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/voxel_encoders/voxel_encoder.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/tianweiy/CenterPoint/blob/master/det3d/models/readers/voxel_encoder.py
Ths copyright of tianweiy/CenterPoint is as follows:
MIT License [see LICENSE for details].
"""
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
def get_paddings_indicator(actual_num, max_num):
    """Build a boolean mask marking which point slots in each voxel are real.

    For voxel i with ``actual_num[i]`` valid points, entries
    [0, actual_num[i]) of row i are True and the padded tail is False.

    Args:
        actual_num (Tensor): Number of valid points per voxel; any shape
            that flattens to (N,).
        max_num (int): Maximum number of point slots per voxel.

    Returns:
        Tensor: Boolean mask of shape (N, max_num).
    """
    counts = paddle.reshape(actual_num, [-1, 1])
    slot_ids = paddle.arange(
        0, max_num, dtype=counts.dtype).reshape([1, -1])
    # Broadcast (N, 1) against (1, max_num): slot j is valid iff j < count.
    return counts > slot_ids
@manager.VOXEL_ENCODERS.add_component
class VoxelMean(nn.Layer):
    """Voxel encoder that represents each voxel by the mean of its points.

    Args:
        in_channels (int): Number of features per point. Defaults to 4.
    """

    def __init__(self, in_channels=4):
        super(VoxelMean, self).__init__()
        self.in_channels = in_channels

    def forward(self, features, num_voxels, coors=None):
        """Average the point features within each voxel.

        Args:
            features (Tensor): Padded point features, shape (N, M, C).
            num_voxels (Tensor): Valid point count per voxel, shape (N,).
            coors (Tensor, optional): Voxel coordinates; unused.

        Returns:
            Tensor: Per-voxel mean features, shape (N, C).
        """
        assert self.in_channels == features.shape[-1]
        summed = paddle.sum(
            features[:, :, :self.in_channels], axis=1, keepdim=False)
        # Divide by the true point count rather than the padded size M.
        counts = paddle.cast(num_voxels, features.dtype).reshape([-1, 1])
        return summed / counts
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/voxelizers/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .voxelize import HardVoxelizer
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/voxelizers/voxelize.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import warnings
from paddle3d.apis import manager
from paddle3d.ops import voxelize
__all__ = ['HardVoxelizer']
@manager.VOXELIZERS.add_component
class HardVoxelizer(nn.Layer):
    """Hard voxelization: assigns each point to at most one voxel using the
    custom ``voxelize.hard_voxelize`` op.

    Args:
        voxel_size (list): Size of a voxel along (x, y, z).
        point_cloud_range (list): Cropping range (x_min, y_min, z_min,
            x_max, y_max, z_max).
        max_num_points_in_voxel (int): Point capacity of each voxel.
        max_num_voxels (int | tuple | list): Maximum voxel count; a
            (train, eval) pair, or one value used for both modes.
    """
    def __init__(self, voxel_size, point_cloud_range, max_num_points_in_voxel,
                 max_num_voxels):
        super(HardVoxelizer, self).__init__()
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        self.max_num_points_in_voxel = max_num_points_in_voxel
        if isinstance(max_num_voxels, (tuple, list)):
            self.max_num_voxels = max_num_voxels
        else:
            # Use the same limit for training and evaluation.
            self.max_num_voxels = [max_num_voxels, max_num_voxels]
    def single_forward(self, point, max_num_voxels, bs_idx):
        """Voxelize one point cloud and prefix its coords with the batch id.

        Args:
            point (Tensor): Points of one sample, shape (P, C).
            max_num_voxels (int): Voxel budget for the current mode.
            bs_idx (int): Batch index written into coord column 0.

        Returns:
            tuple: (voxels, coors_pad, num_points_per_voxel) where
                coors_pad has shape (V, 4) = (batch_idx, z, y, x).
        """
        voxels, coors, num_points_per_voxel, voxels_num = voxelize.hard_voxelize(
            point, self.voxel_size, self.point_cloud_range,
            self.max_num_points_in_voxel, max_num_voxels)
        # The op returns fixed-size buffers; keep only the filled prefix.
        voxels = voxels[0:voxels_num, :, :]
        coors = coors[0:voxels_num, :]
        num_points_per_voxel = num_points_per_voxel[0:voxels_num]
        num_voxels_has_point = num_points_per_voxel.nonzero().shape[0]
        if num_voxels_has_point >= max_num_voxels:
            warnings.warn("{} is larger than max num voxels {}!".\
                format(num_voxels_has_point, max_num_voxels))
        # bs_idx = paddle.full(
        #     shape=voxels_num, fill_value=bs_idx, dtype=coors.dtype)
        # bs_idx = bs_idx.reshape([-1, 1])
        # coors_pad = paddle.concat([bs_idx, coors], axis=1)
        # Prepend the batch index via F.pad (export-friendly alternative to
        # the concat above); pad only works on floats, hence the casts.
        coors = coors.reshape([1, -1, 3])
        coors_dtype = coors.dtype
        coors = coors.cast('float32')
        coors_pad = F.pad(
            coors, [1, 0], value=bs_idx, mode='constant', data_format="NCL")
        coors_pad = coors_pad.reshape([-1, 4])
        coors_pad = coors_pad.cast(coors_dtype)
        return voxels, coors_pad, num_points_per_voxel
    def forward(self, points):
        """Voxelize a batch of point clouds (or one cloud in export mode).

        Args:
            points (list[Tensor] | Tensor): Per-sample point clouds, or a
                single cloud when `in_export_mode` is set.

        Returns:
            tuple: Concatenated (voxels, coors, num_points_per_voxel).
        """
        if self.training:
            max_num_voxels = self.max_num_voxels[0]
        else:
            max_num_voxels = self.max_num_voxels[1]
        if not getattr(self, "in_export_mode", False):
            batch_voxels, batch_coors, batch_num_points = [], [], []
            for bs_idx, point in enumerate(points):
                voxels, coors_pad, num_points_per_voxel = self.single_forward(
                    point, max_num_voxels, bs_idx)
                batch_voxels.append(voxels)
                batch_coors.append(coors_pad)
                batch_num_points.append(num_points_per_voxel)
            voxels_batch = paddle.concat(batch_voxels, axis=0)
            num_points_batch = paddle.concat(batch_num_points, axis=0)
            coors_batch = paddle.concat(batch_coors, axis=0)
            return voxels_batch, coors_batch, num_points_batch
        else:
            # Export mode: a single (batch-size 1) point cloud.
            voxels, coors_pad, num_points_per_voxel = self.single_forward(
                points, max_num_voxels, 0)
            return voxels, coors_pad, num_points_per_voxel
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/backbones/hrnet.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models import layers
from paddle3d.models.layers import param_init, reset_parameters
from paddle3d.utils import checkpoint
__all__ = ["HRNet_W18"]
@manager.BACKBONES.add_component
class HRNet_W18(nn.Layer):
"""
The HRNet implementation based on PaddlePaddle.
The original article refers to
Jingdong Wang, et, al. "HRNet:Deep High-Resolution Representation Learning for Visual Recognition"
(https://arxiv.org/pdf/1908.07919.pdf).
Args:
pretrained (str, optional): The path of pretrained model.
stage1_num_modules (int, optional): Number of modules for stage1. Default 1.
stage1_num_blocks (list, optional): Number of blocks per module for stage1. Default (4).
stage1_num_channels (list, optional): Number of channels per branch for stage1. Default (64).
stage2_num_modules (int, optional): Number of modules for stage2. Default 1.
stage2_num_blocks (list, optional): Number of blocks per module for stage2. Default (4, 4).
stage2_num_channels (list, optional): Number of channels per branch for stage2. Default (18, 36).
stage3_num_modules (int, optional): Number of modules for stage3. Default 4.
stage3_num_blocks (list, optional): Number of blocks per module for stage3. Default (4, 4, 4).
stage3_num_channels (list, optional): Number of channels per branch for stage3. Default [18, 36, 72).
stage4_num_modules (int, optional): Number of modules for stage4. Default 3.
stage4_num_blocks (list, optional): Number of blocks per module for stage4. Default (4, 4, 4, 4).
stage4_num_channels (list, optional): Number of channels per branch for stage4. Default (18, 36, 72. 144).
has_se (bool, optional): Whether to use Squeeze-and-Excitation module. Default False.
align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
"""
def __init__(self,
pretrained=None,
stage1_num_modules=1,
stage1_num_blocks=(4, ),
stage1_num_channels=(64, ),
stage2_num_modules=1,
stage2_num_blocks=(4, 4),
stage2_num_channels=(18, 36),
stage3_num_modules=4,
stage3_num_blocks=(4, 4, 4),
stage3_num_channels=(18, 36, 72),
stage4_num_modules=3,
stage4_num_blocks=(4, 4, 4, 4),
stage4_num_channels=(18, 36, 72, 144),
has_se=False,
align_corners=False,
padding_same=True):
super(HRNet_W18, self).__init__()
self.pretrained = pretrained
self.stage1_num_modules = stage1_num_modules
self.stage1_num_blocks = stage1_num_blocks
self.stage1_num_channels = stage1_num_channels
self.stage2_num_modules = stage2_num_modules
self.stage2_num_blocks = stage2_num_blocks
self.stage2_num_channels = stage2_num_channels
self.stage3_num_modules = stage3_num_modules
self.stage3_num_blocks = stage3_num_blocks
self.stage3_num_channels = stage3_num_channels
self.stage4_num_modules = stage4_num_modules
self.stage4_num_blocks = stage4_num_blocks
self.stage4_num_channels = stage4_num_channels
self.has_se = has_se
self.align_corners = align_corners
self.feat_channels = [sum(stage4_num_channels)]
self.norm_mean = paddle.to_tensor([0.485, 0.456, 0.406])
self.norm_std = paddle.to_tensor([0.229, 0.224, 0.225])
self.conv_layer1_1 = layers.ConvBNReLU(
in_channels=3,
out_channels=64,
kernel_size=3,
stride=2,
padding=1 if not padding_same else 'same',
bias_attr=False)
self.conv_layer1_2 = layers.ConvBNReLU(
in_channels=64,
out_channels=64,
kernel_size=3,
stride=2,
padding=1 if not padding_same else 'same',
bias_attr=False)
self.la1 = Layer1(
num_channels=64,
num_blocks=self.stage1_num_blocks[0],
num_filters=self.stage1_num_channels[0],
has_se=has_se,
name="layer2",
padding_same=padding_same)
self.tr1 = TransitionLayer(
in_channels=[self.stage1_num_channels[0] * 4],
out_channels=self.stage2_num_channels,
name="tr1",
padding_same=padding_same)
self.st2 = Stage(
num_channels=self.stage2_num_channels,
num_modules=self.stage2_num_modules,
num_blocks=self.stage2_num_blocks,
num_filters=self.stage2_num_channels,
has_se=self.has_se,
name="st2",
align_corners=align_corners,
padding_same=padding_same)
self.tr2 = TransitionLayer(
in_channels=self.stage2_num_channels,
out_channels=self.stage3_num_channels,
name="tr2",
padding_same=padding_same)
self.st3 = Stage(
num_channels=self.stage3_num_channels,
num_modules=self.stage3_num_modules,
num_blocks=self.stage3_num_blocks,
num_filters=self.stage3_num_channels,
has_se=self.has_se,
name="st3",
align_corners=align_corners,
padding_same=padding_same)
self.tr3 = TransitionLayer(
in_channels=self.stage3_num_channels,
out_channels=self.stage4_num_channels,
name="tr3",
padding_same=padding_same)
self.st4 = Stage(
num_channels=self.stage4_num_channels,
num_modules=self.stage4_num_modules,
num_blocks=self.stage4_num_blocks,
num_filters=self.stage4_num_channels,
has_se=self.has_se,
name="st4",
align_corners=align_corners,
padding_same=padding_same)
self.init_weight()
    def forward(self, x):
        """Run the HRNet trunk and fuse the four stage-4 branches.

        Args:
            x (Tensor): Input images (N, 3, H, W); NaN marks padded pixels.

        Returns:
            list: [la1, x] — the stage-1 feature map and the channel-wise
                concatenation of all four stage-4 branches resized to the
                spatial size of branch 1.
        """
        # Normalize the input and zero out NaN-padded pixels before the stem.
        x = self.preprocess(x)
        conv1 = self.conv_layer1_1(x)
        conv2 = self.conv_layer1_2(conv1)  # stem: two stride-2 convs (1/4 res)
        la1 = self.la1(conv2)
        tr1 = self.tr1([la1])
        st2 = self.st2(tr1)
        tr2 = self.tr2(st2)
        st3 = self.st3(tr2)
        tr3 = self.tr3(st3)
        st4 = self.st4(tr3)
        # Resize branches 0, 2 and 3 to branch 1's spatial size, then fuse by
        # concatenation along channels.
        size = paddle.shape(st4[1])[2:]
        x1 = F.interpolate(
            st4[0], size, mode='bilinear', align_corners=self.align_corners)
        x2 = F.interpolate(
            st4[2], size, mode='bilinear', align_corners=self.align_corners)
        x3 = F.interpolate(
            st4[3], size, mode='bilinear', align_corners=self.align_corners)
        x = paddle.concat([x1, st4[1], x2, x3], axis=1)
        return [la1, x]
    def init_weight(self):
        """Reset every conv's parameters, then optionally load pretrained weights."""
        for sublayer in self.sublayers():
            if isinstance(sublayer, nn.Conv2D):
                reset_parameters(sublayer)
        # Pretrained weights (if any) override the reset above.
        if self.pretrained is not None:
            checkpoint.load_pretrained_model(self, self.pretrained)
    def preprocess(self, images):
        """
        Preprocess images
        Args:
            images [paddle.Tensor(N, 3, H, W)]: Input images, with padded
                pixels encoded as NaN
        Return
            x [paddle.Tensor(N, 3, H, W)]: Normalized images with padded
                pixels zeroed
        """
        x = images
        # Create a mask for padded pixels (padding is encoded as NaN by the
        # upstream pipeline, as evidenced by the isnan test here)
        mask = paddle.isnan(x)
        # Match ResNet pretrained preprocessing (ImageNet mean/std)
        x = self.normalize(x, mean=self.norm_mean, std=self.norm_std)
        # Make padded pixels = 0
        a = paddle.zeros_like(x)
        x = paddle.where(mask, a, x)
        return x
    def normalize(self, image, mean, std):
        """Channel-wise (image - mean) / std computed on a flattened HxW view."""
        shape = paddle.shape(image)
        # Append a trailing axis so per-channel stats broadcast over the
        # flattened spatial dimension.
        if mean.shape:
            mean = mean[..., :, None]
        if std.shape:
            std = std[..., :, None]
        out = (image.reshape([shape[0], shape[1], shape[2] * shape[3]]) -
               mean) / std
        return out.reshape(shape)
class Layer1(nn.Layer):
    """First HRNet stage: a sequential stack of bottleneck blocks.

    The first block projects num_channels to num_filters * 4 via its
    downsample path; subsequent blocks keep num_filters * 4 channels.
    """
    def __init__(self,
                 num_channels,
                 num_filters,
                 num_blocks,
                 has_se=False,
                 name=None,
                 padding_same=True):
        super(Layer1, self).__init__()
        self.bottleneck_block_list = []
        for i in range(num_blocks):
            bottleneck_block = self.add_sublayer(
                "bb_{}_{}".format(name, i + 1),
                BottleneckBlock(
                    # Only the first block sees `num_channels`; later blocks
                    # consume the x4-expanded bottleneck output.
                    num_channels=num_channels if i == 0 else num_filters * 4,
                    num_filters=num_filters,
                    has_se=has_se,
                    stride=1,
                    downsample=True if i == 0 else False,
                    name=name + '_' + str(i + 1),
                    padding_same=padding_same))
            self.bottleneck_block_list.append(bottleneck_block)
    def forward(self, x):
        conv = x
        for block_func in self.bottleneck_block_list:
            conv = block_func(conv)
        return conv
class TransitionLayer(nn.Layer):
    """Creates the branch transition between two HRNet stages.

    For each output branch: reuse the input branch unchanged (entry is None),
    apply a 3x3 conv when the channel counts differ, or spawn a new lower
    resolution branch with a stride-2 conv from the last input branch.
    """
    def __init__(self, in_channels, out_channels, name=None, padding_same=True):
        super(TransitionLayer, self).__init__()
        num_in = len(in_channels)
        num_out = len(out_channels)
        self.conv_bn_func_list = []
        for i in range(num_out):
            residual = None
            if i < num_in:
                if in_channels[i] != out_channels[i]:
                    residual = self.add_sublayer(
                        "transition_{}_layer_{}".format(name, i + 1),
                        layers.ConvBNReLU(
                            in_channels=in_channels[i],
                            out_channels=out_channels[i],
                            kernel_size=3,
                            padding=1 if not padding_same else 'same',
                            bias_attr=False))
            else:
                # New branch: downsample from the lowest-resolution input.
                residual = self.add_sublayer(
                    "transition_{}_layer_{}".format(name, i + 1),
                    layers.ConvBNReLU(
                        in_channels=in_channels[-1],
                        out_channels=out_channels[i],
                        kernel_size=3,
                        stride=2,
                        padding=1 if not padding_same else 'same',
                        bias_attr=False))
            self.conv_bn_func_list.append(residual)
    def forward(self, x):
        outs = []
        for idx, conv_bn_func in enumerate(self.conv_bn_func_list):
            if conv_bn_func is None:
                # Identity transition: pass the branch through untouched.
                outs.append(x[idx])
            else:
                if idx < len(x):
                    outs.append(conv_bn_func(x[idx]))
                else:
                    outs.append(conv_bn_func(x[-1]))
        return outs
class Branches(nn.Layer):
    """Parallel per-resolution branches, each a stack of BasicBlocks."""
    def __init__(self,
                 num_blocks,
                 in_channels,
                 out_channels,
                 has_se=False,
                 name=None,
                 padding_same=True):
        super(Branches, self).__init__()
        self.basic_block_list = []
        for i in range(len(out_channels)):
            self.basic_block_list.append([])
            for j in range(num_blocks[i]):
                # Only the first block of a branch may change channel count.
                in_ch = in_channels[i] if j == 0 else out_channels[i]
                basic_block_func = self.add_sublayer(
                    "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1),
                    BasicBlock(
                        num_channels=in_ch,
                        num_filters=out_channels[i],
                        has_se=has_se,
                        name=name + '_branch_layer_' + str(i + 1) + '_' +
                        str(j + 1),
                        padding_same=padding_same))
                self.basic_block_list[i].append(basic_block_func)
    def forward(self, x):
        outs = []
        for idx, input in enumerate(x):
            conv = input
            for basic_block_func in self.basic_block_list[idx]:
                conv = basic_block_func(conv)
            outs.append(conv)
        return outs
class BottleneckBlock(nn.Layer):
    """Residual bottleneck: 1x1 -> 3x3 -> 1x1 (x4 channel expansion).

    Optional SE recalibration on the main path and a 1x1 projection shortcut
    when `downsample` is True.
    """
    def __init__(self,
                 num_channels,
                 num_filters,
                 has_se,
                 stride=1,
                 downsample=False,
                 name=None,
                 padding_same=True):
        super(BottleneckBlock, self).__init__()
        self.has_se = has_se
        self.downsample = downsample
        self.conv1 = layers.ConvBNReLU(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=1,
            bias_attr=False)
        self.conv2 = layers.ConvBNReLU(
            in_channels=num_filters,
            out_channels=num_filters,
            kernel_size=3,
            stride=stride,
            padding=1 if not padding_same else 'same',
            bias_attr=False)
        # No ReLU before the residual add (ConvBN, not ConvBNReLU).
        self.conv3 = layers.ConvBN(
            in_channels=num_filters,
            out_channels=num_filters * 4,
            kernel_size=1,
            bias_attr=False)
        if self.downsample:
            self.conv_down = layers.ConvBN(
                in_channels=num_channels,
                out_channels=num_filters * 4,
                kernel_size=1,
                bias_attr=False)
        if self.has_se:
            self.se = SELayer(
                num_channels=num_filters * 4,
                num_filters=num_filters * 4,
                reduction_ratio=16,
                name=name + '_fc')
        self.relu = nn.ReLU()
    def forward(self, x):
        residual = x
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        if self.downsample:
            residual = self.conv_down(x)
        if self.has_se:
            conv3 = self.se(conv3)
        y = paddle.add(conv3, residual)
        y = self.relu(y)
        return y
class BasicBlock(nn.Layer):
    """Residual basic block: two 3x3 convs with an identity (or 1x1) shortcut.

    Optional SE recalibration before the residual add.
    """
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride=1,
                 has_se=False,
                 downsample=False,
                 name=None,
                 padding_same=True):
        super(BasicBlock, self).__init__()
        self.has_se = has_se
        self.downsample = downsample
        self.conv1 = layers.ConvBNReLU(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=3,
            stride=stride,
            padding=1 if not padding_same else 'same',
            bias_attr=False)
        # Second conv has no activation; ReLU is applied after the add.
        self.conv2 = layers.ConvBN(
            in_channels=num_filters,
            out_channels=num_filters,
            kernel_size=3,
            padding=1 if not padding_same else 'same',
            bias_attr=False)
        if self.downsample:
            self.conv_down = layers.ConvBNReLU(
                in_channels=num_channels,
                out_channels=num_filters,
                kernel_size=1,
                bias_attr=False)
        if self.has_se:
            self.se = SELayer(
                num_channels=num_filters,
                num_filters=num_filters,
                reduction_ratio=16,
                name=name + '_fc')
        self.relu = nn.ReLU()
    def forward(self, x):
        residual = x
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        if self.downsample:
            residual = self.conv_down(x)
        if self.has_se:
            conv2 = self.se(conv2)
        y = paddle.add(conv2, residual)
        y = self.relu(y)
        return y
class SELayer(nn.Layer):
    """Squeeze-and-Excitation: global pool -> FC squeeze -> FC excite ->
    sigmoid, producing per-channel weights that rescale the input."""
    def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
        super(SELayer, self).__init__()
        self.pool2d_gap = nn.AdaptiveAvgPool2D(1)
        self._num_channels = num_channels
        med_ch = int(num_channels / reduction_ratio)
        # Uniform init bounded by 1/sqrt(fan_in), per-layer.
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = nn.Linear(
            num_channels,
            med_ch,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Uniform(-stdv, stdv)))
        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = nn.Linear(
            med_ch,
            num_filters,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Uniform(-stdv, stdv)))
    def forward(self, x):
        pool = self.pool2d_gap(x)
        pool = paddle.reshape(pool, shape=[-1, self._num_channels])
        squeeze = self.squeeze(pool)
        squeeze = F.relu(squeeze)
        excitation = self.excitation(squeeze)
        excitation = F.sigmoid(excitation)
        # Back to NCHW so the weights broadcast over H and W.
        excitation = paddle.reshape(
            excitation, shape=[-1, self._num_channels, 1, 1])
        out = x * excitation
        return out
class Stage(nn.Layer):
    """A sequence of HighResolutionModules forming one HRNet stage.

    Args:
        num_channels (list[int]): Input channels of each branch.
        num_modules (int): Number of HighResolutionModules chained in series.
        num_blocks (list[int]): BasicBlocks per branch inside each module.
        num_filters (list[int]): Output channels of each branch.
        has_se (bool): Enable SE recalibration in the blocks.
        multi_scale_output (bool): When False, only the last module drops its
            multi-scale output; all preceding modules keep it regardless.
        name (str): Prefix for sublayer names (state-dict keys).
        align_corners (bool): Passed through to bilinear interpolation.
        padding_same (bool): Use 'same' padding instead of explicit padding.
    """
    def __init__(self,
                 num_channels,
                 num_modules,
                 num_blocks,
                 num_filters,
                 has_se=False,
                 multi_scale_output=True,
                 name=None,
                 align_corners=False,
                 padding_same=True):
        super(Stage, self).__init__()
        self._num_modules = num_modules
        self.stage_func_list = []
        for i in range(num_modules):
            # The two construction branches of the original code differed only
            # in `multi_scale_output`; `multi_scale_output or i != last` is
            # equivalent (False only for the last module when the stage-level
            # flag is False).
            stage_func = self.add_sublayer(
                "stage_{}_{}".format(name, i + 1),
                HighResolutionModule(
                    num_channels=num_channels,
                    num_blocks=num_blocks,
                    num_filters=num_filters,
                    has_se=has_se,
                    multi_scale_output=(multi_scale_output
                                        or i != num_modules - 1),
                    name=name + '_' + str(i + 1),
                    align_corners=align_corners,
                    padding_same=padding_same))
            self.stage_func_list.append(stage_func)
    def forward(self, x):
        """Run the modules in series over the list of branch tensors."""
        out = x
        for idx in range(self._num_modules):
            out = self.stage_func_list[idx](out)
        return out
class HighResolutionModule(nn.Layer):
    """One HRNet module: parallel per-resolution branches followed by
    cross-resolution fusion."""
    def __init__(self,
                 num_channels,
                 num_blocks,
                 num_filters,
                 has_se=False,
                 multi_scale_output=True,
                 name=None,
                 align_corners=False,
                 padding_same=True):
        super(HighResolutionModule, self).__init__()
        self.branches_func = Branches(
            num_blocks=num_blocks,
            in_channels=num_channels,
            out_channels=num_filters,
            has_se=has_se,
            name=name,
            padding_same=padding_same)
        self.fuse_func = FuseLayers(
            in_channels=num_filters,
            out_channels=num_filters,
            multi_scale_output=multi_scale_output,
            name=name,
            align_corners=align_corners,
            padding_same=padding_same)
    def forward(self, x):
        out = self.branches_func(x)
        out = self.fuse_func(out)
        return out
class FuseLayers(nn.Layer):
    """Multi-resolution fusion at the end of an HRNet module.

    For output branch i: lower-resolution inputs (j > i) are 1x1-projected
    and bilinearly upsampled; higher-resolution inputs (j < i) are reduced by
    a chain of i - j stride-2 convs. The summed result passes through ReLU.

    Note: sublayers are stored in a flat list and consumed in forward in the
    exact creation order via `residual_func_idx`, so the nested loop order
    here must mirror the one in forward.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 multi_scale_output=True,
                 name=None,
                 align_corners=False,
                 padding_same=True):
        super(FuseLayers, self).__init__()
        # Only one (highest-resolution) output when multi_scale_output=False.
        self._actual_ch = len(in_channels) if multi_scale_output else 1
        self._in_channels = in_channels
        self.align_corners = align_corners
        self.residual_func_list = []
        for i in range(self._actual_ch):
            for j in range(len(in_channels)):
                if j > i:
                    residual_func = self.add_sublayer(
                        "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
                        layers.ConvBN(
                            in_channels=in_channels[j],
                            out_channels=out_channels[i],
                            kernel_size=1,
                            bias_attr=False))
                    self.residual_func_list.append(residual_func)
                elif j < i:
                    pre_num_filters = in_channels[j]
                    for k in range(i - j):
                        if k == i - j - 1:
                            # Final downsampling conv maps to branch i's
                            # channel count, without activation.
                            residual_func = self.add_sublayer(
                                "residual_{}_layer_{}_{}_{}".format(
                                    name, i + 1, j + 1, k + 1),
                                layers.ConvBN(
                                    in_channels=pre_num_filters,
                                    out_channels=out_channels[i],
                                    kernel_size=3,
                                    stride=2,
                                    padding=1 if not padding_same else 'same',
                                    bias_attr=False))
                            pre_num_filters = out_channels[i]
                        else:
                            residual_func = self.add_sublayer(
                                "residual_{}_layer_{}_{}_{}".format(
                                    name, i + 1, j + 1, k + 1),
                                layers.ConvBNReLU(
                                    in_channels=pre_num_filters,
                                    out_channels=out_channels[j],
                                    kernel_size=3,
                                    stride=2,
                                    padding=1 if not padding_same else 'same',
                                    bias_attr=False))
                            pre_num_filters = out_channels[j]
                        self.residual_func_list.append(residual_func)
    def forward(self, x):
        outs = []
        residual_func_idx = 0
        for i in range(self._actual_ch):
            residual = x[i]
            residual_shape = paddle.shape(residual)[-2:]
            for j in range(len(self._in_channels)):
                if j > i:
                    # Project then upsample to branch i's resolution.
                    y = self.residual_func_list[residual_func_idx](x[j])
                    residual_func_idx += 1
                    y = F.interpolate(
                        y,
                        residual_shape,
                        mode='bilinear',
                        align_corners=self.align_corners)
                    residual = residual + y
                elif j < i:
                    # Downsample through the stride-2 conv chain.
                    y = x[j]
                    for k in range(i - j):
                        y = self.residual_func_list[residual_func_idx](y)
                        residual_func_idx += 1
                    residual = residual + y
            residual = F.relu(residual)
            outs.append(residual)
        return outs
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/backbones/vovnet.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers import group_norm, FrozenBatchNorm2d, param_init
from paddle3d.utils import checkpoint
__all__ = ["VoVNet", "VoVNet99_eSE"]
norm_func = None
def dw_conv3x3(in_channels,
               out_channels,
               module_name,
               postfix,
               stride=1,
               kernel_size=3,
               padding=1):
    """3x3 convolution with padding"""
    # Depthwise 3x3 conv followed by pointwise 1x1 conv + norm + ReLU, each
    # sublayer named for checkpoint compatibility. Relies on the module-level
    # `norm_func`, which VoVNet.__init__ must set first.
    # NOTE(review): the pointwise conv consumes `in_channels` (not the
    # depthwise output), and the depthwise conv uses groups=out_channels —
    # this mirrors the upstream VoVNetV2 code and is only consistent when
    # in_channels == out_channels; confirm before reusing with differing
    # channel counts.
    return nn.Sequential(
        ('{}_{}/dw_conv3x3'.format(module_name, postfix),
         nn.Conv2D(
             in_channels,
             out_channels,
             kernel_size=kernel_size,
             stride=stride,
             padding=padding,
             groups=out_channels,
             bias_attr=False)),
        ('{}_{}/pw_conv1x1'.format(module_name, postfix),
         nn.Conv2D(
             in_channels,
             out_channels,
             kernel_size=1,
             stride=1,
             padding=0,
             groups=1,
             bias_attr=False)),
        ('{}_{}/pw_norm'.format(module_name, postfix), norm_func(out_channels)),
        ('{}_{}/pw_relu'.format(module_name, postfix), nn.ReLU()))
def conv3x3(in_channels,
            out_channels,
            module_name,
            postfix,
            stride=1,
            groups=1,
            kernel_size=3,
            padding=1):
    """3x3 convolution with padding"""
    # conv + norm + ReLU with named sublayers (names become state-dict keys).
    # Relies on the module-level `norm_func`, set by VoVNet.__init__ before
    # any layer construction.
    return nn.Sequential(
        (f"{module_name}_{postfix}/conv",
         nn.Conv2D(
             in_channels,
             out_channels,
             kernel_size=kernel_size,
             stride=stride,
             padding=padding,
             groups=groups,
             bias_attr=False,
         )), (f"{module_name}_{postfix}/norm", norm_func(out_channels)),
        (f"{module_name}_{postfix}/relu", nn.ReLU()))
def conv1x1(in_channels,
            out_channels,
            module_name,
            postfix,
            stride=1,
            groups=1,
            kernel_size=1,
            padding=0):
    """1x1 convolution with padding"""
    # conv + norm + ReLU with named sublayers; same `norm_func` dependency as
    # conv3x3 (VoVNet.__init__ must set it first).
    return nn.Sequential(
        (f"{module_name}_{postfix}/conv",
         nn.Conv2D(
             in_channels,
             out_channels,
             kernel_size=kernel_size,
             stride=stride,
             padding=padding,
             groups=groups,
             bias_attr=False,
         )), (f"{module_name}_{postfix}/norm", norm_func(out_channels)),
        (f"{module_name}_{postfix}/relu", nn.ReLU()))
class Hsigmoid(nn.Layer):
    """Hard sigmoid activation: relu6(x + 3) / 6."""
    def __init__(self):
        super(Hsigmoid, self).__init__()
    def forward(self, x):
        # Piecewise-linear approximation of sigmoid.
        shifted = F.relu6(x + 3.0)
        return shifted / 6.0
class eSEModule(nn.Layer):
    """Effective Squeeze-Excitation (eSE) channel attention.

    Global-pools the input, passes it through a single 1x1 conv (no channel
    reduction) and a hard sigmoid, then rescales the input channel-wise.

    Args:
        channel (int): Number of input/output channels.
        reduction (int): Kept for interface compatibility; not used — the 1x1
            conv maps channel -> channel without reduction.
    """
    def __init__(self, channel, reduction=4):
        super(eSEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)
        self.fc = nn.Conv2D(channel, channel, kernel_size=1, padding=0)
        self.hsigmoid = Hsigmoid()
    def forward(self, x):
        # Renamed from `input` to avoid shadowing the builtin.
        identity = x
        x = self.avg_pool(x)
        x = self.fc(x)
        x = self.hsigmoid(x)
        return identity * x
class _OSA_module(nn.Layer):
    """One-Shot Aggregation module.

    Runs a chain of 3x3 convs, concatenates the input plus every intermediate
    output, projects with a 1x1 conv, and applies eSE attention. Optionally
    adds an identity shortcut.

    NOTE(review): the `SE` argument is accepted but never read — `self.ese`
    is created and applied unconditionally; confirm against upstream before
    relying on the flag.
    """
    def __init__(self,
                 in_ch,
                 stage_ch,
                 concat_ch,
                 layer_per_block,
                 module_name,
                 SE=False,
                 identity=False,
                 depthwise=False):
        super(_OSA_module, self).__init__()
        self.identity = identity
        self.depthwise = depthwise
        self.isReduced = False
        self.layers = nn.LayerList()
        in_channel = in_ch
        # Depthwise convs need matching channel counts; add a 1x1 reduction
        # when the input width differs from the stage width.
        if self.depthwise and in_channel != stage_ch:
            self.isReduced = True
            self.conv_reduction = conv1x1(
                in_channel, stage_ch, "{}_reduction".format(module_name), "0")
        for i in range(layer_per_block):
            if self.depthwise:
                self.layers.append(
                    dw_conv3x3(stage_ch, stage_ch, module_name, i))
            else:
                self.layers.append(
                    conv3x3(in_channel, stage_ch, module_name, i))
            in_channel = stage_ch
        # feature aggregation
        in_channel = in_ch + layer_per_block * stage_ch
        self.concat = conv1x1(in_channel, concat_ch, module_name, "concat")
        self.ese = eSEModule(concat_ch)
    def forward(self, x):
        identity_feat = x
        output = []
        output.append(x)
        if self.depthwise and self.isReduced:
            x = self.conv_reduction(x)
        for layer in self.layers:
            x = layer(x)
            output.append(x)
        # One-shot aggregation: concat input + all intermediate features.
        x = paddle.concat(output, axis=1)
        xt = self.concat(x)
        xt = self.ese(xt)
        if self.identity:
            xt = xt + identity_feat
        return xt
class _OSA_stage(nn.Sequential):
    """A stage of OSA modules, preceded by 3x3/stride-2 max-pooling for every
    stage except stage 2.

    NOTE(review): the SE bookkeeping below intends "SE only on the last block
    of the stage", but `_OSA_module` ignores its SE argument entirely (eSE is
    always applied), so these flags currently have no effect.
    """
    def __init__(self,
                 in_ch,
                 stage_ch,
                 concat_ch,
                 block_per_stage,
                 layer_per_block,
                 stage_num,
                 SE=False,
                 depthwise=False):
        super(_OSA_stage, self).__init__()
        if not stage_num == 2:
            self.add_sublayer(
                "Pooling", nn.MaxPool2D(
                    kernel_size=3, stride=2, ceil_mode=True))
        # Disable SE on the first block whenever it is not also the last one.
        if block_per_stage != 1:
            SE = False
        module_name = f"OSA{stage_num}_1"
        self.add_sublayer(
            module_name,
            _OSA_module(
                in_ch,
                stage_ch,
                concat_ch,
                layer_per_block,
                module_name,
                SE,
                depthwise=depthwise))
        for i in range(block_per_stage - 1):
            if i != block_per_stage - 2:  # disable SE except on the last block
                SE = False
            module_name = f"OSA{stage_num}_{i + 2}"
            self.add_sublayer(
                module_name,
                _OSA_module(
                    concat_ch,
                    stage_ch,
                    concat_ch,
                    layer_per_block,
                    module_name,
                    SE,
                    identity=True,
                    depthwise=depthwise),
            )
@manager.BACKBONES.add_component
class VoVNet(nn.Layer):
    def __init__(self,
                 stem_ch,
                 config_stage_ch,
                 config_concat_ch,
                 block_per_stage,
                 layer_per_block,
                 depthwise,
                 SE,
                 norm_type,
                 input_ch,
                 out_features=None):
        """
        Args:
            stem_ch (list[int]): channels of the three stem convolutions
            config_stage_ch (list[int]): per-stage OSA internal channels
            config_concat_ch (list[int]): per-stage OSA output channels
            block_per_stage (list[int]): number of OSA blocks in each stage
            layer_per_block (int): number of convs inside one OSA block
            depthwise (bool): use depthwise-separable convs in stem/OSA blocks
            SE (bool): enable eSE attention
            norm_type (str): one of "bn", "gn", "frozen_bn" (None -> BN)
            input_ch (int): the number of input channel
            out_features (list[str]): name of the layers whose outputs should
                be returned in forward. Can be anything in "stem", "stage2" ...
        """
        super(VoVNet, self).__init__()
        # The conv builders (conv3x3 / dw_conv3x3 / conv1x1) read this
        # module-level norm factory, so it must be set before any of them run.
        global norm_func
        if norm_type == "bn" or norm_type is None:
            norm_func = nn.BatchNorm2D
        elif norm_type == "gn":
            norm_func = group_norm
        elif norm_type == "frozen_bn":
            norm_func = FrozenBatchNorm2d
        else:
            raise NotImplementedError()
        self._out_features = out_features
        # Stem module: first conv is always dense; the remaining two may be
        # depthwise-separable.
        conv_type = dw_conv3x3 if depthwise else conv3x3
        self.stem = nn.Sequential(
            ('stem1', conv3x3(input_ch, stem_ch[0], "stem", "1", 2)))
        self.stem.add_sublayer(
            'stem2', conv_type(stem_ch[0], stem_ch[1], "stem", "2", 1))
        self.stem.add_sublayer(
            'stem3', conv_type(stem_ch[1], stem_ch[2], "stem", "3", 2))
        # Two stride-2 convs in the stem -> output stride 4.
        current_stride = 4
        self._out_feature_strides = {
            "stem": current_stride,
            "stage2": current_stride
        }
        self._out_feature_channels = {"stem": stem_ch[2]}
        stem_out_ch = [stem_ch[2]]
        in_ch_list = stem_out_ch + config_concat_ch[:-1]
        # OSA stages
        self.stage_names = []
        for i in range(4):  # num_stages
            name = "stage%d" % (i + 2)  # stage 2 ... stage 5
            self.stage_names.append(name)
            self.add_sublayer(
                name,
                _OSA_stage(
                    in_ch_list[i],
                    config_stage_ch[i],
                    config_concat_ch[i],
                    block_per_stage[i],
                    layer_per_block,
                    i + 2,
                    SE,
                    depthwise,
                ),
            )
            self._out_feature_channels[name] = config_concat_ch[i]
            # Every stage except stage2 is preceded by a stride-2 max-pool.
            if i != 0:
                current_stride = int(current_stride * 2)
                self._out_feature_strides[name] = current_stride
        # initialize weights
        self._initialize_weights()
    def _initialize_weights(self):
        """Kaiming-normal initialization for all conv weights."""
        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                param_init.kaiming_normal_init(m.weight)
    def forward(self, x):
        """Return the feature maps for every name listed in `out_features`."""
        outputs = []
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs.append(x)
        for name in self.stage_names:
            x = getattr(self, name)(x)
            if name in self._out_features:
                outputs.append(x)
        return outputs
@manager.BACKBONES.add_component
def VoVNet99_eSE(**kwargs):
    """Build a VoVNet-99 backbone with eSE attention enabled."""
    return VoVNet(
        stem_ch=[64, 64, 128],
        config_stage_ch=[128, 160, 192, 224],
        config_concat_ch=[256, 512, 768, 1024],
        block_per_stage=[1, 3, 9, 3],
        layer_per_block=5,
        SE=True,
        depthwise=False,
        **kwargs)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/backbones/dla.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers import group_norm, FrozenBatchNorm2d
from paddle3d.utils import checkpoint
__all__ = ["DLA", "DLA34", "DLABase34"]
@manager.BACKBONES.add_component
class DLA(nn.Layer):
    """DLA backbone with DLAUp/IDAUp aggregation, producing one fused map.

    Args:
        levels (list[int]): Number of blocks at each of the six DLA levels.
        channels (list[int]): Channel count at each level.
        block (str): Block class name for the trunk (e.g. "BasicBlock").
        down_ratio (int): Output stride of the returned feature map.
        last_level (int): Last trunk level fed into the final IDA upsampling.
        out_channel (int): Output channels (0 -> channels[first_level]).
        norm_type (str): One of "bn", "gn", "frozen_bn".
        pretrained (str): Optional path/URL to pretrained weights.
    """
    def __init__(self,
                 levels,
                 channels,
                 block,
                 down_ratio=4,
                 last_level=5,
                 out_channel=0,
                 norm_type="gn",
                 pretrained=None):
        super().__init__()
        self.pretrained = pretrained
        assert down_ratio in [2, 4, 8, 16]
        # first_level = log2(down_ratio): index of the first level kept.
        self.first_level = int(np.log2(down_ratio))
        self.last_level = last_level
        if norm_type == "bn":
            norm_func = nn.BatchNorm2D
        elif norm_type == "gn":
            norm_func = group_norm
        elif norm_type == "frozen_bn":
            norm_func = FrozenBatchNorm2d
        else:
            raise NotImplementedError()
        self.base = DLABase(levels, channels, block, norm_type=norm_type)
        scales = [2**i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(
            startp=self.first_level,
            channels=channels[self.first_level:],
            scales=scales,
            norm_func=norm_func)
        if out_channel == 0:
            out_channel = channels[self.first_level]
        up_scales = [2**i for i in range(self.last_level - self.first_level)]
        self.ida_up = IDAUp(
            in_channels=channels[self.first_level:self.last_level],
            out_channel=out_channel,
            up_f=up_scales,
            norm_func=norm_func)
        self.init_weight()
    def forward(self, x):
        x = self.base(x)
        x = self.dla_up(x)
        y = []
        iter_levels = range(self.last_level - self.first_level)
        for i in iter_levels:
            y.append(x[i].clone())
        # IDAUp mutates `y` in place; the fused map ends up in y[-1].
        self.ida_up(y, 0, len(y))
        return y[-1]
    def init_weight(self):
        """Load pretrained weights when a path was given."""
        if self.pretrained is not None:
            checkpoint.load_pretrained_model(self, self.pretrained)
class DLABase(nn.Layer):
    """DLA base module
    Trunk of DLA: a base layer, two plain conv levels and four Tree levels.
    Returns the outputs of the levels listed in `out_features`.
    """
    def __init__(self,
                 levels,
                 channels,
                 block=None,
                 residual_root=False,
                 norm_type=None,
                 out_features=None):
        super().__init__()
        self.channels = channels
        self.level_length = len(levels)
        if norm_type == "bn" or norm_type is None:
            norm_func = nn.BatchNorm2D
        elif norm_type == "gn":
            norm_func = group_norm
        elif norm_type == "frozen_bn":
            norm_func = FrozenBatchNorm2d
        else:
            raise NotImplementedError()
        if out_features is None:
            # Default: return every level's output.
            self.out_features = [i for i in range(self.level_length)]
        else:
            self.out_features = out_features
        if block is None:
            block = BasicBlock
        else:
            # `block` arrives as a class name string from the config.
            block = eval(block)
        self.base_layer = nn.Sequential(
            nn.Conv2D(
                3,
                channels[0],
                kernel_size=7,
                stride=1,
                padding=3,
                bias_attr=False), norm_func(channels[0]), nn.ReLU())
        self.level0 = _make_conv_level(
            in_channels=channels[0],
            out_channels=channels[0],
            num_convs=levels[0],
            norm_func=norm_func)
        # NOTE(review): upstream DLA builds level1 with levels[1] convs; here
        # levels[0] is used. Identical for DLA34 (both are 1) but diverges for
        # other configs — confirm before changing (would alter checkpoints).
        self.level1 = _make_conv_level(
            in_channels=channels[0],
            out_channels=channels[1],
            num_convs=levels[0],
            norm_func=norm_func,
            stride=2)
        self.level2 = Tree(
            level=levels[2],
            block=block,
            in_channels=channels[1],
            out_channels=channels[2],
            norm_func=norm_func,
            stride=2,
            level_root=False,
            root_residual=residual_root)
        self.level3 = Tree(
            level=levels[3],
            block=block,
            in_channels=channels[2],
            out_channels=channels[3],
            norm_func=norm_func,
            stride=2,
            level_root=True,
            root_residual=residual_root)
        self.level4 = Tree(
            level=levels[4],
            block=block,
            in_channels=channels[3],
            out_channels=channels[4],
            norm_func=norm_func,
            stride=2,
            level_root=True,
            root_residual=residual_root)
        self.level5 = Tree(
            level=levels[5],
            block=block,
            in_channels=channels[4],
            out_channels=channels[5],
            norm_func=norm_func,
            stride=2,
            level_root=True,
            root_residual=residual_root)
    def forward(self, x):
        """forward
        Returns a list of per-level feature maps (levels in out_features).
        """
        y = []
        x = self.base_layer(x)
        for i in range(self.level_length):
            x = getattr(self, 'level{}'.format(i))(x)
            if i in self.out_features:
                y.append(x)
        return y
class DLAUp(nn.Layer):
    """DLA Up module

    Iteratively fuses the multi-level feature pyramid bottom-up with a chain
    of IDAUp layers, producing one output per input level.
    """
    def __init__(self,
                 startp,
                 channels,
                 scales,
                 in_channels=None,
                 norm_func=None):
        """DLA Up module

        Args:
            startp (int): Index of the first pyramid level to fuse.
            channels (list[int]): Channels of each input level.
            scales (list[int]): Upsampling scale of each level.
            in_channels (list[int], optional): Overrides `channels` as the
                per-level input widths. Defaults to `channels`.
            norm_func (callable, optional): Norm layer factory. Defaults to
                BatchNorm2D.
        """
        super(DLAUp, self).__init__()
        self.startp = startp
        if norm_func is None:
            # Bug fix: paddle's class is `BatchNorm2D`; the previous
            # `nn.BatchNorm2d` does not exist and raised AttributeError
            # whenever norm_func was left as None.
            norm_func = nn.BatchNorm2D
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        for i in range(len(channels) - 1):
            j = -i - 2
            setattr(
                self, 'ida_{}'.format(i),
                IDAUp(in_channels[j:], channels[j], scales[j:] // scales[j],
                      norm_func))
            # After fusing, levels j+1.. share level j's channels/scale.
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
    def forward(self, layers):
        """forward

        Mutates `layers` in place and returns the fused pyramid, highest
        resolution first.
        """
        out = [layers[-1]]  # start with 32
        for i in range(len(layers) - self.startp - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) - i - 2, len(layers))
            out.insert(0, layers[-1])
        return out
class BasicBlock(nn.Layer):
    """Basic Block
    Two 3x3 conv+norm layers with a residual add; the shortcut defaults to
    the input but can be overridden via the `residual` argument.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 norm_func,
                 stride=1,
                 dilation=1):
        super().__init__()
        self.conv1 = nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=dilation,
            bias_attr=False,
            dilation=dilation)
        self.norm1 = norm_func(out_channels)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2D(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=dilation,
            bias_attr=False,
            dilation=dilation)
        self.norm2 = norm_func(out_channels)
    def forward(self, x, residual=None):
        """forward
        `residual` lets Tree supply a projected/downsampled shortcut.
        """
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.norm1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.norm2(out)
        out += residual
        out = self.relu(out)
        return out
class Tree(nn.Layer):
    """Hierarchical DLA tree: two subtrees (blocks at level 1) whose outputs
    are aggregated through a Root node, with optional downsampling and a 1x1
    projection shortcut."""
    def __init__(self,
                 level,
                 block,
                 in_channels,
                 out_channels,
                 norm_func,
                 stride=1,
                 level_root=False,
                 root_dim=0,
                 root_kernel_size=1,
                 dilation=1,
                 root_residual=False):
        super(Tree, self).__init__()
        if root_dim == 0:
            # Root concatenates both subtree outputs (+ input if level_root).
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if level == 1:
            self.tree1 = block(
                in_channels, out_channels, norm_func, stride, dilation=dilation)
            self.tree2 = block(
                out_channels,
                out_channels,
                norm_func,
                stride=1,
                dilation=dilation)
        else:
            new_level = level - 1
            self.tree1 = Tree(
                new_level,
                block,
                in_channels,
                out_channels,
                norm_func,
                stride,
                root_dim=0,
                root_kernel_size=root_kernel_size,
                dilation=dilation,
                root_residual=root_residual)
            self.tree2 = Tree(
                new_level,
                block,
                out_channels,
                out_channels,
                norm_func,
                root_dim=root_dim + out_channels,
                root_kernel_size=root_kernel_size,
                dilation=dilation,
                root_residual=root_residual)
        if level == 1:
            self.root = Root(root_dim, out_channels, norm_func,
                             root_kernel_size, root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.level = level
        self.downsample = None
        if stride > 1:
            self.downsample = nn.MaxPool2D(stride, stride=stride)
        self.project = None
        # If 'self.tree1' is a Tree (not BasicBlock), then the output of project is not used.
        if in_channels != out_channels and not isinstance(self.tree1, Tree):
            self.project = nn.Sequential(
                nn.Conv2D(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=1,
                    bias_attr=False), norm_func(out_channels))
    def forward(self, x, residual=None, children=None):
        """forward
        `children` accumulates skip features that the Root node aggregates.
        """
        if children is None:
            children = []
        if self.downsample:
            bottom = self.downsample(x)
        else:
            bottom = x
        if self.project:
            residual = self.project(bottom)
        else:
            residual = bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.level == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class Root(nn.Layer):
    """Root module
    Aggregates child feature maps by channel concatenation + 1x1 conv, with
    an optional residual connection to the first child.
    """
    def __init__(self, in_channels, out_channels, norm_func, kernel_size,
                 residual):
        super(Root, self).__init__()
        # NOTE(review): kernel is hard-coded to 1 while padding is derived
        # from the `kernel_size` parameter; this mirrors the upstream DLA
        # implementation (padding is 0 for the default kernel_size=1).
        self.conv = nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            bias_attr=False,
            padding=(kernel_size - 1) // 2)
        self.norm = norm_func(out_channels)
        self.relu = nn.ReLU()
        self.residual = residual
    def forward(self, *x):
        """forward
        Accepts any number of child tensors; x[0] is the residual source.
        """
        children = x
        x = self.conv(paddle.concat(x, 1))
        x = self.norm(x)
        if self.residual:
            x += children[0]
        x = self.relu(x)
        return x
class IDAUp(nn.Layer):
    """IDAUp module
    Iterative deep aggregation: each level is projected, upsampled with a
    grouped transposed conv, then merged with the previous level via a node
    conv. Operates on the `layers` list in place.
    """
    def __init__(
            self,
            in_channels,
            out_channel,
            up_f,  # per-level upsampling factor relative to the first level
            norm_func):
        super().__init__()
        for i in range(1, len(in_channels)):
            in_channel = in_channels[i]
            f = int(up_f[i])
            #USE_DEFORMABLE_CONV = False
            # so far only support normal convolution
            proj = NormalConv(in_channel, out_channel, norm_func)
            node = NormalConv(out_channel, out_channel, norm_func)
            # Grouped (per-channel) transposed conv performing x`f` upsampling.
            up = nn.Conv2DTranspose(
                out_channel,
                out_channel,
                kernel_size=f * 2,
                stride=f,
                padding=f // 2,
                output_padding=0,
                groups=out_channel,
                bias_attr=False)
            # todo: uncommoment later
            # _fill_up_weights(up)
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)
    def forward(self, layers, startp, endp):
        """forward
        Fuses layers[startp:endp] in place; layers[endp - 1] holds the result.
        """
        for i in range(startp + 1, endp):
            upsample = getattr(self, 'up_' + str(i - startp))
            project = getattr(self, 'proj_' + str(i - startp))
            layers[i] = upsample(project(layers[i]))
            node = getattr(self, 'node_' + str(i - startp))
            layers[i] = node(layers[i] + layers[i - 1])
class NormalConv(nn.Layer):
    """3x3 convolution followed by normalization and ReLU (the non-deformable
    projection/node used by IDAUp)."""
    def __init__(self, in_channels, out_channels, norm_func):
        super(NormalConv, self).__init__()
        # Sublayer definition order kept (norm, relu, conv) so parameter
        # ordering in the state dict is unchanged.
        self.norm = norm_func(out_channels)
        self.relu = nn.ReLU()
        self.conv = nn.Conv2D(
            in_channels, out_channels, kernel_size=(3, 3), padding=1)
    def forward(self, x):
        """Apply conv -> norm -> ReLU."""
        return self.relu(self.norm(self.conv(x)))
def _make_conv_level(in_channels,
                     out_channels,
                     num_convs,
                     norm_func,
                     stride=1,
                     dilation=1):
    """
    Stack `num_convs` conv+norm+ReLU units; only the first conv applies
    `stride`, the rest use stride 1.
    """
    units = []
    ch_in = in_channels
    for idx in range(num_convs):
        conv = nn.Conv2D(
            ch_in,
            out_channels,
            kernel_size=3,
            stride=stride if idx == 0 else 1,
            padding=dilation,
            bias_attr=False,
            dilation=dilation)
        units += [conv, norm_func(out_channels), nn.ReLU()]
        ch_in = out_channels
    return nn.Sequential(*units)
@manager.BACKBONES.add_component
def DLA34(**kwargs):
    """Build a DLA-34 backbone (BasicBlock, channels 16..512) with neck."""
    model = DLA(
        levels=[1, 1, 1, 2, 2, 1],
        channels=[16, 32, 64, 128, 256, 512],
        block="BasicBlock",
        **kwargs)
    return model
@manager.BACKBONES.add_component
def DLABase34(**kwargs):
    """Build the DLA-34 trunk only (no DLAUp/IDAUp aggregation)."""
    model = DLABase(
        levels=[1, 1, 1, 2, 2, 1],
        channels=[16, 32, 64, 128, 256, 512],
        block="BasicBlock",
        **kwargs)
    return model
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/backbones/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dla import *
from .hrnet import *
from .resnet import *
from .sac import *
from .second_backbone import *
from .vovnet import *
from .vovnetcp import VoVNetCP
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/backbones/sac.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers import param_init
from paddle3d.utils import checkpoint
__all__ = ["SACRangeNet21", "SACRangeNet53"]
class SACRangeNet(nn.Layer):
    """
    Backbone of SqueezeSegV3. RangeNet++ architecture with
    Spatially-Adaptive Convolution (SAC).
    For RangeNet++, please refer to:
    Milioto, A., et al. “RangeNet++: Fast and Accurate LiDAR Semantic Segmentation.”
    IEEE/RSJ Intl. Conf. on Intelligent Robots and Systems (IROS), 2019.
    For SAC, please refer to:
    Xu, Chenfeng, et al. “SqueezeSegV3: Spatially-Adaptive Convolution for Efficient Point-Cloud Segmentation.”
    CoRR, vol. abs/2004.01803, 2020, https://arxiv.org/abs/2004.01803.
    Args:
        in_channels (int): The number of channels of input.
        num_layers (int, optional): The depth of SACRangeNet. Defaults to 53.
        encoder_dropout_prob (float, optional): Dropout probability for dropout layers in encoder. Defaults to 0.01.
        decoder_dropout_prob (float, optional): Dropout probability for dropout layers in decoder. Defaults to 0.01.
        bn_momentum (float, optional): Momentum for batch normalization. Defaults to 0.99.
        pretrained (str, optional): Path to pretrained model. Defaults to None.
    """
    # TODO(will-jl944): Currently only SAC-ISK is implemented.
    def __init__(self,
                 in_channels: int,
                 num_layers: int = 53,
                 encoder_dropout_prob: float = .01,
                 decoder_dropout_prob: float = .01,
                 bn_momentum: float = .99,
                 pretrained: str = None):
        supported_layers = {21, 53}
        assert num_layers in supported_layers, "Invalid number of layers ({}) for SACRangeNet backbone, " \
            "supported values are {}.".format(num_layers, supported_layers)
        super().__init__()
        self.in_channels = in_channels
        self.pretrained = pretrained
        # Residual blocks per encoder stage for each depth variant.
        if num_layers == 21:
            num_stage_blocks = (1, 1, 2, 2, 1)
        elif num_layers == 53:
            num_stage_blocks = (1, 2, 8, 8, 4)
        self.encoder = Encoder(
            in_channels,
            num_stage_blocks,
            encoder_dropout_prob,
            bn_momentum=bn_momentum)
        self.decoder = Decoder(decoder_dropout_prob, bn_momentum=bn_momentum)
        self.init_weight()
    def forward(self, inputs):
        # Encoder produces skip connections consumed by the decoder.
        feature, short_cuts = self.encoder(inputs)
        feature_list = self.decoder(feature, short_cuts)
        return feature_list
    def init_weight(self):
        """Load pretrained weights, or fall back to PyTorch-style conv init."""
        if self.pretrained is not None:
            checkpoint.load_pretrained_model(self, self.pretrained)
        else:
            for layer in self.sublayers():
                if isinstance(layer, (nn.Conv2D, nn.Conv2DTranspose)):
                    # Kaiming uniform with a=sqrt(5) plus fan-in-bounded
                    # uniform bias: matches PyTorch's default conv init.
                    param_init.kaiming_uniform_init(
                        layer.weight, a=math.sqrt(5))
                    if layer.bias is not None:
                        fan_in, _ = param_init._calculate_fan_in_and_fan_out(
                            layer.weight)
                        if fan_in != 0:
                            bound = 1 / math.sqrt(fan_in)
                            param_init.uniform_init(layer.bias, -bound, bound)
class ConvBNLayer(nn.Layer):
    """A Conv2D immediately followed by BatchNorm2D (no activation)."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 bias=None,
                 bn_momentum=.9):
        super(ConvBNLayer, self).__init__()
        # Bias is usually disabled since BN provides its own shift term.
        self._conv = nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias_attr=bias)
        self._batch_norm = nn.BatchNorm2D(out_channels, momentum=bn_momentum)

    def forward(self, x):
        # conv -> batch norm
        return self._batch_norm(self._conv(x))
class DeconvBNLayer(nn.Layer):
    """A Conv2DTranspose immediately followed by BatchNorm2D (no activation)."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 bias=None,
                 bn_momentum=.9):
        super(DeconvBNLayer, self).__init__()
        # Bias is usually disabled since BN provides its own shift term.
        self._deconv = nn.Conv2DTranspose(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias_attr=bias)
        self._batch_norm = nn.BatchNorm2D(out_channels, momentum=bn_momentum)

    def forward(self, x):
        # transposed conv -> batch norm
        return self._batch_norm(self._deconv(x))
class SACISKBlock(nn.Layer):
    """
    SAC-ISK: Spatially-Adaptive Convolution where the attention map
    reweights the unfolded (im2col) 3x3 neighborhood features before a
    position-wise MLP aggregates them.
    """

    def __init__(self, num_channels):
        super(SACISKBlock, self).__init__()
        # Predicts, from the 3-channel xyz coordinate map, one attention
        # weight per channel for each of the 3x3 = 9 unfolded neighbors.
        self.attention_layer = ConvBNLayer(
            in_channels=3,
            out_channels=9 * num_channels,
            kernel_size=7,
            padding=3,
            bn_momentum=.9)
        # Aggregates the 9*C reweighted neighbor features back down to C.
        self.position_mlp = nn.Sequential(
            ConvBNLayer(
                in_channels=9 * num_channels,
                out_channels=num_channels,
                kernel_size=1,
                bn_momentum=.9), nn.ReLU(),
            ConvBNLayer(
                in_channels=num_channels,
                out_channels=num_channels,
                kernel_size=3,
                padding=1,
                bn_momentum=.9), nn.ReLU())

    def forward(self, xyz, feature):
        # xyz: (N, 3, H, W) coordinate map; feature: (N, C, H, W).
        N, C, H, W = feature.shape
        # im2col: every spatial position gets its 3x3 neighborhood stacked
        # along the channel axis -> (N, 9*C, H, W).
        new_feature = F.unfold(
            feature, 3, paddings=1).reshape([N, 3 * 3 * C, H, W])
        # Sigmoid gate derived from geometry only.
        attention_map = self.attention_layer(xyz)
        attention_map = F.sigmoid(attention_map)
        new_feature = new_feature * attention_map
        new_feature = self.position_mlp(new_feature)
        # Residual connection; xyz passes through unchanged.
        fused_feature = new_feature + feature

        return xyz, fused_feature
class DownsampleBlock(nn.Layer):
    """Halves the feature-map width via a strided conv and resizes the xyz
    coordinate map to keep both spatially aligned."""

    def __init__(self, in_channels, out_channels, bn_momentum=.9):
        super().__init__()
        # Stride [1, 2]: height is preserved, width is halved.
        self.ds_layer = nn.Sequential(
            ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=[1, 2],
                padding=1,
                bias=False,
                bn_momentum=bn_momentum), nn.LeakyReLU(.1))

    def forward(self, xyz, feature):
        feature = self.ds_layer(feature)
        # Match the xyz map to the new feature width.
        target_size = [xyz.shape[2], xyz.shape[3] // 2]
        xyz = F.interpolate(
            xyz, size=target_size, mode="bilinear", align_corners=True)
        return xyz, feature
class EncoderStage(nn.Layer):
    """A stack of SAC-ISK blocks, optionally followed by a downsample block,
    with dropout applied to the stage output."""

    def __init__(self,
                 num_blocks,
                 in_channels,
                 out_channels,
                 dropout_prob,
                 downsample=True,
                 bn_momentum=.9):
        super().__init__()
        self.downsample = downsample
        sac_blocks = [
            SACISKBlock(num_channels=in_channels) for _ in range(num_blocks)
        ]
        self.layers = nn.LayerList(sac_blocks)
        if downsample:
            self.layers.append(
                DownsampleBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_momentum=bn_momentum))
        self.dropout = nn.Dropout2D(dropout_prob)

    def forward(self, xyz, feature):
        # Every sublayer takes and returns the (xyz, feature) pair.
        for block in self.layers:
            xyz, feature = block(xyz, feature)
        return xyz, self.dropout(feature)
class Encoder(nn.Layer):
    """SACRangeNet encoder: a stem conv followed by five stages; the first
    three stages downsample the width and record skip connections."""

    def __init__(self,
                 in_channels,
                 num_stage_blocks=(1, 2, 8, 8, 4),
                 dropout_prob=.01,
                 bn_momentum=.9):
        super(Encoder, self).__init__()
        # (in_ch, out_ch) for each stage's optional downsample conv.
        down_channels = ((32, 64), (64, 128), (128, 256), (256, 256), (256,
                                                                       256))
        self.conv_1 = nn.Sequential(
            ConvBNLayer(
                in_channels=in_channels,
                out_channels=32,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
                bn_momentum=bn_momentum), nn.LeakyReLU(.1))
        # Only the first three stages downsample (i < 3).
        self.encoder_stages = nn.LayerList([
            EncoderStage(
                num_blocks,
                in_ch,
                out_ch,
                dropout_prob=dropout_prob,
                downsample=i < 3,
                bn_momentum=bn_momentum) for i, (num_blocks, (
                    in_ch,
                    out_ch)) in enumerate(zip(num_stage_blocks, down_channels))
        ])

    def forward(self, inputs):
        # Channels 1..3 are taken as the xyz coordinate map; assumes the
        # input layout is (range, x, y, z, ...) — TODO confirm with the
        # dataset transform that builds the range image.
        xyz = inputs[:, 1:4, :, :]
        feature = self.conv_1(inputs)

        short_cuts = []
        for encoder_stage in self.encoder_stages:
            if encoder_stage.downsample:
                # Record the pre-downsample feature as a skip connection;
                # detached so decoder gradients don't flow through the skip.
                short_cuts.append(feature.detach())
            xyz, feature = encoder_stage(xyz, feature)

        return feature, short_cuts
class InvertedResidual(nn.Layer):
    """Bottleneck residual block: 1x1 reduce -> 3x3 expand, plus identity.

    ``channels`` is (mid_channels, io_channels): the block maps
    io -> mid -> io and adds the input back.
    """

    def __init__(self, channels, bn_momentum=.9):
        super().__init__()
        mid_ch, io_ch = channels
        self.conv = nn.Sequential(
            ConvBNLayer(
                in_channels=io_ch,
                out_channels=mid_ch,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
                bn_momentum=bn_momentum), nn.LeakyReLU(.1),
            ConvBNLayer(
                in_channels=mid_ch,
                out_channels=io_ch,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
                bn_momentum=bn_momentum), nn.LeakyReLU(.1))

    def forward(self, x):
        # Identity skip connection around the two-conv bottleneck.
        return x + self.conv(x)
class DecoderStage(nn.Layer):
    """One decoder stage: an (up)conv head, LeakyReLU, then an
    inverted-residual refinement block."""

    def __init__(self, in_channels, out_channels, upsample=True,
                 bn_momentum=.9):
        super().__init__()
        self.upsample = upsample
        if upsample:
            # Transposed conv doubles only the width (stride [1, 2]).
            head = DeconvBNLayer(
                in_channels,
                out_channels, [1, 4],
                stride=[1, 2],
                padding=[0, 1],
                bn_momentum=bn_momentum)
        else:
            head = ConvBNLayer(
                in_channels,
                out_channels,
                3,
                padding=1,
                bn_momentum=bn_momentum)
        self.layers = nn.LayerList([
            head,
            nn.LeakyReLU(.1),
            InvertedResidual(
                channels=[in_channels, out_channels],
                bn_momentum=bn_momentum),
        ])

    def forward(self, feature):
        out = feature
        for layer in self.layers:
            out = layer(out)
        return out
class Decoder(nn.Layer):
    """SACRangeNet decoder: five stages; the last three upsample the width
    and fuse the encoder skip connections. Returns all stage outputs."""

    def __init__(self, dropout_prob=.01, bn_momentum=.9):
        super().__init__()
        # (in_ch, out_ch) per stage; upsample only for stages with i > 1.
        up_channels = ((256, 256), (256, 256), (256, 128), (128, 64), (64, 32))
        self.decoder_stages = nn.LayerList([
            DecoderStage(
                in_ch, out_ch, upsample=i > 1, bn_momentum=bn_momentum)
            for i, (in_ch, out_ch) in enumerate(up_channels)
        ])
        self.dropout = nn.Dropout2D(dropout_prob)

    def forward(self, feature, short_cuts):
        feature_list = []
        for decoder_stage in self.decoder_stages:
            feature = decoder_stage(feature)
            if decoder_stage.upsample:
                # Skips were pushed in encoder order, so pop() yields the
                # one matching the current (now doubled) resolution.
                feature += short_cuts.pop()
            feature_list.append(self.dropout(feature))
        # NOTE(review): the last feature already passed through dropout in
        # the loop above, so this applies dropout to it a second time —
        # confirm the double dropout is intentional.
        feature_list[-1] = self.dropout(feature_list[-1])
        return feature_list
@manager.BACKBONES.add_component
def SACRangeNet21(**kwargs) -> paddle.nn.Layer:
    """Build the 21-layer SACRangeNet variant."""
    return SACRangeNet(num_layers=21, **kwargs)
@manager.BACKBONES.add_component
def SACRangeNet53(**kwargs) -> paddle.nn.Layer:
    """Build the 53-layer SACRangeNet variant."""
    return SACRangeNet(num_layers=53, **kwargs)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/backbones/resnet.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from numbers import Integral
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Uniform
from paddle.regularizer import L2Decay
from paddle3d.apis import manager
from paddle3d.models import layers
from paddle3d.models.layers import reset_parameters
from paddle3d.utils import checkpoint
__all__ = ['ResNet']
class ConvBNLayer(nn.Layer):
    """Conv2D + BatchNorm2D with an optional leading 2x2 average pool
    (ResNet-D "vd" mode) and an optional trailing ReLU.

    Raises:
        RuntimeError: if dilation > 1 is combined with a kernel size other
            than 3 (the padding arithmetic below assumes 3x3 in that case).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 dilation=1,
                 groups=1,
                 is_vd_mode=False,
                 act=None,
                 data_format='NCHW'):
        super(ConvBNLayer, self).__init__()
        if dilation != 1 and kernel_size != 3:
            raise RuntimeError(
                "When the dilation isn't 1,the kernel_size should be 3.")

        self.is_vd_mode = is_vd_mode
        self.act = act

        # Used only in vd mode: downsample by pooling before the conv.
        self._pool2d_avg = nn.AvgPool2D(
            kernel_size=2,
            stride=2,
            padding=0,
            ceil_mode=True,
            data_format=data_format)

        # "Same" padding when dilation == 1, otherwise pad by the dilation.
        same_pad = (kernel_size - 1) // 2 if dilation == 1 else dilation
        self._conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=same_pad,
            dilation=dilation,
            groups=groups,
            bias_attr=False,
            data_format=data_format)
        self._batch_norm = nn.BatchNorm2D(out_channels, data_format=data_format)
        if self.act:
            self._act = nn.ReLU()

    def forward(self, inputs):
        x = self._pool2d_avg(inputs) if self.is_vd_mode else inputs
        x = self._batch_norm(self._conv(x))
        if self.act:
            x = self._act(x)
        return x
class BottleneckBlock(nn.Layer):
    """ResNet bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand x4) with a
    residual shortcut, supporting dilation and the vd-style pooled shortcut."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 shortcut=True,
                 if_first=False,
                 first_conv=False,
                 dilation=1,
                 is_vd_mode=False,
                 data_format='NCHW'):
        super(BottleneckBlock, self).__init__()
        self.data_format = data_format
        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            act='relu',
            data_format=data_format)

        # The first block of a dilated stage halves the dilation so the
        # receptive field grows gradually across the stage boundary.
        if first_conv and dilation != 1:
            dilation //= 2
        self.dilation = dilation

        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            act='relu',
            dilation=dilation,
            data_format=data_format)
        # Expansion conv: output has 4x the bottleneck width.
        self.conv2 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels * 4,
            kernel_size=1,
            act=None,
            data_format=data_format)

        # vd-mode pooled shortcut only applies when the block strides and
        # is not the very first block of the network.
        if if_first or stride == 1:
            is_vd_mode = False
        if not shortcut:
            # Projection shortcut to match channel count / resolution.
            self.short = ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels * 4,
                kernel_size=1,
                stride=stride,
                is_vd_mode=is_vd_mode,
                data_format=data_format)
        self.shortcut = shortcut
        # NOTE: Use the wrap layer for quantization training
        self.relu = nn.ReLU()

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        # Identity shortcut when shapes already match, else projection.
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        y = paddle.add(short, conv2)
        y = self.relu(y)
        return y
class BasicBlock(nn.Layer):
    """ResNet basic residual block (two 3x3 convs) used by ResNet-18/34,
    with optional dilation and vd-style pooled shortcut."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 dilation=1,
                 shortcut=True,
                 if_first=False,
                 is_vd_mode=False,
                 data_format='NCHW'):
        super(BasicBlock, self).__init__()
        # First conv carries the stride; second is always stride 1.
        self.conv0 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            dilation=dilation,
            act='relu',
            data_format=data_format)
        self.conv1 = ConvBNLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            dilation=dilation,
            act=None,
            data_format=data_format)

        # vd-mode pooled shortcut only when the block actually strides and
        # is not the first block of the network.
        if if_first or stride == 1:
            is_vd_mode = False
        if not shortcut:
            # Projection shortcut to match channels / resolution.
            self.short = ConvBNLayer(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1,
                stride=stride,
                is_vd_mode=is_vd_mode,
                data_format=data_format)
        self.shortcut = shortcut
        self.dilation = dilation
        self.data_format = data_format
        self.relu = nn.ReLU()

    def forward(self, inputs):
        residual = inputs if self.shortcut else self.short(inputs)
        out = self.conv1(self.conv0(inputs))
        return self.relu(paddle.add(residual, out))
@manager.BACKBONES.add_component
class ResNet(nn.Layer):
    def __init__(self,
                 layers=50,
                 output_stride=8,
                 multi_grid=(1, 1, 1),
                 return_idx=[3],
                 pretrained=None,
                 variant='b',
                 data_format='NCHW'):
        """
        Residual Network, see https://arxiv.org/abs/1512.03385

        Args:
            variant (str): ResNet variant, supports 'a', 'b', 'c', 'd' currently
            layers (int, optional): The layers of ResNet_vd. The supported layers are (18, 34, 50, 101, 152, 200). Default: 50.
            output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 8.
            multi_grid (tuple|list, optional): The grid of stage4. Defult: (1, 1, 1).
            return_idx (list, optional): Indices (0-3) of the stages whose outputs are returned. Default: [3].
            pretrained (str, optional): The path of pretrained model.
        """
        super(ResNet, self).__init__()

        self.variant = variant
        self.data_format = data_format
        self.conv1_logit = None  # for gscnn shape stream
        self.layers = layers
        # ImageNet normalization constants used by preprocess().
        self.norm_mean = paddle.to_tensor([0.485, 0.456, 0.406])
        self.norm_std = paddle.to_tensor([0.229, 0.224, 0.225])

        supported_layers = [18, 34, 50, 101, 152, 200]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        # Blocks per stage for each depth variant.
        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        elif layers == 200:
            depth = [3, 12, 48, 3]
        # Input channels of each stage; bottleneck variants (>=50) expand 4x.
        num_channels = [64, 256, 512, 1024
                        ] if layers >= 50 else [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]

        # for channels of four returned stages
        self.feat_channels = [c * 4 for c in num_filters
                              ] if layers >= 50 else num_filters
        # Stages to dilate (instead of stride) to reach the output stride.
        dilation_dict = None
        if output_stride == 8:
            dilation_dict = {2: 2, 3: 4}
        elif output_stride == 16:
            dilation_dict = {3: 2}

        self.return_idx = return_idx

        # Variants 'c'/'d' replace the single 7x7 stem with three 3x3 convs.
        if variant in ['c', 'd']:
            conv_defs = [
                [3, 32, 3, 2],
                [32, 32, 3, 1],
                [32, 64, 3, 1],
            ]
        else:
            conv_defs = [[3, 64, 7, 2]]
        self.conv1 = nn.Sequential()
        for (i, conv_def) in enumerate(conv_defs):
            c_in, c_out, k, s = conv_def
            self.conv1.add_sublayer(
                str(i),
                ConvBNLayer(
                    in_channels=c_in,
                    out_channels=c_out,
                    kernel_size=k,
                    stride=s,
                    act='relu',
                    data_format=data_format))

        self.pool2d_max = nn.MaxPool2D(
            kernel_size=3, stride=2, padding=1, data_format=data_format)

        self.stage_list = []
        if layers >= 50:
            for block in range(len(depth)):
                shortcut = False
                block_list = []
                for i in range(depth[block]):
                    # Historical naming scheme kept for checkpoint compat.
                    if layers in [101, 152] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)

                    ###############################################################################
                    # Add dilation rate for some segmentation tasks, if dilation_dict is not None.
                    dilation_rate = dilation_dict[
                        block] if dilation_dict and block in dilation_dict else 1

                    # Actually block here is 'stage', and i is 'block' in 'stage'
                    # At the stage 4, expand the the dilation_rate if given multi_grid
                    if block == 3:
                        dilation_rate = dilation_rate * multi_grid[i]
                    ###############################################################################

                    bottleneck_block = self.add_sublayer(
                        'layer_%d_%d' % (block, i),
                        BottleneckBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block] * 4,
                            out_channels=num_filters[block],
                            # No stride when dilating; stride 2 on the first
                            # block of stages 2-4 otherwise.
                            stride=2 if i == 0 and block != 0
                            and dilation_rate == 1 else 1,
                            shortcut=shortcut,
                            if_first=block == i == 0,
                            first_conv=i == 0,
                            is_vd_mode=variant in ['c', 'd'],
                            dilation=dilation_rate,
                            data_format=data_format))
                    block_list.append(bottleneck_block)
                    shortcut = True
                self.stage_list.append(block_list)
        else:
            for block in range(len(depth)):
                shortcut = False
                block_list = []
                for i in range(depth[block]):
                    dilation_rate = dilation_dict[block] \
                        if dilation_dict and block in dilation_dict else 1
                    if block == 3:
                        dilation_rate = dilation_rate * multi_grid[i]

                    basic_block = self.add_sublayer(
                        'layer_%d_%d' % (block, i),
                        BasicBlock(
                            in_channels=num_channels[block]
                            if i == 0 else num_filters[block],
                            out_channels=num_filters[block],
                            stride=2 if i == 0 and block != 0 \
                                and dilation_rate == 1 else 1,
                            dilation=dilation_rate,
                            shortcut=shortcut,
                            if_first=block == i == 0,
                            is_vd_mode=variant in ['c', 'd'],
                            data_format=data_format))
                    block_list.append(basic_block)
                    shortcut = True
                self.stage_list.append(block_list)

        self.pretrained = pretrained
        self.init_weight()

    def forward(self, inputs):
        image = self.preprocess(inputs)
        y = self.conv1(image)
        y = self.pool2d_max(y)

        # A feature list saves the output feature map of each stage.
        feat_list = []
        for idx, stage in enumerate(self.stage_list):
            for block in stage:
                y = block(y)
            if idx in self.return_idx:
                feat_list.append(y)

        return feat_list

    def preprocess(self, images):
        """
        Preprocess images

        Args:
            images [paddle.Tensor(N, 3, H, W)]: Input images

        Return
            x [paddle.Tensor(N, 3, H, W)]: Preprocessed images
        """
        x = images

        # Create a mask for padded pixels
        # NOTE(review): padded pixels are assumed to be encoded as NaN by
        # the upstream transform — confirm against the caller.
        mask = paddle.isnan(x)

        # Match ResNet pretrained preprocessing
        x = self.normalize(x, mean=self.norm_mean, std=self.norm_std)

        # Make padded pixels = 0
        a = paddle.zeros_like(x)
        x = paddle.where(mask, a, x)

        return x

    def normalize(self, image, mean, std):
        # Flatten H*W so the per-channel mean/std broadcast over pixels,
        # then restore the original shape.
        shape = paddle.shape(image)
        if mean.shape:
            mean = mean[..., :, None]
        if std.shape:
            std = std[..., :, None]
        out = (image.reshape([shape[0], shape[1], shape[2] * shape[3]]) -
               mean) / std
        return out.reshape(shape)

    def init_weight(self):
        # Default conv re-initialization; pretrained weights, if any, are
        # expected to be loaded externally.
        for sublayer in self.sublayers():
            if isinstance(sublayer, nn.Conv2D):
                reset_parameters(sublayer)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/backbones/vovnetcp.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
# Copyright (c) 2021 Wang, Yue
# ------------------------------------------------------------------------
# Copyright (c) Youngwan Lee (ETRI) All Rights Reserved.
# Copyright 2021 Toyota Research Institute. All rights reserved.
# ------------------------------------------------------------------------
import warnings
from collections import OrderedDict
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.distributed.fleet.utils import recompute
from paddle3d.apis import manager
# Per-variant VoVNet configurations. Keys:
#   stem: output channels of the three stem convs.
#   stage_conv_ch: channels of each conv inside an OSA layer, per stage.
#   stage_out_ch: output (post-concat) channels of an OSA layer, per stage.
#   layer_per_block: number of convs aggregated inside one OSA layer.
#   block_per_stage: number of OSA layers per stage.
#   eSE: whether to apply effective Squeeze-and-Excitation.
#   dw: whether the OSA convs are depthwise-separable.
VoVNet19_slim_dw_eSE = {
    'stem': [64, 64, 64],
    'stage_conv_ch': [64, 80, 96, 112],
    'stage_out_ch': [112, 256, 384, 512],
    "layer_per_block": 3,
    "block_per_stage": [1, 1, 1, 1],
    "eSE": True,
    "dw": True
}

VoVNet19_dw_eSE = {
    'stem': [64, 64, 64],
    "stage_conv_ch": [128, 160, 192, 224],
    "stage_out_ch": [256, 512, 768, 1024],
    "layer_per_block": 3,
    "block_per_stage": [1, 1, 1, 1],
    "eSE": True,
    "dw": True
}

VoVNet19_slim_eSE = {
    'stem': [64, 64, 128],
    'stage_conv_ch': [64, 80, 96, 112],
    'stage_out_ch': [112, 256, 384, 512],
    'layer_per_block': 3,
    'block_per_stage': [1, 1, 1, 1],
    'eSE': True,
    "dw": False
}

VoVNet19_eSE = {
    'stem': [64, 64, 128],
    "stage_conv_ch": [128, 160, 192, 224],
    "stage_out_ch": [256, 512, 768, 1024],
    "layer_per_block": 3,
    "block_per_stage": [1, 1, 1, 1],
    "eSE": True,
    "dw": False
}

VoVNet39_eSE = {
    'stem': [64, 64, 128],
    "stage_conv_ch": [128, 160, 192, 224],
    "stage_out_ch": [256, 512, 768, 1024],
    "layer_per_block": 5,
    "block_per_stage": [1, 1, 2, 2],
    "eSE": True,
    "dw": False
}

VoVNet57_eSE = {
    'stem': [64, 64, 128],
    "stage_conv_ch": [128, 160, 192, 224],
    "stage_out_ch": [256, 512, 768, 1024],
    "layer_per_block": 5,
    "block_per_stage": [1, 1, 4, 3],
    "eSE": True,
    "dw": False
}

VoVNet99_eSE = {
    'stem': [64, 64, 128],
    "stage_conv_ch": [128, 160, 192, 224],
    "stage_out_ch": [256, 512, 768, 1024],
    "layer_per_block": 5,
    "block_per_stage": [1, 3, 9, 3],
    "eSE": True,
    "dw": False
}

# Lookup from spec name (as used in configs) to its configuration dict.
_STAGE_SPECS = {
    "V-19-slim-dw-eSE": VoVNet19_slim_dw_eSE,
    "V-19-dw-eSE": VoVNet19_dw_eSE,
    "V-19-slim-eSE": VoVNet19_slim_eSE,
    "V-19-eSE": VoVNet19_eSE,
    "V-39-eSE": VoVNet39_eSE,
    "V-57-eSE": VoVNet57_eSE,
    "V-99-eSE": VoVNet99_eSE,
}
def dw_conv3x3(in_channels,
               out_channels,
               module_name,
               postfix,
               stride=1,
               kernel_size=3,
               padding=1):
    """3x3 depthwise-separable convolution with padding.

    Returns a list of (name, layer) tuples — depthwise 3x3, pointwise 1x1,
    BN, ReLU — suitable for building an ``nn.Sequential``.

    NOTE(review): the pointwise conv is declared with ``in_channels`` inputs
    but receives the depthwise output, which has ``out_channels`` channels;
    likewise ``groups=out_channels`` on the depthwise conv requires
    ``in_channels`` divisible by it. This only holds when
    ``in_channels == out_channels`` — confirm all call sites satisfy that.
    """
    return [
        ('{}_{}/dw_conv3x3'.format(module_name, postfix),
         nn.Conv2D(
             in_channels,
             out_channels,
             kernel_size=kernel_size,
             stride=stride,
             padding=padding,
             groups=out_channels,
             bias_attr=False)),
        ('{}_{}/pw_conv1x1'.format(module_name, postfix),
         nn.Conv2D(
             in_channels,
             out_channels,
             kernel_size=1,
             stride=1,
             padding=0,
             groups=1,
             bias_attr=False)),
        ('{}_{}/pw_norm'.format(module_name, postfix),
         nn.BatchNorm2D(out_channels)),
        ('{}_{}/pw_relu'.format(module_name, postfix), nn.ReLU()),
    ]
def conv3x3(in_channels,
            out_channels,
            module_name,
            postfix,
            stride=1,
            groups=1,
            kernel_size=3,
            padding=1):
    """3x3 convolution with padding, followed by BN and ReLU.

    Returns a list of (name, layer) tuples suitable for ``nn.Sequential``.
    """
    prefix = f"{module_name}_{postfix}"
    conv = nn.Conv2D(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=groups,
        bias_attr=False)
    return [
        (f"{prefix}/conv", conv),
        (f"{prefix}/norm", nn.BatchNorm2D(out_channels)),
        (f"{prefix}/relu", nn.ReLU()),
    ]
def conv1x1(in_channels,
            out_channels,
            module_name,
            postfix,
            stride=1,
            groups=1,
            kernel_size=1,
            padding=0):
    """1x1 convolution with padding, followed by BN and ReLU.

    Returns a list of (name, layer) tuples suitable for ``nn.Sequential``.
    """
    prefix = f"{module_name}_{postfix}"
    conv = nn.Conv2D(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=groups,
        bias_attr=False)
    return [
        (f"{prefix}/conv", conv),
        (f"{prefix}/norm", nn.BatchNorm2D(out_channels)),
        (f"{prefix}/relu", nn.ReLU()),
    ]
class Hsigmoid(nn.Layer):
    """Hard sigmoid: relu6(x + 3) / 6, a piecewise-linear sigmoid."""

    def __init__(self):
        super(Hsigmoid, self).__init__()

    def forward(self, x):
        clipped = F.relu6(x + 3.0)
        return clipped / 6.0
class eSEModule(nn.Layer):
    """Effective Squeeze-and-Excitation: global pool -> 1x1 conv ->
    hard-sigmoid gate, multiplied back onto the input."""

    def __init__(self, channel, reduction=4):
        super(eSEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)
        # Single 1x1 conv (no channel reduction, unlike classic SE).
        self.fc = nn.Conv2D(channel, channel, kernel_size=1, padding=0)
        self.hsigmoid = Hsigmoid()

    def forward(self, x):
        gate = self.avg_pool(x)
        gate = self.fc(gate)
        gate = self.hsigmoid(gate)
        # Channel-wise reweighting of the original feature map.
        return x * gate
class _OSA_layer(nn.Layer):
    """One-Shot Aggregation layer: a chain of convs whose intermediate
    outputs are all concatenated once at the end, reduced by a 1x1 conv,
    gated by eSE, and optionally added back to the input (identity)."""

    def __init__(self,
                 in_ch,
                 stage_ch,
                 concat_ch,
                 layer_per_block,
                 module_name,
                 SE=False,
                 identity=False,
                 depthwise=False,
                 with_cp=True):
        super(_OSA_layer, self).__init__()

        self.identity = identity
        self.depthwise = depthwise
        self.isReduced = False
        # with_cp: use recompute (gradient checkpointing) during training
        # to trade compute for activation memory.
        self.use_checkpoint = with_cp
        self.layers = nn.LayerList()
        in_channel = in_ch
        if self.depthwise and in_channel != stage_ch:
            # Depthwise convs need matching in/out channels, so reduce first.
            self.isReduced = True
            self.conv_reduction = nn.Sequential(*conv1x1(
                in_channel, stage_ch, "{}_reduction".format(module_name), "0"))
        for i in range(layer_per_block):
            if self.depthwise:
                self.layers.append(
                    nn.Sequential(
                        *dw_conv3x3(stage_ch, stage_ch, module_name, i)))
            else:
                self.layers.append(
                    nn.Sequential(
                        *conv3x3(in_channel, stage_ch, module_name, i)))
            in_channel = stage_ch

        # feature aggregation
        in_channel = in_ch + layer_per_block * stage_ch
        self.concat = nn.Sequential(
            *conv1x1(in_channel, concat_ch, module_name, "concat"))

        self.ese = eSEModule(concat_ch)

    def _forward(self, x):
        identity_feat = x

        # Collect the input plus every intermediate conv output.
        output = []
        output.append(x)
        if self.depthwise and self.isReduced:
            x = self.conv_reduction(x)
        for layer in self.layers:
            x = layer(x)
            output.append(x)

        # One-shot aggregation: concat everything, reduce, then eSE-gate.
        x = paddle.concat(output, axis=1)
        xt = self.concat(x)

        xt = self.ese(xt)

        if self.identity:
            xt = xt + identity_feat

        return xt

    def forward(self, x):
        # Recompute saves memory by re-running _forward in the backward pass.
        if self.use_checkpoint and self.training:
            xt = recompute(self._forward, x)
        else:
            xt = self._forward(x)

        return xt
class _OSA_stage(nn.Sequential):
    """A VoVNet stage: optional max-pool downsample followed by one or more
    OSA layers; all layers after the first use an identity connection."""

    def __init__(self,
                 in_ch,
                 stage_ch,
                 concat_ch,
                 block_per_stage,
                 layer_per_block,
                 stage_num,
                 SE=False,
                 depthwise=False):

        super(_OSA_stage, self).__init__()

        # Stage 2 keeps the stem resolution; later stages downsample 2x.
        if not stage_num == 2:
            self.add_sublayer(
                "Pooling", nn.MaxPool2D(
                    kernel_size=3, stride=2, ceil_mode=True))

        # NOTE(review): SE is disabled for the first layer of multi-layer
        # stages and re-enabled only for the last one (i == block_per_stage
        # - 2 below), so only the final OSA layer of a stage keeps SE —
        # confirm this matches the reference VoVNet implementation.
        if block_per_stage != 1:
            SE = False
        module_name = f"OSA{stage_num}_1"
        self.add_sublayer(
            module_name,
            _OSA_layer(
                in_ch,
                stage_ch,
                concat_ch,
                layer_per_block,
                module_name,
                SE,
                depthwise=depthwise))
        for i in range(block_per_stage - 1):
            if i != block_per_stage - 2:  # last block
                SE = False
            module_name = f"OSA{stage_num}_{i + 2}"
            self.add_sublayer(
                module_name,
                _OSA_layer(
                    concat_ch,
                    stage_ch,
                    concat_ch,
                    layer_per_block,
                    module_name,
                    SE,
                    identity=True,
                    depthwise=depthwise),
            )
@manager.BACKBONES.add_component
class VoVNetCP(nn.Layer):
    def __init__(self,
                 spec_name,
                 input_ch=3,
                 out_features=None,
                 frozen_stages=-1,
                 norm_eval=True,
                 pretrained=None,
                 init_cfg=None):
        """
        VoVNet backbone with gradient checkpointing (recompute) in OSA layers.

        Args:
            spec_name (str): Key into _STAGE_SPECS, e.g. "V-99-eSE".
            input_ch(int) : the number of input channel
            out_features (list[str]): name of the layers whose outputs should
                be returned in forward. Can be anything in "stem", "stage2" ...
            frozen_stages (int): stages up to this index are frozen; -1 means none.
            norm_eval (bool): keep BN layers in eval mode during training.
            pretrained (str, optional): deprecated; use init_cfg instead.
        """
        super(VoVNetCP, self).__init__()
        self.frozen_stages = frozen_stages
        self.norm_eval = norm_eval

        # NOTE(review): self.init_cfg is only assigned when `pretrained` is a
        # str; the `init_cfg` argument itself is otherwise unused here —
        # confirm weight loading is handled by the caller.
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)

        stage_specs = _STAGE_SPECS[spec_name]

        stem_ch = stage_specs["stem"]
        config_stage_ch = stage_specs["stage_conv_ch"]
        config_concat_ch = stage_specs["stage_out_ch"]
        block_per_stage = stage_specs["block_per_stage"]
        layer_per_block = stage_specs["layer_per_block"]
        SE = stage_specs["eSE"]
        depthwise = stage_specs["dw"]

        self._out_features = out_features

        # Stem module: first conv is always regular; the following two use
        # depthwise-separable convs when the spec requests them.
        conv_type = dw_conv3x3 if depthwise else conv3x3
        stem = conv3x3(input_ch, stem_ch[0], "stem", "1", 2)
        stem += conv_type(stem_ch[0], stem_ch[1], "stem", "2", 1)
        stem += conv_type(stem_ch[1], stem_ch[2], "stem", "3", 2)
        self.add_sublayer("stem", nn.Sequential(*stem))
        current_stirde = 4
        self._out_feature_strides = {
            "stem": current_stirde,
            "stage2": current_stirde
        }
        self._out_feature_channels = {"stem": stem_ch[2]}

        stem_out_ch = [stem_ch[2]]
        in_ch_list = stem_out_ch + config_concat_ch[:-1]

        # OSA stages
        self.stage_names = []
        for i in range(4):  # num_stages
            name = "stage%d" % (i + 2)  # stage 2 ... stage 5
            self.stage_names.append(name)
            self.add_sublayer(
                name,
                _OSA_stage(
                    in_ch_list[i],
                    config_stage_ch[i],
                    config_concat_ch[i],
                    block_per_stage[i],
                    layer_per_block,
                    i + 2,
                    SE,
                    depthwise,
                ),
            )

            self._out_feature_channels[name] = config_concat_ch[i]
            if not i == 0:
                # Stages 3-5 each halve the resolution via their max-pool.
                self._out_feature_strides[name] = current_stirde = int(
                    current_stirde * 2)

    def forward(self, x):
        # Collect only the features named in self._out_features, in order.
        outputs = []
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs.append(x)
        for name in self.stage_names:
            x = getattr(self, name)(x)
            if name in self._out_features:
                outputs.append(x)

        return outputs

    def _freeze_stages(self):
        # NOTE(review): paddle parameters are typically frozen via
        # `stop_gradient`; setting `requires_grad` here looks carried over
        # from a PyTorch/mmdet port — confirm it has the intended effect.
        if self.frozen_stages >= 0:
            m = getattr(self, 'stem')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'stage{i+1}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        freezed."""
        super(VoVNetCP, self).train()
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.sublayers():
                # Keep BN statistics frozen during fine-tuning.
                if isinstance(m, nn.layer.norm._BatchNormBase):
                    m.eval()
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/backbones/second_backbone.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/backbones/second.py
Ths copyright of mmdetection3d is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import math
import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Normal, Uniform
from paddle3d.apis import manager
from paddle3d.models.voxel_encoders.pillar_encoder import build_norm_layer
__all__ = ['SecondBackbone', 'build_conv_layer']
def build_conv_layer(in_channels,
                     out_channels,
                     kernel_size,
                     stride=1,
                     padding=0,
                     dilation=1,
                     groups=1,
                     bias=True,
                     distribution="uniform"):
    """Build a Conv2D with an explicit weight/bias initialization scheme.

    distribution == "uniform": Kaiming-style uniform with bound
    1/sqrt(in_channels * k^2) for both weight and bias; anything else:
    MSRA fan-out normal for the weight and zero bias.
    """
    if distribution == "uniform":
        bound = 1 / math.sqrt(in_channels * kernel_size**2)
        weight_init = Uniform(-bound, bound)
        bias_init = Uniform(-bound, bound)
    else:
        fan_out = out_channels * kernel_size**2
        std = math.sqrt(2) / math.sqrt(fan_out)
        weight_init = Normal(0, std)
        bias_init = Constant(0.)

    param_attr = ParamAttr(initializer=weight_init)
    # bias_attr=False disables the bias term entirely.
    bias_attr = ParamAttr(initializer=bias_init) if bias else False

    return nn.Conv2D(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        weight_attr=param_attr,
        bias_attr=bias_attr)
@manager.BACKBONES.add_component
class SecondBackbone(nn.Layer):
    """SECOND 2D backbone: a sequence of conv stages, each starting with a
    strided (downsampling) conv followed by several stride-1 convs; the
    output of every stage is returned."""

    def __init__(self,
                 in_channels=128,
                 out_channels=[128, 128, 256],
                 layer_nums=[3, 5, 5],
                 downsample_strides=[2, 2, 2]):
        super(SecondBackbone, self).__init__()
        assert len(downsample_strides) == len(layer_nums)
        assert len(out_channels) == len(layer_nums)
        self.downsample_strides = downsample_strides
        norm_cfg = dict(type='BatchNorm2D', eps=1e-3, momentum=0.01)
        # Input channels of each stage: previous stage's output (or the
        # backbone input for the first stage).
        in_filters = [in_channels, *out_channels[:-1]]

        blocks = []
        for i, layer_num in enumerate(layer_nums):
            # Downsampling conv + BN + ReLU opens the stage.
            stage_layers = [
                build_conv_layer(
                    in_filters[i],
                    out_channels[i],
                    3,
                    stride=downsample_strides[i],
                    padding=1,
                    bias=False),
                build_norm_layer(norm_cfg, out_channels[i]),
                nn.ReLU(),
            ]
            # Then `layer_num` stride-1 conv + BN + ReLU triplets.
            for _ in range(layer_num):
                stage_layers.extend([
                    build_conv_layer(
                        out_channels[i],
                        out_channels[i],
                        3,
                        padding=1,
                        bias=False),
                    build_norm_layer(norm_cfg, out_channels[i]),
                    nn.ReLU(),
                ])
            blocks.append(nn.Sequential(*stage_layers))
        self.blocks = nn.LayerList(blocks)

    def forward(self, x):
        # Return the feature map produced by every stage.
        outs = []
        for block in self.blocks:
            x = block(x)
            outs.append(x)
        return tuple(outs)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/point_encoders/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .voxel_set_abstraction import VoxelSetAbstraction
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/point_encoders/voxel_set_abstraction.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
Ths copyright of OpenPCDet is as follows:
Apache-2.0 license [see LICENSE for details].
"""
import numpy as np
import paddle
import paddle.nn as nn
from paddle import sparse
from paddle3d.apis import manager
from paddle3d.models.common import get_voxel_centers
from paddle3d.models.common import pointnet2_stack as pointnet2_stack_modules
from paddle3d.models.layers import param_init
from paddle3d.ops import pointnet2_ops
def bilinear_interpolate_paddle(im, x, y):
    """Bilinearly sample the feature map `im` at fractional positions.

    Args:
        im: (H, W, C) feature map, indexed as [y, x].
        x: (N,) x coordinates (may be fractional).
        y: (N,) y coordinates (may be fractional).

    Returns:
        (N, C) interpolated features, one row per sample point.
    """
    # Integer corner indices; the +1 neighbor is computed BEFORE clamping
    # so that both corners collapse onto the border for out-of-range points.
    x_floor = paddle.floor(x).astype('int64')
    y_floor = paddle.floor(y).astype('int64')
    x0 = paddle.clip(x_floor, 0, im.shape[1] - 1)
    x1 = paddle.clip(x_floor + 1, 0, im.shape[1] - 1)
    y0 = paddle.clip(y_floor, 0, im.shape[0] - 1)
    y1 = paddle.clip(y_floor + 1, 0, im.shape[0] - 1)

    # Feature values at the four surrounding corners, each (N, C).
    Ia = im[y0, x0]
    Ib = im[y1, x0]
    Ic = im[y0, x1]
    Id = im[y1, x1]

    # Bilinear weights (N,), computed from the clamped corner coordinates.
    wa = (x1.astype(x.dtype) - x) * (y1.astype(y.dtype) - y)
    wb = (x1.astype(x.dtype) - x) * (y - y0.astype(y.dtype))
    wc = (x - x0.astype(x.dtype)) * (y1.astype(y.dtype) - y)
    wd = (x - x0.astype(x.dtype)) * (y - y0.astype(y.dtype))

    # Broadcast each (N,) weight over the channel axis of its (N, C)
    # corner value; equivalent to the transpose round-trip formulation.
    return (Ia * wa.unsqueeze(-1) + Ib * wb.unsqueeze(-1) +
            Ic * wc.unsqueeze(-1) + Id * wd.unsqueeze(-1))
def sample_points_with_roi(rois,
                           points,
                           sample_radius_with_roi,
                           num_max_points_of_part=200000):
    """Keep only the points that lie near any RoI box.

    Args:
        rois: (M, 7 + C) boxes [x, y, z, dx, dy, dz, heading, ...].
        points: (N, 3) point coordinates.
        sample_radius_with_roi (float): slack radius added around each RoI.
        num_max_points_of_part (int): chunk size bounding the (N, M)
            distance matrix when N is large.

    Returns:
        sampled_points: (N_out, 3) points near the RoIs; falls back to the
            first point when no point qualifies, so the result is never empty.
        point_mask: (N,) boolean mask over the input points.
    """

    def _near_roi_mask(point_chunk):
        # (n, M) distances from each point to every RoI center.
        distance = paddle.linalg.norm(
            point_chunk[:, None, :] - rois[None, :, 0:3], axis=-1)
        # BUGFIX: paddle.Tensor.min returns only the values (unlike
        # torch.min, which this code was ported from), so the original
        # two-target unpack was invalid; fetch the index via argmin.
        min_dis = distance.min(axis=-1)
        min_dis_roi_idx = distance.argmin(axis=-1)
        # Half-diagonal of the nearest RoI plus the sampling slack.
        roi_max_axis = paddle.linalg.norm(
            rois[min_dis_roi_idx, 3:6] / 2, axis=-1)
        return min_dis < roi_max_axis + sample_radius_with_roi

    if points.shape[0] < num_max_points_of_part:
        point_mask = _near_roi_mask(points)
    else:
        # Chunk the points to avoid materializing a huge (N, M) matrix.
        point_mask_list = []
        start_idx = 0
        while start_idx < points.shape[0]:
            point_mask_list.append(
                _near_roi_mask(
                    points[start_idx:start_idx + num_max_points_of_part]))
            start_idx += num_max_points_of_part
        point_mask = paddle.concat(point_mask_list, axis=0)
    sampled_points = points[:1] if point_mask.sum(
    ) == 0 else points[point_mask, :]
    return sampled_points, point_mask
@manager.POINT_ENCODERS.add_component
class VoxelSetAbstraction(nn.Layer):
    """Voxel Set Abstraction (VSA) module (PV-RCNN style).

    Samples a fixed number of keypoints from the raw point cloud, then
    aggregates features onto them from several sources (BEV feature map,
    raw points, multi-scale sparse-conv voxel features) and fuses the
    concatenated features with a small MLP.

    Args:
        model_cfg (dict): VSA configuration ("features_source", "sa_layer",
            "point_source", "sample_method", "num_keypoints", "out_channels").
        voxel_size (list): voxel size [vx, vy, vz] of the voxelization.
        point_cloud_range (list): [x_min, y_min, z_min, x_max, y_max, z_max].
        num_bev_features (int): channel count of the BEV feature map
            (only used when 'bev' is among the feature sources).
        num_rawpoint_features (int): channel count of the raw points
            (xyz + extra features).
    """

    def __init__(self,
                 model_cfg,
                 voxel_size,
                 point_cloud_range,
                 num_bev_features=None,
                 num_rawpoint_features=None,
                 **kwargs):
        super(VoxelSetAbstraction, self).__init__()
        self.model_cfg = model_cfg
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range

        sa_cfg = self.model_cfg["sa_layer"]

        # One stacked-SA aggregation layer per voxel feature source;
        # 'bev' and 'raw_points' are handled separately below.
        self.sa_layers = nn.LayerList()
        self.sa_layer_names = []
        self.downsample_stride_map = {}
        c_in = 0  # running total of concatenated feature channels
        for src_name in self.model_cfg["features_source"]:
            if src_name in ['bev', 'raw_points']:
                continue
            self.downsample_stride_map[src_name] = sa_cfg[src_name][
                "downsample_stride"]
            # Input channel count: explicit 'in_channels' wins, otherwise
            # infer it from the first MLP spec.
            if sa_cfg[src_name].get('in_channels', None) is None:
                input_channels = sa_cfg[src_name]["mlps"][0][0] \
                    if isinstance(sa_cfg[src_name]["mlps"][0], list) else sa_cfg[src_name]["mlps"][0]
            else:
                input_channels = sa_cfg[src_name]['in_channels']
            cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
                input_channels=input_channels, config=sa_cfg[src_name])
            self.sa_layers.append(cur_layer)
            self.sa_layer_names.append(src_name)
            c_in += cur_num_c_out

        if 'bev' in self.model_cfg["features_source"]:
            c_bev = num_bev_features
            c_in += c_bev

        self.num_rawpoint_features = num_rawpoint_features
        if 'raw_points' in self.model_cfg["features_source"]:
            # minus 3: xyz are consumed as coordinates, not features.
            self.sa_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(
                input_channels=num_rawpoint_features - 3,
                config=sa_cfg['raw_points'])
            c_in += cur_num_c_out

        # Fuse the concatenated multi-source features to 'out_channels'.
        self.vsa_point_feature_fusion = nn.Sequential(
            nn.Linear(c_in, self.model_cfg["out_channels"], bias_attr=False),
            nn.BatchNorm1D(self.model_cfg["out_channels"]),
            nn.ReLU(),
        )
        self.num_point_features = self.model_cfg["out_channels"]
        self.num_point_features_before_fusion = c_in
        self.init_weights()

    def init_weights(self):
        """Re-initialize the fusion MLP (Linear + BatchNorm1D) weights."""
        for layer in self.vsa_point_feature_fusion.sublayers():
            if isinstance(layer, (nn.Linear)):
                param_init.reset_parameters(layer)
            if isinstance(layer, nn.BatchNorm1D):
                param_init.constant_init(layer.weight, value=1)
                param_init.constant_init(layer.bias, value=0)

    def interpolate_from_bev_features(self, keypoints, bev_features, batch_size,
                                      bev_stride):
        """Bilinearly sample BEV features at the keypoint xy locations.

        Args:
            keypoints: (N1 + N2 + ..., 4) rows [bs_idx, x, y, z]
            bev_features: (B, C, H, W)
            batch_size:
            bev_stride: downsample stride of the BEV map w.r.t. the voxel grid
        Returns:
            point_bev_features: (N1 + N2 + ..., C)
        """
        # World xy -> voxel-grid index -> BEV-map index.
        x_idxs = (
            keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]
        y_idxs = (
            keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]

        x_idxs = x_idxs / bev_stride
        y_idxs = y_idxs / bev_stride

        point_bev_features_list = []
        for k in range(batch_size):
            # Sample each batch element's map with its own keypoints.
            bs_mask = (keypoints[:, 0] == k)

            cur_x_idxs = x_idxs[bs_mask]
            cur_y_idxs = y_idxs[bs_mask]
            cur_bev_features = bev_features[k].transpose((1, 2, 0))  # (H, W, C)
            point_bev_features = bilinear_interpolate_paddle(
                cur_bev_features, cur_x_idxs, cur_y_idxs)
            point_bev_features_list.append(point_bev_features)

        point_bev_features = paddle.concat(
            point_bev_features_list, axis=0)  # (N1 + N2 + ..., C)
        return point_bev_features

    def get_sampled_points(self, batch_dict):
        """Sample keypoints from the configured point source.

        Args:
            batch_dict:
        Returns:
            keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]
        """
        batch_size = batch_dict['batch_size']
        if self.model_cfg["point_source"] == 'raw_points':
            src_points = batch_dict['points'][:, 1:4]
            batch_indices = batch_dict['points'][:, 0].astype('int64')
        elif self.model_cfg["point_source"] == 'voxel_centers':
            raise NotImplementedError
        else:
            raise NotImplementedError
        keypoints_list = []
        for bs_idx in range(batch_size):
            bs_mask = (batch_indices == bs_idx)
            sampled_points = src_points[bs_mask].unsqueeze(axis=0)  # (1, N, 3)
            if self.model_cfg["sample_method"] == 'FPS':
                cur_pt_idxs = pointnet2_ops.farthest_point_sample(
                    sampled_points[:, :, 0:3],
                    self.model_cfg["num_keypoints"]).astype('int64')

                # Fewer points than keypoints: tile the valid indices so
                # the output always has exactly num_keypoints entries.
                if sampled_points.shape[1] < self.model_cfg["num_keypoints"]:
                    times = int(self.model_cfg["num_keypoints"] /
                                sampled_points.shape[1]) + 1
                    non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]
                    cur_pt_idxs[0] = non_empty.tile(
                        [1, times])[:self.model_cfg["num_keypoints"]]

                keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(axis=0)

            elif self.model_cfg["sample_method"] == 'SPC':
                raise NotImplementedError
            else:
                raise NotImplementedError

            keypoints_list.append(keypoints)
        keypoints = paddle.concat(
            keypoints_list, axis=0)  # (B, M, 3) or (N1 + N2 + ..., 4)
        if len(keypoints.shape) == 3:
            # Flatten (B, M, 3) to (B*M, 4) by prepending the batch index.
            batch_idx = paddle.arange(batch_size).reshape([-1, 1]).tile(
                [1, keypoints.shape[1]]).reshape([-1, 1])
            keypoints = paddle.concat(
                (batch_idx.astype('float32'), keypoints.reshape([-1, 3])),
                axis=1)
        return keypoints

    @staticmethod
    def aggregate_keypoint_features_from_one_source(
            batch_size,
            aggregate_func,
            xyz,
            xyz_features,
            xyz_bs_idxs,
            new_xyz,
            new_xyz_batch_cnt,
            filter_neighbors_with_roi=False,
            radius_of_neighbor=None,
            num_max_points_of_part=200000,
            rois=None):
        """Pool features from one source onto the keypoints.

        Args:
            aggregate_func: stacked-SA layer performing the grouping/pooling
            xyz: (N, 3)
            xyz_features: (N, C)
            xyz_bs_idxs: (N) batch index of each source point
            new_xyz: (M, 3) keypoint coordinates
            new_xyz_batch_cnt: (batch_size), [N1, N2, ...]
            filter_neighbors_with_roi: True/False — drop source points far
                from every RoI before pooling
            radius_of_neighbor: float
            num_max_points_of_part: int
            rois: (batch_size, num_rois, 7 + C)
        Returns:
            pooled_features as produced by ``aggregate_func``.
        """
        xyz_batch_cnt = paddle.zeros((batch_size, ), dtype='int32')
        if filter_neighbors_with_roi:
            point_features = paddle.concat(
                (xyz,
                 xyz_features), axis=-1) if xyz_features is not None else xyz
            point_features_list = []
            for bs_idx in range(batch_size):
                bs_mask = (xyz_bs_idxs == bs_idx)
                _, valid_mask = sample_points_with_roi(
                    rois=rois[bs_idx],
                    points=xyz[bs_mask],
                    sample_radius_with_roi=radius_of_neighbor,
                    num_max_points_of_part=num_max_points_of_part,
                )
                point_features_list.append(point_features[bs_mask][valid_mask])
                xyz_batch_cnt[bs_idx] = valid_mask.sum().astype(
                    xyz_batch_cnt.dtype)

            valid_point_features = paddle.concat(point_features_list, axis=0)
            # Split concatenated [xyz | features] back apart.
            xyz = valid_point_features[:, 0:3]
            xyz_features = valid_point_features[:,
                                                3:] if xyz_features is not None else None
        else:
            for bs_idx in range(batch_size):
                xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum().astype(
                    xyz_batch_cnt.dtype)

        pooled_points, pooled_features = aggregate_func(
            xyz=xyz,
            xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz,
            new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=xyz_features,
        )
        return pooled_features

    def forward(self, batch_dict):
        """Aggregate all configured feature sources onto sampled keypoints.

        Args:
            batch_dict:
                batch_size:
                keypoints: (B, num_keypoints, 3)
                multi_scale_3d_features: {
                        'x_conv4': ...
                    }
                points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
                spatial_features: optional
                spatial_features_stride: optional
        Returns:
            batch_dict updated with:
                point_features: (N, C)
                point_coords: (N, 4)
        """
        keypoints = self.get_sampled_points(batch_dict)

        point_features_list = []
        if 'bev' in self.model_cfg["features_source"]:
            point_bev_features = self.interpolate_from_bev_features(
                keypoints,
                batch_dict['spatial_features'],
                batch_dict['batch_size'],
                bev_stride=batch_dict['spatial_features_stride'])
            point_features_list.append(point_bev_features)

        batch_size = batch_dict['batch_size']

        new_xyz = keypoints[:, 1:4]
        # Per-batch keypoint counts, required by the stacked-SA layers.
        new_xyz_batch_cnt = paddle.zeros((batch_size, ), dtype='int32')
        for k in range(batch_size):
            new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum().astype(
                new_xyz_batch_cnt.dtype)

        if 'raw_points' in self.model_cfg["features_source"]:
            raw_points = batch_dict['points']

            pooled_features = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size,
                aggregate_func=self.sa_rawpoints,
                xyz=raw_points[:, 1:4],
                xyz_features=raw_points[:, 4:]
                if self.num_rawpoint_features > 3 else None,
                xyz_bs_idxs=raw_points[:, 0],
                new_xyz=new_xyz,
                new_xyz_batch_cnt=new_xyz_batch_cnt,
                filter_neighbors_with_roi=self.model_cfg["sa_layer"]
                ['raw_points'].get('filter_neighbor_with_roi', False),
                radius_of_neighbor=self.model_cfg["sa_layer"]['raw_points'].get(
                    'radius_of_neighbor_with_roi', None),
                rois=batch_dict.get('rois', None))
            point_features_list.append(pooled_features)

        for k, src_name in enumerate(self.sa_layer_names):
            # Sparse tensor -> voxel indices (N, 4) and features (N, C).
            cur_coords = batch_dict['multi_scale_3d_features'][
                src_name].indices().transpose([1, 0])
            cur_features = batch_dict['multi_scale_3d_features'][
                src_name].values()

            xyz = get_voxel_centers(
                cur_coords[:, 1:4],
                downsample_strides=self.downsample_stride_map[src_name],
                voxel_size=self.voxel_size,
                point_cloud_range=self.point_cloud_range)

            pooled_features = self.aggregate_keypoint_features_from_one_source(
                batch_size=batch_size,
                aggregate_func=self.sa_layers[k],
                xyz=xyz,
                xyz_features=cur_features,
                xyz_bs_idxs=cur_coords[:, 0],
                new_xyz=new_xyz,
                new_xyz_batch_cnt=new_xyz_batch_cnt,
                filter_neighbors_with_roi=self.model_cfg["sa_layer"]
                [src_name].get('filter_neighbor_with_roi', False),
                radius_of_neighbor=self.model_cfg["sa_layer"][src_name].get(
                    'radius_of_neighbor_with_roi', None),
                rois=batch_dict.get('rois', None))
            point_features_list.append(pooled_features)

        point_features = paddle.concat(point_features_list, axis=-1)

        batch_dict['point_features_before_fusion'] = point_features.reshape(
            (-1, point_features.shape[-1]))
        point_features = self.vsa_point_feature_fusion(
            point_features.reshape((-1, point_features.shape[-1])))

        batch_dict['point_features'] = point_features  # (BxN, C)
        batch_dict['point_coords'] = keypoints  # (BxN, 4)
        return batch_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import bevformer, caddn, centerpoint, iassd, petr, pointpillars, smoke, pv_rcnn, voxel_rcnn
from .bevformer import *
from .caddn import *
from .centerpoint import *
from .iassd import *
from .petr import *
from .pointpillars import *
from .pv_rcnn import *
from .smoke import *
from .dd3d import *
from .voxel_rcnn import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/smoke/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .processor import PostProcessor
from .smoke import SMOKE
from .smoke_coder import SMOKECoder
from .smoke_predictor import SMOKEPredictor
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/smoke/processor.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/lzccccc/SMOKE/blob/master/smoke/modeling/heads/smoke_head/inference.py
Ths copyright is MIT License
"""
import paddle
from paddle import nn
from paddle3d.apis import manager
from paddle3d.models.detection.smoke.smoke_coder import SMOKECoder
from paddle3d.models.layers import (nms_hm, select_point_of_interest,
select_topk)
@manager.MODELS.add_component
class PostProcessor(nn.Layer):
    """SMOKE post-processor.

    Decodes the network's class heatmap and regression map into final 3D
    detections (class, alpha, 2D box, dimensions, location, roty, score).

    Args:
        depth_ref: (mean, scale) reference used to de-normalize depth.
        dim_ref: per-class reference dimensions used to decode sizes.
        reg_head (int): number of regression channels per point (default 10).
        det_threshold (float): score threshold for keeping detections.
        max_detection (int): top-K peaks taken from the heatmap.
        pred_2d (bool): whether to also decode 2D boxes.
    """

    def __init__(self,
                 depth_ref,
                 dim_ref,
                 reg_head=10,
                 det_threshold=0.25,
                 max_detection=50,
                 pred_2d=True):
        super().__init__()
        self.smoke_coder = SMOKECoder(depth_ref, dim_ref)
        self.reg_head = reg_head
        self.max_detection = max_detection
        self.det_threshold = det_threshold
        self.pred_2d = pred_2d

    def export_forward(self, predictions, cam_info):
        """Deploy-time decode path: avoids dynamic shapes (uses ``numel_t``)
        and takes camera intrinsics/down-ratios instead of a trans matrix.

        Args:
            predictions: [heatmap (B, C, H, W), regression (B, reg_head, H, W)].
            cam_info: [K (B, 3, 3), down_ratios].
        Returns:
            (K_det, 14) tensor [cls, alpha, box2d(4), dims(3), loc(3), roty, score].
        """
        pred_heatmap, pred_regression = predictions[0], predictions[1]
        batch = pred_heatmap.shape[0]

        # Peak picking: NMS via max-pool, then top-K per batch.
        heatmap = nms_hm(pred_heatmap)

        topk_dict = select_topk(
            heatmap,
            K=self.max_detection,
        )
        scores, indexs = topk_dict["topk_score"], topk_dict["topk_inds_all"]
        clses, ys = topk_dict["topk_clses"], topk_dict["topk_ys"]
        xs = topk_dict["topk_xs"]

        pred_regression = select_point_of_interest(batch, indexs,
                                                   pred_regression)
        pred_regression_pois = paddle.reshape(
            pred_regression, (numel_t(pred_regression) // 10, 10))
        # yapf: disable
        pred_proj_points = paddle.concat([
            paddle.reshape(xs, (numel_t(xs), 1)),
            paddle.reshape(ys, (numel_t(ys), 1))
        ], axis=1)
        # yapf: enable

        # FIXME: fix hard code here
        # Regression channel layout: [depth(1), proj offset(2), dims(3),
        # orientation(2), bbox size(2)].
        pred_depths_offset = pred_regression_pois[:, 0]
        pred_proj_offsets = pred_regression_pois[:, 1:3]
        pred_dimensions_offsets = pred_regression_pois[:, 3:6]
        pred_orientation = pred_regression_pois[:, 6:8]
        pred_bbox_size = pred_regression_pois[:, 8:10]

        pred_depths = self.smoke_coder.decode_depth(pred_depths_offset)

        pred_locations = self.smoke_coder.decode_location_without_transmat(
            pred_proj_points, pred_proj_offsets, pred_depths, cam_info[0],
            cam_info[1])

        pred_dimensions = self.smoke_coder.decode_dimension(
            clses, pred_dimensions_offsets)
        # we need to change center location to bottom location
        pred_locations[:, 1] += pred_dimensions[:, 1] / 2

        pred_rotys, pred_alphas = self.smoke_coder.decode_orientation(
            pred_orientation, pred_locations)

        box2d = self.smoke_coder.decode_bbox_2d_without_transmat(
            pred_proj_points, pred_bbox_size, cam_info[1])
        # change variables to the same dimension
        clses = paddle.reshape(clses, (-1, 1))
        pred_alphas = paddle.reshape(pred_alphas, (-1, 1))
        pred_rotys = paddle.reshape(pred_rotys, (-1, 1))
        scores = paddle.reshape(scores, (-1, 1))
        # (l, h, w) -> (h, w, l) output ordering convention.
        l, h, w = pred_dimensions.chunk(3, 1)
        pred_dimensions = paddle.concat([h, w, l], axis=1)

        # yapf: disable
        result = paddle.concat([
            clses, pred_alphas, box2d, pred_dimensions, pred_locations,
            pred_rotys, scores
        ], axis=1)
        # yapf: enable

        return result

    def forward(self, predictions, targets):
        """Training/eval decode path.

        Args:
            predictions: [heatmap, regression] network outputs.
            targets (dict): needs "K", "trans_mat", "image_size".
        Returns:
            (K_det, 15) tensor [cls, alpha, box2d(4), dims(3), loc(3), roty,
            score, batch_id], filtered by ``det_threshold``; an empty tensor
            when nothing passes the threshold.
        """
        pred_heatmap, pred_regression = predictions[0], predictions[1]
        batch = pred_heatmap.shape[0]

        heatmap = nms_hm(pred_heatmap)

        topk_dict = select_topk(
            heatmap,
            K=self.max_detection,
        )
        scores, indexs = topk_dict["topk_score"], topk_dict["topk_inds_all"]
        clses, ys = topk_dict["topk_clses"], topk_dict["topk_ys"]
        xs = topk_dict["topk_xs"]

        pred_regression = select_point_of_interest(batch, indexs,
                                                   pred_regression)
        pred_regression_pois = paddle.reshape(pred_regression,
                                              (-1, self.reg_head))
        pred_proj_points = paddle.concat(
            [paddle.reshape(xs, (-1, 1)),
             paddle.reshape(ys, (-1, 1))], axis=1)

        # FIXME: fix hard code here
        # Regression channel layout: [depth(1), proj offset(2), dims(3),
        # orientation(2), bbox size(2)].
        pred_depths_offset = pred_regression_pois[:, 0]
        pred_proj_offsets = pred_regression_pois[:, 1:3]
        pred_dimensions_offsets = pred_regression_pois[:, 3:6]
        pred_orientation = pred_regression_pois[:, 6:8]
        pred_bbox_size = pred_regression_pois[:, 8:10]

        pred_depths = self.smoke_coder.decode_depth(pred_depths_offset)
        pred_locations = self.smoke_coder.decode_location(
            pred_proj_points, pred_proj_offsets, pred_depths, targets["K"],
            targets["trans_mat"])
        pred_dimensions = self.smoke_coder.decode_dimension(
            clses, pred_dimensions_offsets)
        # we need to change center location to bottom location
        pred_locations[:, 1] += pred_dimensions[:, 1] / 2

        pred_rotys, pred_alphas = self.smoke_coder.decode_orientation(
            pred_orientation, pred_locations)

        if self.pred_2d:
            box2d = self.smoke_coder.decode_bbox_2d(
                pred_proj_points, pred_bbox_size, targets["trans_mat"],
                targets["image_size"])
        else:
            box2d = paddle.to_tensor([0, 0, 0, 0])

        # change variables to the same dimension
        clses = paddle.reshape(clses, (-1, 1))
        pred_alphas = paddle.reshape(pred_alphas, (-1, 1))
        pred_rotys = paddle.reshape(pred_rotys, (-1, 1))
        scores = paddle.reshape(scores, (-1, 1))

        # (l, h, w) -> (h, w, l) output ordering convention.
        l, h, w = pred_dimensions.chunk(3, 1)
        pred_dimensions = paddle.concat([h, w, l], axis=1)

        # yapf: disable
        result = paddle.concat([
            clses, pred_alphas, box2d, pred_dimensions, pred_locations,
            pred_rotys, scores
        ], axis=1)
        # yapf: enable

        keep_idx = result[:, -1] > self.det_threshold

        if paddle.sum(keep_idx.astype("int32")) >= 1:
            # Add indexs to determine which sample each box belongs to
            batch_size = targets['K'].shape[0]
            ids = paddle.arange(batch_size, dtype=paddle.float32)
            ids = ids.unsqueeze(0).expand([self.max_detection, batch_size])
            ids = ids.transpose([1, 0]).reshape([-1, 1])
            result = paddle.concat([result, ids], 1)

            # Filter out low confidence boxes
            keep_idx = paddle.nonzero(keep_idx)
            result = paddle.gather(result, keep_idx)
        else:
            result = paddle.to_tensor([])

        return result
# Use numel_t(Tensor) instead of Tensor.numel to avoid shape uncertainty when exporting the model
def numel_t(var):
    """Return the element count implied by ``var.shape``.

    Args:
        var: any object with a static ``shape`` sequence of ints.

    Returns:
        int: product of all dimensions.

    Raises:
        ValueError: if the shape contains a dynamic (-1) dimension — the
            element count would then be undefined at export time. (A plain
            ``assert`` would be silently stripped under ``python -O``.)
    """
    import math
    if -1 in var.shape:
        raise ValueError(
            "numel_t requires a fully static shape, got {}".format(var.shape))
    # math.prod replaces the previous numpy.prod: stdlib-only and returns
    # a plain int instead of numpy.int64.
    return math.prod(var.shape)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/smoke/smoke_predictor.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/lzccccc/SMOKE/blob/master/smoke/modeling/heads/smoke_head/smoke_predictor.py
Ths copyright is MIT License
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.models.layers import group_norm, param_init, sigmoid_hm
@manager.MODELS.add_component
class SMOKEPredictor(nn.Layer):
    """SMOKE Predictor.

    Two-branch prediction head: a class heatmap branch and a regression
    branch whose channels are [depth(1), keypoint offset(2), dims(3),
    orientation(2), bbox size(2)] by default.

    Args:
        num_classes (int): number of detection classes.
        reg_channels (tuple): per-target regression channel counts.
        num_chanels (int): hidden channel width of both heads.
        norm_type (str): "bn" for BatchNorm2D, anything else for group norm.
        in_channels (int): channel count of the backbone feature map.
    """

    def __init__(self,
                 num_classes=3,
                 reg_channels=(1, 2, 3, 2, 2),
                 num_chanels=256,
                 norm_type="gn",
                 in_channels=64):
        super().__init__()
        self.reg_heads = sum(reg_channels)
        head_conv = num_chanels
        norm_func = nn.BatchNorm2D if norm_type == "bn" else group_norm

        # Channel slices of the regression output that get special
        # post-activation treatment in forward().
        self.dim_channel = get_channel_spec(reg_channels, name="dim")
        self.ori_channel = get_channel_spec(reg_channels, name="ori")

        self.class_head = nn.Sequential(
            nn.Conv2D(
                in_channels,
                head_conv,
                kernel_size=3,
                padding=1,
                bias_attr=True), norm_func(head_conv), nn.ReLU(),
            nn.Conv2D(
                head_conv,
                num_classes,
                kernel_size=1,
                padding=1 // 2,
                bias_attr=True))

        # Focal-loss style bias init so initial heatmap activations are low.
        param_init.constant_init(self.class_head[-1].bias, value=-2.19)

        self.regression_head = nn.Sequential(
            nn.Conv2D(
                in_channels,
                head_conv,
                kernel_size=3,
                padding=1,
                bias_attr=True), norm_func(head_conv), nn.ReLU(),
            nn.Conv2D(
                head_conv,
                self.reg_heads,
                kernel_size=1,
                padding=1 // 2,
                bias_attr=True))
        self.init_weight(self.regression_head)

    def forward(self, features):
        """predictor forward

        Args:
            features (paddle.Tensor): smoke backbone output

        Returns:
            list: sigmoid class heatmap and regression map
        """
        head_class = self.class_head(features)
        head_regression = self.regression_head(features)
        head_class = sigmoid_hm(head_class)

        # Dimension offsets are squashed into (-0.5, 0.5).
        offset_dims = head_regression[:, self.dim_channel, :, :].clone()
        head_regression[:, self.
                        dim_channel, :, :] = F.sigmoid(offset_dims) - 0.5

        # Orientation (sin, cos) vector is L2-normalized.
        vector_ori = head_regression[:, self.ori_channel, :, :].clone()
        head_regression[:, self.ori_channel, :, :] = F.normalize(vector_ori)

        return [head_class, head_regression]

    def init_weight(self, block):
        """Zero-initialize all conv biases in ``block``."""
        for sublayer in block.sublayers():
            if isinstance(sublayer, nn.Conv2D):
                param_init.constant_init(sublayer.bias, value=0.0)
def get_channel_spec(reg_channels, name):
    """Get the channel slice for the dim or ori regression target.

    Args:
        reg_channels (tuple): regress channels, default (1, 2, 3, 2) for
            (depth_offset, keypoint_offset, dims, ori)
        name (str): "dim" or "ori"

    Returns:
        slice: channel range [start, stop) within the regression output.

    Raises:
        ValueError: for any other ``name`` (previously this fell through
            and raised an opaque UnboundLocalError at the return).
    """
    if name == "dim":
        s = sum(reg_channels[:2])
        e = sum(reg_channels[:3])
    elif name == "ori":
        s = sum(reg_channels[:3])
        e = sum(reg_channels[:4])
    else:
        raise ValueError(
            "name must be 'dim' or 'ori', got {!r}".format(name))

    return slice(s, e, 1)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/smoke/smoke_coder.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/lzccccc/SMOKE/blob/master/smoke/modeling/smoke_coder.py
Ths copyright is MIT License
"""
import numpy as np
import paddle
from paddle3d.models.layers.layer_libs import gather
class SMOKECoder(paddle.nn.Layer):
"""SMOKE Coder class
"""
def __init__(self, depth_ref, dim_ref):
super().__init__()
self.depth_decoder = DepthDecoder(depth_ref)
self.dimension_decoder = DimensionDecoder(dim_ref)
@staticmethod
def rad_to_matrix(rotys, N):
"""decode rotys to R_matrix
Args:
rotys (Tensor): roty of objects
N (int): num of batch
Returns:
Tensor: R matrix with shape (N, 3, 3)
R = [[cos(r), 0, sin(r)], [0, 1, 0], [-cos(r), 0, sin(r)]]
"""
cos, sin = rotys.cos(), rotys.sin()
i_temp = paddle.to_tensor([[1, 0, 1], [0, 1, 0], [-1, 0, 1]],
dtype="float32")
ry = paddle.reshape(i_temp.tile([N, 1]), (N, -1, 3))
ry[:, 0, 0] *= cos
ry[:, 0, 2] *= sin
ry[:, 2, 0] *= sin
ry[:, 2, 2] *= cos
return ry
def encode_box3d(self, rotys, dims, locs):
"""
construct 3d bounding box for each object.
Args:
rotys: rotation in shape N
dims: dimensions of objects
locs: locations of objects
Returns:
"""
if len(rotys.shape) == 2:
rotys = rotys.flatten()
if len(dims.shape) == 3:
dims = paddle.reshape(dims, (-1, 3))
if len(locs.shape) == 3:
locs = paddle.reshape(locs, (-1, 3))
N = rotys.shape[0]
ry = self.rad_to_matrix(rotys, N)
dims = paddle.reshape(dims, (-1, 1)).tile([1, 8])
dims[::3, :4] = 0.5 * dims[::3, :4]
dims[1::3, :4] = 0.
dims[2::3, :4] = 0.5 * dims[2::3, :4]
dims[::3, 4:] = -0.5 * dims[::3, 4:]
dims[1::3, 4:] = -dims[1::3, 4:]
dims[2::3, 4:] = -0.5 * dims[2::3, 4:]
index = paddle.to_tensor([[4, 0, 1, 2, 3, 5, 6, 7],
[4, 5, 0, 1, 6, 7, 2, 3],
[4, 5, 6, 0, 1, 2, 3, 7]]).tile([N, 1])
index = index.unsqueeze(2)
box_3d_object = gather(dims, index)
box_3d = paddle.matmul(ry, paddle.reshape(box_3d_object, (N, 3, -1)))
box_3d += locs.unsqueeze(-1).tile((1, 1, 8))
return box_3d
def decode_depth(self, depths_offset):
"""
Transform depth offset to depth
"""
return self.depth_decoder(depths_offset)
def decode_location(self, points, points_offset, depths, Ks, trans_mats):
"""
retrieve objects location in camera coordinate based on projected points
Args:
points: projected points on feature map in (x, y)
points_offset: project points offset in (delata_x, delta_y)
depths: object depth z
Ks: camera intrinsic matrix, shape = [N, 3, 3]
trans_mats: transformation matrix from image to feature map, shape = [N, 3, 3]
Returns:
locations: objects location, shape = [N, 3]
"""
# number of points
N = points_offset.shape[0]
# batch size
N_batch = Ks.shape[0]
batch_id = paddle.arange(N_batch).unsqueeze(1)
obj_id = batch_id.tile([1, N // N_batch]).flatten()
trans_mats_inv = trans_mats.inverse()[obj_id]
Ks_inv = Ks.inverse()[obj_id]
points = paddle.reshape(points, (-1, 2))
assert points.shape[0] == N
# int + float -> int, but float + int -> float
# proj_points = points + points_offset
proj_points = points_offset + points
# transform project points in homogeneous form.
proj_points_extend = paddle.concat(
(proj_points.astype("float32"), paddle.ones((N, 1))), axis=1)
# expand project points as [N, 3, 1]
proj_points_extend = proj_points_extend.unsqueeze(-1)
# transform project points back on image
proj_points_img = paddle.matmul(trans_mats_inv, proj_points_extend)
# with depth
proj_points_img = proj_points_img * paddle.reshape(depths, (N, -1, 1))
# transform image coordinates back to object locations
locations = paddle.matmul(Ks_inv, proj_points_img)
return locations.squeeze(2)
def decode_location_without_transmat(self,
points,
points_offset,
depths,
Ks,
down_ratios=None):
"""
retrieve objects location in camera coordinate based on projected points
Args:
points: projected points on feature map in (x, y)
points_offset: project points offset in (delata_x, delta_y)
depths: object depth z
Ks: camera intrinsic matrix, shape = [N, 3, 3]
trans_mats: transformation matrix from image to feature map, shape = [N, 3, 3]
Returns:
locations: objects location, shape = [N, 3]
"""
if down_ratios is None:
down_ratios = [(1, 1)]
# number of points
N = points_offset.shape[0]
# batch size
N_batch = Ks.shape[0]
#batch_id = paddle.arange(N_batch).unsqueeze(1)
batch_id = paddle.arange(N_batch).reshape((N_batch, 1))
# obj_id = batch_id.repeat(1, N // N_batch).flatten()
obj_id = batch_id.tile([1, N // N_batch]).flatten()
Ks_inv = Ks.inverse()[obj_id]
down_ratio = down_ratios[0]
points = paddle.reshape(points, (numel_t(points) // 2, 2))
proj_points = points + points_offset
# trans point from heatmap to ori image, down_sample * resize_scale
proj_points[:, 0] = down_ratio[0] * proj_points[:, 0]
proj_points[:, 1] = down_ratio[1] * proj_points[:, 1]
# transform project points in homogeneous form.
proj_points_extend = paddle.concat(
[proj_points, paddle.ones((N, 1))], axis=1)
# expand project points as [N, 3, 1]
proj_points_extend = proj_points_extend.unsqueeze(-1)
# with depth
proj_points_img = proj_points_extend * paddle.reshape(
depths, (N, numel_t(depths) // N, 1))
# transform image coordinates back to object locations
locations = paddle.matmul(Ks_inv, proj_points_img)
return locations.squeeze(2)
def decode_bbox_2d_without_transmat(self,
points,
bbox_size,
down_ratios=None):
"""get bbox 2d
Args:
points (paddle.Tensor, (50, 2)): 2d center
bbox_size (paddle.Tensor, (50, 2)): 2d bbox height and width
trans_mats (paddle.Tensor, (1, 3, 3)): transformation coord from img to feature map
"""
if down_ratios is None:
down_ratios = [(1, 1)]
# number of points
N = bbox_size.shape[0]
points = paddle.reshape(points, (-1, 2))
assert points.shape[0] == N
box2d = paddle.zeros((N, 4))
down_ratio = down_ratios[0]
box2d[:, 0] = (points[:, 0] - bbox_size[:, 0] / 2)
box2d[:, 1] = (points[:, 1] - bbox_size[:, 1] / 2)
box2d[:, 2] = (points[:, 0] + bbox_size[:, 0] / 2)
box2d[:, 3] = (points[:, 1] + bbox_size[:, 1] / 2)
box2d[:, 0] = down_ratio[0] * box2d[:, 0]
box2d[:, 1] = down_ratio[1] * box2d[:, 1]
box2d[:, 2] = down_ratio[0] * box2d[:, 2]
box2d[:, 3] = down_ratio[1] * box2d[:, 3]
return box2d
def decode_dimension(self, cls_id, dims_offset):
"""
retrieve object dimensions
Args:
cls_id: each object id
dims_offset: dimension offsets, shape = (N, 3)
Returns:
"""
return self.dimension_decoder(cls_id, dims_offset)
def decode_orientation(self, vector_ori, locations, flip_mask=None):
"""
retrieve object orientation
Args:
vector_ori: local orientation in [sin, cos] format
locations: object location
Returns: for training we only need roty
for testing we need both alpha and roty
"""
locations = paddle.reshape(locations, (-1, 3))
rays = paddle.atan(locations[:, 0] / (locations[:, 2] + 1e-7))
alphas = paddle.atan(vector_ori[:, 0] / (vector_ori[:, 1] + 1e-7))
PI = 3.14159
cos_pos_diff = (vector_ori[:, 1] >= 0).astype('float32')
cos_pos_diff = (cos_pos_diff * 2 - 1) * PI / 2
alphas -= cos_pos_diff
# retrieve object rotation y angle.
rotys = alphas + rays
# in training time, it does not matter if angle lies in [-PI, PI]
# it matters at inference time? todo: does it really matter if it exceeds.
larger_idx = (rotys > PI).astype('float32')
small_idx = (rotys < -PI).astype('float32')
diff = larger_idx * 2 * PI + small_idx * (-2) * PI
rotys -= diff
if flip_mask is not None:
fm = flip_mask.astype("float32").flatten()
rotys_flip = fm * rotys
rotys_flip_diff = (rotys_flip > 0).astype('float32')
rotys_flip_diff = (rotys_flip_diff * 2 - 1) * PI
rotys_flip -= rotys_flip_diff
rotys_all = fm * rotys_flip + (1 - fm) * rotys
return rotys_all
else:
return rotys, alphas
    def decode_bbox_2d(self, points, bbox_size, trans_mats, img_size):
        """get bbox 2d

        Args:
            points (paddle.Tensor, (50, 2)): 2d center
            bbox_size (paddle.Tensor, (50, 2)): 2d bbox height and width
            trans_mats (paddle.Tensor, (1, 3, 3)): transformation coord from img to feature map
            img_size: image size used to clip the corners; flattened to
                (w, h) order below — NOTE(review): inferred from the clip
                indexing; confirm against the caller.
        """
        img_size = img_size.flatten()
        # number of points
        N = bbox_size.shape[0]
        # batch size
        N_batch = trans_mats.shape[0]
        # Map each object to its batch index so that the matching inverse
        # transform can be gathered per object (assumes N % N_batch == 0).
        batch_id = paddle.arange(N_batch).unsqueeze(1)
        obj_id = batch_id.tile([1, N // N_batch]).flatten()
        trans_mats_inv = trans_mats.inverse()[obj_id]
        points = paddle.reshape(points, (-1, 2))
        assert points.shape[0] == N
        box2d = paddle.zeros([N, 4])
        # Corners in feature-map coordinates: (x1, y1, x2, y2).
        box2d[:, 0] = (points[:, 0] - bbox_size[:, 0] / 2)
        box2d[:, 1] = (points[:, 1] - bbox_size[:, 1] / 2)
        box2d[:, 2] = (points[:, 0] + bbox_size[:, 0] / 2)
        box2d[:, 3] = (points[:, 1] + bbox_size[:, 1] / 2)
        # transform project points in homogeneous form.
        proj_points_extend_top = paddle.concat(
            (box2d[:, :2], paddle.ones([N, 1])), axis=1)
        proj_points_extend_bot = paddle.concat(
            (box2d[:, 2:], paddle.ones([N, 1])), axis=1)
        # expand project points as [N, 3, 1]
        proj_points_extend_top = proj_points_extend_top.unsqueeze(-1)
        proj_points_extend_bot = proj_points_extend_bot.unsqueeze(-1)
        # transform project points back on image
        proj_points_img_top = paddle.matmul(trans_mats_inv,
                                            proj_points_extend_top)
        proj_points_img_bot = paddle.matmul(trans_mats_inv,
                                            proj_points_extend_bot)
        box2d[:, :2] = proj_points_img_top.squeeze(2)[:, :2]
        box2d[:, 2:] = proj_points_img_bot.squeeze(2)[:, :2]
        # Clip x coordinates to [0, w] and y coordinates to [0, h].
        box2d[:, ::2] = box2d[:, ::2].clip(0, img_size[0])
        box2d[:, 1::2] = box2d[:, 1::2].clip(0, img_size[1])
        return box2d
class DepthDecoder(paddle.nn.Layer):
    """Maps a regressed depth offset to metric depth with a fixed linear
    transform: depth = offset * scale + shift.
    """

    def __init__(self, depth_ref):
        super().__init__()
        # depth_ref = (shift, scale) reference pair.
        self.depth_ref = paddle.to_tensor(depth_ref)

    def forward(self, depths_offset):
        """Transform a depth offset into an absolute depth value."""
        shift = self.depth_ref[0]
        scale = self.depth_ref[1]
        return depths_offset * scale + shift
class DimensionDecoder(paddle.nn.Layer):
    """Recovers metric 3D dimensions from log-space offsets scaled by
    per-class reference dimensions.
    """

    def __init__(self, dim_ref):
        super().__init__()
        # Per-class reference dimensions, indexed by class id.
        self.dim_ref = paddle.to_tensor(dim_ref)

    def forward(self, cls_id, dims_offset):
        """Decode object dimensions.

        Args:
            cls_id: class id of each object; indexes the reference table.
            dims_offset: log-space dimension offsets, shape (N, 3).

        Returns:
            Decoded dimensions, shape (N, 3).
        """
        class_ids = cls_id.flatten().astype('int32')
        reference = self.dim_ref[class_ids]
        return paddle.exp(dims_offset) * reference
# Use numel_t(Tensor) instead of Tensor.numel to avoid shape uncertainty when exporting the model
def numel_t(var):
    """Return the number of elements implied by ``var.shape``.

    Works on the static shape so the value stays deterministic at model
    export time.

    Args:
        var: any object exposing a fully-specified ``shape`` sequence.

    Returns:
        int: product of the shape entries (1 for a 0-d shape).

    Raises:
        AssertionError: if the shape contains a dynamic (-1) dimension.
    """
    # math.prod (stdlib) replaces the previous numpy import: same result,
    # no third-party dependency, and a plain int instead of np.int64.
    from math import prod
    assert -1 not in var.shape
    return prod(var.shape)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/smoke/smoke_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/lzccccc/SMOKE/blob/master/smoke/modeling/heads/smoke_head/loss.py
Ths copyright is MIT License
"""
import copy
import os
import cv2
import numpy as np
import paddle
import paddle.nn as nn
from paddle.nn import functional as F
from paddle3d.apis import manager
from paddle3d.models.detection.smoke.smoke_coder import SMOKECoder
from paddle3d.models.layers import select_point_of_interest
from paddle3d.models.losses import FocalLoss
@manager.LOSSES.add_component
class SMOKELossComputation(object):
    """SMOKE training loss.

    Converts targets and predictions to heatmaps and regression tensors,
    then combines a focal heatmap loss with (optionally disentangled) L1
    regression losses.
    """

    def __init__(self,
                 depth_ref,
                 dim_ref,
                 reg_loss="DisL1",
                 loss_weight=(1., 10.),
                 max_objs=50):
        # SMOKECoder maps regressed offsets back to metric depth,
        # dimensions, location and orientation.
        self.smoke_coder = SMOKECoder(depth_ref, dim_ref)
        self.cls_loss = FocalLoss(alpha=2, beta=4)
        # "DisL1": disentangled L1 on orientation/dimension/location;
        # "L1": single L1 on the fully decoded 3D box.
        self.reg_loss = reg_loss
        # (heatmap loss weight, regression normalization factor).
        self.loss_weight = loss_weight
        self.max_objs = max_objs

    def prepare_targets(self, targets):
        """get heatmaps, regressions and 3D infos from targets
        """
        heatmaps = targets["hm"]
        regression = targets["reg"]
        cls_ids = targets["cls_ids"]
        proj_points = targets["proj_p"]
        dimensions = targets["dimensions"]
        locations = targets["locations"]
        rotys = targets["rotys"]
        trans_mat = targets["trans_mat"]
        K = targets["K"]
        reg_mask = targets["reg_mask"]
        flip_mask = targets["flip_mask"]
        bbox_size = targets["bbox_size"]
        c_offsets = targets["c_offsets"]
        return heatmaps, regression, dict(
            cls_ids=cls_ids,
            proj_points=proj_points,
            dimensions=dimensions,
            locations=locations,
            rotys=rotys,
            trans_mat=trans_mat,
            K=K,
            reg_mask=reg_mask,
            flip_mask=flip_mask,
            bbox_size=bbox_size,
            c_offsets=c_offsets)

    def prepare_predictions(self, targets_variables, pred_regression):
        """decode model predictions
        """
        batch, channel = pred_regression.shape[0], pred_regression.shape[1]
        targets_proj_points = targets_variables["proj_points"]
        # obtain prediction from points of interests
        pred_regression_pois = select_point_of_interest(
            batch, targets_proj_points, pred_regression)
        pred_regression_pois = paddle.reshape(pred_regression_pois,
                                              (-1, channel))
        # FIXME: fix hard code here
        # Channel layout: [depth, proj offset(2), dims(3), sin/cos(2), hw(2)].
        pred_depths_offset = pred_regression_pois[:, 0]
        pred_proj_offsets = pred_regression_pois[:, 1:3]
        pred_dimensions_offsets = pred_regression_pois[:, 3:6]
        pred_orientation = pred_regression_pois[:, 6:8]
        pred_bboxsize = pred_regression_pois[:, 8:10]
        pred_depths = self.smoke_coder.decode_depth(pred_depths_offset)
        pred_locations = self.smoke_coder.decode_location(
            targets_proj_points, pred_proj_offsets, pred_depths,
            targets_variables["K"], targets_variables["trans_mat"])
        pred_dimensions = self.smoke_coder.decode_dimension(
            targets_variables["cls_ids"],
            pred_dimensions_offsets,
        )
        # we need to change center location to bottom location
        pred_locations[:, 1] += pred_dimensions[:, 1] / 2
        pred_rotys = self.smoke_coder.decode_orientation(
            pred_orientation, targets_variables["locations"],
            targets_variables["flip_mask"])
        if self.reg_loss == "DisL1":
            # Disentangled boxes: only one predicted quantity varies per
            # box, the other two come from ground truth.
            pred_box3d_rotys = self.smoke_coder.encode_box3d(
                pred_rotys, targets_variables["dimensions"],
                targets_variables["locations"])
            pred_box3d_dims = self.smoke_coder.encode_box3d(
                targets_variables["rotys"], pred_dimensions,
                targets_variables["locations"])
            pred_box3d_locs = self.smoke_coder.encode_box3d(
                targets_variables["rotys"], targets_variables["dimensions"],
                pred_locations)
            return dict(
                ori=pred_box3d_rotys,
                dim=pred_box3d_dims,
                loc=pred_box3d_locs,
                bbox=pred_bboxsize,
            )
        elif self.reg_loss == "L1":
            pred_box_3d = self.smoke_coder.encode_box3d(
                pred_rotys, pred_dimensions, pred_locations)
            return pred_box_3d

    def __call__(self, predictions, targets):
        # NOTE(review): only the "DisL1" branch returns a value; with
        # reg_loss == "L1" this implicitly returns None — confirm intended.
        pred_heatmap, pred_regression = predictions[0], predictions[1]
        targets_heatmap, targets_regression, targets_variables \
            = self.prepare_targets(targets)
        predict_boxes3d = self.prepare_predictions(targets_variables,
                                                   pred_regression)
        hm_loss = self.cls_loss(pred_heatmap,
                                targets_heatmap) * self.loss_weight[0]
        targets_regression = paddle.reshape(
            targets_regression,
            (-1, targets_regression.shape[2], targets_regression.shape[3]))
        # Mask out padded objects before the L1 losses.
        reg_mask = targets_variables["reg_mask"].astype("float32").flatten()
        reg_mask = paddle.reshape(reg_mask, (-1, 1, 1))
        reg_mask = reg_mask.expand_as(targets_regression)
        if self.reg_loss == "DisL1":
            reg_loss_ori = F.l1_loss(
                predict_boxes3d["ori"] * reg_mask,
                targets_regression * reg_mask,
                reduction="sum") / (self.loss_weight[1] * self.max_objs)
            reg_loss_dim = F.l1_loss(
                predict_boxes3d["dim"] * reg_mask,
                targets_regression * reg_mask,
                reduction="sum") / (self.loss_weight[1] * self.max_objs)
            reg_loss_loc = F.l1_loss(
                predict_boxes3d["loc"] * reg_mask,
                targets_regression * reg_mask,
                reduction="sum") / (self.loss_weight[1] * self.max_objs)
            reg_loss_size = F.l1_loss(
                predict_boxes3d["bbox"],
                paddle.reshape(targets_variables["bbox_size"],
                               (-1, targets_variables["bbox_size"].shape[-1])),
                reduction="sum") / (self.loss_weight[1] * self.max_objs)
            # NOTE(review): this dict is built but never returned or logged.
            losses = dict(
                hm_loss=hm_loss,
                reg_loss=reg_loss_ori + reg_loss_dim + reg_loss_loc,
                size_loss=reg_loss_size)
            return hm_loss + reg_loss_ori + reg_loss_dim + reg_loss_loc + reg_loss_size
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/smoke/smoke.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Tuple
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes2D, BBoxes3D, CoordMode
from paddle3d.models.base import BaseMonoModel
from paddle3d.models.detection.smoke.processor import PostProcessor
from paddle3d.models.detection.smoke.smoke_loss import SMOKELossComputation
from paddle3d.sample import Sample
from paddle3d.utils.logger import logger
@manager.MODELS.add_component
class SMOKE(BaseMonoModel):
    """SMOKE monocular 3D detector: backbone -> head -> post-processing,
    with a disentangled-L1 loss for training.
    """

    def __init__(self,
                 backbone,
                 head,
                 depth_ref: Tuple,
                 dim_ref: Tuple,
                 max_detection: int = 50,
                 pred_2d: bool = True,
                 box_with_velocity: bool = False):
        super().__init__(
            box_with_velocity=box_with_velocity,
            need_camera_to_image=True,
            need_lidar_to_camera=False,
            need_down_ratios=True)
        self.backbone = backbone
        self.heads = head
        self.max_detection = max_detection
        self.init_weight()
        self.loss_computation = SMOKELossComputation(
            depth_ref=depth_ref,
            dim_ref=dim_ref,
            reg_loss="DisL1",
            loss_weight=[1., 10.],
            max_objs=max_detection)
        self.post_process = PostProcessor(
            depth_ref=depth_ref,
            dim_ref=dim_ref,
            reg_head=self.heads.reg_heads,
            max_detection=max_detection,
            pred_2d=pred_2d)

    def export_forward(self, samples):
        """Deploy-time forward: images -> head outputs -> decoded boxes."""
        images = samples['images']
        features = self.backbone(images)
        # Some backbones return multi-level features; keep the last level.
        if isinstance(features, (list, tuple)):
            features = features[-1]
        predictions = self.heads(features)
        return self.post_process.export_forward(
            predictions, [samples['trans_cam_to_img'], samples['down_ratios']])

    def train_forward(self, samples):
        """Training forward; returns {'loss': scalar loss}."""
        images = samples['data']
        features = self.backbone(images)
        if isinstance(features, (list, tuple)):
            features = features[-1]
        predictions = self.heads(features)
        loss = self.loss_computation(predictions, samples['target'])
        return {'loss': loss}

    def test_forward(self, samples):
        """Evaluation forward; returns {'preds': [Sample, ...]} per image."""
        images = samples['data']
        features = self.backbone(images)
        if isinstance(features, (list, tuple)):
            features = features[-1]
        predictions = self.heads(features)
        bs = predictions[0].shape[0]
        predictions = self.post_process(predictions, samples['target'])
        res = [
            self._parse_results_to_sample(predictions, samples, i)
            for i in range(bs)
        ]
        return {'preds': res}

    def init_weight(self, bias_lr_factor=2):
        """Give every bias parameter a larger learning-rate multiplier."""
        for sublayer in self.sublayers():
            if hasattr(sublayer, 'bias') and sublayer.bias is not None:
                sublayer.bias.optimize_attr['learning_rate'] = bias_lr_factor

    def _parse_results_to_sample(self, results: paddle.Tensor, sample: dict,
                                 index: int):
        """Convert the flat result tensor into a Sample for image `index`.

        NOTE(review): column layout inferred from the indexing below
        (col 14 = batch index, col 13 = score, col 0 = class, cols 2:6 =
        2D box); confirm against PostProcessor.
        """
        ret = Sample(sample['path'][index], sample['modality'][index])
        ret.meta.update(
            {key: value[index]
             for key, value in sample['meta'].items()})
        if 'calibs' in sample:
            ret.calibs = [
                sample['calibs'][i][index]
                for i in range(len(sample['calibs']))
            ]
        if results.shape[0] != 0:
            # Keep rows belonging to this image, then drop the index column.
            results = results[results[:, 14] == index][:, :14]
            results = results.numpy()
            clas = results[:, 0]
            bboxes_2d = BBoxes2D(results[:, 2:6])
            # TODO: fix hard code here
            bboxes_3d = BBoxes3D(
                results[:, [9, 10, 11, 8, 6, 7, 12]],
                coordmode=CoordMode.KittiCamera,
                origin=(0.5, 1, 0.5),
                rot_axis=1)
            confidences = results[:, 13]
            ret.confidences = confidences
            ret.bboxes_2d = bboxes_2d
            ret.bboxes_3d = bboxes_3d
            ret.labels = clas
        return ret

    @property
    def inputs(self) -> List[dict]:
        # Input specs used when exporting the model.
        images = {
            'name': 'images',
            'dtype': 'float32',
            'shape': [1, 3, self.image_height, self.image_width]
        }
        res = [images]
        intrinsics = {
            'name': 'trans_cam_to_img',
            'dtype': 'float32',
            'shape': [1, 3, 3]
        }
        res.append(intrinsics)
        down_ratios = {
            'name': 'down_ratios',
            'dtype': 'float32',
            'shape': [1, 2]
        }
        res.append(down_ratios)
        return res

    @property
    def outputs(self) -> List[dict]:
        # Output spec: up to max_detection rows of 14 values each.
        data = {
            'name': 'smoke_output',
            'dtype': 'float32',
            'shape': [self.max_detection, 14]
        }
        return [data]
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/petr/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .petr3d import Petr3D
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/petr/petr3d.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
# Copyright (c) 2021 Wang, Yue
import os
from os import path as osp
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from PIL import Image
from paddle3d.apis import manager
from paddle3d.models.base import BaseMultiViewModel
from paddle3d.geometries import BBoxes3D
from paddle3d.sample import Sample, SampleMeta
from paddle3d.utils import dtype2float32
class GridMask(nn.Layer):
    """Grid-mask style augmentation: zeroes (or keeps, mode=1) a regular
    grid of stripes on the input image with probability ``prob``.
    """

    def __init__(self,
                 use_h,
                 use_w,
                 rotate=1,
                 offset=False,
                 ratio=0.5,
                 mode=0,
                 prob=1.):
        super(GridMask, self).__init__()
        self.use_h = use_h          # mask horizontal stripes
        self.use_w = use_w          # mask vertical stripes
        self.rotate = rotate        # upper bound for random mask rotation
        self.offset = offset        # fill masked pixels with random offset
        self.ratio = ratio          # stripe width / grid period ratio
        self.mode = mode            # 0: zero out stripes, 1: keep stripes
        self.st_prob = prob         # base probability (for scheduling)
        self.prob = prob

    def set_prob(self, epoch, max_epoch):
        # Linearly ramp the apply-probability over training.
        self.prob = self.st_prob * epoch / max_epoch  #+ 1.#0.5

    def forward(self, x):
        # Skip augmentation at eval time or with probability 1 - prob.
        if np.random.rand() > self.prob or not self.training:
            return x
        n, c, h, w = x.shape
        x = x.reshape([-1, h, w])
        # Build an oversized mask so a rotated crop still covers h x w.
        hh = int(1.5 * h)
        ww = int(1.5 * w)
        d = np.random.randint(2, h)  # grid period
        # Stripe width, clamped to [1, d-1].
        self.l = min(max(int(d * self.ratio + 0.5), 1), d - 1)
        mask = np.ones((hh, ww), np.float32)
        st_h = np.random.randint(d)  # random grid phase (rows)
        st_w = np.random.randint(d)  # random grid phase (cols)
        if self.use_h:
            for i in range(hh // d):
                s = d * i + st_h
                t = min(s + self.l, hh)
                mask[s:t, :] *= 0
        if self.use_w:
            for i in range(ww // d):
                s = d * i + st_w
                t = min(s + self.l, ww)
                mask[:, s:t] *= 0
        # NOTE(review): with the default rotate=1, randint(1) is always 0,
        # so rotation is effectively disabled — confirm intended.
        r = np.random.randint(self.rotate)
        mask = Image.fromarray(np.uint8(mask))
        mask = mask.rotate(r)
        mask = np.asarray(mask)
        # Center-crop the oversized mask back to h x w.
        mask = mask[(hh - h) // 2:(hh - h) // 2 +
                    h, (ww - w) // 2:(ww - w) // 2 + w]
        mask = paddle.to_tensor(mask).astype('float32')
        if self.mode == 1:
            mask = 1 - mask
        mask = mask.expand_as(x)
        if self.offset:
            # Replace masked pixels with random values in (-1, 1).
            offset = paddle.to_tensor(
                2 * (np.random.rand(h, w) - 0.5)).astype('float32')
            x = x * mask + offset * (1 - mask)
        else:
            x = x * mask
        return x.reshape([n, c, h, w])
def bbox3d2result(bboxes, scores, labels, attrs=None):
    """Pack detection outputs into a result dict, moving tensors to CPU.

    Args:
        bboxes: predicted 3D boxes.
        scores: per-box confidence scores.
        labels: per-box class labels.
        attrs: optional per-box attributes.

    Returns:
        dict with keys 'boxes_3d', 'scores_3d', 'labels_3d' and, when
        ``attrs`` is given, 'attrs_3d'.
    """
    result = {
        'boxes_3d': bboxes.cpu(),
        'scores_3d': scores.cpu(),
        'labels_3d': labels.cpu(),
    }
    if attrs is not None:
        result['attrs_3d'] = attrs.cpu()
    return result
@manager.MODELS.add_component
class Petr3D(BaseMultiViewModel):
    """PETR multi-view 3D detector: image backbone -> neck -> PETR head.

    Supports optional GridMask augmentation and an optional multi-scale
    feature-extraction path (``us_ms``).
    """

    def __init__(self,
                 use_grid_mask=False,
                 backbone=None,
                 neck=None,
                 pts_bbox_head=None,
                 img_roi_head=None,
                 img_rpn_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 use_recompute=False,
                 us_ms=False,
                 multi_scale=None,
                 box_with_velocity: bool = False):
        # The temporal variant (head.with_time) consumes two frames of
        # 6 cameras each.
        num_cameras = 12 if pts_bbox_head.with_time else 6
        super(Petr3D, self).__init__(
            box_with_velocity=box_with_velocity,
            num_cameras=num_cameras,
            need_timestamp=pts_bbox_head.with_time)
        self.pts_bbox_head = pts_bbox_head
        self.backbone = backbone
        self.neck = neck
        self.use_grid_mask = use_grid_mask
        self.use_recompute = use_recompute
        self.us_ms = us_ms
        if self.us_ms:
            self.multi_scale = multi_scale
        if use_grid_mask:
            self.grid_mask = GridMask(
                True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7)
        self.init_weight()

    def init_weight(self, bias_lr_factor=0.1):
        """Scale down the backbone learning rate and init the head."""
        for _, param in self.backbone.named_parameters():
            param.optimize_attr['learning_rate'] = bias_lr_factor
        self.pts_bbox_head.init_weights()

    def extract_img_feat(self, img, img_metas):
        """Extract features of images.

        Folds the camera dimension into the batch, optionally applies
        GridMask, runs the backbone (+neck), then restores the camera
        dimension on every output level.
        """
        if isinstance(img, list):
            img = paddle.stack(img, axis=0)
        B = img.shape[0]
        if img is not None:
            input_shape = img.shape[-2:]
            # update real input shape of each single img
            if not getattr(self, 'in_export_mode', False):
                for img_meta in img_metas:
                    img_meta.update(input_shape=input_shape)
            if img.dim() == 5:
                if img.shape[0] == 1 and img.shape[1] != 1:
                    if getattr(self, 'in_export_mode', False):
                        img = img.squeeze()
                    else:
                        img.squeeze_()
                else:
                    # Merge batch and camera dims: (B, N, C, H, W) -> (B*N, C, H, W).
                    B, N, C, H, W = img.shape
                    img = img.reshape([B * N, C, H, W])
            if self.use_grid_mask:
                img = self.grid_mask(img)
            if self.us_ms:
                # Multi-scale path: run the backbone per scale and fuse.
                # BUGFIX: removed the dead `ms_img` accumulator that
                # appended itself (ms_img.append(ms_img)) and was never read.
                img_feats = []
                for scale in self.multi_scale:
                    s_img = F.interpolate(
                        img,
                        scale_factor=scale,
                        mode='bilinear',
                        align_corners=True)
                    img_feat = self.backbone(s_img)
                    if isinstance(img_feat, dict):
                        img_feat = list(img_feat.values())
                    img_feats.append(img_feat)
                if len(self.multi_scale) > 1:
                    for i, scale in enumerate(self.multi_scale):
                        img_feats[i] = self.neck(img_feats[i])
                    # Upsample lower-resolution levels to the largest scale
                    # and concatenate along the channel axis.
                    if len(self.multi_scale) == 2:
                        img_feats = [
                            paddle.concat((img_feats[1][-2],
                                           F.interpolate(
                                               img_feats[0][-2],
                                               scale_factor=self.multi_scale[1]
                                               / self.multi_scale[0],
                                               mode='bilinear',
                                               align_corners=True)), 1)
                        ]
                    if len(self.multi_scale) == 3:
                        img_feats = [
                            paddle.concat((img_feats[2][-2],
                                           F.interpolate(
                                               img_feats[0][-2],
                                               scale_factor=self.multi_scale[2]
                                               / self.multi_scale[0],
                                               mode='bilinear',
                                               align_corners=True),
                                           F.interpolate(
                                               img_feats[1][-2],
                                               scale_factor=self.multi_scale[2]
                                               / self.multi_scale[1],
                                               mode='bilinear',
                                               align_corners=True)), 1)
                        ]
                else:
                    img_feats = self.neck(img_feats[-1])
            else:
                img_feats = self.backbone(img)
                if isinstance(img_feats, dict):
                    img_feats = list(img_feats.values())
                img_feats = self.neck(img_feats)
        else:
            return None
        # Restore the camera dimension: (B*N, C, H, W) -> (B, N, C, H, W).
        img_feats_reshaped = []
        for img_feat in img_feats:
            BN, C, H, W = img_feat.shape
            img_feats_reshaped.append(
                img_feat.reshape([B, int(BN / B), C, H, W]))
        return img_feats_reshaped

    def extract_feat(self, img, img_metas):
        """Extract features from images and points."""
        img_feats = self.extract_img_feat(img, img_metas)
        return img_feats

    def forward_pts_train(self,
                          pts_feats,
                          gt_bboxes_3d,
                          gt_labels_3d,
                          img_metas,
                          gt_bboxes_ignore=None):
        """Run the head on image features and compute its losses."""
        outs = self.pts_bbox_head(pts_feats, img_metas)
        loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs]
        losses = self.pts_bbox_head.loss(*loss_inputs)
        return losses

    def train_forward(self,
                      samples=None,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      img_depth=None,
                      img_mask=None):
        """Training forward; returns {'loss': dict of head losses}."""
        self.backbone.train()
        if samples is not None:
            img_metas = samples['meta']
            img = samples['img']
            gt_labels_3d = samples['gt_labels_3d']
            gt_bboxes_3d = samples['gt_bboxes_3d']
        if hasattr(self, 'amp_cfg_'):
            # Mixed-precision feature extraction; losses stay in fp32.
            with paddle.amp.auto_cast(**self.amp_cfg_):
                img_feats = self.extract_feat(img=img, img_metas=img_metas)
            img_feats = dtype2float32(img_feats)
        else:
            img_feats = self.extract_feat(img=img, img_metas=img_metas)
        losses = dict()
        losses_pts = self.forward_pts_train(
            img_feats, gt_bboxes_3d, gt_labels_3d, img_metas, gt_bboxes_ignore)
        losses.update(losses_pts)
        return dict(loss=losses)

    def test_forward(self, samples, img=None, **kwargs):
        """Evaluation forward; returns {'preds': [Sample, ...]}."""
        img_metas = samples['meta']
        img = samples['img']
        img = [img] if img is None else img
        results = self.simple_test(img_metas, img, **kwargs)
        return dict(preds=self._parse_results_to_sample(results, samples))

    def simple_test_pts(self, x, img_metas, rescale=False):
        """Test function of point cloud branch."""
        outs = self.pts_bbox_head(x, img_metas)
        bbox_list = self.pts_bbox_head.get_bboxes(
            outs, img_metas, rescale=rescale)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return bbox_results

    def simple_test(self, img_metas, img=None, rescale=False):
        """Test function without augmentaiton."""
        img_feats = self.extract_feat(img=img, img_metas=img_metas)
        bbox_list = [dict() for i in range(len(img_metas))]
        bbox_pts = self.simple_test_pts(img_feats, img_metas, rescale=rescale)
        for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
            result_dict['pts_bbox'] = pts_bbox
        return bbox_list

    def _parse_results_to_sample(self, results: dict, sample: dict):
        """Convert head outputs into framework Sample objects.

        Boxes arrive bottom-centered; the z coordinate is shifted by half
        the height to store gravity-centered boxes.
        """
        num_samples = len(results)
        new_results = []
        for i in range(num_samples):
            data = Sample(None, sample["modality"][i])
            bboxes_3d = results[i]['pts_bbox']["boxes_3d"].numpy()
            labels = results[i]['pts_bbox']["labels_3d"].numpy()
            confidences = results[i]['pts_bbox']["scores_3d"].numpy()
            # Bottom center -> gravity center (z + h/2).
            bottom_center = bboxes_3d[:, :3]
            gravity_center = np.zeros_like(bottom_center)
            gravity_center[:, :2] = bottom_center[:, :2]
            gravity_center[:, 2] = bottom_center[:, 2] + bboxes_3d[:, 5] * 0.5
            bboxes_3d[:, :3] = gravity_center
            data.bboxes_3d = BBoxes3D(bboxes_3d[:, 0:7])
            data.bboxes_3d.coordmode = 'Lidar'
            data.bboxes_3d.origin = [0.5, 0.5, 0.5]
            data.bboxes_3d.rot_axis = 2
            data.bboxes_3d.velocities = bboxes_3d[:, 7:9]
            data['bboxes_3d_numpy'] = bboxes_3d[:, 0:7]
            data['bboxes_3d_coordmode'] = 'Lidar'
            data['bboxes_3d_origin'] = [0.5, 0.5, 0.5]
            data['bboxes_3d_rot_axis'] = 2
            data['bboxes_3d_velocities'] = bboxes_3d[:, 7:9]
            data.labels = labels
            data.confidences = confidences
            data.meta = SampleMeta(id=sample["meta"][i]['id'])
            if "calibs" in sample:
                calib = [calibs.numpy()[i] for calibs in sample["calibs"]]
                data.calibs = calib
            new_results.append(data)
        return new_results

    def aug_test_pts(self, feats, img_metas, rescale=False):
        """Head test with per-level feature averaging across augmentations."""
        feats_list = []
        for j in range(len(feats[0])):
            feats_list_level = []
            for i in range(len(feats)):
                feats_list_level.append(feats[i][j])
            feats_list.append(paddle.stack(feats_list_level, -1).mean(-1))
        outs = self.pts_bbox_head(feats_list, img_metas)
        bbox_list = self.pts_bbox_head.get_bboxes(
            outs, img_metas, rescale=rescale)
        bbox_results = [
            bbox3d2result(bboxes, scores, labels)
            for bboxes, scores, labels in bbox_list
        ]
        return bbox_results

    def aug_test(self, img_metas, imgs=None, rescale=False):
        """Test function with augmentaiton."""
        # NOTE(review): `extract_feats` (plural) is not defined on this
        # class — presumably provided by the base class; verify.
        img_feats = self.extract_feats(img_metas, imgs)
        img_metas = img_metas[0]
        bbox_list = [dict() for i in range(len(img_metas))]
        bbox_pts = self.aug_test_pts(img_feats, img_metas, rescale)
        for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
            result_dict['pts_bbox'] = pts_bbox
        return bbox_list

    def export_forward(self, samples):
        """Deploy-time forward using precomputed img2lidar transforms."""
        img = samples['images']
        img_metas = {'img2lidars': samples['img2lidars']}
        time_stamp = samples.get('timestamps', None)
        img_metas['image_shape'] = img.shape[-2:]
        img_feats = self.extract_feat(img=img, img_metas=None)
        # (Removed a dead `bbox_list` pre-assignment that was immediately
        # overwritten below.)
        outs = self.pts_bbox_head.export_forward(img_feats, img_metas,
                                                 time_stamp)
        bbox_list = self.pts_bbox_head.get_bboxes(outs, None, rescale=True)
        return bbox_list

    @property
    def save_name(self):
        if self.pts_bbox_head.with_time:
            return "petrv2_inference"
        return "petr_inference"

    @property
    def apollo_deploy_name(self):
        if self.pts_bbox_head.with_time:
            return "PETR_V2"
        return "PETR_V1"
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/centerpoint/center_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/tianweiy/CenterPoint/blob/master/det3d/models/bbox_heads/center_head.py
Ths copyright of tianweiy/CenterPoint is as follows:
MIT License [see LICENSE for details].
Portions of https://github.com/tianweiy/CenterPoint/blob/master/det3d/models/bbox_heads/center_head.py are from
det3d (https://github.com/poodarchu/Det3D/tree/56402d4761a5b73acd23080f537599b0888cce07)
Ths copyright of det3d is as follows:
MIT License [see LICENSE for details].
"""
import copy
import logging
from collections import defaultdict
import paddle
import paddle.nn.functional as F
from paddle import nn
from paddle3d.apis import manager
from paddle3d.geometries.bbox import circle_nms
from paddle3d.models.backbones.second_backbone import build_conv_layer
from paddle3d.models.layers.layer_libs import rotate_nms_pcdet
from paddle3d.models.losses import FastFocalLoss, RegLoss
from paddle3d.models.voxel_encoders.pillar_encoder import build_norm_layer
from paddle3d.ops import centerpoint_postprocess
from paddle3d.utils.logger import logger
class ConvModule(nn.Layer):
    """Conv2D -> normalization -> ReLU block.

    Args:
        in_channels (int): number of input channels.
        out_channels (int): number of output channels.
        kernel_size (int): convolution kernel size.
        stride, padding, dilation, groups: standard conv parameters.
        norm_cfg (dict, optional): normalization config; defaults to
            BatchNorm2D with eps=1e-05, momentum=0.1.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 norm_cfg=None):
        super(ConvModule, self).__init__()
        # BUGFIX: the default used to be a shared mutable dict
        # (norm_cfg=dict(...)); build a fresh config per instance instead.
        if norm_cfg is None:
            norm_cfg = dict(type='BatchNorm2D', eps=1e-05, momentum=0.1)
        # build convolution layer
        self.conv = build_conv_layer(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=True,
            distribution="norm")
        # build normalization layer
        self.bn = build_norm_layer(norm_cfg, out_channels)
        # build activation layer
        self.activate = nn.ReLU()

    def forward(self, x):
        """Apply conv, then norm, then ReLU."""
        x = self.conv(x)
        x = self.bn(x)
        x = self.activate(x)
        return x
class SeparateHead(nn.Layer):
    """One conv sub-branch per output head (reg/height/dim/rot/vel/hm).

    Each head is (num_conv - 1) ConvModules followed by a final plain conv
    producing `classes` channels; heads are registered as attributes named
    after their key.
    """

    def __init__(self,
                 in_channels,
                 heads,
                 head_conv=64,
                 final_kernel=1,
                 init_bias=-2.19,
                 norm_cfg=dict(type='BatchNorm2D', eps=1e-05, momentum=0.1),
                 **kwargs):
        super(SeparateHead, self).__init__()
        self.heads = heads
        # Bias used to initialize the heatmap head (focal-loss prior).
        self.init_bias = init_bias
        for head in self.heads:
            # heads[head] = (output channels, number of conv layers).
            classes, num_conv = self.heads[head]
            conv_layers = []
            c_in = in_channels
            if (num_conv - 1) >= 1:
                # Hidden ConvModules, then a final bias-only conv.
                for i in range(num_conv - 1):
                    conv_layers.append(
                        ConvModule(
                            c_in,
                            head_conv,
                            kernel_size=final_kernel,
                            stride=1,
                            padding=final_kernel // 2,
                            norm_cfg=norm_cfg))
                    c_in = head_conv
                conv_layers.append(
                    build_conv_layer(
                        head_conv,
                        classes,
                        kernel_size=final_kernel,
                        stride=1,
                        padding=final_kernel // 2,
                        bias=True))
                conv_layers = nn.Sequential(*conv_layers)
            else:
                # Single conv mapping input channels straight to classes.
                conv_layers.append(
                    build_conv_layer(
                        c_in,
                        classes,
                        kernel_size=final_kernel,
                        stride=1,
                        padding=final_kernel // 2,
                        bias=True))
                conv_layers = nn.Sequential(*conv_layers)
            # Register the branch under the head's name.
            self.__setattr__(head, conv_layers)
        with paddle.no_grad():
            # Initialize the heatmap head's final bias to the focal prior.
            for head in self.heads:
                if head == 'hm':
                    self.__getattr__(head)[-1].bias[:] = self.init_bias

    def forward(self, x):
        """Forward function for SepHead.

        Args:
            x (paddle.Tensor): Input feature map with the shape of
                [B, 512, 128, 128].

        Returns:
            dict[str: paddle.Tensor]: contains the following keys:
                -reg (paddle.Tensor): 2D regression value with the \
                    shape of [B, 2, H, W].
                -height (paddle.Tensor): Height value with the \
                    shape of [B, 1, H, W].
                -dim (paddle.Tensor): Size value with the shape \
                    of [B, 3, H, W].
                -rot (paddle.Tensor): Rotation value with the \
                    shape of [B, 2, H, W].
                -vel (paddle.Tensor): Velocity value with the \
                    shape of [B, 2, H, W].
                -hm (paddle.Tensor): hm with the shape of \
                    [B, N, H, W].
        """
        ret_dict = dict()
        for head in self.heads.keys():
            ret_dict[head] = self.__getattr__(head)(x)
        return ret_dict
@manager.MODELS.add_component
class CenterHead(nn.Layer):
    def __init__(
            self,
            in_channels=[
                128,
            ],
            tasks=[],
            weight=0.25,
            code_weights=[],
            common_heads=dict(),
            init_bias=-2.19,
            share_conv_channel=64,
            num_hm_conv=2,
            norm_cfg=dict(type='BatchNorm2D', eps=1e-05, momentum=0.1),
    ):
        # NOTE(review): several defaults here are shared mutable objects
        # ([], dict()); safe only if never mutated — verify downstream.
        super(CenterHead, self).__init__()
        # One detection task per class group; each gets its own SeparateHead.
        num_classes = [len(t["class_names"]) for t in tasks]
        self.class_names = [t["class_names"] for t in tasks]
        self.code_weights = code_weights
        self.weight = weight  # weight between hm loss and loc loss
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.crit = FastFocalLoss()
        self.crit_reg = RegLoss()
        # 9 box params when velocity is regressed, otherwise 7.
        self.box_n_dim = 9 if 'vel' in common_heads else 7
        self.with_velocity = True if 'vel' in common_heads else False
        # NOTE(review): duplicate of the assignment above — redundant.
        self.code_weights = code_weights
        self.use_direction_classifier = False
        # a shared convolution
        self.shared_conv = ConvModule(
            in_channels,
            share_conv_channel,
            kernel_size=3,
            padding=1,
            norm_cfg=norm_cfg)
        self.tasks = nn.LayerList()
        for num_cls in num_classes:
            heads = copy.deepcopy(common_heads)
            heads.update(dict(hm=(num_cls, num_hm_conv)))
            self.tasks.append(
                SeparateHead(
                    init_bias=init_bias,
                    final_kernel=3,
                    in_channels=share_conv_channel,
                    heads=heads,
                    num_cls=num_cls))
        logger.info("Finish CenterHead Initialization")
def forward(self, x, *kwargs):
ret_dicts = []
x = self.shared_conv(x)
for task in self.tasks:
ret_dicts.append(task(x))
return ret_dicts, x
def _sigmoid(self, x):
y = paddle.clip(F.sigmoid(x), min=1e-4, max=1 - 1e-4)
return y
    def loss(self, example, preds_dicts, test_cfg, **kwargs):
        """Compute per-task heatmap + regression losses and merge them.

        Args:
            example: target dict with per-task 'heat_map', 'center_idx',
                'target_mask', 'target_label' and 'target_bbox'.
            preds_dicts: per-task head outputs.
            test_cfg: unused here (kept for interface symmetry).

        Returns:
            dict mapping each loss key to a list over tasks; 'loss' is the
            summed total.
        """
        rets = []
        for task_id, preds_dict in enumerate(preds_dicts):
            # hm focal loss
            preds_dict['hm'] = self._sigmoid(preds_dict['hm'])
            hm_loss = self.crit(preds_dict['hm'], example['heat_map'][task_id],
                                example['center_idx'][task_id],
                                example['target_mask'][task_id],
                                example['target_label'][task_id])
            target_box = example['target_bbox'][task_id]
            # reconstruct the anno_box from multiple reg heads
            if 'vel' in preds_dict:
                preds_dict['target_bbox'] = paddle.concat(
                    (preds_dict['reg'], preds_dict['height'], preds_dict['dim'],
                     preds_dict['vel'], preds_dict['rot']),
                    axis=1)
            else:
                preds_dict['target_bbox'] = paddle.concat(
                    (preds_dict['reg'], preds_dict['height'], preds_dict['dim'],
                     preds_dict['rot']),
                    axis=1)
                # No velocity head: drop the velocity columns from the
                # target (keep reg/height/dim plus the last two rot cols).
                index = paddle.to_tensor([
                    0, 1, 2, 3, 4, 5, target_box.shape[-1] - 2,
                    target_box.shape[-1] - 1
                ],
                                         dtype='int32')
                target_box = paddle.index_select(
                    target_box, index=index, axis=-1)
                #target_box = target_box[..., [0, 1, 2, 3, 4, 5, -2, -1]] # remove vel target
            ret = {}
            # Regression loss for dimension, offset, height, rotation
            box_loss = self.crit_reg(preds_dict['target_bbox'],
                                     example['target_mask'][task_id],
                                     example['center_idx'][task_id], target_box)
            loc_loss = (box_loss * paddle.to_tensor(
                self.code_weights, dtype=box_loss.dtype)).sum()
            loss = hm_loss + self.weight * loc_loss
            ret.update({
                'loss':
                loss,
                'hm_loss':
                hm_loss,
                'loc_loss':
                self.weight * loc_loss,
                'loc_loss_elem':
                box_loss,
                'num_positive':
                paddle.cast(example['target_mask'][task_id],
                            dtype='float32').sum()
            })
            rets.append(ret)
        """convert batch-key to key-batch
        """
        rets_merged = defaultdict(list)
        for ret in rets:
            for k, v in ret.items():
                rets_merged[k].append(v)
        # Total loss is the sum over tasks.
        rets_merged['loss'] = sum(rets_merged['loss'])
        return rets_merged
    @paddle.no_grad()
    def predict_by_custom_op(self, example, preds_dicts, test_cfg, **kwargs):
        """Decode detections with the fused ``centerpoint_postprocess`` op.

        This is the deployment/export decoding path: every task's raw outputs
        are handed to a single custom op that performs decoding, filtering
        and NMS in one call.

        Returns:
            list[dict]: a single-element list with 'meta', 'box3d_lidar',
            'label_preds' and 'scores'.
        """
        rets = []
        metas = []
        hm = []
        reg = []
        height = []
        dim = []
        vel = []
        rot = []
        num_classes = []
        flag = 0
        for task_id, preds_dict in enumerate(preds_dicts):
            # Cumulative class-index offsets so per-task labels map into one
            # global label space.
            # NOTE(review): this loop runs once per task yet iterates *all*
            # tasks' class counts each time — looks correct only for
            # single-task configs; confirm against upstream usage.
            for j, num_class in enumerate(self.num_classes):
                num_classes.append(flag)
                flag += num_class
            hm.append(preds_dict['hm'])
            reg.append(preds_dict['reg'])
            height.append(preds_dict['height'])
            dim.append(preds_dict['dim'])
            if self.with_velocity:
                vel.append(preds_dict['vel'])
            else:
                # The op requires a velocity input; 'reg' serves as a
                # same-shaped placeholder when no velocity head exists.
                vel.append(preds_dict['reg'])
            rot.append(preds_dict['rot'])
        bboxes, scores, labels = centerpoint_postprocess.centerpoint_postprocess(
            hm, reg, height, dim, vel, rot, test_cfg.voxel_size,
            test_cfg.point_cloud_range, test_cfg.post_center_limit_range,
            num_classes, test_cfg.down_ratio, test_cfg.score_threshold,
            test_cfg.nms.nms_iou_threshold, test_cfg.nms.nms_pre_max_size,
            test_cfg.nms.nms_post_max_size, self.with_velocity)
        if "meta" not in example or len(example["meta"]) == 0:
            meta_list = [None]
        else:
            meta_list = example["meta"]
        ret_list = [{
            'meta': meta_list[0],
            'box3d_lidar': bboxes,
            'label_preds': labels,
            'scores': scores
        }]
        return ret_list
    @paddle.no_grad()
    def predict(self, example, preds_dicts, test_cfg, **kwargs):
        """decode, nms, then return the detection result. Additionaly support double flip testing
        """
        # get loss info
        rets = []
        metas = []

        post_center_range = test_cfg.post_center_limit_range
        if len(post_center_range) > 0:
            post_center_range = paddle.to_tensor(
                post_center_range,
                dtype=preds_dicts[0]['hm'].dtype,
            )

        for task_id, preds_dict in enumerate(preds_dicts):
            # convert N C H W to N H W C
            for key, val in preds_dict.items():
                preds_dict[key] = val.transpose(perm=[0, 2, 3, 1])

            batch_size = preds_dict['hm'].shape[0]

            if "meta" not in example or len(example["meta"]) == 0:
                meta_list = [None] * batch_size
            else:
                meta_list = example["meta"]

            batch_hm = F.sigmoid(preds_dict['hm'])
            # 'dim' is regressed in log space; exp() recovers metric sizes.
            batch_dim = paddle.exp(preds_dict['dim'])

            batch_rots = preds_dict['rot'][..., 0:1]
            batch_rotc = preds_dict['rot'][..., 1:2]
            batch_reg = preds_dict['reg']
            batch_hei = preds_dict['height']
            # Recover yaw from its (sin, cos) encoding.
            batch_rot = paddle.atan2(batch_rots, batch_rotc)

            batch, H, W, num_cls = batch_hm.shape

            batch_reg = batch_reg.reshape([batch, H * W, 2])
            batch_hei = batch_hei.reshape([batch, H * W, 1])
            batch_rot = batch_rot.reshape([batch, H * W, 1])
            batch_dim = batch_dim.reshape([batch, H * W, 3])
            batch_hm = batch_hm.reshape([batch, H * W, num_cls])

            # BEV cell grid + regressed sub-cell offsets, then rescaled back
            # into LiDAR metric coordinates.
            ys, xs = paddle.meshgrid([paddle.arange(0, H), paddle.arange(0, W)])
            ys = ys.reshape([1, H, W]).tile(repeat_times=[batch, 1, 1]).astype(
                batch_hm.dtype)
            xs = xs.reshape([1, H, W]).tile(repeat_times=[batch, 1, 1]).astype(
                batch_hm.dtype)

            xs = xs.reshape([batch, -1, 1]) + batch_reg[:, :, 0:1]
            ys = ys.reshape([batch, -1, 1]) + batch_reg[:, :, 1:2]

            xs = xs * test_cfg.down_ratio * test_cfg.voxel_size[
                0] + test_cfg.point_cloud_range[0]
            ys = ys * test_cfg.down_ratio * test_cfg.voxel_size[
                1] + test_cfg.point_cloud_range[1]

            if 'vel' in preds_dict:
                batch_vel = preds_dict['vel']
                batch_vel = batch_vel.reshape([batch, H * W, 2])
                batch_box_preds = paddle.concat(
                    [xs, ys, batch_hei, batch_dim, batch_vel, batch_rot],
                    axis=2)
            else:
                batch_box_preds = paddle.concat(
                    [xs, ys, batch_hei, batch_dim, batch_rot], axis=2)

            metas.append(meta_list)

            if test_cfg.get('per_class_nms', False):
                # Per-class NMS is not implemented on this path.
                pass
            else:
                rets.append(
                    self.post_processing(batch_box_preds, batch_hm, test_cfg,
                                         post_center_range, task_id))

        # Merge branches results
        ret_list = []
        num_samples = len(rets[0])

        # NOTE(review): ret_list is initialized twice; the second assignment
        # is redundant but harmless.
        ret_list = []
        for i in range(num_samples):
            ret = {}
            for k in rets[0][i].keys():
                if k in ["box3d_lidar", "scores"]:
                    # The comprehension's 'ret' is scoped to the comprehension
                    # and does not clobber the outer dict.
                    ret[k] = paddle.concat([ret[i][k] for ret in rets])
                elif k in ["label_preds"]:
                    # Offset each task's labels into the global label space.
                    flag = 0
                    for j, num_class in enumerate(self.num_classes):
                        rets[j][i][k] += flag
                        flag += num_class
                    ret[k] = paddle.concat([ret[i][k] for ret in rets])

            ret['meta'] = metas[0][i]
            ret_list.append(ret)
        return ret_list
    def single_post_processing(self, box_preds, hm_preds, test_cfg,
                               post_center_range, task_id):
        """Threshold, range-filter and NMS one sample's predictions.

        Args:
            box_preds: (H*W, 7 or 9) decoded boxes for one sample.
            hm_preds: (H*W, num_cls) per-class heatmap scores.
            test_cfg: post-processing config (score threshold, NMS params).
            post_center_range: (6,) tensor [xmin, ymin, zmin, xmax, ymax, zmax]
                limiting valid box centers.
            task_id: index of the current task group (selects circle-NMS radius).

        Returns:
            dict with 'box3d_lidar', 'scores' and 'label_preds'. When nothing
            survives filtering, one dummy box with score -1 is returned so
            static-graph code never sees a zero-size tensor.
        """
        scores = paddle.max(hm_preds, axis=-1)
        labels = paddle.argmax(hm_preds, axis=-1)

        score_mask = scores > test_cfg.score_threshold
        distance_mask = (box_preds[..., :3] >= post_center_range[:3]).all(1) \
            & (box_preds[..., :3] <= post_center_range[3:]).all(1)
        mask = distance_mask & score_mask

        box_preds = box_preds[mask]
        scores = scores[mask]
        labels = labels[mask]

        def box_empty(box_preds, scores, labels, box_n_dim):
            # zero-shape tensor here will raise a error,
            # so we replace it with a fake result
            # prediction_dict = {
            #     'box3d_lidar': box_preds,
            #     'scores': scores,
            #     'label_preds': labels
            # }
            prediction_dict = {
                'box3d_lidar': paddle.zeros([1, box_n_dim],
                                            dtype=box_preds.dtype),
                'scores': -paddle.ones([1], dtype=scores.dtype),
                'label_preds': paddle.zeros([1], dtype=labels.dtype),
            }
            return prediction_dict

        def box_not_empty(box_preds, scores, labels, test_cfg):
            # NMS operates on [x, y, z, dx, dy, dz, heading]; any velocity
            # columns are excluded via index_select.
            index = paddle.to_tensor(
                [0, 1, 2, 3, 4, 5, box_preds.shape[-1] - 1], dtype='int32')
            boxes_for_nms = paddle.index_select(box_preds, index=index, axis=-1)
            #boxes_for_nms = box_preds[:, [0, 1, 2, 3, 4, 5, -1]]
            if test_cfg.get('circular_nms', False):
                centers = boxes_for_nms[:, [0, 1]]
                boxes = paddle.concat(
                    [centers, scores.reshape([-1, 1])], axis=1)
                selected = _circle_nms(
                    boxes,
                    min_radius=test_cfg.min_radius[task_id],
                    post_max_size=test_cfg.nms.nms_post_max_size)
            else:
                selected = rotate_nms_pcdet(
                    boxes_for_nms,
                    scores,
                    thresh=test_cfg.nms.nms_iou_threshold,
                    pre_max_size=test_cfg.nms.nms_pre_max_size,
                    post_max_size=test_cfg.nms.nms_post_max_size)
            # reshape guards the single-selection case, where indexing would
            # otherwise drop a dimension.
            selected_boxes = box_preds[selected].reshape(
                [-1, box_preds.shape[-1]])
            selected_scores = scores[selected]
            selected_labels = labels[selected]
            prediction_dict = {
                'box3d_lidar': selected_boxes,
                'scores': selected_scores,
                'label_preds': selected_labels
            }
            return prediction_dict

        # Static-graph-friendly branch on whether anything survived the masks.
        return paddle.static.nn.cond(
            paddle.logical_not(mask.any()), lambda: box_empty(
                box_preds, scores, labels, self.box_n_dim), lambda:
            box_not_empty(box_preds, scores, labels, test_cfg))
'''
def single_post_processing(self, box_preds, hm_preds, test_cfg,
post_center_range, task_id):
scores = paddle.max(hm_preds, axis=-1)
labels = paddle.argmax(hm_preds, axis=-1)
score_mask = scores > test_cfg.score_threshold
distance_mask = (box_preds[..., :3] >= post_center_range[:3]).all(1) \
& (box_preds[..., :3] <= post_center_range[3:]).all(1)
mask = distance_mask & score_mask
box_preds = box_preds[mask]
scores = scores[mask]
labels = labels[mask]
# if 0 in box_preds.shape:
# prediction_dict = {
# 'box3d_lidar': box_preds,
# 'scores': scores,
# 'label_preds': labels
# }
# return prediction_dict
index = paddle.to_tensor(
[0, 1, 2, 3, 4, 5, box_preds.shape[-1] - 1], dtype='int32')
boxes_for_nms = paddle.index_select(box_preds, index=index, axis=-1)
#boxes_for_nms = box_preds[:, [0, 1, 2, 3, 4, 5, -1]]
if test_cfg.get('circular_nms', False):
centers = boxes_for_nms[:, [0, 1]]
boxes = paddle.concat([centers, scores.reshape([-1, 1])], axis=1)
selected = _circle_nms(
boxes,
min_radius=test_cfg.min_radius[task_id],
post_max_size=test_cfg.nms.nms_post_max_size)
else:
selected = rotate_nms_pcdet(
boxes_for_nms,
scores,
thresh=test_cfg.nms.nms_iou_threshold,
pre_maxsize=test_cfg.nms.nms_pre_max_size,
post_max_size=test_cfg.nms.nms_post_max_size)
selected_boxes = box_preds[selected].reshape([-1, box_preds.shape[-1]])
selected_scores = scores[selected]
selected_labels = labels[selected]
prediction_dict = {
'box3d_lidar': selected_boxes,
'scores': selected_scores,
'label_preds': selected_labels
}
return prediction_dict
'''
def post_processing(self, batch_box_preds, batch_hm, test_cfg,
post_center_range, task_id):
if not getattr(self, "in_export_mode", False):
batch_size = len(batch_hm)
prediction_dicts = []
for i in range(batch_size):
box_preds = batch_box_preds[i]
hm_preds = batch_hm[i]
prediction_dict = self.single_post_processing(
box_preds, hm_preds, test_cfg, post_center_range, task_id)
prediction_dicts.append(prediction_dict)
return prediction_dicts
else:
prediction_dict = self.single_post_processing(
batch_box_preds[0], batch_hm[0], test_cfg, post_center_range,
task_id)
return [prediction_dict]
import numpy as np
def _circle_nms(boxes, min_radius, post_max_size=83):
    """Run center-distance (circle) NMS and return the kept indices.

    Wraps the external ``circle_nms`` kernel: boxes whose centers lie within
    ``min_radius`` of a higher-scoring box are suppressed; at most
    ``post_max_size`` indices are kept.
    """
    kept = circle_nms(boxes.numpy(), thresh=min_radius)
    kept = np.array(kept)[:post_max_size]
    return paddle.to_tensor(kept, dtype='int32')
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/centerpoint/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import center_head, centerpoint
from .center_head import *
from .centerpoint import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/centerpoint/centerpoint.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
from copy import deepcopy
from typing import Dict, List
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D
from paddle3d.models.base import BaseLidarModel
from paddle3d.sample import Sample, SampleMeta
from paddle3d.utils.checkpoint import load_pretrained_model
from paddle3d.utils.logger import logger
from paddle3d.utils import dtype2float32
class DictObject(Dict):
    """Recursively exposes the keys of a plain ``dict`` as object attributes.

    Nested dicts are wrapped in ``DictObject`` as well, so values can be read
    with chained attribute access (e.g. ``cfg.nms.nms_iou_threshold``).
    """

    def __init__(self, config: Dict):
        for name, entry in config.items():
            wrapped = DictObject(entry) if isinstance(entry, dict) else entry
            setattr(self, name, wrapped)
@manager.MODELS.add_component
class CenterPoint(BaseLidarModel):
    """CenterPoint detector: voxelize -> encode -> BEV backbone/neck -> head.

    Args:
        voxelizer: converts raw point clouds into voxels + coordinates.
        voxel_encoder: produces per-voxel features.
        middle_encoder: scatters voxel features into a dense BEV map.
        backbone: BEV feature extractor.
        neck: multi-scale feature fusion module.
        bbox_head: CenterHead producing per-task predictions.
        test_cfg (dict): post-processing configuration (wrapped in
            ``DictObject`` for attribute-style access).
        pretrained (str, optional): path/URL of pretrained weights to load.
        box_with_velocity (bool): whether predicted boxes carry velocity.
        freeze (list, optional): sub-layer names (or per-task index lists)
            to freeze during training.
    """

    def __init__(self,
                 voxelizer,
                 voxel_encoder,
                 middle_encoder,
                 backbone,
                 neck,
                 bbox_head,
                 test_cfg=None,
                 pretrained=None,
                 box_with_velocity: bool = False,
                 freeze=None):
        super().__init__(
            with_voxelizer=True, box_with_velocity=box_with_velocity)
        self.voxelizer = voxelizer
        self.voxel_encoder = voxel_encoder
        self.middle_encoder = middle_encoder
        self.backbone = backbone
        self.neck = neck
        self.bbox_head = bbox_head
        self.test_cfg = DictObject(test_cfg)
        self.sync_bn = True
        if pretrained is not None:
            # Bug fix: this previously passed ``self.pretrained``, an
            # attribute that is never assigned, so supplying a pretrained
            # path raised AttributeError instead of loading the weights.
            load_pretrained_model(self, pretrained)
        # ``None`` default avoids the shared-mutable-default pitfall;
        # an empty list means "freeze nothing".
        self.freeze = [] if freeze is None else freeze

    def _freeze(self):
        """Freeze every sub-layer named in ``self.freeze``.

        Entries may be: the string 'shared_conv' (the head's shared conv),
        any attribute name of this model, or a list of task indices
        (freezes those task heads).
        """
        if len(self.freeze) > 0:
            freeze_layers = []
            for layer_name in self.freeze:
                if layer_name == 'shared_conv':
                    freeze_layers.append(
                        getattr(self, 'bbox_head').shared_conv)
                elif isinstance(layer_name, str):
                    freeze_layers.append(getattr(self, layer_name))
                elif isinstance(layer_name, list):
                    for current_layer in layer_name:
                        freeze_layers.append(
                            getattr(self, 'bbox_head').tasks[current_layer])
                else:
                    raise NotImplementedError(
                        'The freeze_layer type {} is not supported'.format(
                            layer_name))
            for freeze_layer in freeze_layers:
                self.freeze_signle_layer(freeze_layer)

    # NOTE: name keeps its historical typo ('signle') for backward
    # compatibility with existing callers and configs.
    def freeze_signle_layer(self, layer):
        """Put ``layer`` in eval mode and stop gradient for its parameters."""
        layer.eval()
        for param in layer.parameters():
            param.trainable = False
        for m in layer.sublayers():
            # Keep BatchNorm running statistics fixed while frozen.
            if isinstance(m, nn.layer.norm._BatchNormBase):
                m.eval()

    def deploy_preprocess(self, points):
        """Normalize exported-model input to the encoder's channel count.

        Pads 4-channel points with a zero fifth channel, then truncates to
        ``voxel_encoder.in_channels`` columns.
        """

        def true_fn(points):
            points = points[:, 0:5]
            return points

        def false_fn(points):
            points = points.reshape([1, -1, 4])
            points = F.pad(
                points, [0, 1], value=0, mode='constant', data_format="NCL")
            points = points.reshape([-1, 5])
            return points

        # Static-graph branch: deployed input may carry 4 or >=5 channels.
        points = paddle.static.nn.cond(
            points.shape[-1] >=
            5, lambda: true_fn(points), lambda: false_fn(points))
        return points[:, 0:self.voxel_encoder.in_channels]

    def voxelize(self, points):
        """Run the voxelizer; returns (voxels, coordinates, points-per-voxel)."""
        voxels, coordinates, num_points_in_voxel = self.voxelizer(points)
        return voxels, coordinates, num_points_in_voxel

    def extract_feat(self, data):
        """Voxelize raw points and run them through encoder, backbone and neck."""
        voxels, coordinates, num_points_in_voxel = self.voxelizer(
            data['points'])
        data["features"] = voxels
        data["num_points_in_voxel"] = num_points_in_voxel
        data["coors"] = coordinates
        input_features = self.voxel_encoder(
            data["features"], data["num_points_in_voxel"], data["coors"])
        x = self.middle_encoder(input_features, data["coors"],
                                data["batch_size"])
        x = self.backbone(x)
        x = self.neck(x)
        return x

    def train_forward(self, samples):
        """Training step: extract features, run the head, return its loss dict."""
        if len(self.freeze) > 0:
            self._freeze()
        batch_size = len(samples["data"])
        points = samples["data"]
        data = dict(points=points, batch_size=batch_size)
        if hasattr(self, 'amp_cfg_'):
            # Mixed-precision path; predictions are cast back to float32
            # before the loss is computed.
            with paddle.amp.auto_cast(**self.amp_cfg_):
                x = self.extract_feat(data)
                preds, x = self.bbox_head(x)
            preds = dtype2float32(preds)
        else:
            x = self.extract_feat(data)
            preds, x = self.bbox_head(x)
        return self.bbox_head.loss(samples, preds, self.test_cfg)

    def test_forward(self, samples):
        """Evaluation step: decode head outputs into ``Sample`` predictions."""
        batch_size = len(samples["data"])
        points = samples["data"]
        data = dict(points=points, batch_size=batch_size)
        x = self.extract_feat(data)
        preds, x = self.bbox_head(x)
        preds = self.bbox_head.predict(samples, preds, self.test_cfg)
        preds = self._parse_results_to_sample(preds, samples)
        return {'preds': preds}

    def export_forward(self, samples):
        """Deployment forward pass using the fused post-processing custom op."""
        batch_size = 1
        points = samples["data"]
        points = self.deploy_preprocess(points)
        data = dict(points=points, batch_size=batch_size)
        x = self.extract_feat(data)
        preds, x = self.bbox_head(x)
        return self.bbox_head.predict_by_custom_op(samples, preds,
                                                   self.test_cfg)

    def _parse_results_to_sample(self, results: dict, sample: dict):
        """Convert raw per-sample prediction dicts into ``Sample`` objects."""
        num_samples = len(results)
        new_results = []
        for i in range(num_samples):
            data = Sample(sample["path"][i], sample["modality"][i])
            bboxes_3d = results[i]["box3d_lidar"].numpy()
            labels = results[i]["label_preds"].numpy()
            confidences = results[i]["scores"].numpy()
            # Keep geometry + heading; velocity columns (6:8, if present)
            # are stored separately below.
            data.bboxes_3d = BBoxes3D(bboxes_3d[:, [0, 1, 2, 3, 4, 5, -1]])
            data.bboxes_3d.coordmode = 'Lidar'
            data.bboxes_3d.origin = [0.5, 0.5, 0.5]
            data.bboxes_3d.rot_axis = 2
            if bboxes_3d.shape[-1] == 9:
                data.bboxes_3d.velocities = bboxes_3d[:, 6:8]
            data.labels = labels
            data.confidences = confidences
            data.meta = SampleMeta(id=results[i]["meta"])
            if "calibs" in sample:
                calib = [calibs.numpy()[i] for calibs in sample["calibs"]]
                data.calibs = calib
            new_results.append(data)
        return new_results

    def collate_fn(self, batch: List):
        """Collate a list of per-sample dicts into one batched dict."""
        sample_merged = collections.defaultdict(list)
        for sample in batch:
            for k, v in sample.items():
                sample_merged[k].append(v)
        batch_size = len(sample_merged['meta'])
        ret = {}
        for key, elems in sample_merged.items():
            if key in ["voxels", "num_points_per_voxel"]:
                ret[key] = np.concatenate(elems, axis=0)
            elif key in ["meta"]:
                ret[key] = [elem.id for elem in elems]
            elif key in ["path", "modality"]:
                ret[key] = elems
            elif key == "data":
                ret[key] = [elem for elem in elems]
            elif key == "coords":
                # Prefix each voxel coordinate with its sample index so all
                # samples can share one flat coordinate tensor.
                coors = []
                for i, coor in enumerate(elems):
                    coor_pad = np.pad(
                        coor, ((0, 0), (1, 0)),
                        mode="constant",
                        constant_values=i)
                    coors.append(coor_pad)
                ret[key] = np.concatenate(coors, axis=0)
            elif key in [
                    "heat_map", "target_bbox", "center_idx", "target_mask",
                    "target_label", "calibs"
            ]:
                # Regroup per-sample, per-task targets into per-task stacks.
                ret[key] = collections.defaultdict(list)
                res = []
                for elem in elems:
                    for idx, ele in enumerate(elem):
                        ret[key][str(idx)].append(ele)
                for kk, vv in ret[key].items():
                    res.append(np.stack(vv))
                ret[key] = res
        return ret
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/iassd/iassd_coder.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
__all__ = ["PointResidual_BinOri_Coder"]
class PointResidual_BinOri_Coder(paddle.nn.Layer):
    """Point-based residual box coder with binned orientation encoding.

    Boxes are encoded relative to candidate points as residuals
    ``(xt, yt, zt, dxt, dyt, dzt)`` plus a discretized heading: the angle is
    split into ``bin_size`` bins with a normalized intra-bin residual.

    Note:
        The ``code_size`` argument is accepted for interface compatibility
        but is overwritten with ``6 + 2 * bin_size``.
    """

    def __init__(self, code_size=8, use_mean_size=True, **kwargs):
        super().__init__()
        self.bin_size = kwargs.get('bin_size', 12)
        # 6 geometry residuals + per-bin classification and residual channels.
        self.code_size = 6 + 2 * self.bin_size
        self.bin_inter = 2 * np.pi / self.bin_size
        self.use_mean_size = use_mean_size
        if self.use_mean_size:
            # Per-class mean box sizes used to normalize the residuals.
            self.mean_size = paddle.to_tensor(
                kwargs['mean_size'], dtype='float32')
            assert self.mean_size.min() > 0

    def encode_paddle(self, gt_boxes, points, gt_classes=None):
        """
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            points: (N, 3) [x, y, z]
            gt_classes: (N) [1, num_classes]
        Returns:
            box_coding: (N, 8 + C)
        """
        # Guard against degenerate (zero-size) boxes before taking logs.
        gt_boxes[:, 3:6] = paddle.clip(gt_boxes[:, 3:6], min=1e-5)

        xg, yg, zg, dxg, dyg, dzg, rg = paddle.split(gt_boxes, 7, axis=-1)
        xa, ya, za = paddle.split(points, 3, axis=-1)

        if self.use_mean_size:
            assert gt_classes.max() <= self.mean_size.shape[0]
            # gt_classes are 1-based; row 0 of mean_size is class 1.
            point_anchor_size = self.mean_size[gt_classes - 1]
            dxa, dya, dza = paddle.split(point_anchor_size, 3, axis=-1)
            # Normalize planar offsets by the anchor's BEV diagonal.
            diagonal = paddle.sqrt(dxa**2 + dya**2)
            xt = (xg - xa) / diagonal
            yt = (yg - ya) / diagonal
            zt = (zg - za) / dza
            dxt = paddle.log(dxg / dxa)
            dyt = paddle.log(dyg / dya)
            dzt = paddle.log(dzg / dza)
        else:
            xt = (xg - xa)
            yt = (yg - ya)
            zt = (zg - za)
            dxt = paddle.log(dxg)
            dyt = paddle.log(dyg)
            dzt = paddle.log(dzg)

        # Heading -> (bin id, residual normalized to [-1, 1] within the bin).
        rg = paddle.clip(rg, max=np.pi - 1e-5, min=-np.pi + 1e-5)
        bin_id = paddle.floor((rg + np.pi) / self.bin_inter)
        bin_res = (
            (rg + np.pi) - (bin_id * self.bin_inter + self.bin_inter / 2)) / (
                self.bin_inter / 2)  # norm to [-1, 1]

        return paddle.concat([xt, yt, zt, dxt, dyt, dzt, bin_id, bin_res],
                             axis=-1)

    def decode_paddle(self, box_encodings, points, pred_classes=None):
        """
        Args:
            box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, bin_id, bin_res , ...]
            points: [x, y, z]
            pred_classes: (N) [1, num_classes]
        Returns:
            decoded box predictions
        """
        xt, yt, zt, dxt, dyt, dzt = paddle.split(
            box_encodings[..., :6], 6, axis=-1)
        xa, ya, za = paddle.split(points, 3, axis=-1)

        if self.use_mean_size:
            assert pred_classes.max() <= self.mean_size.shape[0]
            point_anchor_size = self.mean_size[pred_classes - 1]
            dxa, dya, dza = paddle.split(point_anchor_size, 3, axis=-1)
            diagonal = paddle.sqrt(dxa**2 + dya**2)
            # Invert the normalized-residual encoding from encode_paddle.
            xg = xt * diagonal + xa
            yg = yt * diagonal + ya
            zg = zt * dza + za

            dxg = paddle.exp(dxt) * dxa
            dyg = paddle.exp(dyt) * dya
            dzg = paddle.exp(dzt) * dza
        else:
            xg = xt + xa
            yg = yt + ya
            zg = zt + za
            dxg, dyg, dzg = paddle.split(
                paddle.exp(box_encodings[..., 3:6]), 3, axis=-1)

        # Pick the most likely heading bin, then add its predicted residual.
        bin_id = box_encodings[..., 6:6 + self.bin_size]
        bin_res = box_encodings[..., 6 + self.bin_size:]

        bin_id = paddle.argmax(bin_id, axis=-1)
        bin_id_one_hot = paddle.nn.functional.one_hot(
            bin_id.astype('int64'), self.bin_size)
        bin_res = paddle.sum(
            bin_res * bin_id_one_hot.astype('float32'), axis=-1)

        # Bin center angle plus the de-normalized intra-bin residual.
        rg = bin_id.astype(
            'float32') * self.bin_inter - np.pi + self.bin_inter / 2
        rg = rg + bin_res * (self.bin_inter / 2)
        rg = rg.unsqueeze(-1)
        return paddle.concat([xg, yg, zg, dxg, dyg, dzg, rg], axis=-1)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/iassd/iassd.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import defaultdict
from typing import List
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D, CoordMode
from paddle3d.models.layers import constant_init, reset_parameters
from paddle3d.ops import iou3d_nms_cuda, pointnet2_ops
from paddle3d.sample import Sample
from paddle3d.utils import box_utils
from paddle3d.utils.logger import logger
# Bug fix: was misspelled ``__all`` (a plain, meaningless module variable),
# so ``from ... import *`` did not restrict the public API as intended.
__all__ = ["IASSD"]
@manager.MODELS.add_component
class IASSD(nn.Layer):
    """Model of IA-SSD

    Args:
        backbone (nn.Layer): instantiated class of backbone.
        head (nn.Layer): instantiated class of head.
        post_process_cfg (dict): config of nms post-process.
    """

    def __init__(self, backbone, head, post_process_cfg):
        super().__init__()
        self.backbone = backbone
        self.head = head
        self.post_process_cfg = post_process_cfg
        # Deployment flag; flipped on by ``export``. (A duplicated
        # assignment around self.apply() was removed.)
        self.export_model = False
        self.apply(self.init_weight)

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                data: (B * N, C) # C = [batch_id, x, y, z, intensity, ...]
                bboxes_3d: (B, num_gt, 8) # [x, y, z, l, w, h, heading, label]
        Returns:
            dict with 'loss' (training) or 'preds' (eval), or raw
            post-processed results in export mode.
        """
        batch_dict = self.backbone(batch_dict)
        batch_dict = self.head(batch_dict)

        if self.training:
            loss = self.head.get_loss()
            return {"loss": loss}
        else:
            if getattr(self, "export_model", False):
                return self.post_process(batch_dict)
            else:
                result_list = self.post_process(batch_dict)
                sample_list = self._parse_results_to_sample(
                    result_list, batch_dict)
                return {"preds": sample_list}

    def init_weight(self, m):
        """Reset conv/linear parameters; constant-init BatchNorm weight/bias."""
        if isinstance(m, nn.Conv2D):
            reset_parameters(m)
        elif isinstance(m, nn.Conv1D):
            reset_parameters(m)
        elif isinstance(m, nn.Linear):
            reset_parameters(m, reverse=True)
        elif isinstance(m, nn.BatchNorm2D):
            constant_init(m.weight, value=1)
            constant_init(m.bias, value=0)
        elif isinstance(m, nn.BatchNorm1D):
            constant_init(m.weight, value=1)
            constant_init(m.bias, value=0)

    def collate_fn(self, batch: List):
        """Collate per-sample dicts; pads gt boxes up to the batch maximum."""
        data_dict = defaultdict(list)
        for cur_sample in batch:
            for key, val in cur_sample.items():
                data_dict[key].append(val)
        batch_size = len(batch)
        collated_batch = {}
        collated_fields = [
            "data", "bboxes_3d", "meta", "path", "modality", "calibs"
        ]
        for key, val in data_dict.items():
            if key not in collated_fields or val[0] is None:
                continue
            if key == "data":
                # Prepend the sample index so all samples' points can share
                # one flat (sum_N, 1 + C) array.
                collated_batch[key] = np.concatenate([
                    np.pad(
                        coor, ((0, 0), (1, 0)),
                        mode="constant",
                        constant_values=i) for i, coor in enumerate(val)
                ])
            elif key == "bboxes_3d":
                max_gt = max([len(x) for x in val])
                batch_bboxes_3d = np.zeros(
                    (batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
                # pad num of bboxes to max_gt with zeros, as well as labels
                for k in range(batch_size):
                    batch_bboxes_3d[k, :val[k].__len__(), :] = val[k]
                collated_batch[key] = batch_bboxes_3d
            elif key in ["path", "modality", "calibs", "meta"]:
                collated_batch[key] = val
        collated_batch["batch_size"] = batch_size
        return collated_batch

    @paddle.no_grad()
    def post_process(self, batch_dict):
        """Score-threshold and NMS each sample's raw box/class predictions."""
        batch_size = batch_dict["batch_size"]
        pred_list = []
        for index in range(batch_size):
            if batch_dict.get("batch_index", None) is not None:
                # Flat (sum_N, ...) predictions: mask out this sample's rows.
                assert batch_dict["batch_box_preds"].shape.__len__() == 2
                batch_mask = batch_dict["batch_index"] == index
            else:
                # Dense (B, N, ...) predictions: index the batch dimension.
                assert batch_dict["batch_box_preds"].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict["batch_box_preds"][batch_mask]
            if not isinstance(batch_dict["batch_cls_preds"], list):
                cls_preds = batch_dict["batch_cls_preds"][batch_mask]
                if not batch_dict["cls_preds_normalized"]:
                    cls_preds = F.sigmoid(cls_preds)
            else:
                cls_preds = [
                    x[batch_mask] for x in batch_dict["batch_cls_preds"]
                ]
                if not batch_dict["cls_preds_normalized"]:
                    cls_preds = [F.sigmoid(x) for x in cls_preds]
            # NOTE(review): when cls_preds is a list, argmax/max below expect
            # a tensor — the list branch appears unused on this path; confirm.
            label_preds = paddle.argmax(cls_preds, axis=-1)
            cls_preds = paddle.max(cls_preds, axis=-1)
            selected_score, selected_label, selected_box = self.class_agnostic_nms(
                box_scores=cls_preds,
                box_preds=box_preds,
                label_preds=label_preds,
                nms_config=self.post_process_cfg["nms_config"],
                score_thresh=self.post_process_cfg["score_thresh"],
            )
            record_dict = {
                "pred_boxes": selected_box,
                "pred_scores": selected_score,
                "pred_labels": selected_label,
            }
            pred_list.append(record_dict)
        return pred_list

    def class_agnostic_nms(self, box_scores, box_preds, label_preds, nms_config,
                           score_thresh):
        """Class-agnostic 3D NMS.

        Returns:
            (scores, labels, boxes) of the kept detections, or single fake
            placeholders (score 0, label -1, zero box) so exported static
            graphs never produce zero-size tensors.
        """
        scores_mask = paddle.nonzero(box_scores >= score_thresh)
        fake_score = paddle.to_tensor([0.0], dtype="float32")
        fake_label = paddle.to_tensor([-1.0], dtype="float32")
        fake_box = paddle.to_tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
                                    dtype="float32")
        if paddle.shape(scores_mask)[0] == 0:
            return fake_score, fake_label, fake_box
        else:
            # (A no-op ``scores_mask = scores_mask`` assignment was removed.)
            box_scores = paddle.gather(box_scores, index=scores_mask)
            box_preds = paddle.gather(box_preds, index=scores_mask)
            label_preds = paddle.gather(label_preds, index=scores_mask)
            # Keep only the top-scoring candidates before running NMS.
            order = box_scores.argsort(0, descending=True)
            order = order[:nms_config["nms_pre_maxsize"]]
            box_preds = paddle.gather(box_preds, index=order)
            box_scores = paddle.gather(box_scores, index=order)
            label_preds = paddle.gather(label_preds, index=order)
            # When order is one-value tensor,
            # boxes[order] loses a dimension, so we add a reshape
            keep, num_out = iou3d_nms_cuda.nms_gpu(box_preds,
                                                   nms_config["nms_thresh"])
            if num_out.cast("int64") == 0:
                return fake_score, fake_label, fake_box
            else:
                selected = keep[0:num_out]
                selected = selected[:nms_config["nms_post_maxsize"]]
                selected_score = paddle.gather(box_scores, index=selected)
                selected_box = paddle.gather(box_preds, index=selected)
                selected_label = paddle.gather(label_preds, index=selected)
                return selected_score, selected_label, selected_box

    def _parse_results_to_sample(self, results, batch_dict):
        """Wrap post-processed predictions into ``Sample`` objects (KITTI eval)."""
        num = len(results)
        sample_list = []
        for i in range(num):
            result = results[i]
            path = batch_dict["path"][i]
            if (result["pred_labels"] == -1).any():
                # Fake (empty) prediction: emit a sample with no boxes.
                sample = Sample(path=path, modality="lidar")
            else:
                sample = Sample(path=path, modality="lidar")
                box_preds = result["pred_boxes"]
                if isinstance(box_preds, paddle.Tensor):
                    box_preds = box_preds.numpy()
                # convert box format to kitti, only for kitti eval
                box_preds = box_utils.boxes3d_lidar_to_kitti_lidar(box_preds)
                cls_labels = result["pred_labels"]
                cls_scores = result["pred_scores"]
                sample.bboxes_3d = BBoxes3D(
                    box_preds,
                    origin=[0.5, 0.5, 0],
                    coordmode="Lidar",
                    rot_axis=2)
                sample.labels = cls_labels.numpy()
                sample.confidences = cls_scores.numpy()
                sample.alpha = (-np.arctan2(-box_preds[:, 1], box_preds[:, 0]) +
                                box_preds[:, 6])
                if ("calibs" in batch_dict) and (batch_dict["calibs"] is
                                                 not None):
                    sample.calibs = [
                        calib.numpy() for calib in batch_dict["calibs"][i]
                    ]
            if ("meta" in batch_dict) and (batch_dict["meta"] is not None):
                sample.meta.update(batch_dict["meta"][i])
            sample_list.append(sample)
        return sample_list

    def export(self, save_dir, **kwargs):
        """Export the model to a static-graph inference program under save_dir."""
        self.export_model = True
        self.backbone.export_model = True
        input_spec = [{
            "data":
            InputSpec(shape=[-1, 4], name="data", dtype='float32')
        }]
        save_path = os.path.join(save_dir, 'iassd')
        paddle.jit.to_static(self, input_spec=input_spec)
        paddle.jit.save(self, save_path, input_spec=input_spec)
        logger.info("Exported model is saved in {}".format(save_path))
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/iassd/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .iassd import *
from .iassd_backbone import *
from .iassd_head import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/iassd/iassd_head.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on https://github.com/yifanzhang713/IA-SSD/blob/main/pcdet/models/dense_heads/IASSD_head.py
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.apis import manager
from .iassd_coder import PointResidual_BinOri_Coder
from .iassd_loss import (WeightedClassificationLoss, WeightedSmoothL1Loss,
get_corner_loss_lidar)
from paddle3d.ops import roiaware_pool3d
from paddle3d.models.common import enlarge_box3d, rotate_points_along_z
__all__ = ["IASSD_Head"]
@manager.HEADS.add_component
class IASSD_Head(nn.Layer):
    """Head of IA-SSD.

    Predicts per-center class scores and box residuals from the aggregated
    center features produced by the IA-SSD backbone, and computes the full
    IA-SSD training loss (vote / semantic / cls / reg / corner terms).

    Args:
        input_channel (int): input feature channel of IA-SSD Head.
        cls_fc (List[int]): hidden dims of the box classification branch.
        reg_fc (List[int]): hidden dims of the box regression branch.
        num_classes (int): number of classes.
        target_config (dict): config of box coder used to encode boxes.
        loss_config (dict): config of loss computation.
    """

    def __init__(self, input_channel, cls_fc, reg_fc, num_classes,
                 target_config, loss_config):
        super().__init__()
        # Populated by forward(); holds predictions and (in training) targets
        # consumed later by get_loss().
        self.forward_ret_dict = None
        self.num_classes = num_classes
        self.target_config = target_config
        self.loss_config = loss_config
        self.box_coder = PointResidual_BinOri_Coder(
            **target_config.get("box_coder_config"))
        self.cls_center_layers = self.make_fc_layers(
            fc_cfg=cls_fc,
            input_channel=input_channel,
            output_channel=num_classes)
        self.box_center_layers = self.make_fc_layers(
            fc_cfg=reg_fc,
            input_channel=input_channel,
            output_channel=self.box_coder.code_size,
        )
        self.build_loss()

    def make_fc_layers(self, fc_cfg, input_channel, output_channel):
        """Build an MLP of Linear+BN+ReLU blocks ending in a plain Linear."""
        fc_layers = []
        for k in range(len(fc_cfg)):
            fc_layers.extend([
                nn.Linear(input_channel, fc_cfg[k], bias_attr=False),
                nn.BatchNorm1D(fc_cfg[k]),
                nn.ReLU(),
            ])
            input_channel = fc_cfg[k]
        fc_layers.append(
            nn.Linear(input_channel, output_channel, bias_attr=True))
        return nn.Sequential(*fc_layers)

    def build_loss(self):
        """Instantiate loss sublayers according to ``loss_config``."""
        # classification loss
        if self.loss_config["loss_cls"] == "WeightedClassificationLoss":
            self.add_sublayer("cls_loss_func", WeightedClassificationLoss())
        else:
            raise NotImplementedError
        # regression loss
        if self.loss_config["loss_reg"] == "WeightedSmoothL1Loss":
            self.add_sublayer(
                "reg_loss_func",
                WeightedSmoothL1Loss(
                    code_weights=paddle.to_tensor(
                        self.loss_config["loss_weight"]["code_weights"])),
            )
        else:
            raise NotImplementedError
        # instance-aware loss
        if self.loss_config["loss_ins"] == "WeightedClassificationLoss":
            self.add_sublayer("ins_loss_func", WeightedClassificationLoss())
        else:
            raise NotImplementedError

    def get_loss(self):
        """Compute the total IA-SSD loss from ``self.forward_ret_dict``.

        Returns:
            Tensor: scalar sum of vote, SA-semantic, center-cls, center-box
            and (optionally) corner losses.
        """
        # vote loss
        center_loss_reg = self.get_contextual_vote_loss()
        # semantic loss in SA
        sa_loss_cls = self.get_sa_ins_layer_loss()
        # cls loss
        center_loss_cls = self.get_center_cls_layer_loss()
        # reg loss
        center_loss_box = self.get_center_box_binori_layer_loss()
        point_loss = (center_loss_reg + center_loss_cls + center_loss_box +
                      sa_loss_cls)
        # corner loss (optional). Fix: previously ``corner_loss`` was read
        # unconditionally, raising NameError when
        # ``corner_loss_regularization`` was disabled.
        if self.loss_config.get("corner_loss_regularization", False):
            point_loss = point_loss + self.get_corner_layer_loss()
        return point_loss

    def get_contextual_vote_loss(self):
        """Smooth-L1 loss between voted centers and GT box centers, averaged
        per foreground class then weighted by ``vote_weight``."""
        pos_mask = self.forward_ret_dict["center_origin_cls_labels"] > 0
        center_origin_loss_box = []
        for i in self.forward_ret_dict["center_origin_cls_labels"].unique():
            if i <= 0:  # skip background / ignore labels
                continue
            simple_pos_mask = self.forward_ret_dict[
                "center_origin_cls_labels"] == i
            center_box_labels = self.forward_ret_dict[
                "center_origin_gt_box_of_fg_points"][:, 0:3][(
                    pos_mask & simple_pos_mask)[pos_mask == 1]]
            centers_origin = self.forward_ret_dict["centers_origin"]
            ctr_offsets = self.forward_ret_dict["ctr_offsets"]
            centers_pred = centers_origin + ctr_offsets
            centers_pred = centers_pred[simple_pos_mask][:, 1:4]
            simple_center_origin_loss_box = F.smooth_l1_loss(
                centers_pred, center_box_labels)
            center_origin_loss_box.append(
                simple_center_origin_loss_box.unsqueeze(-1))
        center_origin_loss_box = paddle.concat(
            center_origin_loss_box, axis=-1).mean()
        center_origin_loss_box = (
            center_origin_loss_box *
            self.loss_config["loss_weight"]["vote_weight"])
        return center_origin_loss_box

    def get_center_cls_layer_loss(self):
        """Classification loss of predicted centers, optionally scaled by a
        centerness mask (centerness regularization)."""
        point_cls_labels = self.forward_ret_dict["center_cls_labels"].reshape(
            [-1])
        point_cls_preds = self.forward_ret_dict["center_cls_preds"].reshape(
            [-1, self.num_classes])
        positives = point_cls_labels > 0
        negative_cls_weights = (point_cls_labels == 0) * 1.0
        cls_weights = (
            1.0 * negative_cls_weights + 1.0 * positives).astype("float32")
        pos_normalizer = positives.sum(axis=0).astype("float32")
        cls_weights /= paddle.clip(pos_normalizer, min=1.0)
        # labels are 1-based; ignored points (-1) are clamped to class 0 and
        # then dropped by the [..., 1:] slice below
        one_hot_targets = F.one_hot(
            (point_cls_labels *
             (point_cls_labels >= 0).astype("int64")).astype("int64"),
            self.num_classes + 1,
        )
        one_hot_targets = one_hot_targets[..., 1:]
        if self.loss_config.get("centerness_regularization", False):
            centerness_mask = self.generate_center_ness_mask()
            one_hot_targets = one_hot_targets * centerness_mask.unsqueeze(
                -1).expand(one_hot_targets.shape)
        point_loss_cls = (self.cls_loss_func(
            point_cls_preds, one_hot_targets,
            weights=cls_weights).mean(axis=-1).sum())
        point_loss_cls = (point_loss_cls *
                          self.loss_config["loss_weight"]["point_cls_weight"])
        return point_loss_cls

    def get_sa_ins_layer_loss(self):
        """Instance-aware semantic loss over SA layers (valid for i=1,2 in
        IA-SSD); averaged over the layers that produced predictions."""
        sa_ins_labels = self.forward_ret_dict["sa_ins_labels"]
        sa_ins_preds = self.forward_ret_dict["sa_ins_preds"]
        sa_centerness_mask = self.generate_sa_center_ness_mask()
        sa_ins_loss, ignore = 0, 0
        for i in range(len(sa_ins_labels)):  # valid when i=1,2 for IA-SSD
            if len(sa_ins_preds[i]) != 0:
                # first channel is batch idx; remaining are class logits
                point_cls_preds = sa_ins_preds[i][..., 1:].reshape(
                    [-1, self.num_classes])
            else:
                ignore += 1
                continue
            point_cls_labels = sa_ins_labels[i].reshape([-1])
            positives = point_cls_labels > 0
            negative_cls_weights = (point_cls_labels == 0) * 1.0
            cls_weights = (
                negative_cls_weights + 1.0 * positives).astype("float32")
            pos_normalizer = positives.sum(axis=0).astype("float32")
            cls_weights /= paddle.clip(pos_normalizer, min=1.0)
            one_hot_targets = F.one_hot(
                (point_cls_labels *
                 (point_cls_labels >= 0).astype("int64")).astype("int64"),
                self.num_classes + 1,
            )
            one_hot_targets = one_hot_targets[..., 1:]
            if "ctr" in self.loss_config["sample_method_list"][i + 1]:
                centerness_mask = sa_centerness_mask[i]
                one_hot_targets = one_hot_targets * centerness_mask.unsqueeze(
                    -1).expand(one_hot_targets.shape)
            point_loss_ins = (self.ins_loss_func(
                point_cls_preds, one_hot_targets,
                weights=cls_weights).mean(axis=-1).sum())
            loss_weights = self.loss_config["loss_weight"]["ins_aware_weight"]
            point_loss_ins = point_loss_ins * loss_weights[i]
            sa_ins_loss += point_loss_ins
        sa_ins_loss = sa_ins_loss / (len(sa_ins_labels) - ignore)
        return sa_ins_loss

    def generate_center_ness_mask(self):
        """Per-point centerness in [0, 1] for foreground centers, computed
        from distances to the 6 faces of the assigned GT box (canonical
        frame), cube-rooted as in the IA-SSD paper."""
        pos_mask = self.forward_ret_dict["center_cls_labels"] > 0
        gt_boxes = self.forward_ret_dict["center_gt_box_of_fg_points"]
        centers = self.forward_ret_dict["centers"][:, 1:]
        centers = centers[pos_mask].clone().detach()
        offset_xyz = centers[:, 0:3] - gt_boxes[:, 0:3]
        # rotate offsets into the box's canonical (axis-aligned) frame
        offset_xyz_canical = rotate_points_along_z(
            offset_xyz.unsqueeze(axis=1), -gt_boxes[:, 6]).squeeze(axis=1)
        template = paddle.to_tensor([[1, 1, 1], [-1, -1, -1]],
                                    dtype=gt_boxes.dtype) / 2
        margin = (gt_boxes[:, None, 3:6].expand([gt_boxes.shape[0], 2, 3]) *
                  template[None, :, :])
        distance = margin - offset_xyz_canical[:, None, :].expand(
            [offset_xyz_canical.shape[0], 2, offset_xyz_canical.shape[1]])
        distance[:, 1, :] = -1 * distance[:, 1, :]
        distance_min = paddle.where(distance[:, 0, :] < distance[:, 1, :],
                                    distance[:, 0, :], distance[:, 1, :])
        distance_max = paddle.where(distance[:, 0, :] > distance[:, 1, :],
                                    distance[:, 0, :], distance[:, 1, :])
        centerness = distance_min / distance_max
        centerness = centerness[:, 0] * centerness[:, 1] * centerness[:, 2]
        centerness = paddle.clip(centerness, min=1e-6)
        centerness = paddle.pow(centerness, 1 / 3)
        centerness_mask = paddle.zeros(pos_mask.shape).astype("float32")
        centerness_mask[pos_mask] = centerness
        return centerness_mask

    def generate_sa_center_ness_mask(self):
        """Centerness masks for every SA layer, same formulation as
        ``generate_center_ness_mask`` applied per layer."""
        sa_pos_mask = self.forward_ret_dict["sa_ins_labels"]
        sa_gt_boxes = self.forward_ret_dict["sa_gt_box_of_fg_points"]
        sa_xyz_coords = self.forward_ret_dict["sa_xyz_coords"]
        sa_centerness_mask = []
        for i in range(len(sa_pos_mask)):
            pos_mask = sa_pos_mask[i] > 0
            gt_boxes = sa_gt_boxes[i]
            xyz_coords = sa_xyz_coords[i].reshape(
                [-1, sa_xyz_coords[i].shape[-1]])[:, 1:]
            xyz_coords = xyz_coords[pos_mask].clone().detach()
            offset_xyz = xyz_coords[:, 0:3] - gt_boxes[:, 0:3]
            offset_xyz_canical = rotate_points_along_z(
                offset_xyz.unsqueeze(axis=1), -gt_boxes[:, 6]).squeeze(axis=1)
            template = (paddle.to_tensor([[1, 1, 1], [-1, -1, -1]],
                                         dtype=gt_boxes.dtype) / 2)
            margin = (gt_boxes[:, None, 3:6].expand([gt_boxes.shape[0], 2, 3]) *
                      template[None, :, :])
            distance = margin - offset_xyz_canical[:, None, :].expand(
                [offset_xyz_canical.shape[0], 2, offset_xyz_canical.shape[1]])
            distance[:, 1, :] = -1 * distance[:, 1, :]
            distance_min = paddle.where(
                distance[:, 0, :] < distance[:, 1, :],
                distance[:, 0, :],
                distance[:, 1, :],
            )
            distance_max = paddle.where(
                distance[:, 0, :] > distance[:, 1, :],
                distance[:, 0, :],
                distance[:, 1, :],
            )
            centerness = distance_min / distance_max
            centerness = centerness[:, 0] * centerness[:, 1] * centerness[:, 2]
            centerness = paddle.clip(centerness, min=1e-6)
            centerness = paddle.pow(centerness, 1 / 3)
            centerness_mask = paddle.zeros(pos_mask.shape).astype("float32")
            centerness_mask[pos_mask] = centerness
            sa_centerness_mask.append(centerness_mask)
        return sa_centerness_mask

    def get_center_box_binori_layer_loss(self):
        """Box regression loss: smooth-L1 on xyzwhl residuals plus bin-based
        orientation classification + residual regression."""
        pos_mask = self.forward_ret_dict["center_cls_labels"] > 0
        point_box_labels = self.forward_ret_dict["center_box_labels"]
        point_box_preds = self.forward_ret_dict["center_box_preds"]
        reg_weights = pos_mask.astype("float32")
        pos_normalizer = pos_mask.sum().astype("float32")
        reg_weights /= paddle.clip(pos_normalizer, min=1.0)
        pred_box_xyzwhl = point_box_preds[:, :6]
        label_box_xyzwhl = point_box_labels[:, :6]
        point_loss_box_src = self.reg_loss_func(
            pred_box_xyzwhl[None, ...],
            label_box_xyzwhl[None, ...],
            weights=reg_weights[None, ...],
        )
        point_loss_xyzwhl = point_loss_box_src.sum()
        # orientation encoded as bin id (classification) + in-bin residual
        pred_ori_bin_id = point_box_preds[:, 6:6 + self.box_coder.bin_size]
        pred_ori_bin_res = point_box_preds[:, 6 + self.box_coder.bin_size:]
        label_ori_bin_id = point_box_labels[:, 6]
        label_ori_bin_res = point_box_labels[:, 7]
        criterion = nn.CrossEntropyLoss(reduction="none")
        loss_ori_cls = criterion(pred_ori_bin_id,
                                 label_ori_bin_id.astype("int64"))
        loss_ori_cls = paddle.sum(loss_ori_cls * reg_weights)
        label_id_one_hot = F.one_hot(
            label_ori_bin_id.astype("int64"), self.box_coder.bin_size)
        # select predicted residual of the ground-truth bin only
        pred_ori_bin_res = paddle.sum(
            pred_ori_bin_res * label_id_one_hot.astype("float32"), axis=-1)
        loss_ori_reg = F.smooth_l1_loss(pred_ori_bin_res, label_ori_bin_res)
        loss_ori_reg = paddle.sum(loss_ori_reg * reg_weights)
        loss_ori_cls = loss_ori_cls * self.loss_config["loss_weight"][
            "dir_weight"]
        point_loss_box = point_loss_xyzwhl + loss_ori_reg + loss_ori_cls
        point_loss_box = (point_loss_box *
                          self.loss_config["loss_weight"]["point_box_weight"])
        return point_loss_box

    def get_corner_layer_loss(self):
        """Corner loss between decoded foreground boxes and their GT boxes."""
        pos_mask = self.forward_ret_dict["center_cls_labels"] > 0
        gt_boxes = self.forward_ret_dict["center_gt_box_of_fg_points"]
        pred_boxes = self.forward_ret_dict["point_box_preds"]
        pred_boxes = pred_boxes[pos_mask]
        loss_corner = get_corner_loss_lidar(pred_boxes[:, 0:7],
                                            gt_boxes[:, 0:7])
        loss_corner = loss_corner.mean()
        loss_corner = loss_corner * self.loss_config["loss_weight"][
            "corner_weight"]
        return loss_corner

    def assign_stack_targets_IASSD(
            self,
            points,
            gt_boxes,
            extend_gt_boxes=None,
            weighted_labels=False,
            ret_box_labels=False,
            ret_offset_labels=True,
            set_ignore_flag=True,
            use_ball_constraint=False,
            central_radius=2.0,
            use_query_assign=False,
            central_radii=2.0,
            use_ex_gt_assign=False,
            fg_pc_ignore=False,
            binary_label=False,
    ):
        """Assign per-point classification / box-regression targets.

        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
            extend_gt_boxes: [B, M, 8] enlarged boxes used for the ignore
                band or extended foreground assignment.
        Returns:
            dict with:
                point_cls_labels: (N1 + N2 + N3 + ...), int64,
                    0: background, -1: ignored
                point_box_labels: (N1 + N2 + N3 + ..., code_size) or None
                gt_box_of_fg_points / box_idxs_labels / gt_box_of_points
        """
        assert len(
            points.shape
        ) == 2 and points.shape[1] == 4, "points.shape=%s" % str(points.shape)
        assert (len(gt_boxes.shape) == 3
                and gt_boxes.shape[2] == 8), "gt_boxes.shape=%s" % str(
                    gt_boxes.shape)
        assert (extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3
                and extend_gt_boxes.shape[2] == 8
                ), "extend_gt_boxes.shape=%s" % str(extend_gt_boxes.shape)
        batch_size = gt_boxes.shape[0]
        bs_idx = points[:, 0]
        point_cls_labels = paddle.zeros([points.shape[0]]).astype("int64")
        point_box_labels = (paddle.zeros([points.shape[0], 8])
                            if ret_box_labels else None)
        box_idxs_labels = paddle.zeros([points.shape[0]]).astype("int64")
        gt_boxes_of_fg_points = []
        gt_box_of_points = paddle.zeros([points.shape[0], 8],
                                        dtype=gt_boxes.dtype)
        for k in range(batch_size):
            bs_mask = bs_idx == k
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = paddle.zeros([bs_mask.sum()],
                                                   dtype=point_cls_labels.dtype)
            box_idxs_of_pts = (roiaware_pool3d.points_in_boxes_gpu(
                points_single.unsqueeze(axis=0),
                gt_boxes[k:k + 1, :, 0:7]).astype("int64").squeeze(axis=0))
            box_fg_flag = box_idxs_of_pts >= 0
            if use_ex_gt_assign:
                extend_box_idxs_of_pts = (roiaware_pool3d.points_in_boxes_gpu(
                    points_single.unsqueeze(axis=0),
                    extend_gt_boxes[k:k + 1, :, 0:7],
                ).astype("int64").squeeze(axis=0))
                extend_fg_flag = extend_box_idxs_of_pts >= 0
                extend_box_idxs_of_pts[box_fg_flag] = box_idxs_of_pts[
                    box_fg_flag]  # instance points should keep unchanged
                if fg_pc_ignore:
                    fg_flag = extend_fg_flag ^ box_fg_flag
                    extend_box_idxs_of_pts[box_idxs_of_pts != -1] = -1
                    box_idxs_of_pts = extend_box_idxs_of_pts
                else:
                    fg_flag = extend_fg_flag
                    box_idxs_of_pts = extend_box_idxs_of_pts
            elif set_ignore_flag:
                extend_box_idxs_of_pts = (roiaware_pool3d.points_in_boxes_gpu(
                    points_single.unsqueeze(axis=0),
                    extend_gt_boxes[k:k + 1, :, 0:7],
                ).astype("int64").squeeze(axis=0))
                fg_flag = box_fg_flag
                # points inside the enlarged box but outside the real box are
                # ignored during training
                ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            elif use_ball_constraint:
                box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()
                box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2
                ball_flag = (box_centers - points_single).norm(
                    axis=1) < central_radius
                fg_flag = box_fg_flag & ball_flag
            else:
                raise NotImplementedError
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            point_cls_labels_single[fg_flag] = (
                1 if self.num_classes == 1 or binary_label else
                gt_box_of_fg_points[:, -1].astype("int64"))
            point_cls_labels[bs_mask] = point_cls_labels_single
            bg_flag = point_cls_labels_single == 0  # except ignore_id
            # box_bg_flag
            fg_flag = fg_flag ^ (fg_flag & bg_flag)
            gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]
            gt_boxes_of_fg_points.append(gt_box_of_fg_points)
            box_idxs_labels[bs_mask] = box_idxs_of_pts
            # FIXME: -1 index slice is not supported in paddle
            box_idxs_of_pts[box_idxs_of_pts == -1] = gt_boxes[k].shape[0] - 1
            gt_box_of_points[bs_mask] = gt_boxes[k][box_idxs_of_pts]
            if ret_box_labels and gt_box_of_fg_points.shape[0] > 0:
                point_box_labels_single = paddle.zeros(
                    [bs_mask.sum(), 8], dtype=point_box_labels.dtype)
                fg_point_box_labels = self.box_coder.encode_paddle(
                    gt_boxes=gt_box_of_fg_points[:, :-1],
                    points=points_single[fg_flag],
                    gt_classes=gt_box_of_fg_points[:, -1].astype("int64"),
                )
                point_box_labels_single[fg_flag] = fg_point_box_labels
                point_box_labels[bs_mask] = point_box_labels_single
        gt_boxes_of_fg_points = paddle.concat(gt_boxes_of_fg_points, axis=0)
        targets_dict = {
            "point_cls_labels": point_cls_labels,
            "point_box_labels": point_box_labels,
            "gt_box_of_fg_points": gt_boxes_of_fg_points,
            "box_idxs_labels": box_idxs_labels,
            "gt_box_of_points": gt_box_of_points,
        }
        return targets_dict

    def assign_targets(self, input_dict):
        """Build training targets for centers, SA layers and vote origins.

        Args:
            input_dict:
                batch_size: int
                centers: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                centers_origin: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
                encoder_coords: List of point_coords in SA
                bboxes_3d (optional): (B, M, 8)
        Returns:
            target_dict with center / sa / center-origin targets.
        """
        gt_boxes = input_dict["bboxes_3d"]
        batch_size = input_dict["batch_size"]
        targets_dict = {}
        # targets for predicted centers (enlarged boxes give the ignore band)
        extend_gt = gt_boxes
        extend_gt_boxes = enlarge_box3d(
            extend_gt.reshape([-1, extend_gt.shape[-1]]),
            extra_width=self.target_config.get("gt_extra_width"),
        ).reshape([batch_size, -1, extend_gt.shape[-1]])
        assert gt_boxes.shape.__len__() == 3, "bboxes_3d.shape=%s" % str(
            gt_boxes.shape)
        center_targets_dict = self.assign_stack_targets_IASSD(
            points=input_dict["centers"].detach(),
            gt_boxes=extend_gt,
            extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True,
            use_ball_constraint=False,
            ret_box_labels=True,
        )
        targets_dict["center_gt_box_of_fg_points"] = center_targets_dict[
            "gt_box_of_fg_points"]
        targets_dict["center_cls_labels"] = center_targets_dict[
            "point_cls_labels"]
        targets_dict["center_box_labels"] = center_targets_dict[
            "point_box_labels"]
        targets_dict["center_gt_box_of_points"] = center_targets_dict[
            "gt_box_of_points"]
        # targets for the SA-layer semantic predictions
        (
            sa_ins_labels,
            sa_gt_box_of_fg_points,
            sa_xyz_coords,
            sa_gt_box_of_points,
            sa_box_idxs_labels,
        ) = ([], [], [], [], [])
        sa_ins_preds = input_dict["sa_ins_preds"]
        for i in range(1, len(sa_ins_preds)):  # valid when i = 1,2 for IA-SSD
            sa_xyz = input_dict["encoder_coords"][i]
            if i == 1:
                extend_gt_boxes = enlarge_box3d(
                    gt_boxes.reshape([-1, gt_boxes.shape[-1]]),
                    extra_width=[0.5, 0.5, 0.5],
                ).reshape([batch_size, -1, gt_boxes.shape[-1]])
                sa_targets_dict = self.assign_stack_targets_IASSD(
                    points=sa_xyz.reshape([-1, sa_xyz.shape[-1]]).detach(),
                    gt_boxes=gt_boxes,
                    extend_gt_boxes=extend_gt_boxes,
                    set_ignore_flag=True,
                    use_ex_gt_assign=False,
                )
            if i >= 2:
                extend_gt_boxes = enlarge_box3d(
                    gt_boxes.reshape([-1, gt_boxes.shape[-1]]),
                    extra_width=[0.5, 0.5, 0.5],
                ).reshape([batch_size, -1, gt_boxes.shape[-1]])
                sa_targets_dict = self.assign_stack_targets_IASSD(
                    points=sa_xyz.reshape([-1, sa_xyz.shape[-1]]).detach(),
                    gt_boxes=gt_boxes,
                    extend_gt_boxes=extend_gt_boxes,
                    set_ignore_flag=False,
                    use_ex_gt_assign=True,
                )
            sa_xyz_coords.append(sa_xyz)
            sa_ins_labels.append(sa_targets_dict["point_cls_labels"])
            sa_gt_box_of_fg_points.append(
                sa_targets_dict["gt_box_of_fg_points"])
            sa_gt_box_of_points.append(sa_targets_dict["gt_box_of_points"])
            sa_box_idxs_labels.append(sa_targets_dict["box_idxs_labels"])
        targets_dict["sa_ins_labels"] = sa_ins_labels
        targets_dict["sa_gt_box_of_fg_points"] = sa_gt_box_of_fg_points
        targets_dict["sa_xyz_coords"] = sa_xyz_coords
        targets_dict["sa_gt_box_of_points"] = sa_gt_box_of_points
        targets_dict["sa_box_idxs_labels"] = sa_box_idxs_labels
        # targets for the vote origins (pre-vote center candidates)
        extend_gt_boxes = enlarge_box3d(
            gt_boxes.reshape([-1, gt_boxes.shape[-1]]),
            extra_width=self.target_config.get("extra_width"),
        ).reshape([batch_size, -1, gt_boxes.shape[-1]])
        points = input_dict["centers_origin"].detach()
        center_targets_dict = self.assign_stack_targets_IASSD(
            points=points,
            gt_boxes=gt_boxes,
            extend_gt_boxes=extend_gt_boxes,
            set_ignore_flag=True,
            use_ball_constraint=False,
            ret_box_labels=True,
            use_ex_gt_assign=True,
            fg_pc_ignore=False,
        )
        targets_dict["center_origin_gt_box_of_fg_points"] = center_targets_dict[
            "gt_box_of_fg_points"]
        targets_dict["center_origin_cls_labels"] = center_targets_dict[
            "point_cls_labels"]
        targets_dict["center_origin_box_idxs_of_pts"] = center_targets_dict[
            "box_idxs_labels"]
        targets_dict["gt_box_of_center_origin"] = center_targets_dict[
            "gt_box_of_points"]
        return targets_dict

    def generate_predicted_boxes(self, points, point_cls_preds,
                                 point_box_preds):
        """Decode box predictions using the class with the highest score.

        Args:
            points: (N, 3)
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size)
        Returns:
            point_cls_preds: (N, num_class)
            point_box_preds: (N, box_code_size) decoded boxes
        """
        pred_classes = point_cls_preds.argmax(axis=-1)
        point_box_preds = self.box_coder.decode_paddle(point_box_preds, points,
                                                       pred_classes + 1)
        return point_cls_preds, point_box_preds

    def forward(self, batch_dict):
        """Run the head on center features; assign targets when training and
        decode boxes when needed for inference or regularization losses."""
        center_features = batch_dict[
            "centers_features"]  # (total_centers, C) total_centers = bs * npoints
        center_coords = batch_dict["centers"]  # (total_centers, 4)
        center_cls_preds = self.cls_center_layers(
            center_features)  # (total_centers, num_class)
        center_box_preds = self.box_center_layers(
            center_features)  # (total_centers, box_code_size)
        ret_dict = {
            "center_cls_preds": center_cls_preds,
            "center_box_preds": center_box_preds,
            "ctr_offsets": batch_dict["ctr_offsets"],
            "centers": batch_dict["centers"],
            "centers_origin": batch_dict["centers_origin"],
            "sa_ins_preds": batch_dict["sa_ins_preds"],
        }
        if self.training:
            targets_dict = self.assign_targets(batch_dict)
            ret_dict.update(targets_dict)
        if (not self.training or self.loss_config["corner_loss_regularization"]
                or self.loss_config["centerness_regularization"]):
            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
                points=center_coords[:, 1:4],
                point_cls_preds=center_cls_preds,
                point_box_preds=center_box_preds,
            )
            batch_dict["batch_cls_preds"] = point_cls_preds
            batch_dict["batch_box_preds"] = point_box_preds
            batch_dict["batch_index"] = center_coords[:, 0]
            batch_dict["cls_preds_normalized"] = False
            ret_dict["point_box_preds"] = point_box_preds
        self.forward_ret_dict = ret_dict
        return batch_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/iassd/iassd_backbone.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on https://github.com/yifanzhang713/IA-SSD/blob/main/pcdet/models/backbones_3d/IASSD_backbone.py
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.apis import manager
from .iassd_modules import SAModuleMSG_WithSampling, Vote_layer
__all__ = ["IASSD_Backbone"]
@manager.BACKBONES.add_component
class IASSD_Backbone(nn.Layer):
    """Backbone of IA-SSD

    Stacks multi-scale SA (set-abstraction) layers with instance-aware
    sampling and a vote layer that shifts candidate points toward instance
    centers. Outputs per-center features consumed by IASSD_Head.

    Args:
        npoint_list (List[int]): num of sampled points in each layer.
        sample_method_list (List[str]): sample method in each layer.
        radius_list (List[List[float]]): radius params in multi-scale SA layer.
        nsample_list (List[List[int]]): num of sampled points in multi-scale SA layer.
        mlps (List[List[int]]): hidden dim of mlps in SA layer.
        layer_types (List[str]): type of layer, SA or Vote layer in IA-SSD.
        dilated_group (List[bool]): not implemented, set to False in default.
        aggregation_mlps (List[List[int]]): hidden dim of aggregation mlps, used to aggregate the outputs of multi-scale SA layer.
        confidence_mlps (List[List[int]]): hidden dim of confidence mlps, used to predict classes of each point.
        layer_input (List[int]): index of layer input, determine which layer's outputs feeded in current layer.
        ctr_index (List[int]): index of centroid, determine which layer's outputs used in centroid prediction.
        max_translate_range (List[float]): limit the max range of predicted offset in Vote layer.
        input_channel (int): input pointcloud feature dim.
        num_classes (int): number of classes.
    """

    def __init__(
            self,
            npoint_list,
            sample_method_list,
            radius_list,
            nsample_list,
            mlps,
            layer_types,
            dilated_group,
            aggregation_mlps,
            confidence_mlps,
            layer_input,
            ctr_index,
            max_translate_range,
            input_channel,
            num_classes,
    ):
        super().__init__()
        self.npoint_list = npoint_list
        self.sample_method_list = sample_method_list
        self.radius_list = radius_list
        self.nsample_list = nsample_list
        self.mlps = mlps
        self.layer_types = layer_types
        self.dilated_group = dilated_group
        self.aggregation_mlps = aggregation_mlps
        self.confidence_mlps = confidence_mlps
        self.layer_input = layer_input
        self.ctr_idx_list = ctr_index
        self.max_translate_range = max_translate_range
        # when True, forward() prepends a batch index column itself
        # (export/deployment path with fixed batch_size=1)
        self.export_model = False
        # first 3 input dims are xyz; the rest are per-point features
        channel_in = input_channel - 3
        channel_out_list = [channel_in]
        self.SA_modules = nn.LayerList()
        for k in range(len(self.nsample_list)):
            channel_in = channel_out_list[self.layer_input[k]]
            if self.layer_types[k] == "SA_Layer":
                mlps = self.mlps[k].copy()
                channel_out = 0
                # each scale's mlp is prefixed with the input channel; the
                # multi-scale outputs are concatenated, hence the sum
                for idx in range(len(mlps)):
                    mlps[idx] = [channel_in] + mlps[idx]
                    channel_out += mlps[idx][-1]
                if self.aggregation_mlps and self.aggregation_mlps[k]:
                    aggregation_mlp = self.aggregation_mlps[k].copy()
                    if len(aggregation_mlp) == 0:
                        aggregation_mlp = None
                    else:
                        # aggregation mlp replaces the concatenated channels
                        channel_out = aggregation_mlp[-1]
                else:
                    aggregation_mlp = None
                if self.confidence_mlps and self.confidence_mlps[k]:
                    confidence_mlp = self.confidence_mlps[k].copy()
                    if len(confidence_mlp) == 0:
                        confidence_mlp = None
                else:
                    confidence_mlp = None
                self.SA_modules.append(
                    SAModuleMSG_WithSampling(
                        npoint=self.npoint_list[k],
                        sample_range=-1,
                        sample_type=self.sample_method_list[k],
                        radii=self.radius_list[k],
                        nsamples=self.nsample_list[k],
                        mlps=mlps,
                        use_xyz=True,
                        dilated_group=self.dilated_group[k],
                        aggregation_mlp=aggregation_mlp,
                        confidence_mlp=confidence_mlp,
                        num_classes=num_classes,
                    ))
            elif self.layer_types[k] == "Vote_Layer":
                self.SA_modules.append(
                    Vote_layer(
                        mlp_list=self.mlps[k],
                        pre_channel=channel_out_list[self.layer_input[k]],
                        max_translate_range=self.max_translate_range,
                    ))
            # NOTE: for a Vote_Layer this appends the previous SA layer's
            # channel_out — the vote layer keeps the feature dim unchanged
            channel_out_list.append(channel_out)
        self.num_point_features = channel_out

    def forward(self, batch_dict):
        """
        Args:
            batch_dict: input dict of batched point data and box annos.
                data: (num_points * B, 3 + C), input point cloud, C is feature dim.
                batch_size: B.
                num_points: number of points in single point cloud
        Return:
            batch_dict: adds new fields to the input batch_dict
                (ctr_offsets, centers, centers_origin, centers_features,
                ctr_batch_idx, encoder_xyz, encoder_coords, sa_ins_preds,
                encoder_features)
        """
        points = batch_dict["data"]
        # for export only
        if self.export_model:
            batch_dict["batch_size"] = 1
            points = self.stack_batch_idx_to_points(
                points, num_points=16384)  # 16384 for kitti
        batch_size = batch_dict["batch_size"]
        batch_idx, xyz, features = self.break_up_pc(points)
        xyz = xyz.reshape([batch_size, -1, 3])
        # features: (B, C, N) channel-first layout expected by SA modules
        features = (features.reshape([
            batch_size, -1, features.shape[-1]
        ]).transpose([0, 2, 1]) if features is not None else None)
        encoder_xyz, encoder_features, sa_ins_preds = [xyz], [features], []
        encoder_coords = [
            paddle.concat([batch_idx.reshape([batch_size, -1, 1]), xyz],
                          axis=-1)
        ]
        # yapf: disable
        li_cls_pred = None
        var_dict = dict()
        for i in range(len(self.SA_modules)):
            xyz_input = encoder_xyz[self.layer_input[i]]
            feature_input = encoder_features[self.layer_input[i]]
            if self.layer_types[i] == "SA_Layer":
                # ctr_xyz (when set) forces grouping around a fixed center set
                ctr_xyz = (encoder_xyz[self.ctr_idx_list[i]] if self.ctr_idx_list[i] != -1 else None)
                li_xyz, li_features, li_cls_pred = self.SA_modules[i](
                    xyz_input, feature_input, li_cls_pred, ctr_xyz=ctr_xyz)
            elif self.layer_types[i] == "Vote_Layer":
                li_xyz, li_features, xyz_select, ctr_offsets = self.SA_modules[
                    i](xyz_input, feature_input)
                centers = li_xyz
                centers_origin = xyz_select
                var_dict["ctr_offsets"] = ctr_offsets
                var_dict["centers"] = centers
                var_dict["centers_origin"] = centers_origin
                center_origin_batch_idx = batch_idx.reshape([batch_size, -1])[:, :centers_origin.shape[1]]
                encoder_coords.append(
                    paddle.concat(
                        [
                            center_origin_batch_idx[..., None].astype("float32"),
                            centers_origin.reshape([batch_size, -1, 3])
                        ],
                        axis=-1
                    ))
            encoder_xyz.append(li_xyz)
            li_batch_idx = batch_idx.reshape([batch_size, -1])[:, :li_xyz.shape[1]]
            encoder_coords.append(
                paddle.concat(
                    [
                        li_batch_idx[..., None].astype("float32"),
                        li_xyz.reshape([batch_size, -1, 3])
                    ],
                    axis=-1
                ))
            encoder_features.append(li_features)
            if li_cls_pred is not None:
                li_cls_batch_idx = batch_idx.reshape([batch_size, -1])[:, :li_cls_pred.shape[1]]
                sa_ins_preds.append(
                    paddle.concat(
                        [
                            li_cls_batch_idx[..., None].astype("float32"),
                            li_cls_pred.reshape([batch_size, -1, li_cls_pred.shape[-1]])
                        ],
                        axis=-1
                    ))
            else:
                sa_ins_preds.append([])
        # flatten (B, npoint) batched outputs into stacked (B*npoint, 4) form
        ctr_batch_idx = batch_idx.reshape([batch_size, -1])[:, :encoder_xyz[-1].shape[1]]
        ctr_batch_idx = ctr_batch_idx.reshape([-1])
        batch_dict["ctr_offsets"] = paddle.concat(
            [
                ctr_batch_idx[:, None].astype("float32"),
                var_dict["ctr_offsets"].reshape([-1, 3])
            ],
            axis=1
        )
        batch_dict["centers"] = paddle.concat(
            [
                ctr_batch_idx[:, None].astype("float32"),
                var_dict["centers"].reshape([-1, 3])
            ],
            axis=1
        )
        batch_dict["centers_origin"] = paddle.concat(
            [
                ctr_batch_idx[:, None].astype("float32"),
                var_dict["centers_origin"].reshape([-1, 3])
            ],
            axis=1
        )
        # yapf: enable
        center_features = (encoder_features[-1].transpose([0, 2, 1]).reshape(
            [-1, encoder_features[-1].shape[1]]))
        batch_dict["centers_features"] = center_features
        batch_dict["ctr_batch_idx"] = ctr_batch_idx
        batch_dict["encoder_xyz"] = encoder_xyz
        batch_dict["encoder_coords"] = encoder_coords
        batch_dict["sa_ins_preds"] = sa_ins_preds
        batch_dict["encoder_features"] = encoder_features
        return batch_dict

    def break_up_pc(self, pc):
        """break up point cloud into xyz + point_feature

        Args:
            pc: (num_points * B, C)
        Return:
            batch_idx: (num_points * B, 1), batch index of input data
            xyz: (num_points * B, 3), coordinates of points
            features: (num_points * B, C), features of points, or None
        """
        batch_idx = pc[:, 0]  # (B*N, 1)
        xyz = pc[:, 1:4]  # (B*N, 3)
        features = pc[:, 4:] if pc.shape[
            -1] > 4 else None  # (B*N, C) C=1 for intensity
        return batch_idx, xyz, features

    def stack_batch_idx_to_points(self, points, num_points=16384):
        """Prepend a zero batch-index column (export path, batch size 1)."""
        batch_idx = paddle.zeros([num_points, 1], dtype='float32')
        points = paddle.concat([batch_idx, points], axis=-1)
        return points
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/iassd/iassd_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.models.common import boxes_to_corners_3d
class WeightedClassificationLoss(nn.Layer):
    """Sigmoid cross-entropy classification loss with optional
    anchor-wise weighting and selectable last-axis reduction."""

    def __init__(self):
        super(WeightedClassificationLoss, self).__init__()

    @staticmethod
    def sigmoid_cross_entropy_with_logits(input, target):
        """ Paddle Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
            max(x, 0) - x * z + log(1 + exp(-abs(x))) in
            https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
        Args:
            input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class
            target: (B, #anchors, #classes) float tensor.
                One-hot encoded classification targets
        Returns:
            loss: (B, #anchors, #classes) float tensor.
                Sigmoid cross entropy loss without reduction
        """
        # numerically stable form of the sigmoid BCE
        return (paddle.clip(input, min=0) - input * target +
                paddle.log1p(paddle.exp(-paddle.abs(input))))

    def forward(self, input, target, weights=None, reduction='none'):
        """
        Args:
            input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
            target: (B, #anchors, #classes) float tensor.
                One-hot classification targets.
            weights: (B, #anchors) float tensor.
                Anchor-wise weights.
        Returns:
            loss: (B, #anchors) float tensor.
                Weighted cross entropy loss, reduced over the last axis
                when reduction is 'sum' or 'mean'.
        """
        loss = self.sigmoid_cross_entropy_with_logits(input, target)
        if weights is not None:
            weights_rank = weights.shape.__len__()
            target_rank = target.shape.__len__()
            # broadcast anchor-wise weights across the class axis
            if weights_rank == 2 or (weights_rank == 1 and target_rank == 2):
                weights = weights.unsqueeze(-1)
            assert weights.shape.__len__() == loss.shape.__len__()
            loss = loss * weights
        if reduction == 'sum':
            return loss.sum(axis=-1)
        if reduction == 'mean':
            return loss.mean(axis=-1)
        # 'none' (or any other value) keeps the element-wise loss
        return loss
class WeightedSmoothL1Loss(nn.Layer):
    """
    Please refer to:
        <https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py>
                      | 0.5 * x ** 2 / beta   if abs(x) < beta
        smoothl1(x) = |
                      | abs(x) - 0.5 * beta   otherwise,
    where x = input - target.
    """

    def __init__(self, beta=1.0 / 9.0, code_weights=None):
        """
        Args:
            beta: Scalar float.
                L1 to L2 change point.
                For beta values < 1e-5, L1 loss is computed.
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedSmoothL1Loss, self).__init__()
        self.beta = beta
        self.code_weights = code_weights

    @staticmethod
    def smooth_l1_loss(diff, beta):
        """Element-wise smooth-L1; pure L1 when beta is effectively zero."""
        abs_diff = paddle.abs(diff)
        if beta < 1e-5:
            return abs_diff
        return paddle.where(abs_diff < beta, 0.5 * abs_diff**2 / beta,
                            abs_diff - 0.5 * beta)

    def forward(self, input, target, weights):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.
        Returns:
            loss: (B, #anchors) float tensor.
                Weighted smooth l1 loss without reduction.
        """
        # ignore nan targets by treating them as perfect predictions
        target = paddle.where(paddle.isnan(target), input, target)
        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.reshape([1, 1, -1])
        loss = self.smooth_l1_loss(diff, self.beta)
        # anchor-wise weighting
        if weights is not None:
            assert (weights.shape[0] == loss.shape[0]
                    and weights.shape[1] == loss.shape[1])
            loss = loss * weights.unsqueeze(-1)
        return loss
def get_corner_loss_lidar(pred_bbox3d, gt_bbox3d):
    """Corner loss between predicted and GT LiDAR boxes.

    Each GT box is also evaluated with its heading flipped by pi, and the
    smaller per-corner distance is used (heading ambiguity).

    Args:
        pred_bbox3d: (N, 7) float Tensor.
        gt_bbox3d: (N, 7) float Tensor.
    Returns:
        corner_loss: (N) float Tensor.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
    pred_corners = boxes_to_corners_3d(pred_bbox3d)
    gt_corners = boxes_to_corners_3d(gt_bbox3d)
    flipped_gt = gt_bbox3d.clone()
    flipped_gt[:, 6] += np.pi
    gt_corners_flip = boxes_to_corners_3d(flipped_gt)
    # (N, 8): per-corner distance to the closer of the two headings
    corner_dist = paddle.minimum(
        paddle.linalg.norm(pred_corners - gt_corners, axis=2),
        paddle.linalg.norm(pred_corners - gt_corners_flip, axis=2))
    # (N, 8) -> (N): smooth-L1 per corner, averaged over the 8 corners
    return WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0).mean(
        axis=1)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/iassd/iassd_modules.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on https://github.com/yifanzhang713/IA-SSD/blob/main/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_modules.py
import os
from typing import List
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.ops import pointnet2_ops
__all__ = ["SAModuleMSG_WithSampling", "Vote_layer"]
class QueryAndGroup(nn.Layer):
    """Ball-query grouping operator.

    For every query point in ``new_xyz``, gathers up to ``nsample`` neighbors
    of ``xyz`` within ``radius`` and returns their features, with neighbor
    coordinates expressed relative to the query point.
    """

    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        Args:
            radius: ball-query search radius.
            nsample: maximum number of neighbors gathered per query point.
            use_xyz: if True, concatenate relative xyz onto grouped features.
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz

    def forward(self, xyz: paddle.Tensor, new_xyz: paddle.Tensor,
                features: paddle.Tensor):
        """
        Args:
            xyz: (B, N, 3) source point coordinates.
            new_xyz: (B, npoint, 3) query (center) point coordinates.
            features: (B, C, N) per-point features, or None.
        Returns:
            (B, 3+C, npoint, nsample) when ``use_xyz`` and features are given,
            (B, C, npoint, nsample) when not ``use_xyz``, or
            (B, 3, npoint, nsample) when ``features`` is None.
        """
        idx = pointnet2_ops.ball_query_batch(
            new_xyz, xyz, self.radius, self.nsample)  # (B, npoints, nsample)
        xyz_trans = xyz.transpose([0, 2, 1])  # (B, 3, N)
        grouped_xyz = pointnet2_ops.grouping_operation_batch(
            xyz_trans, idx)  # (B, 3, npoint, nsample)
        # Translate neighbors into the local frame of their query point.
        grouped_xyz -= new_xyz.transpose([0, 2, 1]).unsqueeze(-1)
        if features is not None:
            grouped_features = pointnet2_ops.grouping_operation_batch(
                features, idx)
            if self.use_xyz:
                new_features = paddle.concat(
                    [grouped_xyz, grouped_features],
                    axis=1)  # (B, 3+C, npoint, nsample)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz
        return new_features
class SAModuleMSG_WithSampling(nn.Layer):
    """Multi-scale-grouping set-abstraction layer with configurable sampling
    (IA-SSD style).

    Points are downsampled by class-score top-k ('ctr' in ``sample_type``),
    farthest-point sampling ('D-FPS'), or kept as-is when the cloud already
    has <= ``npoint`` points; the sampled centers are then grouped at several
    radii and reduced by per-scale MLPs.
    """

    def __init__(self,
                 *,
                 npoint: int,
                 sample_range: int,
                 sample_type: str,
                 radii: List[float],
                 nsamples: List[int],
                 mlps: List[List[int]],
                 use_xyz: bool = True,
                 dilated_group=False,
                 pool_method='max_pool',
                 aggregation_mlp: List[int],
                 confidence_mlp: List[int],
                 num_classes: int):
        """
        Args:
            npoint: number of points kept after sampling.
            sample_range: sampling range from the config (stored but not used
                by this implementation).
            sample_type: sampling strategy; matched via substring against
                'ctr' and 'D-FPS'.
            radii: ball-query radius per grouping scale.
            nsamples: neighbors gathered per query point, per scale.
            mlps: per-scale MLP channel specs; mlps[i][0] is the input width.
            use_xyz: concatenate relative xyz to grouped features.
            dilated_group: dilated grouping (not implemented).
            pool_method: 'max_pool' or 'avg_pool' over the nsample axis.
            aggregation_mlp: channels of the MLP fusing multi-scale features
                (None/[] disables it).
            confidence_mlp: channels of the per-point classification head
                (None/[] disables it).
            num_classes: classes predicted by the confidence head.
        """
        super().__init__()
        assert len(radii) == len(nsamples)
        self.npoint = npoint
        self.sample_type = sample_type
        self.sample_range = sample_range
        self.dilated_group = dilated_group
        self.groupers = nn.LayerList()
        self.mlps = nn.LayerList()
        out_channels = 0
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            if self.dilated_group:
                raise NotImplementedError
            else:
                self.groupers.append(
                    QueryAndGroup(radius, nsample, use_xyz=use_xyz))
            mlp_spec = mlps[i]
            if use_xyz:
                # Grouped features are prefixed with 3 relative-xyz channels.
                mlp_spec[0] += 3
            shared_mlps = []
            for k in range(len(mlp_spec) - 1):
                shared_mlps.extend([
                    nn.Conv2D(
                        mlp_spec[k],
                        mlp_spec[k + 1],
                        kernel_size=1,
                        bias_attr=False),
                    nn.BatchNorm2D(mlp_spec[k + 1]),
                    nn.ReLU()
                ])
            self.mlps.append(nn.Sequential(*shared_mlps))
            out_channels += mlp_spec[-1]
        self.pool_method = pool_method
        # Optional MLP that fuses the concatenated multi-scale features.
        if (aggregation_mlp is not None) and (len(aggregation_mlp) !=
                                              0) and (len(self.mlps) > 0):
            shared_mlp = []
            for k in range(len(aggregation_mlp)):
                shared_mlp.extend([
                    nn.Conv1D(
                        out_channels,
                        aggregation_mlp[k],
                        kernel_size=1,
                        bias_attr=False),
                    nn.BatchNorm1D(aggregation_mlp[k]),
                    nn.ReLU()
                ])
                out_channels = aggregation_mlp[k]
            self.aggregation_layer = nn.Sequential(*shared_mlp)
        else:
            self.aggregation_layer = None
        # Optional per-point classification (confidence) head.
        if (confidence_mlp is not None) and (len(confidence_mlp) != 0):
            shared_mlp = []
            for k in range(len(confidence_mlp)):
                shared_mlp.extend([
                    nn.Conv1D(
                        out_channels,
                        confidence_mlp[k],
                        kernel_size=1,
                        bias_attr=False),
                    nn.BatchNorm1D(confidence_mlp[k]),
                    nn.ReLU()
                ])
                out_channels = confidence_mlp[k]
            shared_mlp.append(
                nn.Conv1D(
                    out_channels, num_classes, kernel_size=1, bias_attr=True))
            self.confidence_layer = nn.Sequential(*shared_mlp)
        else:
            self.confidence_layer = None

    def forward(self,
                xyz: paddle.Tensor,
                features: paddle.Tensor = None,
                cls_features: paddle.Tensor = None,
                new_xyz=None,
                ctr_xyz=None):
        """
        Args:
            xyz: (B, N, 3) input point coordinates.
            features: (B, C, N) input point features, or None.
            cls_features: (B, N, num_class) class logits used by 'ctr'
                sampling, or None.
            new_xyz: (B, npoint, 3) precomputed centers, or None.
            ctr_xyz: (B, npoint, 3) externally supplied centers; when given,
                sampling is skipped entirely.
        Returns:
            new_xyz: (B, npoint, 3) sampled/supplied centers.
            new_features: (B, C', npoint) aggregated features.
            cls_features: (B, npoint, num_class) confidence logits, or None.
        """
        new_features_list = []
        xyz_flipped = xyz.transpose([0, 2, 1])
        # Sample operation
        if ctr_xyz is None:
            # No downsample: keep every point (cloud already small enough).
            if xyz.shape[1] <= self.npoint:
                sample_idx = paddle.arange(
                    xyz.shape[1], dtype='int32') * paddle.ones(
                        xyz.shape[:2], dtype='int32')
            # ctr downsample: top-k points by max class score.
            elif 'ctr' in self.sample_type:
                cls_features_max = cls_features.max(axis=-1)
                score_pred = F.sigmoid(cls_features_max)
                sample_value, sample_idx = paddle.topk(
                    score_pred, self.npoint, axis=-1)
                sample_idx = sample_idx.astype('int32')  # (B, npoint)
            # D-FPS downsample: distance-based farthest point sampling.
            # NOTE(review): if sample_type matches none of the branches,
            # sample_idx is undefined here — presumably configs always use a
            # supported type; confirm upstream.
            elif 'D-FPS' in self.sample_type:
                sample_idx = pointnet2_ops.farthest_point_sample(
                    xyz, self.npoint)  # (B, npoint)
            new_xyz = pointnet2_ops.gather_operation(
                xyz_flipped, sample_idx).transpose([0, 2, 1])  # (B, npoint, 3)
        else:
            new_xyz = ctr_xyz
        # MSG group operation (ball query and group)
        if len(self.groupers) > 0:
            for i in range(len(self.groupers)):
                new_features = self.groupers[i](
                    xyz, new_xyz, features)  # (B, C, npoint, nsample)
                new_features = self.mlps[i](
                    new_features)  # (B, mlp[-1], npoint, nsample)
                # Reduce over the nsample (neighbor) axis.
                if self.pool_method == 'max_pool':
                    new_features = F.max_pool2d(
                        new_features, kernel_size=[1, new_features.shape[-1]])
                elif self.pool_method == 'avg_pool':
                    new_features = F.avg_pool2d(
                        new_features, kernel_size=[1, new_features.shape[-1]])
                else:
                    raise NotImplementedError
                new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
                new_features_list.append(new_features)
            new_features = paddle.concat(
                new_features_list, axis=1)  # (B, mlp_cat, npoint)
            if self.aggregation_layer is not None:
                new_features = self.aggregation_layer(
                    new_features)  # (B, mlp_agg, npoint)
        else:
            # No grouping configured: just gather the sampled features.
            new_features = pointnet2_ops.gather_operation(
                features, sample_idx)  # (B, C, npoint)
        # Confidence layer
        if self.confidence_layer is not None:
            cls_features = self.confidence_layer(new_features).transpose(
                [0, 2, 1])  # (B, npoint, num_class)
        else:
            cls_features = None
        return new_xyz, new_features, cls_features
class Vote_layer(nn.Layer):
    """Vote layer: regresses per-point center offsets (optionally clamped to
    ``max_translate_range``) and shifts the input points toward predicted
    instance centers.
    """

    def __init__(self, mlp_list, pre_channel, max_translate_range):
        """
        Args:
            mlp_list: hidden channels of the shared MLP ([] disables it).
            pre_channel: number of input feature channels.
            max_translate_range: per-axis clamp for predicted offsets, or
                None for unbounded offsets.
        """
        super().__init__()
        if len(mlp_list) > 0:
            shared_mlps = []
            for i in range(len(mlp_list)):
                shared_mlps.extend([
                    nn.Conv1D(
                        pre_channel,
                        mlp_list[i],
                        kernel_size=1,
                        bias_attr=False),
                    nn.BatchNorm1D(mlp_list[i]),
                    nn.ReLU()
                ])
                pre_channel = mlp_list[i]
            self.mlps = nn.Sequential(*shared_mlps)
        else:
            self.mlps = None
        # Regresses a 3-channel (x, y, z) offset for every point.
        self.ctr_reg = nn.Conv1D(pre_channel, 3, kernel_size=1)
        self.max_offset_limit = paddle.to_tensor(
            max_translate_range,
            dtype='float32') if max_translate_range is not None else None

    def forward(self, xyz, features):
        """
        Args:
            xyz: (B, N, 3) point coordinates.
            features: (B, C, N) point features.
        Returns:
            vote_xyz: (B, N, 3) shifted (voted) coordinates.
            new_features: the slice of the regression output beyond the first
                3 channels (see NOTE below).
            xyz: the original input coordinates, unchanged.
            ctr_offsets: (B, N, 3) raw predicted offsets.
        """
        if self.mlps is not None:
            new_features = self.mlps(features)  # [2, 256, 256] -> [2, 128, 256]
        else:
            new_features = features
        ctr_offsets = self.ctr_reg(new_features)  # [2, 128, 256] -> [2, 3, 256]
        ctr_offsets = ctr_offsets.transpose([0, 2, 1])  # [2, 256, 3]
        # NOTE(review): ctr_reg outputs exactly 3 channels, so this slice is
        # empty and new_features becomes a zero-width tensor — presumably only
        # meaningful if ctr_reg were widened to 3+C channels; confirm intent.
        feat_offsets = ctr_offsets[..., 3:]
        new_features = feat_offsets
        ctr_offsets = ctr_offsets[..., :3]
        if self.max_offset_limit is not None:
            # Clamp offsets element-wise into [-limit, +limit].
            max_offset_limit = self.max_offset_limit.expand(xyz.shape)
            limited_ctr_offsets = paddle.where(ctr_offsets > max_offset_limit,
                                               max_offset_limit, ctr_offsets)
            min_offset_limit = -1 * max_offset_limit
            limited_ctr_offsets = paddle.where(
                limited_ctr_offsets < min_offset_limit, min_offset_limit,
                limited_ctr_offsets)
            vote_xyz = xyz + limited_ctr_offsets
        else:
            vote_xyz = xyz + ctr_offsets
        return vote_xyz, new_features, xyz, ctr_offsets
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/__init__.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .bev import *
from .caddn import *
from .f2v import *
from .ffe import *
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/caddn.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.models.common import class_agnostic_nms
from paddle3d.models.base import BaseMonoModel
from paddle3d.models.layers import ConvBNReLU
from paddle3d.utils import checkpoint
from paddle3d.utils.logger import logger
from .bev import BEV
from .f2v import FrustumToVoxel
from .ffe import FFE
@manager.MODELS.add_component
class CADDN(BaseMonoModel):
    """CaDDN monocular 3D object detector.

    Shared pipeline (train/test/export): image backbone -> depth
    classification head -> frustum feature encoder (FFE) -> frustum-to-voxel
    transform -> collapse voxels to BEV -> 2D BEV backbone -> dense head.
    """

    def __init__(self,
                 backbone_3d,
                 bev_cfg,
                 dense_head,
                 class_head,
                 ffe_cfg,
                 f2v_cfg,
                 disc_cfg,
                 map_to_bev_cfg,
                 post_process_cfg,
                 pretrained=None,
                 box_with_velocity: bool = False):
        """
        Args:
            backbone_3d: image feature extractor.
            bev_cfg: config dict for the BEV 2D backbone.
            dense_head: detection head operating on BEV features.
            class_head: depth classification head producing depth logits.
            ffe_cfg: frustum feature encoder config.
            f2v_cfg: frustum-to-voxel transform config.
            disc_cfg: depth discretization config shared by FFE and F2V.
            map_to_bev_cfg: conv collapsing (C*Z) channels into BEV features.
            post_process_cfg: NMS and score-threshold config for inference.
            pretrained: optional pretrained weights to load.
            box_with_velocity: whether predicted boxes carry velocity.
        """
        super().__init__(
            box_with_velocity=box_with_velocity,
            need_camera_to_image=True,
            need_lidar_to_camera=True)
        self.backbone_3d = backbone_3d
        self.class_head = class_head
        self.ffe = FFE(ffe_cfg, disc_cfg=disc_cfg)
        self.map_to_bev = ConvBNReLU(**map_to_bev_cfg)
        self.backbone_2d = BEV(**bev_cfg)
        self.dense_head = dense_head
        self.f2v = FrustumToVoxel(**f2v_cfg, disc_cfg=disc_cfg)
        self.post_process_cfg = post_process_cfg
        self.pretrained = pretrained
        self.init_weight()

    def _extract_predictions(self, data):
        """Run the shared forward pipeline; returns the dense-head output.

        Requires ``data["images"]`` and ``data["image_shape"]`` to be set.
        """
        # ffe: per-pixel depth distribution over image features
        image_features = self.backbone_3d(data["images"])
        depth_logits = self.class_head(image_features, data["image_shape"])
        data = self.ffe(image_features[0], depth_logits, data)
        # frustum_to_voxel
        data = self.f2v(data)
        # map_to_bev: (B, C, Z, Y, X) -> (B, C*Z, Y, X) -> (B, C', Y, X)
        voxel_features = data["voxel_features"]
        bev_features = voxel_features.flatten(start_axis=1, stop_axis=2)
        b, c, h, w = paddle.shape(bev_features)
        bev_features = bev_features.reshape(
            [b, self.map_to_bev._conv._in_channels, h, w])
        data["spatial_features"] = self.map_to_bev(bev_features)
        # backbone_2d
        data = self.backbone_2d(data)
        return self.dense_head(data)

    def train_forward(self, data):
        """Training forward pass; returns the loss dict."""
        if not self.training:
            # Eval-mode invocation: batch_size is not supplied by the loader.
            b, c, h, w = paddle.shape(data["images"])
            data["batch_size"] = b
        predictions = self._extract_predictions(data)
        return self.get_loss(predictions)

    def test_forward(self, data):
        """Inference forward pass; returns post-processed predictions."""
        b, c, h, w = paddle.shape(data["images"])
        data["batch_size"] = b
        predictions = self._extract_predictions(data)
        return self.post_process(predictions)

    def export_forward(self, data):
        """Deploy-time forward pass; derives image_shape from the input."""
        b, c, h, w = paddle.shape(data["images"])
        data["batch_size"] = b
        data["image_shape"] = paddle.concat([h, w]).unsqueeze(0)
        predictions = self._extract_predictions(data)
        return self.post_process(predictions)

    def get_loss(self, predictions):
        """Sum the dense-head (RPN) loss and the depth (DDN) loss."""
        disp_dict = {}
        loss_rpn, tb_dict_rpn = self.dense_head.get_loss()
        loss_depth, tb_dict_depth = self.ffe.get_loss()
        tb_dict = {
            'loss_rpn': loss_rpn.item(),
            'loss_depth': loss_depth.item(),
            **tb_dict_rpn,
            **tb_dict_depth
        }
        loss = loss_rpn + loss_depth
        losses = {"loss": loss, "tb_dict": tb_dict, "disp_dict": disp_dict}
        return losses

    def post_process(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
                                or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
                multihead_label_mapping: [(num_class1), (num_class2), ...]
                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
                cls_preds_normalized: indicate whether batch_cls_preds is normalized
                batch_index: optional (N1+N2+...)
                has_class_labels: True/False
                roi_labels: (B, num_rois)  1 .. num_classes
                batch_pred_labels: (B, num_boxes, 1)
        Returns:
            dict with 'preds': per-sample dicts of pred_boxes / pred_scores /
            pred_labels after class-agnostic NMS.
        """
        if getattr(self, "in_export_mode", False):
            batch_size = 1
        else:
            batch_size = batch_dict['batch_size']
        pred_dicts = []
        for index in range(batch_size):
            # Select this sample's rows, either by an explicit batch index
            # column (flat layout) or by the leading batch dimension.
            if batch_dict.get('batch_index', None) is not None:
                assert batch_dict['batch_box_preds'].shape.__len__() == 2
                batch_mask = (batch_dict['batch_index'] == index)
            else:
                assert batch_dict['batch_box_preds'].shape.__len__() == 3
                batch_mask = index
            box_preds = batch_dict['batch_box_preds'][batch_mask]
            if not isinstance(batch_dict['batch_cls_preds'], list):
                cls_preds = batch_dict['batch_cls_preds'][batch_mask]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = F.sigmoid(cls_preds)
            else:
                # Multi-head case: one tensor per class group.
                cls_preds = [
                    x[batch_mask] for x in batch_dict['batch_cls_preds']
                ]
                if not batch_dict['cls_preds_normalized']:
                    cls_preds = [F.sigmoid(x) for x in cls_preds]
            # Labels are 1-based; scores are the per-box max class score.
            label_preds = paddle.argmax(cls_preds, axis=-1) + 1.0
            cls_preds = paddle.max(cls_preds, axis=-1)
            selected_score, selected_label, selected_box = class_agnostic_nms(
                box_scores=cls_preds,
                box_preds=box_preds,
                label_preds=label_preds,
                nms_config=self.post_process_cfg['nms_config'],
                score_thresh=self.post_process_cfg['score_thresh'])
            record_dict = {
                'pred_boxes': selected_box,
                'pred_scores': selected_score,
                'pred_labels': selected_label
            }
            pred_dicts.append(record_dict)
        return {'preds': pred_dicts}

    def init_weight(self):
        """Load pretrained weights when a checkpoint path is configured."""
        if self.pretrained:
            checkpoint.load_pretrained_model(self, self.pretrained)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/bev.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
from paddle3d.models.layers import ConvBNReLU, reset_parameters
class BEV(nn.Layer):
    """2D BEV backbone: a stack of downsampling conv blocks whose outputs are
    upsampled and concatenated into one multi-scale BEV feature map.
    """

    def __init__(self, layer_nums, layer_strides, num_filters, upsample_strides,
                 num_upsample_filters, input_channels):
        """
        Args:
            layer_nums: number of conv layers per block.
            layer_strides: stride of the first conv in each block.
            num_filters: output channels per block.
            upsample_strides: per-block resampling factor (>= 1 uses a
                transposed conv; < 1 uses a strided conv for downsampling).
            num_upsample_filters: output channels of each resampled branch.
            input_channels: channels of the input spatial features.
        """
        super().__init__()
        num_levels = len(layer_nums)
        c_in_list = [input_channels, *num_filters[:-1]]
        self.blocks = nn.Sequential()
        self.deblocks = nn.Sequential()
        for idx in range(num_levels):
            cur_layers = [
                nn.Pad2D(1),
                nn.Conv2D(
                    c_in_list[idx],
                    num_filters[idx],
                    kernel_size=3,
                    stride=layer_strides[idx],
                    padding=0,
                    bias_attr=False),
                nn.BatchNorm2D(num_filters[idx], epsilon=1e-3, momentum=0.99),
                nn.ReLU()
            ]
            for k in range(layer_nums[idx]):
                cur_layers.extend([
                    nn.Conv2D(
                        num_filters[idx],
                        num_filters[idx],
                        kernel_size=3,
                        padding=1,
                        bias_attr=False),
                    nn.BatchNorm2D(
                        num_filters[idx], epsilon=1e-3, momentum=0.99),
                    nn.ReLU()
                ])
            self.blocks.add_sublayer("level_" + str(idx),
                                     nn.Sequential(*cur_layers))
            if len(upsample_strides) > 0:
                stride = upsample_strides[idx]
                if stride >= 1:
                    # Upsample the block output with a transposed conv.
                    self.deblocks.add_sublayer(
                        "level_" + str(idx),
                        nn.Sequential(
                            nn.Conv2DTranspose(
                                num_filters[idx],
                                num_upsample_filters[idx],
                                upsample_strides[idx],
                                stride=upsample_strides[idx],
                                bias_attr=False),
                            nn.BatchNorm2D(
                                num_upsample_filters[idx],
                                epsilon=1e-3,
                                momentum=0.99), nn.ReLU()))
                else:
                    # Fractional stride: downsample with a strided conv.
                    # Fix: `np.int` was removed in NumPy 1.24 — use the
                    # builtin int instead of `.astype(np.int)`.
                    stride = int(np.round(1 / stride))
                    self.deblocks.add_sublayer(
                        "level_" + str(idx),
                        nn.Sequential(
                            nn.Conv2D(
                                num_filters[idx],
                                num_upsample_filters[idx],
                                stride,
                                stride=stride,
                                bias_attr=False),
                            nn.BatchNorm2D(
                                num_upsample_filters[idx],
                                epsilon=1e-3,
                                momentum=0.99), nn.ReLU()))
        c_in = sum(num_upsample_filters)
        # Optional extra upsampling applied after concatenation.
        if len(upsample_strides) > num_levels:
            self.deblocks.add_sublayer(
                "upsample",
                nn.Sequential(
                    nn.Conv2DTranspose(
                        c_in,
                        c_in,
                        upsample_strides[-1],
                        stride=upsample_strides[-1],
                        bias_attr=False),
                    nn.BatchNorm2D(c_in, epsilon=1e-3, momentum=0.99),
                    nn.ReLU(),
                ))
        self.num_bev_features = c_in
        self.init_weight()

    def forward(self, data_dict):
        """
        Args:
            data_dict: dict with 'spatial_features' (B, C, Y, X).
        Returns:
            data_dict with 'spatial_features_2d' added: the concatenated
            multi-scale BEV feature map.
        """
        spatial_features = data_dict['spatial_features']
        ups = []
        x = spatial_features
        for i in range(len(self.blocks)):
            x = self.blocks["level_" + str(i)](x)
            if len(self.deblocks) > 0:
                ups.append(self.deblocks["level_" + str(i)](x))
            else:
                ups.append(x)
        if len(ups) > 1:
            x = paddle.concat(ups, axis=1)
        elif len(ups) == 1:
            x = ups[0]
        if len(self.deblocks) > len(self.blocks):
            x = self.deblocks["upsample"](x)
        data_dict['spatial_features_2d'] = x
        return data_dict

    def init_weight(self):
        """Re-initialize all conv layers with the project's default scheme."""
        for sublayer in self.sublayers():
            if isinstance(sublayer, nn.Conv2D):
                reset_parameters(sublayer)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/ffe/__init__.py
|
from .ddn_loss import *
from .ffe import FFE
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/ffe/ffe.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.models.layers import ConvBNReLU, reset_parameters
from .ddn_loss.ddn_loss import DDNLoss
class FFE(nn.Layer):
    """Frustum feature encoder: turns image features and depth logits into a
    frustum feature volume by weighting features with depth-bin probabilities.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/models/backbones_3d/ffe/depth_ffe.py#L9
    """

    def __init__(self, ffe_cfg, disc_cfg):
        """
        Initialize depth classification network
        Args:
            ffe_cfg [dict]: FFE config with 'downsample_factor',
                'channel_reduce_cfg' and 'ddn_loss' entries.
            disc_cfg [dict]: Depth discretization configuration.
        """
        super().__init__()
        self.disc_cfg = disc_cfg
        self.downsample_factor = ffe_cfg['downsample_factor']
        # 1x1-style conv reducing image feature channels before weighting.
        self.channel_reduce = ConvBNReLU(**ffe_cfg['channel_reduce_cfg'])
        self.ddn_loss = DDNLoss(
            disc_cfg=self.disc_cfg,
            downsample_factor=self.downsample_factor,
            **ffe_cfg['ddn_loss'])
        # Tensors cached during forward() for get_loss().
        self.forward_ret_dict = {}
        self.init_weight()

    def forward(self, image_features, depth_logits, batch_dict):
        """
        Predicts depths and creates image depth feature volume using depth classification scores
        Args:
            image_features [paddle.Tensor(N, C, H, W)]: Image features
            depth_logits [paddle.Tensor(N, D+1, H', W')]: Depth logits,
                resized here to match the feature map.
            batch_dict: input batch; during training must contain
                'depth_maps' and 'gt_boxes2d' for the DDN loss.
        Returns:
            batch_dict:
                frustum_features [paddle.Tensor(N, C, D, H_out, W_out)]: Image depth features
        """
        # Pixel-wise depth classification
        b, c, h, w = paddle.shape(image_features)
        depth_logits = F.interpolate(
            depth_logits, size=[h, w], mode='bilinear', align_corners=False)
        image_features = self.channel_reduce(image_features)
        frustum_features = self.create_frustum_features(
            image_features=image_features, depth_logits=depth_logits)
        batch_dict["frustum_features"] = frustum_features
        if self.training:
            # Cache loss inputs; consumed later by get_loss().
            self.forward_ret_dict["depth_maps"] = batch_dict["depth_maps"]
            self.forward_ret_dict["gt_boxes2d"] = batch_dict["gt_boxes2d"]
            self.forward_ret_dict["depth_logits"] = depth_logits
        return batch_dict

    def create_frustum_features(self, image_features, depth_logits):
        """
        Create image depth feature volume by multiplying image features with depth classification scores
        Args:
            image_features [paddle.Tensor(N, C, H, W)]: Image features
            depth_logits [paddle.Tensor(N, D+1, H, W)]: Depth classification logits
        Returns:
            frustum_features [paddle.Tensor(N, C, D, H, W)]: Image depth features
        """
        channel_dim = 1
        depth_dim = 2
        # Resize to match dimensions
        image_features = image_features.unsqueeze(depth_dim)
        depth_logits = depth_logits.unsqueeze(channel_dim)
        # Apply softmax along depth axis and remove last depth category (> Max Range)
        depth_probs = F.softmax(depth_logits, axis=depth_dim)
        depth_probs = depth_probs[:, :, :-1]
        # Multiply to form image depth feature volume
        frustum_features = depth_probs * image_features
        return frustum_features

    def get_loss(self):
        """Compute the DDN loss from tensors cached during forward()."""
        loss, tb_dict = self.ddn_loss(**self.forward_ret_dict)
        return loss, tb_dict

    def init_weight(self):
        """Re-initialize conv layers with the project's default scheme."""
        for sublayer in self.sublayers():
            if isinstance(sublayer, nn.Conv2D):
                reset_parameters(sublayer)
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/ffe
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/ffe/ddn_loss/__init__.py
|
from .ddn_loss import DDNLoss
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/ffe
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/ffe/ddn_loss/ddn_loss.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from paddle3d.models.losses import MultiFocalLoss
from paddle3d.utils.depth import bin_depths
from .balancer import Balancer
class DDNLoss(nn.Layer):
    """Depth distribution network loss: focal classification over depth bins,
    re-weighted for foreground/background balance.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/models/backbones_3d/ffe/ddn_loss/ddn_loss.py#L9
    """

    def __init__(self, weight, alpha, beta, disc_cfg, fg_weight, bg_weight,
                 downsample_factor):
        """
        Args:
            weight [float]: Loss function weight
            alpha [float]: Alpha value for Focal Loss
            beta [float]: Beta value for Focal Loss
            disc_cfg [dict]: Depth discretiziation configuration
            fg_weight [float]: Foreground loss weight
            bg_weight [float]: Background loss weight
            downsample_factor [int]: Depth map downsample factor
        """
        super().__init__()
        self.disc_cfg = disc_cfg
        self.alpha = alpha
        self.beta = beta
        self.weight = weight
        # Focal loss over depth-bin classification.
        self.loss_func = MultiFocalLoss(alpha=self.alpha, beta=self.beta)
        # Foreground/background pixel re-weighting.
        self.balancer = Balancer(
            downsample_factor=downsample_factor,
            fg_weight=fg_weight,
            bg_weight=bg_weight)

    def forward(self, depth_logits, depth_maps, gt_boxes2d):
        """
        Args:
            depth_logits [paddle.Tensor(B, D+1, H, W)]: Predicted depth logits
            depth_maps [paddle.Tensor(B, H, W)]: Depth map [m]
            gt_boxes2d [paddle.Tensor(B, N, 4)]: 2D boxes for fg/bg balancing
        Returns:
            loss [paddle.Tensor(1)]: Depth classification network loss
            tb_dict [dict[float]]: All losses to log in tensorboard
        """
        # Discretize the continuous depth map into bin-index targets.
        depth_target = bin_depths(depth_maps, **self.disc_cfg, target=True)
        # Per-pixel focal classification loss over depth bins.
        loss = self.loss_func(depth_logits, depth_target)
        # Balance foreground vs. background contributions.
        loss, tb_dict = self.balancer(loss=loss, gt_boxes2d=gt_boxes2d)
        # Apply the global loss weight.
        loss *= self.weight
        tb_dict.update({"ddn_loss": loss.item()})
        return loss, tb_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/ffe
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/ffe/ddn_loss/balancer.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
class Balancer(nn.Layer):
    """Fixed foreground/background pixel-loss balancer.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/models/backbones_3d/ffe/ddn_loss/balancer.py#L7
    """

    def __init__(self, fg_weight, bg_weight, downsample_factor=1):
        """
        Initialize fixed foreground/background loss balancer
        Args:
            fg_weight [float]: Foreground loss weight
            bg_weight [float]: Background loss weight
            downsample_factor [int]: Depth map downsample factor
        """
        super().__init__()
        self.fg_weight = fg_weight
        self.bg_weight = bg_weight
        self.downsample_factor = downsample_factor

    def compute_fg_mask(self, gt_boxes2d, shape, downsample_factor=1):
        """
        Compute foreground mask for images.

        NOTE: this mutates ``gt_boxes2d`` in place (the `/=` below divides
        the caller's tensor by ``downsample_factor``).

        Args:
            gt_boxes2d [paddle.Tensor(B, N, 4)]: 2D box labels (u1, v1, u2, v2)
            shape [paddle.Size or tuple]: Foreground mask desired shape
            downsample_factor [int]: Downsample factor for image
        Returns:
            fg_mask [paddle.Tensor(shape)]: Foreground mask (True inside boxes)
        """
        fg_mask = paddle.zeros(shape, dtype=paddle.bool)
        # Set box corners: scale to the downsampled map, snap min corner down
        # and max corner up so the mask fully covers each box.
        gt_boxes2d /= downsample_factor
        gt_boxes2d[:, :, :2] = paddle.floor(gt_boxes2d[:, :, :2])
        gt_boxes2d[:, :, 2:] = paddle.ceil(gt_boxes2d[:, :, 2:])
        gt_boxes2d = gt_boxes2d.cast("int64")
        # Set all values within each box to True
        B, N = gt_boxes2d.shape[:2]
        for b in range(B):
            for n in range(N):
                u1, v1, u2, v2 = gt_boxes2d[b, n]
                fg_mask[b, v1:v2, u1:u2] = True
        return fg_mask

    def forward(self, loss, gt_boxes2d):
        """
        Forward pass
        Args:
            loss [paddle.Tensor(B, H, W)]: Pixel-wise loss
            gt_boxes2d [paddle.Tensor (B, N, 4)]: 2D box labels for foreground/background balancing
        Returns:
            loss [paddle.Tensor(1)]: Total loss after foreground/background balancing
            tb_dict [dict[float]]: All losses to log in tensorboard
        """
        # Compute masks
        fg_mask = self.compute_fg_mask(
            gt_boxes2d=gt_boxes2d,
            shape=loss.shape,
            downsample_factor=self.downsample_factor)
        bg_mask = ~fg_mask
        fg_mask = fg_mask.cast("int64")
        bg_mask = bg_mask.cast("int64")
        # Compute balancing weights
        weights = self.fg_weight * fg_mask + self.bg_weight * bg_mask
        # Both sums count every pixel exactly once (fg + bg partition).
        num_pixels = fg_mask.sum() + bg_mask.sum()
        # Compute losses
        loss *= weights
        fg_loss = loss[fg_mask.cast("bool")].sum() / num_pixels
        bg_loss = loss[bg_mask.cast("bool")].sum() / num_pixels
        # Get total loss
        loss = fg_loss + bg_loss
        tb_dict = {
            "balancer_loss": loss.item(),
            "fg_loss": fg_loss.item(),
            "bg_loss": bg_loss.item()
        }
        return loss, tb_dict
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/f2v/__init__.py
|
from .frustum_to_voxel import FrustumToVoxel
| 0
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn
|
apollo_public_repos/apollo-model-centerpoint/paddle3d/models/detection/caddn/f2v/sampler.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle3d.ops import grid_sample_3d
class Sampler(nn.Layer):
    """Grid sampler wrapping the custom 3D grid-sample op.

    This code is based on https://github.com/TRAILab/CaDDN/blob/5a96b37f16b3c29dd2509507b1cdfdff5d53c558/pcdet/models/backbones_3d/f2v/sampler.py#L6
    """

    def __init__(self, mode="bilinear", padding_mode="zeros"):
        """
        Initializes module
        Args:
            mode [string]: Sampling mode [bilinear/nearest]
            padding_mode [string]: Padding mode for outside grid values [zeros/border/reflection]
        """
        super().__init__()
        self.mode = mode
        self.padding_mode = padding_mode
        self.func = grid_sample_3d.grid_sample_3d

    def forward(self, input_features, grid):
        """
        Samples input using sampling grid
        Args:
            input_features [Tensor(N, C, H_in, W_in)]: Input feature maps
            grid [Tensor(N, H_out, W_out, 2)]: Sampling grids for image features
        Returns
            output_features [Tensor(N, C, H_out, W_out)]: Output feature maps
        """
        # Fix: honor the configured mode/padding_mode instead of hard-coding
        # 'bilinear'/'zeros' — previously non-default constructor arguments
        # were silently ignored.
        output = self.func(
            x=input_features,
            grid=grid,
            mode=self.mode,
            padding_mode=self.padding_mode,
            align_corners=False)
        return output
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.