Alfred Liu committed

Commit: 6ce1271
Parent(s): 7d96e83

Add new setting: r50_nuimg_704x256
README.md
CHANGED

@@ -4,11 +4,14 @@ This is the official PyTorch implementation for paper [SparseBEV: High-Performan
 
 ## Model Zoo
 
-|----------|----------|------------
+| Setting | Pretrain | Training Cost | NDS | FPS | Config | Weights |
+|----------|----------|---------------|-----|-----|--------|---------|
+| r50_nuimg_704x256 | [nuImages](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth) | 21h (8x2080Ti) | 55.6 | 15.8 | [config](configs/r50_nuimg_704x256.py) | [weights](https://drive.google.com/file/d/1ft34-pxLpHGo2Aw-jowEtCxyXcqszHNn/view) |
+| r50_nuimg_704x256_400q_36ep | [nuImages](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth) | 28h (8x2080Ti) | 55.8 | 23.5 | [config](configs/r50_nuimg_704x256_400q_36ep.py) | [weights](https://drive.google.com/file/d/1C_Vn3iiSnSW1Dw1r0DkjJMwvHC5Y3zTN/view) |
+
+* We use `r50_nuimg_704x256` for ablation studies and `r50_nuimg_704x256_400q_36ep` for comparison with others.
+* We recommend using `r50_nuimg_704x256` to validate new ideas since it trains faster and the result is more stable.
+* FPS is measured with AMD 5800X CPU and RTX 3090 GPU.
 * The noise is around 0.3 NDS.
 
 ## Environment

@@ -86,14 +89,14 @@ These `*.pkl` files can also be generated with our script: `gen_sweep_info.py`.
 Train SparseBEV with 8 GPUs:
 
 ```
-torchrun --nproc_per_node 8 train.py --config configs/
+torchrun --nproc_per_node 8 train.py --config configs/r50_nuimg_704x256.py
 ```
 
 Train SparseBEV with 4 GPUs (i.e the last four GPUs):
 
 ```
 export CUDA_VISIBLE_DEVICES=4,5,6,7
-torchrun --nproc_per_node 4 train.py --config configs/
+torchrun --nproc_per_node 4 train.py --config configs/r50_nuimg_704x256.py
 ```
 
 The batch size for each GPU will be scaled automatically. So there is no need to modify the `batch_size` in config files.
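The automatic batch-size scaling mentioned in the README note can be pictured with a short sketch. This is only an assumption about how `train.py` divides work across the processes launched by `torchrun`; the variable names below are illustrative, and only `WORLD_SIZE` is a standard torchrun environment variable.

```
# Hedged sketch: splitting the global `batch_size = 8` from the config across
# the processes started by torchrun. The actual logic lives in train.py.
import os

total_batch_size = 8                                 # `batch_size` in the config
world_size = int(os.environ.get('WORLD_SIZE', 1))    # set by torchrun
samples_per_gpu = max(1, total_batch_size // world_size)

print(f'{world_size} GPU(s) -> {samples_per_gpu} sample(s) per GPU')
```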
configs/{r101_nuimg_1408x512_900q_24ep.py → r101_nuimg_1408x512.py}
RENAMED

@@ -1,4 +1,4 @@
-_base_ = ['./
+_base_ = ['./r50_nuimg_704x256.py']
 
 # For nuScenes we usually do 10-class detection
 class_names = [

@@ -27,9 +27,7 @@ img_neck = dict(
 model = dict(
     img_backbone=img_backbone,
     img_neck=img_neck,
-    pts_bbox_head=dict(
-        num_query=900,
-        transformer=dict(num_levels=5)),
+    pts_bbox_head=dict(transformer=dict(num_levels=5)),
 )
 
 ida_aug_conf = {

@@ -91,6 +89,3 @@ optimizer = dict(
 # load pretrained weights
 load_from = 'pretrain/cascade_mask_rcnn_r101_fpn_1x_nuim_20201024_134804-45215b1e.pth'
 revise_keys = [('backbone', 'img_backbone')]
-
-total_epochs = 24
-eval_config = dict(interval=total_epochs)
configs/{r50_in1k_704x256_900q_36ep.py → r50_in1k_704x256.py}
RENAMED

@@ -1,4 +1,4 @@
-_base_ = ['./
+_base_ = ['./r50_nuimg_704x256.py']
 
 img_backbone = dict(pretrained='torchvision://resnet50')
 
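Both renamed configs now carry only the settings that differ from the new shared base, relying on mmcv-style `_base_` inheritance: everything not redeclared in the child file is taken from `r50_nuimg_704x256.py`. A minimal sketch of inspecting the merged result, assuming the mmcv 1.x `Config` API used by this codebase:

```
from mmcv import Config

# Loading the child config resolves `_base_` and merges the overrides on top.
cfg = Config.fromfile('configs/r101_nuimg_1408x512.py')

print(cfg.model.pts_bbox_head.transformer.num_levels)  # 5, overridden in this file
print(cfg.total_epochs)                                 # 24, inherited from r50_nuimg_704x256.py
```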
configs/r50_nuimg_704x256.py
ADDED

@@ -0,0 +1,236 @@
dataset_type = 'CustomNuScenesDataset'
dataset_root = 'data/nuscenes/'

input_modality = dict(
    use_lidar=False,
    use_camera=True,
    use_radar=False,
    use_map=False,
    use_external=True
)

# For nuScenes we usually do 10-class detection
class_names = [
    'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
    'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]

# If point cloud range is changed, the models should also change their point
# cloud range accordingly
point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
voxel_size = [0.2, 0.2, 8]

# arch config
embed_dims = 256
num_layers = 6
num_query = 900
num_frames = 8
num_levels = 4
num_points = 4

img_backbone = dict(
    type='ResNet',
    depth=50,
    num_stages=4,
    out_indices=(0, 1, 2, 3),
    frozen_stages=1,
    norm_cfg=dict(type='BN2d', requires_grad=True),
    norm_eval=True,
    style='pytorch',
    with_cp=True)
img_neck = dict(
    type='FPN',
    in_channels=[256, 512, 1024, 2048],
    out_channels=embed_dims,
    num_outs=num_levels)
img_norm_cfg = dict(
    mean=[123.675, 116.280, 103.530],
    std=[58.395, 57.120, 57.375],
    to_rgb=True)

model = dict(
    type='SparseBEV',
    data_aug=dict(
        img_color_aug=True,  # Move some augmentations to GPU
        img_norm_cfg=img_norm_cfg,
        img_pad_cfg=dict(size_divisor=32)),
    stop_prev_grad=False,
    img_backbone=img_backbone,
    img_neck=img_neck,
    pts_bbox_head=dict(
        type='SparseBEVHead',
        num_classes=10,
        in_channels=embed_dims,
        num_query=num_query,
        query_denoising=True,
        query_denoising_groups=10,
        code_size=10,
        code_weights=[2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        sync_cls_avg_factor=True,
        transformer=dict(
            type='SparseBEVTransformer',
            embed_dims=embed_dims,
            num_frames=num_frames,
            num_points=num_points,
            num_layers=num_layers,
            num_levels=num_levels,
            num_classes=10,
            code_size=10,
            pc_range=point_cloud_range),
        bbox_coder=dict(
            type='NMSFreeCoder',
            post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
            pc_range=point_cloud_range,
            max_num=300,
            voxel_size=voxel_size,
            score_threshold=0.05,
            num_classes=10),
        positional_encoding=dict(
            type='SinePositionalEncoding',
            num_feats=embed_dims // 2,
            normalize=True,
            offset=-0.5),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=2.0),
        loss_bbox=dict(type='L1Loss', loss_weight=0.25),
        loss_iou=dict(type='GIoULoss', loss_weight=0.0)),
    train_cfg=dict(pts=dict(
        grid_size=[512, 512, 1],
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range,
        out_size_factor=4,
        assigner=dict(
            type='HungarianAssigner3D',
            cls_cost=dict(type='FocalLossCost', weight=2.0),
            reg_cost=dict(type='BBox3DL1Cost', weight=0.25),
            iou_cost=dict(type='IoUCost', weight=0.0),
        )
    ))
)

ida_aug_conf = {
    'resize_lim': (0.38, 0.55),
    'final_dim': (256, 704),
    'bot_pct_lim': (0.0, 0.0),
    'rot_lim': (0.0, 0.0),
    'H': 900, 'W': 1600,
    'rand_flip': True,
}

train_pipeline = [
    dict(type='LoadMultiViewImageFromFiles', to_float32=False, color_type='color'),
    dict(type='LoadMultiViewImageFromMultiSweeps', sweeps_num=num_frames - 1),
    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False),
    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectNameFilter', classes=class_names),
    dict(type='RandomTransformImage', ida_aug_conf=ida_aug_conf, training=True),
    dict(type='GlobalRotScaleTransImage', rot_range=[-0.3925, 0.3925], scale_ratio_range=[0.95, 1.05]),
    dict(type='DefaultFormatBundle3D', class_names=class_names),
    dict(type='Collect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img'], meta_keys=(
        'filename', 'ori_shape', 'img_shape', 'pad_shape', 'lidar2img', 'img_timestamp'))
]

test_pipeline = [
    dict(type='LoadMultiViewImageFromFiles', to_float32=False, color_type='color'),
    dict(type='LoadMultiViewImageFromMultiSweeps', sweeps_num=num_frames - 1, test_mode=True),
    dict(type='RandomTransformImage', ida_aug_conf=ida_aug_conf, training=False),
    dict(
        type='MultiScaleFlipAug3D',
        img_scale=(1600, 900),
        pts_scale_ratio=1,
        flip=False,
        transforms=[
            dict(type='DefaultFormatBundle3D', class_names=class_names, with_label=False),
            dict(type='Collect3D', keys=['img'], meta_keys=(
                'filename', 'box_type_3d', 'ori_shape', 'img_shape', 'pad_shape',
                'lidar2img', 'img_timestamp'))
        ])
]

data = dict(
    workers_per_gpu=8,
    train=dict(
        type=dataset_type,
        data_root=dataset_root,
        ann_file=dataset_root + 'nuscenes_infos_train_sweep.pkl',
        pipeline=train_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=False,
        use_valid_flag=True,
        box_type_3d='LiDAR'),
    val=dict(
        type=dataset_type,
        data_root=dataset_root,
        ann_file=dataset_root + 'nuscenes_infos_val_sweep.pkl',
        pipeline=test_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=True,
        box_type_3d='LiDAR'),
    test=dict(
        type=dataset_type,
        data_root=dataset_root,
        ann_file=dataset_root + 'nuscenes_custom_infos_test.pkl',
        pipeline=test_pipeline,
        classes=class_names,
        modality=input_modality,
        test_mode=True,
        box_type_3d='LiDAR')
)

optimizer = dict(
    type='AdamW',
    lr=2e-4,
    paramwise_cfg=dict(custom_keys={
        'img_backbone': dict(lr_mult=0.1),
        'sampling_offset': dict(lr_mult=0.1),
    }),
    weight_decay=0.01
)

optimizer_config = dict(
    type='Fp16OptimizerHook',
    loss_scale=512.0,
    grad_clip=dict(max_norm=35, norm_type=2)
)

# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    min_lr_ratio=1e-3
)
total_epochs = 24
batch_size = 8

# load pretrained weights
load_from = 'pretrain/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth'
revise_keys = [('backbone', 'img_backbone')]

# resume the last training
resume_from = None

# checkpointing
checkpoint_config = dict(interval=1, max_keep_ckpts=1)

# logging
log_config = dict(
    interval=1,
    hooks=[
        dict(type='MyTextLoggerHook', interval=1, reset_flag=True),
        dict(type='MyTensorboardLoggerHook', interval=500, reset_flag=True)
    ]
)

# evaluation
eval_config = dict(interval=total_epochs)

# other flags
debug = False
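The `load_from`/`revise_keys` pair at the end of this config loads the nuImages-pretrained detector and renames its `backbone.*` weights to the `img_backbone.*` names used by SparseBEV. A minimal sketch of that key rewrite (mmcv's checkpoint loader applies a regex substitution along these lines; the helper below is illustrative, not the project's code):

```
import re

def revise_state_dict(state_dict, revise_keys):
    """Rename checkpoint keys, e.g. 'backbone.*' -> 'img_backbone.*'."""
    out = {}
    for key, value in state_dict.items():
        for pattern, replacement in revise_keys:
            key = re.sub(pattern, replacement, key)
        out[key] = value
    return out

ckpt = {'backbone.layer1.0.conv1.weight': '<tensor>'}
print(revise_state_dict(ckpt, [('backbone', 'img_backbone')]))
# {'img_backbone.layer1.0.conv1.weight': '<tensor>'}
```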
configs/r50_nuimg_704x256_400q_36ep.py
CHANGED

@@ -1,236 +1,8 @@

The previous standalone 236-line config, identical to the new configs/r50_nuimg_704x256.py except for `num_query = 400` and `total_epochs = 36`, is replaced by an 8-line override of that shared base:

+_base_ = ['./r50_nuimg_704x256.py']
 
 model = dict(
+    pts_bbox_head=dict(num_query=400)
 )
 
 total_epochs = 36
 eval_config = dict(interval=total_epochs)
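The override works because mmcv-style inheritance merges nested dicts recursively: `pts_bbox_head=dict(num_query=400)` changes only the query count while keeping every other key of the head from the base config. An illustrative sketch of that merge rule (not the project's code):

```
def merge(base, override):
    """Recursively merge `override` into `base`; untouched keys are inherited."""
    out = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            out[key] = merge(base[key], value)
        else:
            out[key] = value
    return out

base_head = {'type': 'SparseBEVHead', 'num_query': 900, 'query_denoising': True}
print(merge(base_head, {'num_query': 400}))
# {'type': 'SparseBEVHead', 'num_query': 400, 'query_denoising': True}
```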