Haisong Liu committed
Commit b1da435 · unverified · 1 Parent(s): 1a2cae5

Release model: vov99_dd3d_1600x640_trainval_future (#2)

README.md CHANGED
@@ -4,11 +4,12 @@ This is the official PyTorch implementation for paper [SparseBEV: High-Performan
 
 ## Model Zoo
 
-| Setting | Pretrain | Training Cost | NDS | FPS | Weights |
-|----------|----------|---------------|-----|-----|---------|
-| [r50_nuimg_704x256](configs/r50_nuimg_704x256.py) | [nuImages](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth) | 21h (8x2080Ti) | 55.6 | 15.8 | [weights](https://drive.google.com/file/d/1ft34-pxLpHGo2Aw-jowEtCxyXcqszHNn/view) |
-| [r50_nuimg_704x256_400q_36ep](configs/r50_nuimg_704x256_400q_36ep.py) | [nuImages](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth) | 28h (8x2080Ti) | 55.8 | 23.5 | [weights](https://drive.google.com/file/d/1C_Vn3iiSnSW1Dw1r0DkjJMwvHC5Y3zTN/view) |
-| [r101_nuimg_1408x512](configs/r101_nuimg_1408x512.py) | [nuImages](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r101_fpn_1x_nuim/cascade_mask_rcnn_r101_fpn_1x_nuim_20201024_134804-45215b1e.pth) | 2d8h (8xV100) | 59.2 | 6.5 | [weights](https://drive.google.com/file/d/1dKu5cR1fuo-O0ynyBh-RCPtHrgut29mN/view) |
+| Setting | Pretrain | Training Cost | NDS<sub>val</sub> | NDS<sub>test</sub> | FPS | Weights |
+|----------|:--------:|:-------------:|:-----------------:|:------------------:|:---:|:-------:|
+| [r50_nuimg_704x256](configs/r50_nuimg_704x256.py) | [nuImg](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth) | 21h (8x2080Ti) | 55.6 | - | 15.8 | [gdrive](https://drive.google.com/file/d/1ft34-pxLpHGo2Aw-jowEtCxyXcqszHNn/view) |
+| [r50_nuimg_704x256_400q_36ep](configs/r50_nuimg_704x256_400q_36ep.py) | [nuImg](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth) | 28h (8x2080Ti) | 55.8 | - | 23.5 | [gdrive](https://drive.google.com/file/d/1C_Vn3iiSnSW1Dw1r0DkjJMwvHC5Y3zTN/view) |
+| [r101_nuimg_1408x512](configs/r101_nuimg_1408x512.py) | [nuImg](https://download.openmmlab.com/mmdetection3d/v0.1.0_models/nuimages_semseg/cascade_mask_rcnn_r101_fpn_1x_nuim/cascade_mask_rcnn_r101_fpn_1x_nuim_20201024_134804-45215b1e.pth) | 2d8h (8xV100) | 59.2 | - | 6.5 | [gdrive](https://drive.google.com/file/d/1dKu5cR1fuo-O0ynyBh-RCPtHrgut29mN/view) |
+| [vov99_dd3d_1600x640_trainval_future](configs/vov99_dd3d_1600x640_trainval_future.py) | [DD3D](https://drive.google.com/file/d/1gQkhWERCzAosBwG5bh2BKkt1k0TJZt-A/view) | 4d1h (8xA100) | 84.9 | 67.5 | - | [gdrive](https://drive.google.com/file/d/1TL0QoCiWD5uq8PCAWWE3A-g73ibK1R0S/view) |
 
 * We use `r50_nuimg_704x256` for ablation studies and `r50_nuimg_704x256_400q_36ep` for comparison with others.
 * We recommend using `r50_nuimg_704x256` to validate new ideas since it trains faster and the result is more stable.
@@ -72,13 +73,10 @@ python setup.py build_ext --inplace
 data/nuscenes
 ├── maps
 ├── nuscenes_infos_test_sweep.pkl
-├── nuscenes_infos_train_mini_sweep.pkl
 ├── nuscenes_infos_train_sweep.pkl
-├── nuscenes_infos_val_mini_sweep.pkl
 ├── nuscenes_infos_val_sweep.pkl
 ├── samples
 ├── sweeps
-├── v1.0-mini
 ├── v1.0-test
 └── v1.0-trainval
 ```
@@ -87,6 +85,14 @@ These `*.pkl` files can also be generated with our script: `gen_sweep_info.py`.
 
 ## Training
 
+Download the pretrained weights and put them in the `pretrain/` directory:
+
+```
+pretrain
+├── cascade_mask_rcnn_r101_fpn_1x_nuim_20201024_134804-45215b1e.pth
+├── cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth
+```
+
 Train SparseBEV with 8 GPUs:
 
 ```
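A quick way to confirm the `pretrain/` layout before training is a check like the one below. This is a minimal sketch, assuming it runs from the repository root; the first two file names come from the README diff above and the third from the new config's `load_from`.

```python
# Minimal check that the pretrained checkpoints are in place.
# Illustrative only; not part of the repo.
import os

expected = [
    'pretrain/cascade_mask_rcnn_r50_fpn_coco-20e_20e_nuim_20201009_124951-40963960.pth',
    'pretrain/cascade_mask_rcnn_r101_fpn_1x_nuim_20201024_134804-45215b1e.pth',
    'pretrain/dd3d_det_final.pth',  # needed by vov99_dd3d_1600x640_trainval_future
]
for path in expected:
    print(f"{path}: {'OK' if os.path.isfile(path) else 'MISSING'}")
```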
configs/r101_nuimg_1408x512.py CHANGED
@@ -70,7 +70,6 @@ test_pipeline = [
 ]
 
 data = dict(
-    workers_per_gpu=4,
     train=dict(pipeline=train_pipeline),
     val=dict(pipeline=test_pipeline),
     test=dict(pipeline=test_pipeline)
configs/r50_nuimg_704x256.py CHANGED
@@ -175,7 +175,7 @@ data = dict(
     test=dict(
         type=dataset_type,
         data_root=dataset_root,
-        ann_file=dataset_root + 'nuscenes_custom_infos_test.pkl',
+        ann_file=dataset_root + 'nuscenes_infos_test_sweep.pkl',
         pipeline=test_pipeline,
         classes=class_names,
         modality=input_modality,
configs/r50_nuimg_704x256_400q_36ep.py CHANGED
@@ -6,3 +6,5 @@ model = dict(
 
 total_epochs = 36
 eval_config = dict(interval=total_epochs)
+
+data = dict(workers_per_gpu=12)
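The one-line override works because these configs inherit from a `_base_` file and mmcv merges child dicts into the base recursively: only `workers_per_gpu` changes, while the `train`/`val`/`test` entries under `data` keep their inherited values. A minimal sketch of inspecting the merged result, assuming mmcv's `Config` (as used by this codebase) and a checkout of the repo root:

```python
# Inspect the merged config; mmcv merges `data = dict(workers_per_gpu=12)`
# into the base config's `data` dict key by key. Run from the repo root.
from mmcv import Config

cfg = Config.fromfile('configs/r50_nuimg_704x256_400q_36ep.py')
print(cfg.total_epochs)          # 36, set in this file
print(cfg.data.workers_per_gpu)  # 12, overriding the base
# The train/val/test pipelines are still the ones inherited from the base.
```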
configs/vov99_dd3d_1600x640_trainval_future.py ADDED
@@ -0,0 +1,107 @@
+_base_ = ['./r50_nuimg_704x256.py']
+
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
+    'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
+]
+
+# If point cloud range is changed, the models should also change their point
+# cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+voxel_size = [0.2, 0.2, 8]
+
+img_backbone = dict(
+    _delete_=True,
+    type='VoVNet',
+    spec_name='V-99-eSE',
+    out_features=['stage2', 'stage3', 'stage4', 'stage5'],
+    norm_eval=True,
+    frozen_stages=1,
+    with_cp=True
+)
+img_neck = dict(
+    _delete_=True,
+    type='FPN',
+    in_channels=[256, 512, 768, 1024],
+    out_channels=256,
+    num_outs=5
+)
+img_norm_cfg = dict(
+    _delete_=True,
+    mean=[103.530, 116.280, 123.675],
+    std=[57.375, 57.120, 58.395],
+    to_rgb=False
+)
+
+model = dict(
+    data_aug=dict(
+        img_color_aug=True,
+        img_norm_cfg=img_norm_cfg,
+        img_pad_cfg=dict(size_divisor=32)
+    ),
+    img_backbone=img_backbone,
+    img_neck=img_neck,
+    pts_bbox_head=dict(
+        num_query=1600,
+        transformer=dict(
+            num_levels=5,
+            num_points=4,
+            num_frames=15
+        )
+    )
+)
+
+ida_aug_conf = {
+    'resize_lim': (0.94, 1.25),
+    'final_dim': (640, 1600),
+    'bot_pct_lim': (0.0, 0.0),
+    'rot_lim': (0.0, 0.0),
+    'H': 900, 'W': 1600,
+    'rand_flip': True,
+}
+
+train_pipeline = [
+    dict(type='LoadMultiViewImageFromFiles', to_float32=False, color_type='color'),
+    dict(type='LoadMultiViewImageFromMultiSweepsFuture', prev_sweeps_num=7, next_sweeps_num=7),
+    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False),
+    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
+    dict(type='ObjectNameFilter', classes=class_names),
+    dict(type='RandomTransformImage', ida_aug_conf=ida_aug_conf, training=True),
+    dict(type='GlobalRotScaleTransImage', rot_range=[-0.3925, 0.3925], scale_ratio_range=[0.95, 1.05]),
+    dict(type='DefaultFormatBundle3D', class_names=class_names),
+    dict(type='Collect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img'], meta_keys=(
+        'filename', 'ori_shape', 'img_shape', 'pad_shape', 'lidar2img', 'img_timestamp'))
+]
+
+test_pipeline = [
+    dict(type='LoadMultiViewImageFromFiles', to_float32=False, color_type='color'),
+    dict(type='LoadMultiViewImageFromMultiSweepsFuture', prev_sweeps_num=7, next_sweeps_num=7, test_mode=True),
+    dict(type='RandomTransformImage', ida_aug_conf=ida_aug_conf, training=False),
+    dict(
+        type='MultiScaleFlipAug3D',
+        img_scale=(1600, 900),
+        pts_scale_ratio=1,
+        flip=False,
+        transforms=[
+            dict(type='DefaultFormatBundle3D', class_names=class_names, with_label=False),
+            dict(type='Collect3D', keys=['img'], meta_keys=(
+                'filename', 'box_type_3d', 'ori_shape', 'img_shape', 'pad_shape',
+                'lidar2img', 'img_timestamp'))
+        ])
+]
+
+data = dict(
+    train=dict(
+        ann_file=['data/nuscenes/nuscenes_infos_train_sweep.pkl',
+                  'data/nuscenes/nuscenes_infos_val_sweep.pkl'],
+        pipeline=train_pipeline),
+    val=dict(
+        ann_file='data/nuscenes/nuscenes_infos_val_sweep.pkl',  # use nuscenes_infos_test_sweep.pkl for submission
+        pipeline=test_pipeline),
+    test=dict(pipeline=test_pipeline)
+)
+
+# load pretrained weights
+load_from = 'pretrain/dd3d_det_final.pth'
+revise_keys = None
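A note on the `_delete_=True` flags above: without them, mmcv would merge `img_backbone` and friends into the base config's dicts key by key, so stale ResNet-50 fields from `r50_nuimg_704x256.py` (e.g. `depth`) would survive and be passed to `VoVNet`. `_delete_=True` replaces the whole dict instead. A pure-dict sketch of the difference, with values abbreviated from the configs:

```python
# Why _delete_=True matters when swapping the backbone type.
base_backbone = dict(type='ResNet', depth=50, frozen_stages=1)   # from the base config
child_backbone = dict(type='VoVNet', spec_name='V-99-eSE', frozen_stages=1)

# Default merge: child keys overwrite, but leftover base keys survive and
# would end up as unexpected kwargs for VoVNet.__init__.
merged = {**base_backbone, **child_backbone}
print(merged)  # {'type': 'VoVNet', 'depth': 50, ...}  <- 'depth' is stale

# With _delete_=True, mmcv discards the base dict and keeps only the child.
print(child_backbone)
```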
loaders/nuscenes_dataset.py CHANGED
@@ -8,7 +8,7 @@ from pyquaternion import Quaternion
 @DATASETS.register_module()
 class CustomNuScenesDataset(NuScenesDataset):
 
-    def collect_sweeps(self, index, into_past=60, into_future=0):
+    def collect_sweeps(self, index, into_past=60, into_future=60):
         all_sweeps_prev = []
         curr_index = index
         while len(all_sweeps_prev) < into_past:
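The only change here is the default `into_future=60`: `collect_sweeps` now walks the sample list in both directions, so the info dicts carry a `sweeps['next']` list alongside `sweeps['prev']` for the new `LoadMultiViewImageFromMultiSweepsFuture` pipeline to consume. A simplified, self-contained sketch of that two-directional walk (the real method gathers camera sweep dicts; bare indices stand in for them here):

```python
# Simplified stand-in for the bidirectional sweep collection.
def collect_sweeps_sketch(index, num_samples, into_past=60, into_future=60):
    prev_sweeps, i = [], index - 1
    while len(prev_sweeps) < into_past and i >= 0:
        prev_sweeps.append(i)  # walk backwards in time
        i -= 1

    next_sweeps, i = [], index + 1
    while len(next_sweeps) < into_future and i < num_samples:
        next_sweeps.append(i)  # walk forwards in time
        i += 1

    return {'prev': prev_sweeps, 'next': next_sweeps}

print(collect_sweeps_sketch(index=2, num_samples=6))
# {'prev': [1, 0], 'next': [3, 4, 5]}
```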
loaders/pipelines/loading.py CHANGED
@@ -152,3 +152,106 @@ class LoadMultiViewImageFromMultiSweeps(object):
             return self.load_online(results)
         else:
             return self.load_offline(results)
+
+
+@PIPELINES.register_module()
+class LoadMultiViewImageFromMultiSweepsFuture(object):
+    def __init__(self,
+                 prev_sweeps_num=5,
+                 next_sweeps_num=5,
+                 color_type='color',
+                 test_mode=False):
+        self.prev_sweeps_num = prev_sweeps_num
+        self.next_sweeps_num = next_sweeps_num
+        self.color_type = color_type
+        self.test_mode = test_mode
+
+        assert prev_sweeps_num == next_sweeps_num
+
+        self.train_interval = [4, 8]
+        self.test_interval = 6
+
+        try:
+            mmcv.use_backend('turbojpeg')
+        except ImportError:
+            mmcv.use_backend('cv2')
+
+    def __call__(self, results):
+        if self.prev_sweeps_num == 0 and self.next_sweeps_num == 0:
+            return results
+
+        cam_types = [
+            'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT',
+            'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT'
+        ]
+
+        if self.test_mode:
+            interval = self.test_interval
+        else:
+            interval = np.random.randint(self.train_interval[0], self.train_interval[1] + 1)
+
+        # previous sweeps
+        if len(results['sweeps']['prev']) == 0:
+            for _ in range(self.prev_sweeps_num):
+                for j in range(len(cam_types)):
+                    results['img'].append(results['img'][j])
+                    results['img_timestamp'].append(results['img_timestamp'][j])
+                    results['filename'].append(results['filename'][j])
+                    results['lidar2img'].append(np.copy(results['lidar2img'][j]))
+        else:
+            choices = [(k + 1) * interval - 1 for k in range(self.prev_sweeps_num)]
+
+            for idx in sorted(list(choices)):
+                sweep_idx = min(idx, len(results['sweeps']['prev']) - 1)
+                sweep = results['sweeps']['prev'][sweep_idx]
+
+                if len(sweep.keys()) < len(cam_types):
+                    sweep = results['sweeps']['prev'][sweep_idx - 1]
+
+                for sensor in cam_types:
+                    results['img'].append(mmcv.imread(sweep[sensor]['data_path'], self.color_type))
+                    results['img_timestamp'].append(sweep[sensor]['timestamp'] / 1e6)
+                    results['filename'].append(sweep[sensor]['data_path'])
+                    results['lidar2img'].append(compose_lidar2img(
+                        results['ego2global_translation'],
+                        results['ego2global_rotation'],
+                        results['lidar2ego_translation'],
+                        results['lidar2ego_rotation'],
+                        sweep[sensor]['sensor2global_translation'],
+                        sweep[sensor]['sensor2global_rotation'],
+                        sweep[sensor]['cam_intrinsic'],
+                    ))
+
+        # future sweeps
+        if len(results['sweeps']['next']) == 0:
+            for _ in range(self.next_sweeps_num):
+                for j in range(len(cam_types)):
+                    results['img'].append(results['img'][j])
+                    results['img_timestamp'].append(results['img_timestamp'][j])
+                    results['filename'].append(results['filename'][j])
+                    results['lidar2img'].append(np.copy(results['lidar2img'][j]))
+        else:
+            choices = [(k + 1) * interval - 1 for k in range(self.next_sweeps_num)]
+
+            for idx in sorted(list(choices)):
+                sweep_idx = min(idx, len(results['sweeps']['next']) - 1)
+                sweep = results['sweeps']['next'][sweep_idx]
+
+                if len(sweep.keys()) < len(cam_types):
+                    sweep = results['sweeps']['next'][sweep_idx - 1]
+
+                for sensor in cam_types:
+                    results['img'].append(mmcv.imread(sweep[sensor]['data_path'], self.color_type))
+                    results['img_timestamp'].append(sweep[sensor]['timestamp'] / 1e6)
+                    results['filename'].append(sweep[sensor]['data_path'])
+                    results['lidar2img'].append(compose_lidar2img(
+                        results['ego2global_translation'],
+                        results['ego2global_rotation'],
+                        results['lidar2ego_translation'],
+                        results['lidar2ego_rotation'],
+                        sweep[sensor]['sensor2global_translation'],
+                        sweep[sensor]['sensor2global_rotation'],
+                        sweep[sensor]['cam_intrinsic'],
+                    ))
+
+        return results
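Sweep selection in both branches is driven by `choices = [(k + 1) * interval - 1 for k in range(n)]`, clamped to the number of available sweeps. With the release config (`prev_sweeps_num=next_sweeps_num=7`) and the fixed `test_interval=6`, this samples every sixth sweep:

```python
# Worked example of the index math used by LoadMultiViewImageFromMultiSweepsFuture.
n, interval = 7, 6  # 7 sweeps per direction, test-time interval of 6
choices = [(k + 1) * interval - 1 for k in range(n)]
print(choices)  # [5, 11, 17, 23, 29, 35, 41]

# Short sweep lists saturate at the last available sweep, mirroring
# `min(idx, len(sweeps) - 1)` above.
available = 20
print([min(idx, available - 1) for idx in choices])  # [5, 11, 17, 19, 19, 19, 19]
```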
models/sparsebev.py CHANGED
@@ -239,7 +239,7 @@ class SparseBEV(MVXTwoStageDetector):
         world_size = get_dist_info()[1]
         if world_size == 1:  # online
             return self.simple_test_online(img_metas, img, rescale)
-        elif world_size > 1:  # offline
+        else:  # offline
             return self.simple_test_offline(img_metas, img, rescale)
 
     def simple_test_offline(self, img_metas, img=None, rescale=False):
@@ -273,23 +273,21 @@
         for i in range(num_frames):
             img_indices = list(np.arange(i * 6, (i + 1) * 6))
 
-            img_curr_large = img[:, 0]  # [B, 6, C, H, W]
-            img_metas_curr_large = [{}]
-
+            img_metas_curr = [{}]
             for k in img_metas[0].keys():
                 if isinstance(img_metas[0][k], list):
-                    img_metas_curr_large[0][k] = [img_metas[0][k][i] for i in img_indices]
+                    img_metas_curr[0][k] = [img_metas[0][k][i] for i in img_indices]
 
             if img_filenames[img_indices[0]] in self.memory:
-                img_feats_curr_large = self.memory[img_filenames[img_indices[0]]]
+                img_feats_curr = self.memory[img_filenames[img_indices[0]]]
             else:
-                assert i == 0
-                img_feats_curr_large = self.extract_feat(img_curr_large, img_metas_curr_large)
-                self.memory[img_filenames[img_indices[0]]] = img_feats_curr_large
+                img_curr_large = img[:, i]  # [B, 6, C, H, W]
+                img_feats_curr = self.extract_feat(img_curr_large, img_metas_curr)
+                self.memory[img_filenames[img_indices[0]]] = img_feats_curr
                 self.queue.put(img_filenames[img_indices[0]])
 
-            img_feats_large.append(img_feats_curr_large)
-            img_metas_large.append(img_metas_curr_large)
+            img_feats_large.append(img_feats_curr)
+            img_metas_large.append(img_metas_curr)
 
         # reorganize
         feat_levels = len(img_feats_large[0])
@@ -314,7 +312,7 @@ class SparseBEV(MVXTwoStageDetector):
         for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
             result_dict['pts_bbox'] = pts_bbox
 
-        while self.queue.qsize() >= 8:
+        while self.queue.qsize() >= 16:
            pop_key = self.queue.get()
            self.memory.pop(pop_key)
 
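The offline path caches per-frame backbone features in `self.memory` (keyed by the first camera's filename) and evicts in FIFO order via `self.queue`; the threshold grows from 8 to 16 because `num_frames` is now 15 with future sweeps. A minimal, self-contained sketch of the same cache pattern (class and method names here are illustrative, not the repo's):

```python
# FIFO feature cache in the style of simple_test_offline above.
from queue import Queue

class FeatureCache:
    def __init__(self, capacity=16):  # was 8 before future sweeps pushed num_frames to 15
        self.memory, self.queue, self.capacity = {}, Queue(), capacity

    def get(self, key, extract):
        if key not in self.memory:           # compute features once per frame
            self.memory[key] = extract(key)
            self.queue.put(key)
        feats = self.memory[key]
        while self.queue.qsize() >= self.capacity:  # evict oldest entries
            self.memory.pop(self.queue.get())
        return feats

cache = FeatureCache()
print(cache.get('CAM_FRONT/frame_0001.jpg', extract=lambda k: f'feats({k})'))
```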
 
val.py CHANGED
@@ -17,7 +17,7 @@ from models.utils import VERSION
 
 
 def evaluate(dataset, results, epoch):
-    metrics = dataset.evaluate(results, jsonfile_prefix=None)
+    metrics = dataset.evaluate(results, jsonfile_prefix='submission')
 
     mAP = metrics['pts_bbox_NuScenes/mAP']
     mATE = metrics['pts_bbox_NuScenes/mATE']
@@ -126,7 +126,6 @@
 
     if 'version' in checkpoint:
         VERSION.name = checkpoint['version']
-        logging.info(VERSION.name)
 
     if world_size > 1:
        results = multi_gpu_test(model, val_loader, gpu_collect=True)
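With `jsonfile_prefix='submission'`, mmdet3d's `NuScenesDataset.evaluate` keeps the formatted detection JSON on disk instead of writing it to a throwaway temp directory, which is what a nuScenes test-server submission needs (the new `vov99` model is evaluated on test). A small sketch of where the output lands; the exact path follows mmdet3d's `format_results` convention and should be treated as an assumption:

```python
# Where the formatted predictions land with jsonfile_prefix='submission'
# (path per mmdet3d's NuScenesDataset.format_results convention; assumed).
import os

jsonfile_prefix = 'submission'
result_json = os.path.join(jsonfile_prefix, 'pts_bbox', 'results_nusc.json')
print(result_json)  # submission/pts_bbox/results_nusc.json -> upload to the eval server
```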