File size: 2,001 Bytes
663494c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71

import mmcv
from typing import Dict, List
import pickle as pkl

def sort_data(data_dict: Dict):
    """Sort key for info records: the record's unique sample token."""
    token = data_dict["token"]
    return token

# Expected gap between consecutive frame_idx values within one scene.
FRAME_INTERVAL = 10

# formula to calculate runtime:
# num_frame / 8 FPS * 1.66 (map eval) =
# for val pkl during training, 4000 is the MAX due to time limit for autoresume
# MAX_FRAME_SEQ = 4000    # total 10400 frames in 25 sequences
# for val pkl during testing, 4000 is the MAX due to time limit for autoresume
# NOTE(review): comment above says 4000 is the max, but the value is 10000 -- confirm.
MAX_FRAME_SEQ = 10000    # total 10400 frames in 25 sequences

# Path to the full validation pickle (CARLA data in nuScenes format).
predroot = '/mnt/hdd2/datasets/carla_1.0/carla_data_0414/data_val_nusc_format.pkl'

# Load the whole pickle into memory; presumably mmcv dispatches on the .pkl
# extension to a pickle backend -- verify against the installed mmcv version.
pkl_data: Dict = mmcv.load(predroot)
# keys are bbox_results, occ_results_computed, planning_results_computed

# Per-frame metadata records; sorted in place by sample token so that frames
# of the same scene come out contiguous (assumes tokens sort frames in scene
# order -- TODO confirm against the data generator).
info_data: List[Dict] = pkl_data["infos"]
info_data.sort(key=sort_data)

# Filter the sorted frames: keep only frames whose frame_idx is within the
# MAX_FRAME_SEQ budget, while verifying that frame indices inside a scene
# advance by exactly FRAME_INTERVAL.
new_info_data: List[Dict] = []
scene_token = None
for data_frame in info_data:
    scene_token_cur = data_frame['scene_token']
    frame_id_cur = data_frame['frame_idx']

    # skipping: drop frames beyond the runtime budget
    if frame_id_cur > MAX_FRAME_SEQ:
        continue

    print(data_frame['token'])
    print(data_frame['scene_token'])
    print(data_frame['frame_idx'])

    # new sequence: reset the expected frame counter for this scene
    if scene_token is None or scene_token_cur != scene_token:
        frame_idx = frame_id_cur
        scene_token = scene_token_cur

    # continue the sequence: advance the expected counter and keep the frame.
    # NOTE(review): only this branch appends, so the FIRST frame of every
    # scene is dropped from new_info_data -- confirm this is intentional.
    else:
        frame_idx += FRAME_INTERVAL
        new_info_data.append(data_frame)

    # checking: frames within a scene must be spaced exactly FRAME_INTERVAL apart
    assert frame_idx == frame_id_cur, f'frame id wrong, {frame_idx} vs {frame_id_cur}'

print(f'total number of frames is {len(new_info_data)}')

# Assemble the final dict: shallow-copy every top-level key from the source
# pickle, then swap in the truncated info list (replaces the manual key-copy
# loop with the idiomatic copy-and-override form; same resulting mapping).
new_pkl_data = dict(pkl_data)
new_pkl_data['infos'] = new_info_data

# saving: write the partial pickle next to the source, tagged with the budget
output_path = f'/mnt/hdd2/datasets/carla_1.0/carla_data_0414/data_val_nusc_format_partial_{MAX_FRAME_SEQ}.pkl'
with open(output_path, "wb") as f:
    pkl.dump(new_pkl_data, f)