File size: 1,759 Bytes
d670799
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
# Copyright (c) OpenMMLab. All rights reserved.
"""This file processes the annotation files and generates proper annotation
files for localizers."""
import json

import numpy as np


def load_json(file):
    """Read *file* as JSON and return the decoded object."""
    with open(file) as json_file:
        return json.load(json_file)


data_file = '../../../data/ActivityNet'
info_file = f'{data_file}/video_info_new.csv'
ann_file = f'{data_file}/anet_anno_action.json'

# Per-video annotation entries keyed by video name.
anno_database = load_json(ann_file)

# CSV rows as string arrays; header row skipped. The loop below assumes
# column 0 is the video name, columns 3/4 are fps/rfps, and column 5 is the
# subset label ('training'/'testing'/'validation') — TODO confirm against
# the video_info_new.csv header.
video_record = np.loadtxt(info_file, dtype=str, delimiter=',', skiprows=1)

video_dict_train = {}
video_dict_val = {}
video_dict_test = {}
video_dict_full = {}

for video_item in video_record:
    video_name = video_item[0]
    video_info = anno_database[video_name]
    video_subset = video_item[5]
    # Attach frame-rate info (np.float64 is a float subclass, so it
    # serializes cleanly with json.dump below).
    video_info['fps'] = video_item[3].astype(np.float64)
    video_info['rfps'] = video_item[4].astype(np.float64)
    video_dict_full[video_name] = video_info
    if video_subset == 'training':
        video_dict_train[video_name] = video_info
    elif video_subset == 'testing':
        video_dict_test[video_name] = video_info
    elif video_subset == 'validation':
        video_dict_val[video_name] = video_info

print(f'full subset video numbers: {len(video_record)}')

# Write one annotation file per subset plus the combined set.
for suffix, subset_dict in (('train', video_dict_train),
                            ('val', video_dict_val),
                            ('test', video_dict_test),
                            ('full', video_dict_full)):
    with open(f'{data_file}/anet_anno_{suffix}.json', 'w') as result_file:
        json.dump(subset_dict, result_file)