File size: 1,587 Bytes
d670799
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
# Copyright (c) OpenMMLab. All rights reserved.
import json
import multiprocessing
import os

import decord

# Load the full HACS v1.1.1 segment database once at module import; worker
# processes inherit it (or re-import it) for per-video lookups.
annotation_file = 'HACS_v1.1.1/HACS_segments_v1.1.1.json'
with open(annotation_file) as f:
    all_annotations = json.load(f)['database']


def parse_anno(key, annotations=None):
    """Build a normalized annotation record for one HACS video.

    Args:
        key (str): Video id (without the ``v_`` prefix) used to index the
            annotation database.
        annotations (dict, optional): Database mapping video id to its raw
            annotation entry. Defaults to the module-level
            ``all_annotations``, which keeps ``pool.map(parse_anno, keys)``
            working unchanged.

    Returns:
        dict: Record with ``duration_second``, ``annotations``, ``subset``,
        ``feature_frame``/``duration_frame`` and ``key`` (``v_<key>``).
    """
    if annotations is None:
        annotations = all_annotations
    record = annotations[key]  # single lookup instead of one per field
    anno = {
        'duration_second': float(record['duration']),
        'annotations': record['annotations'],
        'subset': record['subset'],
    }

    labels = {seg['label'] for seg in anno['annotations']}
    # Fallback frame count assumes 30 fps when the video file is missing.
    num_frames = int(anno['duration_second'] * 30)
    for label in labels:
        path = f'data/{label}/v_{key}.mp4'
        if os.path.isfile(path):
            # Prefer the exact frame count read from the actual video.
            num_frames = len(decord.VideoReader(path))
            break

    anno['feature_frame'] = anno['duration_frame'] = num_frames
    anno['key'] = f'v_{key}'
    return anno


# Parse every video's annotations in parallel. The context manager ensures
# the worker processes are torn down when the map completes (the original
# code never called close()/join(), leaking the pool).
with multiprocessing.Pool(16) as pool:
    video_list = list(all_annotations)
    outputs = pool.map(parse_anno, video_list)

# Split the parsed records by dataset subset; 'key' and 'subset' are popped
# so the dumped per-video records carry only the annotation fields.
train_anno = {}
val_anno = {}
test_anno = {}

for anno in outputs:
    key = anno.pop('key')
    subset = anno.pop('subset')
    if subset == 'training':
        train_anno[key] = anno
    elif subset == 'validation':
        val_anno[key] = anno
    else:
        # Anything that is neither training nor validation (i.e. testing).
        test_anno[key] = anno

# Write one JSON file per split into the shared data directory.
outdir = '../../../data/HACS'
for filename, split_anno in (('hacs_anno_train.json', train_anno),
                             ('hacs_anno_val.json', val_anno),
                             ('hacs_anno_test.json', test_anno)):
    with open(f'{outdir}/{filename}', 'w') as f:
        json.dump(split_anno, f)