File size: 2,341 Bytes
625a17f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import json
import pickle
import os
from tqdm import tqdm

def build_referring_dataset(instance_path, refs_path, split, save_path):
    """Build a referring-expression JSON dataset for one split.

    Joins a COCO-style ``instances.json`` with a pickled list of referring
    expressions (RefCOCO-style ``refs(*).p``) and writes one JSON record per
    ref in the requested split.

    Args:
        instance_path: Path to ``instances.json`` containing ``images`` and
            ``annotations`` lists.
        refs_path: Path to the pickled refs list; each ref is expected to
            carry ``split``, ``ann_id``, ``image_id`` and ``sentences`` keys.
        split: Split name to keep (e.g. ``'train'``, ``'val'``, ``'testA'``).
        save_path: Destination path for the output JSON list.

    Raises:
        FileNotFoundError: If either input file does not exist.
    """
    # Explicit raises instead of `assert`: asserts are stripped under -O,
    # which would turn a missing file into a confusing downstream error.
    if not os.path.exists(instance_path):
        raise FileNotFoundError(f'Path not found: {instance_path}')
    if not os.path.exists(refs_path):
        raise FileNotFoundError(f'Path not found: {refs_path}')

    with open(instance_path) as f:
        instance = json.load(f)
    # NOTE(security): pickle.load can execute arbitrary code; only load refs
    # files from the trusted RefCOCO distribution.
    with open(refs_path, 'rb') as f:
        refs = pickle.load(f)

    # Index images and annotations by id for O(1) lookup per ref.
    img_id2info = {image['id']: image for image in instance['images']}
    anno_id2info = {anno['id']: anno for anno in instance['annotations']}

    outputs = []
    for sample in tqdm(refs):
        if sample['split'] != split:
            continue
        sample_image = img_id2info[sample['image_id']]
        outputs.append(
            {
                'image': sample_image['file_name'],
                'image_info': sample_image,
                'instruction': sample['sentences'],
                'anns': [anno_id2info[sample['ann_id']]],
                # Sequential id over the kept samples (0-based).
                'new_img_id': len(outputs),
            }
        )

    with open(save_path, 'w') as f:
        json.dump(outputs, f)
    print(f'Saving at {save_path}. Total sample: {len(outputs)}.')

    

if __name__ == '__main__':
    # Change root path to your own directory
    root_path = 'datasets/refer_seg'
    datasets = ['refcoco', 'refcoco+', 'refcocog']
    for dataset in datasets:
        # refcocog (umd partition) uses a single 'test' split, while
        # refcoco/refcoco+ (unc partition) use 'testA'/'testB'.  Compute the
        # split list and refs filename per dataset rather than mutating a
        # shared list, so correctness does not depend on dataset order.
        if dataset == 'refcocog':
            splits = ['train', 'val', 'test']
            refs_name = 'refs(umd).p'
        else:
            splits = ['train', 'val', 'testA', 'testB']
            refs_name = 'refs(unc).p'

        for split in splits:
            instance_path = os.path.join(root_path, dataset, 'instances.json')
            refs_path = os.path.join(root_path, dataset, refs_name)
            save_path = os.path.join(root_path, dataset, f'{split}_psalm.json')
            print(f'Processing {dataset}: {split}...')

            build_referring_dataset(instance_path, refs_path, split, save_path)

    print('Done')