File size: 2,186 Bytes
b1c4fa7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import json
import os
from tqdm import tqdm

from datasets import Dataset, load_dataset, Image


def load_jsonl(file_path):
    """
    Load a JSONL file and return a list of Python dictionaries.

    Each non-empty line is parsed as one JSON object. Blank lines (e.g. a
    trailing newline at end of file) are skipped instead of being reported
    as decode errors. Lines that fail to parse are reported and skipped so
    one bad record does not abort the whole load.
    """
    data = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines — they are not malformed records.
                continue
            try:
                data.append(json.loads(line))
            except json.JSONDecodeError as e:
                print(f"Error decoding JSON on line: {line} - {e}")
    return data


def main():
    """
    Build a HuggingFace dataset for one COCO split, combining the official
    caption annotations with localized narratives, and save it to disk.

    Expects under ``workdir``:
      - annotations/captions_{dset}2017.json (official COCO annotations)
      - {dset}2017/ (the image files)
      - localized_narratives/coco_{dset}_captions.jsonl
    """
    dset = "val"
    workdir = "./coco"

    # Load annotations from the official COCO release
    with open(
        os.path.join(workdir, "annotations", f"captions_{dset}2017.json"),
        "r",
        encoding="utf-8"
    ) as reader:
        data = json.load(reader)

    # Index image metadata by image id so annotations can be attached below
    images = {}
    for item in tqdm(data["images"]):
        _idx = item["id"]
        images[_idx] = {
            "file_name": item["file_name"],
            "height": item["height"],
            "width": item["width"],
            "id": _idx,
            "image": os.path.join(workdir, f"{dset}2017", item["file_name"]),
            "captions": [],
            "narratives": []
        }

    # Attach the official caption annotations to their images
    for item in tqdm(data["annotations"]):
        images[item["image_id"]]["captions"].append(item["caption"])

    # Attach localized narratives; their image_id field is a string,
    # while the official COCO ids are ints, hence the int() conversion.
    data_narr = load_jsonl(os.path.join(workdir, "localized_narratives", f"coco_{dset}_captions.jsonl"))
    for item in tqdm(data_narr):
        images[int(item["image_id"])]["narratives"].append(item["caption"])

    def gen():
        # Yield plain dicts; Dataset.from_generator infers the schema.
        yield from images.values()

    ds = Dataset.from_generator(gen)
    # Cast the path column to an Image feature so files are decoded lazily.
    ds = ds.cast_column("image", Image())
    # Reuse workdir instead of the previously hard-coded "coco/..." prefix.
    ds.save_to_disk(
        os.path.join(workdir, "datasets", "data", dset), max_shard_size="400MB"
    )


def test_coco():
    """Smoke-check: reload the saved dataset and print its metadata."""
    dataset = load_dataset("./coco/datasets", split="val")
    print(dataset.info)
    

if __name__ == "__main__":
    # Run main() once to build and save the dataset, then test_coco()
    # to verify it loads back.
    # main()
    test_coco()