kumapo committed on
Commit 707299a · 1 Parent(s): 4947e5a

Upload stair_captions_dataset_script.py

Files changed (1)
  1. stair_captions_dataset_script.py +180 -0
stair_captions_dataset_script.py ADDED
@@ -0,0 +1,180 @@
+ # copied from https://huggingface.co/datasets/ydshieh/coco_dataset_script/blob/main/coco_dataset_script.py
+ import json
+ import os
+ import datasets
+
+
+ class COCOBuilderConfig(datasets.BuilderConfig):
+
+     def __init__(self, name, splits, **kwargs):
+         super().__init__(name, **kwargs)
+         self.splits = splits
+
+
+ # Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{Yoshikawa2017,
+   title = {STAIR Captions: Constructing a Large-Scale Japanese Image Caption Dataset},
+   booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
+   month = {July},
+   year = {2017},
+   address = {Vancouver, Canada},
+   publisher = {Association for Computational Linguistics},
+   pages = {417--421},
+   url = {http://www.aclweb.org/anthology/P17-2066}
+ }
+ """
+
+ # Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ STAIR Captions is a large-scale dataset of Japanese captions for the images in the MS-COCO dataset.
+ """
+
+ # Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "http://cocodataset.org/#home"
+
+ # Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # Add links to the official dataset URLs here
+ # The HuggingFace datasets library doesn't host the datasets but only points to the original files
+ # This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)
+
+ # This script is supposed to work with a local (downloaded) COCO dataset.
+ _URLs = {}
+
+
+ # The name of the dataset class usually matches the script name in CamelCase instead of snake_case
+ class COCODataset(datasets.GeneratorBasedBuilder):
+     """An example dataset script to work with a local (downloaded) COCO dataset"""
+
+     VERSION = datasets.Version("0.0.0")
+
+     BUILDER_CONFIG_CLASS = COCOBuilderConfig
+     BUILDER_CONFIGS = [
+         COCOBuilderConfig(name='2014', splits=['train', 'valid']),
+     ]
+     DEFAULT_CONFIG_NAME = "2014"
+
+     def _info(self):
+         # This method specifies the datasets.DatasetInfo object, which contains the information and typings for the dataset
+
+         feature_dict = {
+             "image_id": datasets.Value("int64"),
+             "caption_id": datasets.Value("int64"),
+             "caption": datasets.Value("string"),
+             "height": datasets.Value("int64"),
+             "width": datasets.Value("int64"),
+             "file_name": datasets.Value("string"),
+             "coco_url": datasets.Value("string"),
+             "image_path": datasets.Value("string"),
+         }
+
+         features = datasets.Features(feature_dict)
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # the features defined above
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         data_dir = self.config.data_dir
+         if not data_dir:
+             raise ValueError(
+                 "This script is supposed to work with a local (downloaded) COCO dataset. The argument `data_dir` in `load_dataset()` is required."
+             )
+
+         splits = []
+         for split in self.config.splits:
+             if split == 'train':
+                 dataset = datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "json_path": os.path.join(data_dir, "stair_captions_v1.2", "stair_captions_v1.2_train.json"),
+                         "image_dir": os.path.join(data_dir, "train2014"),
+                         "split": "train",
+                     }
+                 )
+             elif split in ['val', 'valid', 'validation', 'dev']:
+                 dataset = datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "json_path": os.path.join(data_dir, "stair_captions_v1.2", "stair_captions_v1.2_val.json"),
+                         "image_dir": os.path.join(data_dir, "val2014"),
+                         "split": "valid",
+                     },
+                 )
+             else:
+                 continue
+
+             splits.append(dataset)
+
+         return splits
+
+     def _generate_examples(
+         # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+         self, json_path, image_dir, split
+     ):
+         """Yields examples as (key, example) tuples."""
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is here for legacy reasons (tfds) and is not important in itself.
+
+         _features = ["image_id", "caption_id", "caption", "height", "width", "file_name", "coco_url", "image_path", "id"]
+         features = list(_features)
+
+         if split == "valid":
+             split = "val"
+
+         with open(json_path, 'r', encoding='UTF-8') as fp:
+             data = json.load(fp)
+
+         # list of dict
+         images = data["images"]
+         entries = images
+
+         # build a dict of image_id -> image info dict
+         d = {image["id"]: image for image in images}
+
+         # list of dict
+         if split in ["train", "val"]:
+             annotations = data["annotations"]
+
+             # merge the image info into each annotation
+             for annotation in annotations:
+                 _id = annotation["id"]
+                 image_info = d[annotation["image_id"]]
+                 annotation.update(image_info)
+                 annotation["id"] = _id  # restore the annotation id, which `image_info` just overwrote
+
+             entries = annotations
+
+         for id_, entry in enumerate(entries):
+
+             entry = {k: v for k, v in entry.items() if k in features}
+
+             entry["caption_id"] = entry.pop("id")
+             entry["image_path"] = os.path.join(image_dir, entry["file_name"])
+
+             entry = {k: entry[k] for k in _features if k in entry}
+
+             yield str((entry["image_id"], entry["caption_id"])), entry
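
For reference, here is a minimal usage sketch (not part of the commit). It assumes the uploaded script sits in the working directory; the `data_dir` path is hypothetical and must contain the STAIR Captions JSON files and the COCO 2014 images laid out the way the hard-coded paths in `_split_generators` expect.

from datasets import load_dataset

# Expected layout under `data_dir` (per the paths hard-coded in the script):
#   <data_dir>/stair_captions_v1.2/stair_captions_v1.2_train.json
#   <data_dir>/stair_captions_v1.2/stair_captions_v1.2_val.json
#   <data_dir>/train2014/ and <data_dir>/val2014/ (the COCO 2014 images)
dataset = load_dataset(
    "stair_captions_dataset_script.py",  # path to the script from this commit
    "2014",                              # the only builder config defined in the script
    data_dir="/path/to/local/data",      # hypothetical; replace with your own
)

print(dataset["train"][0]["caption"])    # a Japanese caption string
print(dataset["validation"].num_rows)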