thomasw21 commited on
Commit
1d83a66
·
1 Parent(s): cb26dfd

PMD dataset

Browse files
Files changed (1) hide show
  1. pmd.py +759 -0
pmd.py ADDED
@@ -0,0 +1,759 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PMD (Public Multimodal Datasets): an aggregation of publicly available image-text datasets (COCO, SBU, Localized Narratives, Conceptual Captions/12M, Visual Genome, WIT, RedCaps, YFCC100M) cast into one shared schema."""
16
+ import json
17
+ import re
18
+ import sqlite3
19
+ from abc import abstractmethod
20
+ from hashlib import md5
21
+ from pathlib import Path
22
+ from typing import Any, Dict, List, Optional
23
+ from urllib.parse import unquote_plus
24
+
25
+ # TODO: @thomasw21
26
+ import datasets
27
+ from datasets import load_dataset
28
+ from langdetect import detect
29
+
30
# TODO: @thomasw21 — add the BibTeX citation for PMD.
_CITATION = """"""

# TODO: @thomasw21 — add a one-paragraph description of the aggregated dataset.
_DESCRIPTION = """"""

# TODO: @thomasw21 — add the homepage URL.
_HOMEPAGE = ""

# TODO: @thomasw21 — add the license string.
_LICENSE = ""

# Unified schema: every sub-dataset loader casts its rows into these features.
_FEATURES = datasets.Features(
    {
        # Some images provide an url others provide an Image. Both are exclusive.
        "image_url": datasets.Value("string"),
        "image": datasets.Image(),
        # An image can have multiple text associated with the same value. For example COCO.
        "texts": [datasets.Value("string")],
        # Define where the sample comes from, this is necessary when we start to use aggregated versions like PMD.
        "source": datasets.Value("string"),
        # We commit any kind of additional information in json format in `meta`
        "meta": datasets.Value("string"),
    }
)
54
+
55
+
56
class BaseLoader:
    """Common interface of every PMD sub-dataset loader.

    A loader remembers which source it represents and which split it reads,
    and yields rows that follow the PMD feature schema.
    """

    def __init__(self, source: str, split: str):
        # Identifier of the underlying dataset; emitted in the `source` column.
        self.source = source
        # Name of the split this loader iterates over.
        self.split = split

    @abstractmethod
    def _generate_examples(self):
        """Yield PMD-schema rows; concrete loaders must override this."""
        raise NotImplementedError()
68
+
69
+
70
class DatasetsLoader(BaseLoader):
    """Helper as some datasets are already implemented"""

    def __init__(
        self,
        dataset_name: str,
        config_name: Optional[str],
        split: str,
        batch_size: int = 1000,
    ):
        super(DatasetsLoader, self).__init__(source=dataset_name, split=split)
        self.dataset_name = dataset_name
        self.config_name = config_name
        self.batch_size = batch_size

    @abstractmethod
    def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
        """Return list of caster rows. Casted row are either PMD features"""
        raise NotImplementedError()

    def convert_batch_to_list_of_rows(self, batch: Dict) -> List[Dict[str, Any]]:
        """Turn a column-oriented batch dict into a list of row dicts."""
        # The real batch size may differ from self.batch_size (e.g. last batch).
        num_rows = len(next(iter(batch.values())))
        return [
            {name: values[index] for name, values in batch.items()}
            for index in range(num_rows)
        ]

    def _generate_examples(self):
        """Stream the underlying `datasets` dataset batch by batch and yield PMD rows."""
        dataset = load_dataset(self.dataset_name, self.config_name, split=self.split)
        total = len(dataset)
        for start in range(0, total, self.batch_size):
            stop = min(start + self.batch_size, total)
            for row in self.convert_batch_to_list_of_rows(dataset[start:stop]):
                yield from self.cast_to_pmd_features(row)
111
+
112
+
113
class BaseLoaderWithDLManager(BaseLoader):
    """We use dl_manager to generate `gen_kwargs` needed in order to generate examples."""

    def __init__(self, dl_manager, source: str, split: str):
        super(BaseLoaderWithDLManager, self).__init__(source=source, split=split)
        # Resolve all downloads up-front so generation only needs plain kwargs.
        self.gen_kwargs = self.generate_gen_kwargs(dl_manager)

    @abstractmethod
    def generate_gen_kwargs(self, dl_manager):
        """Download/extract whatever is needed and return kwargs for generation."""
        raise NotImplementedError()

    @abstractmethod
    def _generate_examples_with_kwargs(self, **kwargs):
        """Yield PMD rows given the kwargs produced by `generate_gen_kwargs`."""
        raise NotImplementedError()

    def _generate_examples(self):
        yield from self._generate_examples_with_kwargs(**self.gen_kwargs)
131
+
132
+
133
class COCOloader(BaseLoaderWithDLManager):
    """Loader for COCO 2017 captions: one PMD row per (image, caption) pair,
    with images resolved to local paths from the official COCO zips."""

    # TODO @thomasw21 rely on offical coco integration as soon as it's ready.
    _ANNOTATION_URL = (
        "http://images.cocodataset.org/annotations/annotations_trainval2017.zip"
    )
    _IMAGES_URLS = {
        "train": "http://images.cocodataset.org/zips/train2017.zip",
        "validation": "http://images.cocodataset.org/zips/val2017.zip",
    }
    # Fix: the validation folder/annotation suffix is "val2017"; the previous
    # "val207" typo produced non-existent annotation and image paths.
    _SPLIT_MAP = {"train": "train2017", "validation": "val2017"}

    def __init__(self, dl_manager, split: str):
        super(COCOloader, self).__init__(
            dl_manager=dl_manager, source="coco", split=split
        )

    def generate_gen_kwargs(self, dl_manager):
        """Download annotations and images; return paths used during generation."""
        annotation_file = (
            Path(dl_manager.download_and_extract(self._ANNOTATION_URL))
            / "annotations" / f"captions_{self._SPLIT_MAP[self.split]}.json"
        )
        image_folder = Path(
            dl_manager.download_and_extract(self._IMAGES_URLS[self.split])
        )
        return {
            "annotation_file": annotation_file,
            "base_image_path": image_folder / self._SPLIT_MAP[self.split],
        }

    def _generate_examples_with_kwargs(
        self, annotation_file: str, base_image_path: Path
    ):
        """Yield one PMD row per caption in the COCO annotation file."""
        with open(annotation_file, "r", encoding="utf-8") as fi:
            annotations = json.load(fi)

        # We're going to index all the annotations according to `image_id`
        annotations_per_image_id = {}
        for annotation in annotations["annotations"]:
            image_id = annotation["image_id"]
            if image_id in annotations_per_image_id:
                annotations_per_image_id[image_id].append(annotation)
            else:
                annotations_per_image_id[image_id] = [annotation]

        for image_metadata in annotations["images"]:
            image_id = image_metadata["id"]
            # COCO image filenames are the image id zero-padded to 12 digits.
            image_path = base_image_path / f"{image_id:012}.jpg"
            # `.get` guards against images that have no caption at all
            # (the previous direct indexing raised KeyError on such images).
            for annotation in annotations_per_image_id.get(image_id, []):
                yield {
                    "image_url": None,
                    "image": str(image_path.absolute()),
                    "texts": [annotation["caption"]],
                    "source": self.source,
                    "meta": json.dumps(
                        {"image_metadata": image_metadata, "annotation": annotation}
                    ),
                }
190
+
191
+
192
class SBULoader(DatasetsLoader):
    """SBU Captions: one caption per image url, read through `datasets`."""

    def __init__(self, split: str, batch_size: int = 1000):
        super(SBULoader, self).__init__(
            dataset_name="sbu_captions",
            config_name=None,
            split=split,
            batch_size=batch_size,
        )

    def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
        # Everything besides the url/caption pair is kept as json metadata.
        leftover = {
            key: value
            for key, value in row.items()
            if key not in ("image_url", "caption")
        }
        casted = {
            "image_url": row["image_url"],
            "image": None,
            "texts": [row["caption"]],
            "source": self.source,
            "meta": json.dumps(leftover),
        }
        return [casted]
212
+
213
+
214
class LocalizedNarrativesOpenImagesLoader(BaseLoaderWithDLManager):
    """Localized Narratives captions over Open Images.

    Images are not downloaded: rows point at the public Open Images S3 bucket.
    """

    _ANNOTATION_URLs = {
        "train": "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_captions.jsonl",
        "validation": (
            "https://storage.googleapis.com/localized-narratives/annotations/open_images_validation_captions.jsonl"
        ),
        "test": "https://storage.googleapis.com/localized-narratives/annotations/open_images_test_captions.jsonl",
    }

    def __init__(self, dl_manager, split: str):
        # Fix: `source` used to be "localized_narratives__coco" (copy/paste from
        # the COCO loader), making Open Images rows indistinguishable from the
        # rows emitted by LocalizedNarrativesCOCOLoader.
        super(LocalizedNarrativesOpenImagesLoader, self).__init__(
            dl_manager=dl_manager,
            source="localized_narratives__open_images",
            split=split,
        )

    def generate_gen_kwargs(self, dl_manager):
        """Download the jsonl annotation file for the configured split."""
        annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
        return {"annotation_file": annotation_file, "split": self.split}

    def _generate_examples_with_kwargs(self, annotation_file: str, split: str):
        """Yield one PMD row per narrative caption, linking to the S3-hosted image."""
        with open(annotation_file, "r", encoding="utf-8") as fi:
            for line in fi:
                annotation = json.loads(line)
                assert "image_url" not in annotation
                yield {
                    "image_url": f"https://s3.amazonaws.com/open-images-dataset/{split}/{annotation['image_id']}.jpg",
                    "image": None,
                    "texts": [annotation["caption"]],
                    "source": self.source,
                    "meta": json.dumps(annotation),
                }
244
+
245
+
246
class LocalizedNarrativesCOCOLoader(BaseLoaderWithDLManager):
    """Localized Narratives captions over COCO 2017 images (stored locally)."""

    # TODO @thomasw21 rely on offical coco integration as soon as it's ready.
    _ANNOTATION_URLs = {
        "train": "https://storage.googleapis.com/localized-narratives/annotations/coco_train_captions.jsonl",
        "validation": "https://storage.googleapis.com/localized-narratives/annotations/coco_val_captions.jsonl",
    }
    _IMAGES_URLS = {
        "train": "http://images.cocodataset.org/zips/train2017.zip",
        "validation": "http://images.cocodataset.org/zips/val2017.zip",
    }
    # Fix: the validation folder is "val2017"; the previous "val207" typo made
    # every validation image path point at a non-existent directory.
    _SPLIT_MAP = {"train": "train2017", "validation": "val2017"}

    def __init__(self, dl_manager, split: str):
        super(LocalizedNarrativesCOCOLoader, self).__init__(
            dl_manager=dl_manager, source="localized_narratives__coco", split=split
        )

    def generate_gen_kwargs(self, dl_manager):
        """Download the jsonl annotations and the COCO image archive."""
        annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
        image_folder = Path(
            dl_manager.download_and_extract(self._IMAGES_URLS[self.split])
        )
        return {
            "annotation_file": annotation_file,
            "base_image_path": image_folder / self._SPLIT_MAP[self.split],
        }

    def _generate_examples_with_kwargs(
        self, annotation_file: str, base_image_path: Path
    ):
        """Yield one PMD row per narrative caption, resolving local image paths."""
        with open(annotation_file, "r", encoding="utf-8") as fi:
            for line in fi:
                annotation = json.loads(line)
                assert "image_url" not in annotation
                # COCO filenames are the image id zero-padded to 12 digits;
                # here `image_id` is a string, hence `zfill`.
                image_path = base_image_path / f"{annotation['image_id'].zfill(12)}.jpg"
                yield {
                    "image_url": None,
                    "image": str(image_path.absolute()),
                    "texts": [annotation["caption"]],
                    "source": self.source,
                    "meta": json.dumps(annotation),
                }
288
+
289
+
290
class LocalizedNarrativesFlickr30kLoader(BaseLoaderWithDLManager):
    """Localized Narratives captions over Flickr30k.

    Flickr30k images must be downloaded manually; annotations are fetched.
    """

    _LOCAL_IMAGE_FOLDER_NAME = "flickr30k-images"
    _ANNOTATION_URLs = {
        "train": "https://storage.googleapis.com/localized-narratives/annotations/flickr30k_train_captions.jsonl",
        "validation": "https://storage.googleapis.com/localized-narratives/annotations/flickr30k_val_captions.jsonl",
        "test": "https://storage.googleapis.com/localized-narratives/annotations/flickr30k_test_captions.jsonl",
    }

    def __init__(self, dl_manager, split: str):
        super(LocalizedNarrativesFlickr30kLoader, self).__init__(
            dl_manager=dl_manager, source="localized_narratives__flickr30k", split=split
        )

    def generate_gen_kwargs(self, dl_manager):
        """Check the manually-downloaded images exist, then fetch annotations."""
        manual_dir = dl_manager.manual_dir
        flickr_dir = None if manual_dir is None else Path(manual_dir) / "flickr30k"
        # Both "no manual dir configured" and "configured but missing" get the
        # same actionable message.
        if flickr_dir is None or not flickr_dir.exists():
            raise FileNotFoundError(
                f"Please set manual dir via `datasets.load_dataset('pmd', data_dir={{PATH}})` where `{{PATH}}/flickr30k` includes `{self._LOCAL_IMAGE_FOLDER_NAME}`.\n. Manual download instructions: {self.manual_download_instruction}"
            )

        annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])

        return {"annotation_file": annotation_file, "base_image_path": flickr_dir}

    @property
    def manual_download_instruction(self):
        """Human-readable instructions shown when the manual data is missing."""
        return """\
You need to go to http://shannon.cs.illinois.edu/DenotationGraph/data/index.html,
and manually download the dataset ("Flickr 30k images."). Once it is completed,
a file named `flickr30k-images.tar.gz` will appear in your Downloads folder
or whichever folder your browser chooses to save files to. You then have
to unzip the file and move `flickr30k-images` under <path/to/folder>/flickr30k.
The <path/to/folder> can e.g. be "~/manual_data".
dataset can then be loaded using the following command `datasets.load_dataset("pmd", data_dir="<path/to/folder>")`.
"""

    def _generate_examples_with_kwargs(
        self, annotation_file: str, base_image_path: Path
    ):
        """Yield one PMD row per narrative caption, pointing at local images."""
        with open(annotation_file, "r", encoding="utf-8") as annotations:
            for raw_line in annotations:
                record = json.loads(raw_line)
                assert "image" not in record
                local_image = base_image_path / f"{record['image_id']}.jpg"
                yield {
                    "image_url": None,
                    "image": str(local_image.absolute()),
                    "texts": [record["caption"]],
                    "source": self.source,
                    "meta": json.dumps(record),
                }
346
+
347
+
348
class LocalizedNarrativesADE20kLoader(BaseLoaderWithDLManager):
    """Localized Narratives captions over the ADE20k challenge images."""

    _ANNOTATION_URLs = {
        "train": "https://storage.googleapis.com/localized-narratives/annotations/ade20k_train_captions.jsonl",
        "validation": "https://storage.googleapis.com/localized-narratives/annotations/ade20k_validation_captions.jsonl",
    }
    _IMAGES_URL = (
        "http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip"
    )
    _SPLIT_MAP = {"train": "training", "validation": "validation"}

    def __init__(self, dl_manager, split: str):
        super(LocalizedNarrativesADE20kLoader, self).__init__(
            dl_manager=dl_manager, source="localized_narratives__ADE20k", split=split
        )

    def generate_gen_kwargs(self, dl_manager):
        """Download the jsonl annotations and the ADE20k image archive."""
        annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
        images_root = Path(dl_manager.download_and_extract(self._IMAGES_URL))
        split_folder = (
            images_root / "ADEChallengeData2016" / "images" / self._SPLIT_MAP[self.split]
        )

        return {"annotation_file": annotation_file, "base_image_path": split_folder}

    def _generate_examples_with_kwargs(
        self, annotation_file: str, base_image_path: Path
    ):
        """Yield one PMD row per narrative caption, pointing at local images."""
        with open(annotation_file, "r", encoding="utf-8") as annotations:
            for raw_line in annotations:
                record = json.loads(raw_line)
                assert "image" not in record
                local_image = base_image_path / f"{record['image_id']}.jpg"
                yield {
                    "image_url": None,
                    "image": str(local_image.absolute()),
                    "texts": [record["caption"]],
                    "source": self.source,
                    "meta": json.dumps(record),
                }
391
+
392
+
393
class VisualGenomeLoader(DatasetsLoader):
    """Visual Genome region descriptions: one PMD row per region crop."""

    def __init__(self, split: str, batch_size: int = 1000):
        super(VisualGenomeLoader, self).__init__(
            dataset_name="visual_genome",
            config_name="region_descriptions_v1.2.0",
            split=split,
            batch_size=batch_size,
        )

    def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
        meta = {
            key: value
            for key, value in row.items()
            if key not in ("image", "regions")
        }
        serialized_meta = json.dumps(meta)
        casted_rows = []
        for region in row["regions"]:
            # Crop box in PIL (left, upper, right, lower) convention.
            box = (
                region["x"],
                region["y"],
                region["x"] + region["width"],
                region["y"] + region["height"],
            )
            casted_rows.append(
                {
                    "image_url": None,
                    # TODO @thomasw21 I believe this is slow as hell
                    "image": row["image"].crop(box),
                    "texts": [region["phrase"]],
                    "source": self.source,
                    "meta": serialized_meta,
                }
            )
        return casted_rows
423
+
424
+
425
class WITLoader(DatasetsLoader):
    """WIT (Wikipedia-based Image Text): several caption-like columns per image url."""

    def __init__(self, split: str, batch_size: int = 1000):
        super(WITLoader, self).__init__(
            dataset_name="google/wit",
            config_name=None,
            split=split,
            batch_size=batch_size,
        )

    def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
        meta = {k: v for k, v in row.items() if k not in ["image_url"]}
        # Fix: WIT caption columns are nullable, so the previous version could
        # put `None` entries into `texts` (declared as a list of strings in
        # _FEATURES). Keep only the captions that are actually present.
        texts = [
            row[caption_name]
            # TODO @thomasw21 figure out which one we should choose
            for caption_name in (
                "caption_reference_description",
                "context_section_description",
                "caption_attribution_description",
            )
            if row.get(caption_name) is not None
        ]
        return [
            {
                "image_url": row["image_url"],
                "image": None,
                "texts": texts,
                "source": self.source,
                "meta": json.dumps(meta),
            }
        ]
453
+
454
+
455
class ConceptualCaptions(DatasetsLoader):
    """Conceptual Captions ("unlabeled" config): url + caption pairs."""

    def __init__(self, split: str, batch_size: int = 1000):
        super(ConceptualCaptions, self).__init__(
            dataset_name="conceptual_captions",
            config_name="unlabeled",
            split=split,
            batch_size=batch_size,
        )

    def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
        # Every column other than the url/caption pair goes into `meta`.
        leftover = {
            key: row[key] for key in row if key not in ("image_url", "caption")
        }
        casted = {
            "image_url": row["image_url"],
            "image": None,
            "texts": [row["caption"]],
            "source": self.source,
            "meta": json.dumps(leftover),
        }
        return [casted]
475
+
476
+
477
class Conceptual12MLoader(DatasetsLoader):
    """Conceptual 12M: url + caption pairs."""

    def __init__(self, split: str, batch_size: int = 1000):
        super(Conceptual12MLoader, self).__init__(
            dataset_name="conceptual_12m",
            config_name=None,
            split=split,
            batch_size=batch_size,
        )

    def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
        # Fix: iterating `row` directly yields keys only, so `for k, v in row`
        # raised `ValueError: too many values to unpack` on the first batch;
        # we need `.items()` for key/value pairs (consistent with the sibling
        # loaders). Type annotations added to match the abstract signature.
        meta = {k: v for k, v in row.items() if k not in ["image_url", "caption"]}
        return [
            {
                "image_url": row["image_url"],
                "image": None,
                "texts": [row["caption"]],
                "source": self.source,
                "meta": json.dumps(meta),
            }
        ]
497
+
498
+
499
class RedCapsLoader(DatasetsLoader):
    """RedCaps ("all" config): Reddit image posts with their raw captions."""

    def __init__(self, split: str, batch_size: int = 1000):
        super(RedCapsLoader, self).__init__(
            dataset_name="red_caps",
            config_name="all",
            split=split,
            batch_size=batch_size,
        )

    def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
        # Everything besides the url/caption pair is kept as json metadata.
        leftover = {
            key: row[key] for key in row if key not in ("image_url", "raw_caption")
        }
        casted = {
            "image_url": row["image_url"],
            "image": None,
            # TODO @thomasw21
            "texts": [row["raw_caption"]],
            "source": self.source,
            "meta": json.dumps(leftover),
        }
        return [casted]
520
+
521
+
522
class YFCC100MLoader(BaseLoaderWithDLManager):
    """YFCC100M loader.

    Streams the sqlite metadata dump, keeps photo rows with a usable english
    caption (title or description), and points images at the
    multimedia-commons S3 mirror instead of the original Flickr url.
    """

    _ANNOTATION_URL = "https://multimedia-commons.s3-us-west-2.amazonaws.com/tools/etc/yfcc100m_dataset.sql"
    # Columns we're interested in
    _COLUMNS = [
        "photoid",
        "uid",
        "title",
        "description",
        "usertags",
        "downloadurl",
        "licensename",
        "licenseurl",
        "marker",
    ]
    # Text columns that are url encoded
    _TEXT_COLUMNS = ["title", "description", "usertags"]

    WHITE_SPACE_REGEX = re.compile(r"\s+")

    # Original YFCC100M filtering regexes
    LINE_BREAK_REGEX = re.compile(r"[\n\r]")
    REMOVE_HTML_TAGS_REGEX = re.compile(r"<.*?>")
    # TODO @thomasw21 improve that regex
    DATE_HOUR_REGEX = re.compile(r"[0-9](:|\.|-|/)[0-9][0-9](:|\.|-|/)[0-9][0-9]")
    WEIRD_CHARACTERS_REGEX = re.compile(r"[_©]")
    SECOND_WORD_REGEX = re.compile(r" [a-zA-Z]+")

    def __init__(self, dl_manager, batch_size: int, split: str):
        super(YFCC100MLoader, self).__init__(
            dl_manager=dl_manager, source="yfcc100m", split=split
        )
        # Number of sqlite rows fetched per cursor round-trip.
        self.batch_size = batch_size

    # Code from https://gitlab.com/jfolz/yfcc100m/-/blob/master/yfcc100m/convert_metadata.py
    BYTE_MAP = {"%02x" % v: "%x" % v for v in range(256)}

    @classmethod
    def yfcc_local_path(cls, url, __bm=BYTE_MAP):
        """Map an original download url to its path inside the S3 mirror
        (md5 of the url, re-encoded without zero-padding, split in 3-char dirs)."""
        h = md5(url.encode("utf-8")).hexdigest()
        hash_ = "".join(__bm[h[x : x + 2]] for x in range(0, 32, 2))
        return f"data/images/{hash_[0:3]}/{hash_[3:6]}/{hash_}.jpg"

    @classmethod
    def generate_image_url(cls, downloadurl: str):
        """Takes original image url, and download verion store in `multimedia-commons`"""
        # compute yfcc hash
        local_path = cls.yfcc_local_path(downloadurl)
        return f"https://multimedia-commons.s3-us-west-2.amazonaws.com/{local_path}"

    def generate_gen_kwargs(self, dl_manager):
        """Download the sqlite metadata dump."""
        sql_file = dl_manager.download(self._ANNOTATION_URL)
        return {"sql_file": sql_file}

    def filter_text(self, text: str) -> bool:
        """Return True when `text` is worth keeping as a caption."""
        # # If less than two words return False
        # # TODO @thomasw21 we probably don't need to split all the way til the end ...
        # if len([substring for substring in self.WHITE_SPACE_REGEX.split(text) if substring != ""]) < 2:
        #     return False

        if self.WEIRD_CHARACTERS_REGEX.search(text) is not None:
            return False

        # Require at least a second word.
        if self.SECOND_WORD_REGEX.search(text) is None:
            return False

        if self.DATE_HOUR_REGEX.search(text) is not None:
            return False

        # filter only english
        try:
            if detect(text) != "en":
                return False
        except Exception:
            # langdetect raises on inputs with no detectable features.
            return False

        return True

    def clean_text(self, text: str) -> str:
        """Inspired from original code"""
        text = self.LINE_BREAK_REGEX.sub(" ", text)
        cleantext = self.REMOVE_HTML_TAGS_REGEX.sub("", text)
        return cleantext

    def get_associated_text(self, annotation: Dict[str, Any]) -> Optional[str]:
        """
        Given an annotation, return text associated to the image
        We return None when the annotation should be filtered out
        """
        ordered_text_columns_consideration = ["description", "title"]
        record_text = None
        for column_name in ordered_text_columns_consideration:
            text_candidate = annotation[column_name]
            if column_name == "description" and not (5 < len(text_candidate) < 256):
                continue
            cleaned_text_candidate = self.clean_text(text_candidate)
            if self.filter_text(cleaned_text_candidate):
                record_text = cleaned_text_candidate
                break
        return record_text

    def _generate_examples_with_kwargs(self, sql_file: str):
        """Stream the metadata rows in batches and yield PMD rows."""
        # query records command
        sql_command = f"select {', '.join(self._COLUMNS)} from yfcc100m_dataset"

        # Create a connection and get a cursor
        with sqlite3.connect(sql_file) as connection:
            cursor = connection.cursor()

            # Execute the query
            cursor.execute(sql_command)
            # Get data in batches
            while True:
                # Read the data
                records = cursor.fetchmany(self.batch_size)

                # If we are at the end
                if len(records) == 0:
                    break

                # Format data
                for record in records:
                    annotation = {
                        column_name: value
                        for value, column_name in zip(record, self._COLUMNS)
                    }

                    # TODO @thomasw21 if it's not an image we don't care for now
                    if annotation["marker"] != 0:
                        continue

                    # Fix: url-decode the text columns *before* extracting the
                    # caption. Previously decoding happened after
                    # `get_associated_text`, so cleaning/filtering (including
                    # language detection) ran on percent-encoded text and the
                    # yielded caption kept its url-encoding ("+" for spaces,
                    # "%xx" escapes).
                    for text_column in self._TEXT_COLUMNS:
                        annotation[text_column] = unquote_plus(annotation[text_column])

                    # We compute text candidate and skip the row if None work.
                    text = self.get_associated_text(annotation)
                    if text is None:
                        continue

                    yield {
                        # Add image_url that we download from s3 bucket instead of official download url
                        "image_url": self.generate_image_url(annotation["downloadurl"]),
                        "image": None,
                        "texts": [text],
                        "source": self.source,
                        "meta": json.dumps(annotation),
                    }
            cursor.close()
669
+
670
+
671
class PMDConfig(datasets.BuilderConfig):
    """BuilderConfig for PMD.

    Args:
        datasets_batch_size: rows fetched per batch from `datasets`-backed loaders.
        sqlite3_batch_size: rows fetched per cursor round-trip for YFCC100M.
    """

    def __init__(
        self,
        datasets_batch_size: int = 1000,
        sqlite3_batch_size: int = 10_000,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # determines how much we can load
        self.datasets_batch_size = datasets_batch_size
        self.sqlite3_batch_size = sqlite3_batch_size
684
+
685
+
686
class PMD(datasets.GeneratorBasedBuilder):
    """Generator-based builder aggregating every PMD sub-dataset loader."""

    BUILDER_CONFIG_CLASS = PMDConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        datasets_batch_size = self.config.datasets_batch_size

        def build_loaders(split_name):
            # One loader per sub-dataset; order determines row ordering.
            return [
                COCOloader(dl_manager=dl_manager, split=split_name),
                SBULoader(split=split_name, batch_size=datasets_batch_size),
                LocalizedNarrativesOpenImagesLoader(
                    dl_manager=dl_manager, split=split_name
                ),
                LocalizedNarrativesCOCOLoader(
                    dl_manager=dl_manager, split=split_name
                ),
                LocalizedNarrativesFlickr30kLoader(
                    dl_manager=dl_manager, split=split_name
                ),
                LocalizedNarrativesADE20kLoader(
                    dl_manager=dl_manager, split=split_name
                ),
                ConceptualCaptions(
                    split=split_name, batch_size=datasets_batch_size
                ),
                VisualGenomeLoader(
                    split=split_name, batch_size=datasets_batch_size
                ),
                WITLoader(split=split_name, batch_size=datasets_batch_size),
                Conceptual12MLoader(
                    split=split_name, batch_size=datasets_batch_size
                ),
                RedCapsLoader(split=split_name, batch_size=datasets_batch_size),
                YFCC100MLoader(
                    dl_manager=dl_manager,
                    split=split_name,
                    batch_size=self.config.sqlite3_batch_size,
                ),
            ]

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"loaders": build_loaders(split_name)},
            )
            for split_name in [datasets.Split.TRAIN]
        ]

    def _generate_examples(self, loaders: List[BaseLoader]):
        # One running counter across all loaders keeps example keys unique.
        all_examples = (
            example for loader in loaders for example in loader._generate_examples()
        )
        for example_id, example in enumerate(all_examples):
            yield example_id, example