thomasw21 committed on
Commit
cbdd0ca
·
1 Parent(s): 8c55fa4

Run everything in parallel except sqlite3

Browse files
Files changed (1) hide show
  1. pmd.py +390 -221
pmd.py CHANGED
@@ -12,6 +12,7 @@
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
 
15
  import json
16
  import re
17
  import sqlite3
@@ -20,7 +21,7 @@ from functools import partial
20
  from hashlib import md5
21
  from multiprocessing import Pool
22
  from pathlib import Path
23
- from typing import Any, Dict, List, Optional
24
  import pyarrow as pa
25
 
26
  from datetime import datetime
@@ -65,13 +66,10 @@ def json_serializer(o):
65
 
66
 
67
  class BaseLoader:
68
- def __init__(
69
- self,
70
- source: str,
71
- split: str,
72
- ):
73
  self.source = source
74
  self.split = split
 
75
 
76
  @abstractmethod
77
  def _generate_batches(self):
@@ -87,35 +85,25 @@ class DatasetsLoader(BaseLoader):
87
  config_name: Optional[str],
88
  split: str,
89
  num_proc: int,
90
- batch_size: int = 1000,
91
  ):
92
- super(DatasetsLoader, self).__init__(source=dataset_name, split=split)
 
 
 
93
  self.dataset_name = dataset_name
94
  self.config_name = config_name
95
  self.num_proc = num_proc
96
- # # In order to not have memory explode we define a batch size per proc
97
- # self.batch_size_per_proc = batch_size // self.num_proc
98
- # assert self.batch_size_per_proc >= 1
99
-
100
- self.batch_size_per_proc = batch_size
101
 
102
  @abstractmethod
103
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
104
  """Return list of caster rows. Casted row are either PMD features"""
105
  raise NotImplementedError()
106
 
107
- # def convert_batch_to_list_of_rows(self, batch: Dict) -> List[Dict[str, Any]]:
108
- # # batch_size can be different to self.batch_size, ie due to last batch
109
- # batch_size = len(next(iter(batch.values())))
110
- # column_names = list(batch.keys())
111
- # return [
112
- # {column_name: batch[column_name][i] for column_name in column_names}
113
- # for i in range(batch_size)
114
- # ]
115
-
116
  def _generate_batch_table(self, batch_start: int, dset: Dataset) -> pa.Table:
117
  dataset_size = len(dset)
118
- batch_end = min(batch_start + self.batch_size_per_proc, dataset_size)
119
  batch = dset[batch_start:batch_end]
120
  output_batch = self.cast_to_pmd_features(batch)
121
  return pa.table(_FEATURES.encode_batch(output_batch))
@@ -126,7 +114,7 @@ class DatasetsLoader(BaseLoader):
126
 
127
  # load batches and yield individual rows
128
  if self.num_proc == 1:
129
- for batch_start in range(0, dataset_size, self.batch_size_per_proc):
130
  batch_casted_pmd_features = self._generate_batch_table(
131
  dset=dataset, batch_start=batch_start
132
  )
@@ -139,7 +127,7 @@ class DatasetsLoader(BaseLoader):
139
  # TODO @thomasw21 figure out if we care about imap / imap_unordered
140
  batch_iterator = pool.imap(
141
  partial(self._generate_batch_table, dset=dataset),
142
- range(0, dataset_size, self.batch_size_per_proc),
143
  )
144
  for batch_casted_pmd_features in batch_iterator:
145
  yield batch_casted_pmd_features
@@ -148,21 +136,49 @@ class DatasetsLoader(BaseLoader):
148
  class BaseLoaderWithDLManager(BaseLoader):
149
  """We use dl_manager to generate `gen_kwargs` needed in order to generate examples."""
150
 
151
- def __init__(self, dl_manager, source: str, split: str):
152
- super(BaseLoaderWithDLManager, self).__init__(source=source, split=split)
 
 
 
 
 
 
 
 
 
 
153
  self.gen_kwargs = self.generate_gen_kwargs(dl_manager)
 
 
 
154
 
155
  @abstractmethod
156
  def generate_gen_kwargs(self, dl_manager):
157
  raise NotImplementedError()
158
 
159
  @abstractmethod
160
- def _generate_examples_with_kwargs(self, **kwargs) -> pa.Table:
161
  raise NotImplementedError()
162
 
 
 
 
 
 
 
 
163
  def _generate_batches(self):
164
- for elt in self._generate_batches_with_kwargs(**self.gen_kwargs):
165
- yield elt
 
 
 
 
 
 
 
 
166
 
167
 
168
  class COCOloader(BaseLoaderWithDLManager):
@@ -176,9 +192,21 @@ class COCOloader(BaseLoaderWithDLManager):
176
  }
177
  _SPLIT_MAP = {"train": "train2017", "validation": "val207"}
178
 
179
- def __init__(self, dl_manager, split: str):
 
 
 
 
 
 
 
180
  super(COCOloader, self).__init__(
181
- dl_manager=dl_manager, source="coco", split=split
 
 
 
 
 
182
  )
183
 
184
  def generate_gen_kwargs(self, dl_manager):
@@ -195,9 +223,7 @@ class COCOloader(BaseLoaderWithDLManager):
195
  "base_image_path": image_folder / self._SPLIT_MAP[self.split],
196
  }
197
 
198
- def _generate_batches_with_kwargs(
199
- self, annotation_file: str, base_image_path: Path
200
- ) -> pa.Table:
201
  with open(annotation_file, "r", encoding="utf-8") as fi:
202
  annotations = json.load(fi)
203
 
@@ -210,36 +236,41 @@ class COCOloader(BaseLoaderWithDLManager):
210
  else:
211
  annotations_per_image_id[image_id] = [annotation]
212
 
 
 
213
  for image_metadata in annotations["images"]:
214
  image_id = image_metadata["id"]
215
- image_path = base_image_path / f"{image_id:012}.jpg"
216
  for annotation in annotations_per_image_id[image_id]:
217
- yield pa.table(
218
- {
219
- "image_url": None,
220
- "image": str(image_path.absolute()),
221
- "texts": [annotation["caption"]],
222
- "source": self.source,
223
- "meta": json.dumps(
224
- {
225
- "image_metadata": image_metadata,
226
- "annotation": annotation,
227
- },
228
- default=json_serializer,
229
- indent=2,
230
- ),
231
- }
232
- )
 
 
 
 
233
 
234
 
235
  class SBULoader(DatasetsLoader):
236
- def __init__(self, split: str, batch_size: int = 1000):
237
  super(SBULoader, self).__init__(
238
  dataset_name="sbu_captions",
239
  config_name=None,
240
  split=split,
241
- batch_size=batch_size,
242
- num_proc=1,
243
  )
244
 
245
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
@@ -270,31 +301,56 @@ class LocalizedNarrativesOpenImagesLoader(BaseLoaderWithDLManager):
270
  "test": "https://storage.googleapis.com/localized-narratives/annotations/open_images_test_captions.jsonl",
271
  }
272
 
273
- def __init__(self, dl_manager, split: str):
 
 
 
 
 
 
 
274
  super(LocalizedNarrativesOpenImagesLoader, self).__init__(
275
- dl_manager=dl_manager, source="localized_narratives__coco", split=split
 
 
 
 
 
276
  )
277
 
278
  def generate_gen_kwargs(self, dl_manager):
279
  annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
280
  return {"annotation_file": annotation_file, "split": self.split}
281
 
282
- def _generate_batches_with_kwargs(self, annotation_file: str, split: str):
283
  with open(annotation_file, "r", encoding="utf-8") as fi:
284
- for line in fi:
285
- annotation = json.loads(line)
286
- assert "image_url" not in annotation
287
- yield {
288
- "image_url": f"https://s3.amazonaws.com/open-images-dataset/{split}/{annotation['image_id']}.jpg",
289
- "image": None,
290
- "texts": [annotation["caption"]],
291
- "source": self.source,
292
- "meta": json.dumps(
293
- annotation,
294
- default=json_serializer,
295
- indent=2,
296
- ),
297
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
298
 
299
 
300
  class LocalizedNarrativesCOCOLoader(BaseLoaderWithDLManager):
@@ -309,9 +365,21 @@ class LocalizedNarrativesCOCOLoader(BaseLoaderWithDLManager):
309
  }
310
  _SPLIT_MAP = {"train": "train2017", "validation": "val207"}
311
 
312
- def __init__(self, dl_manager, split: str):
 
 
 
 
 
 
 
313
  super(LocalizedNarrativesCOCOLoader, self).__init__(
314
- dl_manager=dl_manager, source="localized_narratives__coco", split=split
 
 
 
 
 
315
  )
316
 
317
  def generate_gen_kwargs(self, dl_manager):
@@ -324,26 +392,31 @@ class LocalizedNarrativesCOCOLoader(BaseLoaderWithDLManager):
324
  "base_image_path": image_folder / self._SPLIT_MAP[self.split],
325
  }
326
 
327
- def _generate_batches_with_kwargs(
328
- self, annotation_file: str, base_image_path: Path
329
- ):
330
  with open(annotation_file, "r", encoding="utf-8") as fi:
331
- for line in fi:
332
- annotation = json.loads(line)
333
- assert "image_url" not in annotation
334
- image_path = base_image_path / f"{annotation['image_id'].zfill(12)}.jpg"
335
- yield {
336
- "image_url": None,
337
- "image": str(image_path.absolute()),
338
- "texts": [annotation["caption"]],
339
- "source": self.source,
340
- "meta": json.dumps(
341
- annotation,
342
- default=json_serializer,
343
- indent=2,
344
- ),
345
- }
346
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347
 
348
  class LocalizedNarrativesFlickr30kLoader(BaseLoaderWithDLManager):
349
  _LOCAL_IMAGE_FOLDER_NAME = "flickr30k-images"
@@ -353,9 +426,21 @@ class LocalizedNarrativesFlickr30kLoader(BaseLoaderWithDLManager):
353
  "test": "https://storage.googleapis.com/localized-narratives/annotations/flickr30k_test_captions.jsonl",
354
  }
355
 
356
- def __init__(self, dl_manager, split: str):
 
 
 
 
 
 
 
357
  super(LocalizedNarrativesFlickr30kLoader, self).__init__(
358
- dl_manager=dl_manager, source="localized_narratives__flickr30k", split=split
 
 
 
 
 
359
  )
360
 
361
  def generate_gen_kwargs(self, dl_manager):
@@ -386,25 +471,33 @@ class LocalizedNarrativesFlickr30kLoader(BaseLoaderWithDLManager):
386
  dataset can then be loaded using the following command `datasets.load_dataset("pmd", data_dir="<path/to/folder>")`.
387
  """
388
 
389
- def _generate_batches_with_kwargs(
390
- self, annotation_file: str, base_image_path: Path
391
- ):
392
  with open(annotation_file, "r", encoding="utf-8") as fi:
393
- for line in fi:
394
- annotation = json.loads(line)
395
- assert "image" not in annotation
396
- image_path = base_image_path / f"{annotation['image_id']}.jpg"
397
- yield {
398
- "image_url": None,
399
- "image": str(image_path.absolute()),
400
- "texts": [annotation["caption"]],
401
- "source": self.source,
402
- "meta": json.dumps(
403
- annotation,
404
- default=json_serializer,
405
- indent=2,
406
- ),
407
- }
 
 
 
 
 
 
 
 
 
 
408
 
409
 
410
  class LocalizedNarrativesADE20kLoader(BaseLoaderWithDLManager):
@@ -417,9 +510,21 @@ class LocalizedNarrativesADE20kLoader(BaseLoaderWithDLManager):
417
  )
418
  _SPLIT_MAP = {"train": "training", "validation": "validation"}
419
 
420
- def __init__(self, dl_manager, split: str):
 
 
 
 
 
 
 
421
  super(LocalizedNarrativesADE20kLoader, self).__init__(
422
- dl_manager=dl_manager, source="localized_narratives__ADE20k", split=split
 
 
 
 
 
423
  )
424
 
425
  def generate_gen_kwargs(self, dl_manager):
@@ -435,34 +540,39 @@ class LocalizedNarrativesADE20kLoader(BaseLoaderWithDLManager):
435
  "base_image_path": image_base_dir / self._SPLIT_MAP[self.split],
436
  }
437
 
438
- def _generate_batches_with_kwargs(
439
- self, annotation_file: str, base_image_path: Path
440
- ):
441
  with open(annotation_file, "r", encoding="utf-8") as fi:
442
- for line in fi:
443
- annotation = json.loads(line)
444
- assert "image" not in annotation
445
- image_path = base_image_path / f"{annotation['image_id']}.jpg"
446
- yield {
447
- "image_url": None,
448
- "image": str(image_path.absolute()),
449
- "texts": [annotation["caption"]],
450
- "source": self.source,
451
- "meta": json.dumps(
452
- annotation,
453
- default=json_serializer,
454
- indent=2,
455
- ),
456
- }
 
 
 
 
 
 
 
457
 
458
 
459
  class VisualGenomeLoader(DatasetsLoader):
460
- def __init__(self, split: str, num_proc: int, batch_size: int = 1000):
461
  super(VisualGenomeLoader, self).__init__(
462
  dataset_name="visual_genome",
463
  config_name="region_descriptions_v1.2.0",
464
  split=split,
465
- batch_size=batch_size,
466
  num_proc=num_proc,
467
  )
468
 
@@ -504,13 +614,13 @@ class VisualGenomeLoader(DatasetsLoader):
504
 
505
 
506
  class WITLoader(DatasetsLoader):
507
- def __init__(self, split: str, batch_size: int = 1000):
508
  super(WITLoader, self).__init__(
509
  dataset_name="google/wit",
510
  config_name=None,
511
  split=split,
512
- batch_size=batch_size,
513
- num_proc=1,
514
  )
515
 
516
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
@@ -541,13 +651,13 @@ class WITLoader(DatasetsLoader):
541
 
542
 
543
  class ConceptualCaptions(DatasetsLoader):
544
- def __init__(self, split: str, batch_size: int = 1000):
545
  super(ConceptualCaptions, self).__init__(
546
  dataset_name="conceptual_captions",
547
  config_name="unlabeled",
548
  split=split,
549
- batch_size=batch_size,
550
- num_proc=1,
551
  )
552
 
553
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
@@ -570,13 +680,13 @@ class ConceptualCaptions(DatasetsLoader):
570
 
571
 
572
  class Conceptual12MLoader(DatasetsLoader):
573
- def __init__(self, split: str, batch_size: int = 1000):
574
  super(Conceptual12MLoader, self).__init__(
575
  dataset_name="conceptual_12m",
576
  config_name=None,
577
  split=split,
578
- batch_size=batch_size,
579
- num_proc=1,
580
  )
581
 
582
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
@@ -599,13 +709,13 @@ class Conceptual12MLoader(DatasetsLoader):
599
 
600
 
601
  class RedCapsLoader(DatasetsLoader):
602
- def __init__(self, split: str, batch_size: int = 1000):
603
  super(RedCapsLoader, self).__init__(
604
  dataset_name="red_caps",
605
  config_name="all",
606
  split=split,
607
- batch_size=batch_size,
608
- num_proc=1,
609
  )
610
 
611
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
@@ -656,11 +766,23 @@ class YFCC100MLoader(BaseLoaderWithDLManager):
656
  WEIRD_CHARACTERS_REGEX = re.compile(r"[_©]")
657
  SECOND_WORD_REGEX = re.compile(r" [a-zA-Z]+")
658
 
659
- def __init__(self, dl_manager, batch_size: int, split: str):
 
 
 
 
 
 
 
660
  super(YFCC100MLoader, self).__init__(
661
- dl_manager=dl_manager, source="yfcc100m", split=split
 
 
 
 
 
662
  )
663
- self.batch_size = batch_size
664
 
665
  # Code from https://gitlab.com/jfolz/yfcc100m/-/blob/master/yfcc100m/convert_metadata.py
666
  BYTE_MAP = {"%02x" % v: "%x" % v for v in range(256)}
@@ -729,7 +851,7 @@ class YFCC100MLoader(BaseLoaderWithDLManager):
729
  break
730
  return record_text
731
 
732
- def _generate_batches_with_kwargs(self, sql_file: str):
733
  # query records command
734
  sql_command = f"select {', '.join(self._COLUMNS)} from yfcc100m_dataset"
735
 
@@ -742,45 +864,57 @@ class YFCC100MLoader(BaseLoaderWithDLManager):
742
  # Get data in batches
743
  while True:
744
  # Read the data
745
- records = cursor.fetchmany(self.batch_size)
746
 
747
  # If we are at the end
748
  if len(records) == 0:
749
  break
750
 
751
- # Format data
752
- for record in records:
753
- annotation = {
754
- column_name: value
755
- for value, column_name in zip(record, self._COLUMNS)
756
- }
757
-
758
- # TODO @thomasw21 if it's not an image we don't care for now
759
- if annotation["marker"] != 0:
760
- continue
761
-
762
- # We compute text candidate and skip the row if None work.
763
- text = self.get_associated_text(annotation)
764
- if text is None:
765
- continue
766
-
767
- for text_column in self._TEXT_COLUMNS:
768
- annotation[text_column] = unquote_plus(annotation[text_column])
769
-
770
- yield {
771
- # Add image_url that we download from s3 bucket instead of official download url
772
- "image_url": self.generate_image_url(annotation["downloadurl"]),
773
- "image": None,
774
- "texts": [text],
775
- "source": self.source,
776
- "meta": json.dumps(
777
- annotation,
778
- default=json_serializer,
779
- indent=2,
780
- ),
781
- }
782
  cursor.close()
783
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
784
 
785
  class PMDConfig(datasets.BuilderConfig):
786
  """BuilderConfig for PMD."""
@@ -790,6 +924,8 @@ class PMDConfig(datasets.BuilderConfig):
790
  num_proc: Optional[int] = None,
791
  datasets_batch_size: int = 1000,
792
  sqlite3_batch_size: int = 10_000,
 
 
793
  **kwargs,
794
  ):
795
  if num_proc is None:
@@ -802,6 +938,10 @@ class PMDConfig(datasets.BuilderConfig):
802
 
803
  # Some datasets should be loaded via multiprocessing.
804
  self.num_proc = num_proc
 
 
 
 
805
 
806
 
807
  # TODO @thomasw21 ArrowBasedBuilder to be able to return batches
@@ -825,49 +965,78 @@ class PMD(datasets.ArrowBasedBuilder):
825
  name=split_name,
826
  gen_kwargs={
827
  "loaders": [
828
- # COCOloader(dl_manager=dl_manager, split=split_name),
829
- # SBULoader(
830
- # split=split_name,
831
- # batch_size=self.config.datasets_batch_size,
832
- # ),
833
- # LocalizedNarrativesOpenImagesLoader(
834
- # dl_manager=dl_manager, split=split_name
835
- # ),
836
- # LocalizedNarrativesCOCOLoader(
837
- # dl_manager=dl_manager, split=split_name
838
- # ),
839
- # LocalizedNarrativesFlickr30kLoader(
840
- # dl_manager=dl_manager, split=split_name
841
- # ),
842
- # LocalizedNarrativesADE20kLoader(
843
- # dl_manager=dl_manager, split=split_name
844
- # ),
845
- # ConceptualCaptions(
846
- # split=split_name,
847
- # batch_size=self.config.datasets_batch_size,
848
- # ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
849
  VisualGenomeLoader(
850
  split=split_name,
851
- batch_size=self.config.datasets_batch_size,
852
  num_proc=self.config.num_proc,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
853
  ),
854
- # WITLoader(
855
- # split=split_name,
856
- # batch_size=self.config.datasets_batch_size,
857
- # ),
858
- # Conceptual12MLoader(
859
- # split=split_name,
860
- # batch_size=self.config.datasets_batch_size,
861
- # ),
862
- # RedCapsLoader(
863
- # split=split_name,
864
- # batch_size=self.config.datasets_batch_size,
865
- # ),
866
- # YFCC100MLoader(
867
- # dl_manager=dl_manager,
868
- # split=split_name,
869
- # batch_size=self.config.sqlite3_batch_size,
870
- # ),
871
  ]
872
  },
873
  )
 
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
+ import itertools
16
  import json
17
  import re
18
  import sqlite3
 
21
  from hashlib import md5
22
  from multiprocessing import Pool
23
  from pathlib import Path
24
+ from typing import Any, Dict, List, Optional, Iterator
25
  import pyarrow as pa
26
 
27
  from datetime import datetime
 
66
 
67
 
68
  class BaseLoader:
69
+ def __init__(self, source: str, split: str, writer_batch_size: int):
 
 
 
 
70
  self.source = source
71
  self.split = split
72
+ self.writer_batch_size = writer_batch_size
73
 
74
  @abstractmethod
75
  def _generate_batches(self):
 
85
  config_name: Optional[str],
86
  split: str,
87
  num_proc: int,
88
+ datasets_batch_size: int = 1000,
89
  ):
90
+ # TODO @thomasw21 do we somehow want to de-correlate both writer and datasets batch_size
91
+ super(DatasetsLoader, self).__init__(
92
+ source=dataset_name, split=split, writer_batch_size=datasets_batch_size
93
+ )
94
  self.dataset_name = dataset_name
95
  self.config_name = config_name
96
  self.num_proc = num_proc
97
+ self.datasets_batch_size = datasets_batch_size
 
 
 
 
98
 
99
  @abstractmethod
100
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
101
  """Return list of caster rows. Casted row are either PMD features"""
102
  raise NotImplementedError()
103
 
 
 
 
 
 
 
 
 
 
104
  def _generate_batch_table(self, batch_start: int, dset: Dataset) -> pa.Table:
105
  dataset_size = len(dset)
106
+ batch_end = min(batch_start + self.datasets_batch_size, dataset_size)
107
  batch = dset[batch_start:batch_end]
108
  output_batch = self.cast_to_pmd_features(batch)
109
  return pa.table(_FEATURES.encode_batch(output_batch))
 
114
 
115
  # load batches and yield individual rows
116
  if self.num_proc == 1:
117
+ for batch_start in range(0, dataset_size, self.datasets_batch_size):
118
  batch_casted_pmd_features = self._generate_batch_table(
119
  dset=dataset, batch_start=batch_start
120
  )
 
127
  # TODO @thomasw21 figure out if we care about imap / imap_unordered
128
  batch_iterator = pool.imap(
129
  partial(self._generate_batch_table, dset=dataset),
130
+ range(0, dataset_size, self.datasets_batch_size),
131
  )
132
  for batch_casted_pmd_features in batch_iterator:
133
  yield batch_casted_pmd_features
 
136
  class BaseLoaderWithDLManager(BaseLoader):
137
  """We use dl_manager to generate `gen_kwargs` needed in order to generate examples."""
138
 
139
+ def __init__(
140
+ self,
141
+ dl_manager,
142
+ source: str,
143
+ split: str,
144
+ num_proc: int,
145
+ chunk_size: int,
146
+ writer_batch_size: int = 10_000,
147
+ ):
148
+ super(BaseLoaderWithDLManager, self).__init__(
149
+ source=source, split=split, writer_batch_size=writer_batch_size
150
+ )
151
  self.gen_kwargs = self.generate_gen_kwargs(dl_manager)
152
+ # Used for multiprocessing
153
+ self.chunk_size = chunk_size
154
+ self.num_proc = num_proc
155
 
156
  @abstractmethod
157
  def generate_gen_kwargs(self, dl_manager):
158
  raise NotImplementedError()
159
 
160
  @abstractmethod
161
+ def _build_rows_iterator(self, chunk_size: int, **kwargs) -> Iterator[List[Any]]:
162
  raise NotImplementedError()
163
 
164
+ @abstractmethod
165
+ def _generate_examples(self, examples: List[Any], **kwargs) -> Dict[str, List[Any]]:
166
+ raise NotImplementedError
167
+
168
+ def _generate_tables(self, batch: Dict[str, List[Any]]) -> pa.Table:
169
+ return pa.table(_FEATURES.encode_batch(batch))
170
+
171
  def _generate_batches(self):
172
+ rows_iterator = self._build_rows_iterator(self.chunk_size, **self.gen_kwargs)
173
+
174
+ with Pool(self.num_proc) as pool:
175
+ tables_iterator = pool.imap(
176
+ partial(self._generate_tables, **self.gen_kwargs),
177
+ rows_iterator,
178
+ chunksize=1,
179
+ )
180
+ for table in tables_iterator:
181
+ yield table
182
 
183
 
184
  class COCOloader(BaseLoaderWithDLManager):
 
192
  }
193
  _SPLIT_MAP = {"train": "train2017", "validation": "val207"}
194
 
195
+ def __init__(
196
+ self,
197
+ dl_manager,
198
+ split: str,
199
+ num_proc: int,
200
+ chunk_size: int,
201
+ writer_batch_size: int,
202
+ ):
203
  super(COCOloader, self).__init__(
204
+ dl_manager=dl_manager,
205
+ source="coco",
206
+ split=split,
207
+ num_proc=num_proc,
208
+ chunk_size=chunk_size,
209
+ writer_batch_size=writer_batch_size,
210
  )
211
 
212
  def generate_gen_kwargs(self, dl_manager):
 
223
  "base_image_path": image_folder / self._SPLIT_MAP[self.split],
224
  }
225
 
226
+ def _build_rows_iterator(self, chunk_size: int, annotation_file: str, base_image_path: Path) -> Iterator[List[Any]]:
 
 
227
  with open(annotation_file, "r", encoding="utf-8") as fi:
228
  annotations = json.load(fi)
229
 
 
236
  else:
237
  annotations_per_image_id[image_id] = [annotation]
238
 
239
+ # yield chunks
240
+ buffer = []
241
  for image_metadata in annotations["images"]:
242
  image_id = image_metadata["id"]
 
243
  for annotation in annotations_per_image_id[image_id]:
244
+ buffer.append({
245
+ "annotation": annotation,
246
+ "image_metadata": image_metadata
247
+ })
248
+ if len(buffer) == chunk_size:
249
+ yield buffer
250
+ buffer = []
251
+
252
+ def _generate_examples(self, examples: List[Any], annotation_file: str, base_image_path: Path) -> Dict[str, List[Any]]:
253
+ return {
254
+ "image_url": [None for _ in examples],
255
+ "image": [str((base_image_path / f"{example['image_metadata']['id']:012}.jpg").absolute()) for example in examples],
256
+ "texts": [example["annotation"]["caption"] for example in examples],
257
+ "source": [self.source for _ in examples],
258
+ "meta": [json.dumps(
259
+ example,
260
+ default=json_serializer,
261
+ indent=2,
262
+ ) for example in examples],
263
+ }
264
 
265
 
266
  class SBULoader(DatasetsLoader):
267
+ def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
268
  super(SBULoader, self).__init__(
269
  dataset_name="sbu_captions",
270
  config_name=None,
271
  split=split,
272
+ datasets_batch_size=datasets_batch_size,
273
+ num_proc=num_proc,
274
  )
275
 
276
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
 
301
  "test": "https://storage.googleapis.com/localized-narratives/annotations/open_images_test_captions.jsonl",
302
  }
303
 
304
+ def __init__(
305
+ self,
306
+ dl_manager,
307
+ split: str,
308
+ num_proc: int,
309
+ chunk_size: int,
310
+ writer_batch_size: int,
311
+ ):
312
  super(LocalizedNarrativesOpenImagesLoader, self).__init__(
313
+ dl_manager=dl_manager,
314
+ source="localized_narratives__coco",
315
+ split=split,
316
+ num_proc=num_proc,
317
+ chunk_size=chunk_size,
318
+ writer_batch_size=writer_batch_size,
319
  )
320
 
321
  def generate_gen_kwargs(self, dl_manager):
322
  annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
323
  return {"annotation_file": annotation_file, "split": self.split}
324
 
325
+ def _build_rows_iterator(self, chunk_size: int, annotation_file: str, split: str) -> Iterator[List[Any]]:
326
  with open(annotation_file, "r", encoding="utf-8") as fi:
327
+ chunk = itertools.islice(fi, chunk_size)
328
+
329
+ try:
330
+ chunk = next(chunk)
331
+ except StopIteration:
332
+ return
333
+
334
+ yield chunk
335
+
336
+ def _generate_examples(self, examples: List[Any], annotation_file: str, split: str) -> Dict[str, List[Any]]:
337
+ annotations = [json.loads(line) for line in examples]
338
+
339
+ # sanity check
340
+ for annotation in annotations:
341
+ assert "image_url" not in annotation
342
+
343
+ return {
344
+ "image_url": [f"https://s3.amazonaws.com/open-images-dataset/{split}/{annotation['image_id']}.jpg" for annotation in annotations],
345
+ "image": [None for _ in annotations],
346
+ "texts": [[annotation["caption"]] for annotation in annotations],
347
+ "source": [self.source for _ in annotations],
348
+ "meta": [json.dumps(
349
+ annotation,
350
+ default=json_serializer,
351
+ indent=2,
352
+ ) for annotation in annotations],
353
+ }
354
 
355
 
356
  class LocalizedNarrativesCOCOLoader(BaseLoaderWithDLManager):
 
365
  }
366
  _SPLIT_MAP = {"train": "train2017", "validation": "val207"}
367
 
368
+ def __init__(
369
+ self,
370
+ dl_manager,
371
+ split: str,
372
+ num_proc: int,
373
+ chunk_size: int,
374
+ writer_batch_size: int,
375
+ ):
376
  super(LocalizedNarrativesCOCOLoader, self).__init__(
377
+ dl_manager=dl_manager,
378
+ source="localized_narratives__coco",
379
+ split=split,
380
+ num_proc=num_proc,
381
+ chunk_size=chunk_size,
382
+ writer_batch_size=writer_batch_size,
383
  )
384
 
385
  def generate_gen_kwargs(self, dl_manager):
 
392
  "base_image_path": image_folder / self._SPLIT_MAP[self.split],
393
  }
394
 
395
+ def _build_rows_iterator(self, chunk_size: int, annotation_file: str, base_image_path: Path) -> Iterator[List[Any]]:
 
 
396
  with open(annotation_file, "r", encoding="utf-8") as fi:
397
+ chunk = itertools.islice(fi, chunk_size)
398
+
399
+ try:
400
+ chunk = next(chunk)
401
+ except StopIteration:
402
+ return
403
+
404
+ yield chunk
 
 
 
 
 
 
 
405
 
406
+ def _generate_examples(self, examples: List[Any], annotation_file: str, base_image_path: Path) -> Dict[str, List[Any]]:
407
+ annotations = [json.loads(line) for line in examples]
408
+
409
+ return {
410
+ "image_url": [None for _ in examples],
411
+ "image": [str((base_image_path / f"{annotation['image_id'].zfill(12)}.jpg").absolute()) for annotation in annotations],
412
+ "texts": [[annotation["caption"]] for annotation in annotations],
413
+ "source": [self.source for _ in annotations],
414
+ "meta": [json.dumps(
415
+ annotation,
416
+ default=json_serializer,
417
+ indent=2,
418
+ ) for annotation in annotations],
419
+ }
420
 
421
  class LocalizedNarrativesFlickr30kLoader(BaseLoaderWithDLManager):
422
  _LOCAL_IMAGE_FOLDER_NAME = "flickr30k-images"
 
426
  "test": "https://storage.googleapis.com/localized-narratives/annotations/flickr30k_test_captions.jsonl",
427
  }
428
 
429
+ def __init__(
430
+ self,
431
+ dl_manager,
432
+ split: str,
433
+ num_proc: int,
434
+ chunk_size: int,
435
+ writer_batch_size: int,
436
+ ):
437
  super(LocalizedNarrativesFlickr30kLoader, self).__init__(
438
+ dl_manager=dl_manager,
439
+ source="localized_narratives__flickr30k",
440
+ split=split,
441
+ num_proc=num_proc,
442
+ chunk_size=chunk_size,
443
+ writer_batch_size=writer_batch_size,
444
  )
445
 
446
  def generate_gen_kwargs(self, dl_manager):
 
471
  dataset can then be loaded using the following command `datasets.load_dataset("pmd", data_dir="<path/to/folder>")`.
472
  """
473
 
474
+ def _build_rows_iterator(self, chunk_size: int, annotation_file: str, base_image_path: Path) -> Iterator[List[Any]]:
 
 
475
  with open(annotation_file, "r", encoding="utf-8") as fi:
476
+ chunk = itertools.islice(fi, chunk_size)
477
+
478
+ try:
479
+ chunk = next(chunk)
480
+ except StopIteration:
481
+ return
482
+
483
+ yield chunk
484
+
485
+ def _generate_examples(self, examples: List[Any], annotation_file: str, base_image_path: Path) -> Dict[
486
+ str, List[Any]]:
487
+ annotations = [json.loads(line) for line in examples]
488
+
489
+ return {
490
+ "image_url": [None for _ in examples],
491
+ "image": [str((base_image_path / f"{annotation['image_id']}.jpg").absolute()) for annotation in
492
+ annotations],
493
+ "texts": [[annotation["caption"]] for annotation in annotations],
494
+ "source": [self.source for _ in annotations],
495
+ "meta": [json.dumps(
496
+ annotation,
497
+ default=json_serializer,
498
+ indent=2,
499
+ ) for annotation in annotations],
500
+ }
501
 
502
 
503
  class LocalizedNarrativesADE20kLoader(BaseLoaderWithDLManager):
 
510
  )
511
  _SPLIT_MAP = {"train": "training", "validation": "validation"}
512
 
513
+ def __init__(
514
+ self,
515
+ dl_manager,
516
+ split: str,
517
+ num_proc: int,
518
+ chunk_size: int,
519
+ writer_batch_size: int,
520
+ ):
521
  super(LocalizedNarrativesADE20kLoader, self).__init__(
522
+ dl_manager=dl_manager,
523
+ source="localized_narratives__ADE20k",
524
+ split=split,
525
+ num_proc=num_proc,
526
+ chunk_size=chunk_size,
527
+ writer_batch_size=writer_batch_size,
528
  )
529
 
530
  def generate_gen_kwargs(self, dl_manager):
 
540
  "base_image_path": image_base_dir / self._SPLIT_MAP[self.split],
541
  }
542
 
543
+ def _build_rows_iterator(self, annotation_file: str, base_image_path: Path, chunk_size: int) -> Iterator[List[Any]]:
 
 
544
  with open(annotation_file, "r", encoding="utf-8") as fi:
545
+ chunk = itertools.islice(fi, chunk_size)
546
+
547
+ try:
548
+ chunk = next(chunk)
549
+ except StopIteration:
550
+ return
551
+
552
+ yield chunk
553
+
554
+ def _generate_examples(self, examples: List[Any], annotation_file: str, base_image_path: Path) -> Dict[str, Any]:
555
+ annotations = [json.loads(line) for line in examples]
556
+ return {
557
+ "image_url": [None for _ in examples],
558
+ "image": [str((base_image_path / f"{annotation['image_id']}.jpg").absolute()) for annotation in annotations],
559
+ "texts": [annotation["caption"] for annotation in annotations],
560
+ "source": [self.source for _ in examples],
561
+ "meta": [json.dumps(
562
+ annotation,
563
+ default=json_serializer,
564
+ indent=2,
565
+ ) for annotation in annotations],
566
+ }
567
 
568
 
569
  class VisualGenomeLoader(DatasetsLoader):
570
+ def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
571
  super(VisualGenomeLoader, self).__init__(
572
  dataset_name="visual_genome",
573
  config_name="region_descriptions_v1.2.0",
574
  split=split,
575
+ datasets_batch_size=datasets_batch_size,
576
  num_proc=num_proc,
577
  )
578
 
 
614
 
615
 
616
  class WITLoader(DatasetsLoader):
617
+ def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
618
  super(WITLoader, self).__init__(
619
  dataset_name="google/wit",
620
  config_name=None,
621
  split=split,
622
+ datasets_batch_size=datasets_batch_size,
623
+ num_proc=num_proc,
624
  )
625
 
626
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
 
651
 
652
 
653
  class ConceptualCaptions(DatasetsLoader):
654
def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
    """Configure the loader for the `conceptual_captions` (unlabeled) dataset."""
    super().__init__(
        dataset_name="conceptual_captions",
        config_name="unlabeled",
        split=split,
        datasets_batch_size=datasets_batch_size,
        num_proc=num_proc,
    )
662
 
663
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
 
680
 
681
 
682
  class Conceptual12MLoader(DatasetsLoader):
683
def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
    """Configure the loader for the `conceptual_12m` dataset (no config name)."""
    loader_kwargs = dict(
        dataset_name="conceptual_12m",
        config_name=None,
        split=split,
        datasets_batch_size=datasets_batch_size,
        num_proc=num_proc,
    )
    super(Conceptual12MLoader, self).__init__(**loader_kwargs)
691
 
692
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
 
709
 
710
 
711
  class RedCapsLoader(DatasetsLoader):
712
def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
    """Configure the loader for the `red_caps` dataset ("all" config)."""
    super().__init__(
        dataset_name="red_caps",
        config_name="all",
        split=split,
        datasets_batch_size=datasets_batch_size,
        num_proc=num_proc,
    )
720
 
721
  def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
 
766
  WEIRD_CHARACTERS_REGEX = re.compile(r"[_©]")
767
  SECOND_WORD_REGEX = re.compile(r" [a-zA-Z]+")
768
 
769
def __init__(
    self,
    dl_manager,
    split: str,
    num_proc: int,
    chunk_size: int,
    writer_batch_size: int,
):
    """Configure the YFCC100M loader (sqlite-backed, read in chunks).

    Args:
        dl_manager: datasets download manager, forwarded to the base loader.
        split: dataset split name.
        num_proc: number of worker processes.
        chunk_size: number of sqlite rows fetched per batch.
        writer_batch_size: arrow writer batch size, forwarded to the base loader.
    """
    super().__init__(
        dl_manager=dl_manager,
        source="yfcc100m",
        split=split,
        num_proc=num_proc,
        chunk_size=chunk_size,
        writer_batch_size=writer_batch_size,
    )
    # Kept locally as well: the sqlite row iterator reads it directly.
    self.chunk_size = chunk_size
786
 
787
  # Code from https://gitlab.com/jfolz/yfcc100m/-/blob/master/yfcc100m/convert_metadata.py
788
  BYTE_MAP = {"%02x" % v: "%x" % v for v in range(256)}
 
851
  break
852
  return record_text
853
 
854
+ def _build_rows_iterator(self, sql_file: str, chunk_size: int) -> Iterator[List[Any]]:
855
  # query records command
856
  sql_command = f"select {', '.join(self._COLUMNS)} from yfcc100m_dataset"
857
 
 
864
  # Get data in batches
865
  while True:
866
  # Read the data
867
+ records = cursor.fetchmany(self.chunk_size)
868
 
869
  # If we are at the end
870
  if len(records) == 0:
871
  break
872
 
873
+ yield records
874
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
875
  cursor.close()
876
 
877
def _generate_examples(self, examples: List[Any], sql_file: str) -> Dict[str, Any]:
    """Turn a chunk of raw sqlite rows into a batch of PMD columns.

    Rows whose ``marker`` is not 0 (non-images, per the TODO below) or that
    yield no usable text are dropped. Returns an empty dict when no row
    survives filtering (same as before the fix).
    """
    buffer: Dict[str, List[Any]] = {}
    for example in examples:
        annotation = {
            column_name: value
            for value, column_name in zip(example, self._COLUMNS)
        }

        # TODO @thomasw21 if it's not an image we don't care for now
        if annotation["marker"] != 0:
            continue

        # We compute text candidate and skip the row if None work.
        text = self.get_associated_text(annotation)
        if text is None:
            continue

        for text_column in self._TEXT_COLUMNS:
            annotation[text_column] = unquote_plus(annotation[text_column])

        row = {
            # Add image_url that we download from s3 bucket instead of official download url
            "image_url": self.generate_image_url(annotation["downloadurl"]),
            "image": None,
            "texts": [text],
            "source": self.source,
            "meta": json.dumps(
                annotation,
                default=json_serializer,
                indent=2,
            ),
        }
        # Bug fix: the previous code appended the whole single-element list
        # after the first row (buffer["image"] became [None, [None], ...]),
        # nesting every column value from the second row on. Append the
        # scalar value per column instead.
        for column_name, value in row.items():
            buffer.setdefault(column_name, []).append(value)
    return buffer
917
+
918
 
919
  class PMDConfig(datasets.BuilderConfig):
920
  """BuilderConfig for PMD."""
 
924
  num_proc: Optional[int] = None,
925
  datasets_batch_size: int = 1000,
926
  sqlite3_batch_size: int = 10_000,
927
+ chunk_size: int = 10_000,
928
+ writer_batch_size: int = 10_000,
929
  **kwargs,
930
  ):
931
  if num_proc is None:
 
938
 
939
  # Some datasets should be loaded via multiprocessing.
940
  self.num_proc = num_proc
941
+ self.chunk_size = chunk_size
942
+
943
+ # Batch writing
944
+ self.writer_batch_size = writer_batch_size
945
 
946
 
947
  # TODO @thomasw21 ArrowBasedBuilder to be able to return batches
 
965
  name=split_name,
966
  gen_kwargs={
967
  "loaders": [
968
+ COCOloader(
969
+ dl_manager=dl_manager,
970
+ split=split_name,
971
+ num_proc=self.config.num_proc,
972
+ chunk_size=self.config.chunk_size,
973
+ writer_batch_size=self.config.writer_batch_size,
974
+ ),
975
+ SBULoader(
976
+ split=split_name,
977
+ datasets_batch_size=self.config.datasets_batch_size,
978
+ num_proc=self.config.num_proc,
979
+ ),
980
+ LocalizedNarrativesOpenImagesLoader(
981
+ dl_manager=dl_manager,
982
+ split=split_name,
983
+ num_proc=self.config.num_proc,
984
+ chunk_size=self.config.chunk_size,
985
+ writer_batch_size=self.config.writer_batch_size,
986
+ ),
987
+ LocalizedNarrativesCOCOLoader(
988
+ dl_manager=dl_manager,
989
+ split=split_name,
990
+ num_proc=self.config.num_proc,
991
+ chunk_size=self.config.chunk_size,
992
+ writer_batch_size=self.config.writer_batch_size,
993
+ ),
994
+ LocalizedNarrativesFlickr30kLoader(
995
+ dl_manager=dl_manager,
996
+ split=split_name,
997
+ num_proc=self.config.num_proc,
998
+ chunk_size=self.config.chunk_size,
999
+ writer_batch_size=self.config.writer_batch_size,
1000
+ ),
1001
+ LocalizedNarrativesADE20kLoader(
1002
+ dl_manager=dl_manager,
1003
+ split=split_name,
1004
+ num_proc=self.config.num_proc,
1005
+ chunk_size=self.config.chunk_size,
1006
+ writer_batch_size=self.config.writer_batch_size,
1007
+ ),
1008
+ ConceptualCaptions(
1009
+ split=split_name,
1010
+ num_proc=self.config.num_proc,
1011
+ datasets_batch_size=self.config.datasets_batch_size,
1012
+ ),
1013
  VisualGenomeLoader(
1014
  split=split_name,
 
1015
  num_proc=self.config.num_proc,
1016
+ datasets_batch_size=self.config.datasets_batch_size,
1017
+ ),
1018
+ WITLoader(
1019
+ split=split_name,
1020
+ num_proc=self.config.num_proc,
1021
+ datasets_batch_size=self.config.datasets_batch_size,
1022
+ ),
1023
+ Conceptual12MLoader(
1024
+ split=split_name,
1025
+ num_proc=self.config.num_proc,
1026
+ datasets_batch_size=self.config.datasets_batch_size,
1027
+ ),
1028
+ RedCapsLoader(
1029
+ split=split_name,
1030
+ num_proc=self.config.num_proc,
1031
+ datasets_batch_size=self.config.datasets_batch_size,
1032
+ ),
1033
+ YFCC100MLoader(
1034
+ dl_manager=dl_manager,
1035
+ split=split_name,
1036
+ num_proc=self.config.num_proc,
1037
+ chunk_size=self.config.sqlite3_batch_size,
1038
+ writer_batch_size=self.config.writer_batch_size,
1039
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1040
  ]
1041
  },
1042
  )