thomasw21 committed
Commit: 622d8d0 · Parent(s): da3e34c

Test out arrowBuilder

pmd.py CHANGED
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Wikipedia-based Image Text (WIT) Dataset is a large multimodal multilingual dataset"""
 import json
 import re
 import sqlite3
@@ -22,15 +21,16 @@ from hashlib import md5
 from multiprocessing import Pool
 from pathlib import Path
 from typing import Any, Dict, List, Optional
+import pyarrow as pa

 from datetime import datetime
 from urllib.parse import unquote_plus

-# TODO: @thomasw21
 import datasets
 from datasets import load_dataset, Dataset
 from langdetect import detect

+# TODO: @thomasw21
 _CITATION = """"""

 # TODO: @thomasw21
@@ -74,7 +74,7 @@ class BaseLoader:
         self.split = split

     @abstractmethod
-    def
+    def _generate_batches(self):
         raise NotImplementedError()


@@ -93,7 +93,6 @@ class DatasetsLoader(BaseLoader):
         self.dataset_name = dataset_name
         self.config_name = config_name
         self.num_proc = num_proc
-        print(self.num_proc)
         # # In order to not have memory explode we define a batch size per proc
         # self.batch_size_per_proc = batch_size // self.num_proc
         # assert self.batch_size_per_proc >= 1
@@ -101,55 +100,48 @@
         self.batch_size_per_proc = batch_size

     @abstractmethod
-    def cast_to_pmd_features(self,
+    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
         """Return list of caster rows. Casted row are either PMD features"""
         raise NotImplementedError()

-    def convert_batch_to_list_of_rows(self, batch: Dict) -> List[Dict[str, Any]]:
-
-
-
-
-
-
-
+    # def convert_batch_to_list_of_rows(self, batch: Dict) -> List[Dict[str, Any]]:
+    #     # batch_size can be different to self.batch_size, ie due to last batch
+    #     batch_size = len(next(iter(batch.values())))
+    #     column_names = list(batch.keys())
+    #     return [
+    #         {column_name: batch[column_name][i] for column_name in column_names}
+    #         for i in range(batch_size)
+    #     ]

-    def
+    def _generate_batch_table(self, batch_start: int, dset: Dataset) -> pa.Table:
         dataset_size = len(dset)
         batch_end = min(batch_start + self.batch_size_per_proc, dataset_size)
         batch = dset[batch_start:batch_end]
-
+        return pa.table(self.cast_to_pmd_features(batch))

-
-        for row in rows:
-            rows_casted_pmd_features += self.cast_to_pmd_features(row)
-        return rows_casted_pmd_features
-
-    def _generate_examples(self):
+    def _generate_batches(self):
         dataset = load_dataset(self.dataset_name, self.config_name, split=self.split)
         dataset_size = len(dataset)

         # load batches and yield individual rows
         if self.num_proc == 1:
             for batch_start in range(0, dataset_size, self.batch_size_per_proc):
-
-
-
+                batch_casted_pmd_features = self._generate_batch_table(
+                    dset=dataset, batch_start=batch_start
+                )
+                yield batch_casted_pmd_features

         # Parallel version
         else:
             assert self.num_proc > 1
             with Pool(self.num_proc) as pool:
-                # TODO @thomasw21 figure out if we care about imap
-                batch_iterator = pool.
-                    partial(self.
+                # TODO @thomasw21 figure out if we care about imap / imap_unordered
+                batch_iterator = pool.imap(
+                    partial(self._generate_batch_table, dset=dataset),
                     range(0, dataset_size, self.batch_size_per_proc),
                 )
-                for
-
-                print("hello")
-                for row_casted_pmd_features in rows_casted_pmd_features:
-                    yield row_casted_pmd_features
+                for batch_casted_pmd_features in batch_iterator:
+                    yield batch_casted_pmd_features


 class BaseLoaderWithDLManager(BaseLoader):
@@ -164,11 +156,11 @@ class BaseLoaderWithDLManager(BaseLoader):
         raise NotImplementedError()

     @abstractmethod
-    def _generate_examples_with_kwargs(self, **kwargs):
+    def _generate_examples_with_kwargs(self, **kwargs) -> pa.Table:
         raise NotImplementedError()

-    def
-        for elt in self.
+    def _generate_batches(self):
+        for elt in self._generate_batches_with_kwargs(**self.gen_kwargs):
             yield elt


@@ -202,9 +194,9 @@ class COCOloader(BaseLoaderWithDLManager):
             "base_image_path": image_folder / self._SPLIT_MAP[self.split],
         }

-    def
+    def _generate_batches_with_kwargs(
         self, annotation_file: str, base_image_path: Path
-    ):
+    ) -> pa.Table:
         with open(annotation_file, "r", encoding="utf-8") as fi:
             annotations = json.load(fi)

@@ -221,20 +213,22 @@
             image_id = image_metadata["id"]
             image_path = base_image_path / f"{image_id:012}.jpg"
             for annotation in annotations_per_image_id[image_id]:
-                yield
-
-
-
-
-
-
-
-
-
-
-
-
-
+                yield pa.table(
+                    {
+                        "image_url": None,
+                        "image": str(image_path.absolute()),
+                        "texts": [annotation["caption"]],
+                        "source": self.source,
+                        "meta": json.dumps(
+                            {
+                                "image_metadata": image_metadata,
+                                "annotation": annotation,
+                            },
+                            default=json_serializer,
+                            indent=2,
+                        ),
+                    }
+                )


 class SBULoader(DatasetsLoader):
@@ -244,24 +238,26 @@ class SBULoader(DatasetsLoader):
             config_name=None,
             split=split,
             batch_size=batch_size,
-            num_proc=1
+            num_proc=1,
         )

-    def cast_to_pmd_features(self,
-
-
-
-
-
-
-
-
-
+    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
+        metas = {k: v for k, v in batch.items() if k not in ["image_url", "caption"]}
+        batch_size = len(next(iter(batch.values())))
+        return {
+            "image_url": batch["image_url"],
+            "image": [None] * batch_size,
+            "texts": [[caption] for caption in batch["caption"]],
+            "source": [self.source] * batch_size,
+            "meta": [
+                json.dumps(
+                    {key: value[batch_id] for key, value in metas.items()},
                     default=json_serializer,
                     indent=2,
-                )
-
-
+                )
+                for batch_id in range(batch_size)
+            ],
+        }


 class LocalizedNarrativesOpenImagesLoader(BaseLoaderWithDLManager):
@@ -282,7 +278,7 @@ class LocalizedNarrativesOpenImagesLoader(BaseLoaderWithDLManager):
         annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
         return {"annotation_file": annotation_file, "split": self.split}

-    def
+    def _generate_batches_with_kwargs(self, annotation_file: str, split: str):
         with open(annotation_file, "r", encoding="utf-8") as fi:
             for line in fi:
                 annotation = json.loads(line)
@@ -327,7 +323,7 @@ class LocalizedNarrativesCOCOLoader(BaseLoaderWithDLManager):
             "base_image_path": image_folder / self._SPLIT_MAP[self.split],
         }

-    def
+    def _generate_batches_with_kwargs(
         self, annotation_file: str, base_image_path: Path
     ):
         with open(annotation_file, "r", encoding="utf-8") as fi:
@@ -389,7 +385,7 @@ class LocalizedNarrativesFlickr30kLoader(BaseLoaderWithDLManager):
     dataset can then be loaded using the following command `datasets.load_dataset("pmd", data_dir="<path/to/folder>")`.
     """

-    def
+    def _generate_batches_with_kwargs(
         self, annotation_file: str, base_image_path: Path
     ):
         with open(annotation_file, "r", encoding="utf-8") as fi:
@@ -438,7 +434,7 @@ class LocalizedNarrativesADE20kLoader(BaseLoaderWithDLManager):
             "base_image_path": image_base_dir / self._SPLIT_MAP[self.split],
         }

-    def
+    def _generate_batches_with_kwargs(
         self, annotation_file: str, base_image_path: Path
     ):
         with open(annotation_file, "r", encoding="utf-8") as fi:
@@ -466,18 +462,22 @@ class VisualGenomeLoader(DatasetsLoader):
             config_name="region_descriptions_v1.2.0",
             split=split,
             batch_size=batch_size,
-            num_proc=num_proc
+            num_proc=num_proc,
         )

-    def cast_to_pmd_features(self,
-
-
-
-
-
+    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
+        metas = {k: v for k, v in batch.items() if k not in ["image_url", "caption"]}
+        input_batch_size = len(next(iter(batch.values())))
+        serialized_metas = [
+            json.dumps(
+                {key: value[batch_id] for key, value in metas.items()},
+                default=json_serializer,
+                indent=2,
+            )
+            for batch_id in range(input_batch_size)
+        ]
+        output_list_rows = [
             {
-                "image_url": None,
-                # TODO @thomasw21 I believe this is slow as hell
                 "image": image.crop(
                     (
                         region["x"],
@@ -486,18 +486,20 @@ class VisualGenomeLoader(DatasetsLoader):
                         region["y"] + region["height"],
                     )
                 ),
-                "
+                "image_url": None,
+                "texts": [phrase],
                 "source": self.source,
-                "meta":
-                    meta,
-                    default=json_serializer,
-                    indent=2,
-                ),
+                "meta": serialized_meta,
             }
-            for
+            for image, regions, phrase, serialized_meta in zip(
+                batch["image"], batch["regions"], batch["phrase"], serialized_metas
+            )
+            for region in regions
         ]
-
-
+        return {
+            column_name: [row[column_name] for row in output_list_rows]
+            for column_name in ["image_url", "image", "texts", "source", "meta"]
+        }


 class WITLoader(DatasetsLoader):
@@ -507,32 +509,34 @@
             config_name=None,
             split=split,
             batch_size=batch_size,
-            num_proc=1
+            num_proc=1,
         )

-    def cast_to_pmd_features(self,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
+        metas = {k: v for k, v in batch.items() if k not in ["image_url"]}
+        batch_size = len(next(iter(batch.values())))
+        return {
+            "image_url": batch["image_url"],
+            "image": [None] * batch_size,
+            "texts": [
+                texts
+                # TODO @thomasw21 figure out which one we should choose
+                for texts in zip(
+                    batch["caption_reference_description"],
+                    batch["context_section_description"],
+                    batch["caption_attribution_description"],
+                )
+            ],
+            "source": [self.source] * batch_size,
+            "meta": [
+                json.dumps(
+                    {key: value[batch_id] for key, value in metas.items()},
                     default=json_serializer,
                     indent=2,
-                )
-
-
+                )
+                for batch_id in range(batch_size)
+            ],
+        }


 class ConceptualCaptions(DatasetsLoader):
@@ -542,24 +546,26 @@
             config_name="unlabeled",
             split=split,
             batch_size=batch_size,
-            num_proc=1
+            num_proc=1,
         )

-    def cast_to_pmd_features(self,
-
-
-
-
-
-
-
-
-
+    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
+        metas = {k: v for k, v in batch.items() if k not in ["image_url", "caption"]}
+        batch_size = len(next(iter(batch.values())))
+        return {
+            "image_url": batch["image_url"],
+            "image": [None] * batch_size,
+            "texts": [[caption] for caption in batch["caption"]],
+            "source": [self.source] * batch_size,
+            "meta": [
+                json.dumps(
+                    {key: value[batch_id] for key, value in metas.items()},
                     default=json_serializer,
                     indent=2,
-                )
-
-
+                )
+                for batch_id in range(batch_size)
+            ],
+        }


 class Conceptual12MLoader(DatasetsLoader):
@@ -569,24 +575,26 @@
             config_name=None,
             split=split,
             batch_size=batch_size,
-            num_proc=1
+            num_proc=1,
         )

-    def cast_to_pmd_features(self,
-
-
-
-
-
-
-
-
-
+    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
+        metas = {k: v for k, v in batch.items() if k not in ["image_url", "caption"]}
+        batch_size = len(next(iter(batch.values())))
+        return {
+            "image_url": batch["image_url"],
+            "image": [None] * batch_size,
+            "texts": [[caption] for caption in batch["caption"]],
+            "source": [self.source] * batch_size,
+            "meta": [
+                json.dumps(
+                    {key: value[batch_id] for key, value in metas.items()},
                     default=json_serializer,
                     indent=2,
-                )
-
-
+                )
+                for batch_id in range(batch_size)
+            ],
+        }


 class RedCapsLoader(DatasetsLoader):
@@ -596,25 +604,28 @@
             config_name="all",
             split=split,
             batch_size=batch_size,
-            num_proc=1
+            num_proc=1,
         )

-    def cast_to_pmd_features(self,
-
-
-
-
-
-
-
-
-
-
+    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
+        metas = {
+            k: v for k, v in batch.items() if k not in ["image_url", "raw_caption"]
+        }
+        batch_size = len(next(iter(batch.values())))
+        return {
+            "image_url": batch["image_url"],
+            "image": [None] * batch_size,
+            "texts": [[caption] for caption in batch["raw_caption"]],
+            "source": [self.source] * batch_size,
+            "meta": [
+                json.dumps(
+                    {key: value[batch_id] for key, value in metas.items()},
                     default=json_serializer,
                     indent=2,
-                )
-
-
+                )
+                for batch_id in range(batch_size)
+            ],
+        }


 class YFCC100MLoader(BaseLoaderWithDLManager):
@@ -717,7 +728,7 @@
                 break
         return record_text

-    def
+    def _generate_batches_with_kwargs(self, sql_file: str):
         # query records command
         sql_command = f"select {', '.join(self._COLUMNS)} from yfcc100m_dataset"

@@ -792,7 +803,8 @@ class PMDConfig(datasets.BuilderConfig):
         self.num_proc = num_proc


-
+# TODO @thomasw21 ArrowBasedBuilder to be able to return batches
+class PMD(datasets.ArrowBasedBuilder):
     """Builder for Open Images subset of PMD."""

     BUILDER_CONFIG_CLASS = PMDConfig
@@ -836,7 +848,7 @@ class PMD(datasets.GeneratorBasedBuilder):
                 VisualGenomeLoader(
                     split=split_name,
                     batch_size=self.config.datasets_batch_size,
-                    num_proc=self.config.num_proc
+                    num_proc=self.config.num_proc,
                 ),
                 # WITLoader(
                 #     split=split_name,
@@ -861,9 +873,9 @@
             for split_name in [datasets.Split.TRAIN]
         ]

-    def
+    def _generate_tables(self, loaders: List[BaseLoader]):
         idx = 0
         for loader in loaders:
-            for elt in loader.
+            for elt in loader._generate_batches():
                 yield idx, elt
                 idx += 1
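The commit replaces the per-example generator path with batch-level pyarrow tables: each loader now returns a columnar batch from cast_to_pmd_features, wraps it with pa.table, and the builder switches to datasets.ArrowBasedBuilder with a _generate_tables method. As a point of reference, here is a minimal sketch of that builder pattern, assuming illustrative class, column, and sample-data names that are not taken from pmd.py; only the datasets and pyarrow calls (_info, _split_generators, _generate_tables, pa.table) follow the standard API.

# Minimal sketch of the ArrowBasedBuilder pattern being tested in this commit.
# Names and inline data are illustrative placeholders, not part of pmd.py.
import datasets
import pyarrow as pa


class ToyArrowBuilder(datasets.ArrowBasedBuilder):
    """Toy builder showing the (key, pa.Table) contract of ArrowBasedBuilder."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image_url": datasets.Value("string"),
                    "texts": [datasets.Value("string")],
                }
            )
        )

    def _split_generators(self, dl_manager):
        # A real script would download and resolve files here; this sketch
        # inlines two already-columnar batches instead.
        batches = [
            {"image_url": ["http://example.com/a.jpg"], "texts": [["a caption"]]},
            {"image_url": ["http://example.com/b.jpg"], "texts": [["another caption"]]},
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"batches": batches}
            )
        ]

    def _generate_tables(self, batches):
        # ArrowBasedBuilder consumes (key, pyarrow.Table) pairs, one table per
        # batch, instead of the (key, example) pairs a GeneratorBasedBuilder yields.
        for idx, batch in enumerate(batches):
            yield idx, pa.table(batch)

Yielding one pa.Table per batch avoids materialising individual example dicts, which is presumably why _generate_examples becomes _generate_batches/_generate_tables here and each loader hands back pa.table(...) batches.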