thomasw21
committed on
Commit
·
f6d6453
1
Parent(s):
e6583b9
Define a batch_size_per_proc to lower the memory footprint; there's going to be a natural tradeoff
Browse files
pmd.py
CHANGED
|
@@ -93,7 +93,9 @@ class DatasetsLoader(BaseLoader):
|
|
| 93 |
self.dataset_name = dataset_name
|
| 94 |
self.config_name = config_name
|
| 95 |
self.num_proc = num_proc
|
| 96 |
-
|
|
|
|
|
|
|
| 97 |
|
| 98 |
@abstractmethod
|
| 99 |
def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
|
|
@@ -111,7 +113,7 @@ class DatasetsLoader(BaseLoader):
|
|
| 111 |
|
| 112 |
def _generate_rows_casted_pmd_features(self, dset: Dataset, batch_start: int):
|
| 113 |
dataset_size = len(dset)
|
| 114 |
-
batch_end = min(batch_start + self.
|
| 115 |
batch = dset[batch_start:batch_end]
|
| 116 |
rows = self.convert_batch_to_list_of_rows(batch)
|
| 117 |
|
|
@@ -120,14 +122,13 @@ class DatasetsLoader(BaseLoader):
|
|
| 120 |
rows_casted_pmd_features += self.cast_to_pmd_features(row)
|
| 121 |
return rows_casted_pmd_features
|
| 122 |
|
| 123 |
-
|
| 124 |
def _generate_examples(self):
|
| 125 |
dataset = load_dataset(self.dataset_name, self.config_name, split=self.split)
|
| 126 |
dataset_size = len(dataset)
|
| 127 |
|
| 128 |
# load batches and yield individual rows
|
| 129 |
if self.num_proc == 1:
|
| 130 |
-
for batch_start in range(0, dataset_size, self.
|
| 131 |
rows_casted_pmd_features = self._generate_rows_casted_pmd_features(dset=dataset, batch_start=batch_start)
|
| 132 |
for row_casted_pmd_features in rows_casted_pmd_features:
|
| 133 |
yield row_casted_pmd_features
|
|
@@ -138,7 +139,7 @@ class DatasetsLoader(BaseLoader):
|
|
| 138 |
with Pool(self.num_proc) as pool:
|
| 139 |
rows_casted_pmd_features = pool.imap(
|
| 140 |
partial(self._generate_rows_casted_pmd_features, dataset),
|
| 141 |
-
range(0, dataset_size, self.
|
| 142 |
)
|
| 143 |
for row_casted_pmd_features in rows_casted_pmd_features:
|
| 144 |
yield row_casted_pmd_features
|
|
|
|
| 93 |
self.dataset_name = dataset_name
|
| 94 |
self.config_name = config_name
|
| 95 |
self.num_proc = num_proc
|
| 96 |
+
# In order to not have memory explode we define a batch size per proc
|
| 97 |
+
self.batch_size_per_proc = batch_size // self.num_proc
|
| 98 |
+
assert self.batch_size_per_proc > 1
|
| 99 |
|
| 100 |
@abstractmethod
|
| 101 |
def cast_to_pmd_features(self, row: Dict) -> List[Dict[str, Any]]:
|
|
|
|
| 113 |
|
| 114 |
def _generate_rows_casted_pmd_features(self, dset: Dataset, batch_start: int):
|
| 115 |
dataset_size = len(dset)
|
| 116 |
+
batch_end = min(batch_start + self.batch_size_per_proc, dataset_size)
|
| 117 |
batch = dset[batch_start:batch_end]
|
| 118 |
rows = self.convert_batch_to_list_of_rows(batch)
|
| 119 |
|
|
|
|
| 122 |
rows_casted_pmd_features += self.cast_to_pmd_features(row)
|
| 123 |
return rows_casted_pmd_features
|
| 124 |
|
|
|
|
| 125 |
def _generate_examples(self):
|
| 126 |
dataset = load_dataset(self.dataset_name, self.config_name, split=self.split)
|
| 127 |
dataset_size = len(dataset)
|
| 128 |
|
| 129 |
# load batches and yield individual rows
|
| 130 |
if self.num_proc == 1:
|
| 131 |
+
for batch_start in range(0, dataset_size, self.batch_size_per_proc):
|
| 132 |
rows_casted_pmd_features = self._generate_rows_casted_pmd_features(dset=dataset, batch_start=batch_start)
|
| 133 |
for row_casted_pmd_features in rows_casted_pmd_features:
|
| 134 |
yield row_casted_pmd_features
|
|
|
|
| 139 |
with Pool(self.num_proc) as pool:
|
| 140 |
rows_casted_pmd_features = pool.imap(
|
| 141 |
partial(self._generate_rows_casted_pmd_features, dataset),
|
| 142 |
+
range(0, dataset_size, self.batch_size_per_proc),
|
| 143 |
)
|
| 144 |
for row_casted_pmd_features in rows_casted_pmd_features:
|
| 145 |
yield row_casted_pmd_features
|