batched tokenization
docbank.py  +17 -17
@@ -277,23 +277,23 @@ class Docbank(datasets.GeneratorBasedBuilder):
                 # )
                 #print(processed)

-                for chunk_id, index in enumerate(range(0, len(tokens), self.CHUNK_SIZE)):
-                    split_tokens = tokens[index:index + self.CHUNK_SIZE]
-                    split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
-                    # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
-                    # split_fonts = fonts[index:index + self.CHUNK_SIZE]
-                    split_labels = labels[index:index + self.CHUNK_SIZE]
+                # for chunk_id, index in enumerate(range(0, len(tokens), self.CHUNK_SIZE)):
+                #     split_tokens = tokens[index:index + self.CHUNK_SIZE]
+                #     split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
+                #     # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
+                #     # split_fonts = fonts[index:index + self.CHUNK_SIZE]
+                #     split_labels = labels[index:index + self.CHUNK_SIZE]

                 #tokenized = self.TOKENIZER(processed['words'], boxes=processed['boxes'])

-
-
-
-
-
-
-
-
-
-
-
+                yield key, {
+                    "id": f"file_{f_id}",
+                    'words': tokens,
+                    "bbox": bboxes,
+                    # "RGBs": split_rgbs,
+                    # "fonts": split_fonts,
+                    #"image": image,
+                    "original_image": original_image,
+                    "labels": labels
+                }
+                key += 1