batched tokenization
docbank.py +14 -27
@@ -60,7 +60,7 @@ _URLS = {
 _FEATURES = datasets.Features(
     {
         "id": datasets.Value("string"),
-        "input_ids": datasets.Sequence(datasets.Value("int64")),
+        "input_ids": datasets.Sequence(datasets.Value("string")),
         "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         # "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         # "fonts": datasets.Sequence(datasets.Value("string")),
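The schema change above means `input_ids` now carries the raw word strings; integer token ids are produced later, outside the loading script. A minimal sketch of what the new declaration accepts (the sample example is made up for illustration):

```python
import datasets

# Feature declaration as it stands after this commit: input_ids holds
# raw word strings, not integer token ids.
features = datasets.Features({
    "id": datasets.Value("string"),
    "input_ids": datasets.Sequence(datasets.Value("string")),
    "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
})

example = {
    "id": "doc_0001_0",
    "input_ids": ["Deep", "learning", "for", "document", "layout"],
    "bbox": [[0, 0, 10, 10]] * 5,
}

# encode_example validates/encodes one example against the declared schema.
print(features.encode_example(example)["input_ids"])
```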
@@ -267,42 +267,29 @@ class Docbank(datasets.GeneratorBasedBuilder):
                     continue
 
                 #print('Processing...')
-                processed = self.TOKENIZER(
-                    tokens,
-                    boxes=bboxes,
-                    word_labels=labels,
-                    add_special_tokens=False,
-                    truncation=True,
-                    max_length=self.CHUNK_SIZE,
-                    return_overflowing_tokens=True,
-                    return_attention_mask=True,
-                    return_offsets_mapping=True,
-                )
+                # processed = self.TOKENIZER(
+                #     tokens,
+                #     boxes=bboxes,
+                #     word_labels=labels,
+                #     add_special_tokens=False,
+                #     return_offsets_mapping=False,
+                #     return_attention_mask=False,
+                # )
                 #print(processed)
 
-                for batch_id in range(len(processed['input_ids'])):
-                    split_tokens = processed['input_ids'][batch_id]
-                    split_bboxes = processed['bbox'][batch_id]
-                    split_attention_mask = processed['attention_mask'][batch_id]
-                    split_offset_mapping = processed['offset_mapping'][batch_id]
+                for chunk_id, index in enumerate(range(0, len(tokens), self.CHUNK_SIZE)):
+                    split_tokens = tokens[index:index + self.CHUNK_SIZE]
+                    split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
                     # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
                     # split_fonts = fonts[index:index + self.CHUNK_SIZE]
-                    split_labels = processed['labels'][batch_id]
-                    # for chunk_id, index in enumerate(range(0, len(processed['input_ids']), self.CHUNK_SIZE)):
-                    # split_tokens = processed['input_ids'][index:index + self.CHUNK_SIZE]
-                    # split_bboxes = processed['bbox'][index:index + self.CHUNK_SIZE]
-                    # # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
-                    # # split_fonts = fonts[index:index + self.CHUNK_SIZE]
-                    # split_labels = processed['labels'][index:index + self.CHUNK_SIZE]
+                    split_labels = labels[index:index + self.CHUNK_SIZE]
 
                 #tokenized = self.TOKENIZER(processed['words'], boxes=processed['boxes'])
 
                     yield key, {
-                        "id": f"{f_id}_{batch_id}",
+                        "id": f"{f_id}_{chunk_id}",
                         'input_ids': split_tokens,
                         "bbox": split_bboxes,
-                        "attention_mask": split_attention_mask,
-                        "offset_mapping": split_offset_mapping,
                         # "RGBs": split_rgbs,
                         # "fonts": split_fonts,
                         #"image": image,
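The generator now skips tokenization entirely (the old TOKENIZER call survives only as a comment) and slices each document's parallel token/bbox/label lists into fixed-size chunks, yielding one example per chunk. A standalone sketch of that slicing, with CHUNK_SIZE and made-up data standing in for the class attribute and the parsed DocBank fields:

```python
# Standalone sketch of the new chunking logic; CHUNK_SIZE and the
# parallel lists stand in for self.CHUNK_SIZE and the parsed page data.
CHUNK_SIZE = 512

tokens = [f"word{i}" for i in range(1200)]      # raw DocBank tokens
bboxes = [[0, 0, 10, 10] for _ in range(1200)]  # one box per token
labels = ["paragraph"] * 1200                   # one label per token

f_id = "doc_0001"                               # illustrative file id
examples = []
for chunk_id, index in enumerate(range(0, len(tokens), CHUNK_SIZE)):
    examples.append({
        "id": f"{f_id}_{chunk_id}",
        "input_ids": tokens[index:index + CHUNK_SIZE],  # strings, not ids
        "bbox": bboxes[index:index + CHUNK_SIZE],
        "labels": labels[index:index + CHUNK_SIZE],
    })

# 1200 tokens with CHUNK_SIZE=512 -> chunks of 512, 512, and 176 tokens
assert [len(e["input_ids"]) for e in examples] == [512, 512, 176]
```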
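With raw strings in the dataset, the batched tokenization this commit prepares for can happen downstream, e.g. in a `datasets.map(batched=True)` pass. A hedged sketch of such a step, assuming a LayoutLMv2-style tokenizer that accepts pre-split words with one box per word, and assuming the chunked labels are exposed as a `labels` column; the checkpoint, script path, and label mapping are illustrative, not part of this commit:

```python
# Hypothetical downstream step (not part of this commit): tokenize the
# raw string chunks in batches with a word+box-aware tokenizer.
from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
ds = load_dataset("path/to/docbank.py", split="train")  # illustrative path

# Illustrative label mapping: word_labels must be ints, while the loader
# yields DocBank label strings.
label2id = {"paragraph": 0, "title": 1, "abstract": 2}

def tokenize_batch(batch):
    # LayoutLMv2/v3 tokenizers take pre-split words plus one bounding
    # box per word, which is exactly the shape the loader now yields.
    return tokenizer(
        batch["input_ids"],   # lists of raw word strings
        boxes=batch["bbox"],  # one [x0, y0, x1, y1] box per word
        word_labels=[[label2id[l] for l in labs] for labs in batch["labels"]],
        truncation=True,
        padding="max_length",
    )

# batched=True feeds the chunked examples through the tokenizer in groups.
ds = ds.map(tokenize_batch, batched=True, batch_size=64,
            remove_columns=ds.column_names)
```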