rewrote tokenizer
docbank.py  +35 -19
@@ -18,7 +18,7 @@ import csv
 import os
 import numpy as np
 from PIL import Image
-from transformers import
+from transformers import LayoutXLMTokenizerFast
 import datasets
 
 # TODO: Add BibTeX citation
@@ -131,7 +131,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
     ]
 
     # DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-    TOKENIZER =
+    TOKENIZER = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
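Note on the new class-level tokenizer: LayoutXLMTokenizerFast accepts pre-split words together with per-word boxes and labels and expands each to the subword level, which is what the rewritten hunk below relies on. A minimal sketch of that behavior, with made-up words and boxes and a hypothetical label2id mapping (word_labels expects integers, so the row[9] strings used in docbank.py would need such a mapping):

from transformers import LayoutXLMTokenizerFast

tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")

# Made-up sample data; each DocBank .txt row yields one word, one box, one label.
words = ["Introduction", "paragraph"]
boxes = [[74, 136, 161, 151], [163, 136, 224, 151]]  # one 0-1000 box per word
label2id = {"section": 0, "paragraph": 1}            # hypothetical mapping
word_labels = [label2id["section"], label2id["paragraph"]]

encoding = tokenizer(
    words,
    boxes=boxes,
    word_labels=word_labels,
    add_special_tokens=False,
)

# Each word's box is repeated for every subword it splits into; by default
# only the first subword keeps the word's label, the rest get -100.
print(encoding["input_ids"])
print(encoding["bbox"])
print(encoding["labels"])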
@@ -220,35 +220,51 @@ class Docbank(datasets.GeneratorBasedBuilder):
         #image, size = load_image(f_fp_img, size=224)
         original_image, _ = load_image(f_fp_img)
 
+
         try:
             with open(f_fp_txt, newline='', encoding='utf-8') as csvfile:
                 reader = csv.reader(csvfile, delimiter='\t', quotechar=' ')
                 for row in reader:
                     # normalized_bbox = normalize_bbox(row[1:5], size)
                     normalized_bbox = [int(x) for x in row[1:5]]
-                    tokenized_input = self.TOKENIZER(
-                        row[0],
-                        add_special_tokens=False,
-                        return_offsets_mapping=False,
-                        return_attention_mask=False,
-                        max_length=512, truncation=True
-                    )
-                    for tkn in tokenized_input['input_ids']:
-                        tokens.append(tkn)
-                        bboxes.append(normalized_bbox)
-                        # rgbs.append(row[5:8])
-                        # fonts.append(row[8])
-                        labels.append(row[9])
+                    tokens.append(row[0])
+                    bboxes.append(normalized_bbox)
+                    labels.append(row[9])
+                    # tokenized_input = self.TOKENIZER(
+                    #     row[0],
+                    #     add_special_tokens=False,
+                    #     return_offsets_mapping=False,
+                    #     return_attention_mask=False,
+                    #     max_length=512, truncation=True
+                    # )
+                    #
+                    # for tkn in tokenized_input['input_ids']:
+                    #     tokens.append(tkn)
+                    #     bboxes.append(normalized_bbox)
+                    #     # rgbs.append(row[5:8])
+                    #     # fonts.append(row[8])
+                    #     labels.append(row[9])
 
         except:
             continue
 
-        for chunk_id, index in enumerate(range(0, len(tokens), self.CHUNK_SIZE)):
-            split_tokens = tokens[index:index + self.CHUNK_SIZE]
-            split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
+        processed = self.TOKENIZER(
+            tokens,
+            boxes=bboxes,
+            word_labels=labels,
+            add_special_tokens=False,
+            return_offsets_mapping=False,
+            return_attention_mask=False,
+        )
+
+        for chunk_id, index in enumerate(range(0, len(processed['input_ids'][0]), self.CHUNK_SIZE)):
+            split_tokens = processed['input_ids'][0][index:index + self.CHUNK_SIZE]
+            split_bboxes = processed['bbox'][0][index:index + self.CHUNK_SIZE]
             # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
             # split_fonts = fonts[index:index + self.CHUNK_SIZE]
-            split_labels = labels[index:index + self.CHUNK_SIZE]
+            split_labels = processed['labels'][0][index:index + self.CHUNK_SIZE]
+
+            #tokenized = self.TOKENIZER(processed['words'], boxes=processed['boxes'])
 
             yield key, {
                 "id": f"{f_id}_{chunk_id}",
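Aside on the commented-out normalize_bbox call kept in this hunk: LayoutLM-family models expect box coordinates scaled into a 0-1000 grid relative to page size, and the diff passing the raw integers through suggests DocBank's shipped boxes already fit that range. A sketch of what such a helper typically looks like, assuming absolute pixel coordinates and a (width, height) page size (names and values are illustrative, not taken from docbank.py):

def normalize_bbox(bbox, size):
    # Scale an (x0, y0, x1, y1) box into the 0-1000 grid that
    # LayoutLM-family models expect, given the page (width, height).
    width, height = size
    x0, y0, x1, y1 = (int(v) for v in bbox)
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]

# e.g. on a 612x792 pt page: ["61", "79", "306", "158"] -> [99, 99, 500, 199]
print(normalize_bbox(["61", "79", "306", "158"], (612, 792)))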
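The chunking loop at the end of the hunk slices the flattened encoding into fixed-size windows so that each yielded example fits the model's sequence limit. A standalone sketch of that pattern (chunk_size=512 is an assumed value; docbank.py takes it from self.CHUNK_SIZE):

def chunk_encoding(input_ids, bboxes, labels, chunk_size=512):
    # Walk the aligned sequences in fixed-size steps and yield one
    # (chunk_id, ids, boxes, labels) window per step, mirroring the
    # enumerate(range(0, len(...), CHUNK_SIZE)) loop in the diff.
    for chunk_id, start in enumerate(range(0, len(input_ids), chunk_size)):
        end = start + chunk_size
        yield chunk_id, input_ids[start:end], bboxes[start:end], labels[start:end]

# e.g. 1200 tokens -> windows of 512, 512 and 176 tokens
ids = list(range(1200))
boxes = [[0, 0, 0, 0]] * 1200
labs = [0] * 1200
for chunk_id, i, b, l in chunk_encoding(ids, boxes, labs):
    print(chunk_id, len(i))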