MrPotato committed
Commit fbb0bf8 · 1 Parent(s): 8942231

batched tokenization

Files changed (1):
1. docbank.py +21 -8
docbank.py CHANGED

@@ -271,25 +271,38 @@ class Docbank(datasets.GeneratorBasedBuilder):
                 tokens,
                 boxes=bboxes,
                 word_labels=labels,
-                add_special_tokens=False,
-                return_offsets_mapping=False,
-                return_attention_mask=False,
+                padding=True,
+                max_length=512,
+                truncation=True,
+                return_overflowing_tokens=True,
+                return_offsets_mapping=True
+
             )
             #print(processed)
 
-            for chunk_id, index in enumerate(range(0, len(processed['input_ids']), self.CHUNK_SIZE)):
-                split_tokens = processed['input_ids'][index:index + self.CHUNK_SIZE]
-                split_bboxes = processed['bbox'][index:index + self.CHUNK_SIZE]
+            for batch_id, batch in enumerate(processed['input_ids']):
+                split_tokens = batch
+                split_bboxes = processed['bbox'][batch_id]
+                split_attention_mask = processed['attention_mask'][batch_id]
+                split_offset_mapping = processed['offset_mapping'][batch_id]
                 # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
                 # split_fonts = fonts[index:index + self.CHUNK_SIZE]
-                split_labels = processed['labels'][index:index + self.CHUNK_SIZE]
+                split_labels = processed['labels'][batch_id]
+                # for chunk_id, index in enumerate(range(0, len(processed['input_ids']), self.CHUNK_SIZE)):
+                #     split_tokens = processed['input_ids'][index:index + self.CHUNK_SIZE]
+                #     split_bboxes = processed['bbox'][index:index + self.CHUNK_SIZE]
+                #     # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
+                #     # split_fonts = fonts[index:index + self.CHUNK_SIZE]
+                #     split_labels = processed['labels'][index:index + self.CHUNK_SIZE]
 
                 #tokenized = self.TOKENIZER(processed['words'], boxes=processed['boxes'])
 
                 yield key, {
-                    "id": f"{f_id}_{chunk_id}",
+                    "id": f"{f_id}_{batch_id}",
                     'input_ids': split_tokens,
                     "bbox": split_bboxes,
+                    "attention_mask": split_attention_mask,
+                    "offset_mapping": split_offset_mapping,
                     # "RGBs": split_rgbs,
                     # "fonts": split_fonts,
                     #"image": image,