MrPotato committed on
Commit
fdcf5aa
·
1 Parent(s): 1f6b229

Upload docbank.py

Browse files
Files changed (1) hide show
  1. docbank.py +11 -1
docbank.py CHANGED
@@ -16,9 +16,10 @@
16
 
17
  import csv
18
  import os
 
19
  import numpy as np
20
  from PIL import Image
21
-
22
  import datasets
23
 
24
  # TODO: Add BibTeX citation
@@ -56,6 +57,7 @@ _FEATURES = datasets.Features(
56
  {
57
  "id": datasets.Value("string"),
58
  "tokens": datasets.Sequence(datasets.Value("string")),
 
59
  "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
60
  "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
61
  "fonts": datasets.Sequence(datasets.Value("string")),
@@ -136,6 +138,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
136
  ]
137
 
138
  DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
 
139
 
140
  def _info(self):
141
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
@@ -236,10 +239,17 @@ class Docbank(datasets.GeneratorBasedBuilder):
236
  labels.append(row[9])
237
  except:
238
  continue
 
 
 
 
 
 
239
 
240
  yield key, {
241
  "id": f_id,
242
  "tokens": tokens,
 
243
  "bboxes": bboxes,
244
  "RGBs": rgbs,
245
  "fonts": fonts,
 
16
 
17
  import csv
18
  import os
19
+ import itertools
20
  import numpy as np
21
  from PIL import Image
22
+ from transformers import AutoTokenizer
23
  import datasets
24
 
25
  # TODO: Add BibTeX citation
 
57
  {
58
  "id": datasets.Value("string"),
59
  "tokens": datasets.Sequence(datasets.Value("string")),
60
+ "input_ids": datasets.Sequence(datasets.Value("int64")),
61
  "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
62
  "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
63
  "fonts": datasets.Sequence(datasets.Value("string")),
 
138
  ]
139
 
140
  DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
141
+ TOKENIZER = AutoTokenizer.from_pretrained("xlm-roberta-base")
142
 
143
  def _info(self):
144
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
 
239
  labels.append(row[9])
240
  except:
241
  continue
242
+ tokenized_inputs = self.TOKENIZER(
243
+ tokens,
244
+ add_special_tokens=False,
245
+ return_offsets_mapping=False,
246
+ return_attention_mask=False,
247
+ )
248
 
249
  yield key, {
250
  "id": f_id,
251
  "tokens": tokens,
252
+ 'input_ids': list(itertools.chain.from_iterable(tokenized_inputs['input_ids'])),
253
  "bboxes": bboxes,
254
  "RGBs": rgbs,
255
  "fonts": fonts,