MrPotato committed
Commit 491aaba · 1 Parent(s): fdcf5aa

Upload docbank.py

Files changed (1)
  1. docbank.py +29 -22
docbank.py CHANGED
@@ -56,7 +56,6 @@ _URLS = {
 _FEATURES = datasets.Features(
     {
         "id": datasets.Value("string"),
-        "tokens": datasets.Sequence(datasets.Value("string")),
         "input_ids": datasets.Sequence(datasets.Value("int64")),
         "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
@@ -117,6 +116,7 @@ def merge_bbox(bbox_list):
 class Docbank(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""
 
+    CHUNK_SIZE = 512
     VERSION = datasets.Version("1.0.0")
 
     # This is an example of a dataset with multiple configurations.
@@ -212,7 +212,8 @@ class Docbank(datasets.GeneratorBasedBuilder):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         #print(filepath)
-        for key, f in enumerate(filepath):
+        key = 0
+        for f in filepath:
             #print(f)
             f_id = f['id']
             f_fp_txt = f['filepath_txt']
@@ -231,31 +232,37 @@ class Docbank(datasets.GeneratorBasedBuilder):
                 reader = csv.reader(csvfile, delimiter='\t', quotechar=' ')
                 for row in reader:
                     #if f_id == '121.tar_1606.08710.gz_mutualEnergy_05_77':
-                    #    print(row[0])
-                    tokens.append(row[0])
+                    tokenized_input = self.TOKENIZER(
+                        row[0],
+                        add_special_tokens=False,
+                        return_offsets_mapping=False,
+                        return_attention_mask=False,
+                    )
+                    tokens.append(tokenized_input['input_ids'][0] if len(tokenized_input['input_ids']) == 1 else self.TOKENIZER.unk_token_id)
                     bboxes.append(normalize_bbox(row[1:5], size))
                     rgbs.append(row[5:8])
                     fonts.append(row[8])
                     labels.append(row[9])
             except:
                 continue
-            tokenized_inputs = self.TOKENIZER(
-                tokens,
-                add_special_tokens=False,
-                return_offsets_mapping=False,
-                return_attention_mask=False,
-            )
-
-            yield key, {
-                "id": f_id,
-                "tokens": tokens,
-                'input_ids': list(itertools.chain.from_iterable(tokenized_inputs['input_ids'])),
-                "bboxes": bboxes,
-                "RGBs": rgbs,
-                "fonts": fonts,
-                "image": image,
-                "original_image": original_image,
-                "labels": labels
-            }
+
+            for chunk_id, index in enumerate(range(0, len(tokens), self.CHUNK_SIZE)):
+                split_tokens = tokens[index:index + self.CHUNK_SIZE]
+                split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
+                split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
+                split_fonts = fonts[index:index + self.CHUNK_SIZE]
+                split_labels = labels[index:index + self.CHUNK_SIZE]
+
+                yield key, {
+                    "id": f"{f_id}_{chunk_id}",
+                    'input_ids': split_tokens,
+                    "bboxes": split_bboxes,
+                    "RGBs": split_rgbs,
+                    "fonts": split_fonts,
+                    "image": image,
+                    "original_image": original_image,
+                    "labels": split_labels
+                }
+                key += 1
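
For reference, a minimal standalone sketch of the encoding scheme this commit switches to: each word from the DocBank text file is tokenized on its own, a word that splits into more than one subword falls back to the tokenizer's unk id so that input_ids stays index-aligned with bboxes and labels, and the per-document lists are then cut into CHUNK_SIZE-long windows. The tokenizer checkpoint and the function name below are illustrative stand-ins, not taken from the script.

from transformers import AutoTokenizer

# Illustrative stand-ins: the script's TOKENIZER and CHUNK_SIZE may be configured differently.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
CHUNK_SIZE = 512

def encode_and_chunk(words, bboxes, labels, chunk_size=CHUNK_SIZE):
    """One id per word (unk if the word splits), then aligned fixed-size chunks."""
    input_ids = []
    for word in words:
        ids = tokenizer(word, add_special_tokens=False)["input_ids"]
        # Keep a strict 1:1 mapping between ids and bboxes/labels, as the commit does.
        input_ids.append(ids[0] if len(ids) == 1 else tokenizer.unk_token_id)
    for chunk_id, start in enumerate(range(0, len(input_ids), chunk_size)):
        yield chunk_id, {
            "input_ids": input_ids[start:start + chunk_size],
            "bboxes": bboxes[start:start + chunk_size],
            "labels": labels[start:start + chunk_size],
        }

Compared with the previous version, which tokenized the whole token list at once and flattened the result with itertools.chain, this keeps every id attached to exactly one bounding box, at the cost of mapping multi-subword words to unk.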
 
 
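And a rough sketch of what downstream loading might look like once the script yields chunked examples; the script path and split name are placeholders. Since the tokens column was dropped from the features, token strings can be recovered from input_ids with the same tokenizer (e.g. via convert_ids_to_tokens) when needed.

from datasets import load_dataset

# Placeholder path/split: point this at wherever docbank.py actually lives.
ds = load_dataset("path/to/docbank.py", split="train")

example = ds[0]
print(example["id"])                      # original file id with a chunk suffix, e.g. "..._0"
print(len(example["input_ids"]))          # at most 512 (CHUNK_SIZE)
assert len(example["input_ids"]) == len(example["bboxes"]) == len(example["labels"])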