MrPotato committed on
Commit a11428c · 1 Parent(s): 0dd8deb
Files changed (1)
1. docbank.py +6 -6
docbank.py CHANGED
@@ -211,7 +211,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
         # print(filepath)
         key = 0
         for f in filepath:
-            print(f)
+            #print(f)
             f_id = f['id']
             f_fp_txt = f['filepath_txt']
             f_fp_img = f['filepath_img']
@@ -233,9 +233,9 @@ class Docbank(datasets.GeneratorBasedBuilder):
                     normalized_bbox = [int(x) for x in row[1:5]]
                     tokens.append(row[0])
                     bboxes.append(normalized_bbox)
-                    print(f'Before: {row[9]}')
-                    labels.append(self.LABEL2ID(row[9]))
-                    print(f'After: {row[9]}')
+                    #print(f'Before: {row[9]}')
+                    labels.append(self.LABEL2ID[row[9]])
+                    #print(f'After: {row[9]}')
                     # tokenized_input = self.TOKENIZER(
                     # row[0],
                     # add_special_tokens=False,
@@ -254,7 +254,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
                 except:
                     continue

-            print('Processing...')
+            #print('Processing...')
            processed = self.TOKENIZER(
                 tokens,
                 boxes=bboxes,
@@ -263,7 +263,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
                 return_offsets_mapping=False,
                 return_attention_mask=False,
             )
-            print(processed)
+            #print(processed)

             for chunk_id, index in enumerate(range(0, len(processed['input_ids']), self.CHUNK_SIZE)):
                 split_tokens = processed['input_ids'][index:index + self.CHUNK_SIZE]
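
Aside from commenting out debug prints, the substantive fix is the label lookup at line 237: LABEL2ID is presumably a dict mapping DocBank label strings to integer ids, so it has to be subscripted rather than called. A minimal sketch of that difference, assuming a hypothetical LABEL2ID mapping and using only what the diff shows about the row layout (token in column 0, bbox in columns 1-4, label string in column 9; other columns are placeholders):

    # Hypothetical label-to-id mapping; the real LABEL2ID lives in docbank.py.
    LABEL2ID = {'paragraph': 0, 'title': 1, 'table': 2}

    # Example row: token, bbox (cols 1-4), placeholder cols 5-8, label in col 9.
    row = ['Example', '10', '20', '30', '40', '255', '255', '255', 'Arial', 'paragraph']

    # Old code called the dict and would raise:
    #   TypeError: 'dict' object is not callable
    # label = LABEL2ID(row[9])

    # New code subscripts the dict to get the integer id for the label string.
    label = LABEL2ID[row[9]]
    print(label)  # 0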