debugging
docbank.py  +2 -1  CHANGED
@@ -248,6 +248,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
             except:
                 continue
 
+            print('Processing...')
             processed = self.TOKENIZER(
                 tokens,
                 boxes=bboxes,
@@ -256,7 +257,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
                 return_offsets_mapping=False,
                 return_attention_mask=False,
             )
-            print(
+            print('Processed')
 
             for chunk_id, index in enumerate(range(0, len(processed['input_ids'][0]), self.CHUNK_SIZE)):
                 split_tokens = processed['input_ids'][0][index:index + self.CHUNK_SIZE]
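For context, the edited lines sit between the tokenizer call and the chunking loop. Below is a minimal standalone sketch of that pattern, assuming a LayoutLM-style tokenizer that accepts word-level boxes and a CHUNK_SIZE of 512; the checkpoint name, chunk size, and sample inputs are assumptions for illustration, not taken from this diff.

# Minimal sketch of the tokenize-then-chunk pattern around the edited lines.
# Assumptions (not from this diff): CHUNK_SIZE = 512 and a LayoutLM-style
# tokenizer that accepts word boxes via the `boxes` argument.
from transformers import AutoTokenizer

CHUNK_SIZE = 512  # assumed value of self.CHUNK_SIZE

tokenizer = AutoTokenizer.from_pretrained(
    "microsoft/layoutlmv2-base-uncased"  # assumed checkpoint
)

tokens = ["A", "few", "sample", "words"]  # words for one page
bboxes = [[0, 0, 10, 10]] * len(tokens)   # one dummy box per word

# Batched call, so input_ids is a list with one entry per example;
# [0] picks out the flat id sequence, as in the diff above.
processed = tokenizer(
    [tokens],
    boxes=[bboxes],
    return_offsets_mapping=False,
    return_attention_mask=False,
)

# Split the flat id sequence into fixed-size windows.
for chunk_id, index in enumerate(range(0, len(processed["input_ids"][0]), CHUNK_SIZE)):
    split_tokens = processed["input_ids"][0][index:index + CHUNK_SIZE]
    print(chunk_id, len(split_tokens))

Running the sketch prints one line per chunk, which is roughly the progress visibility the two added print() calls are after.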