batched tokenization
Browse files — docbank.py (+2 −2)
docbank.py
CHANGED
|
@@ -60,7 +60,7 @@ _URLS = {
|
|
| 60 |
_FEATURES = datasets.Features(
|
| 61 |
{
|
| 62 |
"id": datasets.Value("string"),
|
| 63 |
-            "tokens": datasets.Sequence(datasets.Value("string")),
|
| 64 |
"bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
|
| 65 |
# "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
|
| 66 |
# "fonts": datasets.Sequence(datasets.Value("string")),
|
|
@@ -288,7 +288,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
|
|
| 288 |
|
| 289 |
yield key, {
|
| 290 |
"id": f"{f_id}_{chunk_id}",
|
| 291 |
-            'tokens': split_tokens,
|
| 292 |
"bbox": split_bboxes,
|
| 293 |
# "RGBs": split_rgbs,
|
| 294 |
# "fonts": split_fonts,
|
|
|
|
| 60 |
_FEATURES = datasets.Features(
|
| 61 |
{
|
| 62 |
"id": datasets.Value("string"),
|
| 63 |
+            "words": datasets.Sequence(datasets.Value("string")),
|
| 64 |
"bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
|
| 65 |
# "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
|
| 66 |
# "fonts": datasets.Sequence(datasets.Value("string")),
|
|
|
|
| 288 |
|
| 289 |
yield key, {
|
| 290 |
"id": f"{f_id}_{chunk_id}",
|
| 291 |
+            'words': split_tokens,
|
| 292 |
"bbox": split_bboxes,
|
| 293 |
# "RGBs": split_rgbs,
|
| 294 |
# "fonts": split_fonts,
|