Commit: changed tokenization
File changed: ref_seg_ger.py (+11 −3)
|
@@ -18,8 +18,8 @@ from glob import glob
|
|
| 18 |
import os
|
| 19 |
import numpy as np
|
| 20 |
from PIL import Image
|
| 21 |
-
from
|
| 22 |
-
from tokenizers.pre_tokenizers import Whitespace
|
| 23 |
import datasets
|
| 24 |
from itertools import chain
|
| 25 |
import pandas as pd
|
|
@@ -142,7 +142,15 @@ class RefSeg(datasets.GeneratorBasedBuilder):
|
|
| 142 |
# ]
|
| 143 |
|
| 144 |
# DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
|
| 145 |
-
TOKENIZER =
|
| 146 |
|
| 147 |
def _info(self):
|
| 148 |
# TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
|
|
|
|
| 18 |
import os
|
| 19 |
import numpy as np
|
| 20 |
from PIL import Image
|
| 21 |
+
from tokenizers import pre_tokenizers
|
| 22 |
+
from tokenizers.pre_tokenizers import Digits, Split, Whitespace
|
| 23 |
import datasets
|
| 24 |
from itertools import chain
|
| 25 |
import pandas as pd
|
|
|
|
| 142 |
# ]
|
| 143 |
|
| 144 |
# DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
|
| 145 |
+
TOKENIZER = pre_tokenizer = pre_tokenizers.Sequence([
|
| 146 |
+
Whitespace(),
|
| 147 |
+
Digits(),
|
| 148 |
+
Split(".", behavior="isolated"),
|
| 149 |
+
Split(":", behavior="isolated"),
|
| 150 |
+
Split("/", behavior="isolated"),
|
| 151 |
+
Split("-", behavior="isolated"),
|
| 152 |
+
Split(",", behavior="isolated"),
|
| 153 |
+
])
|
| 154 |
|
| 155 |
def _info(self):
|
| 156 |
# TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
|