feat: use RoBERTa tokenizer to (hopefully) fix some tokenization problems for token classification tasks
tokenizer_config.json  CHANGED  +1 -1
@@ -935,7 +935,7 @@
   "model_max_length": 8192,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "tokenizer_class": "
+  "tokenizer_class": "RobertaTokenizerFast",
   "model_input_names": [
     "input_ids",
     "attention_mask"
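For anyone consuming this change downstream: `AutoTokenizer` dispatches on the `tokenizer_class` field in tokenizer_config.json, so loading the checkpoint after this commit should instantiate a `RobertaTokenizerFast`. A minimal sketch of how to verify that, assuming a placeholder repo id (`org/model`) rather than this checkpoint's actual name:

```python
# Sketch only: "org/model" is a placeholder repo id, not this checkpoint.
from transformers import AutoTokenizer

# AutoTokenizer reads "tokenizer_class" from tokenizer_config.json, so
# after this change it should resolve to a RobertaTokenizerFast.
tok = AutoTokenizer.from_pretrained("org/model")
print(type(tok).__name__)  # expected: RobertaTokenizerFast

# Token-classification examples align labels to words via the fast
# tokenizer's word_ids(); pre-split input exercises that path.
enc = tok(["Paris", "France"], is_split_into_words=True)
print(enc.word_ids())  # e.g. [None, 0, 1, None] with special tokens included
```

Fast tokenizers expose offset mappings and `word_ids()`, which label alignment in token-classification pipelines relies on, so a mismatched or generic `tokenizer_class` is a plausible source of the problems the commit message mentions.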