Upload tokenizer

- special_tokens_map.json +1 -7
- tokenizer_config.json +1 -8
special_tokens_map.json
CHANGED
@@ -13,11 +13,5 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "<|eot_id|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "pad_token": "<|eot_id|>"
 }
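For context, this change is the usual effect of assigning the pad token as a plain string before saving. A minimal sketch, assuming a Llama-3-style base tokenizer (the model ID and output path are placeholders; "<|eot_id|>" comes from the diff itself):

from transformers import AutoTokenizer

# Placeholder model ID; the <|eot_id|> token suggests a Llama-3-style tokenizer.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")

# Assigning a bare string (rather than an AddedToken) serializes pad_token
# in the one-line string form shown on the "+" side above.
tokenizer.pad_token = "<|eot_id|>"

# save_pretrained rewrites special_tokens_map.json and tokenizer_config.json.
tokenizer.save_pretrained("./tokenizer")

Both forms load back to the same token: the removed dict form only spells out the AddedToken flags (lstrip, rstrip, normalized, single_word), which a Llama-3-style tokenizer_config.json typically also records under added_tokens_decoder.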
tokenizer_config.json
CHANGED
@@ -2054,18 +2054,11 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|eot_id|>",
   "extra_special_tokens": {},
-  "max_length": 256,
   "model_input_names": [
     "input_ids",
     "attention_mask"
   ],
   "model_max_length": 1000000000000000019884624838656,
-  "pad_to_multiple_of": null,
   "pad_token": "<|eot_id|>",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
-  "stride": 0,
-  "tokenizer_class": "PreTrainedTokenizerFast",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first"
+  "tokenizer_class": "PreTrainedTokenizerFast"
 }
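The keys removed here (max_length, pad_to_multiple_of, pad_token_type_id, padding_side, stride, truncation_side, truncation_strategy) are call-time padding/truncation settings that had been persisted into the config; after this cleanup they are supplied per call instead. A minimal usage sketch under that assumption (the local path is a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./tokenizer")  # placeholder path

batch = tok(
    ["first example", "second example"],
    padding="max_length",        # pad every sequence out to max_length
    max_length=256,              # stands in for the removed "max_length": 256
    truncation="longest_first",  # stands in for the removed "truncation_strategy"
    return_tensors="pt",
)
print(batch["input_ids"].shape)  # torch.Size([2, 256])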