Upload tokenizer
- special_tokens_map.json +1 -7
- tokenizer.json +0 -9
- tokenizer_config.json +1 -10
special_tokens_map.json
CHANGED

@@ -17,13 +17,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "<|im_end|>",
   "unk_token": {
     "content": "<|endoftext|>",
     "lstrip": false,
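In effect, this commit makes the pad token an alias of the <|im_end|> end-of-sequence token instead of a dedicated [PAD] entry. A minimal sketch of what that looks like at load time, assuming a placeholder repo id (the diff does not name the actual repository):

from transformers import AutoTokenizer

# "your-org/your-model" is a placeholder, not the repository this commit belongs to.
tok = AutoTokenizer.from_pretrained("your-org/your-model")

# After this commit, padding reuses <|im_end|>, which is also the eos token,
# so the two ids coincide.
assert tok.pad_token == "<|im_end|>"
assert tok.pad_token_id == tok.eos_token_id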
tokenizer.json
CHANGED

@@ -155,15 +155,6 @@
       "rstrip": false,
       "normalized": false,
       "special": true
-    },
-    {
-      "id": 49152,
-      "content": "[PAD]",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
     }
   ],
   "normalizer": null,
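Removing the [PAD] entry from added_tokens also shrinks the effective vocabulary back to the base 49152 entries (ids 0 through 49151): [PAD] had been appended as id 49152, one past the declared vocab_size. A rough check, again against a placeholder repo id:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model")  # placeholder repo id

# [PAD] no longer exists as an added token, so the tokenizer length matches
# the declared vocab_size, and looking up "[PAD]" falls back to the unk token.
print(len(tok))                            # expected: 49152
print(tok.convert_tokens_to_ids("[PAD]"))  # expected: tok.unk_token_id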
tokenizer_config.json
CHANGED

@@ -136,14 +136,6 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "49152": {
-      "content": "[PAD]",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "additional_special_tokens": [

@@ -156,8 +148,7 @@
   "eos_token": "<|im_end|>",
   "extra_special_tokens": {},
   "model_max_length": 2048,
-  "pad_token": "[PAD]",
-  "padding_side": "left",
+  "pad_token": "<|im_end|>",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>",
   "vocab_size": 49152
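Since the explicit "padding_side": "left" setting was also dropped, the transformers default of right padding applies unless overridden at call time. A short sketch of what batching looks like after the change, reusing the same placeholder repo id; note that because the pad id now equals the eos id, downstream training code should rely on the attention mask (or label masking) to exclude padded positions:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model")  # placeholder repo id

batch = tok(
    ["short", "a somewhat longer example"],
    padding=True,
    return_tensors="pt",
)

# Padded positions carry the <|im_end|> / eos id; the attention mask
# (1 for real tokens, 0 for padding) is what distinguishes them.
print(batch["input_ids"])
print(batch["attention_mask"])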