Update tokenizer bos_token, eos_token, pad_token
#3
by sirorezka - opened
- tokenizer_config.json +5 -2
tokenizer_config.json CHANGED
```diff
@@ -157,12 +157,15 @@
     "<jupyter_script>",
     "<empty_output>"
   ],
-  "bos_token": "<|
+  "bos_token": "<|im_start|>",
   "clean_up_tokenization_spaces": false,
   "chat_template": "{%- if messages[0]['role'] == 'system' -%}{{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}{%- else -%}{{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}{%- endif -%}{%- for message in messages -%}{%- if message.role == 'system' and loop.first -%}{# Skip #}{%- else -%}{{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{- '<|im_start|>assistant\\n' }}{%- endif -%}",
-  "eos_token": "<|
+  "eos_token": "<|im_end|>",
   "extra_special_tokens": {},
   "model_max_length": 131072,
+  "pad_token": "<|im_end|>",
+  "padding_side": "right",
+  "split_special_tokens": false,
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>",
   "vocab_size": 49152
```
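A quick way to sanity-check the new configuration is to load the tokenizer from this PR's revision and render a conversation through the chat template. This is a minimal sketch: `org/model` is a placeholder for the actual repo id, and `refs/pr/3` assumes the usual Hub convention for PR revisions.

```python
from transformers import AutoTokenizer

# Placeholder repo id; "refs/pr/3" points at this PR's revision on the Hub.
tokenizer = AutoTokenizer.from_pretrained("org/model", revision="refs/pr/3")

# The special tokens should now follow the ChatML convention.
assert tokenizer.bos_token == "<|im_start|>"
assert tokenizer.eos_token == "<|im_end|>"
assert tokenizer.pad_token == "<|im_end|>"

# The chat_template wraps each message as <|im_start|>{role}\n{content}<|im_end|>
# and injects a default system prompt when the first message is not a system one.
messages = [{"role": "user", "content": "Hello!"}]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```

Reusing `<|im_end|>` as the pad token is a common choice when the vocabulary has no dedicated padding token; combined with `"padding_side": "right"`, batched fine-tuning pads after the end-of-turn marker rather than before the prompt.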