Fix bos/eos token IDs (config.json + tokenizer_config.json)

#5
by KristianS7 - opened
Files changed (2)
  1. config.json +2 -2
  2. tokenizer_config.json +2 -2
config.json CHANGED
@@ -8,8 +8,8 @@
8
  "AutoModel": "modeling_ouro.OuroModel",
9
  "AutoModelForCausalLM": "modeling_ouro.OuroForCausalLM"
10
  },
11
- "bos_token_id": 0,
12
- "eos_token_id": 0,
13
  "head_dim": 128,
14
  "hidden_act": "silu",
15
  "hidden_size": 2048,
 
8
  "AutoModel": "modeling_ouro.OuroModel",
9
  "AutoModelForCausalLM": "modeling_ouro.OuroForCausalLM"
10
  },
11
+ "bos_token_id": 1,
12
+ "eos_token_id": 2,
13
  "head_dim": 128,
14
  "hidden_act": "silu",
15
  "hidden_size": 2048,
tokenizer_config.json CHANGED
@@ -157,10 +157,10 @@
157
  "<jupyter_script>",
158
  "<empty_output>"
159
  ],
160
- "bos_token": "<|endoftext|>",
161
  "clean_up_tokenization_spaces": false,
162
  "chat_template": "{%- if messages[0]['role'] == 'system' -%}{{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}{%- else -%}{{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}{%- endif -%}{%- for message in messages -%}{%- if message.role == 'system' and loop.first -%}{# Skip #}{%- else -%}{{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{- '<|im_start|>assistant\\n' }}{%- endif -%}",
163
- "eos_token": "<|endoftext|>",
164
  "extra_special_tokens": {},
165
  "model_max_length": 131072,
166
  "tokenizer_class": "GPT2Tokenizer",
 
157
  "<jupyter_script>",
158
  "<empty_output>"
159
  ],
160
+ "bos_token": "<|im_start|>",
161
  "clean_up_tokenization_spaces": false,
162
  "chat_template": "{%- if messages[0]['role'] == 'system' -%}{{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}{%- else -%}{{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}{%- endif -%}{%- for message in messages -%}{%- if message.role == 'system' and loop.first -%}{# Skip #}{%- else -%}{{- '<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{- '<|im_start|>assistant\\n' }}{%- endif -%}",
163
+ "eos_token": "<|im_end|>",
164
  "extra_special_tokens": {},
165
  "model_max_length": 131072,
166
  "tokenizer_class": "GPT2Tokenizer",