RobertML committed on
Commit 244a47a · verified · 1 Parent(s): 0435933

Upload tokenizer

Files changed (2):
  1. tokenizer.json +14 -2
  2. tokenizer_config.json +7 -0
tokenizer.json CHANGED
@@ -1,7 +1,19 @@
 {
   "version": "1.0",
-  "truncation": null,
-  "padding": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 2048,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
+  "padding": {
+    "strategy": "BatchLongest",
+    "direction": "Right",
+    "pad_to_multiple_of": null,
+    "pad_id": 100257,
+    "pad_type_id": 0,
+    "pad_token": "<|endoftext|>"
+  },
   "added_tokens": [
     {
       "id": 100256,
tokenizer_config.json CHANGED
@@ -329,8 +329,15 @@
   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
   "clean_up_tokenization_spaces": true,
   "eos_token": "</s>",
+  "max_length": 2048,
   "model_max_length": 2048,
+  "pad_to_multiple_of": null,
   "pad_token": "<|endoftext|>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "stride": 0,
   "tokenizer_class": "GPT2Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "<unk>"
 }
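
The new keys in tokenizer_config.json (max_length, padding_side, truncation_side, and related fields) are the padding/truncation defaults that transformers reads when the tokenizer is loaded. A hedged sketch of how they would typically be exercised, assuming a local clone of this repository (the path is illustrative, not part of the commit):

    from transformers import AutoTokenizer

    # Load from a local clone of this repository (path is a placeholder).
    tokenizer = AutoTokenizer.from_pretrained("./")

    # model_max_length=2048, padding_side="right" and pad_token="<|endoftext|>"
    # come from tokenizer_config.json, so batch encoding can pad and truncate
    # without passing lengths explicitly.
    batch = tokenizer(["a short example", "a somewhat longer second example"],
                      padding=True, truncation=True)
    print(batch["input_ids"])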