depasquale committed
Commit 11a3d91 · Parent(s): c08bace

Switch to fast tokenizer format (tokenizer.json)


- Replace vocab.json + merges.txt with tokenizer.json
- Update tokenizer_config.json
- Compatible with swift-transformers and Python transformers (see the loading sketch below)
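
A quick way to verify the switch from Python (a minimal sketch; the repo id below is a placeholder, not the actual Hub repository name): with tokenizer.json present, AutoTokenizer should load the fast (Rust-backed) tokenizer instead of rebuilding one from vocab.json + merges.txt.

```python
# Minimal sketch, assuming `transformers` is installed.
# "user/model" is a placeholder repo id, not the actual repository name.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/model")

# With tokenizer.json in the repo this should be a fast (Rust-backed) tokenizer.
print(tok.is_fast)  # expected: True

# Round-trip a string to confirm the vocabulary and merges carried over.
ids = tok("Hello, world!")["input_ids"]
print(tok.decode(ids))
```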

Files changed (5)
  1. .gitattributes +1 -0
  2. merges.txt +0 -0
  3. tokenizer.json +3 -0
  4. tokenizer_config.json +5 -2
  5. vocab.json +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
merges.txt DELETED
 
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e3855f008ece7e02f09bad5c0aa492cd1594529a796b80193f4ef640200e58b
+ size 10523354
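
The three added lines are a Git LFS pointer, not the tokenizer itself; the real ~10 MB file lives in LFS storage. Downloads through huggingface_hub resolve LFS objects automatically, so consumers still receive the full tokenizer.json. A minimal sketch (placeholder repo id again):

```python
# Minimal sketch, assuming `huggingface_hub` is installed.
# "user/model" is a placeholder repo id, not the actual repository name.
import os
from huggingface_hub import hf_hub_download

# Downloads the resolved LFS object, not the 3-line pointer file.
path = hf_hub_download(repo_id="user/model", filename="tokenizer.json")
print(path, os.path.getsize(path))  # size should match the pointer (~10.5 MB)
```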
tokenizer_config.json CHANGED
@@ -26,12 +26,15 @@
       "special": true
     }
   },
-  "additional_special_tokens": ["<|im_start|>", "<|im_end|>"],
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
   "bos_token": null,
-  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "errors": "replace",
+  "extra_special_tokens": {},
   "model_max_length": 32768,
   "pad_token": "<|endoftext|>",
   "split_special_tokens": false,
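
The removed chat_template string is a standard ChatML layout (system/user/assistant turns wrapped in <|im_start|> ... <|im_end|>). Since <|im_start|> and <|im_end|> remain registered in additional_special_tokens, each should still encode to a single token id after the switch. A minimal sketch, using the same placeholder repo id as above:

```python
# Minimal sketch, assuming `transformers` is installed.
# "user/model" is a placeholder repo id, not the actual repository name.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/model")

# Each special token should map to exactly one id rather than being split
# into sub-tokens, confirming tokenizer.json carries the added specials.
for t in ["<|im_start|>", "<|im_end|>"]:
    ids = tok.encode(t, add_special_tokens=False)
    print(t, ids)  # expected: one id per special token
```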
vocab.json DELETED