Commit ffa2f3e (verified) · 1 parent: bb1115f
baby-dev committed: Upload model trained with Unsloth

Upload model trained with Unsloth 2x faster

Files changed (5):
  1. merges.txt +0 -0
  2. special_tokens_map.json +6 -7
  3. tokenizer.json +2 -2
  4. tokenizer_config.json +126 -21
  5. vocab.json +0 -0
merges.txt CHANGED
The diff for this file is too large to render.
 
special_tokens_map.json CHANGED
@@ -1,32 +1,31 @@
 {
   "additional_special_tokens": [
-    "<unk>",
-    "<s>",
-    "</s>"
+    "<|im_start|>",
+    "<|im_end|>"
   ],
   "bos_token": {
-    "content": "<s>",
+    "content": "<|im_start|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "</s>",
+    "content": "<|im_end|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "pad_token": {
-    "content": "<unk>",
+    "content": "<empty_output>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "unk_token": {
-    "content": "<unk>",
+    "content": "<|endoftext|>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d5b9634c23bdd4540f633d327120e1fa4b57a2a723a56d7a92debfc4d15c061
-size 3505751
+oid sha256:7d27c493c729a66ecefc837280b05d948b1ed50d130eebdbf911b1b36cf38ed7
+size 3522656
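
tokenizer.json is stored via Git LFS, so the commit only rewrites the pointer file; the hash and size identify the new blob. A sketch of confirming that a downloaded copy matches this commit, run from the repository's local directory:

```python
import hashlib

# Hash the local file and compare it to the sha256 in the LFS pointer above.
with open("tokenizer.json", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

expected = "7d27c493c729a66ecefc837280b05d948b1ed50d130eebdbf911b1b36cf38ed7"
print("match" if digest == expected else "mismatch")
```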
tokenizer_config.json CHANGED
@@ -1,10 +1,8 @@
 {
-  "add_bos_token": true,
-  "add_eos_token": false,
-  "add_prefix_space": null,
+  "add_prefix_space": false,
   "added_tokens_decoder": {
     "0": {
-      "content": "<unk>",
+      "content": "<|endoftext|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -12,7 +10,7 @@
       "special": true
     },
     "1": {
-      "content": "<s>",
+      "content": "<|im_start|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -20,7 +18,119 @@
       "special": true
     },
     "2": {
-      "content": "</s>",
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<repo_name>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "<reponame>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "5": {
+      "content": "<file_sep>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "6": {
+      "content": "<filename>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "7": {
+      "content": "<gh_stars>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "8": {
+      "content": "<issue_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "9": {
+      "content": "<issue_comment>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "10": {
+      "content": "<issue_closed>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "11": {
+      "content": "<jupyter_start>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "12": {
+      "content": "<jupyter_text>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "13": {
+      "content": "<jupyter_code>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "14": {
+      "content": "<jupyter_output>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "15": {
+      "content": "<jupyter_script>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "16": {
+      "content": "<empty_output>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -29,23 +139,18 @@
     }
   },
   "additional_special_tokens": [
-    "<unk>",
-    "<s>",
-    "</s>"
+    "<|im_start|>",
+    "<|im_end|>"
   ],
-  "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+  "bos_token": "<|im_start|>",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
+  "eos_token": "<|im_end|>",
   "extra_special_tokens": {},
-  "legacy": true,
-  "model_max_length": 32768,
-  "pad_token": "<unk>",
+  "model_max_length": 2048,
+  "pad_token": "<empty_output>",
   "padding_side": "right",
-  "sp_model_kwargs": {},
-  "spaces_between_special_tokens": false,
-  "tokenizer_class": "LlamaTokenizer",
-  "truncation_side": "left",
-  "unk_token": "<unk>",
-  "use_default_system_prompt": true
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>",
+  "vocab_size": 49152
 }
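
Beyond re-mapping the added tokens to the 49152-entry GPT-2-style vocabulary, the key change here is the chat_template: the old Zephyr-style <|user|>/<|assistant|> layout is replaced by ChatML. A minimal sketch of rendering a prompt with the new template; the repo id is again a placeholder:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("baby-dev/model")  # placeholder repo id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# add_generation_prompt=True appends the opening '<|im_start|>assistant\n'
# so the model continues as the assistant.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```

Note that model_max_length also drops from 32768 to 2048, so tokenization with truncation enabled will now clip inputs much earlier.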
vocab.json CHANGED
The diff for this file is too large to render.