sagsan committed
Commit b618366 · verified · 1 Parent(s): 79cc2b3

Upload tokenizer

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json CHANGED
@@ -1,3 +1,6 @@
 {
-  " <|NEWLINE|> ": 50257
+  " <|NEWLINE|> ": 151646,
+  "<|endoftext|>": 151643,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644
 }
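
For context: a minimal sketch, assuming this commit's tokenizer is loaded with transformers, that checks the remapped special-token ids; "user/repo" below is a placeholder, not the actual repository id.

# Minimal sketch (assumption: transformers installed; "user/repo" is a
# placeholder for this repository's actual id).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/repo")

# After this commit the added tokens resolve into the Qwen2 id range
# (151643+) instead of the old GPT-2 range (50256/50257).
for t in (" <|NEWLINE|> ", "<|endoftext|>", "<|im_start|>", "<|im_end|>"):
    print(repr(t), tok.convert_tokens_to_ids(t))
# Expected ids: 151646, 151643, 151644, 151645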
chat_template.jinja ADDED
@@ -0,0 +1,6 @@
+{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system
+You are a helpful assistant.<|im_end|>
+' }}{% endif %}{{'<|im_start|>' + message['role'] + '
+' + message['content'] + '<|im_end|>' + '
+'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+' }}{% endif %}
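
The template is ChatML-style; a sketch of how it renders, assuming `tok` from the snippet above:

# Sketch: render a one-turn conversation through the new template.
messages = [{"role": "user", "content": "Hello"}]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Since the first message is not a system turn, the template injects the
# default system prompt:
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant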
merges.txt CHANGED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json CHANGED
@@ -2,31 +2,17 @@
   "additional_special_tokens": [
     " <|NEWLINE|> "
   ],
-  "bos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
   "eos_token": {
-    "content": "<|endoftext|>",
+    "content": "<|im_end|>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<|endoftext|>",
+    "content": "<|im_end|>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   }
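
With bos_token and unk_token dropped and both eos_token and pad_token set to <|im_end|>, padded batches reuse the eos id; a sketch (same `tok` as above, torch assumed for tensors):

# Sketch: pad_token_id now equals eos_token_id (151645).
assert tok.pad_token_id == tok.eos_token_id == 151645

tok.padding_side = "left"  # keeps the generation prompt at the sequence end
batch = tok(["short", "a somewhat longer prompt"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape, batch["attention_mask"][0])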
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,15 +1,31 @@
 {
   "add_prefix_space": false,
   "added_tokens_decoder": {
-    "50256": {
+    "151643": {
       "content": "<|endoftext|>",
       "lstrip": false,
-      "normalized": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "50257": {
+    "151646": {
       "content": " <|NEWLINE|> ",
       "lstrip": false,
       "normalized": false,
@@ -21,12 +37,14 @@
   "additional_special_tokens": [
     " <|NEWLINE|> "
   ],
-  "bos_token": "<|endoftext|>",
+  "bos_token": null,
   "clean_up_tokenization_spaces": false,
-  "eos_token": "<|endoftext|>",
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
   "extra_special_tokens": {},
-  "model_max_length": 1024,
-  "pad_token": "<|endoftext|>",
-  "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": "<|endoftext|>"
+  "model_max_length": 32768,
+  "pad_token": "<|im_end|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
 }
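
A sketch confirming the retargeted config loads as a Qwen2-class tokenizer with the new limits (same `tok` as above):

# Sketch: the config now declares Qwen2Tokenizer; AutoTokenizer typically
# resolves it to the fast variant.
print(type(tok).__name__)            # Qwen2TokenizerFast (or Qwen2Tokenizer)
print(tok.model_max_length)          # 32768, up from 1024
print(tok.bos_token, tok.unk_token)  # None None -- both were removed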
vocab.json CHANGED
The diff for this file is too large to render. See raw diff