arnavmahapatra committed
Commit 195c6e5 · verified · 1 Parent(s): e321dc1

Upload tokenizer_config.json with huggingface_hub

Files changed (1)
  1. tokenizer_config.json +29 -31
tokenizer_config.json CHANGED
@@ -1,40 +1,38 @@
 {
   "add_bos_token": false,
   "add_prefix_space": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<|startoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50257": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50258": {
+      "content": "<|pad|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
   },
+  "bos_token": "<|startoftext|>",
   "clean_up_tokenization_spaces": true,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "eos_token": "<|endoftext|>",
   "errors": "replace",
   "model_max_length": 2048,
-  "pad_token": {
-    "__type": "AddedToken",
-    "content": "<|pad|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "<|pad|>",
   "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "unk_token": "<|endoftext|>"
 }
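
In short, this commit migrates the special tokens from the legacy inline AddedToken objects to the added_tokens_decoder map keyed by token ID, the serialization format used by newer versions of transformers (roughly v4.34 onward). A minimal sketch of verifying the result after the change, assuming a local checkpoint path (the "./checkpoint" path is a placeholder, not part of the commit):

from transformers import AutoTokenizer

# Placeholder path; substitute the actual repo id or local checkpoint dir.
tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

# The special tokens should resolve to the IDs declared in added_tokens_decoder:
# 50256 -> <|endoftext|>, 50257 -> <|startoftext|>, 50258 -> <|pad|>.
print(tokenizer.bos_token, tokenizer.bos_token_id)  # <|startoftext|> 50257
print(tokenizer.eos_token, tokenizer.eos_token_id)  # <|endoftext|> 50256
print(tokenizer.pad_token, tokenizer.pad_token_id)  # <|pad|> 50258

# Per the config, unk_token reuses <|endoftext|>, so it shares ID 50256.
print(tokenizer.unk_token, tokenizer.unk_token_id)  # <|endoftext|> 50256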