paragon-analytics committed
Commit 993aab7 · 1 Parent(s): eb18b1d

Upload tokenizer

Files changed (4)
  1. special_tokens_map.json +5 -49
  2. tokenizer.json +0 -0
  3. tokenizer_config.json +9 -60
  4. vocab.txt +0 -0
special_tokens_map.json CHANGED
@@ -1,51 +1,7 @@
 {
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "<mask>",
-    "lstrip": true,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
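
The map above replaces the RoBERTa-style special tokens (<s>, </s>, <mask>, ...) with BERT-style ones. A minimal sketch of how the uploaded tokenizer files could be checked with the transformers library follows; the local path "./tokenizer_dir" is a placeholder for wherever the files are stored, not part of this commit.

from transformers import AutoTokenizer

# Load from wherever the uploaded files (tokenizer_config.json, vocab.txt, ...) live.
tok = AutoTokenizer.from_pretrained("./tokenizer_dir")

# With the updated special_tokens_map.json these should print the BERT-style tokens:
print(tok.cls_token, tok.sep_token, tok.pad_token, tok.unk_token, tok.mask_token)
# [CLS] [SEP] [PAD] [UNK] [MASK]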
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,65 +1,14 @@
 {
-  "add_prefix_space": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "__type": "AddedToken",
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "cls_token": "[CLS]",
   "do_lower_case": true,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "errors": "replace",
-  "mask_token": {
-    "__type": "AddedToken",
-    "content": "<mask>",
-    "lstrip": true,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "mask_token": "[MASK]",
   "model_max_length": 512,
-  "name_or_path": "roberta-base",
-  "pad_token": {
-    "__type": "AddedToken",
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "name_or_path": "bert-base-uncased",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
   "special_tokens_map_file": null,
-  "tokenizer_class": "RobertaTokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
 }
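
The updated config switches the class from RobertaTokenizer to BertTokenizer with do_lower_case=true and model_max_length=512, so encoded text is lowercased and wrapped in [CLS]/[SEP]. A small illustration, assuming the transformers library; "bert-base-uncased" is only the upstream checkpoint named in name_or_path and is used here for illustration.

from transformers import BertTokenizer

tok = BertTokenizer.from_pretrained("bert-base-uncased")
enc = tok("Hello World", truncation=True, max_length=512)
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# ['[CLS]', 'hello', 'world', '[SEP]']  -- lowercased, wrapped in CLS/SEP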
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
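
For reference, vocab.txt for a BertTokenizer is a plain WordPiece vocabulary with one token per line, the line index being the token id. A quick sanity check, assuming the file has been downloaded locally:

# Count the vocabulary entries and show the first few tokens.
with open("vocab.txt", encoding="utf-8") as f:
    vocab = [line.rstrip("\n") for line in f]
print(len(vocab), vocab[:5])  # bert-base-uncased-style vocabularies have 30522 entries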