nono1224 committed on
Commit
cb497f9
·
verified ·
1 Parent(s): a53d9c4

Upload tokenizer

Browse files
Files changed (4) hide show
  1. special_tokens_map.json +14 -4
  2. spm.model +3 -0
  3. tokenizer.json +2 -2
  4. tokenizer_config.json +11 -45
special_tokens_map.json CHANGED
@@ -1,8 +1,11 @@
1
  {
2
- "additional_special_tokens": [
3
- "[URL]",
4
- "[USER]"
5
- ],
 
 
 
6
  "cls_token": {
7
  "content": "[CLS]",
8
  "lstrip": false,
@@ -10,6 +13,13 @@
10
  "rstrip": false,
11
  "single_word": false
12
  },
 
 
 
 
 
 
 
13
  "mask_token": {
14
  "content": "[MASK]",
15
  "lstrip": false,
 
1
  {
2
+ "bos_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
  "cls_token": {
10
  "content": "[CLS]",
11
  "lstrip": false,
 
13
  "rstrip": false,
14
  "single_word": false
15
  },
16
+ "eos_token": {
17
+ "content": "[SEP]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
  "mask_token": {
24
  "content": "[MASK]",
25
  "lstrip": false,
spm.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a4f774d877b61859dd2df012b51de4f25a0ee46b30b5dbaa886530efec6fc68
3
+ size 969814
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:553c7d94babbd19461d019eef9ff56f66d41058ffdad0e3a81d09b6781d30889
3
- size 17098348
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92b9a8ca700480bf52a97c4dbf7ba5752cda0b008af756f8f20a7fc64acb574b
3
+ size 3381622
tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "added_tokens_decoder": {
3
  "0": {
4
- "content": "[UNK]",
5
  "lstrip": false,
6
  "normalized": false,
7
  "rstrip": false,
@@ -9,7 +9,7 @@
9
  "special": true
10
  },
11
  "1": {
12
- "content": "[SEP]",
13
  "lstrip": false,
14
  "normalized": false,
15
  "rstrip": false,
@@ -17,7 +17,7 @@
17
  "special": true
18
  },
19
  "2": {
20
- "content": "[PAD]",
21
  "lstrip": false,
22
  "normalized": false,
23
  "rstrip": false,
@@ -25,7 +25,7 @@
25
  "special": true
26
  },
27
  "3": {
28
- "content": "[CLS]",
29
  "lstrip": false,
30
  "normalized": false,
31
  "rstrip": false,
@@ -39,54 +39,20 @@
39
  "rstrip": false,
40
  "single_word": false,
41
  "special": true
42
- },
43
- "5": {
44
- "content": "[URL]",
45
- "lstrip": false,
46
- "normalized": false,
47
- "rstrip": false,
48
- "single_word": false,
49
- "special": true
50
- },
51
- "6": {
52
- "content": "[USER]",
53
- "lstrip": false,
54
- "normalized": false,
55
- "rstrip": false,
56
- "single_word": false,
57
- "special": true
58
- },
59
- "31999": {
60
- "content": "\n",
61
- "lstrip": false,
62
- "normalized": true,
63
- "rstrip": false,
64
- "single_word": false,
65
- "special": false
66
  }
67
  },
68
- "additional_special_tokens": [
69
- "[URL]",
70
- "[USER]"
71
- ],
72
  "clean_up_tokenization_spaces": true,
73
  "cls_token": "[CLS]",
74
  "do_lower_case": false,
75
- "do_subword_tokenize": true,
76
- "do_word_tokenize": true,
77
- "jumanpp_kwargs": null,
78
- "keep_newlines": true,
79
  "mask_token": "[MASK]",
80
- "mecab_kwargs": {
81
- "mecab_dic": "unidic_lite"
82
- },
83
- "model_max_length": 512,
84
- "never_split": null,
85
  "pad_token": "[PAD]",
86
  "sep_token": "[SEP]",
87
- "subword_tokenizer_type": "wordpiece",
88
- "sudachi_kwargs": null,
89
- "tokenizer_class": "BertJapaneseTokenizer",
90
  "unk_token": "[UNK]",
91
- "word_tokenizer_type": "mecab"
92
  }
 
1
  {
2
  "added_tokens_decoder": {
3
  "0": {
4
+ "content": "[PAD]",
5
  "lstrip": false,
6
  "normalized": false,
7
  "rstrip": false,
 
9
  "special": true
10
  },
11
  "1": {
12
+ "content": "[CLS]",
13
  "lstrip": false,
14
  "normalized": false,
15
  "rstrip": false,
 
17
  "special": true
18
  },
19
  "2": {
20
+ "content": "[SEP]",
21
  "lstrip": false,
22
  "normalized": false,
23
  "rstrip": false,
 
25
  "special": true
26
  },
27
  "3": {
28
+ "content": "[UNK]",
29
  "lstrip": false,
30
  "normalized": false,
31
  "rstrip": false,
 
39
  "rstrip": false,
40
  "single_word": false,
41
  "special": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  }
43
  },
44
+ "bos_token": "[CLS]",
 
 
 
45
  "clean_up_tokenization_spaces": true,
46
  "cls_token": "[CLS]",
47
  "do_lower_case": false,
48
+ "eos_token": "[SEP]",
 
 
 
49
  "mask_token": "[MASK]",
50
+ "model_max_length": 1000000000000000019884624838656,
 
 
 
 
51
  "pad_token": "[PAD]",
52
  "sep_token": "[SEP]",
53
+ "sp_model_kwargs": {},
54
+ "split_by_punct": false,
55
+ "tokenizer_class": "DebertaV2Tokenizer",
56
  "unk_token": "[UNK]",
57
+ "vocab_type": "spm"
58
  }