keiwoo committed (verified)
Commit 15b49d2 · Parent(s): 9e49940

Upload tokenizer.json with huggingface_hub
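The commit message indicates the file was pushed with huggingface_hub. A minimal sketch of how such an upload is typically done follows; the repository id and token are illustrative placeholders, not details taken from this commit.

    # Sketch only: repo id and token are hypothetical placeholders.
    from huggingface_hub import HfApi

    api = HfApi(token="hf_...")  # personal access token (placeholder)
    api.upload_file(
        path_or_fileobj="tokenizer.json",   # local file to upload
        path_in_repo="tokenizer.json",      # destination path in the repo
        repo_id="keiwoo/<repo-name>",       # hypothetical repository id
        commit_message="Upload tokenizer.json with huggingface_hub",
    )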

Files changed (1)
  1. tokenizer.json +180 -0
tokenizer.json ADDED
@@ -0,0 +1,180 @@
+{
+  "version": "1.0",
+  "truncation": {
+    "direction": "Right",
+    "max_length": 16,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
+  "padding": null,
+  "added_tokens": [
+    {
+      "id": 0,
+      "content": "[PAD]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 1,
+      "content": "[UNK]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 2,
+      "content": "[CLS]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 3,
+      "content": "[SEP]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 4,
+      "content": "[MASK]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    }
+  ],
+  "normalizer": {
+    "type": "BertNormalizer",
+    "clean_text": true,
+    "handle_chinese_chars": true,
+    "strip_accents": null,
+    "lowercase": false
+  },
+  "pre_tokenizer": {
+    "type": "BertPreTokenizer"
+  },
+  "post_processor": {
+    "type": "TemplateProcessing",
+    "single": [
+      {
+        "SpecialToken": {
+          "id": "[CLS]",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "A",
+          "type_id": 0
+        }
+      },
+      {
+        "SpecialToken": {
+          "id": "[SEP]",
+          "type_id": 0
+        }
+      }
+    ],
+    "pair": [
+      {
+        "SpecialToken": {
+          "id": "[CLS]",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "A",
+          "type_id": 0
+        }
+      },
+      {
+        "SpecialToken": {
+          "id": "[SEP]",
+          "type_id": 0
+        }
+      },
+      {
+        "Sequence": {
+          "id": "B",
+          "type_id": 1
+        }
+      },
+      {
+        "SpecialToken": {
+          "id": "[SEP]",
+          "type_id": 1
+        }
+      }
+    ],
+    "special_tokens": {
+      "[CLS]": {
+        "id": "[CLS]",
+        "ids": [
+          2
+        ],
+        "tokens": [
+          "[CLS]"
+        ]
+      },
+      "[SEP]": {
+        "id": "[SEP]",
+        "ids": [
+          3
+        ],
+        "tokens": [
+          "[SEP]"
+        ]
+      }
+    }
+  },
+  "decoder": {
+    "type": "WordPiece",
+    "prefix": "##",
+    "cleanup": true
+  },
+  "model": {
+    "type": "WordPiece",
+    "unk_token": "[UNK]",
+    "continuing_subword_prefix": "##",
+    "max_input_chars_per_word": 100,
+    "vocab": {
+      "[PAD]": 0,
+      "[UNK]": 1,
+      "[CLS]": 2,
+      "[SEP]": 3,
+      "[MASK]": 4,
+      "A": 5,
+      "C": 6,
+      "D": 7,
+      "E": 8,
+      "F": 9,
+      "G": 10,
+      "H": 11,
+      "I": 12,
+      "K": 13,
+      "L": 14,
+      "M": 15,
+      "N": 16,
+      "P": 17,
+      "Q": 18,
+      "R": 19,
+      "S": 20,
+      "T": 21,
+      "V": 22,
+      "W": 23,
+      "Y": 24
+    }
+  }
+}
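As uploaded, this is a character-level WordPiece tokenizer over the 20 standard amino-acid letters plus five BERT-style special tokens, with BertNormalizer/BertPreTokenizer, a [CLS] ... [SEP] post-processing template, and truncation to 16 tokens. Since the vocabulary contains no "##"-prefixed continuation pieces, a multi-letter word falls back to [UNK], so residues are passed space-separated in the sketch below (loading via the tokenizers library; the example sequence is arbitrary).

    # Sketch: load the uploaded file with the `tokenizers` library and encode
    # a short, arbitrary protein fragment (residues space-separated so each
    # letter is its own word for the WordPiece model).
    from tokenizers import Tokenizer

    tok = Tokenizer.from_file("tokenizer.json")
    enc = tok.encode("M K T A Y I A K")
    print(enc.tokens)  # ['[CLS]', 'M', 'K', 'T', 'A', 'Y', 'I', 'A', 'K', '[SEP]']
    print(enc.ids)     # [2, 15, 13, 21, 5, 24, 12, 5, 13, 3]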