{
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": false,
  "mask_token": "[MASK]",
  "max_len": 512,
  "model_max_length": 512,
  "never_split": [
    "+ك",
    "+كما",
    "ك+",
    "+وا",
    "+ين",
    "و+",
    "+كن",
    "+ان",
    "+هم",
    "+ة",
    "[بريد]",
    "لل+",
    "+ي",
    "+ت",
    "+ن",
    "س+",
    "ل+",
    "[مستخدم]",
    "+كم",
    "+ا",
    "ب+",
    "ف+",
    "+ها",
    "+نا",
    "+ون",
    "+هما",
    "ال+",
    "+ه",
    "+هن",
    "+ات",
    "[رابط]"
  ],
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}