{
"additional_special_tokens": [
"[SEC_ABSTRACT]",
"[SEC_ACKNOW]",
"[SEC_CONCL]",
"[SEC_INTRO]",
"[SEC_ACKNOWLEDGMENTS]",
"[SEC_BACKGROUND]",
"[SEC_CATEGORIES_AND_SUBJECT_DESCRIPTORS]",
"[SEC_CONCLUSIONS]",
"[SEC_DISCUSSIONS]",
"[SEC_EVALUATION]",
"[SEC_GENERAL_TERMS]",
"[SEC_INTRODUCTION]",
"[SEC_KEYWORDS]",
"[SEC_METHOD]",
"[SEC_REFERENCES]",
"[SEC_RELATED_WORK]"
],
"bos_token": {
"content": "[CLS]",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"cls_token": {
"content": "[CLS]",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"eos_token": {
"content": "[SEP]",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"mask_token": {
"content": "[MASK]",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": {
"content": "[PAD]",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"sep_token": {
"content": "[SEP]",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false
},
"unk_token": {
"content": "[UNK]",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
}
}