test-model-1 / tokenizer_config.json
{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "/Users/nevenasekaric/Desktop/smart_search_old/trained_models/platforma/0_Transformer", "tokenizer_class": "XLMRobertaTokenizer"}