add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json ADDED
@@ -0,0 +1 @@
+{"<s>": 60, "</s>": 61}
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json ADDED
@@ -0,0 +1 @@
+{"،": 1, "؟": 2, "ء": 3, "آ": 4, "ؤ": 5, "ئ": 6, "ا": 7, "ب": 8, "ت": 9, "ث": 10, "ج": 11, "ح": 12, "خ": 13, "د": 14, "ذ": 15, "ر": 16, "ز": 17, "س": 18, "ش": 19, "ص": 20, "ض": 21, "ط": 22, "ظ": 23, "ع": 24, "غ": 25, "ف": 26, "ق": 27, "ل": 28, "م": 29, "ن": 30, "و": 31, "ى": 32, "ي": 33, "ً": 34, "َ": 35, "ُ": 36, "ِ": 37, "ّ": 38, "ٓ": 39, "ٔ": 40, "ٰ": 41, "ٹ": 42, "پ": 43, "چ": 44, "ڈ": 45, "ڑ": 46, "ژ": 47, "ک": 48, "گ": 49, "ں": 50, "ھ": 51, "ہ": 52, "ۂ": 53, "ی": 54, "ے": 55, "۔": 56, "’": 57, "|": 0, "[UNK]": 58, "[PAD]": 59}
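Taken together, these four files define a character-level CTC tokenizer over an Arabic-script (Urdu) vocabulary, with "|" (id 0) as the word delimiter, [UNK]/[PAD] at ids 58/59, and <s>/</s> added as ids 60/61. A minimal sketch of loading and using them, assuming the transformers library is installed and the files sit in the working directory (matching "name_or_path": "./" above); the sample string is a hypothetical example:

```python
from transformers import Wav2Vec2CTCTokenizer

# Loads vocab.json, tokenizer_config.json, special_tokens_map.json
# and added_tokens.json from the current directory.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# Encoding splits the text into characters; spaces become the
# word_delimiter_token "|" (id 0) per tokenizer_config.json.
ids = tokenizer("سلام دنیا").input_ids  # hypothetical sample text
print(ids)
print(tokenizer.decode(ids))  # round-trips back to the original string
```

Because "do_lower_case" is false, the text is passed through without case folding, and any character outside vocab.json maps to [UNK] (id 58).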