add tokenizer
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
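For reference (not part of this commit), the sketch below shows how these settings are typically consumed: transformers' Wav2Vec2CTCTokenizer can be constructed from the vocab file plus the tokens configured above; it tokenizes character by character, substitutes word_delimiter_token ("|") for spaces when encoding, and uses replace_word_delimiter_char (" ") to restore spaces when decoding. The file path and sample string are illustrative.

from transformers import Wav2Vec2CTCTokenizer

# Build the tokenizer directly from the vocab file added in this commit,
# passing the same special-token and delimiter settings as tokenizer_config.json.
tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",                 # local path assumed
    bos_token="<s>",
    eos_token="</s>",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
    replace_word_delimiter_char=" ",
    do_lower_case=False,
)

# Encoding is character-level; spaces become the word delimiter "|" (id 1).
ids = tokenizer("How are you").input_ids
print(ids)

# Decoding maps "|" back to a space. By default decode() also collapses
# repeated characters (CTC-style grouping), so the sample string deliberately
# has no adjacent duplicate letters.
print(tokenizer.decode(ids))  # "How are you"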
vocab.json
ADDED
@@ -0,0 +1 @@
+{"s": 0, "R": 2, "T": 3, "t": 4, "o": 5, "w": 6, "N": 7, "j": 8, "S": 9, "\"": 10, "K": 11, "k": 12, "U": 13, "d": 14, "p": 15, "r": 16, "E": 17, "y": 18, "X": 19, "B": 20, "u": 21, ":": 22, "c": 23, "I": 24, "A": 25, "F": 26, ",": 27, "W": 28, "L": 29, "!": 30, "a": 31, "V": 32, "G": 33, "D": 34, "i": 35, "Y": 36, "n": 37, "q": 38, "Q": 39, "f": 40, "h": 41, "J": 42, "l": 43, "-": 44, "H": 45, "g": 46, "b": 47, "m": 48, "v": 49, "?": 50, "O": 51, "e": 52, "z": 53, "'": 54, ".": 55, ";": 56, "P": 57, "C": 58, "x": 59, "M": 60, "|": 1, "[UNK]": 61, "[PAD]": 62}
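Together, these three files are what transformers needs to reconstruct the tokenizer from a directory or Hub repo. A minimal, hedged sketch (the "./tokenizer" path is a placeholder, not something added by this commit): from_pretrained reads tokenizer_config.json, special_tokens_map.json, and vocab.json, yielding the 63-entry character vocabulary above, with "[PAD]" (id 62) commonly reused as the CTC blank when fine-tuning a CTC model and "|" (id 1) acting as the word delimiter.

from transformers import Wav2Vec2CTCTokenizer

# Placeholder path: point this at the directory (or Hub repo id) holding
# special_tokens_map.json, tokenizer_config.json, and vocab.json.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./tokenizer")

print(tokenizer.vocab_size)              # 63 character-level entries from vocab.json
print(tokenizer.pad_token_id)            # 62 ("[PAD]")
print(tokenizer.word_delimiter_token)    # "|"
print(tokenizer.convert_tokens_to_ids(["H", "o", "w", "|"]))  # [45, 5, 6, 1]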