import sentencepiece as spm
class TTSTokenizer:
    """Thin wrapper around a SentencePiece model for TTS text tokenization.

    Holds a single ``SentencePieceProcessor`` instance (``self.sp``) loaded
    from a serialized model file and exposes symmetric encode/decode helpers.
    """

    def __init__(self, model_path):
        """Load the serialized SentencePiece model at *model_path*."""
        processor = spm.SentencePieceProcessor()
        processor.load(model_path)
        self.sp = processor

    def encode(self, text):
        """Return *text* tokenized as a list of integer piece ids."""
        # out_type=int requests raw ids rather than string pieces.
        return self.sp.encode(text, out_type=int)

    def decode(self, tokens):
        """Reassemble a sequence of piece ids back into a text string."""
        return self.sp.decode(tokens)