class TextTokenizer:
    """Character-level tokenizer with a fixed vocabulary of 128 tokens."""

    def __init__(self):
        # Lowercase letters 'a'-'z' -> ids 0-25.
        self.vocab = {chr(i): i - 97 for i in range(97, 123)}
        # Digits '0'-'9' -> ids 26-35.
        self.vocab.update({str(i): 26 + i for i in range(10)})
        # Special tokens: unknown character, beginning of text, end of text.
        self.vocab['<unk>'] = 36
        self.vocab['<bot>'] = 37
        self.vocab['<eot>'] = 38
        # Common punctuation -> ids 39-46.
        self.vocab[' '] = 39
        self.vocab['.'] = 40
        self.vocab[','] = 41
        self.vocab['!'] = 42
        self.vocab['?'] = 43
        self.vocab['\''] = 44
        self.vocab['"'] = 45
        self.vocab['-'] = 46

        # Less common characters -> ids 47-64.
        additional_chars = ['”', 'â', ')', ']', '“', 'è', '£', ';', 'à', '$', '[', '’', 'ü', 'ê', ':', '(', 'é', '&']
        for i, char in enumerate(additional_chars):
            self.vocab[char] = 47 + i

        # Pad the vocabulary up to a fixed size of 128 with unused placeholder tokens.
        self.vocab_size = 128
        next_id = len(self.vocab)
        self.vocab.update({f'<unk{next_id + i}>': next_id + i for i in range(self.vocab_size - next_id)})

    def tokenize(self, text):
        # Lowercase so uppercase letters share ids with their lowercase forms.
        text = text.lower()
        # Wrap the character ids in <bot>/<eot> markers; unmapped characters fall back to <unk>.
        tokens = [self.vocab['<bot>']]
        tokens.extend(self.vocab.get(char, self.vocab['<unk>']) for char in text)
        tokens.append(self.vocab['<eot>'])
        return tokens

    def detokenize(self, tokens):
        # Invert the vocabulary; every id is unique, so the inverse mapping is well defined.
        id_to_char = {v: k for k, v in self.vocab.items()}
        # Skip the <bot>/<eot> markers and map the remaining ids back to characters.
        specials = {self.vocab['<bot>'], self.vocab['<eot>']}
        text = ''.join(id_to_char.get(token, '<unk>') for token in tokens if token not in specials)
        return text
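
# As a usage sketch (not part of the class itself), a tokenize/detokenize
# round trip looks like this; the sample string is arbitrary, and the ids
# follow from the vocabulary defined above:
tokenizer = TextTokenizer()
tokens = tokenizer.tokenize("Hello, world!")
print(tokens)                        # [37, 7, 4, 11, 11, 14, 41, 39, 22, 14, 17, 11, 3, 42, 38]
print(tokenizer.detokenize(tokens))  # "hello, world!" (lowercased; <bot>/<eot> stripped)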