import os
import json
from transformers import PreTrainedTokenizer


class TwentyQTokenizer(PreTrainedTokenizer):
    """Byte-level tokenizer for TwentyQ.

    Also stores the question and target vocabularies — these are the model's
    "tokens" in the same way that a language model's tokenizer stores its vocabulary.
    """

    # Tells save_pretrained()/from_pretrained() which file carries the vocab.
    vocab_files_names = {"vocab_file": "vocab.json"}

    def __init__(self, vocab_file=None, **kwargs):
        self.questions = []
        self.targets = []
        if vocab_file and os.path.exists(vocab_file):
            with open(vocab_file) as f:
                data = json.load(f)
            self.questions = data.get("questions", [])
            self.targets = data.get("targets", [])

        # Ids 0-255 cover every byte value: printable ASCII as the character
        # itself, everything else as a "<0xNN>" placeholder token.
        self._byte_vocab = {i: chr(i) if 32 <= i < 127 else f"<0x{i:02X}>" for i in range(256)}
        self._byte_vocab[256] = "<pad>"
        self._byte_vocab[257] = "<s>"
        self._byte_vocab[258] = "</s>"
        self._str_to_id = {v: k for k, v in self._byte_vocab.items()}

        kwargs.setdefault("pad_token", "<pad>")
        kwargs.setdefault("bos_token", "<s>")
        kwargs.setdefault("eos_token", "</s>")
        kwargs.setdefault("model_max_length", 4096)
        super().__init__(vocab_file=vocab_file, **kwargs)

    @property
    def vocab_size(self):
        return 259  # 256 byte tokens + <pad>, <s>, </s>

    def get_vocab(self):
        return dict(self._str_to_id)

    def _tokenize(self, text):
        # Encode to UTF-8 and map each byte to its token. Every byte value
        # 0-255 has an entry, so this can never produce an unknown token.
        return [self._byte_vocab[b] for b in text.encode("utf-8")]

    def _convert_token_to_id(self, token):
        # Tokens outside the vocab fall back to id 0 (byte 0x00);
        # _tokenize never emits such tokens.
        return self._str_to_id.get(token, 0)

    def _convert_id_to_token(self, index):
        return self._byte_vocab.get(index, "<0x00>")

    def convert_tokens_to_string(self, tokens):
        # Rebuild the raw byte string (dropping special tokens), then decode
        # it as UTF-8, replacing any invalid byte sequences.
        byte_vals = []
        for t in tokens:
            if t in ("<pad>", "<s>", "</s>"):
                continue
            if t.startswith("<0x") and t.endswith(">"):
                byte_vals.append(int(t[3:-1], 16))
            elif len(t) == 1:
                byte_vals.append(ord(t))
        return bytes(byte_vals).decode("utf-8", errors="replace")

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # Only the question/target lists need persisting; the byte-level
        # vocab is rebuilt deterministically in __init__.
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w") as f:
            json.dump({"questions": self.questions, "targets": self.targets}, f, indent=2)
        return (vocab_file,)
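

if __name__ == "__main__":
    # Minimal smoke test: round-trip a string through the byte-level vocab.
    # Illustrative only; real question/target lists would come from a saved
    # vocab.json rather than the empty defaults used here.
    tok = TwentyQTokenizer()
    ids = tok("Is it bigger than a breadbox?")["input_ids"]
    assert tok.decode(ids) == "Is it bigger than a breadbox?"
    print(tok.convert_ids_to_tokens(ids[:5]))  # ['I', 's', ' ', 'i', 't']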