import json
import os

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """Whitespace tokenizer for chess move sequences: one token per move."""

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(self, vocab_file=None, vocab=None, **kwargs):
        # Pin the special tokens and drop any caller-supplied overrides so
        # the vocabulary and the special tokens can never disagree.
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Resolve the vocabulary: an in-memory dict wins over a file on
        # disk; otherwise fall back to the special tokens alone.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocabulary must exist before super().__init__(), which probes
        # it (via get_vocab / _convert_token_to_id) while registering the
        # special tokens.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self):
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        return {token: idx for idx, token in enumerate(special_tokens)}

    @property
    def vocab_size(self):
        return len(self._vocab)

    def get_vocab(self):
        # Return a copy so callers cannot mutate the internal mapping.
        return dict(self._vocab)

    def _tokenize(self, text):
        # Moves are whitespace-separated, so splitting is the whole job.
        return text.strip().split()

    def _convert_token_to_id(self, token):
        # Unknown moves map to the UNK id (0 as a last resort).
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index):
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens):
        # Drop special tokens when rendering a move sequence back to text.
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
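

# Illustrative usage sketch (not part of the original module): builds a small
# vocabulary from a few assumed UCI-style moves and round-trips a sequence
# through the tokenizer. The move list is hypothetical; any whitespace-
# separated move notation works the same way.
if __name__ == "__main__":
    moves = ["e2e4", "e7e5", "g1f3", "b8c6"]
    specials = [
        ChessTokenizer.PAD_TOKEN,
        ChessTokenizer.BOS_TOKEN,
        ChessTokenizer.EOS_TOKEN,
        ChessTokenizer.UNK_TOKEN,
    ]
    vocab = {token: idx for idx, token in enumerate(specials + moves)}

    tokenizer = ChessTokenizer(vocab=vocab)
    encoded = tokenizer("e2e4 e7e5 g1f3")
    print(encoded["input_ids"])  # ids of the three moves: [4, 5, 6]

    # An out-of-vocabulary move falls back to the UNK id.
    print(tokenizer("a2a3")["input_ids"])  # [3]

    decoded = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(encoded["input_ids"])
    )
    print(decoded)  # "e2e4 e7e5 g1f3"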