Sunxt25 committed
Commit 2e52fad · verified · 1 Parent(s): e37eb91

Delete tokenizer.py

Files changed (1)
  1. tokenizer.py +0 -121
tokenizer.py DELETED
from __future__ import annotations

import json
import os
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """Token-per-field tokenizer for fixed-format chess moves.

    Vocab size: 149 (4 special + 12 pieces + 64 from-squares + 64 to-squares
    + 5 suffixes).
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(self, vocab_file: Optional[str] = None, vocab: Optional[Dict[str, int]] = None, **kwargs):
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]

        # Token inventories: 12 colored pieces, 64 board squares, 5 move suffixes.
        self.colors_pieces = [f'{c}{p}' for c in ['W', 'B'] for p in ['P', 'N', 'B', 'R', 'Q', 'K']]
        self.squares = [f'{f}{r}' for r in '12345678' for f in 'abcdefgh']
        self.suffixes = ["(x)", "(+)", "(+*)", "(o)", "(O)"]

        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            # Build the default vocabulary in a fixed order: specials, pieces,
            # from-squares ("e2_f"), to-squares ("e4_t"), then suffixes.
            self._vocab = {t: i for i, t in enumerate(special_tokens)}
            for cp in self.colors_pieces:
                self._vocab[cp] = len(self._vocab)
            for sq in self.squares:
                self._vocab[f"{sq}_f"] = len(self._vocab)
            for sq in self.squares:
                self._vocab[f"{sq}_t"] = len(self._vocab)
            for suf in self.suffixes:
                self._vocab[suf] = len(self._vocab)

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocab must exist before the base constructor registers special tokens.
        super().__init__(
            pad_token=self.PAD_TOKEN,
            bos_token=self.BOS_TOKEN,
            eos_token=self.EOS_TOKEN,
            unk_token=self.UNK_TOKEN,
            **kwargs,
        )

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """Split each whitespace-separated move into Piece(2) + From(2) + To(2) + optional Suffix."""
        tokens = []
        moves = text.strip().split()

        for move in moves:
            if move in [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]:
                tokens.append(move)
                continue

            # Moves shorter than 6 characters are malformed and silently dropped.
            if len(move) >= 6:
                tokens.append(move[:2])          # piece, e.g. "WP"
                tokens.append(f"{move[2:4]}_f")  # from-square, e.g. "e2_f"
                tokens.append(f"{move[4:6]}_t")  # to-square, e.g. "e4_t"

                if len(move) > 6:
                    suffix = move[6:]
                    if suffix in self.suffixes:
                        tokens.append(suffix)
        return tokens
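
    # Example: _tokenize("WPe2e4 BPe7e5(x)") returns
    # ["WP", "e2_f", "e4_t", "BP", "e7_f", "e5_t", "(x)"].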

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN))

    def _convert_id_to_token(self, index: int) -> str:
        token = self._ids_to_tokens.get(index, self.UNK_TOKEN)
        # Specials decode to the empty string so they drop out of the text.
        if token in [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]:
            return ""
        if token in self.suffixes:
            return token
        # Strip the positional markers: "e2_f" -> "e2", "e4_t" -> "e4".
        return token.replace("_f", "").replace("_t", "")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join([t for t in tokens if t])

    def decode(self, token_ids, skip_special_tokens=True, **kwargs) -> str:
        # Accept anything with .tolist() (tensors, numpy arrays), a bare int,
        # or a plain list of ids.
        if hasattr(token_ids, "tolist"):
            ids = token_ids.tolist()
        elif isinstance(token_ids, int):
            ids = [token_ids]
        else:
            ids = token_ids

        tokens = [self._convert_id_to_token(i) for i in ids]
        return self.convert_tokens_to_string(tokens)
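
    # Round-trip sketch (ids assume the default vocabulary, where WP=4,
    # e2_f=28, e4_t=108): decode([4, 28, 108]) returns "WPe2e4".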

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json")
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
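
    # The written vocab.json maps token strings to ids, e.g. with the default
    # vocabulary: {"[PAD]": 0, "[BOS]": 1, ..., "(O)": 148}.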

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "ChessTokenizer":
        # This override only handles a local directory; a missing vocab.json
        # falls back to the default built-in vocabulary (no Hub download).
        vocab_file = os.path.join(pretrained_model_name_or_path, "vocab.json")
        if not os.path.exists(vocab_file):
            return cls()
        with open(vocab_file, "r", encoding="utf-8") as f:
            vocab = json.load(f)
        return cls(vocab=vocab, **kwargs)
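
For reference, a minimal usage sketch of the deleted class (the moves and the
"chess_tok" directory name are illustrative; the printed ids assume the default
vocabulary built in __init__):

    tok = ChessTokenizer()                    # default 149-token vocab
    tokens = tok._tokenize("WPe2e4 BPe7e5")   # ['WP', 'e2_f', 'e4_t', 'BP', 'e7_f', 'e5_t']
    ids = tok.convert_tokens_to_ids(tokens)   # [4, 28, 108, 10, 68, 116]
    print(tok.decode(ids))                    # WPe2e4BPe7e5

    tok.save_vocabulary("chess_tok")          # writes chess_tok/vocab.json
    tok2 = ChessTokenizer.from_pretrained("chess_tok")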