from __future__ import annotations
import shutil
from pathlib import Path

import sentencepiece as spm
from transformers import PreTrainedTokenizer


class HanForgeTokenizer(PreTrainedTokenizer):
    """A slow (pure-Python) Hugging Face tokenizer backed by a SentencePiece model."""

    vocab_files_names = {"vocab_file": "tokenizer.model"}
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file: str,
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        unk_token: str = "<unk>",
        pad_token: str = "<pad>",
        additional_special_tokens: list[str] | None = None,
        **kwargs,
    ):
        self.vocab_file = vocab_file
        # Load the SentencePiece model before calling super().__init__(),
        # which may query vocab_size/get_vocab during special-token setup.
        self.sp_model = spm.SentencePieceProcessor(model_file=vocab_file)
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            additional_special_tokens=additional_special_tokens or [],
            **kwargs,
        )
    @property
    def vocab_size(self) -> int:
        return int(self.sp_model.vocab_size())

    def get_vocab(self) -> dict[str, int]:
        # Base vocabulary from SentencePiece, plus any tokens added at runtime.
        vocab = {self.sp_model.id_to_piece(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> list[str]:
        return list(self.sp_model.encode(text, out_type=str))

    def _convert_token_to_id(self, token: str) -> int:
        return int(self.sp_model.piece_to_id(token))

    def _convert_id_to_token(self, index: int) -> str:
        return str(self.sp_model.id_to_piece(index))

    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        return self.sp_model.decode_pieces(tokens)
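
    # Illustrative round trip (actual piece strings depend on the trained model;
    # the pieces below are hypothetical):
    #   _tokenize("hello world")                      -> ["▁hello", "▁world"]
    #   convert_tokens_to_string(["▁hello", "▁world"]) -> "hello world"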
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: <s> ids_0 </s>; pair: <s> ids_0 ids_1 </s>.
        output = [self.bos_token_id] + list(token_ids_0)
        if token_ids_1 is not None:
            output += list(token_ids_1)
        output += [self.eos_token_id]
        return output
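
    # Hypothetical illustration (assuming <s> maps to id 1 and </s> to id 2):
    #   build_inputs_with_special_tokens([5, 6])         -> [1, 5, 6, 2]
    #   build_inputs_with_special_tokens([5, 6], [7, 8]) -> [1, 5, 6, 7, 8, 2]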
    def save_vocabulary(self, save_directory: str, filename_prefix: str | None = None):
        save_dir = Path(save_directory)
        save_dir.mkdir(parents=True, exist_ok=True)
        out_name = f"{filename_prefix + '-' if filename_prefix else ''}tokenizer.model"
        out_path = save_dir / out_name
        # Avoid copying the model file onto itself when saving in place.
        if Path(self.vocab_file).resolve() != out_path.resolve():
            shutil.copy2(self.vocab_file, out_path)
        # Also copy the optional human-readable .vocab sidecar if present.
        vocab_src = Path(self.vocab_file).with_suffix(".vocab")
        if vocab_src.exists():
            vocab_out = save_dir / f"{filename_prefix + '-' if filename_prefix else ''}tokenizer.vocab"
            if vocab_src.resolve() != vocab_out.resolve():
                shutil.copy2(vocab_src, vocab_out)
        return (str(out_path),)
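

# A minimal usage sketch, assuming a trained SentencePiece model exists at the
# hypothetical path "tokenizer.model"; the text and output are illustrative.
if __name__ == "__main__":
    tok = HanForgeTokenizer(vocab_file="tokenizer.model")
    # __call__ adds <s> ... </s> via build_inputs_with_special_tokens.
    encoded = tok("Hello, world!")
    print(encoded["input_ids"])
    print(tok.decode(encoded["input_ids"], skip_special_tokens=True))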