""" |
|
|
Character-level tokenizer compatible with HuggingFace transformers. |
|
|
""" |
|
|
|
|
|
import json |
|
|
import os |
|
|
from typing import Dict, List, Optional |
|
|
|
|
|
from transformers import PreTrainedTokenizer |
|
|
|
|
|
|
|
|
class CharTokenizer(PreTrainedTokenizer):
    """
    Character-level tokenizer that treats each character as a token.
    Compatible with HuggingFace transformers.
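
    Example (an illustrative sketch; the exact ids depend on the characters supplied):

        >>> tok = CharTokenizer(characters="abcdefghijklmnopqrstuvwxyz ")
        >>> ids = tok.encode("hello", add_special_tokens=False)  # one id per character
        >>> tok.convert_ids_to_tokens(ids)
        ['h', 'e', 'l', 'l', 'o']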
""" |
|
|
|
|
|
|
|
|
vocab_files_names = {"vocab_file": "vocab.json"} |
|
|
|
|
|
    def __init__(
        self,
        vocab_file: Optional[str] = None,
        characters: Optional[str] = None,
        model_max_length: int = 512,
        padding_side: str = "right",
        **kwargs,
    ):
        """
        Initialize the character tokenizer.

        Args:
            vocab_file: Path to a vocabulary file (vocab.json) to load.
                This is the first argument for HuggingFace compatibility.
            characters: String of characters to include in the vocabulary.
                If None, the vocabulary is built from training data or loaded from vocab_file.
            model_max_length: Maximum sequence length.
            padding_side: Which side to pad on ("left" or "right").
        """
        # Default special tokens; any of them can be overridden via kwargs.
        pad_token = kwargs.pop("pad_token", "<pad>")
        unk_token = kwargs.pop("unk_token", "<unk>")
        bos_token = kwargs.pop("bos_token", "<s>")
        eos_token = kwargs.pop("eos_token", "</s>")
        user_token = kwargs.pop("user_token", "<|user|>")
        assistant_token = kwargs.pop("assistant_token", "<|assistant|>")
        system_token = kwargs.pop("system_token", "<|system|>")
        eot_token = kwargs.pop("eot_token", "<|end|>")
        mask_token = kwargs.pop("mask_token", "<|mdm_mask|>")

        self.char_to_id = {}
        self.id_to_char = {}

        # Either load an existing vocabulary or build one from the provided characters.
        if vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self.char_to_id = json.load(f)
            self.id_to_char = {int(idx): char for char, idx in self.char_to_id.items()}
            # JSON may round-trip ids as strings, so normalize them back to ints.
            self.char_to_id = {
                char: int(idx) if isinstance(idx, str) else idx
                for char, idx in self.char_to_id.items()
            }
        elif characters is not None:
            # Special tokens come first so they get stable, low ids.
            special_tokens = [
                pad_token,
                unk_token,
                bos_token,
                eos_token,
                user_token,
                assistant_token,
                system_token,
                eot_token,
                mask_token,
            ]
            unique_chars = []
            for char in characters:
                if char not in unique_chars and char not in special_tokens:
                    unique_chars.append(char)
            all_tokens = special_tokens + sorted(unique_chars)
            self.char_to_id = {char: idx for idx, char in enumerate(all_tokens)}
            self.id_to_char = {idx: char for char, idx in self.char_to_id.items()}

        # Build the vocab maps before calling the base constructor, since
        # PreTrainedTokenizer may consult get_vocab() during __init__.
        super().__init__(
            pad_token=pad_token,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            user_token=user_token,
            assistant_token=assistant_token,
            system_token=system_token,
            eot_token=eot_token,
            mask_token=mask_token,
            model_max_length=model_max_length,
            padding_side=padding_side,
            **kwargs,
        )

        # Register the core special tokens with the added-token machinery so the
        # base class recognizes them, reusing the ids they already hold in the vocab.
        from transformers.tokenization_utils import AddedToken

        special_tokens_to_register = [pad_token, unk_token, bos_token, eos_token]
        for token in special_tokens_to_register:
            if token is not None and token in self.char_to_id:
                token_id = self.char_to_id[token]
                if token not in self._added_tokens_encoder:
                    added_token = AddedToken(token, special=True, normalized=False)
                    self._added_tokens_encoder[token] = token_id
                    self._added_tokens_decoder[token_id] = added_token
                    self._update_trie()

    @property
    def vocab_size(self) -> int:
        """Return vocabulary size including added tokens."""
        base_size = len(self.char_to_id)
        if hasattr(self, "added_tokens_decoder") and self.added_tokens_decoder:
            max_added_id = max(int(k) for k in self.added_tokens_decoder.keys())
            return max(base_size, max_added_id + 1)
        return base_size

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary dictionary."""
        return self.char_to_id.copy()

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize text into individual characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (character) to an id."""
        token_str = str(token) if not isinstance(token, str) else token
        # Unknown characters fall back to the unk token's id (1 in the default layout).
        return self.char_to_id.get(token_str, self.char_to_id.get(self.unk_token, 1))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (character)."""
        return self.id_to_char.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert tokens back to a string."""
        return "".join(tokens)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        """Save the vocabulary to a file."""
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.json"
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self.char_to_id, f, ensure_ascii=False, indent=2)

        return (vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs by adding special tokens.
        Format: <s> token_ids_0 </s> [<s> token_ids_1 </s>]
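
        Example (an illustrative sketch; the exact ids depend on the vocabulary):

            >>> tok = CharTokenizer(characters="ab")
            >>> tok.build_inputs_with_special_tokens([9, 10])  # ids for "a" and "b"
            [2, 9, 10, 3]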
""" |
|
|
bos = [self.bos_token_id] if self.bos_token_id is not None else [] |
|
|
eos = [self.eos_token_id] if self.eos_token_id is not None else [] |
|
|
|
|
|
if token_ids_1 is None: |
|
|
return bos + token_ids_0 + eos |
|
|
|
|
|
return bos + token_ids_0 + eos + bos + token_ids_1 + eos |
|
|
|
|
|
def get_special_tokens_mask( |
|
|
self, |
|
|
token_ids_0: List[int], |
|
|
token_ids_1: Optional[List[int]] = None, |
|
|
already_has_special_tokens: bool = False, |
|
|
) -> List[int]: |
|
|
""" |
|
|
Get mask for special tokens. |
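
        Example (an illustrative sketch; assumes the default <s>/</s> tokens are set):

            >>> tok = CharTokenizer(characters="ab")
            >>> tok.get_special_tokens_mask([9, 10])
            [1, 0, 0, 1]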
""" |
|
|
if already_has_special_tokens: |
|
|
return super().get_special_tokens_mask( |
|
|
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True |
|
|
) |
|
|
|
|
|
bos_mask = [1] if self.bos_token_id is not None else [] |
|
|
eos_mask = [1] if self.eos_token_id is not None else [] |
|
|
|
|
|
if token_ids_1 is None: |
|
|
return bos_mask + ([0] * len(token_ids_0)) + eos_mask |
|
|
|
|
|
return ( |
|
|
bos_mask |
|
|
+ ([0] * len(token_ids_0)) |
|
|
+ eos_mask |
|
|
+ bos_mask |
|
|
+ ([0] * len(token_ids_1)) |
|
|
+ eos_mask |
|
|
) |
|
|
|
|
|
|
|
|
def create_char_tokenizer_from_file(
    file_path: str, save_directory: str, model_max_length: int = 512, **kwargs
) -> CharTokenizer:
    """
    Create and save a character tokenizer from a text file.

    Args:
        file_path: Path to text file to build vocabulary from.
        save_directory: Directory to save the tokenizer.
        model_max_length: Maximum sequence length.
        **kwargs: Additional arguments for CharTokenizer.

    Returns:
        Initialized CharTokenizer.
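
    Example (illustrative; the file and directory paths are placeholders):
        tokenizer = create_char_tokenizer_from_file("corpus.txt", "char_tokenizer/")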
""" |
|
|
|
|
|
with open(file_path, "r", encoding="utf-8") as f: |
|
|
text = f.read() |
|
|
|
|
|
|
|
|
tokenizer = CharTokenizer(characters=text, model_max_length=model_max_length, **kwargs) |
|
|
|
|
|
|
|
|
tokenizer.save_pretrained(save_directory) |
|
|
|
|
|
print(f"Character tokenizer created with vocabulary size: {tokenizer.vocab_size}") |
|
|
print(f"Saved to: {save_directory}") |
|
|
|
|
|
return tokenizer |
|
|
|
|
|
|
|
|
def create_char_tokenizer_from_dataset(
    dataset,
    text_column: str,
    save_directory: str,
    model_max_length: int = 512,
    max_samples: Optional[int] = None,
    **kwargs,
) -> CharTokenizer:
    """
    Create and save a character tokenizer from a HuggingFace dataset.

    Args:
        dataset: HuggingFace dataset object.
        text_column: Name of the column containing text.
        save_directory: Directory to save the tokenizer.
        model_max_length: Maximum sequence length.
        max_samples: Maximum number of samples to use (None for all).
        **kwargs: Additional arguments for CharTokenizer.

    Returns:
        Initialized CharTokenizer.
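
    Example (illustrative; the dataset, column name, and path are placeholders):
        tokenizer = create_char_tokenizer_from_dataset(ds, "text", "char_tokenizer/")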
""" |
|
|
|
|
|
all_chars = set() |
|
|
|
|
|
samples = ( |
|
|
dataset if max_samples is None else dataset.select(range(min(max_samples, len(dataset)))) |
|
|
) |
|
|
|
|
|
for example in samples: |
|
|
text = example[text_column] |
|
|
all_chars.update(text) |
|
|
|
|
|
|
|
|
characters = "".join(sorted(all_chars)) |
|
|
tokenizer = CharTokenizer(characters=characters, model_max_length=model_max_length, **kwargs) |
|
|
|
|
|
|
|
|
tokenizer.save_pretrained(save_directory) |
|
|
|
|
|
print(f"Character tokenizer created with vocabulary size: {tokenizer.vocab_size}") |
|
|
print(f"Saved to: {save_directory}") |
|
|
|
|
|
return tokenizer |
|
|
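

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): build a tokenizer from a small
    # character set, round-trip a short string, and save it to disk. The
    # "char_tokenizer" output directory is a placeholder path.
    demo = CharTokenizer(characters="abcdefghijklmnopqrstuvwxyz !")
    ids = demo.encode("hello world!", add_special_tokens=False)
    print("ids:", ids)
    print("tokens:", demo.convert_ids_to_tokens(ids))
    print("decoded:", demo.decode(ids))
    demo.save_pretrained("char_tokenizer")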
|