| | """ |
| | Custom Chess Tokenizer for the Chess Challenge. |
| | |
| | This tokenizer treats each move as a single token using the extended UCI notation |
| | from the Lichess dataset (e.g., WPe2e4, BNg8f6). |
| | |
| | The dataset format uses: |
| | - W/B prefix for White/Black |
| | - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King |
| | - Source and destination squares (e.g., e2e4) |
| | - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling |
| | """ |

from __future__ import annotations

import json
import os
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizerOld(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using extended UCI notation.

    This tokenizer maps each possible chess move to a unique token ID.
    The vocabulary is built from the training dataset to ensure all moves
    encountered during training have a corresponding token.

    Example:
        >>> tokenizer = ChessTokenizerOld.build_vocab_from_iterator(["WPe2e4 BPe7e5"])
        >>> tokenizer.encode("WPe2e4 BPe7e5")
        [5, 4]  # one ID per move: [WPe2e4, BPe7e5]; special tokens occupy IDs 0-3
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Special tokens are fixed by the class; drop any caller-supplied overrides.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load the vocabulary from the explicit dict, then the file, then fall
        # back to the minimal default (special tokens only).
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        # Reverse mapping for ID -> token lookups.
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create a minimal default vocabulary with just special tokens.

        For the full vocabulary, use `build_vocab_from_dataset()`.
        This minimal vocab is only a placeholder; build the real vocabulary from data.
        """
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens)}
        return vocab

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizerOld":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included.

        Returns:
            A ChessTokenizerOld with the built vocabulary.
        """
        from collections import Counter

        token_counts = Counter()

        for game in iterator:
            moves = game.strip().split()
            token_counts.update(moves)

        # Keep move tokens that meet the frequency threshold, sorted for stable IDs.
        tokens = [
            token for token, count in token_counts.items()
            if count >= min_frequency
        ]
        tokens = sorted(tokens)

        # Special tokens come first so they get the lowest IDs.
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}

        return cls(vocab=vocab)

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizerOld":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Args:
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included (default: 500).
            max_samples: Maximum number of samples to process (default: 100k).

        Returns:
            A ChessTokenizerOld with the built vocabulary.
        """
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into a list of tokens.

        Args:
            text: A string of space-separated moves.

        Returns:
            List of move tokens.
        """
        return text.strip().split()

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a list of tokens back to a string."""
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
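

# Illustrative usage sketch (added for clarity; this helper is hypothetical and
# not part of the original API). It builds a tiny move-level vocabulary from
# in-memory games and round-trips one position through encode()/decode().
def _demo_move_level_tokenizer() -> None:
    games = [
        "WPe2e4 BPe7e5 WNg1f3 BNb8c6",
        "WPd2d4 BPd7d5 WPc2c4 BPe7e6",
    ]
    tokenizer = ChessTokenizerOld.build_vocab_from_iterator(games, min_frequency=1)
    ids = tokenizer.encode("WPe2e4 BPe7e5", add_special_tokens=False)
    print(ids)                    # one ID per move token
    print(tokenizer.decode(ids))  # expected to give back "WPe2e4 BPe7e5"
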
def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count token frequencies in a dataset (useful for vocabulary analysis).

    Args:
        dataset_name: Name of the dataset on Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.
        max_samples: Maximum number of samples to process.

    Returns:
        Dictionary mapping tokens to their frequencies.
    """
    from collections import Counter
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)

    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()

    for example in dataset:
        moves = example[column].strip().split()
        token_counts.update(moves)

    return dict(token_counts)
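

# Illustrative follow-up (hypothetical helper, not part of the original module):
# previews how many move tokens would survive different min_frequency cut-offs
# before committing to build_vocab_from_dataset(). Requires the `datasets`
# library and network access to the Hugging Face Hub.
def _report_vocab_sizes(thresholds=(1, 10, 100, 500), max_samples: int = 1000) -> None:
    counts = count_vocab_from_dataset(max_samples=max_samples)
    for threshold in thresholds:
        kept = sum(1 for count in counts.values() if count >= threshold)
        print(f"min_frequency={threshold}: {kept} move tokens kept")
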
class ChessTokenizer(PreTrainedTokenizer):
    """
    A sophisticated chess tokenizer that decomposes moves into components.

    Instead of treating each move as a single token (1600+ vocabulary),
    this tokenizer breaks down moves into smaller, reusable components:
    - Color (White/Black)
    - Piece type (Pawn, Knight, Bishop, Rook, Queen, King)
    - Source square (a1-h8)
    - Destination square (a1-h8)
    - Special notation (capture, check, checkmate, castling)

    This compositional approach reduces vocabulary size to ~1200 tokens
    while maintaining full expressiveness.

    Example:
        >>> tokenizer = ChessTokenizer()
        >>> # "WPe2e4" becomes tokens for [White, Pawn, e2, e4]
        >>> tokenizer.encode("WPe2e4 BPe7e5")
        [4, 6, 44, 46, 5, 6, 49, 47]  # illustrative IDs for [W, P, e2, e4, B, P, e7, e5]
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Component tokens. Note that "[B]" does double duty: it is both the Black
    # color token and the Bishop piece token, so the two lists share a single
    # vocabulary entry and the position within a move disambiguates them.
    COLOR_TOKENS = ["[W]", "[B]"]
    PIECE_TOKENS = ["[P]", "[N]", "[B]", "[R]", "[Q]", "[K]"]
    SQUARE_TOKENS = [f"[{file}{rank}]" for file in "abcdefgh" for rank in "12345678"]
    SPECIAL_TOKENS_MOVE = [
        "[x]",
        "[+]",
        "[#+]",
        "[o]",
        "[O]",
    ]

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the component-based chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Special tokens are fixed by the class; drop any caller-supplied overrides.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load the vocabulary from the explicit dict, then the file, then fall
        # back to the generated component vocabulary.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_component_vocab()

        # Reverse mapping for ID -> token lookups.
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_component_vocab(self) -> Dict[str, int]:
        """
        Create a vocabulary from pre-defined components.

        Structure:
            - Special tokens (4)
            - Color tokens (2)
            - Piece tokens (6)
            - Square tokens (64)
            - Move notation tokens (5)

        Total: 81 component slots (80 unique entries, since "[B]" is shared by
        the Black color and the Bishop piece) for complete coverage.
        Plus additional tokens for padding and special cases.
        Target vocab size: ~1200 (with room for learned variants/compressed sequences).
        """
        vocab = {}
        idx = 0

        # Special tokens first so they get the lowest IDs.
        special_tokens = [
            self.PAD_TOKEN,
            self.BOS_TOKEN,
            self.EOS_TOKEN,
            self.UNK_TOKEN,
        ]
        for token in special_tokens:
            vocab[token] = idx
            idx += 1

        # Component tokens. The `not in vocab` guard matters because "[B]" appears
        # in both COLOR_TOKENS and PIECE_TOKENS; without it the duplicate would
        # overwrite its first ID and leave a hole in the ID space.
        for token in (
            self.COLOR_TOKENS
            + self.PIECE_TOKENS
            + self.SQUARE_TOKENS
            + self.SPECIAL_TOKENS_MOVE
        ):
            if token not in vocab:
                vocab[token] = idx
                idx += 1

        # Reserve IDs for common move patterns (see _get_common_move_patterns).
        common_patterns = self._get_common_move_patterns()
        for pattern in common_patterns:
            if pattern not in vocab:
                vocab[pattern] = idx
                idx += 1

        return vocab
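
    # Size check: the component inventory above yields 80 unique entries
    # (4 special + 2 color + 6 piece + 64 square + 5 notation tokens, minus the
    # shared "[B]"), and _get_common_move_patterns() reserves up to 1,100 more,
    # so the default vocabulary lands around 1,180 entries, consistent with the
    # ~1200 target in the class docstring.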

    def _get_common_move_patterns(self) -> List[str]:
        """
        Generate common move patterns to populate the vocabulary.

        These are frequently occurring square-to-square sequences that could be
        pre-tokenized for efficiency while keeping the total vocabulary
        manageable. Note that `_tokenize()` as written only ever emits
        per-component tokens, so these entries act as reserved capacity in the
        ID space rather than tokens produced during encoding.
        """
        patterns = []

        # Enumerate source/destination pairs, keeping only "short" moves whose
        # files and ranks are each at most two apart.
        for file1 in "abcdefgh":
            for rank1 in "12345678":
                for file2 in "abcdefgh":
                    for rank2 in "12345678":
                        sq1 = f"{file1}{rank1}"
                        sq2 = f"{file2}{rank2}"
                        if abs(ord(file1) - ord(file2)) <= 2 and abs(int(rank1) - int(rank2)) <= 2:
                            patterns.append(f"[{sq1}-{sq2}]")

        return patterns[:1100]
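
    # Counting note: there are 34 ordered file pairs at most two files apart
    # (3+4+5+5+5+5+4+3 over the eight source files) and likewise 34 rank pairs,
    # so the nested loops above generate 34 * 34 = 1,156 square pairs
    # (same-square pairs included); the [:1100] slice trims that list down to
    # the reserved budget of 1,100 pattern tokens.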

    def _parse_move(self, move: str) -> List[str]:
        """
        Parse a move string into components.

        Examples:
            "WPe2e4"  -> ["[W]", "[P]", "[e2]", "[e4]"]
            "BNg8f6x" -> ["[B]", "[N]", "[g8]", "[f6]", "[x]"]
            "WKe1g1o" -> ["[W]", "[K]", "[e1]", "[g1]", "[o]"]

        Args:
            move: A move string in extended UCI format.

        Returns:
            List of component tokens.
        """
        if not move or len(move) < 4:
            return [self.UNK_TOKEN]

        components = []

        # Color prefix (W or B).
        color = move[0]
        if color == "W":
            components.append("[W]")
        elif color == "B":
            components.append("[B]")
        else:
            return [self.UNK_TOKEN]

        # Piece letter.
        piece = move[1]
        piece_map = {"P": "[P]", "N": "[N]", "B": "[B]", "R": "[R]", "Q": "[Q]", "K": "[K]"}
        if piece not in piece_map:
            return [self.UNK_TOKEN]
        components.append(piece_map[piece])

        # Source and destination squares.
        src_square = move[2:4]
        dst_square = move[4:6]

        if (
            len(src_square) != 2 or len(dst_square) != 2 or
            src_square[0] not in "abcdefgh" or dst_square[0] not in "abcdefgh" or
            src_square[1] not in "12345678" or dst_square[1] not in "12345678"
        ):
            return [self.UNK_TOKEN]

        components.append(f"[{src_square}]")
        components.append(f"[{dst_square}]")

        # Optional suffixes: capture, checkmate/check, and castling markers.
        if len(move) > 6:
            suffix = move[6:]
            if "x" in suffix:
                components.append("[x]")
            if "+*" in suffix:
                components.append("[#+]")
            elif "+" in suffix:
                components.append("[+]")
            if "o" in suffix.lower():
                if "O" in move:
                    components.append("[O]")
                else:
                    components.append("[o]")

        return components

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into component tokens.

        Args:
            text: A string of space-separated moves.

        Returns:
            List of component tokens.
        """
        moves = text.strip().split()
        tokens = []

        for move in moves:
            components = self._parse_move(move)
            tokens.extend(components)

        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a list of tokens back to a string representation."""
        cleaned = []
        for t in tokens:
            if t not in {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}:
                t = t.strip("[]")
                if t:
                    cleaned.append(t)
        return " ".join(cleaned)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        This method decomposes moves into components and builds the vocabulary
        from the component tokens.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a component token to be included.

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from collections import Counter

        component_counts = Counter()

        # A temporary tokenizer (with the default component vocab) provides _parse_move.
        temp_tokenizer = cls()

        for game in iterator:
            moves = game.strip().split()
            for move in moves:
                components = temp_tokenizer._parse_move(move)
                component_counts.update(components)

        # Keep components that meet the frequency threshold, sorted for stable IDs.
        components = [
            token for token, count in component_counts.items()
            if count >= min_frequency
        ]
        components = sorted(components)

        # Start from the full default component vocabulary and append any observed
        # components it does not already contain.
        tokenizer = cls()
        current_vocab = dict(tokenizer._vocab)
        idx = len(current_vocab)

        for component in components:
            if component not in current_vocab:
                current_vocab[component] = idx
                idx += 1
        return cls(vocab=current_vocab)

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        This method decomposes moves into components and builds the vocabulary
        from the component tokens found in the dataset.

        Args:
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a component token to be included (default: 500).
            max_samples: Maximum number of samples to process (default: 100k).

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)
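

# End-to-end sketch (added for illustration; the sample moves are assumptions,
# not dataset excerpts): compares how long the same short game becomes under the
# move-level and component-level tokenizers.
if __name__ == "__main__":
    sample = "WPe2e4 BPe7e5 WNg1f3 BNb8c6 WBf1b5"
    move_level = ChessTokenizerOld.build_vocab_from_iterator([sample])
    component = ChessTokenizer()
    move_ids = move_level.encode(sample, add_special_tokens=False)
    component_ids = component.encode(sample, add_special_tokens=False)
    print(f"move-level tokens:      {len(move_ids)}")       # one token per move
    print(f"component-level tokens: {len(component_ids)}")  # about four per move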