| | """Chunker module for Finesse Benchmark Database Generator
|
| |
|
| | This module implements the 'Gemstone Necklace Crafter' aka the atomic bead generator.
|
| | It transforms raw Wikimedia Wikipedia documents into 'strings of beads': sequential chains
|
| | of exactly 64-token atomic beads from each source article, preserving semantic flow within
|
| | each string while ensuring atomicity and reproducibility across languages.
|
| |
|
| | Key Principles:
|
| | - Uses official reference tokenizer: bert-base-multilingual-cased for universal fairness.
|
| | - Exactly 64-token beads only; discard all incomplete final chunks (golden rule).
|
| | - One 'string' per Wikipedia article, with beads in original sequential order.
|
| | - Balanced collection: samples_per_language strings per language.
|
| | - Streaming processing for efficiency on massive datasets.
|
| | - Logged progress for transparency and debugging.
|
| |
|
| | Usage:
|
| | from chunker import generate_all_strings_of_beads
|
| | strings_of_beads = generate_all_strings_of_beads()
|
| | # Result: List[Dict] where each inner dict contains 'source' and 'beads' keys.
|
| | """
|
| |
|
| | import logging
|
| | import random
|
| | from typing import List, Dict
|
| |
|
| | from .config import ProbeConfig
|
| | from datasets import load_dataset
|
| | from transformers import AutoTokenizer
|
| |
|
| |
|
# Configure root logging once at import time so per-language progress
# messages are visible by default when this module is run directly.
logging.basicConfig(level=logging.INFO)
# Module-level logger, named after the module per stdlib convention.
logger = logging.getLogger(__name__)
|
| |
|
def generate_all_strings_of_beads(config: ProbeConfig) -> List[Dict]:
    """Generate all 'strings of beads' across languages using the official tokenizer.

    For each configured language, Wikipedia articles are streamed and sliced
    into exact ``config.chunk_token_size``-token beads; incomplete trailing
    chunks are discarded (the "golden rule"). One string is produced per
    article that yields at least one bead, up to
    ``config.samples_per_language`` strings per language.

    Args:
        config: ProbeConfig instance with all parameters (languages,
            samples_per_language, chunk_token_size, tokenizer_name, seed).

    Returns:
        List of dicts, ordered by language then article order. Each dict has:
            'source': {'dataset', 'article_id', 'lang'} provenance metadata.
            'beads':  list of decoded bead texts in original article order.

    Raises:
        ValueError: If ``config.languages`` is None.
    """
    # Seed the module RNG for reproducibility.
    # NOTE(review): nothing in this function draws random numbers; the call is
    # kept as a backward-compatible side effect for downstream consumers.
    random.seed(config.seed)

    logger.info("Loading official tokenizer: %s", config.tokenizer_name)
    tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name)

    if config.languages is None:
        raise ValueError("config.languages must be set to a list of language codes.")

    all_strings_of_beads: List[Dict] = []

    for lang in config.languages:
        logger.info(
            "Starting processing for language: %s (target: %d strings)",
            lang, config.samples_per_language,
        )

        # Streaming avoids downloading the full dump before processing.
        dataset = load_dataset(
            "wikimedia/wikipedia",
            f"20231101.{lang}",
            streaming=True,
            split="train",
        )

        strings_for_lang: List[Dict] = []
        article_count = 0  # articles examined (non-empty text), not just kept

        for example in dataset:
            if len(strings_for_lang) >= config.samples_per_language:
                break

            text = example.get("text", "").strip()
            if not text:
                continue

            # BUGFIX: count every examined article, not only those that
            # yielded beads. Previously the "Processed N articles" log
            # undercounted, and the % 100 progress check fired repeatedly
            # at count 0 for articles too short to produce a bead.
            article_count += 1

            beads = _beads_from_text(text, tokenizer, config.chunk_token_size)

            if beads:
                strings_for_lang.append({
                    "source": {
                        "dataset": "wikimedia/wikipedia",
                        "article_id": example.get("id", ""),
                        "lang": lang,
                    },
                    "beads": beads,
                })

            if article_count % 100 == 0:
                logger.info(
                    "Language %s: Processed %d articles, collected %d strings so far",
                    lang, article_count, len(strings_for_lang),
                )

        all_strings_of_beads.extend(strings_for_lang)
        logger.info(
            "Completed %s: %d strings generated (from %d articles)",
            lang, len(strings_for_lang), article_count,
        )

    logger.info(
        "Generation complete: %d total strings of beads across %d languages",
        len(all_strings_of_beads), len(config.languages),
    )
    return all_strings_of_beads


def _beads_from_text(text: str, tokenizer, chunk_size: int) -> List[str]:
    """Tokenize *text* and decode it into exact *chunk_size*-token beads.

    Trailing chunks shorter than *chunk_size* are discarded (golden rule),
    as are beads that decode to an empty string. Beads preserve the
    article's original sequential order.
    """
    tokens = tokenizer.encode(text, add_special_tokens=False)
    beads: List[str] = []
    for start in range(0, len(tokens), chunk_size):
        chunk = tokens[start:start + chunk_size]
        if len(chunk) != chunk_size:
            break  # only the final slice can be short; stop there
        bead = tokenizer.decode(chunk, skip_special_tokens=True).strip()
        if bead:
            beads.append(bead)
    return beads
|
| |
|
if __name__ == "__main__":
    # Smoke-test run: generate a handful of strings for two languages and
    # report basic statistics.
    from config import ProbeConfig

    demo_config = ProbeConfig(
        languages=['en', 'ko'],
        samples_per_language=5,
        chunk_token_size=64,
        tokenizer_name="google-bert/bert-base-multilingual-cased",
        output_file="probes_atomic.jsonl",
        seed=42
    )

    results = generate_all_strings_of_beads(demo_config)
    print(f"Generated {len(results)} strings of beads.")
    if results:
        total_beads = sum(len(entry['beads']) for entry in results)
        average = total_beads / len(results)
        print(f"Average beads per string: {average:.2f}")