Upload folder using huggingface_hub
- .gitignore +8 -8
- README.md +68 -63
- main.py +38 -38
- pyproject.toml +19 -19
- src/finesse_benchmark_database/__init__.py +47 -47
- src/finesse_benchmark_database/chunker.py +153 -153
- src/finesse_benchmark_database/config.py +78 -78
- src/finesse_benchmark_database/main.py +98 -98
- src/finesse_benchmark_database/writer.py +82 -82
.gitignore
CHANGED
*.jsonl
__pycache__/
*.pyc
.venv/
.env
.DS_Store
.pypirc
dist/
!probes.jsonl
README.md
CHANGED
@@ -1,64 +1,69 @@
----
-license: apache-2.0
----
-
-#
-
-- `
-- `
-
+---
+license: apache-2.0
+---
+
+[Github](https://github.com/enzoescipy/finesse-benchmark)
+[huggingface](https://huggingface.co/datasets/enzoescipy/finesse-benchmark-database)
+[pypi](https://pypi.org/project/finesse-benchmark/)
+[blog](https://www.winter-sci-dev.com/posts/embed-sequence-merger-vbert-ppe-article/)
+
+# Finesse Benchmark Database
+
+## Overview
+
+`finesse-benchmark-database` is a data generation factory for atomic probes in the Finesse benchmark. It generates `probes_atomic.jsonl` files from Wikimedia Wikipedia datasets, leveraging Hugging Face's `datasets` library, tokenizers from `transformers`, and optional PyTorch support.
+
+This tool is designed to create high-quality, language-specific probe datasets for benchmarking fine-grained understanding in NLP tasks.
+
+## Installation
+
+Install the package from PyPI:
+
+```bash
+pip install finesse-benchmark-database
+```
+
+Ensure you have Python 3.10+ installed.
+
+## Usage
+
+Here's a complete example of how to configure and generate a dataset:
+
+```python
+from finesse_benchmark_database.config import ProbeConfig
+from finesse_benchmark_database.main import generate_dataset
+
+# Define the configuration
+my_config = ProbeConfig(
+    languages=['en', 'ko'],               # Languages to generate probes for
+    samples_per_language=10,              # Source articles per language (keep small for testing)
+    output_file='my_first_probes.jsonl',  # Output file path
+    seed=123                              # Random seed for reproducibility
+)
+
+# Generate the dataset
+print(f"Generating '{my_config.output_file}'...")
+generate_dataset(my_config)
+print("Dataset generation completed!")
+```
+
+### Configuration Options
+
+- `languages`: List of language codes (e.g., `['en', 'ko', 'fr']`).
+- `samples_per_language`: Number of source articles to process per language; each article becomes one 'string' of 64-token beads.
+- `output_file`: Path to the output JSONL file.
+- `seed`: Optional seed for deterministic results.
+
+## Requirements
+
+- `datasets`
+- `transformers`
+- `torch` (for tokenization)
+
+## License
+
+This project is licensed under the Apache-2.0 license.
+
+## Contributing
+
 Contributions are welcome! Please open issues or pull requests on the [GitHub repository](https://github.com/enzoescipy/finesse-benchmark).
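For a quick look at what the generator writes, here is a minimal inspection sketch. It assumes the `my_first_probes.jsonl` file produced by the README example; the record fields (`string_id`, `source`, `beads`) are the ones `writer.py` emits.

```python
import json

# Peek at the first record of the file generated by the README example.
with open('my_first_probes.jsonl', encoding='utf-8') as f:
    record = json.loads(next(f))

print(record['string_id'])   # sequential integer assigned by the writer
print(record['source'])      # {'dataset': 'wikimedia/wikipedia', 'article_id': '...', 'lang': 'en'}
print(len(record['beads']))  # number of 64-token beads in this string
```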
main.py
CHANGED
#!/usr/bin/env python3
"""
Example script demonstrating the usage of the finesse-benchmark-database package.
This script creates a simple configuration and generates a probes.jsonl file.
"""

from finesse_benchmark_database.config import ProbeConfig
from finesse_benchmark_database.main import generate_dataset

LANGUAGES = [
    'en',
    'ko',
    'es',
    'ja',
    'ru',
    'zh',
    'ar',
    'id',
    'de',
    'vi',
]

if __name__ == "__main__":
    # Define the configuration for a full ten-language run
    demo_config = ProbeConfig(
        languages=LANGUAGES,         # All ten target languages
        samples_per_language=5000,   # Source articles per language
        output_file='probes.jsonl',  # Output file
        seed=42                      # Fixed seed for reproducibility
    )

    print(f"Generating dataset: '{demo_config.output_file}'")
    print("This will build a probes JSONL file from Wikipedia data.")

    # Generate the dataset
    generate_dataset(demo_config)

    print(f"Generation completed! Check '{demo_config.output_file}' for the output.")
    print("You can now inspect the file or integrate this into your Finesse benchmark workflow.")
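Once `probes.jsonl` exists, one convenient way to load it back is the same `datasets` library the generator already depends on. A minimal sketch (file name taken from the script above):

```python
from datasets import load_dataset

# Load the generated JSONL back as a Hugging Face dataset for inspection.
ds = load_dataset("json", data_files="probes.jsonl", split="train")
print(ds)                       # columns: string_id, source, beads
print(ds[0]["source"]["lang"])  # language code of the first string
```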
pyproject.toml
CHANGED
@@ -1,19 +1,19 @@
 [tool.poetry]
 name = "finesse-benchmark-database"
-version = "0.1.
+version = "0.1.13"
 description = "Data generation factory for atomic probes in Finesse benchmark. Generates probes_atomic.jsonl from Wikimedia Wikipedia."
 authors = ["winter.sci.dev <enzoescipy@gmail.com>"]
 readme = "README.md"
 packages = [{include = "finesse_benchmark_database", from = "src"}]

 [tool.poetry.dependencies]
 python = "^3.10"
 datasets = "^4.3.0"
 transformers = "^4.35.0"
 torch = "^2.1.0" # For tokenizer if needed

 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
src/finesse_benchmark_database/__init__.py
CHANGED
"""finesse-benchmark-database: Multilingual Atomic Probe Generator for Long-Context Evaluation

This package provides a flexible, configurable library for generating high-quality, traceable datasets of 'strings of beads':
atomic 64-token text chunks sourced from multilingual Wikipedia articles. It serves as the foundational data generation
pipeline for the Finesse long-context benchmarking framework, ensuring reproducibility, semantic diversity, and
complete metadata tracking for advanced LLM evaluation.

Core Principles:
- Atomic Beads: Exact 64-token chunks (incomplete final chunks are discarded) to test pure memory granularity.
- Traceable Origins: Each bead/string includes full metadata (dataset, article_id, lang) for debugging and verification.
- Multilingual Balance: Supports 10+ languages with configurable quotas for fair coverage.
- Library-First Design: Instantiable via ProbeConfig for custom experiments; no globals or hardcoding.
- JSONL Output: Efficient streaming format for large-scale datasets.

Key Components:
- ProbeConfig: Dataclass for all settings (languages, samples, chunk size, etc.).
- generate_all_strings_of_beads(config): Produces a list of {'source': metadata, 'beads': [text_chunks]} dicts.
- write_strings_to_probes_atomic(config, strings): Serializes to JSONL with auto-assigned string_ids.

Example Usage:
    from finesse_benchmark_database import ProbeConfig, generate_all_strings_of_beads, write_strings_to_probes_atomic

    config = ProbeConfig(
        languages=['en', 'ko'],
        samples_per_language=100,
        chunk_token_size=64,
        output_file='my_probes.jsonl',
        seed=42
    )

    beads_strings = generate_all_strings_of_beads(config)
    write_strings_to_probes_atomic(config, beads_strings)
    # Outputs my_probes.jsonl with ~200 traceable strings of beads.

Installation:
    pip install finesse-benchmark-database
    # Or via Poetry: poetry add finesse-benchmark-database

This package powers the creation of ~1M+ atomic probes for rigorous long-context memory testing.
See main.py for a full pipeline example.
"""

__version__ = "0.1.13"

from .config import ProbeConfig
from .chunker import generate_all_strings_of_beads
from .writer import write_strings_to_probes_atomic
from .main import generate_dataset
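The docstring demonstrates the two-step API (generate, then write); the re-exported `generate_dataset` collapses both steps into one call. A minimal sketch, with illustrative parameter values:

```python
from finesse_benchmark_database import ProbeConfig, generate_dataset

# One call runs chunking and serialization end to end.
config = ProbeConfig(
    languages=['en'],
    samples_per_language=2,           # tiny run for illustration
    output_file='tiny_probes.jsonl',
    seed=0,
)
generate_dataset(config)
```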
src/finesse_benchmark_database/chunker.py
CHANGED
"""Chunker module for Finesse Benchmark Database Generator

This module implements the 'Gemstone Necklace Crafter', aka the atomic bead generator.
It transforms raw Wikimedia Wikipedia documents into 'strings of beads': sequential chains
of exactly 64-token atomic beads from each source article, preserving semantic flow within
each string while ensuring atomicity and reproducibility across languages.

Key Principles:
- Uses the official reference tokenizer, bert-base-multilingual-cased, for universal fairness.
- Exactly 64-token beads only; all incomplete final chunks are discarded (golden rule).
- One 'string' per Wikipedia article, with beads in original sequential order.
- Balanced collection: samples_per_language strings per language.
- Streaming processing for efficiency on massive datasets.
- Logged progress for transparency and debugging.

Usage:
    from finesse_benchmark_database.config import ProbeConfig
    from finesse_benchmark_database.chunker import generate_all_strings_of_beads
    strings_of_beads = generate_all_strings_of_beads(ProbeConfig(languages=['en']))
    # Result: List[Dict] where each dict contains 'source' and 'beads' keys.
"""

import logging
import random
from typing import List, Dict

from .config import ProbeConfig
from datasets import load_dataset
from transformers import AutoTokenizer

# Configure logging for progress tracking
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def generate_all_strings_of_beads(config: ProbeConfig) -> List[Dict]:
    """
    Generate all 'strings of beads' across languages using the official tokenizer.

    Args:
        config: ProbeConfig instance with all parameters (languages, samples_per_language, etc.).

    For each language:
    - Stream Wikipedia articles.
    - For each article (up to config.samples_per_language):
      - Tokenize the full text.
      - Sequentially slice into chunk_token_size-token chunks.
      - Decode only exact-size chunks to bead texts.
      - Form a 'string' as [bead1, bead2, ...] if at least one bead exists.
    - Collect all strings into a global list.

    Returns:
        List of dictionaries, ordered by language and then by article; each contains 'source' and 'beads' keys.
    """
    # Set global seed for reproducibility
    random.seed(config.seed)

    # Load the official reference tokenizer (our 'public scale')
    logger.info(f"Loading official tokenizer: {config.tokenizer_name}")
    tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name)

    all_strings_of_beads: List[Dict] = []

    if config.languages is None:
        raise ValueError("config.languages must be set to a list of language codes.")

    for lang in config.languages:
        logger.info(f"Starting processing for language: {lang} (target: {config.samples_per_language} strings)")

        # Load streaming dataset with the specified split
        dataset = load_dataset(
            "wikimedia/wikipedia",
            f"20231101.{lang}",
            streaming=True,
            split="train"
        )

        strings_for_lang: List[Dict] = []
        article_count = 0

        for example in dataset:
            if len(strings_for_lang) >= config.samples_per_language:
                break

            # Extract and clean text
            text = example.get("text", "").strip()
            if not text:
                continue

            # Extract article ID for metadata
            article_id = example.get("id", "")

            # Tokenize the full article
            tokens = tokenizer.encode(text, add_special_tokens=False)

            # Sequentially chunk into exact config.chunk_token_size-token beads
            beads: List[str] = []
            for i in range(0, len(tokens), config.chunk_token_size):
                chunk_tokens = tokens[i:i + config.chunk_token_size]

                # Golden rule: only accept exactly chunk_token_size tokens; discard incompletes
                if len(chunk_tokens) == config.chunk_token_size:
                    bead_text = tokenizer.decode(chunk_tokens, skip_special_tokens=True).strip()
                    if bead_text:  # Ensure non-empty after decoding
                        beads.append(bead_text)

            # Only add the string if it has at least one bead
            if beads:
                string_data = {
                    "source": {
                        "dataset": "wikimedia/wikipedia",
                        "article_id": article_id,
                        "lang": lang
                    },
                    "beads": beads
                }
                strings_for_lang.append(string_data)
            article_count += 1

            # Log progress every 100 articles
            if article_count % 100 == 0:
                logger.info(
                    f"Language {lang}: Processed {article_count} articles, "
                    f"collected {len(strings_for_lang)} strings so far"
                )

        # Add the language's strings to the global collection
        all_strings_of_beads.extend(strings_for_lang)
        logger.info(f"Completed {lang}: {len(strings_for_lang)} strings generated "
                    f"(from {article_count} articles)")

    total_strings = len(all_strings_of_beads)
    logger.info(f"Generation complete: {total_strings} total strings of beads across {len(config.languages)} languages")

    return all_strings_of_beads

if __name__ == "__main__":
    # Example library usage (run with: python -m finesse_benchmark_database.chunker)
    test_config = ProbeConfig(
        languages=['en', 'ko'],  # Test with 2 languages
        samples_per_language=5,  # Small sample for demo
        chunk_token_size=64,
        tokenizer_name="google-bert/bert-base-multilingual-cased",
        output_file="probes_atomic.jsonl",
        seed=42
    )

    # Generate and print a summary
    strings = generate_all_strings_of_beads(test_config)
    print(f"Generated {len(strings)} strings of beads.")
    if strings:
        avg_beads_per_string = sum(len(s['beads']) for s in strings) / len(strings)
        print(f"Average beads per string: {avg_beads_per_string:.2f}")
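The slicing loop above reduces to one simple rule: keep only full-size windows, drop the tail. A self-contained sketch with a toy chunk size of 4 (the real pipeline uses 64 and decodes each window back to text):

```python
# Toy illustration of the golden rule: exact-size chunks only.
CHUNK = 4  # stands in for config.chunk_token_size (64 in production)

tokens = list(range(10))  # pretend output of tokenizer.encode(...)
beads = [
    tokens[i:i + CHUNK]
    for i in range(0, len(tokens), CHUNK)
    if len(tokens[i:i + CHUNK]) == CHUNK
]

print(beads)  # [[0, 1, 2, 3], [4, 5, 6, 7]] -- the incomplete tail [8, 9] is discarded
```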
src/finesse_benchmark_database/config.py
CHANGED
"""Configuration module for Finesse Benchmark Database Generator

This module holds all configurable parameters for generating atomic probes from Wikimedia Wikipedia datasets.
Based on the 'Beads and String' model: 64-token atomic beads from diverse languages, ensuring semantic continuity within strings but independence across probes.

Key Principles:
- Fixed 64-token chunk size for atomic beads.
- Balanced sampling across languages for global diversity.
- Seeded randomness for perfect reproducibility.
- Output in probes_atomic.jsonl format for dynamic assembly in evaluation.

Usage:
    from finesse_benchmark_database.config import ProbeConfig, get_config
"""

from dataclasses import dataclass
from typing import Optional
import random

@dataclass
class ProbeConfig:
    """Central configuration for probe generation.

    This dataclass serves as a flexible template for library users.
    Instantiate and populate it with desired values before passing to generate functions.

    Example:
        config = ProbeConfig(
            languages=['en', 'ko'],
            samples_per_language=10,
            chunk_token_size=64
        )
    """

    # Tokenizer used for chunking (default: multilingual BERT)
    tokenizer_name: str = "google-bert/bert-base-multilingual-cased"

    # Languages for balanced multilingual coverage (must be set by the user)
    languages: Optional[list[str]] = None

    # Atomic bead size (golden rule from the design; override for custom experiments)
    chunk_token_size: int = 64

    # Number of 'strings of beads' (source documents) per language: the number of complete
    # Wikipedia articles to process per language. Each document is chunked sequentially into
    # multiple 64-token atomic beads, preserving original order and semantic flow within
    # the string. This is NOT the total count of individual beads (which will be much higher,
    # depending on document lengths), but the number of such connected 'necklaces' or
    # 'strings' for balanced multilingual coverage.
    samples_per_language: int = 10000

    # Output file for atomic probes
    output_file: str = "probes_atomic.jsonl"

    # Fixed seed for reproducibility (immutable law; set to None for non-deterministic runs)
    seed: int = 42

def get_config() -> ProbeConfig:
    """Instantiate and return the default configuration with languages initialized."""
    config = ProbeConfig()
    config.languages = [
        'en',  # English
        'ko',  # Korean
        'es',  # Spanish
        'ja',  # Japanese
        'ru',  # Russian
        'zh',  # Chinese
        'ar',  # Arabic
        'id',  # Indonesian
        'de',  # German
        'vi',  # Vietnamese
    ]

    # Set the global seed for all randomness
    random.seed(config.seed)

    return config

# Default config instance
CONFIG = get_config()
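To make the strings-versus-beads distinction in the `samples_per_language` comment concrete: each article contributes `token_count // chunk_token_size` beads, so the total bead count depends on article lengths. A back-of-envelope sketch with made-up article lengths:

```python
# Hypothetical article token counts; real counts vary widely by article.
chunk_token_size = 64
article_token_counts = [3000, 450, 120]  # samples_per_language = 3 strings

beads_per_string = [n // chunk_token_size for n in article_token_counts]
print(beads_per_string)       # [46, 7, 1]
print(sum(beads_per_string))  # 54 beads total from just 3 strings
```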
src/finesse_benchmark_database/main.py
CHANGED
"""Main orchestration module for Finesse Benchmark Database Generator

This module serves as the 'Conductor' that orchestrates the entire pipeline: from configuration setup
through atomic bead generation to final JSONL serialization. It ties together config, chunker, and writer
for end-to-end execution of our 'Gemstone Necklace' production process.

Key Principles:
- All steps share a single ProbeConfig instance for consistency.
- Executes chunking first (memory-intensive), then writing (I/O-focused).
- Comprehensive logging for an audit trail and debugging.
- Designed for one-shot full generation; scale via external orchestration if needed.

Usage:
    python -m finesse_benchmark_database.main
    # Runs a small end-to-end demo; call generate_dataset(config) with a larger
    # ProbeConfig for full-scale generation to probes_atomic.jsonl.
"""

import logging

from .config import ProbeConfig
from .chunker import generate_all_strings_of_beads
from .writer import write_strings_to_probes_atomic

# Configure logging for the entire pipeline
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def generate_dataset(config: ProbeConfig) -> None:
    """Run the full pipeline for the given config: generate beads, then serialize them."""
    logger.info(f"Config set: {len(config.languages)} languages, {config.chunk_token_size}-token beads, "
                f"{config.samples_per_language} strings per language")

    # Step 1: Generate all structured 'strings of beads' using the chunker
    logger.info("Starting bead generation with chunker...")
    all_strings_of_beads = generate_all_strings_of_beads(config)
    total_strings = len(all_strings_of_beads)
    logger.info(f"Bead generation complete: {total_strings} total strings produced "
                f"(~{total_strings / len(config.languages):.0f} per language)")

    # Step 2: Serialize to the atomic probes JSONL file using the writer
    logger.info(f"Starting serialization to {config.output_file}...")
    write_strings_to_probes_atomic(config, all_strings_of_beads)
    logger.info(f"Serialization complete: All {total_strings} strings saved with metadata.")

    # Final summary
    estimated_beads = sum(len(s['beads']) for s in all_strings_of_beads)
    logger.info("=== Pipeline Summary ===")
    logger.info(f"- Total strings: {total_strings}")
    logger.info(f"- Total atomic beads: {estimated_beads}")
    logger.info(f"- Coverage: {len(set(s['source']['lang'] for s in all_strings_of_beads))} languages")
    logger.info(f"- Output file: {config.output_file}")
    logger.info("For full production: increase samples_per_language and add more languages in the config.")
    logger.info("=== Finesse Benchmark Database Generation Finished Successfully ===")


def main() -> None:
    """Execute the full Finesse benchmark database generation pipeline as a library example.

    This demonstrates how users would create a ProbeConfig, generate data, and write output.
    For production, adjust config parameters as needed (e.g., more languages, larger samples).
    """
    logger.info("=== Finesse Benchmark Database Pipeline Started (Library Example) ===")

    # Step 1: User creates and configures a ProbeConfig for this run
    logger.info("Creating test configuration...")
    test_config = ProbeConfig(
        languages=['en'],        # Single language for a quick demo
        samples_per_language=5,  # Small sample for testing
        chunk_token_size=64,
        tokenizer_name="google-bert/bert-base-multilingual-cased",
        output_file="test_probes_atomic.jsonl",
        seed=42
    )
    logger.info(f"Config set: {len(test_config.languages)} languages, {test_config.chunk_token_size}-token beads, "
                f"{test_config.samples_per_language} strings per language")

    # Step 2: Generate all structured 'strings of beads' using the chunker
    logger.info("Starting bead generation with chunker...")
    all_strings_of_beads = generate_all_strings_of_beads(test_config)
    total_strings = len(all_strings_of_beads)
    logger.info(f"Bead generation complete: {total_strings} total strings produced "
                f"(~{total_strings / len(test_config.languages):.0f} per language)")

    # Step 3: Serialize to the atomic probes JSONL file using the writer
    logger.info(f"Starting serialization to {test_config.output_file}...")
    write_strings_to_probes_atomic(test_config, all_strings_of_beads)
    logger.info(f"Serialization complete: All {total_strings} strings saved with metadata.")

    # Final summary
    estimated_beads = sum(len(s['beads']) for s in all_strings_of_beads)
    logger.info("=== Pipeline Summary (Test Run) ===")
    logger.info(f"- Total strings: {total_strings}")
    logger.info(f"- Total atomic beads: {estimated_beads}")
    logger.info(f"- Coverage: {len(set(s['source']['lang'] for s in all_strings_of_beads))} languages")
    logger.info(f"- Output file: {test_config.output_file}")
    logger.info("For full production: increase samples_per_language and add more languages in the config.")
    logger.info("=== Finesse Benchmark Database Generation Example Finished Successfully ===")

if __name__ == "__main__":
    main()
src/finesse_benchmark_database/writer.py
CHANGED
"""Writer module for Finesse Benchmark Database Generator

This module implements the 'Eternal Scribe': it takes the structured 'strings of beads' output from chunker.py
and serializes it into the probes_atomic.jsonl format. Each line is an independent JSON object with metadata
for full traceability, ensuring the dataset is reproducible, queryable, and efficient for large-scale use.

Key Principles:
- JSONL format for streaming efficiency (one complete JSON object per line).
- Assigns an immutable 'string_id' for unique identification.
- Preserves all source metadata (dataset, article_id, lang) for debugging and verification.
- UTF-8 encoding to handle multilingual content without corruption.
- Logged serialization progress for transparency.

Usage:
    from finesse_benchmark_database.writer import write_strings_to_probes_atomic
    from finesse_benchmark_database.chunker import generate_all_strings_of_beads
    strings = generate_all_strings_of_beads(config)
    write_strings_to_probes_atomic(config, strings)
    # Results in probes_atomic.jsonl with all probes serialized.
"""

import json
import logging
from typing import List, Dict

from .config import ProbeConfig

# Configure logging for progress tracking
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def write_strings_to_probes_atomic(config: ProbeConfig, all_strings_of_beads: List[Dict]) -> None:
    """
    Write the structured strings of beads to the atomic probes JSONL file.

    Args:
        config: ProbeConfig instance specifying output_file and chunk_token_size.
        all_strings_of_beads: List of dicts from chunker.py, each with 'source' and 'beads'.

    Side Effects:
        Creates (or overwrites) config.output_file.
        Each string gets a sequential 'string_id' assigned.
    """
    output_path = config.output_file
    logger.info(f"Starting serialization to {output_path}. Total strings: {len(all_strings_of_beads)}")

    with open(output_path, 'w', encoding='utf-8') as file:
        for i, string_data in enumerate(all_strings_of_beads):
            # Assign a unique string_id (immutable global identifier)
            string_data['string_id'] = i

            # Serialize to a JSON line
            json_line = json.dumps(string_data, ensure_ascii=False, separators=(',', ':'))
            file.write(json_line + '\n')

            # Log progress every 1000 strings
            if (i + 1) % 1000 == 0:
                logger.info(f"Serialized {i + 1} strings to {output_path}")

    total_written = len(all_strings_of_beads)
    logger.info(f"Serialization complete: {total_written} strings written to {output_path}")
    logger.info(f"File structure: Each line is a JSON object with 'string_id', 'source' (metadata), and 'beads' ({config.chunk_token_size}-token chunks).")

if __name__ == "__main__":
    # Example library usage (run with: python -m finesse_benchmark_database.writer)
    from .chunker import generate_all_strings_of_beads

    # User creates and populates a config for a small test run
    demo_config = ProbeConfig(
        languages=['en'],        # Single language for a quick demo
        samples_per_language=3,  # Very small sample
        chunk_token_size=64,
        tokenizer_name="google-bert/bert-base-multilingual-cased",
        output_file="demo_probes_atomic.jsonl",
        seed=42
    )

    print("Generating strings of beads...")
    strings = generate_all_strings_of_beads(demo_config)
    print(f"Generated {len(strings)} strings. Now serializing...")
    write_strings_to_probes_atomic(demo_config, strings)
    print("Pipeline complete. Check demo_probes_atomic.jsonl for output.")
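As a final sanity check on the writer's output, here is a short sketch (assuming the `demo_probes_atomic.jsonl` produced by the demo above) that verifies every line parses and that string_ids are sequential:

```python
import json

# Validate the demo output: parseable lines, sequential ids, non-empty bead lists.
with open('demo_probes_atomic.jsonl', encoding='utf-8') as f:
    records = [json.loads(line) for line in f]

assert all(r['string_id'] == i for i, r in enumerate(records))
assert all(r['beads'] for r in records)  # the chunker never emits empty strings
print(f"OK: {len(records)} strings, {sum(len(r['beads']) for r in records)} beads")
```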