"""
Load the prefill dataset and generate example prompts for each task type.

Usage:
    uv run generate_examples.py                                # from local files
    uv run generate_examples.py --repo di2ox3/prefill-dataset  # from HuggingFace
"""
from __future__ import annotations
| | from __future__ import annotations |
| |
|
| | import argparse |
| | import json |
| | import textwrap |
| | from pathlib import Path |
| |
|
| | import pyarrow.parquet as pq |
| | from transformers import AutoTokenizer |
| |
|
| |
|
def load_tables(repo: str | None):
    """Load the documents, tasks, and translations tables as DataFrames.

    Args:
        repo: Optional HuggingFace repo ID. When given, the three parquet
            files are fetched from the hub via ``datasets``; otherwise they
            are read from the local ``output/`` directory next to this script.

    Returns:
        Tuple ``(docs, tasks, trans)`` of pandas DataFrames.
    """
    # Single source of truth for the table names; both branches previously
    # repeated the same three loads verbatim.
    names = ("documents", "tasks", "translations")
    if repo:
        # Imported lazily so the local-file path does not require `datasets`.
        from datasets import load_dataset

        docs, tasks, trans = (
            load_dataset(repo, data_files=f"data/{name}.parquet", split="train").to_pandas()
            for name in names
        )
        return docs, tasks, trans
    base = Path(__file__).parent / "output"
    docs, tasks, trans = (
        pq.read_table(base / f"{name}.parquet").to_pandas() for name in names
    )
    return docs, tasks, trans
| |
|
| |
|
def show_summary(docs, tasks, trans):
    """Print row counts and per-group breakdowns for the three tables."""
    banner = "=" * 60
    print(banner)
    print("DATASET SUMMARY")
    print(banner)

    total_tokens = docs["token_count"].sum()
    print(f"Documents: {len(docs)}")
    print(f"Total tokens: {total_tokens:,} ({total_tokens / 1e6:.1f}M)")
    print(f"Tasks: {len(tasks)}")
    print(f"Translations: {len(trans)}")

    print()
    print("Documents by source:")
    for src, grp in docs.groupby("source"):
        print(f" {src}: {len(grp)} docs, {grp['token_count'].sum():,} tokens")

    print()
    print("Tasks by type:")
    for task_type, grp in tasks.groupby("task_type"):
        print(f" {task_type}: {len(grp)}")

    print()
    print("Translations by language:")
    for lang, grp in trans.groupby("target_language"):
        print(f" {lang}: {len(grp)}")
| |
|
| |
|
def show_qa_example(docs, tasks):
    """Print the first QA task alongside its source document."""
    qa = tasks[tasks.task_type == "qa"]
    if qa.empty:
        print("No QA tasks found.")
        return

    task = qa.iloc[0]
    doc = docs[docs.doc_id == task.doc_id].iloc[0]
    # `answer` is stored as a JSON-encoded list of acceptable answers.
    answers = json.loads(task.answer)

    divider = "-" * 60
    print()
    print(divider)
    print("EXAMPLE: Question Answering")
    print(divider)
    print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
    print(f"Question: {task.question}")
    print(f"Answers: {answers}")
    print(f"Context (first 300 chars): {doc.text[:300]}...")
| |
|
| |
|
def show_translation_example(docs, tasks, trans):
    """Print the first translation task, its metadata, and the translated text."""
    subset = tasks[tasks.task_type == "translation"]
    if subset.empty:
        print("No translation tasks found.")
        return

    task = subset.iloc[0]
    doc = docs[docs.doc_id == task.doc_id].iloc[0]
    # `metadata` is a JSON blob carrying target language and passage bounds.
    meta = json.loads(task.metadata)
    matches = trans[trans.doc_id == task.doc_id]

    divider = "-" * 60
    print()
    print(divider)
    print("EXAMPLE: Translation")
    print(divider)
    print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
    print(f"Target language: {meta.get('target_language', '?')}")
    print(f"Passage tokens: {meta.get('passage_token_start')}-{meta.get('passage_token_end')}")
    print(f"Question (truncated):")
    print(textwrap.shorten(task.question, width=300, placeholder="..."))
    if not matches.empty:
        first = matches.iloc[0]
        print(f"Translation text (first 300 chars): {first.target_text[:300]}...")
| |
|
| |
|
def show_retrieval_example(docs, tasks):
    """Print the first retrieval task with its token range and expected answer."""
    subset = tasks[tasks.task_type == "retrieval"]
    if subset.empty:
        print("No retrieval tasks found.")
        return

    task = subset.iloc[0]
    doc = docs[docs.doc_id == task.doc_id].iloc[0]
    # Metadata holds the passage token bounds; answer is a JSON-encoded string.
    meta = json.loads(task.metadata)
    answer_text = json.loads(task.answer)

    divider = "-" * 60
    print()
    print(divider)
    print("EXAMPLE: Retrieval")
    print(divider)
    print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
    print(f"Token range: {meta.get('passage_token_start')}-{meta.get('passage_token_end')}")
    print(f"Question: {textwrap.shorten(task.question, width=200, placeholder='...')}")
    print(f"Answer (first 200 chars): {answer_text[:200]}...")
| |
|
| |
|
def verify_offsets(docs):
    """Spot-check that char_offsets correctly map tokens back to text."""
    divider = "-" * 60
    print()
    print(divider)
    print("OFFSET VERIFICATION (first 3 docs, first 10 tokens each)")
    print(divider)
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")

    for idx in range(min(3, len(docs))):
        doc = docs.iloc[idx]
        text = doc.text
        offsets = doc.char_offsets
        token_ids = doc.token_ids
        print(f"\n Doc: {doc.doc_id} ({doc.token_count:,} tokens)")
        for j in range(min(10, len(offsets))):
            start = offsets[j]
            # The last token's character span extends to the end of the text.
            end = offsets[j + 1] if j + 1 < len(offsets) else len(text)
            span = text[start:end]
            decoded = tokenizer.decode([token_ids[j]])
            # Strip before comparing: tokenizers may attach leading whitespace.
            match = "OK" if span.strip() == decoded.strip() else "~"
            print(f" [{j:3d}] offset={start:6d} span={span!r:30s} decoded={decoded!r:30s} {match}")
| |
|
| |
|
def main():
    """CLI entry point: print a dataset summary and one example per task type."""
    parser = argparse.ArgumentParser(description="Explore the prefill dataset")
    parser.add_argument(
        "--repo",
        type=str,
        default=None,
        help="HuggingFace repo ID (e.g. di2ox3/prefill-dataset)",
    )
    parser.add_argument(
        "--verify",
        action="store_true",
        help="Run offset verification (loads tokenizer)",
    )
    args = parser.parse_args()

    docs, tasks, trans = load_tables(args.repo)

    show_summary(docs, tasks, trans)
    show_qa_example(docs, tasks)
    show_translation_example(docs, tasks, trans)
    show_retrieval_example(docs, tasks)

    # Offset verification is opt-in: it downloads and loads a tokenizer.
    if args.verify:
        verify_offsets(docs)

    print()
    print("=" * 60)
    print("Done.")
| |
|
| |
|
if __name__ == "__main__":
    main()
| |
|