#!/usr/bin/env python3
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "datasets>=3.0,<4",
#     "transformers>=4.51,<5",
#     "pyarrow>=17.0,<19",
# ]
# ///
"""
Load the prefill dataset and generate example prompts for each task type.
Usage:
uv run generate_examples.py # from local files
uv run generate_examples.py --repo di2ox3/prefill-dataset # from HuggingFace
"""
from __future__ import annotations

import argparse
import json
import textwrap
from pathlib import Path

import pyarrow.parquet as pq
from transformers import AutoTokenizer


def load_tables(repo: str | None):
    """Return the (documents, tasks, translations) tables as pandas DataFrames."""
    if repo:
        from datasets import load_dataset

        docs = load_dataset(repo, data_files="data/documents.parquet", split="train")
        tasks = load_dataset(repo, data_files="data/tasks.parquet", split="train")
        trans = load_dataset(repo, data_files="data/translations.parquet", split="train")
        return docs.to_pandas(), tasks.to_pandas(), trans.to_pandas()
    else:
        base = Path(__file__).parent / "output"
        docs = pq.read_table(base / "documents.parquet").to_pandas()
        tasks = pq.read_table(base / "tasks.parquet").to_pandas()
        trans = pq.read_table(base / "translations.parquet").to_pandas()
        return docs, tasks, trans


def show_summary(docs, tasks, trans):
    print("=" * 60)
    print("DATASET SUMMARY")
    print("=" * 60)
    total_tokens = docs["token_count"].sum()
    print(f"Documents: {len(docs)}")
    print(f"Total tokens: {total_tokens:,} ({total_tokens / 1e6:.1f}M)")
    print(f"Tasks: {len(tasks)}")
    print(f"Translations: {len(trans)}")
    print()
    print("Documents by source:")
    for src, group in docs.groupby("source"):
        print(f" {src}: {len(group)} docs, {group['token_count'].sum():,} tokens")
    print()
    print("Tasks by type:")
    for tt, group in tasks.groupby("task_type"):
        print(f" {tt}: {len(group)}")
    print()
    print("Translations by language:")
    for lang, group in trans.groupby("target_language"):
        print(f" {lang}: {len(group)}")


def show_qa_example(docs, tasks):
    qa_tasks = tasks[tasks.task_type == "qa"]
    if qa_tasks.empty:
        print("No QA tasks found.")
        return
    task = qa_tasks.iloc[0]
    doc = docs[docs.doc_id == task.doc_id].iloc[0]
    answers = json.loads(task.answer)
    print()
    print("-" * 60)
    print("EXAMPLE: Question Answering")
    print("-" * 60)
    print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
    print(f"Question: {task.question}")
    print(f"Answers: {answers}")
    print(f"Context (first 300 chars): {doc.text[:300]}...")


def show_translation_example(docs, tasks, trans):
    tr_tasks = tasks[tasks.task_type == "translation"]
    if tr_tasks.empty:
        print("No translation tasks found.")
        return
    task = tr_tasks.iloc[0]
    doc = docs[docs.doc_id == task.doc_id].iloc[0]
    meta = json.loads(task.metadata)
    tr_row = trans[trans.doc_id == task.doc_id]
    print()
    print("-" * 60)
    print("EXAMPLE: Translation")
    print("-" * 60)
    print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
    print(f"Target language: {meta.get('target_language', '?')}")
    print(f"Passage tokens: {meta.get('passage_token_start')}-{meta.get('passage_token_end')}")
    print("Question (truncated):")
    print(textwrap.shorten(task.question, width=300, placeholder="..."))
    if not tr_row.empty:
        tr = tr_row.iloc[0]
        print(f"Translation text (first 300 chars): {tr.target_text[:300]}...")


def show_retrieval_example(docs, tasks):
    ret_tasks = tasks[tasks.task_type == "retrieval"]
    if ret_tasks.empty:
        print("No retrieval tasks found.")
        return
    task = ret_tasks.iloc[0]
    doc = docs[docs.doc_id == task.doc_id].iloc[0]
    meta = json.loads(task.metadata)
    answer_text = json.loads(task.answer)
    print()
    print("-" * 60)
    print("EXAMPLE: Retrieval")
    print("-" * 60)
    print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
    print(f"Token range: {meta.get('passage_token_start')}-{meta.get('passage_token_end')}")
    print(f"Question: {textwrap.shorten(task.question, width=200, placeholder='...')}")
    print(f"Answer (first 200 chars): {answer_text[:200]}...")


def verify_offsets(docs):
    """Spot-check that char_offsets correctly map tokens back to text."""
    print()
    print("-" * 60)
    print("OFFSET VERIFICATION (first 3 docs, first 10 tokens each)")
    print("-" * 60)
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
    for i in range(min(3, len(docs))):
        doc = docs.iloc[i]
        text = doc.text
        offsets = doc.char_offsets
        token_ids = doc.token_ids
        print(f"\n Doc: {doc.doc_id} ({doc.token_count:,} tokens)")
        for j in range(min(10, len(offsets))):
            start = offsets[j]
            end = offsets[j + 1] if j + 1 < len(offsets) else len(text)
            span = text[start:end]
            decoded = tokenizer.decode([token_ids[j]])
            # "OK" when the offset-derived span matches the decoded token
            # (modulo surrounding whitespace); "~" flags any other mismatch.
            match = "OK" if span.strip() == decoded.strip() else "~"
            print(f" [{j:3d}] offset={start:6d} span={span!r:30s} decoded={decoded!r:30s} {match}")


def main():
    parser = argparse.ArgumentParser(description="Explore the prefill dataset")
    parser.add_argument("--repo", type=str, default=None,
                        help="HuggingFace repo ID (e.g. di2ox3/prefill-dataset)")
    parser.add_argument("--verify", action="store_true",
                        help="Run offset verification (loads tokenizer)")
    args = parser.parse_args()

    docs, tasks, trans = load_tables(args.repo)
    show_summary(docs, tasks, trans)
    show_qa_example(docs, tasks)
    show_translation_example(docs, tasks, trans)
    show_retrieval_example(docs, tasks)
    if args.verify:
        verify_offsets(docs)
    print()
    print("=" * 60)
    print("Done.")


if __name__ == "__main__":
    main()