di2ox3 committed
Commit 2d2dc44 · verified · 1 Parent(s): a7955f0

Upload generate_examples.py with huggingface_hub

Files changed (1)
  1. generate_examples.py +176 -0
generate_examples.py ADDED
@@ -0,0 +1,176 @@
+ #!/usr/bin/env python3
+ # /// script
+ # requires-python = ">=3.11"
+ # dependencies = [
+ #     "datasets>=3.0,<4",
+ #     "transformers>=4.51,<5",
+ #     "pyarrow>=17.0,<19",
+ # ]
+ # ///
+ """
+ Load the prefill dataset and generate example prompts for each task type.
+
+ Usage:
+     uv run generate_examples.py                                # from local files
+     uv run generate_examples.py --repo di2ox3/prefill-dataset  # from HuggingFace
+ """
+ from __future__ import annotations
+
+ import argparse
+ import json
+ import textwrap
+ from pathlib import Path
+
+ import pyarrow.parquet as pq
+ from transformers import AutoTokenizer
+
+
+ def load_tables(repo: str | None):
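+     # Two loading paths: pull the parquet files from the Hub when a repo ID
+     # is given, otherwise read the local ./output directory.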
+     if repo:
+         from datasets import load_dataset
+         docs = load_dataset(repo, data_files="data/documents.parquet", split="train")
+         tasks = load_dataset(repo, data_files="data/tasks.parquet", split="train")
+         trans = load_dataset(repo, data_files="data/translations.parquet", split="train")
+         return docs.to_pandas(), tasks.to_pandas(), trans.to_pandas()
+     else:
+         base = Path(__file__).parent / "output"
+         docs = pq.read_table(base / "documents.parquet").to_pandas()
+         tasks = pq.read_table(base / "tasks.parquet").to_pandas()
+         trans = pq.read_table(base / "translations.parquet").to_pandas()
+         return docs, tasks, trans
+
+
+ def show_summary(docs, tasks, trans):
+     print("=" * 60)
+     print("DATASET SUMMARY")
+     print("=" * 60)
+     total_tokens = docs["token_count"].sum()
+     print(f"Documents: {len(docs)}")
+     print(f"Total tokens: {total_tokens:,} ({total_tokens / 1e6:.1f}M)")
+     print(f"Tasks: {len(tasks)}")
+     print(f"Translations: {len(trans)}")
+     print()
+     print("Documents by source:")
+     for src, group in docs.groupby("source"):
+         print(f"  {src}: {len(group)} docs, {group['token_count'].sum():,} tokens")
+     print()
+     print("Tasks by type:")
+     for tt, group in tasks.groupby("task_type"):
+         print(f"  {tt}: {len(group)}")
+     print()
+     print("Translations by language:")
+     for lang, group in trans.groupby("target_language"):
+         print(f"  {lang}: {len(group)}")
+
+
+ def show_qa_example(docs, tasks):
+     qa_tasks = tasks[tasks.task_type == "qa"]
+     if qa_tasks.empty:
+         print("No QA tasks found.")
+         return
+     task = qa_tasks.iloc[0]
+     doc = docs[docs.doc_id == task.doc_id].iloc[0]
+     answers = json.loads(task.answer)  # answers are stored as a JSON-encoded list
+
+     print()
+     print("-" * 60)
+     print("EXAMPLE: Question Answering")
+     print("-" * 60)
+     print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
+     print(f"Question: {task.question}")
+     print(f"Answers: {answers}")
+     print(f"Context (first 300 chars): {doc.text[:300]}...")
+
+
+ def show_translation_example(docs, tasks, trans):
+     tr_tasks = tasks[tasks.task_type == "translation"]
+     if tr_tasks.empty:
+         print("No translation tasks found.")
+         return
+     task = tr_tasks.iloc[0]
+     doc = docs[docs.doc_id == task.doc_id].iloc[0]
+     meta = json.loads(task.metadata)
+     tr_row = trans[trans.doc_id == task.doc_id]
+
+     print()
+     print("-" * 60)
+     print("EXAMPLE: Translation")
+     print("-" * 60)
+     print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
+     print(f"Target language: {meta.get('target_language', '?')}")
+     print(f"Passage tokens: {meta.get('passage_token_start')}-{meta.get('passage_token_end')}")
+     print("Question (truncated):")
+     print(textwrap.shorten(task.question, width=300, placeholder="..."))
+     if not tr_row.empty:
+         tr = tr_row.iloc[0]
+         print(f"Translation text (first 300 chars): {tr.target_text[:300]}...")
+
+
+ def show_retrieval_example(docs, tasks):
+     ret_tasks = tasks[tasks.task_type == "retrieval"]
+     if ret_tasks.empty:
+         print("No retrieval tasks found.")
+         return
+     task = ret_tasks.iloc[0]
+     doc = docs[docs.doc_id == task.doc_id].iloc[0]
+     meta = json.loads(task.metadata)
+     answer_text = json.loads(task.answer)
+
+     print()
+     print("-" * 60)
+     print("EXAMPLE: Retrieval")
+     print("-" * 60)
+     print(f"Document: {doc.title} ({doc.token_count:,} tokens)")
+     print(f"Token range: {meta.get('passage_token_start')}-{meta.get('passage_token_end')}")
+     print(f"Question: {textwrap.shorten(task.question, width=200, placeholder='...')}")
+     print(f"Answer (first 200 chars): {answer_text[:200]}...")
+
+
+ def verify_offsets(docs):
+     """Spot-check that char_offsets correctly map tokens back to text."""
+     print()
+     print("-" * 60)
+     print("OFFSET VERIFICATION (first 3 docs, first 10 tokens each)")
+     print("-" * 60)
+     # Decode with the tokenizer hard-coded here; it must match the one that
+     # produced the dataset's token_ids for the check to be meaningful.
+     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
+
+     for i in range(min(3, len(docs))):
+         doc = docs.iloc[i]
+         text = doc.text
+         offsets = doc.char_offsets
+         token_ids = doc.token_ids
+         print(f"\n  Doc: {doc.doc_id} ({doc.token_count:,} tokens)")
+         for j in range(min(10, len(offsets))):
+             start = offsets[j]
+             end = offsets[j + 1] if j + 1 < len(offsets) else len(text)
+             span = text[start:end]
+             decoded = tokenizer.decode([token_ids[j]])
+             match = "OK" if span.strip() == decoded.strip() else "~"
+             print(f"  [{j:3d}] offset={start:6d} span={span!r:30s} decoded={decoded!r:30s} {match}")
+
+
+ def main():
+     parser = argparse.ArgumentParser(description="Explore the prefill dataset")
+     parser.add_argument("--repo", type=str, default=None,
+                         help="HuggingFace repo ID (e.g. di2ox3/prefill-dataset)")
+     parser.add_argument("--verify", action="store_true",
+                         help="Run offset verification (loads tokenizer)")
+     args = parser.parse_args()
+
+     docs, tasks, trans = load_tables(args.repo)
+
+     show_summary(docs, tasks, trans)
+     show_qa_example(docs, tasks)
+     show_translation_example(docs, tasks, trans)
+     show_retrieval_example(docs, tasks)
+
+     if args.verify:
+         verify_offsets(docs)
+
+     print()
+     print("=" * 60)
+     print("Done.")
+
+
+ if __name__ == "__main__":
+     main()
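
For a quick look at the data without running the whole script, the tables can also be loaded directly with `datasets`. A minimal sketch, assuming the repo layout the script reads (`data/tasks.parquet` with `task_type`, `question`, and a JSON-encoded `answer` column):

from datasets import load_dataset
import json

# Pull only the tasks table from the Hub (repo ID as in the script's usage note).
tasks = load_dataset("di2ox3/prefill-dataset",
                     data_files="data/tasks.parquet", split="train")

# Keep QA tasks and decode the JSON-encoded answer list for the first one.
qa = tasks.filter(lambda row: row["task_type"] == "qa")
print(qa[0]["question"])
print(json.loads(qa[0]["answer"]))

The column names mirror those the script reads; anything beyond that is an assumption about the repo.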