viktoroo commited on
Commit
e75ad4a
·
verified ·
1 Parent(s): 1c0757f

Add dataset creation script

Browse files
Files changed (1) hide show
  1. create_dataset.py +174 -0
create_dataset.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Create viktoroo/longbench-pro-128k-plus from caskcsg/LongBench-Pro by:
4
+ - filtering to token_length in {"128k", "256k"}
5
+ - keeping only fields: id, context
6
+ - renaming context -> text
7
+ - pushing the filtered dataset to the (already-existing) public repo
8
+ - uploading this script and a hardcoded README.md into the same dataset repo
9
+
10
+ Requirements:
11
+ pip install -U datasets huggingface_hub
12
+
13
+ Auth:
14
+ export HF_TOKEN=... (must have write access to viktoroo/longbench-pro-128k-plus)
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import os
20
+ import sys
21
+ import tempfile
22
+ from pathlib import Path
23
+
24
+ from datasets import load_dataset, DatasetDict
25
+ from huggingface_hub import HfApi
26
+
27
# Pull variables from a local .env file (e.g. HF_TOKEN) into os.environ
# before require_token() reads them.
from dotenv import load_dotenv
load_dotenv()
29
+
30
+
31
# Hub id of the upstream dataset this script filters.
SOURCE_DATASET = "caskcsg/LongBench-Pro"
# Destination dataset repo; assumed to already exist and be public.
TARGET_REPO = "viktoroo/longbench-pro-128k-plus"  # existing, public
# token_length buckets to keep; examples in any other bucket are dropped.
ALLOWED_TOKEN_LENGTH = {"128k", "256k"}  # values in token_length field
34
+
35
# Dataset card uploaded verbatim to the Hub as README.md
# (YAML front matter followed by a markdown body).
README_MD = """---
license: other
language:
- en
- zh
tags:
- long-context
- benchmark
- evaluation
- rag
pretty_name: LongBench Pro 128k+
---

# LongBench Pro 128k+

This dataset is a filtered subset of **LongBench Pro** (`caskcsg/LongBench-Pro`).

## What is included

Only examples whose `token_length` field is one of:

- `128k`
- `256k`

## Columns

This repo keeps only:

- `id`: example identifier (copied from source)
- `text`: the original `context` field (renamed from `context` → `text`)

All other fields from the source dataset are dropped.

## Intended use

Use this dataset when you want to benchmark long-context behavior specifically at **≥128k** length buckets, while keeping the input surface minimal (`id`, `text`).

## Provenance / attribution

Source dataset: `caskcsg/LongBench-Pro`.

This repo contains a derived subset. Please consult the source dataset card for:
- full task definitions
- original annotations/fields
- licensing/usage terms

## Reproducibility

The filtering logic and transformation used to build this dataset are contained in `create_dataset.py` in this repo.
"""
85
+
86
+
87
def require_token() -> str:
    """Return a Hugging Face auth token read from the environment.

    ``HF_TOKEN`` is consulted first, then ``HUGGINGFACE_TOKEN``; empty
    strings are treated as missing.

    Returns:
        The first non-empty token found.

    Raises:
        RuntimeError: if neither environment variable holds a value.
    """
    for var_name in ("HF_TOKEN", "HUGGINGFACE_TOKEN"):
        candidate = os.environ.get(var_name)
        if candidate:
            return candidate
    raise RuntimeError("Missing HF_TOKEN (or HUGGINGFACE_TOKEN) env var.")
92
+
93
+
94
def filter_and_project(ds: DatasetDict) -> DatasetDict:
    """Filter every split to the allowed token-length buckets and keep id+text.

    For each split: validate the required columns exist, drop rows whose
    ``token_length`` is not in ``ALLOWED_TOKEN_LENGTH``, keep only
    ``id`` and ``context``, and rename ``context`` to ``text``.

    Args:
        ds: the source DatasetDict (all splits).

    Returns:
        A new DatasetDict with the same split names, columns ``id`` and ``text``.

    Raises:
        RuntimeError: if any split lacks ``token_length``, ``context``, or ``id``.
    """
    result = DatasetDict()
    for split, subset in ds.items():
        # Fail fast with a clear message if the source schema changed.
        for required in ("token_length", "context", "id"):
            if required not in subset.column_names:
                raise RuntimeError(f"Split '{split}' has no '{required}' column.")

        kept = subset.filter(lambda row: row["token_length"] in ALLOWED_TOKEN_LENGTH)

        # Project down to id + context, then rename context -> text.
        result[split] = (
            kept.select_columns(["id", "context"]).rename_column("context", "text")
        )

    return result
113
+
114
+
115
def main() -> int:
    """Build the filtered dataset and publish everything to the Hub.

    Steps:
      1. Load the source dataset and filter/project it (id + text only).
      2. Push the result to TARGET_REPO.
      3. Upload the hardcoded README.md and this script for reproducibility.

    Returns:
        0 on success; any failure propagates as an exception.
    """
    token = require_token()

    print(f"Loading source dataset: {SOURCE_DATASET}")
    source = load_dataset(SOURCE_DATASET)  # DatasetDict

    print("Filtering and projecting columns...")
    filtered = filter_and_project(source)

    # Quick stats
    for name, subset in filtered.items():
        print(f"Split '{name}': {len(subset)} rows; columns={subset.column_names}")

    print(f"Pushing dataset to hub: {TARGET_REPO}")
    filtered.push_to_hub(
        TARGET_REPO,
        token=token,
        private=False,
        commit_message="Create/update filtered LongBench Pro subset (128k, 256k) with id+text",
    )

    # Remaining uploads go through the low-level file API.
    api = HfApi(token=token)

    # Path of this script (works when running as a file).
    this_script = Path(__file__).resolve()

    # README_MD lives in memory; write it to a throwaway directory so
    # upload_file can be given a real filesystem path.
    with tempfile.TemporaryDirectory() as scratch:
        readme_file = Path(scratch) / "README.md"
        readme_file.write_text(README_MD, encoding="utf-8")

        print("Uploading README.md...")
        api.upload_file(
            path_or_fileobj=str(readme_file),
            path_in_repo="README.md",
            repo_id=TARGET_REPO,
            repo_type="dataset",
            commit_message="Add dataset README",
        )

    print("Uploading create_dataset.py...")
    api.upload_file(
        path_or_fileobj=str(this_script),
        path_in_repo="create_dataset.py",
        repo_id=TARGET_REPO,
        repo_type="dataset",
        commit_message="Add dataset creation script",
    )

    print("Done.")
    return 0
167
+
168
+
169
if __name__ == "__main__":
    try:
        # sys.exit raises SystemExit, which is not an Exception subclass,
        # so a clean exit is not swallowed by the handler below.
        sys.exit(main())
    except Exception as e:
        # Surface the failure on stderr, then re-raise for a nonzero exit
        # and a full traceback.
        print(f"ERROR: {e}", file=sys.stderr)
        raise