# longbench-pro-128k-plus / create_dataset.py
# Uploaded by viktoroo — "Add dataset creation script" (commit e75ad4a, verified)
#!/usr/bin/env python3
"""
Create viktoroo/longbench-pro-128k-plus from caskcsg/LongBench-Pro by:
- filtering to token_length in {"128k", "256k"}
- keeping only fields: id, context
- renaming context -> text
- pushing the filtered dataset to the (already-existing) public repo
- uploading this script and a hardcoded README.md into the same dataset repo
Requirements:
pip install -U datasets huggingface_hub
Auth:
export HF_TOKEN=... (must have write access to viktoroo/longbench-pro-128k-plus)
"""
from __future__ import annotations
import os
import sys
import tempfile
from pathlib import Path
from datasets import load_dataset, DatasetDict
from huggingface_hub import HfApi
from dotenv import load_dotenv
load_dotenv()
# Hub coordinates: where we read from and where the filtered subset is written.
SOURCE_DATASET = "caskcsg/LongBench-Pro"
TARGET_REPO = "viktoroo/longbench-pro-128k-plus" # existing, public
# Length buckets to keep; compared against each example's `token_length` string.
ALLOWED_TOKEN_LENGTH = {"128k", "256k"} # values in token_length field
# Dataset card uploaded verbatim as README.md (YAML front matter + markdown body).
# NOTE: this is runtime data pushed to the Hub — edit with care.
README_MD = """---
license: other
language:
- en
- zh
tags:
- long-context
- benchmark
- evaluation
- rag
pretty_name: LongBench Pro 128k+
---
# LongBench Pro 128k+
This dataset is a filtered subset of **LongBench Pro** (`caskcsg/LongBench-Pro`).
## What is included
Only examples whose `token_length` field is one of:
- `128k`
- `256k`
## Columns
This repo keeps only:
- `id`: example identifier (copied from source)
- `text`: the original `context` field (renamed from `context` → `text`)
All other fields from the source dataset are dropped.
## Intended use
Use this dataset when you want to benchmark long-context behavior specifically at **≥128k** length buckets, while keeping the input surface minimal (`id`, `text`).
## Provenance / attribution
Source dataset: `caskcsg/LongBench-Pro`.
This repo contains a derived subset. Please consult the source dataset card for:
- full task definitions
- original annotations/fields
- licensing/usage terms
## Reproducibility
The filtering logic and transformation used to build this dataset are contained in `create_dataset.py` in this repo.
"""
def require_token() -> str:
    """Return a Hugging Face write token from the environment.

    Checks ``HF_TOKEN`` first, then ``HUGGINGFACE_TOKEN``; empty strings
    count as unset.

    Raises:
        RuntimeError: if neither environment variable holds a value.
    """
    for var_name in ("HF_TOKEN", "HUGGINGFACE_TOKEN"):
        value = os.environ.get(var_name)
        if value:
            return value
    raise RuntimeError("Missing HF_TOKEN (or HUGGINGFACE_TOKEN) env var.")
def filter_and_project(ds: DatasetDict) -> DatasetDict:
    """Filter each split to the allowed length buckets and keep only id+text.

    For every split this:
      * validates that the 'token_length', 'context' and 'id' columns exist,
      * drops rows whose token_length is not in ALLOWED_TOKEN_LENGTH,
      * projects down to ['id', 'context'] and renames 'context' -> 'text'.

    Raises:
        RuntimeError: if any required column is missing from a split.
    """
    result = DatasetDict()
    for split_name, split_data in ds.items():
        # Fail fast with a precise message if the source schema changed.
        for required in ("token_length", "context", "id"):
            if required not in split_data.column_names:
                raise RuntimeError(f"Split '{split_name}' has no '{required}' column.")
        kept = split_data.filter(
            lambda row: row["token_length"] in ALLOWED_TOKEN_LENGTH
        )
        # Keep only id + context, then rename context -> text
        result[split_name] = kept.select_columns(["id", "context"]).rename_column(
            "context", "text"
        )
    return result
def main() -> int:
    """Build the filtered dataset and publish it plus its docs to the Hub.

    Steps:
      1. Load SOURCE_DATASET and filter/project it (id + text, >=128k only).
      2. Push the resulting DatasetDict to TARGET_REPO.
      3. Upload README.md (rendered from the in-memory template) and this
         script itself, for reproducibility.

    Returns:
        0 on success (suitable for ``sys.exit``).

    Raises:
        RuntimeError: if no HF token is configured or the source schema is
            missing required columns.
    """
    token = require_token()
    print(f"Loading source dataset: {SOURCE_DATASET}")
    ds = load_dataset(SOURCE_DATASET)  # DatasetDict
    print("Filtering and projecting columns...")
    out = filter_and_project(ds)
    # Quick stats
    for split, d in out.items():
        print(f"Split '{split}': {len(d)} rows; columns={d.column_names}")
    print(f"Pushing dataset to hub: {TARGET_REPO}")
    out.push_to_hub(
        TARGET_REPO,
        token=token,
        private=False,
        commit_message="Create/update filtered LongBench Pro subset (128k, 256k) with id+text",
    )
    # Upload README and script to the dataset repo
    api = HfApi(token=token)
    # upload_file accepts raw bytes for path_or_fileobj, so the README can be
    # pushed straight from memory — no temp directory/file round-trip needed.
    print("Uploading README.md...")
    api.upload_file(
        path_or_fileobj=README_MD.encode("utf-8"),
        path_in_repo="README.md",
        repo_id=TARGET_REPO,
        repo_type="dataset",
        commit_message="Add dataset README",
    )
    # Upload this very script (works when running as a file).
    script_path = Path(__file__).resolve()
    print("Uploading create_dataset.py...")
    api.upload_file(
        path_or_fileobj=str(script_path),
        path_in_repo="create_dataset.py",
        repo_id=TARGET_REPO,
        repo_type="dataset",
        commit_message="Add dataset creation script",
    )
    print("Done.")
    return 0
if __name__ == "__main__":
    # Print a short error line on stderr for quick scanning, then let the
    # full traceback propagate; SystemExit from sys.exit() is not caught here.
    try:
        sys.exit(main())
    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        raise