# UDD-1 / src/upload_to_hf.py
# Author: rain1024 — commit f5d0a0d:
#   "Expand UDD-1 to 40K sentences across 5 domains"
"""
Upload UD dataset to HuggingFace Hub.
Dataset: undertheseanlp/UDD-v0.1
Loads train/dev/test JSONL splits and uploads as DatasetDict with domain field.
Usage:
export $(cat .env | xargs) && python upload_to_hf.py
"""
import json
import os
from collections import Counter
from os.path import expanduser, join

from datasets import Dataset, DatasetDict
from huggingface_hub import HfApi, login
# Maps a sent_id prefix to its corpus domain. Prefixes are matched with
# str.startswith; "uvb-f-" and "uvb-n-" are disjoint, so order is irrelevant.
DOMAIN_MAP = {
    "vlc-": "legal",
    "uvn-": "news",
    "uvw-": "wikipedia",
    "uvb-f-": "fiction",
    "uvb-n-": "non-fiction",
}


def get_domain(sent_id):
    """Return the corpus domain for *sent_id*, or "unknown" if no prefix matches."""
    matches = (
        domain
        for prefix, domain in DOMAIN_MAP.items()
        if sent_id.startswith(prefix)
    )
    return next(matches, "unknown")
def load_jsonl(filepath):
    """Load a JSONL file and tag every row with its corpus domain.

    Args:
        filepath: Path to a JSONL file; each non-blank line must be one
            JSON object, expected to carry a "sent_id" key.

    Returns:
        A list of dicts, each augmented with a "domain" field derived from
        the sent_id prefix (rows with a missing or unrecognized sent_id
        get "unknown").
    """
    data = []
    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank/whitespace-only lines (e.g. a trailing
                # newline), which would otherwise crash json.loads.
                continue
            row = json.loads(line)
            row["domain"] = get_domain(row.get("sent_id", ""))
            data.append(row)
    return data
def main():
    """Load the UDD train/dev/test JSONL splits and push them to the Hub.

    Authentication uses HF_TOKEN from the environment when set (otherwise
    falls back to cached credentials). The data location defaults to
    ~/Downloads/UD_Vietnamese-UUD-v0.1 and can be overridden with the
    UDD_SOURCE_FOLDER environment variable. Missing split files are
    skipped with a warning; if no split is found the function returns
    without pushing anything.
    """
    # Login with token from environment
    token = os.environ.get("HF_TOKEN")
    if token:
        print("Logging in with HF_TOKEN...")
        login(token=token)
    else:
        print("Warning: HF_TOKEN not set. Using cached credentials.")

    # Optional override of the data location; default matches the release layout.
    source_folder = os.environ.get(
        "UDD_SOURCE_FOLDER",
        expanduser("~/Downloads/UD_Vietnamese-UUD-v0.1"),
    )
    readme_file = join(source_folder, "README.md")

    # Load all splits (dev.jsonl is published under the "validation" name).
    splits = {}
    for split_name, filename in [("train", "train.jsonl"), ("validation", "dev.jsonl"), ("test", "test.jsonl")]:
        filepath = join(source_folder, filename)
        if os.path.isfile(filepath):
            print(f"Loading {split_name} from {filepath}...")
            data = load_jsonl(filepath)
            splits[split_name] = Dataset.from_list(data)
            print(f" {split_name}: {len(data)} sentences")
        else:
            print(f"Warning: {filepath} not found, skipping {split_name} split")

    if not splits:
        print("Error: No data files found!")
        return

    # Create DatasetDict
    print("\nCreating HuggingFace DatasetDict...")
    dataset_dict = DatasetDict(splits)
    print(f"Dataset: {dataset_dict}")
    for split_name, ds in dataset_dict.items():
        print(f" {split_name}: {len(ds)} rows, features: {list(ds.features.keys())}")

    # Print per-split domain distribution — sanity check before pushing.
    for split_name, ds in dataset_dict.items():
        domains = Counter(row["domain"] for row in ds)
        domain_str = ", ".join(f"{d}: {c}" for d, c in sorted(domains.items()))
        print(f" {split_name} domains: {domain_str}")

    # Push to HuggingFace Hub
    repo_id = "undertheseanlp/UDD-v0.1"
    print(f"\nPushing to HuggingFace Hub: {repo_id}")
    dataset_dict.push_to_hub(
        repo_id,
        private=False,
        commit_message="Update: 40K sentences from 5 domains (legal, news, wikipedia, fiction, non-fiction)"
    )

    # Upload README.md as the dataset card, if present alongside the data.
    if os.path.isfile(readme_file):
        print("Uploading README.md...")
        api = HfApi()
        api.upload_file(
            path_or_fileobj=readme_file,
            path_in_repo="README.md",
            repo_id=repo_id,
            repo_type="dataset",
            commit_message="Update README with dataset card"
        )

    print(f"\nDone! Dataset available at: https://huggingface.co/datasets/{repo_id}")
# Script entry point: run the upload only when executed directly.
if __name__ == "__main__":
    main()