# UVW-2026 / scripts/create_splits.py
# NOTE: the following Hugging Face page-header residue was commented out so
# the module parses as Python — author "rain1024", commit
# "Add Wikidata enrichment and quality scoring scripts" (a0a7929).
#!/usr/bin/env python3
"""
Create train/dev/test splits for UVW 2026 dataset.
UVW 2026: Underthesea Vietnamese Wikipedia Dataset
https://github.com/undertheseanlp/underthesea/issues/896
Uses streaming approach to minimize memory usage.
"""
import json
import random
from pathlib import Path
from tqdm import tqdm
# Directory layout: <repo_root>/data/{processed,splits}, resolved relative
# to this script's location so it works from any working directory.
DATA_DIR = Path(__file__).parent.parent / "data" / "processed"
SPLITS_DIR = Path(__file__).parent.parent / "data" / "splits"
# Input file (with wikidata and quality scores)
INPUT_FILE = DATA_DIR / "uvw_2026_wikidata.jsonl"
# Split ratios
# NOTE: sizes are computed with int() truncation downstream, so any
# rounding remainder lands in the test split.
TRAIN_RATIO = 0.8
DEV_RATIO = 0.1
TEST_RATIO = 0.1
# Random seed for reproducibility
SEED = 42
def count_lines(path: Path) -> int:
    """Count the lines in *path* without loading the file into memory.

    Args:
        path: Text file to count (read as UTF-8).

    Returns:
        The number of lines (newline-delimited records) in the file.
    """
    # sum() over a generator streams the file line by line — same O(1)
    # memory as the manual counter loop, but iterates at C speed.
    with open(path, "r", encoding="utf-8") as f:
        return sum(1 for _ in f)
def create_split_indices(total: int, seed: int = SEED) -> dict[int, str]:
    """Assign each article index to a split ("train", "dev" or "test").

    Shuffles the index range [0, total) with a fixed seed so the
    assignment is reproducible, then carves it by TRAIN_RATIO and
    DEV_RATIO; everything past the dev boundary (including the int()
    rounding remainder) becomes "test".

    Args:
        total: Number of articles (input lines) to assign.
        seed: RNG seed; defaults to the module-level SEED.

    Returns:
        Mapping from 0-based article index to its split name.
        (The previous annotation advertised dict[str, list[int]], but the
        function has always returned this index -> name mapping.)
    """
    # A dedicated Random instance yields the exact same shuffle as
    # random.seed()/random.shuffle() without mutating global RNG state.
    rng = random.Random(seed)
    indices = list(range(total))
    rng.shuffle(indices)

    train_end = int(total * TRAIN_RATIO)
    dev_end = train_end + int(total * DEV_RATIO)

    # Position in the shuffled order decides the split; the dict key is
    # the original (unshuffled) article index.
    index_to_split: dict[int, str] = {}
    for pos, idx in enumerate(indices):
        if pos < train_end:
            index_to_split[idx] = "train"
        elif pos < dev_end:
            index_to_split[idx] = "dev"
        else:
            index_to_split[idx] = "test"
    return index_to_split
def main():
    """Create dataset splits using streaming.

    Pipeline:
      1. Count input lines (articles) without loading them.
      2. Build a reproducible index -> split-name mapping.
      3. Stream the input once, routing each raw line to its split's
         JSONL file and a naive sentence-per-line plaintext file.
      4. Write split_info.json recording seed, ratios and counts.
    """
    # Guard: the enriched JSONL must exist (produced by add_wikidata.py).
    if not INPUT_FILE.exists():
        print(f"Dataset not found: {INPUT_FILE}")
        print("Please run add_wikidata.py first.")
        return
    print(f"Input: {INPUT_FILE}")
    print(f"Output: {SPLITS_DIR}")
    # Count total articles
    print("\nCounting articles...")
    total = count_lines(INPUT_FILE)
    print(f"Total articles: {total:,}")
    # Create split indices
    print("\nCreating split indices...")
    index_to_split = create_split_indices(total)
    # Calculate split sizes by tallying the mapping's values.
    split_counts = {"train": 0, "dev": 0, "test": 0}
    for split_name in index_to_split.values():
        split_counts[split_name] += 1
    for split_name, count in split_counts.items():
        pct = count / total * 100
        print(f" {split_name}: {count:,} articles ({pct:.1f}%)")
    # Create output directory
    SPLITS_DIR.mkdir(parents=True, exist_ok=True)
    # Open all output files up front so the input is streamed exactly once.
    # NOTE(review): these three handles are opened before the try block
    # below — if opening a plaintext file then fails, they leak. Probably
    # acceptable for a one-shot script; confirm.
    print("\nWriting splits (streaming)...")
    split_files = {
        "train": open(SPLITS_DIR / "train.jsonl", "w", encoding="utf-8"),
        "dev": open(SPLITS_DIR / "dev.jsonl", "w", encoding="utf-8"),
        "test": open(SPLITS_DIR / "test.jsonl", "w", encoding="utf-8"),
    }
    plaintext_dirs = {}
    plaintext_files = {}
    for split_name in ["train", "dev", "test"]:
        pdir = SPLITS_DIR / "plaintext" / split_name
        pdir.mkdir(parents=True, exist_ok=True)
        plaintext_dirs[split_name] = pdir
        plaintext_files[split_name] = open(pdir / "sentences.txt", "w", encoding="utf-8")
    try:
        with open(INPUT_FILE, "r", encoding="utf-8") as fin:
            # idx is the 0-based line number — exactly the key space of
            # index_to_split, so every line has a split assignment.
            for idx, line in enumerate(tqdm(fin, total=total, desc="Processing")):
                split_name = index_to_split[idx]
                article = json.loads(line)
                # Write JSONL (the raw line, preserving original formatting).
                # NOTE(review): `line` keeps its trailing "\n" except
                # possibly for the file's last line — confirm the input
                # always ends with a newline.
                split_files[split_name].write(line)
                # Write plaintext sentences: crude split on "." after
                # flattening newlines; fragments of <= 10 chars are dropped.
                content = article["content"]
                for sent in content.replace("\n", " ").split("."):
                    sent = sent.strip()
                    if sent and len(sent) > 10:
                        plaintext_files[split_name].write(sent + ".\n")
    finally:
        # Close every output handle even if processing failed midway.
        for f in split_files.values():
            f.close()
        for f in plaintext_files.values():
            f.close()
    # Save split statistics alongside the splits for reproducibility.
    stats = {
        "seed": SEED,
        "source": str(INPUT_FILE.name),
        "ratios": {
            "train": TRAIN_RATIO,
            "dev": DEV_RATIO,
            "test": TEST_RATIO,
        },
        "counts": split_counts,
    }
    stats_path = SPLITS_DIR / "split_info.json"
    with open(stats_path, "w", encoding="utf-8") as f:
        json.dump(stats, f, indent=2)
    print(f"\nSplit info saved to: {stats_path}")
    print("\nOutput files:")
    for split_name in ["train", "dev", "test"]:
        print(f" - {SPLITS_DIR / f'{split_name}.jsonl'}")
        print(f" - {plaintext_dirs[split_name] / 'sentences.txt'}")
    print("\nSplit creation complete!")
# Script entry point: run the split pipeline only when executed directly.
if __name__ == "__main__":
    main()