Datasets:
File size: 4,637 Bytes
a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 a0a7929 a9e6fc2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 |
#!/usr/bin/env python3
"""
Create train/dev/test splits for UVW 2026 dataset.
UVW 2026: Underthesea Vietnamese Wikipedia Dataset
https://github.com/undertheseanlp/underthesea/issues/896
Uses streaming approach to minimize memory usage.
"""
import json
import random
from contextlib import ExitStack
from pathlib import Path

from tqdm import tqdm
# Paths are resolved relative to this script: two levels up is presumably
# the repo root -- TODO confirm against the actual project layout.
DATA_DIR = Path(__file__).parent.parent / "data" / "processed"
SPLITS_DIR = Path(__file__).parent.parent / "data" / "splits"
# Input file (with wikidata and quality scores)
INPUT_FILE = DATA_DIR / "uvw_2026_wikidata.jsonl"
# Split ratios. TEST_RATIO is recorded for the stats file only: the test
# split actually receives everything left after the train and dev cutoffs
# computed in create_split_indices().
TRAIN_RATIO = 0.8
DEV_RATIO = 0.1
TEST_RATIO = 0.1
# Random seed for reproducibility (used by create_split_indices's shuffle)
SEED = 42
def count_lines(path: Path) -> int:
    """Return the number of lines in *path*, streaming so memory stays flat."""
    with open(path, "r", encoding="utf-8") as handle:
        return sum(1 for _ in handle)
def create_split_indices(total: int, seed: int = SEED) -> dict[int, str]:
    """Map each article index in [0, total) to a split name.

    Shuffles the indices with a fixed seed so the assignment is
    reproducible, then gives the first TRAIN_RATIO of shuffled positions
    to "train", the next DEV_RATIO to "dev", and the remainder to "test".
    Only indices are handled, so no article data is loaded into memory.

    Args:
        total: Number of articles (lines) in the input file.
        seed: Random seed controlling the shuffle.

    Returns:
        Dict mapping original line index -> split name
        ("train" | "dev" | "test").
        (Note: the previous annotation dict[str, list[int]] was wrong --
        keys are int indices, values are split-name strings.)
    """
    random.seed(seed)
    indices = list(range(total))
    random.shuffle(indices)
    train_end = int(total * TRAIN_RATIO)
    dev_end = train_end + int(total * DEV_RATIO)
    # Position in the shuffled order decides the split; the stored key is
    # the article's original line index so the writer can stream in order.
    index_to_split: dict[int, str] = {}
    for position, idx in enumerate(indices):
        if position < train_end:
            index_to_split[idx] = "train"
        elif position < dev_end:
            index_to_split[idx] = "dev"
        else:
            index_to_split[idx] = "test"
    return index_to_split
def main():
    """Create train/dev/test splits from INPUT_FILE using streaming.

    Reads the input JSONL line by line, routes each article to its
    split's JSONL file and a plaintext sentences file, then writes split
    statistics to SPLITS_DIR / "split_info.json". Memory use stays flat
    because only one line is held at a time.
    """
    if not INPUT_FILE.exists():
        print(f"Dataset not found: {INPUT_FILE}")
        print("Please run add_wikidata.py first.")
        return
    print(f"Input: {INPUT_FILE}")
    print(f"Output: {SPLITS_DIR}")
    # Count total articles (needed to size the splits before streaming)
    print("\nCounting articles...")
    total = count_lines(INPUT_FILE)
    print(f"Total articles: {total:,}")
    if total == 0:
        # Guard: the percentage report below divides by `total`.
        print("Input file is empty; nothing to split.")
        return
    # Create split indices
    print("\nCreating split indices...")
    index_to_split = create_split_indices(total)
    # Calculate and report split sizes
    split_counts = {"train": 0, "dev": 0, "test": 0}
    for name in index_to_split.values():
        split_counts[name] += 1
    for split_name, count in split_counts.items():
        pct = count / total * 100
        print(f" {split_name}: {count:,} articles ({pct:.1f}%)")
    # Create output directory
    SPLITS_DIR.mkdir(parents=True, exist_ok=True)
    print("\nWriting splits (streaming)...")
    plaintext_dirs = {}
    # ExitStack guarantees every handle opened so far is closed even if a
    # later open() or any write fails partway through (the previous code
    # leaked the JSONL handles if a plaintext open() raised).
    with ExitStack() as stack:
        split_files = {}
        plaintext_files = {}
        for split_name in ("train", "dev", "test"):
            split_files[split_name] = stack.enter_context(
                open(SPLITS_DIR / f"{split_name}.jsonl", "w", encoding="utf-8")
            )
            pdir = SPLITS_DIR / "plaintext" / split_name
            pdir.mkdir(parents=True, exist_ok=True)
            plaintext_dirs[split_name] = pdir
            plaintext_files[split_name] = stack.enter_context(
                open(pdir / "sentences.txt", "w", encoding="utf-8")
            )
        with open(INPUT_FILE, "r", encoding="utf-8") as fin:
            for idx, line in enumerate(tqdm(fin, total=total, desc="Processing")):
                split_name = index_to_split[idx]
                article = json.loads(line)
                # Write the raw JSONL line through unchanged
                split_files[split_name].write(line)
                # Naive sentence split on "." after flattening newlines;
                # fragments of 10 chars or fewer are dropped as noise.
                content = article["content"]
                for sent in content.replace("\n", " ").split("."):
                    sent = sent.strip()
                    if sent and len(sent) > 10:
                        plaintext_files[split_name].write(sent + ".\n")
    # Save split statistics so the split is auditable/reproducible
    stats = {
        "seed": SEED,
        "source": str(INPUT_FILE.name),
        "ratios": {
            "train": TRAIN_RATIO,
            "dev": DEV_RATIO,
            "test": TEST_RATIO,
        },
        "counts": split_counts,
    }
    stats_path = SPLITS_DIR / "split_info.json"
    with open(stats_path, "w", encoding="utf-8") as f:
        json.dump(stats, f, indent=2)
    print(f"\nSplit info saved to: {stats_path}")
    print("\nOutput files:")
    for split_name in ["train", "dev", "test"]:
        print(f" - {SPLITS_DIR / f'{split_name}.jsonl'}")
        print(f" - {plaintext_dirs[split_name] / 'sentences.txt'}")
    print("\nSplit creation complete!")
# Entry-point guard: lets the module be imported (e.g. for testing)
# without triggering the split.
if __name__ == "__main__":
    main()
|