#!/usr/bin/env python3
"""
Extract and clean articles from Vietnamese Wikipedia dump.
UVW 2026: Underthesea Vietnamese Wikipedia Dataset
https://github.com/undertheseanlp/underthesea/issues/896
"""
import bz2
import json
import re
import unicodedata
from pathlib import Path
from typing import Iterator, Optional
from xml.etree import ElementTree as ET
from tqdm import tqdm
# Project-relative data directories (this script is assumed to live one
# level below the project root, e.g. scripts/extract_wikipedia.py).
RAW_DIR = Path(__file__).parent.parent / "data" / "raw"
OUTPUT_DIR = Path(__file__).parent.parent / "data" / "processed"
# MediaWiki XML export namespace used to qualify element tags when
# parsing the dump (export schema version 0.11 as of 2026).
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.11/}"
# (pattern, replacement) pairs applied in order by clean_wiki_markup()
# to strip MediaWiki markup.  Order matters: refs, comments, and tables
# are removed before the generic HTML-tag pattern, and piped internal
# links before plain ones.  NOTE(review): the template pattern stops at
# the first "}" so nested templates ({{a {{b}} c}}) are not fully
# removed — confirm whether the caller compensates.
WIKI_PATTERNS = [
(r"\{\{[^}]+\}\}", ""), # Templates, e.g. {{Infobox ...}}
(r"\[\[Category:[^\]]+\]\]", ""), # Categories
(r"\[\[Thể loại:[^\]]+\]\]", ""), # Vietnamese categories
(r"\[\[File:[^\]]+\]\]", ""), # Files
(r"\[\[Tập tin:[^\]]+\]\]", ""), # Vietnamese files
(r"\[\[Image:[^\]]+\]\]", ""), # Images
(r"\[\[Hình:[^\]]+\]\]", ""), # Vietnamese images
(r"<ref[^>]*>.*?</ref>", ""), # References (paired <ref>...</ref>)
(r"<ref[^/]*/>", ""), # Self-closing refs, e.g. <ref name="x"/>
(r"<!--.*?-->", ""), # HTML comments
(r"\{\|.*?\|\}", ""), # Wikitable blocks {| ... |}
(r"<[^>]+>", ""), # Any remaining HTML tags
(r"\[https?://[^\s\]]+\s*([^\]]*)\]", r"\1"), # External links -> keep label
(r"\[\[([^|\]]+)\|([^\]]+)\]\]", r"\2"), # Piped internal links -> keep display text
(r"\[\[([^\]]+)\]\]", r"\1"), # Simple internal links -> keep target
(r"'''?", ""), # Bold (''') / italic ('') markers
(r"={2,}([^=]+)={2,}", r"\1"), # Section headers == ... ==
(r"\*+", ""), # Bullet list markers
(r"#+", ""), # Numbered list markers
]
def normalize_text(text: str) -> str:
    """Return *text* in Unicode NFC (canonical composed) form.

    Vietnamese text mixes precomposed and combining-mark encodings;
    NFC makes string comparisons and lengths consistent.
    """
    normalized = unicodedata.normalize("NFC", text)
    return normalized
def clean_wiki_markup(text: str) -> str:
    """Remove MediaWiki markup from *text* and normalize whitespace.

    First strips templates innermost-first, then applies each
    (pattern, replacement) pair in WIKI_PATTERNS in order, and finally
    collapses blank lines / horizontal whitespace and trims each line.

    Bug fix: the template pattern in WIKI_PATTERNS stops at the first
    "}" and therefore mangles nested templates such as "{{a {{b}} c}}",
    leaving a dangling "}}" in the output.  The pre-pass below removes
    templates whose body contains no braces, repeatedly, which unwinds
    nesting from the inside out.
    """
    # Innermost-first template removal; each pass exposes the next
    # nesting level, so the loop terminates when no "{{...}}" remains.
    while True:
        stripped = re.sub(r"\{\{[^{}]*\}\}", "", text, flags=re.DOTALL)
        if stripped == text:
            break
        text = stripped
    # Apply the regular markup-removal patterns in declared order.
    for pattern, replacement in WIKI_PATTERNS:
        text = re.sub(pattern, replacement, text, flags=re.DOTALL | re.MULTILINE)
    # Clean up whitespace: cap blank runs at one empty line, squeeze
    # spaces/tabs, and strip each line plus the whole text.
    text = re.sub(r"\n{3,}", "\n\n", text)
    text = re.sub(r"[ \t]+", " ", text)
    text = "\n".join(line.strip() for line in text.split("\n"))
    return text.strip()
def is_valid_article(title: str, text: str) -> bool:
    """Decide whether a page belongs in the dataset.

    Rejects non-article namespaces, redirect pages, and disambiguation
    pages; every other page is accepted.
    """
    # Non-article namespaces (English and Vietnamese forms).
    skip_prefixes = (
        "Wikipedia:", "Thảo luận:", "Thành viên:", "Bản mẫu:",
        "Module:", "MediaWiki:", "Trợ giúp:", "Cổng thông tin:",
        "Chủ đề:", "TimedText:", "Gadget:", "Gadget definition:",
    )
    if title.startswith(skip_prefixes):
        return False
    # Redirects start with "#REDIRECT" (or Vietnamese "#đổi ...").
    body = text.strip().lower()
    if body.startswith(("#redirect", "#đổi")):
        return False
    # Disambiguation pages carry a marker template in either language.
    lowered = text.lower()
    if "{{trang định hướng}}" in lowered or "{{disambiguation}}" in lowered:
        return False
    return True
def parse_wikipedia_dump(dump_path: Path) -> Iterator[dict]:
    """Stream pages out of a bz2-compressed MediaWiki XML dump.

    Yields a ``{"title": ..., "text": ...}`` dict with the raw title and
    wikitext of every page accepted by is_valid_article().  Parsing is
    incremental (ElementTree.iterparse) so the multi-GB dump is never
    held in memory at once.
    """
    with bz2.open(dump_path, "rt", encoding="utf-8") as f:
        # "end" events only: a <page> element is complete (and its
        # children searchable) exactly when its end tag has been read.
        context = ET.iterparse(f, events=("end",))
        for event, elem in context:
            if elem.tag == f"{NAMESPACE}page":
                title_elem = elem.find(f"{NAMESPACE}title")
                # The wikitext lives under <revision>, hence the
                # descendant (".//") search rather than a direct child.
                text_elem = elem.find(f".//{NAMESPACE}text")
                if title_elem is not None and text_elem is not None:
                    # .text may be None for empty elements.
                    title = title_elem.text or ""
                    text = text_elem.text or ""
                    if is_valid_article(title, text):
                        yield {
                            "title": title,
                            "text": text,
                        }
                # Clear the finished page's subtree so memory stays
                # bounded while streaming the dump.
                elem.clear()
def process_article(article: dict) -> Optional[dict]:
    """Clean one raw article and return the processed record.

    Returns None when the cleaned body is shorter than 100 characters
    (stubs and pages whose content was entirely markup).
    """
    title = normalize_text(article["title"])
    body = normalize_text(clean_wiki_markup(article["text"]))
    # Too little content survived markup removal -> discard.
    if len(body) < 100:
        return None
    # Rough sentence count: split on terminal punctuation and count
    # non-empty fragments.
    sentence_count = sum(1 for part in re.split(r"[.!?]", body) if part.strip())
    return {
        "id": title.replace(" ", "_"),
        "title": title,
        "content": body,
        "num_chars": len(body),
        "num_sentences": sentence_count,
    }
def save_plaintext(articles: list, output_dir: Path) -> None:
    """Save articles as plaintext, one ``<sanitized id>.txt`` per article.

    Files go into ``output_dir / "plaintext"``; each file holds the
    title, a blank line, then the cleaned content.
    """
    plaintext_dir = output_dir / "plaintext"
    plaintext_dir.mkdir(parents=True, exist_ok=True)
    for article in tqdm(articles, desc="Saving plaintext"):
        # Replace characters invalid in filenames and cap the length so
        # the path stays within filesystem limits.
        filename = re.sub(r'[<>:"/\\|?*]', "_", article["id"])[:200]
        # BUG FIX: the sanitized name was computed but never used — the
        # path was hard-coded, so every article overwrote the same file
        # and only the last one survived.
        filepath = plaintext_dir / f"{filename}.txt"
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(f"{article['title']}\n\n")
            f.write(article["content"])
def save_jsonl(articles: list, output_path: Path) -> None:
    """Write *articles* to *output_path*, one JSON object per line.

    Parent directories are created as needed; non-ASCII characters are
    written verbatim (``ensure_ascii=False``).
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    serialized = (json.dumps(article, ensure_ascii=False) + "\n" for article in articles)
    with open(output_path, "w", encoding="utf-8") as f:
        f.writelines(serialized)
def save_metadata(articles: list, output_path: Path) -> None:
    """Write dataset-level metadata (name, license, statistics) as JSON."""
    num_articles = len(articles)
    total_chars = sum(a["num_chars"] for a in articles)
    total_sentences = sum(a["num_sentences"] for a in articles)
    # Guard the average against an empty dataset.
    avg_chars = total_chars // num_articles if num_articles else 0
    metadata = {
        "name": "UVW 2026",
        "full_name": "Underthesea Vietnamese Wikipedia Dataset",
        "version": "1.0.0",
        "year": 2026,
        "language": "vi",
        "license": "CC BY-SA 4.0",
        "source": "Vietnamese Wikipedia",
        "url": "https://vi.wikipedia.org",
        "statistics": {
            "num_articles": num_articles,
            "num_characters": total_chars,
            "num_sentences": total_sentences,
            "avg_chars_per_article": avg_chars,
        },
    }
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(metadata, f, ensure_ascii=False, indent=2)
def main():
    """Extract and process Vietnamese Wikipedia articles."""
    dump_path = RAW_DIR / "viwiki-latest-pages-articles.xml.bz2"
    # Guard clause: the raw dump must have been downloaded first.
    if not dump_path.exists():
        print(f"Wikipedia dump not found: {dump_path}")
        print("Please run download_wikipedia.py first.")
        return
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    print("Parsing and processing Wikipedia dump...")
    print(f"Input: {dump_path}")
    # Stream pages from the dump, keeping only cleaned, non-trivial
    # articles (process_article returns None for stubs).
    raw_stream = tqdm(parse_wikipedia_dump(dump_path), desc="Processing")
    articles = [record for record in map(process_article, raw_stream) if record]
    print(f"\nTotal articles extracted: {len(articles)}")
    print("\nSaving datasets...")
    # JSONL format (HuggingFace compatible).
    jsonl_path = OUTPUT_DIR / 'uvw_2026.jsonl'
    save_jsonl(articles, jsonl_path)
    print(f" - JSONL: {jsonl_path}")
    # Plaintext format (underthesea compatible).
    save_plaintext(articles, OUTPUT_DIR)
    print(f" - Plaintext: {OUTPUT_DIR / 'plaintext/'}")
    # Dataset metadata.
    metadata_path = OUTPUT_DIR / 'metadata.json'
    save_metadata(articles, metadata_path)
    print(f" - Metadata: {metadata_path}")
    print("\nDataset creation complete!")
# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()