# UVW-2026 / scripts/extract_articles.py
# Uploaded by rain1024 — commit a9e6fc2 ("Add scripts"), verified.
#!/usr/bin/env python3
"""
Extract and clean articles from Vietnamese Wikipedia dump.
UVW 2026: Underthesea Vietnamese Wikipedia Dataset
https://github.com/undertheseanlp/underthesea/issues/896
"""
import bz2
import json
import re
import unicodedata
from pathlib import Path
from typing import Iterator, Optional
from xml.etree import ElementTree as ET
from tqdm import tqdm
# Project-relative data directories: this script lives in scripts/, so
# parent.parent is the repository root, next to data/.
RAW_DIR = Path(__file__).parent.parent / "data" / "raw"
OUTPUT_DIR = Path(__file__).parent.parent / "data" / "processed"

# XML namespace prefix used by MediaWiki dump exports (schema 0.11).
# NOTE(review): older dumps ship export-0.10 — confirm against the actual
# dump file, otherwise every elem.find() below silently returns None.
NAMESPACE = "{http://www.mediawiki.org/xml/export-0.11/}"

# (pattern, replacement) pairs applied in order by clean_wiki_markup().
# Order matters: e.g. File/Category links must be deleted before the
# generic [[...]] link rules rewrite them into plain text.
WIKI_PATTERNS = [
    (r"\{\{[^}]+\}\}", ""),  # Templates (single level; nested {{...{{...}}...}} not handled)
    (r"\[\[Category:[^\]]+\]\]", ""),  # Categories
    (r"\[\[Thể loại:[^\]]+\]\]", ""),  # Vietnamese categories
    (r"\[\[File:[^\]]+\]\]", ""),  # Files
    (r"\[\[Tập tin:[^\]]+\]\]", ""),  # Vietnamese files
    (r"\[\[Image:[^\]]+\]\]", ""),  # Images
    (r"\[\[Hình:[^\]]+\]\]", ""),  # Vietnamese images
    (r"<ref[^>]*>.*?</ref>", ""),  # References (paired <ref>...</ref>)
    (r"<ref[^/]*/>", ""),  # Self-closing refs
    (r"<!--.*?-->", ""),  # Comments
    (r"\{\|.*?\|\}", ""),  # Tables
    (r"<[^>]+>", ""),  # Remaining HTML tags
    (r"\[https?://[^\s\]]+\s*([^\]]*)\]", r"\1"),  # External links -> label text
    (r"\[\[([^|\]]+)\|([^\]]+)\]\]", r"\2"),  # Internal links -> display text
    (r"\[\[([^\]]+)\]\]", r"\1"),  # Simple internal links -> target text
    (r"'''?", ""),  # Bold ('''), italic ('')
    (r"={2,}([^=]+)={2,}", r"\1"),  # Section headers -> heading text
    (r"\*+", ""),  # List markers (removed anywhere, not only line starts)
    (r"#+", ""),  # Numbered list markers (ditto)
]
def normalize_text(text: str) -> str:
    """Return *text* canonically composed (Unicode NFC)."""
    composed = unicodedata.normalize("NFC", text)
    return composed
def clean_wiki_markup(text: str) -> str:
    """Strip MediaWiki markup from *text* and tidy the remaining whitespace."""
    # Run every removal/rewrite rule over the whole article body.
    flags = re.DOTALL | re.MULTILINE
    for expr, repl in WIKI_PATTERNS:
        text = re.sub(expr, repl, text, flags=flags)
    # Whitespace cleanup: collapse long blank runs, squeeze spaces/tabs,
    # then trim every line and the document as a whole.
    text = re.sub(r"\n{3,}", "\n\n", text)
    text = re.sub(r"[ \t]+", " ", text)
    trimmed = [line.strip() for line in text.split("\n")]
    return "\n".join(trimmed).strip()
def is_valid_article(title: str, text: str) -> bool:
    """Return True if the page is real article content worth keeping.

    Rejects pages in non-content namespaces, redirects, and
    disambiguation pages; everything else is accepted.
    """
    # Non-content namespaces (English + Vietnamese localized names).
    # str.startswith accepts a tuple, so one call covers all prefixes.
    skip_prefixes = (
        "Wikipedia:", "Thảo luận:", "Thành viên:", "Bản mẫu:",
        "Module:", "MediaWiki:", "Trợ giúp:", "Cổng thông tin:",
        "Chủ đề:", "TimedText:", "Gadget:", "Gadget definition:",
    )
    if title.startswith(skip_prefixes):
        return False
    # Lowercase once instead of re-computing for every check below.
    lowered = text.strip().lower()
    # Redirect pages ("#REDIRECT" / Vietnamese "#đổi hướng") have no content.
    if lowered.startswith(("#redirect", "#đổi")):
        return False
    # Disambiguation pages are navigation aids, not articles.
    if "{{trang định hướng}}" in lowered or "{{disambiguation}}" in lowered:
        return False
    return True
def parse_wikipedia_dump(dump_path: Path) -> Iterator[dict]:
    """Stream {"title": ..., "text": ...} dicts for every valid article
    in a bzip2-compressed MediaWiki XML dump.

    Parses incrementally and discards each <page> subtree after use, so
    memory stays bounded regardless of dump size.
    """
    with bz2.open(dump_path, "rt", encoding="utf-8") as f:
        context = ET.iterparse(f, events=("start", "end"))
        # Capture the document root from the first "start" event.
        # elem.clear() alone is not enough: cleared <page> elements stay
        # referenced as children of the root, so the tree — and memory —
        # would still grow without bound over a multi-GB dump.
        _, root = next(context)
        for event, elem in context:
            if event != "end" or elem.tag != f"{NAMESPACE}page":
                continue
            title_elem = elem.find(f"{NAMESPACE}title")
            text_elem = elem.find(f".//{NAMESPACE}text")
            if title_elem is not None and text_elem is not None:
                title = title_elem.text or ""
                text = text_elem.text or ""
                if is_valid_article(title, text):
                    yield {
                        "title": title,
                        "text": text,
                    }
            # Drop the finished page subtree AND the root's reference to it.
            elem.clear()
            root.clear()
def process_article(article: dict) -> Optional[dict]:
    """Clean one raw article; return a processed record, or None if the
    cleaned text is too short to keep."""
    title = normalize_text(article["title"])
    content = normalize_text(clean_wiki_markup(article["text"]))
    # Drop near-empty stubs (fewer than 100 characters after cleaning).
    if len(content) < 100:
        return None
    # Rough sentence count: non-empty fragments between ., ! and ?.
    sentence_count = sum(
        1 for piece in re.split(r"[.!?]", content) if piece.strip()
    )
    return {
        "id": title.replace(" ", "_"),
        "title": title,
        "content": content,
        "num_chars": len(content),
        "num_sentences": sentence_count,
    }
def save_plaintext(articles: list, output_dir: Path) -> None:
    """Save articles in plaintext format (one file per article).

    Each file is named after the article's sanitized id and contains the
    title, a blank line, then the cleaned content.
    """
    plaintext_dir = output_dir / "plaintext"
    plaintext_dir.mkdir(parents=True, exist_ok=True)
    for article in tqdm(articles, desc="Saving plaintext"):
        # Replace filesystem-unsafe characters and cap the length so the
        # title-derived id is a valid filename on all platforms.
        filename = re.sub(r'[<>:"/\\|?*]', "_", article["id"])[:200]
        # BUG FIX: the sanitized filename was previously discarded and a
        # literal "(unknown).txt" path was used instead, so every article
        # overwrote the same single file.
        filepath = plaintext_dir / f"{filename}.txt"
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(f"{article['title']}\n\n")
            f.write(article["content"])
def save_jsonl(articles: list, output_path: Path) -> None:
    """Write *articles* to *output_path*, one JSON object per line."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, "w", encoding="utf-8") as f:
        for record in articles:
            # ensure_ascii=False keeps Vietnamese text readable as UTF-8.
            line = json.dumps(record, ensure_ascii=False)
            f.write(line + "\n")
def save_metadata(articles: list, output_path: Path) -> None:
    """Write dataset-level metadata (provenance, license, statistics) as JSON."""
    total_chars = sum(article["num_chars"] for article in articles)
    total_sentences = sum(article["num_sentences"] for article in articles)
    num_articles = len(articles)
    # Guard against an empty dataset when computing the average.
    avg_chars = total_chars // num_articles if num_articles else 0
    metadata = {
        "name": "UVW 2026",
        "full_name": "Underthesea Vietnamese Wikipedia Dataset",
        "version": "1.0.0",
        "year": 2026,
        "language": "vi",
        "license": "CC BY-SA 4.0",
        "source": "Vietnamese Wikipedia",
        "url": "https://vi.wikipedia.org",
        "statistics": {
            "num_articles": num_articles,
            "num_characters": total_chars,
            "num_sentences": total_sentences,
            "avg_chars_per_article": avg_chars,
        },
    }
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(metadata, f, ensure_ascii=False, indent=2)
def main():
    """Extract and process Vietnamese Wikipedia articles.

    Pipeline: read the bz2 dump from RAW_DIR, clean and filter each
    article, then write the dataset into OUTPUT_DIR in three forms:
    JSONL, per-article plaintext files, and a metadata summary.
    """
    dump_path = RAW_DIR / "viwiki-latest-pages-articles.xml.bz2"
    # Bail out early with a hint if the dump has not been downloaded yet.
    if not dump_path.exists():
        print(f"Wikipedia dump not found: {dump_path}")
        print("Please run download_wikipedia.py first.")
        return
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    print("Parsing and processing Wikipedia dump...")
    print(f"Input: {dump_path}")
    # Stream pages from the dump; keep only those that survive cleaning
    # (process_article returns None for too-short articles).
    articles = []
    for raw_article in tqdm(parse_wikipedia_dump(dump_path), desc="Processing"):
        processed = process_article(raw_article)
        if processed:
            articles.append(processed)
    print(f"\nTotal articles extracted: {len(articles)}")
    # Save in multiple formats
    print("\nSaving datasets...")
    # JSONL format (HuggingFace compatible)
    save_jsonl(articles, OUTPUT_DIR / "uvw_2026.jsonl")
    print(f" - JSONL: {OUTPUT_DIR / 'uvw_2026.jsonl'}")
    # Plaintext format (underthesea compatible)
    save_plaintext(articles, OUTPUT_DIR)
    print(f" - Plaintext: {OUTPUT_DIR / 'plaintext/'}")
    # Metadata summary (counts, license, provenance)
    save_metadata(articles, OUTPUT_DIR / "metadata.json")
    print(f" - Metadata: {OUTPUT_DIR / 'metadata.json'}")
    print("\nDataset creation complete!")


if __name__ == "__main__":
    main()