# UVW-2026 / scripts/add_wikidata.py
# Added by rain1024 in commit a0a7929 ("Add Wikidata enrichment and quality scoring scripts")
#!/usr/bin/env python3
"""
Add Wikidata ID and main category to UVW 2026 dataset.
Uses concurrent requests to Wikidata API for faster processing.
Fields added:
- wikidata_id: The Q-ID from Wikidata (e.g., Q1930)
- main_category: The P31 (instance of) property label in Vietnamese
API Reference:
- https://www.wikidata.org/w/api.php?action=help&modules=wbgetentities
"""
import json
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Optional
import urllib.request
import urllib.parse
import urllib.error
import threading
from tqdm import tqdm
# Data paths, resolved relative to the repository root (this file's grandparent).
INPUT_PATH = Path(__file__).parent.parent / "data" / "processed" / "uvw_2026_quality.jsonl"
OUTPUT_PATH = Path(__file__).parent.parent / "data" / "processed" / "uvw_2026_wikidata.jsonl"
CACHE_PATH = Path(__file__).parent.parent / "data" / "processed" / "wikidata_cache.json"
# Wikidata API endpoint
WIKIDATA_API = "https://www.wikidata.org/w/api.php"
# Batch size for API requests (max 50 titles per request)
BATCH_SIZE = 50
# Number of concurrent workers
MAX_WORKERS = 8
# Request timeout (seconds, passed to urllib.request.urlopen)
TIMEOUT = 60
# Thread-safe label cache: entity Q-ID -> label (vi preferred, en fallback);
# None marks "lookup attempted, no label available". Guard with the lock below.
_label_cache: dict[str, Optional[str]] = {}
_label_cache_lock = threading.Lock()
def load_cache() -> dict:
    """Return previously cached Wikidata mappings, or an empty dict if no cache file exists."""
    if not CACHE_PATH.exists():
        return {}
    with open(CACHE_PATH, "r", encoding="utf-8") as handle:
        return json.load(handle)
def save_cache(cache: dict):
    """Persist Wikidata mappings to CACHE_PATH.

    FIX: the original wrote directly to CACHE_PATH, so an interruption
    mid-dump left a truncated/corrupt cache. We now write to a temporary
    sibling file and atomically rename it into place (same filesystem,
    so ``Path.replace`` is atomic on POSIX).

    Args:
        cache: Mapping of title -> {"wikidata_id": ..., "main_category": ...}.
    """
    tmp_path = CACHE_PATH.with_suffix(".json.tmp")
    with open(tmp_path, "w", encoding="utf-8") as f:
        json.dump(cache, f, ensure_ascii=False)
    tmp_path.replace(CACHE_PATH)
def fetch_entity_labels_batch(entity_ids: list[str]) -> dict[str, Optional[str]]:
    """Fetch Vietnamese (falling back to English) labels for Wikidata entity IDs.

    Results are memoised in the module-level ``_label_cache`` (guarded by
    ``_label_cache_lock``); an ID that cannot be resolved is cached as ``None``
    so it is not re-requested on later calls.

    FIX: the original truncated the uncached IDs to ``to_fetch[:50]``, silently
    returning None for everything past the first 50; we now chunk the work into
    API-sized requests of 50 (the wbgetentities maximum). IDs the API does not
    echo back are also cached as None instead of being refetched forever.

    Args:
        entity_ids: Wikidata Q-IDs (e.g. ["Q5", "Q515"]).

    Returns:
        Mapping of every requested ID to its label, or None if unavailable.
    """
    with _label_cache_lock:
        to_fetch = [eid for eid in entity_ids if eid not in _label_cache]

    # wbgetentities accepts at most 50 IDs per request, so chunk the work.
    for start in range(0, len(to_fetch), 50):
        chunk = to_fetch[start:start + 50]
        params = {
            "action": "wbgetentities",
            "ids": "|".join(chunk),
            "props": "labels",
            "languages": "vi|en",
            "format": "json",
        }
        url = f"{WIKIDATA_API}?{urllib.parse.urlencode(params)}"
        try:
            req = urllib.request.Request(
                url,
                headers={"User-Agent": "UVW2026-Dataset/1.0 (https://github.com/undertheseanlp)"}
            )
            with urllib.request.urlopen(req, timeout=TIMEOUT) as response:
                data = json.loads(response.read().decode("utf-8"))
            with _label_cache_lock:
                for entity_id, entity in data.get("entities", {}).items():
                    labels = entity.get("labels", {})
                    if "vi" in labels:
                        _label_cache[entity_id] = labels["vi"]["value"]
                    elif "en" in labels:
                        _label_cache[entity_id] = labels["en"]["value"]
                    else:
                        _label_cache[entity_id] = None
                # Cache requested IDs the API did not return so they are not retried.
                for eid in chunk:
                    _label_cache.setdefault(eid, None)
        except Exception:
            # Network/parse failure: cache the whole chunk as unresolved (None),
            # preserving the original best-effort behavior.
            with _label_cache_lock:
                for eid in chunk:
                    _label_cache.setdefault(eid, None)

    with _label_cache_lock:
        return {eid: _label_cache.get(eid) for eid in entity_ids}
def fetch_wikidata_batch(titles: list[str], batch_id: int = 0) -> dict[str, dict]:
    """
    Fetch Wikidata entities for a batch of Vietnamese Wikipedia titles.

    Each title is resolved to a Wikidata item via its viwiki sitelink; the
    item's Q-ID and the first P31 ("instance of") value are extracted, and the
    P31 label is looked up through fetch_entity_labels_batch.

    FIX: request failures used to be swallowed silently and ``batch_id``
    (documented as "for error reporting") was never used; failures are now
    reported with the batch id before degrading to all-missing results.

    Args:
        titles: List of Wikipedia article titles (max 50, the API limit)
        batch_id: Batch identifier for error reporting

    Returns:
        Dict mapping every input title to {"wikidata_id": ..., "main_category": ...};
        both values are None for titles that could not be resolved.
    """
    results = {}
    category_ids_to_fetch = []
    title_to_category_id = {}
    titles_param = "|".join(titles)
    params = {
        "action": "wbgetentities",
        "sites": "viwiki",
        "titles": titles_param,
        "props": "claims|sitelinks",
        "format": "json",
    }
    url = f"{WIKIDATA_API}?{urllib.parse.urlencode(params)}"
    try:
        req = urllib.request.Request(
            url,
            headers={"User-Agent": "UVW2026-Dataset/1.0 (https://github.com/undertheseanlp)"}
        )
        with urllib.request.urlopen(req, timeout=TIMEOUT) as response:
            data = json.loads(response.read().decode("utf-8"))
        if "entities" not in data:
            # Unexpected response shape: mark the whole batch as missing.
            for title in titles:
                results[title] = {"wikidata_id": None, "main_category": None}
            return results
        # Track which input titles were resolved via their viwiki sitelink.
        # NOTE(review): the API normalizes titles (see its "normalized" field),
        # so a sitelink title may not string-match the raw input; such titles
        # are conservatively treated as missing, as before — confirm if needed.
        found_titles = set()
        for entity_id, entity in data["entities"].items():
            if entity_id.startswith("-"):  # negative keys denote missing entities
                continue
            # Map the entity back to the article title it corresponds to.
            if "sitelinks" in entity and "viwiki" in entity["sitelinks"]:
                title = entity["sitelinks"]["viwiki"]["title"]
            else:
                continue
            found_titles.add(title)
            # Extract Wikidata ID
            wikidata_id = entity.get("id")
            # Extract the first P31 ("instance of") claim's target item ID.
            category_id = None
            if "claims" in entity and "P31" in entity["claims"]:
                p31_claims = entity["claims"]["P31"]
                if p31_claims:
                    first_claim = p31_claims[0]
                    if "mainsnak" in first_claim:
                        mainsnak = first_claim["mainsnak"]
                        if mainsnak.get("datatype") == "wikibase-item":
                            datavalue = mainsnak.get("datavalue", {})
                            if datavalue.get("type") == "wikibase-entityid":
                                category_id = datavalue["value"].get("id")
            results[title] = {
                "wikidata_id": wikidata_id,
                "main_category": None,  # filled in below once labels are fetched
            }
            if category_id:
                category_ids_to_fetch.append(category_id)
                title_to_category_id[title] = category_id
        # Titles with no resolved entity get explicit None placeholders.
        for title in titles:
            if title not in found_titles:
                results[title] = {"wikidata_id": None, "main_category": None}
    except Exception as e:
        # Report the failure (previously swallowed silently), then degrade:
        # every title in the failed batch is marked as missing.
        print(f"[batch {batch_id}] Wikidata request failed: {e}")
        for title in titles:
            results[title] = {"wikidata_id": None, "main_category": None}
        return results
    # Resolve P31 item IDs to human-readable labels (vi preferred, en fallback).
    if category_ids_to_fetch:
        unique_ids = list(set(category_ids_to_fetch))
        labels = fetch_entity_labels_batch(unique_ids)
        for title, category_id in title_to_category_id.items():
            if title in results:
                results[title]["main_category"] = labels.get(category_id)
    return results
def process_batch(args: tuple) -> tuple[int, dict[str, dict]]:
    """Worker entry point: unpack (batch_id, titles), fetch the batch, and
    return (batch_id, results) so the consumer can match futures to batches."""
    job_id, job_titles = args
    return job_id, fetch_wikidata_batch(job_titles, job_id)
def count_lines(path: Path) -> int:
    """Count lines in a file, streaming so the whole file is never in memory."""
    with open(path, "r", encoding="utf-8") as handle:
        return sum(1 for _ in handle)
def iter_titles(path: Path):
    """Yield the "title" field of each JSONL record, one line at a time."""
    with open(path, "r", encoding="utf-8") as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            yield record["title"]
def iter_articles(path: Path):
    """Yield one parsed JSON object per line of a JSONL file (streaming)."""
    with open(path, "r", encoding="utf-8") as handle:
        yield from map(json.loads, handle)
def batch_iterator(iterable, batch_size: int):
    """Yield successive lists of up to ``batch_size`` items from ``iterable``,
    without materialising the whole input. The final batch may be shorter."""
    pending = []
    for element in iterable:
        pending.append(element)
        if len(pending) >= batch_size:
            yield pending
            pending = []
    # Flush whatever remains after the input is exhausted.
    if pending:
        yield pending
def main():
    """Add Wikidata information to dataset.

    Pipeline:
      1. Load the on-disk title -> {wikidata_id, main_category} cache.
      2. Stream the input once to collect titles not yet cached.
      3. Fetch uncached titles from the Wikidata API in concurrent batches,
         saving the cache periodically.
      4. Stream the input a second time, merging cached Wikidata fields into
         each article and writing the enriched JSONL output.
      5. Print coverage statistics and the top categories.
    """
    print("Adding Wikidata IDs and categories to UVW 2026 dataset...")
    print(f"Input: {INPUT_PATH}")
    print(f"Output: {OUTPUT_PATH}")
    print(f"Using {MAX_WORKERS} concurrent workers")
    # Load cache
    cache = load_cache()
    print(f"Loaded {len(cache):,} cached Wikidata mappings")
    # Count total articles (streaming, no memory)
    print("Counting articles...")
    total = count_lines(INPUT_PATH)
    print(f"Found {total:,} articles")
    # First pass: collect uncached titles (streaming, only store titles)
    print("Finding uncached titles...")
    uncached_titles = []
    for title in tqdm(iter_titles(INPUT_PATH), total=total, desc="Scanning titles"):
        if title not in cache:
            uncached_titles.append(title)
    print(f"Need to fetch {len(uncached_titles):,} titles from Wikidata API")
    # Fetch in batches using thread pool with controlled submission
    if uncached_titles:
        # Ceiling division: number of BATCH_SIZE-sized API requests needed.
        num_batches = (len(uncached_titles) + BATCH_SIZE - 1) // BATCH_SIZE
        print(f"Processing {num_batches:,} batches...")
        completed = 0
        last_save = 0
        # Process batches in chunks to limit memory from pending futures
        SUBMIT_CHUNK_SIZE = MAX_WORKERS * 10  # Limit pending futures
        with tqdm(total=num_batches, desc="Fetching Wikidata", smoothing=0.1) as pbar:
            with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
                batch_id = 0
                for chunk_start in range(0, len(uncached_titles), BATCH_SIZE * SUBMIT_CHUNK_SIZE):
                    chunk_end = min(chunk_start + BATCH_SIZE * SUBMIT_CHUNK_SIZE, len(uncached_titles))
                    chunk_titles = uncached_titles[chunk_start:chunk_end]
                    # Create batches for this chunk
                    title_batches = [chunk_titles[i:i + BATCH_SIZE]
                                     for i in range(0, len(chunk_titles), BATCH_SIZE)]
                    # Submit this chunk's batches
                    future_to_batch = {
                        executor.submit(process_batch, (batch_id + idx, titles_list)): batch_id + idx
                        for idx, titles_list in enumerate(title_batches)
                    }
                    batch_id += len(title_batches)
                    # Drain this chunk's futures before submitting the next chunk,
                    # so at most SUBMIT_CHUNK_SIZE batches are pending at once.
                    for future in as_completed(future_to_batch):
                        try:
                            _, results = future.result()
                            cache.update(results)
                            completed += 1
                            pbar.update(1)
                            # Save cache periodically
                            if completed - last_save >= 1000:
                                save_cache(cache)
                                last_save = completed
                        except Exception as e:
                            print(f"Batch error: {e}")
        # Clear uncached_titles to free memory before writing
        del uncached_titles
    # Final cache save
    save_cache(cache)
    print(f"Saved {len(cache):,} entries to cache")
    # Second pass: stream through articles and write output
    stats = {
        "with_wikidata_id": 0,
        "with_category": 0,
        "missing": 0,
    }
    category_counts: dict[str, int] = {}
    print("Writing output (streaming)...")
    with open(OUTPUT_PATH, "w", encoding="utf-8") as fout:
        for article in tqdm(iter_articles(INPUT_PATH), total=total, desc="Writing output"):
            title = article["title"]
            # Titles the API could not resolve fall back to {} -> None fields.
            wikidata_info = cache.get(title, {})
            article["wikidata_id"] = wikidata_info.get("wikidata_id")
            article["main_category"] = wikidata_info.get("main_category")
            if article["wikidata_id"]:
                stats["with_wikidata_id"] += 1
            else:
                stats["missing"] += 1
            if article["main_category"]:
                stats["with_category"] += 1
                category = article["main_category"]
                category_counts[category] = category_counts.get(category, 0) + 1
            fout.write(json.dumps(article, ensure_ascii=False) + "\n")
    # Print statistics
    print("\nWikidata coverage:")
    print("-" * 50)
    print(f" With Wikidata ID: {stats['with_wikidata_id']:>10,} ({stats['with_wikidata_id']/total*100:5.1f}%)")
    print(f" With category: {stats['with_category']:>10,} ({stats['with_category']/total*100:5.1f}%)")
    print(f" Missing: {stats['missing']:>10,} ({stats['missing']/total*100:5.1f}%)")
    print(f"\nTop 30 categories:")
    print("-" * 50)
    top_categories = sorted(category_counts.items(), key=lambda x: -x[1])[:30]
    for category, count in top_categories:
        pct = count / total * 100
        print(f" {category:40s} {count:>8,} ({pct:5.2f}%)")
    print(f"\nTotal unique categories: {len(category_counts):,}")
    print(f"Total articles: {total:,}")
    print(f"Output saved to: {OUTPUT_PATH}")
    print(f"Cache saved to: {CACHE_PATH}")
if __name__ == "__main__":
main()