# UVW-2026 / scripts/wikipedia_quality_score.py
# Commit a0a7929 by rain1024: "Add Wikidata enrichment and quality scoring scripts"
#!/usr/bin/env python3
"""
Add quality score (1-10) to UVW 2026 dataset.
Quality scoring based on Wikipedia article quality research:
- Article length (comprehensiveness)
- Number of sentences (content depth)
- Sentence density (readability)
- Markup cleanliness (penalize remaining Wikipedia markup)
- Wikidata bonus (articles with Wikidata ID and category are higher quality)
References:
- https://meta.wikimedia.org/wiki/Research:Prioritization_of_Wikipedia_Articles/Language-Agnostic_Quality
- https://dl.acm.org/doi/10.1145/3625286
"""
import argparse
import json
import re
from pathlib import Path
from tqdm import tqdm
# All paths resolve relative to this script's location in the repo:
# <repo>/data/processed/.
DATA_DIR = Path(__file__).parent.parent / "data" / "processed"
# Base dataset (no Wikidata fields) — default input.
INPUT_PATH = DATA_DIR / "uvw_2026.jsonl"
# Default output: base dataset with quality_score added.
OUTPUT_PATH = DATA_DIR / "uvw_2026_quality.jsonl"
# Wikidata-enriched dataset, re-scored in place under --recalculate.
WIKIDATA_INPUT_PATH = DATA_DIR / "uvw_2026_wikidata.jsonl"
# Regexes that detect wikitext leftovers the cleaning pipeline may have missed.
MARKUP_PATTERNS = [
    r"\{\{[^}]*\}\}",  # Templates {{...}}
    r"\[\[[^\]]*\]\]",  # Internal links [[...]]
    r"<[^>]+>",  # HTML tags <...>
    r"\{\|.*?\|\}",  # Tables {|...|}
    r"'''",  # Bold markup
    r"''",  # Italic markup
    r"={2,}",  # Headers ==...==
    r"\[\s*https?://",  # External links [http...
]


def count_remaining_markup(content: str) -> int:
    """Return the total number of leftover wikitext markup hits in *content*.

    Each pattern is matched independently over the whole string (DOTALL so
    tables/templates spanning lines still match), and the hit counts are summed.
    """
    return sum(
        len(re.findall(pattern, content, flags=re.DOTALL))
        for pattern in MARKUP_PATTERNS
    )
def calculate_markup_penalty(content: str, num_chars: int) -> float:
    """
    Penalty (0-3) for leftover Wikipedia markup, scaled by article size.

    Returns:
        0: No markup found (clean)
        1: Minor markup (<0.1% of content)
        2: Moderate markup (0.1-1% of content)
        3: Heavy markup (>1% of content)
    """
    # Empty article: nothing to penalize (also avoids dividing by zero below).
    if num_chars == 0:
        return 0
    leftover = count_remaining_markup(content)
    if leftover == 0:
        return 0
    # Rough density estimate: assume each markup hit spans ~10 characters.
    density = (leftover * 10) / num_chars
    if density < 0.001:  # <0.1% of content
        return 1
    if density < 0.01:  # 0.1-1% of content
        return 2
    return 3  # >1% of content
# Ascending cut-offs for the length and sentence sub-scores. A value earns one
# point per threshold reached on top of a base of 1, so scores run 1 (below the
# first cut-off) to 10 (at or above the last) — identical to the original
# if/elif ladders, boundary for boundary.
_LENGTH_THRESHOLDS = (200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
_SENTENCE_THRESHOLDS = (3, 5, 10, 20, 50, 100, 200, 500, 1000)
# (exclusive upper bound on avg sentence length, score) bands. Deliberately
# non-monotonic: both fragment-like and run-on sentences hurt readability;
# the optimal Vietnamese sentence length is ~80-150 chars.
_DENSITY_BANDS = ((20, 3), (40, 5), (80, 8), (150, 10), (250, 7), (400, 5))


def _threshold_score(value: float, thresholds: tuple) -> int:
    """Return 1 + the number of ascending thresholds *value* has reached (1-10)."""
    return 1 + sum(value >= t for t in thresholds)


def _density_score(num_chars: int, num_sentences: int) -> int:
    """Readability score (1-10) from average sentence length."""
    if num_sentences <= 0:
        return 1  # no sentences at all: minimal score
    avg_sentence_len = num_chars / num_sentences
    for upper, score in _DENSITY_BANDS:
        if avg_sentence_len < upper:
            return score
    return 3  # >=400 chars per sentence — too long, hard to read


def calculate_quality_score(
    num_chars: int,
    num_sentences: int,
    content: str,
    wikidata_id: str | None = None,
    main_category: str | None = None,
) -> int:
    """
    Calculate quality score from 1-10 based on article metrics.

    Scoring criteria:
    - Length score (40%): character-count thresholds (comprehensiveness)
    - Sentence score (30%): number of sentences (content depth)
    - Density score (30%): average sentence length (optimal ~80-150 chars)
    - Markup penalty: subtracts up to 3 if Wikipedia markup remains
    - Wikidata bonus: +0.5 for having wikidata_id, +0.5 for main_category

    Args:
        num_chars: Article length in characters.
        num_sentences: Number of sentences in the article.
        content: Article text, scanned for leftover wikitext markup.
        wikidata_id: Optional Wikidata ID; any truthy value earns +0.5.
        main_category: Optional category label; any truthy value earns +0.5.

    Returns:
        Integer score clamped to the 1-10 range.
    """
    length_score = _threshold_score(num_chars, _LENGTH_THRESHOLDS)
    sentence_score = _threshold_score(num_sentences, _SENTENCE_THRESHOLDS)
    density_score = _density_score(num_chars, num_sentences)
    # Weighted average: length (40%), sentences (30%), density (30%)
    base_score = (length_score * 0.4) + (sentence_score * 0.3) + (density_score * 0.3)
    # Penalize leftover markup; reward Wikidata enrichment.
    markup_penalty = calculate_markup_penalty(content, num_chars)
    wikidata_bonus = (0.5 if wikidata_id else 0.0) + (0.5 if main_category else 0.0)
    final_score = base_score - markup_penalty + wikidata_bonus
    # round() uses banker's rounding (round-half-to-even), same as the original;
    # clamp to the documented 1-10 range.
    return max(1, min(10, round(final_score)))
def count_lines(path: Path) -> int:
    """Count lines in a file without loading into memory."""
    # Iterating the file handle streams one line at a time; sum() tallies them.
    with open(path, "r", encoding="utf-8") as handle:
        return sum(1 for _ in handle)
def process_quality_scores(input_path: Path, output_path: Path, use_wikidata: bool = False):
    """
    Stream articles from a JSONL file, attach a 1-10 ``quality_score`` to
    each, write them to a new JSONL file, and print summary statistics
    (score distribution, markup cleanliness, Wikidata coverage, and score
    changes versus any pre-existing scores).

    Args:
        input_path: Input JSONL file (one JSON article object per line).
        output_path: Output JSONL file with ``quality_score`` set.
        use_wikidata: If True, use wikidata_id and main_category for bonus
    """
    print(f"Input: {input_path}")
    print(f"Output: {output_path}")
    print(f"Wikidata bonus: {'enabled' if use_wikidata else 'disabled'}")
    # Count lines first (streaming) so tqdm can show progress against a total.
    print("Counting articles...")
    total = count_lines(input_path)
    print(f"Found {total:,} articles")
    if total == 0:
        # FIX: every percentage report below divides by `total`, so an empty
        # input previously raised ZeroDivisionError. Still create the (empty)
        # output file — matching the non-empty path's side effect — then stop.
        open(output_path, "w", encoding="utf-8").close()
        print("No articles found; nothing to score.")
        return
    # Aggregate statistics collected during the single streaming pass.
    quality_distribution = {i: 0 for i in range(1, 11)}
    markup_stats = {"clean": 0, "minor": 0, "moderate": 0, "heavy": 0}
    # "with_category" counts articles with BOTH wikidata_id and main_category;
    # "with_id" counts articles with wikidata_id only.
    wikidata_stats = {"with_id": 0, "with_category": 0, "none": 0}
    score_changes = {"increased": 0, "same": 0, "decreased": 0}
    with open(input_path, "r", encoding="utf-8") as fin, \
            open(output_path, "w", encoding="utf-8") as fout:
        for line in tqdm(fin, total=total, desc="Processing"):
            article = json.loads(line)
            content = article["content"]
            # Wikidata fields only influence scoring when the bonus is enabled.
            wikidata_id = article.get("wikidata_id") if use_wikidata else None
            main_category = article.get("main_category") if use_wikidata else None
            if use_wikidata:
                if wikidata_id and main_category:
                    wikidata_stats["with_category"] += 1
                elif wikidata_id:
                    wikidata_stats["with_id"] += 1
                else:
                    wikidata_stats["none"] += 1
            # Remember any pre-existing score so changes can be reported.
            old_score = article.get("quality_score")
            quality = calculate_quality_score(
                article["num_chars"],
                article["num_sentences"],
                content,
                wikidata_id,
                main_category,
            )
            article["quality_score"] = quality
            quality_distribution[quality] += 1
            if old_score is not None:
                if quality > old_score:
                    score_changes["increased"] += 1
                elif quality < old_score:
                    score_changes["decreased"] += 1
                else:
                    score_changes["same"] += 1
            # Bucket by raw markup hit count (coarser than the penalty tiers).
            markup_count = count_remaining_markup(content)
            if markup_count == 0:
                markup_stats["clean"] += 1
            elif markup_count < 5:
                markup_stats["minor"] += 1
            elif markup_count < 20:
                markup_stats["moderate"] += 1
            else:
                markup_stats["heavy"] += 1
            fout.write(json.dumps(article, ensure_ascii=False) + "\n")
    # --- Reporting (total > 0 is guaranteed by the early return above) ---
    print("\nQuality score distribution:")
    print("-" * 40)
    for score in range(1, 11):
        count = quality_distribution[score]
        pct = count / total * 100
        bar = "█" * int(pct / 2)  # 2% per bar character
        print(f" {score:2d}: {count:8,} ({pct:5.1f}%) {bar}")
    print("\nMarkup cleanliness:")
    print("-" * 40)
    for level, count in markup_stats.items():
        pct = count / total * 100
        print(f" {level:10s}: {count:8,} ({pct:5.1f}%)")
    if use_wikidata:
        print("\nWikidata coverage:")
        print("-" * 40)
        print(f" With ID + category: {wikidata_stats['with_category']:>10,} ({wikidata_stats['with_category']/total*100:5.1f}%)")
        print(f" With ID only: {wikidata_stats['with_id']:>10,} ({wikidata_stats['with_id']/total*100:5.1f}%)")
        print(f" No Wikidata: {wikidata_stats['none']:>10,} ({wikidata_stats['none']/total*100:5.1f}%)")
    # Only report score changes when the input actually carried previous scores.
    if score_changes["increased"] > 0 or score_changes["decreased"] > 0:
        print("\nScore changes from previous:")
        print("-" * 40)
        print(f" Increased: {score_changes['increased']:>10,}")
        print(f" Same: {score_changes['same']:>10,}")
        print(f" Decreased: {score_changes['decreased']:>10,}")
    print(f"\nTotal articles: {total:,}")
    print(f"Output saved to: {output_path}")
def main():
    """CLI entry point: add quality scores to the UVW 2026 dataset.

    Without flags, scores the base dataset into OUTPUT_PATH. With
    --recalculate, re-scores the Wikidata-enriched file in place (through a
    sibling temp file) so the Wikidata bonus applies.
    """
    parser = argparse.ArgumentParser(description="Add quality scores to UVW 2026 dataset")
    parser.add_argument(
        "--recalculate",
        action="store_true",
        help="Recalculate scores from uvw_2026_wikidata.jsonl with Wikidata bonus",
    )
    args = parser.parse_args()
    print("Adding quality scores to UVW 2026 dataset...")
    if not args.recalculate:
        # Original behavior: process from base file, no Wikidata bonus.
        process_quality_scores(INPUT_PATH, OUTPUT_PATH, use_wikidata=False)
        return
    # Recalculate in place: write to a temp file next to the input, then
    # replace the original. (FIX: dropped an unused `import tempfile`.)
    import shutil
    temp_path = WIKIDATA_INPUT_PATH.with_suffix(".tmp")
    process_quality_scores(WIKIDATA_INPUT_PATH, temp_path, use_wikidata=True)
    shutil.move(temp_path, WIKIDATA_INPUT_PATH)
    print(f"\nUpdated: {WIKIDATA_INPUT_PATH}")
# Run only when executed as a script, so the module stays importable
# without side effects.
if __name__ == "__main__":
    main()