# Source: UVW-2026 / scripts/add_quality_score.py
# Author: rain1024 — "Add quality scoring script" (commit b0e730e, verified)
#!/usr/bin/env python3
"""
Add quality score (1-10) to UVW 2026 dataset.
Quality scoring based on Wikipedia article quality research:
- Article length (comprehensiveness)
- Number of sentences (content depth)
- Sentence density (readability)
References:
- https://meta.wikimedia.org/wiki/Research:Prioritization_of_Wikipedia_Articles/Language-Agnostic_Quality
- https://dl.acm.org/doi/10.1145/3625286
"""
import json
import math
from pathlib import Path
from tqdm import tqdm
INPUT_PATH = Path(__file__).parent.parent / "data" / "processed" / "uvw_2026.jsonl"
OUTPUT_PATH = Path(__file__).parent.parent / "data" / "processed" / "uvw_2026_quality.jsonl"
def calculate_quality_score(num_chars: int, num_sentences: int) -> int:
"""
Calculate quality score from 1-10 based on article metrics.
Scoring criteria:
- Length score (40%): Based on character count thresholds
- Sentence score (30%): Based on number of sentences
- Density score (30%): Based on average sentence length (optimal ~80-150 chars)
"""
# 1. Length score (1-10) - based on Wikipedia quality research
# Longer articles tend to be more comprehensive
if num_chars < 200:
length_score = 1
elif num_chars < 500:
length_score = 2
elif num_chars < 1000:
length_score = 3
elif num_chars < 2000:
length_score = 4
elif num_chars < 5000:
length_score = 5
elif num_chars < 10000:
length_score = 6
elif num_chars < 20000:
length_score = 7
elif num_chars < 50000:
length_score = 8
elif num_chars < 100000:
length_score = 9
else:
length_score = 10
# 2. Sentence score (1-10) - content depth
if num_sentences < 3:
sentence_score = 1
elif num_sentences < 5:
sentence_score = 2
elif num_sentences < 10:
sentence_score = 3
elif num_sentences < 20:
sentence_score = 4
elif num_sentences < 50:
sentence_score = 5
elif num_sentences < 100:
sentence_score = 6
elif num_sentences < 200:
sentence_score = 7
elif num_sentences < 500:
sentence_score = 8
elif num_sentences < 1000:
sentence_score = 9
else:
sentence_score = 10
# 3. Density score (1-10) - readability
# Optimal Vietnamese sentence length: ~80-150 chars
if num_sentences > 0:
avg_sentence_len = num_chars / num_sentences
if avg_sentence_len < 20: # Too short - likely fragments
density_score = 3
elif avg_sentence_len < 40:
density_score = 5
elif avg_sentence_len < 80:
density_score = 8
elif avg_sentence_len < 150: # Optimal range
density_score = 10
elif avg_sentence_len < 250:
density_score = 7
elif avg_sentence_len < 400:
density_score = 5
else: # Too long - hard to read
density_score = 3
else:
density_score = 1
# Weighted average: length (40%), sentences (30%), density (30%)
final_score = (length_score * 0.4) + (sentence_score * 0.3) + (density_score * 0.3)
# Round to nearest integer, ensure 1-10 range
return max(1, min(10, round(final_score)))
def main():
    """Read the dataset, attach a quality score to each article, and report stats."""
    print("Adding quality scores to UVW 2026 dataset...")
    print(f"Input: {INPUT_PATH}")
    print(f"Output: {OUTPUT_PATH}")

    # First pass: count records so tqdm can render an accurate progress bar.
    with open(INPUT_PATH, "r", encoding="utf-8") as f:
        total = sum(1 for _ in f)

    # Second pass: score each article and stream it straight back out,
    # tallying a histogram of the assigned scores as we go.
    distribution = dict.fromkeys(range(1, 11), 0)
    with open(INPUT_PATH, "r", encoding="utf-8") as fin, \
        open(OUTPUT_PATH, "w", encoding="utf-8") as fout:
        for raw in tqdm(fin, total=total, desc="Processing"):
            record = json.loads(raw)
            score = calculate_quality_score(
                record["num_chars"],
                record["num_sentences"]
            )
            record["quality"] = score
            distribution[score] += 1
            fout.write(json.dumps(record, ensure_ascii=False) + "\n")

    # Render the score histogram (one bar character per 2%).
    print("\nQuality score distribution:")
    print("-" * 40)
    for score in range(1, 11):
        count = distribution[score]
        pct = count / total * 100
        bar = "█" * int(pct / 2)
        print(f" {score:2d}: {count:8,} ({pct:5.1f}%) {bar}")
    print(f"\nTotal articles: {total:,}")
    print(f"Output saved to: {OUTPUT_PATH}")
if __name__ == "__main__":
    main()