# Source: UVW-2026 / scripts/analyze_dataset.py (commit a9e6fc2, "Add scripts" by rain1024)
#!/usr/bin/env python3
"""
Analyze UVW 2026 dataset statistics.
UVW 2026: Underthesea Vietnamese Wikipedia Dataset
https://github.com/undertheseanlp/underthesea/issues/896
"""
import json
from collections import Counter
from pathlib import Path
from tqdm import tqdm
# Directory holding the processed JSONL dataset (repo_root/data/processed).
DATA_DIR = Path(__file__).parent.parent / "data" / "processed"
# Directory for train/dev/test splits (repo_root/data/splits).
# NOTE(review): unused in this script — presumably consumed by a sibling script; verify.
SPLITS_DIR = Path(__file__).parent.parent / "data" / "splits"
def load_jsonl(path: Path) -> list:
    """Read a JSONL file and return one parsed object per line."""
    with open(path, "r", encoding="utf-8") as handle:
        return [json.loads(record) for record in handle]
def analyze_content_length(articles: list) -> dict:
    """Summarize the distribution of article character counts.

    Returns min/max/integer-mean/median plus the 10th/25th/75th/90th
    percentiles of the ``num_chars`` field, all as integers.
    """
    ordered = sorted(article["num_chars"] for article in articles)
    count = len(ordered)

    def rank(fraction: float) -> int:
        # Truncated-index percentile, same scheme as the original code.
        return ordered[int(count * fraction)]

    return {
        "min": ordered[0],
        "max": ordered[-1],
        "mean": sum(ordered) // count,
        "median": ordered[count // 2],
        "p10": rank(0.1),
        "p25": rank(0.25),
        "p75": rank(0.75),
        "p90": rank(0.9),
    }
def analyze_titles(articles: list) -> dict:
    """Summarize article titles.

    Returns the title count, the integer average title length, and the
    26 most common uppercased first characters. An empty article list
    yields zeroed stats instead of raising ZeroDivisionError.
    """
    titles = [a["title"] for a in articles]
    # Distribution of uppercased first characters (empty titles skipped).
    first_chars = Counter(t[0].upper() for t in titles if t)
    title_lengths = [len(t) for t in titles]
    return {
        "total": len(titles),
        # Integer mean; guard the division against an empty dataset.
        "avg_title_length": (
            sum(title_lengths) // len(title_lengths) if title_lengths else 0
        ),
        "first_char_distribution": dict(first_chars.most_common(26)),
    }
# Vietnamese diacritic letters, lower- and uppercase. Built once at import
# time: the original rebuilt this set on every call, inside the per-article
# hot loop of analyze_language().
_VIETNAMESE_LOWER = "àáảãạăằắẳẵặâầấẩẫậèéẻẽẹêềếểễệìíỉĩịòóỏõọôồốổỗộơờớởỡợùúủũụưừứửữựỳýỷỹỵđ"
_VIETNAMESE_CHARS = frozenset(_VIETNAMESE_LOWER) | frozenset(
    c.upper() for c in _VIETNAMESE_LOWER
)


def count_vietnamese_chars(text: str) -> int:
    """Count characters in *text* that carry Vietnamese diacritics (either case)."""
    return sum(1 for c in text if c in _VIETNAMESE_CHARS)
def analyze_language(articles: list) -> dict:
    """Estimate Vietnamese language density of the corpus.

    Samples at most the first 1000 articles (for speed) and reports the
    fraction of sampled characters that carry Vietnamese diacritics.
    """
    sample = articles[:1000]
    char_total = 0
    diacritic_total = 0
    for entry in tqdm(sample, desc="Analyzing language"):
        body = entry["content"]
        char_total += len(body)
        diacritic_total += count_vietnamese_chars(body)
    return {
        "sample_size": len(sample),
        "vietnamese_char_ratio": diacritic_total / char_total if char_total else 0,
    }
def _bucket_sizes(articles: list) -> dict:
    """Count articles per size bucket, keyed by human-readable category label."""
    categories = {
        "small (<1K chars)": 0,
        "medium (1K-10K chars)": 0,
        "large (10K-100K chars)": 0,
        "very large (>100K chars)": 0,
    }
    for a in articles:
        chars = a["num_chars"]
        if chars < 1000:
            categories["small (<1K chars)"] += 1
        elif chars < 10000:
            categories["medium (1K-10K chars)"] += 1
        elif chars < 100000:
            categories["large (10K-100K chars)"] += 1
        else:
            categories["very large (>100K chars)"] += 1
    return categories


def _print_header(title: str) -> None:
    """Print a section banner: blank line, '=' rule, title, '=' rule."""
    print("\n" + "=" * 50)
    print(title)
    print("=" * 50)


def main():
    """Analyze dataset statistics and save a JSON summary next to the data."""
    jsonl_path = DATA_DIR / "uvw_2026.jsonl"
    if not jsonl_path.exists():
        print(f"Dataset not found: {jsonl_path}")
        print("Please run extract_articles.py first.")
        return
    print("Loading dataset...")
    articles = load_jsonl(jsonl_path)
    print(f"Total articles: {len(articles)}")
    # An empty dataset would crash the stats below (IndexError in
    # analyze_content_length, ZeroDivisionError in the averages);
    # bail out early with a clear message instead.
    if not articles:
        print("Dataset is empty; nothing to analyze.")
        return

    _print_header("CONTENT LENGTH ANALYSIS")
    length_stats = analyze_content_length(articles)
    for key, value in length_stats.items():
        print(f" {key}: {value:,} chars")

    _print_header("TITLE ANALYSIS")
    title_stats = analyze_titles(articles)
    print(f" Total titles: {title_stats['total']:,}")
    print(f" Avg title length: {title_stats['avg_title_length']} chars")
    print(" First character distribution (top 10):")
    for char, count in list(title_stats["first_char_distribution"].items())[:10]:
        print(f" {char}: {count:,}")

    _print_header("LANGUAGE ANALYSIS")
    lang_stats = analyze_language(articles)
    print(f" Sample size: {lang_stats['sample_size']}")
    print(f" Vietnamese char ratio: {lang_stats['vietnamese_char_ratio']:.2%}")

    _print_header("OVERALL STATISTICS")
    total_chars = sum(a["num_chars"] for a in articles)
    total_sentences = sum(a["num_sentences"] for a in articles)
    print(f" Total articles: {len(articles):,}")
    print(f" Total characters: {total_chars:,}")
    print(f" Total sentences: {total_sentences:,}")
    print(f" Avg chars/article: {total_chars // len(articles):,}")
    print(f" Avg sentences/article: {total_sentences // len(articles)}")

    size_categories = _bucket_sizes(articles)
    print("\n Article size distribution:")
    for category, count in size_categories.items():
        pct = count / len(articles) * 100
        print(f" {category}: {count:,} ({pct:.1f}%)")

    # Persist the full summary for downstream scripts / reporting.
    analysis = {
        "total_articles": len(articles),
        "total_characters": total_chars,
        "total_sentences": total_sentences,
        "content_length": length_stats,
        "title_stats": title_stats,
        "language_stats": lang_stats,
        "size_distribution": size_categories,
    }
    analysis_path = DATA_DIR / "analysis.json"
    with open(analysis_path, "w", encoding="utf-8") as f:
        json.dump(analysis, f, ensure_ascii=False, indent=2)
    print(f"\nAnalysis saved to: {analysis_path}")


if __name__ == "__main__":
    main()