#!/usr/bin/env python3
"""
Analyze UVW 2026 dataset statistics.
UVW 2026: Underthesea Vietnamese Wikipedia Dataset
https://github.com/undertheseanlp/underthesea/issues/896
"""
import json
from collections import Counter
from pathlib import Path
from tqdm import tqdm
# Dataset locations, resolved relative to this file (assumes the script
# lives one directory below the project root -- TODO confirm layout).
DATA_DIR = Path(__file__).parent.parent / "data" / "processed"
# Train/dev/test split files live alongside the processed data.
SPLITS_DIR = Path(__file__).parent.parent / "data" / "splits"
def load_jsonl(path: Path) -> list:
    """Read a JSONL file and return its records as a list of dicts.

    Args:
        path: Path to a UTF-8 encoded file with one JSON object per line.

    Returns:
        The parsed records, in file order.
    """
    with open(path, "r", encoding="utf-8") as f:
        return [json.loads(line) for line in f]
def analyze_content_length(articles: list) -> dict:
    """Summarize the distribution of article lengths in characters.

    Args:
        articles: Records each carrying an integer "num_chars" field.

    Returns:
        Dict with min/max, integer mean, (upper) median, and the 10th,
        25th, 75th and 90th percentiles of the lengths. All values are 0
        when *articles* is empty (the original raised IndexError /
        ZeroDivisionError in that case).
    """
    lengths = sorted(a["num_chars"] for a in articles)
    n = len(lengths)
    if n == 0:
        # Empty dataset: report zeros instead of crashing on lengths[0] / // n.
        return {k: 0 for k in ("min", "max", "mean", "median", "p10", "p25", "p75", "p90")}
    return {
        "min": lengths[0],
        "max": lengths[-1],
        "mean": sum(lengths) // n,  # integer mean, matching the report's "{:,} chars" output
        "median": lengths[n // 2],  # upper median for even n
        "p10": lengths[int(n * 0.1)],
        "p25": lengths[int(n * 0.25)],
        "p75": lengths[int(n * 0.75)],
        "p90": lengths[int(n * 0.9)],
    }
def analyze_titles(articles: list) -> dict:
    """Analyze article titles: count, average length, first-letter histogram.

    Args:
        articles: Records each carrying a string "title" field.

    Returns:
        Dict with the title count, integer average title length, and the
        26 most common uppercased first characters. Zeros/empty dict when
        *articles* is empty (the original raised ZeroDivisionError).
    """
    titles = [a["title"] for a in articles]
    if not titles:
        # Guard the average below against division by zero.
        return {"total": 0, "avg_title_length": 0, "first_char_distribution": {}}
    # Histogram of uppercased first characters; skip empty titles.
    first_chars = Counter(t[0].upper() for t in titles if t)
    title_lengths = [len(t) for t in titles]
    return {
        "total": len(titles),
        "avg_title_length": sum(title_lengths) // len(title_lengths),
        "first_char_distribution": dict(first_chars.most_common(26)),
    }
def count_vietnamese_chars(text: str) -> int:
    """Count characters in *text* that are Vietnamese-specific (diacritics or đ).

    Bug fix: the original called ``vietnamese_chars.update(c.upper() for c
    in vietnamese_chars)`` -- mutating the set while the generator iterates
    it, which raises ``RuntimeError: Set changed size during iteration`` on
    every call. The uppercase variants are now materialized in a separate
    set comprehension before merging.

    Args:
        text: Any string.

    Returns:
        The number of characters of *text* found in the Vietnamese set
        (both lower- and uppercase forms).
    """
    lowercase = "àáảãạăằắẳẵặâầấẩẫậèéẻẽẹêềếểễệìíỉĩịòóỏõọôồốổỗộơờớởỡợùúủũụưừứửữựỳýỷỹỵđ"
    vietnamese_chars = set(lowercase)
    # Build the uppercase set fully before merging -- never mutate a set
    # while iterating over it.
    vietnamese_chars |= {c.upper() for c in lowercase}
    return sum(1 for c in text if c in vietnamese_chars)
def analyze_language(articles: list, sample_size: int = 1000) -> dict:
    """Estimate the ratio of Vietnamese-specific characters in the corpus.

    Args:
        articles: Records each carrying a string "content" field.
        sample_size: How many leading articles to sample for speed
            (default 1000, preserving the original hard-coded limit).

    Returns:
        Dict with the effective sample size and the ratio of Vietnamese
        characters to all characters in the sample (0 when the sample
        contains no text at all).
    """
    total_chars = 0
    vietnamese_chars = 0
    # Only a prefix of the corpus is scanned; counting every article would
    # be slow and the ratio converges quickly.
    for article in tqdm(articles[:sample_size], desc="Analyzing language"):
        content = article["content"]
        total_chars += len(content)
        vietnamese_chars += count_vietnamese_chars(content)
    return {
        "sample_size": min(sample_size, len(articles)),
        "vietnamese_char_ratio": vietnamese_chars / total_chars if total_chars else 0,
    }
def main():
    """Analyze dataset statistics and write a JSON summary.

    Reads the processed dataset from DATA_DIR/uvw_2026.jsonl, prints
    content-length, title, language and size-distribution reports to
    stdout, then saves the combined analysis to DATA_DIR/analysis.json.
    Exits early (with a message) if the dataset file is missing or empty.
    """
    jsonl_path = DATA_DIR / "uvw_2026.jsonl"
    if not jsonl_path.exists():
        print(f"Dataset not found: {jsonl_path}")
        print("Please run extract_articles.py first.")
        return
    print("Loading dataset...")
    articles = load_jsonl(jsonl_path)
    if not articles:
        # Guard: the per-article averages below divide by len(articles).
        print("Dataset is empty; nothing to analyze.")
        return
    print(f"Total articles: {len(articles)}")
    print("\n" + "=" * 50)
    print("CONTENT LENGTH ANALYSIS")
    print("=" * 50)
    length_stats = analyze_content_length(articles)
    for key, value in length_stats.items():
        print(f" {key}: {value:,} chars")
    print("\n" + "=" * 50)
    print("TITLE ANALYSIS")
    print("=" * 50)
    title_stats = analyze_titles(articles)
    print(f" Total titles: {title_stats['total']:,}")
    print(f" Avg title length: {title_stats['avg_title_length']} chars")
    print(f" First character distribution (top 10):")
    for char, count in list(title_stats["first_char_distribution"].items())[:10]:
        print(f" {char}: {count:,}")
    print("\n" + "=" * 50)
    print("LANGUAGE ANALYSIS")
    print("=" * 50)
    lang_stats = analyze_language(articles)
    print(f" Sample size: {lang_stats['sample_size']}")
    print(f" Vietnamese char ratio: {lang_stats['vietnamese_char_ratio']:.2%}")
    print("\n" + "=" * 50)
    print("OVERALL STATISTICS")
    print("=" * 50)
    total_chars = sum(a["num_chars"] for a in articles)
    total_sentences = sum(a["num_sentences"] for a in articles)
    print(f" Total articles: {len(articles):,}")
    print(f" Total characters: {total_chars:,}")
    print(f" Total sentences: {total_sentences:,}")
    print(f" Avg chars/article: {total_chars // len(articles):,}")
    print(f" Avg sentences/article: {total_sentences // len(articles)}")
    # Bucket every article into a coarse size category for the report.
    size_categories = {
        "small (<1K chars)": 0,
        "medium (1K-10K chars)": 0,
        "large (10K-100K chars)": 0,
        "very large (>100K chars)": 0,
    }
    for a in articles:
        chars = a["num_chars"]
        if chars < 1000:
            size_categories["small (<1K chars)"] += 1
        elif chars < 10000:
            size_categories["medium (1K-10K chars)"] += 1
        elif chars < 100000:
            size_categories["large (10K-100K chars)"] += 1
        else:
            size_categories["very large (>100K chars)"] += 1
    print("\n Article size distribution:")
    for category, count in size_categories.items():
        pct = count / len(articles) * 100
        print(f" {category}: {count:,} ({pct:.1f}%)")
    # Persist the combined analysis next to the dataset for later use.
    analysis = {
        "total_articles": len(articles),
        "total_characters": total_chars,
        "total_sentences": total_sentences,
        "content_length": length_stats,
        "title_stats": title_stats,
        "language_stats": lang_stats,
        "size_distribution": size_categories,
    }
    analysis_path = DATA_DIR / "analysis.json"
    with open(analysis_path, "w", encoding="utf-8") as f:
        json.dump(analysis, f, ensure_ascii=False, indent=2)
    print(f"\nAnalysis saved to: {analysis_path}")


if __name__ == "__main__":
    main()