# UDD-1 / src/ws_statistics.py
# Commit 1b2f095 — "Add word segmentation dataset pipeline and technical report v1.1" (rain1024)
# /// script
# requires-python = ">=3.9"
# dependencies = []
# ///
"""
Convert WS BIO dataset to CoNLL-U format and compute statistics.
Reads udd-ws-v1.1-{train,dev,test}.txt (BIO format) and:
1. Converts to CoNLL-U: udd-ws-v1.1-{train,dev,test}.conllu
2. Prints statistics matching the style of statistics.py
CoNLL-U format: each word is a token, multi-syllable words have
syllables joined by space in FORM field (Vietnamese UD convention).
"""
from collections import Counter
from os.path import dirname, isfile, join
def parse_bio_file(filepath):
    """Parse BIO file into sentences with metadata and word-level tokens.

    The input format is one ``syllable<TAB>tag`` line per token, blank
    lines between sentences, and ``# sent_id = ...`` / ``# text = ...``
    metadata comment lines.

    Args:
        filepath: Path to a BIO-format text file (UTF-8).

    Returns:
        List of dicts with keys: sent_id, text, words, domain.
        Each word is a string (syllables joined by space for
        multi-syllable words).
    """
    sentences = []
    current = {"sent_id": "", "text": "", "syllables": [], "tags": []}

    def flush():
        # Emit the buffered sentence (if any) and reset the buffer.
        # Extracted because the original duplicated this logic for the
        # blank-line case and the end-of-file case.
        if current["syllables"]:
            sentences.append({
                "sent_id": current["sent_id"],
                "text": current["text"],
                "words": bio_to_words(current["syllables"], current["tags"]),
                "domain": sent_id_to_domain(current["sent_id"]),
            })
        current.update(sent_id="", text="", syllables=[], tags=[])

    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if line.startswith("# sent_id = "):
                current["sent_id"] = line.split("= ", 1)[1]
            elif line.startswith("# text = "):
                current["text"] = line.split("= ", 1)[1]
            elif line.startswith("#"):
                pass  # ignore any other comment line
            elif not line:
                flush()  # blank line terminates a sentence
            else:
                parts = line.split("\t")
                if len(parts) == 2:  # silently skip malformed token lines
                    current["syllables"].append(parts[0])
                    current["tags"].append(parts[1])
    flush()  # handle a file that does not end with a blank line
    return sentences
def bio_to_words(syllables, tags):
    """Group syllable-level BIO tags into a list of words.

    A ``B-W`` tag begins a new word; every other tag continues the word
    in progress. Multi-syllable words are joined with single spaces.
    """
    words = []
    buffer = []
    for syllable, tag in zip(syllables, tags):
        # Flush the in-progress word when a new one begins.
        if tag == "B-W" and buffer:
            words.append(" ".join(buffer))
            buffer = []
        buffer.append(syllable)
    if buffer:
        words.append(" ".join(buffer))
    return words
def sent_id_to_domain(sent_id):
    """Map a sentence-id prefix to its corpus domain name.

    Unrecognized prefixes map to ``"unknown"``.
    """
    prefix_to_domain = (
        ("vlc-", "legal"),
        ("uvn-", "news"),
        ("uvw-", "wikipedia"),
        ("uvb-f-", "fiction"),
        ("uvb-n-", "non-fiction"),
    )
    for prefix, domain in prefix_to_domain:
        if sent_id.startswith(prefix):
            return domain
    return "unknown"
def write_conllu(sentences, filepath):
"""Write sentences to CoNLL-U format."""
with open(filepath, "w", encoding="utf-8") as f:
for sent in sentences:
f.write(f"# sent_id = {sent['sent_id']}\n")
f.write(f"# text = {sent['text']}\n")
for i, word in enumerate(sent["words"], 1):
# ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC
f.write(f"{i}\t{word}\t_\t_\t_\t_\t_\t_\t_\t_\n")
f.write("\n")
def compute_statistics(sentences):
    """Compute statistics from parsed sentences.

    Returns a dict with sentence/word/syllable counts, sentence-length
    stats (in words and syllables), a word-length distribution keyed by
    syllable count, and a per-domain sentence distribution.
    """
    words_per_sent = [len(sent["words"]) for sent in sentences]
    syls_per_sent = [
        sum(len(word.split()) for word in sent["words"]) for sent in sentences
    ]
    # Word length distribution (by syllable count).
    word_syl_counts = Counter(
        len(word.split()) for sent in sentences for word in sent["words"]
    )

    n_sents = len(sentences)
    n_words = sum(words_per_sent)
    n_syls = sum(syls_per_sent)
    return {
        "num_sentences": n_sents,
        "num_words": n_words,
        "num_syllables": n_syls,
        # Averages fall back to 0 for an empty corpus.
        "avg_sent_length": n_words / n_sents if n_sents else 0,
        "min_sent_length": min(words_per_sent) if words_per_sent else 0,
        "max_sent_length": max(words_per_sent) if words_per_sent else 0,
        "avg_sent_syl_length": n_syls / n_sents if n_sents else 0,
        "word_syl_counts": word_syl_counts,
        "domain_counts": Counter(sent["domain"] for sent in sentences),
    }
def print_statistics(name, stats):
    """Print statistics in the same format as statistics.py.

    Fix: guard the ratio/percentage divisions so an all-zero stats dict
    (which compute_statistics legitimately returns for an empty split)
    prints zeros instead of raising ZeroDivisionError.
    """
    total_words = stats["num_words"]
    total_sents = stats["num_sentences"]
    # Avoid ZeroDivisionError when the split contains no words.
    syl_per_word = stats["num_syllables"] / total_words if total_words else 0.0
    print("=" * 60)
    print(f" {name}")
    print("=" * 60)
    print("\n## Basic Statistics")
    print(f" Sentences: {stats['num_sentences']:>10,}")
    print(f" Words: {stats['num_words']:>10,}")
    print(f" Syllables: {stats['num_syllables']:>10,}")
    print(f" Avg word/sent: {stats['avg_sent_length']:>10.2f}")
    print(f" Avg syl/sent: {stats['avg_sent_syl_length']:>10.2f}")
    print(f" Avg syl/word: {syl_per_word:>10.2f}")
    print(f" Min word/sent: {stats['min_sent_length']:>10}")
    print(f" Max word/sent: {stats['max_sent_length']:>10}")
    print("\n## Word Length Distribution (by syllable count)")
    print(f" {'Syllables':<12} {'Count':>10} {'Percent':>8}")
    print(" " + "-" * 32)
    for n_syls in sorted(stats["word_syl_counts"]):
        count = stats["word_syl_counts"][n_syls]
        pct = count / total_words * 100 if total_words else 0.0
        print(f" {n_syls:<12} {count:>10,} {pct:>7.2f}%")
    print("\n## Domain Distribution")
    print(f" {'Domain':<15} {'Count':>10} {'Percent':>8}")
    print(" " + "-" * 35)
    for domain in ["legal", "news", "wikipedia", "fiction", "non-fiction"]:
        count = stats["domain_counts"].get(domain, 0)
        # Fixed-domain loop always runs, so this division needed a guard.
        pct = count / total_sents * 100 if total_sents else 0.0
        print(f" {domain:<15} {count:>10,} {pct:>7.2f}%")
    print()
def main():
    """Convert each BIO split to CoNLL-U and print statistics.

    Reads udd-ws-v1.1-{train,dev,test}.txt from the parent directory of
    this script, writes matching .conllu files next to them, then prints
    per-split, combined, and summary statistics. Missing split files are
    skipped with a warning.
    """
    # Data files live one directory above this script.
    base_dir = dirname(dirname(__file__))
    splits = {
        "train": "udd-ws-v1.1-train.txt",
        "dev": "udd-ws-v1.1-dev.txt",
        "test": "udd-ws-v1.1-test.txt",
    }
    all_stats = {}
    for split_name, filename in splits.items():
        bio_path = join(base_dir, filename)
        if not isfile(bio_path):
            print(f"WARNING: {bio_path} not found, skipping")
            continue
        # Parse BIO.
        # Fix: the original f-string had no placeholder ("Reading (unknown)...");
        # restore the path that was evidently meant to be interpolated.
        print(f"Reading {bio_path}...")
        sentences = parse_bio_file(bio_path)
        # Write CoNLL-U next to the input file.
        conllu_path = bio_path.replace(".txt", ".conllu")
        write_conllu(sentences, conllu_path)
        print(f" → {conllu_path}")
        # Compute statistics.
        stats = compute_statistics(sentences)
        all_stats[split_name] = stats
    # Print per-split statistics.
    for split_name, stats in all_stats.items():
        print_statistics(f"udd-ws-v1.1 — {split_name}", stats)
    # Print combined statistics.
    if all_stats:
        combined = {
            "num_sentences": sum(s["num_sentences"] for s in all_stats.values()),
            "num_words": sum(s["num_words"] for s in all_stats.values()),
            "num_syllables": sum(s["num_syllables"] for s in all_stats.values()),
            "min_sent_length": min(s["min_sent_length"] for s in all_stats.values()),
            "max_sent_length": max(s["max_sent_length"] for s in all_stats.values()),
            "word_syl_counts": Counter(),
            "domain_counts": Counter(),
        }
        for s in all_stats.values():
            combined["word_syl_counts"] += s["word_syl_counts"]
            combined["domain_counts"] += s["domain_counts"]
        # Guard against all splits being empty (consistent with
        # compute_statistics, which also falls back to 0).
        n_sents = combined["num_sentences"]
        combined["avg_sent_length"] = combined["num_words"] / n_sents if n_sents else 0
        combined["avg_sent_syl_length"] = combined["num_syllables"] / n_sents if n_sents else 0
        print_statistics("udd-ws-v1.1 — TOTAL", combined)
        # Summary table.
        print("=" * 60)
        print(" Summary")
        print("=" * 60)
        print(f"\n {'Split':<8} {'Sentences':>10} {'Words':>10} {'Syllables':>12}")
        print(" " + "-" * 42)
        for split_name, stats in all_stats.items():
            print(f" {split_name:<8} {stats['num_sentences']:>10,} {stats['num_words']:>10,} {stats['num_syllables']:>12,}")
        print(f" {'TOTAL':<8} {combined['num_sentences']:>10,} {combined['num_words']:>10,} {combined['num_syllables']:>12,}")
        print()
# Entry point: run the conversion + statistics pipeline only when this
# file is executed as a script, not when it is imported.
if __name__ == "__main__":
    main()