# UDD-1 / src/check_ws_errors.py
# Fix v1.1 WS dataset: cross-boundary splits, long token decomposition, compound merges
# (commit 5542d72, author rain1024)
# /// script
# requires-python = ">=3.9"
# dependencies = []
# ///
"""Rule-based word segmentation error checker for UDD-1.1 BIO files.
Seven diagnostic rules:
1. Inconsistent segmentation (same form appears both merged and split)
2. Dictionary over-segmentation (adjacent words form a dictionary entry)
3. Dictionary under-segmentation (multi-syllable word not in dictionary)
4. Long token detection (4+ syllables)
5. Punctuation boundary errors (punctuation inside multi-syllable words)
6. Number-word boundary errors (mixed numeric + text syllables)
7. Single-character anomalies (non-Vietnamese artifacts)
Usage:
uv run src/check_ws_errors.py # Run all rules
uv run src/check_ws_errors.py --no-dict # Skip dictionary rules (2, 3)
"""
import argparse
import re
import sys
import unicodedata
from collections import Counter, defaultdict
from os.path import basename, dirname, isfile, join
# ============================================================================
# Constants
# ============================================================================
# High-frequency Vietnamese function words. Rule 2 skips adjacent-word runs
# made up entirely of these to reduce false positives.
FUNCTION_WORDS = {
    "là", "của", "và", "có", "trong", "cho", "được", "các", "này",
    "với", "không", "một", "để", "theo", "đã", "từ", "về", "người",
    "khi", "đến", "tại", "do", "bị", "cũng", "nhưng", "hoặc",
    "nếu", "thì", "phải", "hay", "vì", "đó", "nào", "lại",
    "những", "mà", "sẽ", "ra", "còn", "rất", "đều", "chỉ",
    "vẫn", "nên", "như", "bằng", "qua", "trên", "dưới",
}
# Characters counted as punctuation when classifying a syllable as
# "pure punctuation" (used by rules 5 and 6).
PUNCT_CHARS = set(".,;:!?()[]{}\"'`-—–…/\\|@#$%^&*~<>")
# ============================================================================
# BIO parsing (inlined from fix_ws_errors.py to avoid import issues)
# ============================================================================
def parse_bio_file(filepath):
    """Parse a BIO-format file into a list of sentence records.

    Each record is a dict with keys: sent_id, text, syllables, tags.
    Sentences are separated by blank lines; token lines are TAB-separated
    "syllable<TAB>tag" pairs; "# sent_id = " / "# text = " comment lines
    carry sentence metadata, and other "#" lines are ignored.
    """
    def _fresh():
        # A new, empty sentence record.
        return {"sent_id": "", "text": "", "syllables": [], "tags": []}

    sentences = []
    sent = _fresh()
    with open(filepath, "r", encoding="utf-8") as handle:
        for raw in handle:
            raw = raw.rstrip("\n")
            if raw.startswith("# sent_id = "):
                sent["sent_id"] = raw.split("= ", 1)[1]
            elif raw.startswith("# text = "):
                sent["text"] = raw.split("= ", 1)[1]
            elif raw.startswith("#"):
                pass
            elif not raw:
                # Blank line terminates a sentence (if any tokens were seen).
                if sent["syllables"]:
                    sentences.append(dict(sent))
                sent = _fresh()
            else:
                fields = raw.split("\t")
                # Malformed token lines (not exactly 2 fields) are dropped.
                if len(fields) == 2:
                    sent["syllables"].append(fields[0])
                    sent["tags"].append(fields[1])
    # Flush a trailing sentence with no terminating blank line.
    if sent["syllables"]:
        sentences.append(dict(sent))
    return sentences
def bio_to_words(syllables, tags):
    """Group syllable-level BIO tags into words.

    Returns a list of words, each word a list of its syllables. A "B-W" tag
    opens a new word; any other tag continues the current one (a leading
    non-"B-W" tag implicitly starts the first word).
    """
    # Pair up only as many syllables as have tags (mirrors zip truncation).
    n = min(len(syllables), len(tags))
    starts = [i for i in range(n) if tags[i] == "B-W"]
    if n and (not starts or starts[0] != 0):
        # The sentence opens without a "B-W": position 0 still starts a word.
        starts.insert(0, 0)
    edges = starts + [n]
    return [list(syllables[a:b]) for a, b in zip(edges, edges[1:])]
# ============================================================================
# Dictionary loading
# ============================================================================
def load_dictionary():
    """Try to load a Vietnamese word list from underthesea.

    Attempts Viet74K first, then UTS_Dictionary. Returns a pair
    (word_set, source_name), or (None, None) when neither source can be
    loaded (e.g. underthesea is not installed).
    """
    try:
        from underthesea.corpus import viet_dict_74K
        entries = {w.lower().strip() for w in viet_dict_74K.words if w.strip()}
        return entries, "Viet74K"
    except Exception:
        pass  # fall through to the next source
    try:
        from underthesea.datasets.uts_dictionary import UTSDictionary
        entries = {w.lower().strip() for w in UTSDictionary().words if w.strip()}
        return entries, "UTS_Dictionary"
    except Exception:
        pass
    return None, None
# ============================================================================
# Rule 1: Inconsistent Segmentation
# ============================================================================
def rule_inconsistency(all_sentences):
    """Find word forms segmented inconsistently across the dataset.

    A multi-syllable word form is inconsistent when the same syllable
    sequence also occurs as a run of adjacent single-syllable words
    (runs of length 2 and 3 are checked). Returns finding dicts sorted by
    total occurrences, descending.
    """
    # Count every word form (lowercase, space-joined) in the corpus.
    word_counts = Counter()
    for sent in all_sentences:
        for word in bio_to_words(sent["syllables"], sent["tags"]):
            word_counts[" ".join(s.lower() for s in word)] += 1
    multi_forms = {form for form in word_counts if " " in form}

    # Count occurrences of those forms as runs of single-syllable words.
    split_counts = Counter()
    split_examples = {}  # form -> sent_id of first split occurrence
    for sent in all_sentences:
        words = bio_to_words(sent["syllables"], sent["tags"])
        # None marks a multi-syllable word, which breaks any run.
        singles = [w[0].lower() if len(w) == 1 else None for w in words]
        for size in (2, 3):
            for i in range(len(singles) - size + 1):
                window = singles[i:i + size]
                if None in window:
                    continue
                candidate = " ".join(window)
                if candidate in multi_forms:
                    split_counts[candidate] += 1
                    split_examples.setdefault(candidate, sent["sent_id"])

    # Keep only forms that occur BOTH merged and split.
    findings = []
    for form, as_split in split_counts.items():
        as_single = word_counts.get(form, 0)
        if not (as_single and as_split):
            continue
        findings.append({
            "form": form,
            "as_single": as_single,
            "as_split": as_split,
            "total": as_single + as_split,
            "majority": "single" if as_single >= as_split else "split",
            "example_sent_id": split_examples.get(form, ""),
        })
    findings.sort(key=lambda item: item["total"], reverse=True)
    return findings
# ============================================================================
# Rule 2: Dictionary Over-Segmentation
# ============================================================================
def rule_dict_over_seg(all_sentences, dict_set):
    """Flag runs of adjacent single-syllable words whose concatenation is a
    dictionary entry (likely over-segmentation).

    Runs of length 2 and 3 are checked. Runs consisting entirely of
    function words are skipped to cut false positives.
    """
    over_seg = Counter()
    examples = {}  # form -> sent_id of first occurrence
    for sent in all_sentences:
        words = bio_to_words(sent["syllables"], sent["tags"])
        # None marks a multi-syllable word, which breaks any run.
        singles = [" ".join(w).lower() if len(w) == 1 else None for w in words]
        for size in (2, 3):
            for i in range(len(singles) - size + 1):
                window = singles[i:i + size]
                if None in window:
                    continue
                if all(w in FUNCTION_WORDS for w in window):
                    continue
                joined = " ".join(window)
                if joined in dict_set:
                    over_seg[joined] += 1
                    examples.setdefault(joined, sent["sent_id"])
    return [
        {"form": form, "count": count, "example_sent_id": examples.get(form, "")}
        for form, count in over_seg.most_common()
    ]
# ============================================================================
# Rule 3: Dictionary Under-Segmentation
# ============================================================================
def rule_dict_under_seg(all_sentences, dict_set):
    """Flag multi-syllable words absent from the dictionary whose individual
    syllables are all dictionary entries (likely under-segmentation)."""
    under_seg = Counter()
    examples = {}  # form -> sent_id of first occurrence
    for sent in all_sentences:
        for word in bio_to_words(sent["syllables"], sent["tags"]):
            if len(word) < 2:
                continue
            lowered = [s.lower() for s in word]
            form = " ".join(lowered)
            if form in dict_set:
                continue
            # Only suspicious when every syllable is itself a dictionary word.
            if any(s not in dict_set for s in lowered):
                continue
            under_seg[form] += 1
            examples.setdefault(form, sent["sent_id"])
    return [
        {
            "form": form,
            "count": count,
            "syllables": form.split(),
            "example_sent_id": examples.get(form, ""),
        }
        for form, count in under_seg.most_common()
    ]
# ============================================================================
# Rule 4: Long Token Detection
# ============================================================================
def rule_long_tokens(all_sentences):
    """Flag tokens with 4 or more syllables (unusual for Vietnamese)."""
    counts = Counter()
    examples = {}  # form -> sent_id of first occurrence
    lengths = {}   # form -> syllable count
    for sent in all_sentences:
        for word in bio_to_words(sent["syllables"], sent["tags"]):
            if len(word) < 4:
                continue
            form = " ".join(word)
            counts[form] += 1
            lengths[form] = len(word)
            examples.setdefault(form, sent["sent_id"])
    return [
        {
            "form": form,
            "count": count,
            "syllable_count": lengths[form],
            "example_sent_id": examples.get(form, ""),
        }
        for form, count in counts.most_common()
    ]
# ============================================================================
# Rule 5: Punctuation Boundary Errors
# ============================================================================
def rule_punct_boundary(all_sentences):
    """Flag multi-syllable word groups that mix pure-punctuation syllables
    with textual syllables (punctuation glued inside a word)."""
    errors = Counter()
    examples = {}  # form -> sent_id of first occurrence
    for sent in all_sentences:
        for word in bio_to_words(sent["syllables"], sent["tags"]):
            if len(word) < 2:
                continue
            # One flag per syllable: True when the syllable is all punctuation.
            punct_flags = [all(c in PUNCT_CHARS for c in syl) for syl in word]
            if any(punct_flags) and not all(punct_flags):
                form = " ".join(word)
                errors[form] += 1
                examples.setdefault(form, sent["sent_id"])
    return [
        {"form": form, "count": count, "example_sent_id": examples.get(form, "")}
        for form, count in errors.most_common()
    ]
# ============================================================================
# Rule 6: Number-Word Boundary Errors
# ============================================================================
def _is_numeric(s):
"""Check if a syllable is numeric (digits, decimals, commas in numbers)."""
return bool(re.match(r'^[\d.,]+$', s))
def rule_number_boundary(all_sentences):
    """Flag multi-syllable words mixing numeric and textual syllables.

    Pure-punctuation syllables are ignored when classifying; a word is
    flagged only when it contains at least one numeric AND one textual
    syllable.
    """
    mixed = Counter()
    examples = {}  # form -> sent_id of first occurrence
    for sent in all_sentences:
        for word in bio_to_words(sent["syllables"], sent["tags"]):
            if len(word) < 2:
                continue
            kinds = set()
            for syl in word:
                if all(c in PUNCT_CHARS for c in syl):
                    continue  # punctuation syllables don't count either way
                kinds.add("num" if _is_numeric(syl) else "text")
            if len(kinds) == 2:
                form = " ".join(word)
                mixed[form] += 1
                examples.setdefault(form, sent["sent_id"])
    return [
        {"form": form, "count": count, "example_sent_id": examples.get(form, "")}
        for form, count in mixed.most_common()
    ]
# ============================================================================
# Rule 7: Single-Character Anomalies
# ============================================================================
# Vietnamese base consonants and vowels (without tone marks)
_VIETNAMESE_BASE_CHARS = set(
"aăâbcdđeêghiklmnoôơpqrstuưvxy"
)
# Non-Vietnamese Latin letters (not in Vietnamese alphabet)
_NON_VIETNAMESE_LATIN = set("fjwz")
def _is_vietnamese_letter(ch):
"""Check if a single character is a Vietnamese letter (including tonal variants)."""
import unicodedata
# Decompose to get the base character (strip combining marks)
decomposed = unicodedata.normalize("NFD", ch.lower())
base = decomposed[0] if decomposed else ch.lower()
# đ doesn't decompose, check directly
if ch.lower() == "đ":
return True
return base in _VIETNAMESE_BASE_CHARS
def rule_single_char(all_sentences):
    """Flag standalone single-character tokens that look like artifacts:
    stray brackets/pipes, and Latin letters absent from the Vietnamese
    alphabet (f, j, w, z)."""
    counts = Counter()
    examples = {}  # char -> sent_id of first occurrence
    stray_punct = "]|{}["
    for sent in all_sentences:
        for word in bio_to_words(sent["syllables"], sent["tags"]):
            # Only single-syllable words whose syllable is one character.
            if len(word) != 1 or len(word[0]) != 1:
                continue
            ch = word[0]
            suspicious = (ch in stray_punct or
                          (ch.isalpha() and ch.lower() in _NON_VIETNAMESE_LATIN))
            if suspicious:
                counts[ch] += 1
                examples.setdefault(ch, sent["sent_id"])
    return [
        {"char": ch, "count": count, "example_sent_id": examples.get(ch, "")}
        for ch, count in counts.most_common()
    ]
# ============================================================================
# Report generation
# ============================================================================
def _append_table(lines, header, divider, rows, pad_cols):
    """Append a markdown table to *lines*: header, divider, at most 50 rows,
    plus a '... N more' placeholder row when the table is truncated.

    *pad_cols* is the number of empty cells between '...' and the 'N more'
    cell, so the placeholder row has the table's column count.
    """
    lines.append(header)
    lines.append(divider)
    lines.extend(rows[:50])
    if len(rows) > 50:
        lines.append(f"| ... |{' |' * pad_cols} *{len(rows) - 50} more* |")


def generate_report(results, output_path):
    """Generate WS_CHECK_REPORT.md from all rule findings.

    *results* maps "rule1".."rule7" to a findings list, or None for a rule
    that was skipped. Writes markdown to *output_path* and returns it.

    Cleanup vs. the previous version: the summary's total computation had
    three duplicated/unreachable branches (rule7 and rules 2-6 were the
    same sum, and the final `else` could never run since every key is
    listed); the repeated table emission is factored into _append_table.
    Output is unchanged.
    """
    lines = [
        "# WS Check Report",
        "",
        "Diagnostic report generated by `src/check_ws_errors.py`.",
        "Detects potential word segmentation errors in UDD-1.1 BIO files.",
        "",
        "## Summary",
        "",
        "| Rule | Description | Unique Forms | Total Occurrences |",
        "|:-----|:------------|-------------:|------------------:|",
    ]
    rule_names = [
        ("rule1", "Inconsistent segmentation"),
        ("rule2", "Dictionary over-segmentation"),
        ("rule3", "Dictionary under-segmentation"),
        ("rule4", "Long tokens (4+ syllables)"),
        ("rule5", "Punctuation boundary errors"),
        ("rule6", "Number-word boundary errors"),
        ("rule7", "Single-character anomalies"),
    ]
    for key, desc in rule_names:
        findings = results.get(key)
        if findings is None:
            lines.append(f"| {key} | {desc} | skipped | skipped |")
            continue
        # rule1 findings carry a precomputed "total" (single + split
        # occurrences); every other rule reports a plain "count".
        count_key = "total" if key == "rule1" else "count"
        total = sum(f[count_key] for f in findings)
        lines.append(f"| {key} | {desc} | {len(findings):,} | {total:,} |")
    lines.append("")

    # Rule 1: Inconsistency
    if results.get("rule1") is not None:
        findings = results["rule1"]
        lines.append("## Rule 1: Inconsistent Segmentation")
        lines.append("")
        lines.append(f"Words appearing both as single token and split across the dataset. "
                     f"**{len(findings):,}** unique forms found.")
        lines.append("")
        if findings:
            rows = [
                f"| {f['form']} | {f['as_single']:,} | {f['as_split']:,} | "
                f"{f['total']:,} | {f['majority']} | {f['example_sent_id']} |"
                for f in findings
            ]
            _append_table(
                lines,
                "| Form | As Single | As Split | Total | Majority | Example |",
                "|:-----|----------:|---------:|------:|:---------|:--------|",
                rows, 4)
        lines.append("")

    # Rule 2: Dictionary over-segmentation
    if results.get("rule2") is not None:
        findings = results["rule2"]
        lines.append("## Rule 2: Dictionary Over-Segmentation")
        lines.append("")
        lines.append(f"Adjacent single-syllable words forming a dictionary entry. "
                     f"**{len(findings):,}** unique forms found.")
        lines.append("")
        if findings:
            rows = [f"| {f['form']} | {f['count']:,} | {f['example_sent_id']} |"
                    for f in findings]
            _append_table(
                lines,
                "| Dictionary Word | Times Split | Example |",
                "|:----------------|------------:|:--------|",
                rows, 1)
        lines.append("")

    # Rule 3: Dictionary under-segmentation
    if results.get("rule3") is not None:
        findings = results["rule3"]
        lines.append("## Rule 3: Dictionary Under-Segmentation")
        lines.append("")
        lines.append(f"Multi-syllable words NOT in dictionary, but all syllables are. "
                     f"**{len(findings):,}** unique forms found.")
        lines.append("")
        if findings:
            rows = [
                f"| {f['form']} | {f['count']:,} | {' + '.join(f['syllables'])} | "
                f"{f['example_sent_id']} |"
                for f in findings
            ]
            _append_table(
                lines,
                "| Word | Count | Sub-parts | Example |",
                "|:-----|------:|:----------|:--------|",
                rows, 2)
        lines.append("")

    # Rule 4: Long tokens
    if results.get("rule4") is not None:
        findings = results["rule4"]
        lines.append("## Rule 4: Long Tokens (4+ Syllables)")
        lines.append("")
        lines.append(f"Tokens with 4 or more syllables (unusual for Vietnamese). "
                     f"**{len(findings):,}** unique forms found.")
        lines.append("")
        if findings:
            rows = [
                f"| {f['form']} | {f['syllable_count']} | {f['count']:,} | "
                f"{f['example_sent_id']} |"
                for f in findings
            ]
            _append_table(
                lines,
                "| Word | Syllables | Count | Example |",
                "|:-----|----------:|------:|:--------|",
                rows, 2)
        lines.append("")

    # Rule 5: Punctuation boundary
    if results.get("rule5") is not None:
        findings = results["rule5"]
        lines.append("## Rule 5: Punctuation Boundary Errors")
        lines.append("")
        lines.append(f"Punctuation characters inside multi-syllable word groups. "
                     f"**{len(findings):,}** unique forms found.")
        lines.append("")
        if findings:
            rows = [f"| {f['form']} | {f['count']:,} | {f['example_sent_id']} |"
                    for f in findings]
            _append_table(
                lines,
                "| Word | Count | Example |",
                "|:-----|------:|:--------|",
                rows, 1)
        lines.append("")

    # Rule 6: Number-word boundary
    if results.get("rule6") is not None:
        findings = results["rule6"]
        lines.append("## Rule 6: Number-Word Boundary Errors")
        lines.append("")
        lines.append(f"Multi-syllable words with mixed numeric and text syllables. "
                     f"**{len(findings):,}** unique forms found.")
        lines.append("")
        if findings:
            rows = [f"| {f['form']} | {f['count']:,} | {f['example_sent_id']} |"
                    for f in findings]
            _append_table(
                lines,
                "| Token | Count | Example |",
                "|:------|------:|:--------|",
                rows, 1)
        lines.append("")

    # Rule 7: Single-character anomalies (never truncated: few unique chars)
    if results.get("rule7") is not None:
        findings = results["rule7"]
        lines.append("## Rule 7: Single-Character Anomalies")
        lines.append("")
        lines.append(f"Standalone single characters that are likely artifacts. "
                     f"**{len(findings):,}** unique forms found.")
        lines.append("")
        if findings:
            lines.append("| Character | Count | Example |")
            lines.append("|:----------|------:|:--------|")
            lines.extend(f"| `{f['char']}` | {f['count']:,} | {f['example_sent_id']} |"
                         for f in findings)
        lines.append("")

    report = "\n".join(lines)
    with open(output_path, "w", encoding="utf-8") as fh:
        fh.write(report)
    print(f"\nReport written to {output_path}")
    return report
# ============================================================================
# Main
# ============================================================================
def main():
    """Entry point: load the three BIO splits, run all seven rules, and
    write WS_CHECK_REPORT.md next to the data files.

    Exits with status 1 if any expected split file is missing.
    """
    parser = argparse.ArgumentParser(
        description="Check word segmentation errors in UDD-1.1 BIO files."
    )
    parser.add_argument(
        "--no-dict", action="store_true",
        help="Skip dictionary-based rules (2, 3)"
    )
    args = parser.parse_args()

    # Data files live one directory above this script (repo root).
    base_dir = dirname(dirname(__file__))
    bio_files = [
        join(base_dir, f"udd-ws-v1.1-{split}.txt")
        for split in ("train", "dev", "test")
    ]
    # Fail fast if any split file is missing.
    for path in bio_files:
        if not isfile(path):
            print(f"ERROR: {path} not found", file=sys.stderr)
            sys.exit(1)

    # Load all sentences
    print("Loading BIO files...")
    all_sentences = []
    for path in bio_files:
        sents = parse_bio_file(path)
        # basename() instead of path.rsplit('/', 1)[-1]: join() uses the
        # platform separator, so the rsplit broke the name on Windows.
        print(f" {basename(path)}: {len(sents):,} sentences")
        all_sentences.extend(sents)
    print(f" Total: {len(all_sentences):,} sentences")

    # Load dictionary unless skipped; rules 2 & 3 depend on it.
    dict_set = None
    if not args.no_dict:
        print("\nLoading dictionary...")
        dict_set, dict_name = load_dictionary()
        if dict_set:
            print(f" Loaded {dict_name}: {len(dict_set):,} entries")
        else:
            print(" WARNING: No dictionary available, skipping rules 2 & 3")

    # Run rules
    results = {}
    print("\nRule 1: Inconsistent segmentation...")
    results["rule1"] = rule_inconsistency(all_sentences)
    print(f" Found {len(results['rule1']):,} inconsistent forms")
    if dict_set:
        print("\nRule 2: Dictionary over-segmentation...")
        results["rule2"] = rule_dict_over_seg(all_sentences, dict_set)
        print(f" Found {len(results['rule2']):,} over-segmented forms")
        print("\nRule 3: Dictionary under-segmentation...")
        results["rule3"] = rule_dict_under_seg(all_sentences, dict_set)
        print(f" Found {len(results['rule3']):,} under-segmented forms")
    else:
        # None marks a skipped rule for generate_report.
        results["rule2"] = None
        results["rule3"] = None
    print("\nRule 4: Long tokens (4+ syllables)...")
    results["rule4"] = rule_long_tokens(all_sentences)
    print(f" Found {len(results['rule4']):,} long token forms")
    print("\nRule 5: Punctuation boundary errors...")
    results["rule5"] = rule_punct_boundary(all_sentences)
    print(f" Found {len(results['rule5']):,} punctuation boundary errors")
    print("\nRule 6: Number-word boundary errors...")
    results["rule6"] = rule_number_boundary(all_sentences)
    print(f" Found {len(results['rule6']):,} number-word boundary errors")
    print("\nRule 7: Single-character anomalies...")
    results["rule7"] = rule_single_char(all_sentences)
    print(f" Found {len(results['rule7']):,} single-character anomalies")

    # Generate report
    report_path = join(base_dir, "WS_CHECK_REPORT.md")
    generate_report(results, report_path)

    # Print summary
    print(f"\n{'=' * 50}")
    print("SUMMARY")
    for key, label in [
        ("rule1", "Inconsistent segmentation"),
        ("rule2", "Dict over-segmentation"),
        ("rule3", "Dict under-segmentation"),
        ("rule4", "Long tokens (4+ syl)"),
        ("rule5", "Punctuation boundary"),
        ("rule6", "Number-word boundary"),
        ("rule7", "Single-char anomalies"),
    ]:
        findings = results.get(key)
        if findings is None:
            print(f" {label}: skipped")
        else:
            print(f" {label}: {len(findings):,} unique forms")
# Run the checker only when executed as a script, not on import.
if __name__ == "__main__":
    main()