# /// script
# requires-python = ">=3.9"
# dependencies = []
# ///
"""Fix known word segmentation errors in UDD-1.1 BIO files.
Six fix passes:
1. Split cross-boundary merges (uppercase mid-token signals)
1.5 Split long tokens (5+ syllables) via vocab-based greedy decomposition
2. Merge always-split compounds (dictionary compounds + inconsistent forms)
2.5 Split foreign word merges (Latin-script tokens without Vietnamese diacritics)
2.75 Split proper name boundary merges (uppercase→lowercase transitions within words)
3. Validate BIO invariants
Usage:
uv run src/fix_ws_errors.py # Apply all fixes, write report
uv run src/fix_ws_errors.py --dry-run # Report only, no file changes
"""
import argparse
import re
import sys
from collections import Counter, defaultdict
from os.path import dirname, isfile, join
# ============================================================================
# Constants
# ============================================================================
# Compounds that should ALWAYS be merged (conservative curated list)
# Source: SEGMENTATION_EVAL.md sections 2c, 3, 6c + annotation guidelines
# Stored as tuples of lowercase syllables
MERGE_TERMS = {
# Always-split dictionary compounds (high confidence from 6c)
("vụ", "án"), # 892× split, legal compound
("phạt", "tù"), # 422× split
("hủy", "bỏ"), # 147× split
("chữa", "bệnh"), # 112× split
("lời", "khai"), # 102× split
("kèm", "theo"), # 101× split
("ghi", "rõ"), # 99× split
("trả", "lại"), # 94× split
("khám", "bệnh"), # 57× split
("rút", "gọn"), # 51× split
("giấy", "chứng", "nhận"), # 41× split, 3 syllables
("tù", "chung", "thân"), # 38× split, 3 syllables
("quá", "hạn"), # 31× split
("làm", "chủ"), # 30× split
("ô", "nhiễm", "môi", "trường"), # 26× split, 4 syllables
# Inconsistent forms — majority is split, should be single
("phiên", "tòa"), # 576 split vs 18 single
("hàng", "hóa"), # 175 split vs 6 single
("chủ", "tọa"), # 125 split vs 7 single
("bị", "hại"), # 96 split vs 6 single
("tiền", "công"), # 62 split vs 2 single
("thuê", "khoán"), # 62 split vs 2 single
("hòa", "giải"), # 53 split vs 30 single
("bốc", "hàng"), # 35 split vs 1 single
# ---- Cycle 1 gold corrections: new compound merges ----
("ủy", "ban"), # committee
("lính", "thú"), # soldier
("mu", "rùa"), # turtle shell
("trêu", "ghẹo"), # tease
("sương", "mai"), # morning dew
("mái", "nhà"), # roof
("nghiến", "răng"), # gnash teeth
("nheo", "nheo"), # squint
("dơn", "dớt"), # pale/sickly
("xua", "tay"), # wave hand
("nói", "gở"), # say unlucky things
("bơi", "chó"), # dog paddle
("người", "thương"), # beloved
("chăn", "lợn"), # pig herding
("khay", "trà"), # tea tray
("đồng", "tự"), # homograph
("tại", "ngũ"), # in service (military)
("hành", "chánh"), # administration
("lượng", "tử"), # quantum
("tích", "lũy"), # accumulate
("siêu", "máy", "tính"), # supercomputer
("đường", "thẳng"), # straight line
("đầm", "đuôi", "cá"), # fishtail dress
("như", "điên"), # like crazy
("tẩy", "chay"), # boycott
}
# Build index for efficient longest-match lookup: {length: [term, ...]}
_MERGE_BY_LENGTH = defaultdict(list)
for _term in MERGE_TERMS:
_MERGE_BY_LENGTH[len(_term)].append(_term)
MERGE_MAX_LEN = max(len(t) for t in MERGE_TERMS)
# Tokens with uppercase mid-token that are LEGITIMATE (not errors).
# Stored as lowercase strings (space-joined syllables) for comparison.
# Source: SEGMENTATION_EVAL.md section 2b — proper names and titles.
CROSS_BOUNDARY_WHITELIST = {
"xã hội chủ nghĩa việt nam",
"bộ tư pháp",
"mặt trận tổ quốc việt nam",
"đảng cộng sản việt nam",
"tổng liên đoàn lao động",
"hội đồng trọng tài",
"chủ tịch", # all-caps title
"đại lý", # all-caps title
"nguyễn sinh hùng", # personal name
"luật bảo hiểm xã hội",
"luật bảo vệ",
"bộ luật lao động",
"pháp lệnh dân số",
"bộ tài nguyên và môi trường",
# Roman numeral sessions
"khóa xiii",
"khóa xv",
"khóa xiv",
"khóa xii",
"khóa xi",
}
# ============================================================================
# BIO file I/O
# ============================================================================
def parse_bio_file(filepath):
"""Parse BIO file into list of sentences.
Returns list of dicts with keys: sent_id, text, syllables, tags.
"""
sentences = []
current = {"sent_id": "", "text": "", "syllables": [], "tags": []}
with open(filepath, "r", encoding="utf-8") as f:
for line in f:
line = line.rstrip("\n")
if line.startswith("# sent_id = "):
current["sent_id"] = line.split("= ", 1)[1]
continue
if line.startswith("# text = "):
current["text"] = line.split("= ", 1)[1]
continue
if line.startswith("#"):
continue
if not line:
if current["syllables"]:
sentences.append(dict(current))
current = {"sent_id": "", "text": "", "syllables": [], "tags": []}
continue
parts = line.split("\t")
if len(parts) == 2:
current["syllables"].append(parts[0])
current["tags"].append(parts[1])
if current["syllables"]:
sentences.append(dict(current))
return sentences
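# Illustration of the input layout parse_bio_file expects (toy example, not
# taken from the corpus): "#"-prefixed lines carry metadata, each token line
# is "syllable\tTAG", and a blank line closes the sentence.
#   # sent_id = example-1
#   # text = Việt Nam
#   Việt\tB-W
#   Nam\tI-W
#   (blank line)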
def write_bio_file(sentences, filepath):
"""Write sentences back to BIO format."""
with open(filepath, "w", encoding="utf-8") as f:
for sent in sentences:
f.write(f"# sent_id = {sent['sent_id']}\n")
f.write(f"# text = {sent['text']}\n")
for syl, tag in zip(sent["syllables"], sent["tags"]):
f.write(f"{syl}\t{tag}\n")
f.write("\n")
def bio_to_words(syllables, tags):
"""Convert syllable-level BIO tags to word list."""
words = []
current = []
for syl, tag in zip(syllables, tags):
if tag == "B-W":
if current:
words.append(" ".join(current))
current = [syl]
else:
current.append(syl)
if current:
words.append(" ".join(current))
return words
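# Example (toy input, assumed tags):
#   bio_to_words(["Việt", "Nam", "xin", "chào"], ["B-W", "I-W", "B-W", "B-W"])
#   returns ["Việt Nam", "xin", "chào"]: each B-W starts a new word and each
#   I-W extends the current one.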
def write_conllu(sentences, filepath):
"""Write sentences to CoNLL-U format (word-level, no syntactic annotation)."""
with open(filepath, "w", encoding="utf-8") as f:
for sent in sentences:
f.write(f"# sent_id = {sent['sent_id']}\n")
f.write(f"# text = {sent['text']}\n")
words = bio_to_words(sent["syllables"], sent["tags"])
for i, word in enumerate(words, 1):
f.write(f"{i}\t{word}\t_\t_\t_\t_\t_\t_\t_\t_\n")
f.write("\n")
# ============================================================================
# Vocab for long-token splitting
# ============================================================================
def build_split_vocab(all_sentences, min_count=5):
"""Build vocab of 2-4 syllable words for decomposing long tokens.
Counts every 2-4 syllable word in the dataset (case-insensitive).
Returns the set of forms that appear at least `min_count` times.
Excludes entries with function words at boundaries (e.g. "tài nguyên và").
"""
# Function words that should never be at the edge of a compound
boundary_stopwords = {"và", "hoặc"}
vocab = Counter()
for sent in all_sentences:
words = bio_to_words(sent["syllables"], sent["tags"])
for w in words:
syls = w.split()
if 2 <= len(syls) <= 4:
form = " ".join(s.lower() for s in syls)
vocab[form] += 1
return {
form for form, count in vocab.items()
if count >= min_count
and form.split()[0] not in boundary_stopwords
and form.split()[-1] not in boundary_stopwords
}
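# Sketch of the resulting vocab (hypothetical counts): if "kinh doanh" occurs
# as a single token at least min_count times across the splits, the lowercase
# form "kinh doanh" is included; a form like "tài nguyên và" is excluded
# because its last syllable is a boundary stopword.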
def build_viet_syllables(all_sentences, min_count=50):
"""Build set of common Vietnamese syllables for foreign word filtering.
Counts individual syllables across all sentences and returns those
appearing at least `min_count` times (lowercased). These are used to
distinguish Vietnamese multi-syllable words (like "kinh doanh") from
truly foreign tokens (like "Max Planck").
"""
counts = Counter()
for sent in all_sentences:
for syl in sent["syllables"]:
counts[syl.lower()] += 1
return {syl for syl, c in counts.items() if c >= min_count}
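# Example behaviour (counts are hypothetical): a syllable whose lowercase form
# appears at least min_count=50 times is kept, while a rare foreign syllable
# seen only a handful of times is excluded from the set.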
# ============================================================================
# Fix passes
# ============================================================================
def fix_cross_boundary(syllables, tags):
"""Pass 1: Split cross-boundary merges.
Detects transitions from a non-uppercase syllable to an uppercase syllable
within a multi-syllable word. This catches errors like "tố tụng Người"
(lowercase "tụng" → uppercase "Người") while preserving proper names like
"Việt Nam" (both uppercase, no transition).
If the word (lowercased) is in CROSS_BOUNDARY_WHITELIST, skip it.
Returns (new_tags, list of change descriptions).
"""
new_tags = list(tags)
changes = []
# Reconstruct word spans: list of (start_idx, end_idx) for each word
word_spans = []
current_start = 0
for i in range(len(tags)):
if tags[i] == "B-W" and i > 0:
word_spans.append((current_start, i))
current_start = i
word_spans.append((current_start, len(tags)))
for start, end in word_spans:
if end - start < 2:
continue # single-syllable word
# Check for lowercase→uppercase transitions within the word.
# A transition is: preceding syllable does NOT start with uppercase,
# AND current syllable starts with uppercase. This catches real
# cross-boundary merges (e.g., "tụng" → "Người") while ignoring
# proper names where all syllables are uppercase (e.g., "Việt Nam").
has_transition = False
for j in range(start + 1, end):
prev_syl = syllables[j - 1]
curr_syl = syllables[j]
if (curr_syl and curr_syl[0].isupper() and
prev_syl and not prev_syl[0].isupper()):
has_transition = True
break
if not has_transition:
continue
# Check whitelist
word_lower = " ".join(s.lower() for s in syllables[start:end])
if word_lower in CROSS_BOUNDARY_WHITELIST:
continue
# Split at each lowercase→uppercase transition
word_before = " ".join(syllables[start:end])
for j in range(start + 1, end):
prev_syl = syllables[j - 1]
curr_syl = syllables[j]
if (curr_syl and curr_syl[0].isupper() and
prev_syl and not prev_syl[0].isupper()):
new_tags[j] = "B-W"
word_parts = bio_to_words(syllables[start:end], new_tags[start:end])
changes.append(f"split \"{word_before}\" → {' + '.join(repr(p) for p in word_parts)}")
return new_tags, changes
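# Worked example (from the docstring): a single word spanning
# ["tố", "tụng", "Người"] with tags ["B-W", "I-W", "I-W"] has a lowercase →
# uppercase transition at "Người", so its tag is flipped to "B-W" and the
# token becomes "tố tụng" + "Người". A name like "Việt Nam" (both syllables
# uppercase) has no such transition and is left untouched.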
def fix_split_long_tokens(syllables, tags, vocab):
"""Pass 1.5: Split 5+ syllable tokens into sub-words via vocab decomposition.
For each word with 5+ syllables, apply greedy left-to-right longest-match
against the vocab (trying 4→3→2 syllable matches). Unmatched syllables
become single-syllable words.
Returns (new_tags, list of change descriptions).
"""
new_tags = list(tags)
changes = []
# Reconstruct word spans
word_spans = []
current_start = 0
for i in range(len(tags)):
if tags[i] == "B-W" and i > 0:
word_spans.append((current_start, i))
current_start = i
word_spans.append((current_start, len(tags)))
for start, end in word_spans:
n_syls = end - start
if n_syls < 5:
continue
# Greedy left-to-right longest-match decomposition
word_before = " ".join(syllables[start:end])
pos = start
sub_words = [] # list of (sub_start, sub_end) index pairs
while pos < end:
matched = False
# Try longest match first (4 → 3 → 2 syllables)
for length in range(min(4, end - pos), 1, -1):
candidate = " ".join(
s.lower() for s in syllables[pos:pos + length]
)
if candidate in vocab:
sub_words.append((pos, pos + length))
pos += length
matched = True
break
if not matched:
sub_words.append((pos, pos + 1))
pos += 1
# Only change if we actually split into multiple sub-words
if len(sub_words) <= 1:
continue
# Update tags: B-W at start of each sub-word, I-W within
for sw_start, sw_end in sub_words:
new_tags[sw_start] = "B-W"
for j in range(sw_start + 1, sw_end):
new_tags[j] = "I-W"
word_parts = bio_to_words(
syllables[start:end], new_tags[start:end]
)
changes.append(
f"split \"{word_before}\" → "
f"{' + '.join(repr(p) for p in word_parts)}"
)
return new_tags, changes
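# Sketch of the greedy decomposition (vocab membership assumed, s1..s6 are
# placeholder syllables): for a 6-syllable token where "s1 s2 s3" and "s5 s6"
# are in the vocab but no match starts at s4, the result is the sub-words
# [s1 s2 s3], [s4], [s5 s6]; at each position the longest match (4 → 3 → 2
# syllables) is tried first, and unmatched syllables become one-syllable words.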
def fix_merge_compounds(syllables, tags):
"""Pass 2: Merge always-split compounds.
Scan syllables left-to-right. At each B-W position, check if the next N
syllables (all at B-W positions) match a MERGE_TERMS entry (case-insensitive).
Longest match first. If so, change subsequent B-W tags to I-W.
Returns (new_tags, list of change descriptions).
"""
new_tags = list(tags)
changes = []
n = len(syllables)
i = 0
while i < n:
if new_tags[i] != "B-W":
i += 1
continue
matched = False
# Try longest match first
for length in range(min(MERGE_MAX_LEN, n - i), 1, -1):
if length not in _MERGE_BY_LENGTH:
continue
# Check all syllables in range are B-W (separate words)
all_bw = True
for j in range(i, i + length):
if j > i and new_tags[j] != "B-W":
all_bw = False
break
if not all_bw:
continue
# Check if syllables match any MERGE_TERMS entry
candidate = tuple(s.lower() for s in syllables[i:i + length])
if candidate in MERGE_TERMS:
# Merge: change B-W to I-W for positions after the first
parts_before = [syllables[j] for j in range(i, i + length)]
for j in range(i + 1, i + length):
new_tags[j] = "I-W"
merged = " ".join(parts_before)
changes.append(f"merge \"{merged}\"")
i += length
matched = True
break
if not matched:
i += 1
return new_tags, changes
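# Worked example: two adjacent single-syllable words "vụ" (B-W) and "án" (B-W)
# match the MERGE_TERMS entry ("vụ", "án"), so the tag on "án" is changed to
# I-W and the pair becomes the single word "vụ án". Longer entries such as
# ("giấy", "chứng", "nhận") are tried before shorter ones at each position.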
def _is_latin_no_vietnamese(s):
"""Check if a string is purely Latin-script without Vietnamese diacritics.
Returns True if the string contains at least one Latin letter (a-z, A-Z) and
no Vietnamese-specific characters (ă, â, đ, ê, ô, ơ, ư and their tone marks);
digits, hyphens, and non-Vietnamese Latin extensions may appear alongside.
"""
# Vietnamese diacritics pattern: any character with Vietnamese-specific marks
vietnamese_chars = re.compile(
r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợ'
r'ùúủũụưứừửữựỳýỷỹỵđÀÁẢÃẠĂẮẰẲẴẶÂẤẦẨẪẬÈÉẺẼẸÊẾỀỂỄỆÌÍỈĨỊ'
r'ÒÓỎÕỌÔỐỒỔỖỘƠỚỜỞỠỢÙÚỦŨỤƯỨỪỬỮỰỲÝỶỸỴĐ]'
)
if vietnamese_chars.search(s):
return False
# Must contain at least one Latin letter
return bool(re.search(r'[a-zA-Z]', s))
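# Examples: _is_latin_no_vietnamese("Planck") is True (Latin letters, no
# Vietnamese diacritics); _is_latin_no_vietnamese("hóa") is False ("ó" carries
# a Vietnamese tone mark); _is_latin_no_vietnamese("2024") is False because
# the string contains no Latin letter at all.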
# Known foreign proper names that should stay merged (whitelist)
FOREIGN_NAME_WHITELIST = {
"beethoven", "homer", "odysseus", "cecelia", "ahern", "holly",
"hideoshi", "gurth", "euler", "hilbert", "rydberg", "bohr",
"frankael-zermelo", "giambattista", "valli", "dachau",
"habsburg", "newton", "einstein", "darwin", "shakespeare",
}
def fix_foreign_words(syllables, tags, viet_syllables):
"""Pass 2.5: Split foreign word merges.
Detects multi-syllable tokens where ALL syllables are Latin-script only
(no Vietnamese diacritics) AND none of the syllables are common Vietnamese
syllables. Each such foreign syllable becomes its own word (B-W).
Args:
syllables: list of syllable strings.
tags: list of BIO tag strings.
viet_syllables: set of common Vietnamese syllables (lowercase) for
filtering out false positives like "kinh doanh".
Returns (new_tags, list of change descriptions).
"""
new_tags = list(tags)
changes = []
# Reconstruct word spans
word_spans = []
current_start = 0
for i in range(len(tags)):
if tags[i] == "B-W" and i > 0:
word_spans.append((current_start, i))
current_start = i
word_spans.append((current_start, len(tags)))
for start, end in word_spans:
n_syls = end - start
if n_syls < 2:
continue
# Check if ALL syllables are Latin-only (no Vietnamese diacritics)
all_latin = all(_is_latin_no_vietnamese(syllables[j]) for j in range(start, end))
if not all_latin:
continue
# Check if ANY syllable is a common Vietnamese syllable → skip
has_viet = any(
syllables[j].lower() in viet_syllables for j in range(start, end)
)
if has_viet:
continue
# Check whitelist: if the whole token is a known name, skip
token_lower = " ".join(syllables[start:end]).lower()
if token_lower in FOREIGN_NAME_WHITELIST:
continue
# Split: make each syllable its own word
word_before = " ".join(syllables[start:end])
for j in range(start + 1, end):
new_tags[j] = "B-W"
parts = [syllables[j] for j in range(start, end)]
changes.append(f"split-foreign \"{word_before}\" → {' + '.join(repr(p) for p in parts)}")
return new_tags, changes
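# Worked example (from the docstring, assuming neither syllable is a common
# Vietnamese syllable): a merged token ["Max", "Planck"] tagged ["B-W", "I-W"]
# is all Latin script with no diacritics and is not in FOREIGN_NAME_WHITELIST,
# so "Planck" is retagged B-W and the token is split into "Max" + "Planck".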
# First-syllable prefixes (lowercase strings) that start legitimate Vietnamese
# institutional or lexical compounds. A multi-syllable word beginning with one
# of these followed by a lowercase continuation is most likely a real compound
# rather than a proper name boundary error, so the name-boundary pass skips it.
NAME_BOUNDARY_WHITELIST_S1 = {
"ủy", # Ủy ban (nhân dân / thường vụ / ...)
"viện", # Viện kiểm sát / Viện nghiên cứu
"tổng", # Tổng giám đốc / Tổng thư ký / ...
"nhà", # Nhà khoa học / Nhà xuất bản / Nhà đầu tư
"phòng", # Phòng thí nghiệm
"cảng", # Cảng hàng không
"xuất", # Xuất nhập khẩu
"sách", # Sách (title compounds)
"thuế", # Thuế thu nhập
"cây", # Cây lương thực
"nói", # Nói tóm lại
"bộ", # Bộ luật dân sự / Bộ Tài chính
"đại", # Đại hội đồng
"lò", # Lò phản ứng
"ngay", # Ngay lập tức
"việc", # Việc làm ăn
"vùng", # Vùng kinh tế
"sân", # Sân vận động
"tiểu", # Tiểu văn hóa
"trang", # Trang thiết bị
"tết", # Tết dương lịch
"thuyết", # Thuyết sinh vật học
"điểm", # Điểm nóng chảy
"lý", # Lý thuyết
"hệ", # Hệ tiên đề
}
def fix_proper_name_boundary(syllables, tags, vocab):
"""Pass 2.75: Split proper name boundary merges.
Detects multi-syllable tokens where uppercase syllables are followed by
a lowercase common word (in vocab). Pattern: [Uppercase...][lowercase_common]
→ split before the lowercase word.
This catches cases like "Tống_tiêu_diệt" → "Tống" + "tiêu_diệt" where
a proper name is merged with the following verb/noun.
Skips known Vietnamese institutional compounds (NAME_BOUNDARY_WHITELIST_S1).
Returns (new_tags, list of change descriptions).
"""
new_tags = list(tags)
changes = []
# Reconstruct word spans
word_spans = []
current_start = 0
for i in range(len(tags)):
if tags[i] == "B-W" and i > 0:
word_spans.append((current_start, i))
current_start = i
word_spans.append((current_start, len(tags)))
for start, end in word_spans:
n_syls = end - start
if n_syls < 3:
# Need at least 3 syllables: Name + common_word(s)
continue
# Check whitelist: if the first syllable is a known institutional prefix, skip
if syllables[start].lower() in NAME_BOUNDARY_WHITELIST_S1:
continue
# Find the transition point: last uppercase syllable before lowercase
# Look for pattern: [Title/Upper...][lower...]
# where the lowercase portion forms a known vocab word
split_pos = None
for j in range(start + 1, end):
curr_syl = syllables[j]
prev_syl = syllables[j - 1]
# Transition: previous is title/upper, current is lowercase
if (prev_syl and prev_syl[0].isupper() and
curr_syl and not curr_syl[0].isupper()):
# Check if remaining syllables form a vocab word
remaining = " ".join(s.lower() for s in syllables[j:end])
if remaining in vocab:
split_pos = j
break
# Also check if just 2 syllables from here form a vocab word
if end - j >= 2:
two_syl = " ".join(s.lower() for s in syllables[j:j+2])
if two_syl in vocab:
split_pos = j
break
if split_pos is None:
continue
word_before = " ".join(syllables[start:end])
new_tags[split_pos] = "B-W"
word_parts = bio_to_words(syllables[start:end], new_tags[start:end])
changes.append(
f"split-name-boundary \"{word_before}\" → "
f"{' + '.join(repr(p) for p in word_parts)}"
)
return new_tags, changes
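# Worked example (from the docstring, assuming "tiêu diệt" is in the vocab):
# the merged token ["Tống", "tiêu", "diệt"] tagged ["B-W", "I-W", "I-W"] has
# an uppercase → lowercase transition at "tiêu", its first syllable "tống" is
# not a whitelisted prefix, and the remainder "tiêu diệt" is a known word, so
# "tiêu" is retagged B-W, yielding "Tống" + "tiêu diệt".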
def validate_sentence(syllables, tags):
"""Pass 3: Validate BIO invariants.
Returns list of error descriptions (empty if valid).
"""
errors = []
if not syllables:
return errors
if tags[0] != "B-W":
errors.append(f"sentence starts with {tags[0]} instead of B-W")
for i, tag in enumerate(tags):
if tag not in ("B-W", "I-W"):
errors.append(f"position {i}: invalid tag '{tag}'")
return errors
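# Examples: tags ["I-W", "B-W"] yield the error "sentence starts with I-W
# instead of B-W"; a stray tag such as "O" yields "position i: invalid tag 'O'".
# A valid sentence returns an empty list.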
# ============================================================================
# Report generation
# ============================================================================
def generate_report(all_stats, output_path=None):
"""Generate markdown report of all changes."""
lines = []
lines.append("# WS Fix Report")
lines.append("")
lines.append("Fixes applied by `src/fix_ws_errors.py` to UDD-1.1 word segmentation BIO files.")
lines.append("")
# Summary table
lines.append("## Summary")
lines.append("")
lines.append("| File | Cross-boundary | Long token | Compound merges | Foreign splits | Name boundary | Validation errors |")
lines.append("|------|---------------:|-----------:|----------------:|---------------:|--------------:|------------------:|")
total_splits = 0
total_long_splits = 0
total_merges = 0
total_foreign = 0
total_name_boundary = 0
total_errors = 0
for fname, stats in all_stats.items():
n_splits = stats["n_cross_boundary"]
n_long = stats["n_split_long"]
n_merges = stats["n_merge"]
n_foreign = stats.get("n_foreign", 0)
n_name_boundary = stats.get("n_name_boundary", 0)
n_errors = stats["n_validation_errors"]
total_splits += n_splits
total_long_splits += n_long
total_merges += n_merges
total_foreign += n_foreign
total_name_boundary += n_name_boundary
total_errors += n_errors
lines.append(f"| {fname} | {n_splits:,} | {n_long:,} | {n_merges:,} | {n_foreign:,} | {n_name_boundary:,} | {n_errors:,} |")
lines.append(f"| **TOTAL** | **{total_splits:,}** | **{total_long_splits:,}** | **{total_merges:,}** | **{total_foreign:,}** | **{total_name_boundary:,}** | **{total_errors:,}** |")
lines.append("")
# Merge term frequency across all files
lines.append("## Merge Frequency by Term")
lines.append("")
merge_counts = Counter()
for stats in all_stats.values():
merge_counts += stats["merge_term_counts"]
lines.append("| Term | Count |")
lines.append("|:-----|------:|")
for term, count in merge_counts.most_common():
lines.append(f"| {term} | {count:,} |")
lines.append("")
# Cross-boundary split examples
lines.append("## Cross-Boundary Split Examples")
lines.append("")
for fname, stats in all_stats.items():
if stats["cross_boundary_examples"]:
lines.append(f"### {fname}")
lines.append("")
for ex in stats["cross_boundary_examples"][:20]:
lines.append(f"- {ex}")
if len(stats["cross_boundary_examples"]) > 20:
lines.append(f"- ... and {len(stats['cross_boundary_examples']) - 20} more")
lines.append("")
# Long token split examples
lines.append("## Long Token Split Examples")
lines.append("")
for fname, stats in all_stats.items():
if stats["split_long_examples"]:
lines.append(f"### {fname}")
lines.append("")
for ex in stats["split_long_examples"][:30]:
lines.append(f"- {ex}")
if len(stats["split_long_examples"]) > 30:
lines.append(f"- ... and {len(stats['split_long_examples']) - 30} more")
lines.append("")
# Foreign word split examples
lines.append("## Foreign Word Split Examples")
lines.append("")
for fname, stats in all_stats.items():
examples = stats.get("foreign_examples", [])
if examples:
lines.append(f"### {fname}")
lines.append("")
for ex in examples[:30]:
lines.append(f"- {ex}")
if len(examples) > 30:
lines.append(f"- ... and {len(examples) - 30} more")
lines.append("")
# Name boundary split examples
lines.append("## Name Boundary Split Examples")
lines.append("")
for fname, stats in all_stats.items():
examples = stats.get("name_boundary_examples", [])
if examples:
lines.append(f"### {fname}")
lines.append("")
for ex in examples[:30]:
lines.append(f"- {ex}")
if len(examples) > 30:
lines.append(f"- ... and {len(examples) - 30} more")
lines.append("")
report = "\n".join(lines)
if output_path:
with open(output_path, "w", encoding="utf-8") as f:
f.write(report)
print(f"\nReport written to {output_path}")
return report
# ============================================================================
# Main
# ============================================================================
def process_file(filepath, vocab=None, viet_syllables=None, sentences=None, dry_run=False):
"""Process a single BIO file: apply fixes, optionally write back.
Args:
filepath: Path to BIO file.
vocab: Set of known 2-4 syllable words for long-token splitting.
If None, Pass 1.5 and 2.75 are skipped.
viet_syllables: Set of common Vietnamese syllables for foreign word
filtering. If None, Pass 2.5 is skipped.
sentences: Pre-parsed sentences (avoids re-parsing if already loaded).
dry_run: If True, report changes without modifying files.
Returns (sentences, stats_dict).
"""
print(f"\nProcessing {filepath}...")
if sentences is None:
sentences = parse_bio_file(filepath)
print(f" Loaded {len(sentences):,} sentences")
total_syllables_before = sum(len(s["syllables"]) for s in sentences)
total_words_before = sum(
sum(1 for t in s["tags"] if t == "B-W") for s in sentences
)
n_cross_boundary = 0
n_split_long = 0
n_merge = 0
n_foreign = 0
n_name_boundary = 0
n_validation_errors = 0
cross_boundary_examples = []
split_long_examples = []
foreign_examples = []
name_boundary_examples = []
merge_term_counts = Counter()
for sent in sentences:
syls = sent["syllables"]
# Pass 1: Cross-boundary splits
tags, cb_changes = fix_cross_boundary(syls, sent["tags"])
n_cross_boundary += len(cb_changes)
for ch in cb_changes:
cross_boundary_examples.append(f"[{sent['sent_id']}] {ch}")
# Pass 1.5: Split long tokens (5+ syllables)
if vocab is not None:
tags, split_changes = fix_split_long_tokens(syls, tags, vocab)
n_split_long += len(split_changes)
for ch in split_changes:
split_long_examples.append(f"[{sent['sent_id']}] {ch}")
# Pass 2: Merge compounds
tags, merge_changes = fix_merge_compounds(syls, tags)
n_merge += len(merge_changes)
for ch in merge_changes:
# Extract the merged term for counting
# Format: 'merge "term"'
term = ch.split('"')[1] if '"' in ch else ch
merge_term_counts[term.lower()] += 1
# Pass 2.5: Split foreign word merges
if viet_syllables is not None:
tags, fw_changes = fix_foreign_words(syls, tags, viet_syllables)
n_foreign += len(fw_changes)
for ch in fw_changes:
foreign_examples.append(f"[{sent['sent_id']}] {ch}")
# Pass 2.75: Split proper name boundary merges
if vocab is not None:
tags, nb_changes = fix_proper_name_boundary(syls, tags, vocab)
n_name_boundary += len(nb_changes)
for ch in nb_changes:
name_boundary_examples.append(f"[{sent['sent_id']}] {ch}")
# Pass 3: Validate
errors = validate_sentence(syls, tags)
n_validation_errors += len(errors)
if errors:
print(f" WARN [{sent['sent_id']}]: {'; '.join(errors)}")
sent["tags"] = tags
total_syllables_after = sum(len(s["syllables"]) for s in sentences)
total_words_after = sum(
sum(1 for t in s["tags"] if t == "B-W") for s in sentences
)
print(f" Cross-boundary splits: {n_cross_boundary:,}")
print(f" Long token splits: {n_split_long:,}")
print(f" Compound merges: {n_merge:,}")
print(f" Foreign word splits: {n_foreign:,}")
print(f" Name boundary splits: {n_name_boundary:,}")
print(f" Validation errors: {n_validation_errors:,}")
print(f" Words: {total_words_before:,} → {total_words_after:,} "
f"(Δ{total_words_after - total_words_before:+,})")
assert total_syllables_before == total_syllables_after, \
f"Syllable count changed: {total_syllables_before} → {total_syllables_after}"
print(f" Syllables: {total_syllables_before:,} (unchanged)")
if not dry_run:
write_bio_file(sentences, filepath)
print(f" Written: {filepath}")
# Also regenerate CoNLL-U
conllu_path = filepath.replace(".txt", ".conllu")
write_conllu(sentences, conllu_path)
print(f" Written: {conllu_path}")
stats = {
"n_cross_boundary": n_cross_boundary,
"n_split_long": n_split_long,
"n_merge": n_merge,
"n_foreign": n_foreign,
"n_name_boundary": n_name_boundary,
"n_validation_errors": n_validation_errors,
"cross_boundary_examples": cross_boundary_examples,
"split_long_examples": split_long_examples,
"foreign_examples": foreign_examples,
"name_boundary_examples": name_boundary_examples,
"merge_term_counts": merge_term_counts,
"words_before": total_words_before,
"words_after": total_words_after,
}
return sentences, stats
def main():
parser = argparse.ArgumentParser(
description="Fix known word segmentation errors in UDD-1.1 BIO files."
)
parser.add_argument(
"--dry-run", action="store_true",
help="Report changes without modifying files"
)
args = parser.parse_args()
base_dir = dirname(dirname(__file__))
bio_files = [
join(base_dir, f"udd-ws-v1.1-{split}.txt")
for split in ("train", "dev", "test")
]
# Check all files exist
for path in bio_files:
if not isfile(path):
print(f"ERROR: {path} not found", file=sys.stderr)
sys.exit(1)
if args.dry_run:
print("=== DRY RUN — no files will be modified ===")
# Phase 1: Parse all files
all_sentences_by_file = {}
for path in bio_files:
print(f"Loading {path}...")
all_sentences_by_file[path] = parse_bio_file(path)
print(f" {len(all_sentences_by_file[path]):,} sentences")
# Phase 2: Build vocab from all sentences
all_sents = [s for sents in all_sentences_by_file.values() for s in sents]
vocab = build_split_vocab(all_sents)
print(f"\nBuilt split vocab: {len(vocab):,} entries "
f"(2-4 syllable words with count >= 5)")
viet_syllables = build_viet_syllables(all_sents)
print(f"Built Vietnamese syllable set: {len(viet_syllables):,} entries "
f"(syllables with count >= 50)")
# Phase 3: Process each file
all_stats = {}
for path in bio_files:
fname = path.rsplit("/", 1)[-1]
_, stats = process_file(
path,
vocab=vocab,
viet_syllables=viet_syllables,
sentences=all_sentences_by_file[path],
dry_run=args.dry_run,
)
all_stats[fname] = stats
# Generate report
report_path = join(base_dir, "WS_FIX_REPORT.md")
if not args.dry_run:
generate_report(all_stats, report_path)
else:
report = generate_report(all_stats)
print("\n" + report)
# Final summary
total_splits = sum(s["n_cross_boundary"] for s in all_stats.values())
total_long = sum(s["n_split_long"] for s in all_stats.values())
total_merges = sum(s["n_merge"] for s in all_stats.values())
total_foreign = sum(s.get("n_foreign", 0) for s in all_stats.values())
total_name_boundary = sum(s.get("n_name_boundary", 0) for s in all_stats.values())
total_errors = sum(s["n_validation_errors"] for s in all_stats.values())
print(f"\n{'='*50}")
print(f"TOTAL: {total_splits:,} cross-boundary splits, "
f"{total_long:,} long token splits, "
f"{total_merges:,} compound merges, "
f"{total_foreign:,} foreign word splits, "
f"{total_name_boundary:,} name boundary splits, "
f"{total_errors:,} validation errors")
if args.dry_run:
print("(dry run — no files modified)")
if __name__ == "__main__":
main()