| """Evaluate CRF word segmentation against gold annotations. |
| |
| Compares silver (CRF) predictions from udd-ws-v1.1-{dev,test}.txt against |
| gold corrections in gold_ws_cycle1.txt. Reports Word F1/Precision/Recall, |
| per-domain breakdown, and detailed error analysis. |
| |
| With --model, uses the CRF model to predict directly on gold sentences |
| (instead of reading from silver files). This is needed when gold has been |
| merged into silver files. |
| |
| Usage: |
| python src/eval_ws_gold.py |
| python src/eval_ws_gold.py --model path/to/model.crfsuite |
| """ |
|
|
| import argparse |
| import sys |
| from collections import Counter, defaultdict |
| from pathlib import Path |
|
|
|
|
def parse_bio_file(path):
    """Read a BIO-tagged file and return {sent_id: [(syllable, tag), ...]}.

    Recognizes `# sent_id = ...` headers, skips `# text = ...` lines, and
    treats blank (or whitespace-only) lines as sentence terminators. Token
    lines are tab-separated; only the first two columns are kept.
    """
    sentences = {}
    sid, toks = None, []

    with open(path, "r", encoding="utf-8") as handle:
        for raw in handle:
            raw = raw.rstrip("\n")
            if raw.startswith("# sent_id = "):
                # A new header also terminates any sentence still in flight.
                if sid and toks:
                    sentences[sid] = toks
                sid, toks = raw.split("= ", 1)[1], []
            elif raw.startswith("# text = "):
                continue
            elif not raw.strip():
                if sid and toks:
                    sentences[sid] = toks
                sid, toks = None, []
            elif "\t" in raw:
                fields = raw.split("\t")
                if len(fields) >= 2:
                    toks.append((fields[0], fields[1]))

    # Flush a trailing sentence when the file has no final blank line.
    if sid and toks:
        sentences[sid] = toks

    return sentences
|
|
|
|
def bio_to_words(tokens):
    """Join BIO-tagged syllables into underscore-separated word strings.

    "B-W" starts a new word, "I-W" continues the current one (a leading
    "I-W" with no preceding "B-W" still opens a word); any other tag is
    ignored.
    """
    words = []
    buf = []
    for syllable, label in tokens:
        if label == "B-W" and buf:
            # Close the word in progress before opening the next one.
            words.append("_".join(buf))
            buf = []
        if label in ("B-W", "I-W"):
            buf.append(syllable)
    if buf:
        words.append("_".join(buf))
    return words
|
|
|
|
def bio_to_word_spans(tokens):
    """Convert BIO tokens to half-open word spans (start_idx, end_idx).

    Each "B-W" after position 0 closes the previous span; the final span
    runs to len(tokens). Returns [] for empty input — the previous version
    emitted a spurious (0, 0) span in that case.
    """
    if not tokens:
        return []
    spans = []
    start = 0
    for i, (_syl, tag) in enumerate(tokens):
        if tag == "B-W" and i > 0:
            spans.append((start, i))
            start = i
    spans.append((start, len(tokens)))
    return spans
|
|
|
|
def get_domain(sent_id):
    """Map a sent_id's corpus prefix to its domain name ("unknown" if none)."""
    prefix_domains = (
        ("vlc-", "legal"),
        ("uvn-", "news"),
        ("uvw-", "wikipedia"),
        ("uvb-f-", "fiction"),
        ("uvb-n-", "non-fiction"),
    )
    for prefix, domain in prefix_domains:
        if sent_id.startswith(prefix):
            return domain
    return "unknown"
|
|
|
|
def compute_word_metrics(silver_words, gold_words):
    """Compute word-level precision, recall, F1.

    Uses multiset intersection (same as CoNLL WS eval), i.e. a predicted
    word counts as a true positive once per matching gold occurrence,
    regardless of position.

    Returns (precision, recall, f1, tp, pred_total, gold_total).
    """
    # Counter `&` keeps the minimum count per word — multiset intersection.
    overlap = Counter(silver_words) & Counter(gold_words)
    tp = sum(overlap.values())
    n_pred = len(silver_words)
    n_gold = len(gold_words)

    precision = tp / n_pred if n_pred > 0 else 0
    recall = tp / n_gold if n_gold > 0 else 0
    denom = precision + recall
    f1 = (2 * precision * recall / denom) if denom > 0 else 0

    return precision, recall, f1, tp, n_pred, n_gold
|
|
|
|
def compute_boundary_metrics(silver_tokens, gold_tokens):
    """Compute syllable-level tag accuracy plus tag-change direction counts.

    `changes` tallies mismatches keyed by the first letter of each tag,
    e.g. "B→I" means silver said B-* where gold says I-*.

    Returns (accuracy, correct, total, changes). Raises AssertionError if
    the two token lists differ in length.
    """
    assert len(silver_tokens) == len(gold_tokens), \
        f"Length mismatch: {len(silver_tokens)} vs {len(gold_tokens)}"

    changes = {"B→I": 0, "I→B": 0}
    total = len(silver_tokens)
    correct = 0

    for (_syl_s, tag_s), (_syl_g, tag_g) in zip(silver_tokens, gold_tokens):
        if tag_s == tag_g:
            correct += 1
            continue
        # .get fallback covers tags outside the pre-seeded B/I pair.
        key = f"{tag_s[0]}→{tag_g[0]}"
        changes[key] = changes.get(key, 0) + 1

    accuracy = correct / total if total > 0 else 0
    return accuracy, correct, total, changes
|
|
|
|
def find_differences(sent_id, silver_tokens, gold_tokens):
    """Find word segmentation differences between silver and gold.

    Args:
        sent_id: kept for interface compatibility; not used in the
            computation itself.
        silver_tokens / gold_tokens: [(syllable, tag), ...] over the same
            syllable sequence.

    Returns:
        A list of ("silver"|"gold", (start, end), word) tuples, one per word
        span present in only one of the two segmentations; empty when the
        spans agree exactly.
    """
    # (Removed dead code: silver/gold word lists were built here but unused.)
    silver_spans = bio_to_word_spans(silver_tokens)
    gold_spans = bio_to_word_spans(gold_tokens)

    if silver_spans == gold_spans:
        return []

    silver_set = set(silver_spans)
    gold_set = set(gold_spans)

    only_silver = silver_set - gold_set
    only_gold = gold_set - silver_set

    def span_to_word(tokens, start, end):
        # Underscore-join the syllables covered by a span.
        return "_".join(t[0] for t in tokens[start:end])

    diffs = []
    for s in sorted(only_silver):
        diffs.append(("silver", s, span_to_word(silver_tokens, s[0], s[1])))
    for g in sorted(only_gold):
        diffs.append(("gold", g, span_to_word(gold_tokens, g[0], g[1])))

    return diffs
|
|
|
|
def classify_error(silver_word, gold_words_at_pos):
    """Classify error type: over-merge, over-split, boundary-shift.

    Args:
        silver_word: underscore-joined predicted word.
        gold_words_at_pos: underscore-joined gold words overlapping it.

    Returns:
        "over-merge" when the silver word spans more syllables than every
        overlapping gold word, "over-split" when some gold word spans more,
        "boundary-shift" otherwise. An empty gold list now yields
        "boundary-shift" — previously `max()` on the empty sequence raised
        ValueError for single-syllable silver words.
    """
    if not gold_words_at_pos:
        return "boundary-shift"
    n_silver = len(silver_word.split("_"))
    gold_lens = [len(g.split("_")) for g in gold_words_at_pos]
    if n_silver > 1 and all(gl < n_silver for gl in gold_lens):
        return "over-merge"
    if n_silver < max(gold_lens):
        return "over-split"
    return "boundary-shift"
|
|
|
|
def predict_with_model(model_path, gold):
    """Use CRF model to predict on gold sentence syllables.

    Args:
        model_path: Path to a .crfsuite model file; a `dictionary.txt`
            sitting next to it is loaded as a feature lexicon when present.
        gold: {sent_id: [(syllable, tag), ...]} — gold tags are ignored,
            only the syllables are fed to the tagger.

    Returns:
        {sent_id: [(syllable, predicted_tag), ...]} with bare "B"/"I" tags
        normalized to "B-W"/"I-W".
    """
    import pycrfsuite

    # Make the sibling feature-extraction module importable.
    sys.path.insert(0, str(Path(__file__).parent))
    from al_score_ws import extract_syllable_features, load_dictionary

    tagger = pycrfsuite.Tagger()
    tagger.open(str(model_path))
    print(f"Model loaded: {model_path}")

    dict_path = model_path.parent / "dictionary.txt"
    dictionary = None
    if dict_path.exists():
        dictionary = load_dictionary(dict_path)
        print(f"Dictionary loaded: {len(dictionary)} entries")

    normalize = {"B": "B-W", "I": "I-W"}
    predictions = {}
    for sent_id, tokens in gold.items():
        syllables = [tok[0] for tok in tokens]
        # One "key=value" feature list per syllable position.
        feature_seq = []
        for idx in range(len(syllables)):
            feats = extract_syllable_features(syllables, idx, dictionary)
            feature_seq.append([f"{k}={v}" for k, v in feats.items()])
        tags = tagger.tag(feature_seq)
        predictions[sent_id] = [
            (syl, normalize.get(tag, tag)) for syl, tag in zip(syllables, tags)
        ]

    return predictions
|
|
|
|
def main():
    """CLI entry point: score silver/CRF word segmentation against gold.

    Loads gold BIO annotations, obtains silver predictions (from the
    dev/test silver files, or by running a CRF model when --model is
    given), then prints overall metrics, a per-domain breakdown, and an
    error analysis to stdout.
    """
    parser = argparse.ArgumentParser(description="Evaluate WS against gold")
    parser.add_argument("--model", type=str, default=None,
                        help="CRF model path for direct prediction")
    args = parser.parse_args()

    # NOTE(review): hard-coded workspace path — breaks outside this machine.
    base = Path("/home/claude-code/projects/workspace_underthesea/UDD-1")
    gold_path = base / "gold_ws_cycle1.txt"

    # Gold corrections: {sent_id: [(syllable, tag), ...]}.
    gold = parse_bio_file(gold_path)
    print(f"Gold sentences: {len(gold)}")

    if args.model:
        # Direct-prediction mode: run the CRF model on the gold syllables.
        model_path = Path(args.model)
        if not model_path.exists():
            # Fall back to the newest trained model directory (lexicographic
            # sort of timestamped names; last entry is the most recent).
            tree1_models = base.parent / "tree-1" / "models" / "word_segmentation"
            model_dirs = sorted(tree1_models.glob("udd_ws_v1_1-*"))
            if model_dirs:
                model_path = model_dirs[-1] / "model.crfsuite"
            # NOTE(review): if no candidate dirs exist, model_path still points
            # at a missing file and predict_with_model will fail opening it.
        silver = predict_with_model(model_path, gold)
        print(f"CRF predictions: {len(silver)} sentences")
    else:
        # File mode: silver predictions come from the dev + test BIO files.
        silver_dev = parse_bio_file(base / "udd-ws-v1.1-dev.txt")
        silver_test = parse_bio_file(base / "udd-ws-v1.1-test.txt")
        silver = {**silver_dev, **silver_test}
        print(f"Silver sentences loaded: {len(silver_dev)} dev + {len(silver_test)} test")

    # Align gold sentence ids with the available silver predictions.
    matched = []
    missing = []
    for sid in gold:
        if sid in silver:
            matched.append(sid)
        else:
            missing.append(sid)

    print(f"Matched: {len(matched)}, Missing in silver: {len(missing)}")
    if missing:
        print(f" Missing: {missing}")

    # Accumulators for corpus-level and per-domain statistics.
    total_tp = total_pred = total_gold = 0
    total_syl_correct = total_syl = 0
    all_changes = Counter()
    domain_stats = defaultdict(lambda: {"tp": 0, "pred": 0, "gold": 0, "syl_correct": 0, "syl_total": 0, "n": 0})
    all_diffs = []
    error_types = Counter()  # NOTE(review): never updated below; dead accumulator.

    for sid in matched:
        s_tokens = silver[sid]
        g_tokens = gold[sid]

        # Sanity check: silver and gold must cover the same syllable sequence,
        # otherwise per-position comparison is meaningless — skip the sentence.
        s_syls = [t[0] for t in s_tokens]
        g_syls = [t[0] for t in g_tokens]
        if s_syls != g_syls:
            print(f" WARNING: syllable mismatch in {sid}")
            print(f" Silver: {' '.join(s_syls[:10])}...")
            print(f" Gold: {' '.join(g_syls[:10])}...")
            continue

        # Word-level multiset P/R/F1 for this sentence.
        s_words = bio_to_words(s_tokens)
        g_words = bio_to_words(g_tokens)
        p, r, f1, tp, pred, gtotal = compute_word_metrics(s_words, g_words)
        total_tp += tp
        total_pred += pred
        total_gold += gtotal

        # Syllable-level tag accuracy and B/I change directions.
        acc, correct, total, changes = compute_boundary_metrics(s_tokens, g_tokens)
        total_syl_correct += correct
        total_syl += total
        all_changes.update(changes)

        # Per-domain tallies, keyed by the sent_id prefix.
        domain = get_domain(sid)
        ds = domain_stats[domain]
        ds["tp"] += tp
        ds["pred"] += pred
        ds["gold"] += gtotal
        ds["syl_correct"] += correct
        ds["syl_total"] += total
        ds["n"] += 1

        # Keep full token context for any sentence with span differences.
        diffs = find_differences(sid, s_tokens, g_tokens)
        if diffs:
            all_diffs.append((sid, domain, diffs, s_tokens, g_tokens))

    print("\n" + "=" * 60)
    print("EVALUATION: CRF Silver vs Gold (Cycle 1)")
    print("=" * 60)

    # Micro-averaged corpus metrics (guarded against zero denominators).
    overall_p = total_tp / total_pred if total_pred else 0
    overall_r = total_tp / total_gold if total_gold else 0
    overall_f1 = 2 * overall_p * overall_r / (overall_p + overall_r) if (overall_p + overall_r) else 0
    syl_acc = total_syl_correct / total_syl if total_syl else 0

    print(f"\n## Overall ({len(matched)} sentences)")
    print(f" Syllable Accuracy: {syl_acc:.4f} ({total_syl_correct}/{total_syl})")
    print(f" Word Precision: {overall_p:.4f}")
    print(f" Word Recall: {overall_r:.4f}")
    print(f" Word F1: {overall_f1:.4f}")
    print(f" Boundary changes: {dict(all_changes)}")
    print(f" B→I (over-merge in silver): {all_changes.get('B→I', 0)}")
    print(f" I→B (over-split in silver): {all_changes.get('I→B', 0)}")

    # Per-domain table in a fixed order; domains with no sentences are skipped.
    print(f"\n## Per-Domain Breakdown")
    print(f" {'Domain':<14} {'N':>4} {'Syl Acc':>8} {'P':>7} {'R':>7} {'F1':>7}")
    print(f" {'-'*14} {'-'*4} {'-'*8} {'-'*7} {'-'*7} {'-'*7}")
    for domain in ["legal", "news", "wikipedia", "fiction", "non-fiction"]:
        ds = domain_stats[domain]
        if ds["n"] == 0:
            continue
        dp = ds["tp"] / ds["pred"] if ds["pred"] else 0
        dr = ds["tp"] / ds["gold"] if ds["gold"] else 0
        df1 = 2 * dp * dr / (dp + dr) if (dp + dr) else 0
        dacc = ds["syl_correct"] / ds["syl_total"] if ds["syl_total"] else 0
        print(f" {domain:<14} {ds['n']:>4} {dacc:>8.4f} {dp:>7.4f} {dr:>7.4f} {df1:>7.4f}")

    print(f"\n## Error Analysis ({len(all_diffs)} sentences with differences)")

    merge_errors = []
    split_errors = []

    for sid, domain, diffs, s_tokens, g_tokens in all_diffs:
        s_spans = set(bio_to_word_spans(s_tokens))
        g_spans = set(bio_to_word_spans(g_tokens))

        only_silver = s_spans - g_spans
        only_gold = g_spans - s_spans

        def span_word(tokens, s, e):
            return "_".join(t[0] for t in tokens[s:e])

        # A multi-syllable silver-only span overlapping gold-only spans means
        # silver merged syllables that gold keeps apart (over-merge).
        for span in only_silver:
            word = span_word(s_tokens, span[0], span[1])
            n_syls = span[1] - span[0]
            overlapping_gold = [g for g in only_gold if g[0] < span[1] and g[1] > span[0]]
            if overlapping_gold and n_syls > 1:
                gold_words = [span_word(g_tokens, g[0], g[1]) for g in overlapping_gold]
                merge_errors.append((sid, domain, word, gold_words))

        # Symmetric case: a multi-syllable gold-only span overlapping
        # silver-only spans means silver over-split.
        for span in only_gold:
            word = span_word(g_tokens, span[0], span[1])
            n_syls = span[1] - span[0]
            overlapping_silver = [s for s in only_silver if s[0] < span[1] and s[1] > span[0]]
            if overlapping_silver and n_syls > 1:
                silver_words = [span_word(s_tokens, s[0], s[1]) for s in overlapping_silver]
                split_errors.append((sid, domain, word, silver_words))

    print(f"\n### Over-merge errors (silver merged what gold splits): {len(merge_errors)}")
    for sid, domain, silver_word, gold_words in sorted(merge_errors, key=lambda x: x[1]):
        print(f" [{domain:>12}] {sid}: {silver_word} → {' | '.join(gold_words)}")

    print(f"\n### Over-split errors (silver split what gold merges): {len(split_errors)}")
    for sid, domain, gold_word, silver_words in sorted(split_errors, key=lambda x: x[1]):
        print(f" [{domain:>12}] {sid}: {' | '.join(silver_words)} → {gold_word}")

    # Sentence-level word-bag differences (Counter subtraction ignores order),
    # sorted by domain.
    print(f"\n## All Differences (sentence-level)")
    for sid, domain, diffs, s_tokens, g_tokens in sorted(all_diffs, key=lambda x: x[1]):
        s_words = bio_to_words(s_tokens)
        g_words = bio_to_words(g_tokens)
        s_set = set(s_words)  # NOTE(review): s_set/g_set are unused; kept as-is.
        g_set = set(g_words)

        s_only = Counter(s_words) - Counter(g_words)
        g_only = Counter(g_words) - Counter(s_words)
        if s_only or g_only:
            print(f"\n [{domain}] {sid}")
            if s_only:
                print(f" Silver only: {dict(s_only)}")
            if g_only:
                print(f" Gold only: {dict(g_only)}")
|
|
|
# Run the evaluation only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
|