File size: 3,160 Bytes
9ec538b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
"""Evaluate sentence segmentation (sent_tokenize) against test cases."""
import json
import argparse
from underthesea import sent_tokenize as underthesea_sent_tokenize


def evaluate(test_cases_path: str, verbose: bool = False, improved: bool = False):
    """Run sentence segmentation over a JSON test suite and report accuracy.

    Parameters
    ----------
    test_cases_path : str
        Path to a JSON file holding a list of test cases. Each case is a
        dict with keys "id", "input" (raw text), "expected" (list of
        sentence strings), and "category".
    verbose : bool
        When True, print per-failure details after the summary table.
    improved : bool
        When True, use the project-local trained Punkt tokenizer
        (``sent_tokenize`` module); otherwise use underthesea's default.

    Returns
    -------
    dict
        Keys: "total", "correct", "incorrect", "accuracy" (0.0 when the
        suite is empty), "by_category", "failures".
    """
    if improved:
        # Deferred import: only needed (and only present) when evaluating
        # the trained Punkt model.
        from sent_tokenize import sent_tokenize
    else:
        sent_tokenize = underthesea_sent_tokenize

    with open(test_cases_path, "r", encoding="utf-8") as fh:
        test_cases = json.load(fh)

    total = len(test_cases)
    correct = 0
    incorrect = 0
    by_category = {}
    failures = []

    for tc in test_cases:
        input_text = tc["input"]
        expected = tc["expected"]
        category = tc["category"]

        actual = sent_tokenize(input_text)
        is_correct = actual == expected

        stats = by_category.setdefault(category, {"total": 0, "correct": 0})
        stats["total"] += 1

        if is_correct:
            correct += 1
            stats["correct"] += 1
        else:
            incorrect += 1
            failures.append(
                {
                    "id": tc["id"],
                    "category": category,
                    "input": input_text,
                    "expected": expected,
                    "actual": actual,
                }
            )

    # Guard against an empty suite: avoid ZeroDivisionError in the report
    # and in the returned accuracy.
    accuracy = correct / total if total else 0.0

    # Print report
    print("=" * 60)
    label = "IMPROVED (trained Punkt)" if improved else "BASELINE (underthesea)"
    print(f"SENTENCE SEGMENTATION EVALUATION - {label}")
    print("=" * 60)
    print(f"\nTotal: {total}  Correct: {correct}  Incorrect: {incorrect}")
    print(f"Accuracy: {100 * accuracy:.1f}%")
    print()
    print(f"{'Category':<25} {'Total':>6} {'Correct':>8} {'Acc':>7}")
    print("-" * 48)
    for cat in sorted(by_category):
        stats = by_category[cat]
        acc = 100 * stats["correct"] / stats["total"]
        print(f"{cat:<25} {stats['total']:>6} {stats['correct']:>8} {acc:>6.1f}%")

    if verbose and failures:
        print(f"\n{'='*60}")
        print(f"FAILURES ({len(failures)})")
        print("=" * 60)
        for fail in failures:  # renamed from `f` to avoid shadowing the file handle
            print(f"\n[{fail['id']}] {fail['category']}")
            # Only show an ellipsis when the input was actually truncated.
            shown = fail["input"][:100]
            suffix = "..." if len(fail["input"]) > 100 else ""
            print(f"  Input:    {shown}{suffix}")
            print(f"  Expected: {[s[:60] for s in fail['expected']]}")
            print(f"  Actual:   {[s[:60] for s in fail['actual']]}")

    return {
        "total": total,
        "correct": correct,
        "incorrect": incorrect,
        "accuracy": accuracy,
        "by_category": by_category,
        "failures": failures,
    }


if __name__ == "__main__":
    # Command-line front-end: forwards flags straight into evaluate().
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--test-cases",
        default="test_cases.json",
        help="Path to test cases JSON file",
    )
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true", help="Show failure details"
    )
    arg_parser.add_argument(
        "--improved",
        action="store_true",
        help="Use trained Punkt model instead of underthesea default",
    )
    opts = arg_parser.parse_args()
    evaluate(opts.test_cases, verbose=opts.verbose, improved=opts.improved)