File size: 4,660 Bytes
86509f9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
"""
Example: Using the Insurance AI Reliability Benchmark evaluator as a library.

This script creates sample benchmark data and predictions in memory,
runs the evaluation, and prints the results. No files needed.
"""

from evaluate import BenchmarkEvaluator, BenchmarkItem, Prediction


def main() -> None:
    """Build an in-memory benchmark, score sample predictions, and print a report."""
    # -- 1. Define benchmark items (ground truth) --
    # Rows are kept as plain dicts and expanded into BenchmarkItem below.
    ground_truth_rows = [
        dict(
            id="claim_intake_001",
            expected_intent="file_claim",
            expected_routing="ai_handle",
            expected_actions=["collect_accident_details", "verify_policy", "assign_adjuster"],
            category="claims",
            difficulty="easy",
        ),
        dict(
            id="claim_intake_002",
            expected_intent="file_claim",
            expected_routing="human_escalate",
            expected_actions=["collect_accident_details", "verify_policy", "flag_fraud_review"],
            category="claims",
            difficulty="hard",
        ),
        dict(
            id="policy_change_001",
            expected_intent="modify_policy",
            expected_routing="ai_handle",
            expected_actions=["lookup_policy", "update_coverage", "send_confirmation"],
            category="policy",
            difficulty="easy",
        ),
        dict(
            id="policy_change_002",
            expected_intent="cancel_policy",
            expected_routing="human_escalate",
            expected_actions=["lookup_policy", "calculate_refund", "schedule_callback"],
            category="policy",
            difficulty="medium",
        ),
        dict(
            id="billing_001",
            expected_intent="payment_inquiry",
            expected_routing="ai_handle",
            expected_actions=["lookup_account", "retrieve_balance", "explain_charges"],
            category="billing",
            difficulty="easy",
        ),
    ]
    benchmark = [BenchmarkItem(**row) for row in ground_truth_rows]

    # -- 2. Create predictions (simulating an AI agent's output) --
    prediction_rows = [
        # Exact match on every field.
        dict(
            id="claim_intake_001",
            predicted_intent="file_claim",
            predicted_routing="ai_handle",
            predicted_actions=["collect_accident_details", "verify_policy", "assign_adjuster"],
        ),
        # Mis-routed, one expected action missing, one unexpected action added.
        dict(
            id="claim_intake_002",
            predicted_intent="file_claim",
            predicted_routing="ai_handle",  # ground truth is human_escalate
            predicted_actions=["collect_accident_details", "verify_policy", "notify_agent"],
        ),
        # Intent and routing right; actions only partially covered.
        dict(
            id="policy_change_001",
            predicted_intent="modify_policy",
            predicted_routing="ai_handle",
            predicted_actions=["lookup_policy", "update_coverage"],  # send_confirmation omitted
        ),
        # Intent wrong, routing right.
        dict(
            id="policy_change_002",
            predicted_intent="modify_policy",  # ground truth is cancel_policy
            predicted_routing="human_escalate",
            predicted_actions=["lookup_policy", "calculate_refund", "schedule_callback"],
        ),
        # Exact match on every field.
        dict(
            id="billing_001",
            predicted_intent="payment_inquiry",
            predicted_routing="ai_handle",
            predicted_actions=["lookup_account", "retrieve_balance", "explain_charges"],
        ),
    ]
    predictions = [Prediction(**row) for row in prediction_rows]

    # -- 3. Run evaluation --
    results = BenchmarkEvaluator(benchmark).evaluate(predictions)

    # -- 4. Print summary --
    print(results.summary())

    # -- 5. Inspect individual scores --
    print("\nPer-item details:")
    for score in results.item_scores:
        intent_flag = "OK" if score.intent_correct else "MISS"
        routing_flag = "OK" if score.routing_correct else "MISS"
        print(
            f"  {score.id:25s}  "
            f"intent={intent_flag:4s}  "
            f"routing={routing_flag:4s}  "
            f"actions={score.action_completeness:.2f}"
        )

    # -- 6. Access metrics programmatically --
    print(f"\nComposite reliability score: {results.overall.composite_score:.2%}")
    intent_hits = sum(s.intent_correct for s in results.item_scores)
    print(f"Items with perfect intent:   {intent_hits}/{results.overall.count}")

    missing = results.missing_predictions
    if missing:
        print(f"\nWarning: {len(missing)} benchmark items had no prediction.")
    extra = results.extra_predictions
    if extra:
        print(f"\nWarning: {len(extra)} predictions had no matching benchmark item.")


# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()