| | """ |
| | Example: Using the Insurance AI Reliability Benchmark evaluator as a library. |
| | |
| | This script creates sample benchmark data and predictions in memory, |
| | runs the evaluation, and prints the results. No files needed. |
| | """ |
| |
|
| | from evaluate import BenchmarkEvaluator, BenchmarkItem, Prediction |
| |
|
| |
|
def _sample_benchmark() -> list:
    """Return the in-memory benchmark items used by this demo.

    Each item pairs an id with the expected intent, routing decision, and
    action list, plus a category/difficulty tag for breakdown reporting.
    """
    return [
        BenchmarkItem(
            id="claim_intake_001",
            expected_intent="file_claim",
            expected_routing="ai_handle",
            expected_actions=["collect_accident_details", "verify_policy", "assign_adjuster"],
            category="claims",
            difficulty="easy",
        ),
        BenchmarkItem(
            id="claim_intake_002",
            expected_intent="file_claim",
            expected_routing="human_escalate",
            expected_actions=["collect_accident_details", "verify_policy", "flag_fraud_review"],
            category="claims",
            difficulty="hard",
        ),
        BenchmarkItem(
            id="policy_change_001",
            expected_intent="modify_policy",
            expected_routing="ai_handle",
            expected_actions=["lookup_policy", "update_coverage", "send_confirmation"],
            category="policy",
            difficulty="easy",
        ),
        BenchmarkItem(
            id="policy_change_002",
            expected_intent="cancel_policy",
            expected_routing="human_escalate",
            expected_actions=["lookup_policy", "calculate_refund", "schedule_callback"],
            category="policy",
            difficulty="medium",
        ),
        BenchmarkItem(
            id="billing_001",
            expected_intent="payment_inquiry",
            expected_routing="ai_handle",
            expected_actions=["lookup_account", "retrieve_balance", "explain_charges"],
            category="billing",
            difficulty="easy",
        ),
    ]


def _sample_predictions() -> list:
    """Return sample model predictions keyed to the benchmark item ids.

    The predictions deliberately include mistakes (a missed escalation, a
    wrong intent, an incomplete action list) so the report shows non-trivial
    scores.
    """
    return [
        Prediction(
            id="claim_intake_001",
            predicted_intent="file_claim",
            predicted_routing="ai_handle",
            predicted_actions=["collect_accident_details", "verify_policy", "assign_adjuster"],
        ),
        Prediction(
            id="claim_intake_002",
            predicted_intent="file_claim",
            predicted_routing="ai_handle",
            predicted_actions=["collect_accident_details", "verify_policy", "notify_agent"],
        ),
        Prediction(
            id="policy_change_001",
            predicted_intent="modify_policy",
            predicted_routing="ai_handle",
            predicted_actions=["lookup_policy", "update_coverage"],
        ),
        Prediction(
            id="policy_change_002",
            predicted_intent="modify_policy",
            predicted_routing="human_escalate",
            predicted_actions=["lookup_policy", "calculate_refund", "schedule_callback"],
        ),
        Prediction(
            id="billing_001",
            predicted_intent="payment_inquiry",
            predicted_routing="ai_handle",
            predicted_actions=["lookup_account", "retrieve_balance", "explain_charges"],
        ),
    ]


def _print_report(results) -> None:
    """Print the evaluation summary, per-item details, and any warnings.

    `results` is the object returned by BenchmarkEvaluator.evaluate();
    this helper only reads from it.
    """
    print(results.summary())

    print("\nPer-item details:")
    for score in results.item_scores:
        print(
            f"  {score.id:25s} "
            f"intent={'OK' if score.intent_correct else 'MISS':4s} "
            f"routing={'OK' if score.routing_correct else 'MISS':4s} "
            f"actions={score.action_completeness:.2f}"
        )

    print(f"\nComposite reliability score: {results.overall.composite_score:.2%}")
    print(f"Items with perfect intent: {sum(s.intent_correct for s in results.item_scores)}/{results.overall.count}")

    # Surface coverage problems: benchmark items the model never answered,
    # and stray predictions that match no benchmark item.
    if results.missing_predictions:
        print(f"\nWarning: {len(results.missing_predictions)} benchmark items had no prediction.")
    if results.extra_predictions:
        print(f"\nWarning: {len(results.extra_predictions)} predictions had no matching benchmark item.")


def main() -> None:
    """Build sample data, run the benchmark evaluation, and print a report."""
    evaluator = BenchmarkEvaluator(_sample_benchmark())
    results = evaluator.evaluate(_sample_predictions())
    _print_report(results)
| |
|
| |
|
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| |
|