# pashas's picture
# Upload folder using huggingface_hub
# 86509f9 verified
"""
Example: Using the Insurance AI Reliability Benchmark evaluator as a library.
This script creates sample benchmark data and predictions in memory,
runs the evaluation, and prints the results. No files needed.
"""
from evaluate import BenchmarkEvaluator, BenchmarkItem, Prediction
def main() -> None:
    """Demonstrate the benchmark evaluator end-to-end, entirely in memory.

    Builds ground-truth items and simulated agent predictions, runs the
    evaluator, then prints the summary, per-item scores, and warnings.
    """
    # -- 1. Define benchmark items (ground truth) --
    # (id, intent, routing, actions, category, difficulty) tuples keep the
    # fixture data compact; they are expanded into BenchmarkItem objects below.
    ground_truth = [
        BenchmarkItem(
            id=item_id,
            expected_intent=intent,
            expected_routing=routing,
            expected_actions=list(actions),
            category=category,
            difficulty=difficulty,
        )
        for item_id, intent, routing, actions, category, difficulty in [
            (
                "claim_intake_001", "file_claim", "ai_handle",
                ("collect_accident_details", "verify_policy", "assign_adjuster"),
                "claims", "easy",
            ),
            (
                "claim_intake_002", "file_claim", "human_escalate",
                ("collect_accident_details", "verify_policy", "flag_fraud_review"),
                "claims", "hard",
            ),
            (
                "policy_change_001", "modify_policy", "ai_handle",
                ("lookup_policy", "update_coverage", "send_confirmation"),
                "policy", "easy",
            ),
            (
                "policy_change_002", "cancel_policy", "human_escalate",
                ("lookup_policy", "calculate_refund", "schedule_callback"),
                "policy", "medium",
            ),
            (
                "billing_001", "payment_inquiry", "ai_handle",
                ("lookup_account", "retrieve_balance", "explain_charges"),
                "billing", "easy",
            ),
        ]
    ]

    # -- 2. Create predictions (simulating an AI agent's output) --
    # Deliberately includes perfect matches and partial failures so the
    # evaluator's scoring behavior is visible in the output.
    agent_outputs = [
        Prediction(
            id=item_id,
            predicted_intent=intent,
            predicted_routing=routing,
            predicted_actions=list(actions),
        )
        for item_id, intent, routing, actions in [
            # Perfect match.
            (
                "claim_intake_001", "file_claim", "ai_handle",
                ("collect_accident_details", "verify_policy", "assign_adjuster"),
            ),
            # Wrong routing (should be human_escalate), missing one action,
            # one extra action.
            (
                "claim_intake_002", "file_claim", "ai_handle",
                ("collect_accident_details", "verify_policy", "notify_agent"),
            ),
            # Correct intent and routing, partial action overlap
            # (missing send_confirmation).
            (
                "policy_change_001", "modify_policy", "ai_handle",
                ("lookup_policy", "update_coverage"),
            ),
            # Wrong intent (should be cancel_policy), correct routing.
            (
                "policy_change_002", "modify_policy", "human_escalate",
                ("lookup_policy", "calculate_refund", "schedule_callback"),
            ),
            # Perfect match.
            (
                "billing_001", "payment_inquiry", "ai_handle",
                ("lookup_account", "retrieve_balance", "explain_charges"),
            ),
        ]
    ]

    # -- 3. Run evaluation --
    report = BenchmarkEvaluator(ground_truth).evaluate(agent_outputs)

    # -- 4. Print summary --
    print(report.summary())

    # -- 5. Inspect individual scores --
    print("\nPer-item details:")
    for item in report.item_scores:
        intent_flag = "OK" if item.intent_correct else "MISS"
        routing_flag = "OK" if item.routing_correct else "MISS"
        print(
            f" {item.id:25s} "
            f"intent={intent_flag:4s} "
            f"routing={routing_flag:4s} "
            f"actions={item.action_completeness:.2f}"
        )

    # -- 6. Access metrics programmatically --
    print(f"\nComposite reliability score: {report.overall.composite_score:.2%}")
    intent_hits = sum(s.intent_correct for s in report.item_scores)
    print(f"Items with perfect intent: {intent_hits}/{report.overall.count}")
    if report.missing_predictions:
        print(f"\nWarning: {len(report.missing_predictions)} benchmark items had no prediction.")
    if report.extra_predictions:
        print(f"\nWarning: {len(report.extra_predictions)} predictions had no matching benchmark item.")
# Allow the example to run as a standalone script without side effects on import.
if __name__ == "__main__":
    main()