# ClaimsGPT / utils / llm_explainer.py
# Author: Bader Alabddan
# Commit 746c06c: Add debug logging for API key detection
"""LLM-based explanation generator for claim decisions.
This module provides AI-assisted explanations for claim decisions.
The LLM is NON-AUTHORITATIVE and only generates explanations.
All decisions are made by rule-based logic.
"""
import os
from typing import Dict, Any, Optional
from utils.logger import logger
class LLMExplainer:
    """Generate professional insurance explanations using an LLM.

    GOVERNED: the LLM is NON-AUTHORITATIVE — it only phrases explanations.
    All claim decisions are made upstream by rule-based logic. If no API key
    is configured, or the remote call fails, a deterministic rule-based
    fallback explanation is produced instead.
    """

    # LLM configuration (as per requirements)
    TEMPERATURE = 0.2       # low temperature -> near-deterministic phrasing
    MAX_TOKENS = 200        # hard cap on generated explanation length
    TIMEOUT_SECONDS = 5     # applies to both OpenAI and HF calls

    def __init__(self) -> None:
        """Initialize the explainer by detecting an API key from the environment.

        OPENAI_API_KEY takes precedence over HF_TOKEN when both are set.
        """
        self.api_key = os.getenv("OPENAI_API_KEY") or os.getenv("HF_TOKEN")
        self.model_available = self.api_key is not None
        # Debug logging to diagnose API key loading
        if self.api_key:
            key_preview = self.api_key[:10] + "..." if len(self.api_key) > 10 else "[short]"
            logger.log_execution_step("LLM_INIT", f"API key found: {key_preview}")
        else:
            logger.log_execution_step("LLM_INIT", "No API key found - will use fallback explanations")
            logger.log_execution_step("LLM_INIT", f"Checked env vars: OPENAI_API_KEY={os.getenv('OPENAI_API_KEY') is not None}, HF_TOKEN={os.getenv('HF_TOKEN') is not None}")

    def generate_explanation(
        self,
        claim_data: Dict[str, Any],
        severity: str,
        matched_rules: list,
        confidence: float
    ) -> tuple[str, bool]:
        """
        Generate professional explanation for claim decision.

        Args:
            claim_data: Claim information
            severity: Final severity (APPROVED/REJECTED/REVIEW)
            matched_rules: List of rules that were applied
            confidence: Decision confidence score

        Returns:
            Tuple of (explanation_text, llm_used) — llm_used is True only
            when the LLM call succeeded; the fallback path returns False.
        """
        # Try LLM explanation first
        if self.model_available:
            try:
                explanation = self._call_llm(claim_data, severity, matched_rules, confidence)
                if explanation:
                    logger.log_execution_step("LLM_EXPLANATION", "Generated successfully")
                    return explanation, True
            except Exception as e:
                # LLM failure must never block a decision explanation.
                logger.log_error(e, "LLM_EXPLANATION_FAILED")
        # Fallback to deterministic rule-based explanation
        logger.log_execution_step("FALLBACK_EXPLANATION", "Using rule-based explanation")
        return self._generate_fallback_explanation(claim_data, severity, matched_rules, confidence), False

    def _call_llm(
        self,
        claim_data: Dict[str, Any],
        severity: str,
        matched_rules: list,
        confidence: float
    ) -> Optional[str]:
        """
        Call LLM API to generate explanation.

        GOVERNED: LLM is NON-AUTHORITATIVE - only generates explanations.
        All decisions are made by rule-based logic.

        Returns the explanation text, or None on any failure.
        """
        # Build prompt
        prompt = self._build_prompt(claim_data, severity, matched_rules, confidence)
        try:
            # OpenAI keys are prefixed "sk-"; anything else is treated as an HF token.
            if self.api_key and self.api_key.startswith('sk-'):
                return self._call_openai(prompt)
            elif self.api_key:
                return self._call_huggingface(prompt)
        except Exception as e:
            logger.log_error(e, "LLM_API_CALL_FAILED")
            return None
        return None

    def _call_openai(self, prompt: str) -> Optional[str]:
        """Call OpenAI chat completions API; returns text or None on failure."""
        try:
            # Imported lazily so the module loads even without the package installed.
            from openai import OpenAI

            client = OpenAI(api_key=self.api_key, timeout=self.TIMEOUT_SECONDS)
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": "You are an insurance claims processor. Provide factual, professional explanations only. Never make decisions."},
                    {"role": "user", "content": prompt}
                ],
                temperature=self.TEMPERATURE,
                max_tokens=self.MAX_TOKENS
            )
            explanation = response.choices[0].message.content.strip()
            logger.log_execution_step("OPENAI_CALL", f"Model: {response.model}, Tokens: {response.usage.total_tokens}")
            return explanation
        except ImportError:
            logger.log_error(Exception("openai package not installed"), "OPENAI_IMPORT")
            return None
        except Exception as e:
            logger.log_error(e, "OPENAI_API_ERROR")
            return None

    def _call_huggingface(self, prompt: str) -> Optional[str]:
        """Call Hugging Face Inference API; returns text or None on failure."""
        try:
            import requests

            API_URL = "https://api-inference.huggingface.co/models/gpt2"
            headers = {"Authorization": f"Bearer {self.api_key}"}
            payload = {
                "inputs": prompt,
                "parameters": {
                    "max_new_tokens": self.MAX_TOKENS,
                    "temperature": self.TEMPERATURE,
                    "return_full_text": False
                }
            }
            response = requests.post(
                API_URL,
                headers=headers,
                json=payload,
                timeout=self.TIMEOUT_SECONDS
            )
            if response.status_code == 200:
                result = response.json()
                # Successful responses are a list of {"generated_text": ...} dicts.
                if isinstance(result, list) and len(result) > 0:
                    explanation = result[0].get('generated_text', '').strip()
                    logger.log_execution_step("HF_CALL", "Inference API call successful")
                    return explanation
            logger.log_error(Exception(f"HF API returned {response.status_code}"), "HF_API_ERROR")
            return None
        except Exception as e:
            logger.log_error(e, "HF_API_ERROR")
            return None

    @staticmethod
    def _coerce_amount(value: Any) -> float:
        """Best-effort conversion of a claim amount to float; 0.0 on bad data.

        Prevents a TypeError/ValueError when upstream data carries None or a
        non-numeric string in 'claim_amount'.
        """
        try:
            return float(value)
        except (TypeError, ValueError):
            return 0.0

    def _build_prompt(
        self,
        claim_data: Dict[str, Any],
        severity: str,
        matched_rules: list,
        confidence: float
    ) -> str:
        """Build the LLM prompt for explanation generation."""
        claim_type = claim_data.get('claim_type', 'unknown')
        claim_amount = self._coerce_amount(claim_data.get('claim_amount', 0))
        # Render the rule list outside the f-string; show a placeholder when empty
        # so the "Applied Rules:" section is never left dangling.
        rules_text = "\n".join(f'- {rule}' for rule in matched_rules) if matched_rules else '- (none)'
        prompt = f"""You are an insurance claims processor. Explain why this {claim_type} claim was classified as {severity}.

Claim Details:
- Type: {claim_type}
- Amount: ${claim_amount:,.2f}
- Decision: {severity}
- Confidence: {confidence:.1%}

Applied Rules:
{rules_text}

Provide a professional, factual explanation in 2-3 sentences. No emojis. No speculation.
Focus on the rules that were applied and why they led to this decision."""
        return prompt

    def _generate_fallback_explanation(
        self,
        claim_data: Dict[str, Any],
        severity: str,
        matched_rules: list,
        confidence: float
    ) -> str:
        """Generate a deterministic rule-based explanation when the LLM is unavailable."""
        claim_type = claim_data.get('claim_type', 'unknown')
        claim_amount = self._coerce_amount(claim_data.get('claim_amount', 0))
        parts = [f"This {claim_type} claim for ${claim_amount:,.2f} was classified as {severity}."]
        if matched_rules:
            parts.append(f"Decision based on: {', '.join(matched_rules)}.")
        if severity == "APPROVED":
            parts.append("All validation checks passed and no fraud indicators were detected.")
        elif severity == "REJECTED":
            parts.append("The claim failed critical validation requirements.")
        elif severity == "REVIEW":
            parts.append("Manual review is required due to complexity or risk factors.")
        parts.append(f"Decision confidence: {confidence:.1%}.")
        return " ".join(parts)