File size: 8,197 Bytes
a143e9d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
746c06c
 
 
 
 
a143e9d
746c06c
a143e9d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
746c06c
 
a143e9d
 
 
 
746c06c
 
 
 
 
 
 
 
 
 
a143e9d
 
 
746c06c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a143e9d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
"""LLM-based explanation generator for claim decisions.

This module provides AI-assisted explanations for claim decisions.
The LLM is NON-AUTHORITATIVE and only generates explanations.
All decisions are made by rule-based logic.
"""

import os
from typing import Dict, Any, Optional
from utils.logger import logger


class LLMExplainer:
    """Generate professional insurance explanations using an LLM.

    GOVERNED: the LLM is NON-AUTHORITATIVE — it only phrases explanations
    for decisions already made by rule-based logic. When no API key is
    configured, or the remote call fails, a deterministic rule-based
    fallback explanation is produced instead.
    """

    # LLM configuration (as per requirements): low temperature for
    # near-deterministic phrasing, short responses, and a tight timeout
    # so claim processing never blocks on a slow API.
    TEMPERATURE = 0.2  # Deterministic
    MAX_TOKENS = 200
    TIMEOUT_SECONDS = 5

    def __init__(self) -> None:
        """Initialize LLM explainer with API configuration from the environment.

        Reads OPENAI_API_KEY first, falling back to HF_TOKEN. Sets
        `model_available` so callers can tell whether LLM explanations
        are even possible for this process.
        """
        self.api_key = os.getenv("OPENAI_API_KEY") or os.getenv("HF_TOKEN")
        self.model_available = self.api_key is not None

        # Debug logging to diagnose API key loading. Only a short prefix of
        # the key is ever logged, never the full secret.
        if self.api_key:
            key_preview = self.api_key[:10] + "..." if len(self.api_key) > 10 else "[short]"
            logger.log_execution_step("LLM_INIT", f"API key found: {key_preview}")
        else:
            logger.log_execution_step("LLM_INIT", "No API key found - will use fallback explanations")
            logger.log_execution_step("LLM_INIT", f"Checked env vars: OPENAI_API_KEY={os.getenv('OPENAI_API_KEY') is not None}, HF_TOKEN={os.getenv('HF_TOKEN') is not None}")

    def generate_explanation(
        self,
        claim_data: Dict[str, Any],
        severity: str,
        matched_rules: list[str],
        confidence: float
    ) -> tuple[str, bool]:
        """
        Generate professional explanation for claim decision.

        Tries the LLM first (when a key is configured); any failure falls
        through to the deterministic rule-based explanation, so this method
        always returns a usable explanation.

        Args:
            claim_data: Claim information
            severity: Final severity (APPROVED/REJECTED/REVIEW)
            matched_rules: List of rules that were applied
            confidence: Decision confidence score

        Returns:
            Tuple of (explanation_text, llm_used)
        """
        # Try LLM explanation first
        if self.model_available:
            try:
                explanation = self._call_llm(claim_data, severity, matched_rules, confidence)
                if explanation:
                    logger.log_execution_step("LLM_EXPLANATION", "Generated successfully")
                    return explanation, True
            except Exception as e:
                # LLM failures are non-fatal by design: log and fall back.
                logger.log_error(e, "LLM_EXPLANATION_FAILED")

        # Fallback to rule-based explanation
        logger.log_execution_step("FALLBACK_EXPLANATION", "Using rule-based explanation")
        return self._generate_fallback_explanation(claim_data, severity, matched_rules, confidence), False

    def _call_llm(
        self,
        claim_data: Dict[str, Any],
        severity: str,
        matched_rules: list[str],
        confidence: float
    ) -> Optional[str]:
        """
        Call LLM API to generate explanation.

        GOVERNED: LLM is NON-AUTHORITATIVE - only generates explanations.
        All decisions are made by rule-based logic.

        Routes to OpenAI when the key looks like an OpenAI secret key
        ('sk-' prefix), otherwise to the Hugging Face Inference API.
        Returns None on any failure so the caller can fall back.
        """
        # Build prompt
        prompt = self._build_prompt(claim_data, severity, matched_rules, confidence)

        try:
            # Try OpenAI API ('sk-' prefix identifies an OpenAI key)
            if self.api_key and self.api_key.startswith('sk-'):
                return self._call_openai(prompt)
            # Try Hugging Face API
            elif self.api_key:
                return self._call_huggingface(prompt)
        except Exception as e:
            logger.log_error(e, "LLM_API_CALL_FAILED")
            return None

        return None

    def _call_openai(self, prompt: str) -> Optional[str]:
        """Call OpenAI API for explanation generation.

        Imports lazily so the openai package is only required when an
        OpenAI key is actually configured. Returns None when the package
        is missing or the API call fails.
        """
        try:
            from openai import OpenAI

            client = OpenAI(api_key=self.api_key, timeout=self.TIMEOUT_SECONDS)

            # System message constrains the model to explanation-only output,
            # reinforcing the non-authoritative governance rule.
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": "You are an insurance claims processor. Provide factual, professional explanations only. Never make decisions."},
                    {"role": "user", "content": prompt}
                ],
                temperature=self.TEMPERATURE,
                max_tokens=self.MAX_TOKENS
            )

            explanation = response.choices[0].message.content.strip()
            logger.log_execution_step("OPENAI_CALL", f"Model: {response.model}, Tokens: {response.usage.total_tokens}")

            return explanation

        except ImportError:
            logger.log_error(Exception("openai package not installed"), "OPENAI_IMPORT")
            return None
        except Exception as e:
            logger.log_error(e, "OPENAI_API_ERROR")
            return None

    def _call_huggingface(self, prompt: str) -> Optional[str]:
        """Call Hugging Face Inference API for explanation generation.

        Uses the hosted gpt2 endpoint with a bearer token. Returns the
        generated text on HTTP 200, otherwise logs the status and
        returns None so the caller can fall back.
        """
        try:
            import requests

            API_URL = "https://api-inference.huggingface.co/models/gpt2"
            headers = {"Authorization": f"Bearer {self.api_key}"}

            payload = {
                "inputs": prompt,
                "parameters": {
                    "max_new_tokens": self.MAX_TOKENS,
                    "temperature": self.TEMPERATURE,
                    # Only the continuation, not the prompt echoed back.
                    "return_full_text": False
                }
            }

            response = requests.post(
                API_URL,
                headers=headers,
                json=payload,
                timeout=self.TIMEOUT_SECONDS
            )

            if response.status_code == 200:
                result = response.json()
                # HF returns a list of generation dicts on success.
                if isinstance(result, list) and len(result) > 0:
                    explanation = result[0].get('generated_text', '').strip()
                    logger.log_execution_step("HF_CALL", "Inference API call successful")
                    return explanation

            logger.log_error(Exception(f"HF API returned {response.status_code}"), "HF_API_ERROR")
            return None

        except Exception as e:
            logger.log_error(e, "HF_API_ERROR")
            return None

    def _build_prompt(
        self,
        claim_data: Dict[str, Any],
        severity: str,
        matched_rules: list[str],
        confidence: float
    ) -> str:
        """Build LLM prompt for explanation generation.

        The prompt restates the claim facts, the rule-based decision, and
        the applied rules, then instructs the model to explain — not to
        decide — keeping the LLM non-authoritative.
        """
        claim_type = claim_data.get('claim_type', 'unknown')
        claim_amount = claim_data.get('claim_amount', 0)

        # Join rules outside the f-string (backslashes are not allowed
        # inside f-string expressions on older Python versions).
        rules_text = "\n".join(f'- {rule}' for rule in matched_rules)

        prompt = f"""You are an insurance claims processor. Explain why this {claim_type} claim was classified as {severity}.

Claim Details:
- Type: {claim_type}
- Amount: ${claim_amount:,.2f}
- Decision: {severity}
- Confidence: {confidence:.1%}

Applied Rules:
{rules_text}

Provide a professional, factual explanation in 2-3 sentences. No emojis. No speculation.
Focus on the rules that were applied and why they led to this decision."""

        return prompt

    def _generate_fallback_explanation(
        self,
        claim_data: Dict[str, Any],
        severity: str,
        matched_rules: list[str],
        confidence: float
    ) -> str:
        """Generate rule-based explanation when LLM is unavailable.

        Produces a deterministic multi-sentence summary: claim facts,
        the rules applied (if any), a severity-specific rationale, and
        the confidence score.
        """
        claim_type = claim_data.get('claim_type', 'unknown')
        claim_amount = claim_data.get('claim_amount', 0)

        parts = [f"This {claim_type} claim for ${claim_amount:,.2f} was classified as {severity}."]

        if matched_rules:
            parts.append(f"Decision based on: {', '.join(matched_rules)}.")

        # One rationale sentence per known severity; unknown severities
        # simply omit this sentence.
        if severity == "APPROVED":
            parts.append("All validation checks passed and no fraud indicators were detected.")
        elif severity == "REJECTED":
            parts.append("The claim failed critical validation requirements.")
        elif severity == "REVIEW":
            parts.append("Manual review is required due to complexity or risk factors.")

        parts.append(f"Decision confidence: {confidence:.1%}.")

        return " ".join(parts)