Spaces:
Sleeping
Sleeping
File size: 12,056 Bytes
dbaeeae |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 |
#!/usr/bin/env python3
"""
LLM Fallback Router Integration Example
This example demonstrates how to integrate the LLMFallbackRouter
with the existing VoucherBot system as a fallback for the regex-based router.
Usage:
python llm_fallback_router_example.py
"""
import os
import json
from dotenv import load_dotenv
from llm_fallback_router import LLMFallbackRouter, InvalidInputError, LLMProcessingError, InvalidLLMResponseError
# Import existing components
from agent_setup import initialize_caseworker_agent
from enhanced_semantic_router_v2 import EnhancedSemanticRouterV2, Intent
# Load environment variables
load_dotenv()
class MockLLMClient:
    """Mock LLM client for demonstration purposes.

    In a real implementation, this would be replaced with actual LLM clients
    like OpenAI, Anthropic, or the Gemini client used in the project.
    """

    def __init__(self):
        # Number of generate() invocations; handy for tests and demos.
        self.call_count = 0

    @staticmethod
    def _extract_message(prompt: str) -> str:
        """Pull the user message out of a prompt containing `Message: "..."`.

        Returns "" when the marker is absent.  If the closing quote is
        missing, the remainder of the prompt is returned rather than
        slicing to index -1 (which silently dropped the final character).
        """
        marker = 'Message: "'
        start = prompt.find(marker)
        if start == -1:
            return ""
        start += len(marker)
        end = prompt.find('"', start)
        return prompt[start:] if end == -1 else prompt[start:end]

    def generate(self, prompt: str) -> str:
        """Generate a mock JSON response based on the prompt content.

        In production, this would make actual API calls to an LLM.

        Args:
            prompt: Full routing prompt; the user message is expected to be
                embedded as `Message: "..."`.

        Returns:
            A JSON string with "intent", "parameters" and "reasoning" keys.
        """
        self.call_count += 1
        message = self._extract_message(prompt)
        # Simple rule-based mock responses.
        message_lower = message.lower()
        if any(word in message_lower for word in ["find", "search", "look for", "apartment", "listing"]):
            return json.dumps({
                "intent": "SEARCH_LISTINGS",
                "parameters": {
                    "borough": "Brooklyn" if "brooklyn" in message_lower or "bk" in message_lower else None,
                    "bedrooms": 2 if "2" in message or "two" in message_lower else None,
                    # "$3000" in message implies "3000" in message, so a single
                    # substring test is sufficient.
                    "max_rent": 3000 if "3000" in message else None,
                    "voucher_type": "Section 8" if "section" in message_lower else None
                },
                "reasoning": "User is looking for apartment listings with specified criteria"
            })
        elif any(word in message_lower for word in ["what about", "try", "instead", "change"]):
            return json.dumps({
                "intent": "REFINE_SEARCH",
                "parameters": {
                    "borough": "Queens" if "queens" in message_lower else None
                },
                "reasoning": "User wants to modify their existing search parameters"
            })
        elif any(word in message_lower for word in ["violation", "safe", "building", "inspect"]):
            return json.dumps({
                "intent": "CHECK_VIOLATIONS",
                "parameters": {},
                "reasoning": "User wants to check building safety violations"
            })
        elif any(word in message_lower for word in ["help", "assist", "what can you do"]):
            return json.dumps({
                "intent": "HELP_REQUEST",
                "parameters": {},
                "reasoning": "User is requesting help or information about available features"
            })
        else:
            return json.dumps({
                "intent": "UNKNOWN",
                "parameters": {},
                "reasoning": "Unable to determine user intent from the message"
            })
class TwoTierSemanticRouter:
    """Two-tier router: fast regex classification first, LLM as a fallback.

    Demonstrates the two-tier architecture mentioned in the specification.
    """

    def __init__(self, llm_client=None, debug=False):
        # Tier 1: the regex-based router (V2).
        self.regex_router = EnhancedSemanticRouterV2()
        # Tier 2: the LLM fallback router (mock client unless one is supplied).
        if llm_client is None:
            llm_client = MockLLMClient()
        self.llm_router = LLMFallbackRouter(llm_client, debug=debug)
        self.debug = debug

    def route(self, message: str, context: dict = None) -> dict:
        """Route *message* through the two-tier system.

        Args:
            message: User message to route.
            context: Optional dictionary with conversation state.

        Returns:
            Dictionary with routing results:
              - intent: classified intent
              - parameters: extracted parameters
              - reasoning: explanation of the classification
              - router_used: "regex", "llm", or "none"
              - confidence: confidence level
        """
        if self.debug:
            print(f"\n🔍 Routing message: '{message}'")

        # --- Tier 1: regex patterns ---------------------------------------
        try:
            intent = self.regex_router.classify_intent(message, context)
            params = self.regex_router.extract_parameters(message)
            # A regex hit counts only when parameters were found, or the
            # intent is one that needs none (help / violations).
            hit = intent != Intent.UNCLASSIFIED and (
                params or intent in [Intent.SHOW_HELP, Intent.CHECK_VIOLATIONS]
            )
            if hit:
                if self.debug:
                    print("✅ Regex router succeeded")
                return {
                    "intent": intent.value,
                    "parameters": params,
                    "reasoning": f"Classified by regex patterns as {intent.value}",
                    "router_used": "regex",
                    # Regex patterns are highly confident when they match.
                    "confidence": 0.95,
                }
        except Exception as err:
            if self.debug:
                print(f"⚠️ Regex router failed: {err}")

        # --- Tier 2: LLM fallback -----------------------------------------
        if self.debug:
            print("🧠 Falling back to LLM router")
        try:
            # The LLM router takes context as a string, not a dict.
            ctx_text = f"Previous search: {json.dumps(context)}" if context else None
            outcome = self.llm_router.route(message, ctx_text)
            outcome["router_used"] = "llm"
            # LLM results are generally less confident.
            outcome["confidence"] = 0.8
            if self.debug:
                print("✅ LLM router succeeded")
            return outcome
        except (InvalidInputError, LLMProcessingError, InvalidLLMResponseError) as err:
            if self.debug:
                print(f"❌ LLM router failed: {err}")
            # Both tiers failed - report an unknown intent.
            return {
                "intent": "UNKNOWN",
                "parameters": {},
                "reasoning": f"Both regex and LLM routers failed. Error: {err}",
                "router_used": "none",
                "confidence": 0.0,
            }
def demonstrate_integration():
    """Demonstrate the LLM Fallback Router integration."""
    print("🏠 VoucherBot LLM Fallback Router Integration Demo")
    print("=" * 60)

    # Build the two-tier router around a mock LLM backend.
    router = TwoTierSemanticRouter(MockLLMClient(), debug=True)

    # (message, context, expected router) triples demonstrating fallback.
    cases = [
        # Cases that should work with the regex router.
        ("Find apartments in Brooklyn with 2 bedrooms", None, "regex"),
        ("Show me help", None, "regex"),
        # Cases that should fall back to the LLM.
        ("I'm looking for a place but not sure where to start", None, "llm"),
        ("¿Dónde puedo encontrar apartamentos?", None, "llm"),  # Spanish
        ("What about trying somewhere else?", {"borough": "Brooklyn", "bedrooms": 2}, "llm"),
        # Edge case: very informal.
        ("yo wassup", None, "llm"),
    ]

    print("\n📋 Running Test Cases:")
    print("-" * 40)
    for idx, (msg, ctx, expected) in enumerate(cases, 1):
        print(f"\n{idx}. Testing: '{msg}'")
        outcome = router.route(msg, ctx)
        print(f" Intent: {outcome['intent']}")
        print(f" Router Used: {outcome['router_used']}")
        print(f" Confidence: {outcome['confidence']}")
        print(f" Parameters: {outcome['parameters']}")
        print(f" Reasoning: {outcome['reasoning']}")
        # Verify the expected router was used.
        if outcome['router_used'] == expected:
            print(" ✅ Expected router used")
        else:
            print(f" ⚠️ Expected {expected}, got {outcome['router_used']}")
def demonstrate_real_integration():
    """Show how this would integrate with the actual VoucherBot system."""
    print("\n\n🔧 Real Integration Example")
    print("=" * 40)

    # How the router would be wired into the real system.
    print("Integration points:")
    print("1. Replace MockLLMClient with actual Gemini client from agent_setup.py")
    print("2. Integrate TwoTierSemanticRouter into email_handler.py")
    print("3. Update app.py to use the new router for message classification")

    # Example integration code, printed verbatim for the reader.
    integration_code = '''
# In email_handler.py - replace the current classification logic
from llm_fallback_router import LLMFallbackRouter
from agent_setup import initialize_caseworker_agent
# Initialize LLM client (use the same one from agent_setup)
caseworker_agent = initialize_caseworker_agent()
llm_client = caseworker_agent.model # Extract the model
# Create the two-tier router
two_tier_router = TwoTierSemanticRouter(llm_client)
# Use in classification
def enhanced_classify_message(message: str, context: dict = None) -> str:
    result = two_tier_router.route(message, context)
    return result["intent"]
'''
    print("\nExample integration code:")
    print(integration_code)
def demonstrate_error_handling():
    """Demonstrate robust error handling."""
    print("\n\n🛡️ Error Handling Demo")
    print("=" * 30)

    class FailingLLMClient:
        # Simulates an LLM backend whose API calls always fail.
        def generate(self, prompt):
            raise Exception("API timeout")

    failing_router = TwoTierSemanticRouter(FailingLLMClient(), debug=True)

    # Inputs chosen to stress validation and the fallback path.
    probes = [
        "",               # Empty message
        "x" * 1001,       # Too long message
        "Normal message",  # Should fall back gracefully
    ]
    for probe in probes:
        preview = probe[:20] + ('...' if len(probe) > 20 else '')
        print(f"\nTesting error handling for: '{preview}'")
        try:
            routed = failing_router.route(probe)
            print(f"Result: {routed['intent']} (Router: {routed['router_used']})")
        except Exception as exc:
            print(f"Error handled: {exc}")
if __name__ == "__main__":
    # Run every demonstration in sequence, then print a summary.
    demonstrate_integration()
    demonstrate_real_integration()
    demonstrate_error_handling()

    print("\n\n🎯 Summary")
    print("=" * 20)
    print("✅ LLMFallbackRouter successfully created")
    print("✅ Two-tier routing system demonstrated")
    print("✅ Error handling validated")
    print("✅ Integration path defined")
    print("\nThe LLMFallbackRouter is ready for integration into VoucherBot!")