pythonprincess committed on
Commit
278dad3
Β·
verified Β·
1 Parent(s): f8aa31c

Upload orchestrator.py

Browse files
Files changed (1) hide show
  1. app/orchestrator.py +1631 -0
app/orchestrator.py ADDED
@@ -0,0 +1,1631 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ 🎭 PENNY Orchestrator - Request Routing & Coordination Engine
3
+
4
+ This is Penny's decision-making brain. She analyzes each request, determines
5
+ the best way to help, and coordinates between her specialized AI models and
6
+ civic data tools.
7
+
8
+ MISSION: Route every resident request to the right resource while maintaining
9
+ Penny's warm, helpful personality and ensuring fast, accurate responses.
10
+
11
+ FEATURES:
12
+ - Enhanced intent classification with confidence scoring
13
+ - Compound intent handling (weather + events)
14
+ - Graceful fallbacks when services are unavailable
15
+ - Performance tracking for all operations
16
+ - Context-aware responses
17
+ - Emergency routing with immediate escalation
18
+
19
+ ENHANCEMENTS (Phase 1):
20
+ - βœ… Structured logging with performance tracking
21
+ - βœ… Safe imports with availability flags
22
+ - βœ… Result format checking helper
23
+ - βœ… Enhanced error handling patterns
24
+ - βœ… Service availability tracking
25
+ - βœ… Fixed function signature mismatches
26
+ - βœ… Integration with enhanced modules
27
+ """
28
+
29
+ import logging
30
+ import time
31
+ from typing import Dict, Any, Optional, List, Tuple
32
+ from datetime import datetime
33
+ from dataclasses import dataclass, field
34
+ from enum import Enum
35
+
36
+ # --- ENHANCED MODULE IMPORTS ---
37
+ from app.intents import classify_intent_detailed, IntentType, IntentMatch
38
+ from app.location_utils import (
39
+ extract_location_detailed,
40
+ LocationMatch,
41
+ LocationStatus,
42
+ get_city_coordinates
43
+ )
44
+ from app.logging_utils import (
45
+ log_interaction,
46
+ sanitize_for_logging,
47
+ LogLevel
48
+ )
49
+
50
+ # --- AGENT IMPORTS (with availability tracking) ---
51
+ try:
52
+ from app.weather_agent import (
53
+ get_weather_for_location,
54
+ recommend_outfit,
55
+ weather_to_event_recommendations,
56
+ format_weather_summary
57
+ )
58
+ WEATHER_AGENT_AVAILABLE = True
59
+ except ImportError as e:
60
+ logger = logging.getLogger(__name__)
61
+ logger.warning(f"Weather agent not available: {e}")
62
+ WEATHER_AGENT_AVAILABLE = False
63
+
64
+ try:
65
+ from app.event_weather import get_event_recommendations_with_weather
66
+ EVENT_WEATHER_AVAILABLE = True
67
+ except ImportError as e:
68
+ logger = logging.getLogger(__name__)
69
+ logger.warning(f"Event weather integration not available: {e}")
70
+ EVENT_WEATHER_AVAILABLE = False
71
+
72
+ try:
73
+ from app.tool_agent import handle_tool_request
74
+ TOOL_AGENT_AVAILABLE = True
75
+ except ImportError as e:
76
+ logger = logging.getLogger(__name__)
77
+ logger.warning(f"Tool agent not available: {e}")
78
+ TOOL_AGENT_AVAILABLE = False
79
+
80
+ # --- MODEL IMPORTS (with availability tracking) ---
81
+ try:
82
+ from models.translation.translation_utils import translate_text
83
+ TRANSLATION_AVAILABLE = True
84
+ except ImportError as e:
85
+ logger = logging.getLogger(__name__)
86
+ logger.warning(f"Translation service not available: {e}")
87
+ TRANSLATION_AVAILABLE = False
88
+
89
+ try:
90
+ from models.sentiment.sentiment_utils import get_sentiment_analysis
91
+ SENTIMENT_AVAILABLE = True
92
+ except ImportError as e:
93
+ logger = logging.getLogger(__name__)
94
+ logger.warning(f"Sentiment service not available: {e}")
95
+ SENTIMENT_AVAILABLE = False
96
+
97
+ try:
98
+ from models.bias.bias_utils import check_bias
99
+ BIAS_AVAILABLE = True
100
+ except ImportError as e:
101
+ logger = logging.getLogger(__name__)
102
+ logger.warning(f"Bias detection service not available: {e}")
103
+ BIAS_AVAILABLE = False
104
+
105
+ try:
106
+ from models.gemma.gemma_utils import generate_response
107
+ LLM_AVAILABLE = True
108
+ except ImportError as e:
109
+ logger = logging.getLogger(__name__)
110
+ logger.warning(f"LLM service not available: {e}")
111
+ LLM_AVAILABLE = False
112
+
113
+ # --- LOGGING SETUP ---
114
+ logger = logging.getLogger(__name__)
115
+
116
+ # --- CONFIGURATION ---
117
+ CORE_MODEL_ID = "penny-core-agent"
118
+ MAX_RESPONSE_TIME_MS = 5000 # 5 seconds - log if exceeded
119
+
120
+ # --- TRACKING COUNTERS ---
121
+ _orchestration_count = 0
122
+ _emergency_count = 0
123
+
124
+
125
+ # ============================================================
126
+ # COMPATIBILITY HELPER - Result Format Checking
127
+ # ============================================================
128
+
129
+ def _check_result_success(
130
+ result: Dict[str, Any],
131
+ expected_keys: List[str]
132
+ ) -> Tuple[bool, Optional[str]]:
133
+ """
134
+ βœ… Check if a utility function result indicates success.
135
+
136
+ Handles multiple return format patterns:
137
+ - Explicit "success" key (preferred)
138
+ - Presence of expected data keys (implicit success)
139
+ - Presence of "error" key (explicit failure)
140
+
141
+ This helper fixes compatibility issues where different utility
142
+ functions return different result formats.
143
+
144
+ Args:
145
+ result: Dictionary returned from utility function
146
+ expected_keys: List of keys that indicate successful data
147
+
148
+ Returns:
149
+ Tuple of (is_success, error_message)
150
+
151
+ Example:
152
+ result = await translate_text(message, "en", "es")
153
+ success, error = _check_result_success(result, ["translated_text"])
154
+ if success:
155
+ text = result.get("translated_text")
156
+ """
157
+ # Check for explicit success key
158
+ if "success" in result:
159
+ return result["success"], result.get("error")
160
+
161
+ # Check for explicit error (presence = failure)
162
+ if "error" in result and result["error"]:
163
+ return False, result["error"]
164
+
165
+ # Check for expected data keys (implicit success)
166
+ has_data = any(key in result for key in expected_keys)
167
+ if has_data:
168
+ return True, None
169
+
170
+ # Unknown format - assume failure
171
+ return False, "Unexpected response format"
172
+
173
+
174
+ # ============================================================
175
+ # SERVICE AVAILABILITY CHECK
176
+ # ============================================================
177
+
178
def get_service_availability() -> Dict[str, bool]:
    """
    Report which backing services were successfully imported at startup.

    The availability flags are set once at module import time (in the
    try/except import blocks above); this simply snapshots them into a
    dict for health checks and for routing decisions between live calls
    and fallbacks.

    Returns:
        Mapping of service name -> True if the service module loaded.
    """
    availability: Dict[str, bool] = {}
    availability["translation"] = TRANSLATION_AVAILABLE
    availability["sentiment"] = SENTIMENT_AVAILABLE
    availability["bias_detection"] = BIAS_AVAILABLE
    availability["llm"] = LLM_AVAILABLE
    availability["tool_agent"] = TOOL_AGENT_AVAILABLE
    availability["weather"] = WEATHER_AGENT_AVAILABLE
    availability["event_weather"] = EVENT_WEATHER_AVAILABLE
    return availability
197
+
198
+
199
+ # ============================================================
200
+ # ORCHESTRATION RESULT STRUCTURE
201
+ # ============================================================
202
+
203
@dataclass
class OrchestrationResult:
    """
    Structured outcome of one pass through the orchestration pipeline.

    Every handler returns one of these so that callers (and the logging
    layer) always see the same fields regardless of which service ran.
    """
    intent: str                                  # detected intent name
    reply: str                                   # user-facing response text
    success: bool                                # whether the request succeeded
    tenant_id: Optional[str] = None              # city/location identifier
    data: Optional[Dict[str, Any]] = None        # raw payload from the service
    model_id: Optional[str] = None               # which model/service answered
    error: Optional[str] = None                  # error message on failure
    response_time_ms: Optional[float] = None     # end-to-end latency
    confidence: Optional[float] = None           # intent confidence score
    fallback_used: bool = False                  # True if fallback logic ran

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the result to a plain dict for API responses."""
        field_names = (
            "intent", "reply", "success", "tenant_id", "data",
            "model_id", "error", "response_time_ms", "confidence",
            "fallback_used",
        )
        return {name: getattr(self, name) for name in field_names}
236
+
237
+
238
+ # ============================================================
239
+ # MAIN ORCHESTRATOR FUNCTION (ENHANCED)
240
+ # ============================================================
241
+
242
async def run_orchestrator(
    message: str,
    context: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Main decision-making brain of Penny.

    Pipeline:
    1. Classify the message's intent (with confidence scoring).
    2. Resolve location (tenant_id / lat / lon) from context or message.
    3. Short-circuit emergencies to the crisis handler.
    4. Dispatch every other intent to its specialized handler.
    5. Stamp metadata (latency, confidence, tenant) and log the interaction.

    Args:
        message: User's input text.
        context: Optional extras (tenant_id, lat, lon, session_id, ...).
            A fresh empty dict is used when None.

    Returns:
        ``OrchestrationResult.to_dict()`` — response text plus metadata.
        On any unexpected exception a fallback error payload is returned
        instead of raising, so callers always get a well-formed dict.
    """
    global _orchestration_count
    _orchestration_count += 1

    start_time = time.time()

    # Initialize context if not provided (avoids mutable-default pitfalls).
    if context is None:
        context = {}

    # Sanitize message for logging (PII protection).
    safe_message = sanitize_for_logging(message)
    logger.info(f"🎭 Orchestrator processing: '{safe_message[:50]}...'")

    try:
        # === STEP 1: CLASSIFY INTENT ===
        intent_result = classify_intent_detailed(message)
        intent = intent_result.intent
        confidence = intent_result.confidence

        logger.info(
            f"Intent detected: {intent.value} "
            f"(confidence: {confidence:.2f})"
        )

        # === STEP 2: EXTRACT LOCATION ===
        # Context values take priority; the message is only parsed when the
        # caller supplied no usable tenant_id.
        tenant_id = context.get("tenant_id")
        lat = context.get("lat")
        lon = context.get("lon")

        if not tenant_id or tenant_id == "unknown":
            location_result = extract_location_detailed(message)

            if location_result.status == LocationStatus.FOUND:
                tenant_id = location_result.tenant_id
                logger.info(f"Location extracted: {tenant_id}")

                # Backfill coordinates from the city registry, but never
                # overwrite coordinates the caller already provided.
                coords = get_city_coordinates(tenant_id)
                if coords and lat is None and lon is None:
                    lat, lon = coords["lat"], coords["lon"]
                    logger.info(f"Coordinates loaded: {lat}, {lon}")

            elif location_result.status == LocationStatus.USER_LOCATION_NEEDED:
                logger.info("User location services needed")
            else:
                logger.info(f"No location detected: {location_result.status}")

        # === STEP 3: HANDLE EMERGENCY INTENTS (CRITICAL) ===
        # Emergencies return immediately and skip the normal dispatch/logging
        # path below; _handle_emergency does its own compliance logging.
        if intent == IntentType.EMERGENCY:
            result = await _handle_emergency(
                message=message,
                context=context,
                start_time=start_time
            )
            # Set confidence and metadata before returning.
            result.confidence = confidence
            result.tenant_id = tenant_id
            response_time = (time.time() - start_time) * 1000
            result.response_time_ms = round(response_time, 2)
            return result.to_dict()

        # === STEP 4: ROUTE TO APPROPRIATE HANDLER ===

        if intent == IntentType.TRANSLATION:
            result = await _handle_translation(message, context)

        elif intent == IntentType.SENTIMENT_ANALYSIS:
            result = await _handle_sentiment(message, context)

        elif intent == IntentType.BIAS_DETECTION:
            result = await _handle_bias(message, context)

        elif intent == IntentType.DOCUMENT_PROCESSING:
            result = await _handle_document(message, context)

        # Weather gets the full intent_result so it can detect compound
        # weather+events queries.
        elif intent == IntentType.WEATHER:
            result = await _handle_weather(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon,
                intent_result=intent_result
            )

        elif intent == IntentType.EVENTS:
            result = await _handle_events(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon,
                intent_result=intent_result
            )

        elif intent == IntentType.GOVERNMENT:
            result = await _handle_government(
                message=message,
                context=context,
                tenant_id=tenant_id
            )

        elif intent == IntentType.LOCAL_RESOURCES:
            result = await _handle_local_resources(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon
            )

        elif intent in [IntentType.GREETING, IntentType.HELP, IntentType.UNKNOWN]:
            result = await _handle_conversational(
                message=message,
                intent=intent,
                context=context
            )

        else:
            # Unhandled intent type (shouldn't happen, but safety net).
            result = await _handle_fallback(message, intent, context)

        # === STEP 5: ADD METADATA & LOG INTERACTION ===
        response_time = (time.time() - start_time) * 1000
        result.response_time_ms = round(response_time, 2)
        result.confidence = confidence
        result.tenant_id = tenant_id

        # Log the interaction with structured logging.
        log_interaction(
            tenant_id=tenant_id or "unknown",
            interaction_type="orchestration",
            intent=intent.value,
            response_time_ms=response_time,
            success=result.success,
            metadata={
                "confidence": confidence,
                "fallback_used": result.fallback_used,
                "model_id": result.model_id,
                "orchestration_count": _orchestration_count
            }
        )

        # Flag slow responses so latency regressions show up in the logs.
        if response_time > MAX_RESPONSE_TIME_MS:
            logger.warning(
                f"⚠️ Slow response: {response_time:.0f}ms "
                f"(intent: {intent.value})"
            )

        logger.info(
            f"βœ… Orchestration complete: {intent.value} "
            f"({response_time:.0f}ms)"
        )

        return result.to_dict()

    except Exception as e:
        # === CATASTROPHIC FAILURE HANDLER ===
        # Broad catch is deliberate at this top-level boundary: the caller
        # must always receive a well-formed response dict.
        response_time = (time.time() - start_time) * 1000
        logger.error(
            f"❌ Orchestrator error: {e} "
            f"(response_time: {response_time:.0f}ms)",
            exc_info=True
        )

        # Log failed interaction.
        log_interaction(
            tenant_id=context.get("tenant_id", "unknown"),
            interaction_type="orchestration_error",
            intent="error",
            response_time_ms=response_time,
            success=False,
            metadata={
                "error": str(e),
                "error_type": type(e).__name__
            }
        )

        error_result = OrchestrationResult(
            intent="error",
            reply=(
                "I'm having trouble processing your request right now. "
                "Please try again in a moment, or let me know if you need "
                "immediate assistance! πŸ’›"
            ),
            success=False,
            error=str(e),
            model_id="orchestrator",
            fallback_used=True,
            response_time_ms=round(response_time, 2)
        )

        return error_result.to_dict()
473
+
474
+
475
+ # ============================================================
476
+ # SPECIALIZED INTENT HANDLERS (ENHANCED)
477
+ # ============================================================
478
+
479
async def _handle_emergency(
    message: str,
    context: Dict[str, Any],
    start_time: float
) -> OrchestrationResult:
    """
    CRITICAL: Emergency intent handler.

    Builds a crisis-resource reply (always including national hotlines),
    best-effort appends city-specific behavioral-health resources when a
    tenant_id is known, and logs the interaction for compliance before
    returning. Never raises: resource-loading failures are swallowed so
    the national hotlines are always delivered.

    Args:
        message: User's input text (logged sanitized).
        context: Request context; only "tenant_id" is read here.
        start_time: time.time() captured at orchestration start, used to
            compute response latency for the compliance log.

    Returns:
        OrchestrationResult with success=True and the crisis-resource reply.
    """
    global _emergency_count
    _emergency_count += 1

    # Sanitize message for logging (but keep full context for safety review).
    safe_message = sanitize_for_logging(message)
    logger.warning(f"🚨 EMERGENCY INTENT DETECTED (#{_emergency_count}): {safe_message[:100]}")

    # TODO: Integrate with safety_utils.py when enhanced:
    #   from app.safety_utils import route_emergency
    #   result = await route_emergency(message, context)

    # National crisis resources are always included, regardless of location.
    reply = (
        "🚨 **Oh honey, if this is a life-threatening emergency, please call 911 immediately!**\n\n"
        "**For crisis support:**\n"
        "β€’ **National Suicide Prevention Lifeline:** 988\n"
        "β€’ **Crisis Text Line:** Text HOME to 741741\n"
        "β€’ **National Domestic Violence Hotline:** 1-800-799-7233\n\n"
    )

    # Best-effort: append city-specific behavioral health resources.
    tenant_id = context.get("tenant_id")
    if tenant_id:
        try:
            # Local import keeps this optional dependency off the hot path.
            from app.location_utils import load_city_resources
            city_data = load_city_resources(tenant_id)
            behavioral_health = city_data.get("services", {}).get("behavioral_health", {})
            resources = behavioral_health.get("resources", [])

            if resources:
                reply += f"**Local crisis resources in your area:**\n"
                # Show at most 2 resources that look crisis-related
                # (name mentions "crisis" or phone contains "988").
                crisis_resources = [r for r in resources if "crisis" in r.get("name", "").lower() or "988" in r.get("phone", "")][:2]
                for resource in crisis_resources:
                    name = resource.get("name", "")
                    phone = resource.get("phone", "")
                    if name and phone:
                        reply += f"β€’ **{name}:** {phone}\n"

                # Also surface the Community Services Board (CSB) if listed.
                csb_resource = next((r for r in resources if "csb" in r.get("name", "").lower() or "community services" in r.get("name", "").lower()), None)
                if csb_resource:
                    reply += f"β€’ **{csb_resource.get('name', 'Community Services Board')}:** {csb_resource.get('phone', 'Check website')}\n"

                reply += "\n"
        except Exception as e:
            # Deliberately non-fatal: national hotlines above still go out.
            logger.debug(f"Could not load city-specific behavioral health resources: {e}")

    reply += (
        "I'm here to help connect you with local resources, sugar. "
        "What kind of support do you need right now?"
    )

    # Log emergency interaction for compliance (CRITICAL).
    response_time = (time.time() - start_time) * 1000
    log_interaction(
        tenant_id=context.get("tenant_id", "emergency"),
        interaction_type="emergency",
        intent=IntentType.EMERGENCY.value,
        response_time_ms=response_time,
        success=True,
        metadata={
            "emergency_number": _emergency_count,
            "message_length": len(message),
            "timestamp": datetime.now().isoformat(),
            "action": "crisis_resources_provided"
        }
    )

    logger.critical(
        f"EMERGENCY LOG #{_emergency_count}: Resources provided "
        f"({response_time:.0f}ms)"
    )

    return OrchestrationResult(
        intent=IntentType.EMERGENCY.value,
        reply=reply,
        success=True,
        model_id="emergency_router",
        data={"crisis_resources_provided": True},
        response_time_ms=round(response_time, 2)
    )
576
+
577
+
578
async def _handle_translation(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    🌍 Translation handler - 27 languages supported.

    Resolves the target language from the request context or by scanning the
    message for "to <language>" / "in <language>" / "into <language>" phrases,
    then calls the translation model. Falls back gracefully when the service
    is unavailable or the call fails.

    FIXES:
    - Repaired mojibake-corrupted keyword keys ("espaΓ±ol" -> "español",
      "franΓ§ais" -> "français") so accented language names actually match.
    - Repaired mojibake emojis in user-facing replies.
    - Language phrases now match on word boundaries, so short codes like
      "es"/"fr" no longer false-match inside words (e.g. "in essence").

    Args:
        message: User's input text. NOTE: currently the whole message is sent
            to the translator, including any "translate to X" instruction
            phrase — TODO strip the instruction before translating.
        context: May supply "source_lang" / "target_lang" codes
            (FLORES-200 style, e.g. "eng_Latn", "spa_Latn").

    Returns:
        OrchestrationResult with the translated text, or a fallback reply
        on failure (success=False, fallback_used=True).
    """
    import re  # local import: only needed for target-language phrase matching

    logger.info("🌍 Processing translation request")

    # Fail fast if the translation model never loaded at import time.
    if not TRANSLATION_AVAILABLE:
        logger.warning("Translation service not available")
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply="Translation isn't available right now. Try again soon! 🌍",
            success=False,
            error="Service not loaded",
            fallback_used=True
        )

    try:
        # Defaults: English -> Spanish unless the context says otherwise.
        source_lang = context.get("source_lang", "eng_Latn")
        target_lang = context.get("target_lang", "spa_Latn")

        # User-facing language names/ISO codes mapped to FLORES-200 codes.
        message_lower = message.lower()
        language_keywords = {
            "spanish": "spa_Latn", "español": "spa_Latn", "es": "spa_Latn",
            "french": "fra_Latn", "français": "fra_Latn", "fr": "fra_Latn",
            "chinese": "zho_Hans", "mandarin": "zho_Hans", "zh": "zho_Hans",
            "arabic": "arb_Arab", "ar": "arb_Arab",
            "hindi": "hin_Deva", "hi": "hin_Deva",
            "portuguese": "por_Latn", "pt": "por_Latn",
            "russian": "rus_Cyrl", "ru": "rus_Cyrl",
            "german": "deu_Latn", "de": "deu_Latn",
            "vietnamese": "vie_Latn", "vi": "vie_Latn",
            "tagalog": "tgl_Latn", "tl": "tgl_Latn",
            "urdu": "urd_Arab", "ur": "urd_Arab",
            "swahili": "swh_Latn", "sw": "swh_Latn",
            "english": "eng_Latn", "en": "eng_Latn"
        }

        # Detect "to/in/into <language>" as whole words; first match wins.
        # ("into" is included so "translate into Spanish" keeps working.)
        for lang_name, lang_code in language_keywords.items():
            pattern = rf"\b(?:to|in|into)\s+{re.escape(lang_name)}\b"
            if re.search(pattern, message_lower):
                target_lang = lang_code
                logger.info(f"🌍 Detected target language from message: {lang_name} -> {lang_code}")
                break

        result = await translate_text(message, source_lang, target_lang)

        # The utility may report that the model itself was unavailable.
        if not result.get("available", True):
            error_msg = result.get("error", "Translation service is temporarily unavailable.")
            logger.warning(f"Translation service unavailable: {error_msg}")
            return OrchestrationResult(
                intent=IntentType.TRANSLATION.value,
                reply=(
                    "I'm having trouble accessing the translation service right now. "
                    "Please try again in a moment! 🌍"
                ),
                success=False,
                error=error_msg,
                fallback_used=True
            )

        # Normalize the result shape via the compatibility helper.
        success, error = _check_result_success(result, ["translated_text"])

        if success:
            translated = result.get("translated_text", "")

            # "skipped" means source and target were the same language.
            if result.get("skipped", False):
                reply = (
                    f"The text is already in {target_lang}. "
                    f"No translation needed! 🌍"
                )
            else:
                reply = (
                    f"Here's the translation:\n\n"
                    f"**{translated}**\n\n"
                    f"(Translated from {source_lang} to {target_lang})"
                )

            return OrchestrationResult(
                intent=IntentType.TRANSLATION.value,
                reply=reply,
                success=True,
                data=result,
                model_id="penny-translate-agent"
            )
        else:
            # Route failures through the common except-branch below.
            raise Exception(error or "Translation failed")

    except Exception as e:
        logger.error(f"Translation error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply=(
                "I had trouble translating that. Could you rephrase? 💬"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
689
+
690
+
691
async def _handle_sentiment(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    Sentiment analysis handler.

    Sends the message to the sentiment model and reports the detected
    label with its confidence. Returns a fallback result (success=False,
    fallback_used=True) when the service is unavailable or the call fails.
    """
    logger.info("😊 Processing sentiment analysis")

    # Guard clause: service never loaded -> fallback immediately.
    if not SENTIMENT_AVAILABLE:
        logger.warning("Sentiment service not available")
        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply="Sentiment analysis isn't available right now. Try again soon! 😊",
            success=False,
            error="Service not loaded",
            fallback_used=True,
        )

    try:
        analysis = await get_sentiment_analysis(message)

        # Normalize the result shape via the compatibility helper.
        ok, failure = _check_result_success(analysis, ["label", "score"])
        if not ok:
            raise Exception(failure or "Sentiment analysis failed")

        label = analysis.get("label", "neutral")
        score = analysis.get("score", 0.0)
        summary = (
            f"The overall sentiment detected is: **{label}**\n"
            f"Confidence: {score:.1%}"
        )

        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply=summary,
            success=True,
            data=analysis,
            model_id="penny-sentiment-agent",
        )

    except Exception as e:
        logger.error(f"Sentiment analysis error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply="I couldn't analyze the sentiment right now. Try again? 😊",
            success=False,
            error=str(e),
            fallback_used=True,
        )
748
+
749
async def _handle_bias(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    Bias detection handler.

    Runs the bias-detection model over the message and reports the
    top-ranked category with its confidence, or a neutral verdict when
    the analysis list is empty. Falls back gracefully (success=False,
    fallback_used=True) when the service is unavailable or errors out.
    """
    logger.info("βš–οΈ Processing bias detection")

    # Guard clause: service never loaded -> fallback immediately.
    if not BIAS_AVAILABLE:
        logger.warning("Bias detection service not available")
        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply="Bias detection isn't available right now. Try again soon! βš–οΈ",
            success=False,
            error="Service not loaded",
            fallback_used=True,
        )

    try:
        outcome = await check_bias(message)

        # Normalize the result shape via the compatibility helper.
        ok, failure = _check_result_success(outcome, ["analysis"])
        if not ok:
            raise Exception(failure or "Bias detection failed")

        findings = outcome.get("analysis", [])

        if findings:
            # Results appear ranked; the first entry is the top category.
            top = findings[0]
            category = top.get("label", "unknown")
            category_score = top.get("score", 0.0)
            verdict = (
                f"Bias analysis complete:\n\n"
                f"**Most likely category:** {category}\n"
                f"**Confidence:** {category_score:.1%}"
            )
        else:
            verdict = "The text appears relatively neutral. βš–οΈ"

        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply=verdict,
            success=True,
            data=outcome,
            model_id="penny-bias-checker",
        )

    except Exception as e:
        logger.error(f"Bias detection error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply="I couldn't check for bias right now. Try again? βš–οΈ",
            success=False,
            error=str(e),
            fallback_used=True,
        )
813
+
814
+
815
async def _handle_document(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    Document processing handler.

    Returns upload instructions only: the actual file upload is handled
    by FastAPI in router.py (the `/upload-document` endpoint), so this
    handler never touches file data itself.
    """
    logger.info("πŸ“„ Document processing requested")

    instructions = "".join([
        "I can help you process documents! πŸ“„\n\n",
        "Please upload your document (PDF or image) using the ",
        "`/upload-document` endpoint. I can extract text, analyze forms, ",
        "and help you understand civic documents.\n\n",
        "What kind of document do you need help with?",
    ])

    return OrchestrationResult(
        intent=IntentType.DOCUMENT_PROCESSING.value,
        reply=instructions,
        success=True,
        model_id="document_router",
    )
841
+
842
+
843
async def _handle_weather(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float],
    intent_result: IntentMatch
) -> OrchestrationResult:
    """
    🌤️ Weather handler with compound intent support.

    Handles both simple weather queries and compound weather+events queries.
    Uses enhanced weather_agent.py with caching and performance tracking.

    Location is resolved through three strategies, in order:
      1. Explicit lat/lon supplied by the caller.
      2. Coordinates looked up from tenant_id (trying common state suffixes
         when the tenant_id lacks one).
      3. Location extracted from the message text.

    Args:
        message: Raw user message.
        context: Request context dictionary (currently unused here, kept
            for handler-signature consistency).
        tenant_id: City identifier such as "atlanta_ga", if known.
        lat: Optional explicit latitude.
        lon: Optional explicit longitude.
        intent_result: Intent classification result; used to detect a
            compound weather+events query.

    Returns:
        OrchestrationResult with a formatted weather reply, or a
        fallback/error result when the service or location is unavailable.
    """
    logger.info("🌤️ Processing weather request")

    # Check service availability first — degrade gracefully when the
    # weather agent module failed to import.
    if not WEATHER_AGENT_AVAILABLE:
        logger.warning("Weather agent not available")
        return OrchestrationResult(
            intent=IntentType.WEATHER.value,
            reply="Weather service isn't available right now. Try again soon! 🌤️",
            success=False,
            error="Weather agent not loaded",
            fallback_used=True
        )

    # Compound intent: the user asked about weather AND events together.
    is_compound = intent_result.is_compound or IntentType.EVENTS in intent_result.secondary_intents

    # === ENHANCED LOCATION RESOLUTION ===

    # Strategy 1: Use provided coordinates.
    if lat is not None and lon is not None:
        logger.info(f"Using provided coordinates: {lat}, {lon}")

    # Strategy 2: Get coordinates from tenant_id (try multiple formats).
    elif tenant_id:
        coords = get_city_coordinates(tenant_id)

        # If the bare tenant_id is unknown and has no state suffix, try
        # appending the suffixes of cities we cover (e.g. "atlanta" -> "atlanta_ga").
        if not coords and "_" not in tenant_id:
            for suffix in ("_va", "_ga", "_al", "_tx", "_ri", "_wa"):
                candidate = tenant_id + suffix
                coords = get_city_coordinates(candidate)
                if coords:
                    tenant_id = candidate  # Normalize to the canonical form
                    logger.info(f"Normalized tenant_id to {tenant_id}")
                    break

        if coords:
            lat, lon = coords["lat"], coords["lon"]
            logger.info(f"✅ Using city coordinates for {tenant_id}: {lat}, {lon}")

    # Strategy 3: Extract location from the message if still unresolved.
    if lat is None or lon is None:
        logger.info("No coordinates from tenant_id, trying to extract from message")
        location_result = extract_location_detailed(message)

        if location_result.status == LocationStatus.FOUND:
            extracted_tenant_id = location_result.tenant_id
            logger.info(f"📍 Location extracted from message: {extracted_tenant_id}")

            # Prefer the location named in the message over a stale/missing tenant_id.
            if not tenant_id or tenant_id != extracted_tenant_id:
                tenant_id = extracted_tenant_id
                logger.info(f"Updated tenant_id to {tenant_id}")

            coords = get_city_coordinates(tenant_id)
            if coords:
                lat, lon = coords["lat"], coords["lon"]
                logger.info(f"✅ Coordinates found from message extraction: {lat}, {lon}")

    # Final check: without coordinates we cannot query the weather API.
    if lat is None or lon is None:
        logger.warning(f"❌ No coordinates available for weather request (tenant_id: {tenant_id})")
        return OrchestrationResult(
            intent=IntentType.WEATHER.value,
            reply=(
                "I need to know your location to check the weather! 📍 "
                "You can tell me your city, or share your location."
            ),
            success=False,
            error="Location required"
        )

    try:
        # Use the combined weather + events path when a compound intent
        # was detected and the combined service is loaded.
        if is_compound and tenant_id and EVENT_WEATHER_AVAILABLE:
            logger.info("Using weather+events combined handler")
            result = await get_event_recommendations_with_weather(tenant_id, lat, lon)

            weather_summary = result.get("weather_summary", "Weather unavailable")
            suggestions = result.get("suggestions", [])

            reply_lines = [f"🌤️ **Weather Update:**\n{weather_summary}\n"]
            if suggestions:
                reply_lines.append("\n📅 **Event Suggestions Based on Weather:**")
                for suggestion in suggestions[:5]:  # Top 5 suggestions
                    reply_lines.append(f"• {suggestion}")

            return OrchestrationResult(
                intent=IntentType.WEATHER.value,
                reply="\n".join(reply_lines),
                success=True,
                data=result,
                model_id="weather_events_combined"
            )

        else:
            # Simple weather query using the enhanced weather_agent.
            weather = await get_weather_for_location(lat, lon)

            # Prefer the agent's own formatter when available.
            if format_weather_summary:
                weather_text = format_weather_summary(weather)
            else:
                # Fallback formatting.
                # FIX: compare against None — a temperature of 0°F is falsy
                # but is still a valid reading and must be displayed.
                temp = weather.get("temperature", {}).get("value")
                phrase = weather.get("phrase", "Conditions unavailable")
                if temp is not None:
                    weather_text = f"{phrase}, {int(temp)}°F"
                else:
                    weather_text = phrase

            # Add an outfit recommendation when that helper is loaded.
            if recommend_outfit:
                # NOTE(review): 70°F is assumed as a neutral default when the
                # reading is missing — confirm against weather_agent defaults.
                temp = weather.get("temperature", {}).get("value", 70)
                condition = weather.get("phrase", "Clear")
                outfit = recommend_outfit(temp, condition)
                reply = f"🌤️ {weather_text}\n\n👕 {outfit}"
            else:
                reply = f"🌤️ {weather_text}"

            return OrchestrationResult(
                intent=IntentType.WEATHER.value,
                reply=reply,
                success=True,
                data=weather,
                model_id="azure-maps-weather"
            )

    except Exception as e:
        logger.error(f"Weather error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.WEATHER.value,
            reply=(
                "I'm having trouble getting weather data right now. "
                "Can I help you with something else? 💛"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
1007
+
1008
+
1009
async def _handle_events(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float],
    intent_result: IntentMatch
) -> OrchestrationResult:
    """
    📅 Events handler.

    Delegates event lookups to the shared tool agent, with friendly
    fallbacks when the city is unknown or the agent is not loaded.
    """
    logger.info("📅 Processing events request")

    # A city is required before we can search for events.
    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'd love to help you find events! 📅 "
                "Which city are you interested in? "
                "I have information for Atlanta, Birmingham, Chesterfield, "
                "El Paso, Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    # Degrade gracefully when the tool agent module failed to load.
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "Event information isn't available right now. "
                "Try again soon! 📅"
            ),
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        # The tool agent requires the caller's role; default to "resident".
        agent_result = await handle_tool_request(
            user_input=message,
            role=context.get("role", "resident"),
            lat=lat,
            lon=lon,
            context=context
        )

        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=agent_result.get("response", "Events information retrieved."),
            success=True,
            data=agent_result,
            model_id="events_tool"
        )

    except Exception as exc:
        logger.error(f"Events error: {exc}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'm having trouble loading event information right now. "
                "Check back soon! 📅"
            ),
            success=False,
            error=str(exc),
            fallback_used=True
        )
1084
+
1085
def _gov_homepage(city_data: Dict[str, Any], default: str = "the city website") -> str:
    """Best-available link to the city's official homepage."""
    return city_data.get("official_links", {}).get("city_homepage", default)


def _gov_not_available(city_data: Dict[str, Any], subject: str, error: str) -> OrchestrationResult:
    """Standard 'no data yet' result pointing the user at the city website."""
    return OrchestrationResult(
        intent=IntentType.GOVERNMENT.value,
        reply=(
            f"I don't have {subject} for {city_data.get('city', 'your city')} yet. "
            f"Check the city's official website: {_gov_homepage(city_data, 'your city website')} 🏛️"
        ),
        success=False,
        error=error
    )


def _gov_council_result(city_data: Dict[str, Any], officials: list) -> OrchestrationResult:
    """Build the city-council reply (officials whose title/role mentions 'council')."""
    council_members = [
        o for o in officials
        if "council" in o.get("title", "").lower() or "council" in o.get("role", "").lower()
    ]

    if not council_members:
        return _gov_not_available(city_data, "council member information", "Council data not available")

    reply = f"🏛️ **City Council Members for {city_data.get('city', 'your city')}:**\n\n"
    for member in council_members[:5]:  # Limit to 5
        name = member.get("name", "Unknown")
        title = member.get("title", member.get("role", ""))
        district = member.get("district", "")
        email = member.get("email", "")
        phone = member.get("phone", "")

        reply += f"**{name}**"
        if title:
            reply += f" - {title}"
        if district:
            reply += f" (District {district})"
        reply += "\n"
        if email:
            reply += f"📧 {email}\n"
        if phone:
            reply += f"📞 {phone}\n"
        reply += "\n"

    if len(council_members) > 5:
        reply += f"_... and {len(council_members) - 5} more council members._\n\n"

    reply += f"💡 For complete information, visit: {_gov_homepage(city_data)}"

    return OrchestrationResult(
        intent=IntentType.GOVERNMENT.value,
        reply=reply,
        success=True,
        data={"officials": council_members},
        model_id="government_data"
    )


def _gov_mayor_result(city_data: Dict[str, Any], officials: list) -> OrchestrationResult:
    """Build the mayor reply (first official whose title/role mentions 'mayor')."""
    mayor = next(
        (o for o in officials
         if "mayor" in o.get("title", "").lower() or "mayor" in o.get("role", "").lower()),
        None
    )

    if mayor is None:
        return _gov_not_available(city_data, "mayor information", "Mayor data not available")

    reply = f"🏛️ **Mayor of {city_data.get('city', 'your city')}:**\n\n"
    reply += f"**{mayor.get('name', 'Unknown')}**\n"
    if mayor.get("email"):
        reply += f"📧 {mayor['email']}\n"
    if mayor.get("phone"):
        reply += f"📞 {mayor['phone']}\n"
    reply += f"\n💡 Visit: {_gov_homepage(city_data)} for more information."

    return OrchestrationResult(
        intent=IntentType.GOVERNMENT.value,
        reply=reply,
        success=True,
        data={"mayor": mayor},
        model_id="government_data"
    )


def _gov_overview_result(city_data: Dict[str, Any], officials: list) -> OrchestrationResult:
    """Build the general officials overview reply."""
    if not officials:
        return _gov_not_available(city_data, "government official information", "Government data not available")

    reply = f"🏛️ **Government Officials for {city_data.get('city', 'your city')}:**\n\n"
    for official in officials[:5]:  # Limit to 5
        name = official.get("name", "Unknown")
        title = official.get("title", official.get("role", ""))
        reply += f"**{name}** - {title}\n"

    if len(officials) > 5:
        reply += f"\n_... and {len(officials) - 5} more officials._\n"

    reply += f"\n💡 For complete information, visit: {_gov_homepage(city_data)}"

    return OrchestrationResult(
        intent=IntentType.GOVERNMENT.value,
        reply=reply,
        success=True,
        data={"officials": officials},
        model_id="government_data"
    )


async def _handle_government(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str]
) -> OrchestrationResult:
    """
    🏛️ Government officials and representatives handler.

    Provides information about city council members, the mayor, and other
    elected officials, drawn from the per-city resources file.

    Args:
        message: Raw user message; keyword-matched to pick the sub-topic
            (council/representative, mayor, or a general overview).
        context: Request context dictionary (unused here, kept for
            handler-signature consistency).
        tenant_id: City identifier such as "atlanta_ga".

    Returns:
        OrchestrationResult describing the requested officials, or a
        fallback result when the city or its data is unavailable.
    """
    logger.info("🏛️ Processing government/officials request")

    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.GOVERNMENT.value,
            reply=(
                "I can help you find information about your city officials! 🏛️ "
                "Which city are you asking about? "
                "I cover Atlanta, Birmingham, Chesterfield, El Paso, "
                "Norfolk, Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    try:
        # Imported lazily so the module loads even if location_utils is broken.
        from app.location_utils import load_city_resources
        city_data = load_city_resources(tenant_id)

        # Government/officials section of the per-city data file.
        officials = city_data.get("government", {}).get("officials", [])

        # Dispatch on what the user asked about.
        message_lower = message.lower()
        if "council" in message_lower or "representative" in message_lower:
            return _gov_council_result(city_data, officials)
        if "mayor" in message_lower:
            return _gov_mayor_result(city_data, officials)
        return _gov_overview_result(city_data, officials)

    except FileNotFoundError:
        logger.warning(f"Government data file not found for {tenant_id}")
        return OrchestrationResult(
            intent=IntentType.GOVERNMENT.value,
            reply=(
                "Government information for your city isn't available yet. "
                "Check your city's official website for the most current information! 🏛️"
            ),
            success=False,
            error="Data file not found",
            fallback_used=True
        )

    except Exception as e:
        logger.error(f"Government query error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.GOVERNMENT.value,
            reply=(
                "I'm having trouble accessing government information right now. "
                "Please try again in a moment! 🏛️"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
1262
+
1263
+
1264
async def _handle_local_resources(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float]
) -> OrchestrationResult:
    """
    🏛️ Local resources handler (shelters, libraries, food banks, etc.).

    Delegates resource lookups to the shared tool agent, with friendly
    fallbacks when the city is unknown or the agent is not loaded.
    """
    logger.info("🏛️ Processing local resources request")

    # A city is required before we can search for resources.
    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I can help you find local resources! 🏛️ "
                "Which city do you need help in? "
                "I cover Atlanta, Birmingham, Chesterfield, El Paso, "
                "Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    # Degrade gracefully when the tool agent module failed to load.
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "Resource information isn't available right now. "
                "Try again soon! 🏛️"
            ),
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        # The tool agent requires the caller's role; default to "resident".
        agent_result = await handle_tool_request(
            user_input=message,
            role=context.get("role", "resident"),
            lat=lat,
            lon=lon,
            context=context
        )

        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=agent_result.get("response", "Resource information retrieved."),
            success=True,
            data=agent_result,
            model_id="resources_tool"
        )

    except Exception as exc:
        logger.error(f"Resources error: {exc}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I'm having trouble finding resource information right now. "
                "Would you like to try a different search? 💛"
            ),
            success=False,
            error=str(exc),
            fallback_used=True
        )
1336
+ )
1337
+
1338
+
1339
async def _handle_conversational(
    message: str,
    intent: IntentType,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    💬 Handles conversational intents (greeting, help, unknown).
    Uses Penny's core LLM for natural responses with graceful fallback.
    """
    logger.info(f"💬 Processing conversational intent: {intent.value}")

    try:
        if not LLM_AVAILABLE:
            # No LLM loaded — jump straight to the canned responses below.
            logger.info("LLM not available, using fallback responses")
            raise Exception("LLM service not loaded")

        # Choose a prompt template for the detected intent.
        if intent == IntentType.GREETING:
            prompt = (
                f"The user greeted you with: '{message}'\n\n"
                "Respond warmly as Penny, introduce yourself briefly, "
                "and ask how you can help them with civic services today."
            )
        elif intent == IntentType.HELP:
            prompt = (
                f"The user asked for help: '{message}'\n\n"
                "Explain Penny's main features:\n"
                "- Finding local resources (shelters, libraries, food banks)\n"
                "- Community events and activities\n"
                "- Weather information\n"
                "- 27-language translation\n"
                "- Document processing help\n\n"
                "Ask which city they need assistance in."
            )
        else:  # UNKNOWN (or any other conversational intent)
            prompt = (
                f"The user said: '{message}'\n\n"
                "You're not sure what they need help with. "
                "Respond kindly, acknowledge their request, and ask them to "
                "clarify or rephrase. Mention a few things you can help with."
            )

        # Call Penny's core LLM.
        llm_result = await generate_response(prompt=prompt, max_new_tokens=200)

        # Compatibility helper normalizes success/error checking.
        ok, err = _check_result_success(llm_result, ["response"])
        if not ok:
            raise Exception(err or "LLM generation failed")

        return OrchestrationResult(
            intent=intent.value,
            reply=llm_result.get("response", ""),
            success=True,
            data=llm_result,
            model_id=CORE_MODEL_ID
        )

    except Exception as exc:
        logger.warning(f"Conversational handler using fallback: {exc}")

        # Hardcoded fallback responses (Penny's sweet southern neighborly voice)
        fallback_replies = {
            IntentType.GREETING: (
                "Well hello there, sugar! 👋 I'm Penny, and I'm so glad you stopped by! "
                "I've lived in this community for years and I know just about everything "
                "there is to know about our wonderful city. I can help you find local resources, "
                "events, weather, and so much more! What city are you in, darlin'?"
            ),
            IntentType.HELP: (
                "Oh honey, I'd be delighted to help! 💛 I'm Penny, and I can help you with:\n\n"
                "🏛️ Local resources (shelters, libraries, food banks, and more)\n"
                "📅 Community events and things to do\n"
                "🌤️ Weather updates and what to wear\n"
                "🌍 Translation in 27 languages (because everyone should feel welcome!)\n"
                "🏛️ City officials and representatives\n"
                "📄 Document help\n\n"
                "What would you like to know about, sweetie? I'm here to help!"
            ),
            IntentType.UNKNOWN: (
                "Oh honey, I'm not quite sure I understood that. Could you say that again "
                "a little differently? I'm best at helping with local services, events, weather, "
                "city officials, and translation! Just tell me what you need, darlin'! 💬"
            )
        }

        return OrchestrationResult(
            intent=intent.value,
            reply=fallback_replies.get(intent, "How can I help you today? 💛"),
            success=True,
            model_id="fallback",
            fallback_used=True
        )
1442
+
1443
+
1444
async def _handle_fallback(
    message: str,
    intent: IntentType,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    🆘 Ultimate fallback handler for unhandled intents.

    A safety net that should rarely trigger, but guarantees the user
    always receives a helpful response.
    """
    logger.warning(f"⚠️ Fallback triggered for intent: {intent.value}")

    reply = "\n".join([
        "Oh honey, I'm not quite sure how to help with that just yet. "
        "I'm still learning new things every day! 🤖",
        "",
        "But I'm really good at helping with:",
        "🏛️ Finding local resources and services",
        "📅 Community events and things to do",
        "🌤️ Weather updates and what to wear",
        "🏛️ City officials and representatives",
        "🌍 Translation in lots of languages",
        "",
        "Could you try asking me in a different way, sugar? I'd love to help! 💛",
    ])

    return OrchestrationResult(
        intent=intent.value,
        reply=reply,
        success=False,
        error="Unhandled intent",
        fallback_used=True
    )
1476
+
1477
+
1478
+ # ============================================================
1479
+ # HEALTH CHECK & DIAGNOSTICS (ENHANCED)
1480
+ # ============================================================
1481
+
1482
def get_orchestrator_health() -> Dict[str, Any]:
    """
    📊 Returns comprehensive orchestrator health status.

    Used by the main application health check endpoint to monitor
    the orchestrator and all of its service dependencies.

    Returns:
        Dictionary with health information including:
        - status: operational/degraded
        - service_availability: which services are loaded
        - statistics: orchestration counts
        - supported_intents: list of all intent types
        - features: available orchestrator features
    """
    availability = get_service_availability()

    # The orchestrator stays operational even when optional services are
    # down (graceful degradation); only these are considered critical.
    required_services = ("weather", "tool_agent")
    all_critical_up = all(availability.get(name, False) for name in required_services)

    return {
        "status": "operational" if all_critical_up else "degraded",
        "core_model": CORE_MODEL_ID,
        "max_response_time_ms": MAX_RESPONSE_TIME_MS,
        "statistics": {
            "total_orchestrations": _orchestration_count,
            "emergency_interactions": _emergency_count
        },
        "service_availability": availability,
        "supported_intents": [member.value for member in IntentType],
        "features": {
            "emergency_routing": True,
            "compound_intents": True,
            "fallback_handling": True,
            "performance_tracking": True,
            "context_aware": True,
            "multi_language": TRANSLATION_AVAILABLE,
            "sentiment_analysis": SENTIMENT_AVAILABLE,
            "bias_detection": BIAS_AVAILABLE,
            "weather_integration": WEATHER_AGENT_AVAILABLE,
            "event_recommendations": EVENT_WEATHER_AVAILABLE
        }
    }
1530
+
1531
+
1532
def get_orchestrator_stats() -> Dict[str, Any]:
    """
    📈 Returns orchestrator statistics.

    Useful for monitoring and analytics.

    Returns:
        Dictionary with orchestration/emergency counters plus the number
        of loaded services out of the total known services.
    """
    # FIX: query service availability once instead of twice per call.
    services = get_service_availability()
    return {
        "total_orchestrations": _orchestration_count,
        "emergency_interactions": _emergency_count,
        "services_available": sum(1 for v in services.values() if v),
        "services_total": len(services)
    }
1544
+
1545
+
1546
+ # ============================================================
1547
+ # TESTING & DEBUGGING (ENHANCED)
1548
+ # ============================================================
1549
+
1550
if __name__ == "__main__":
    # 🧪 Smoke-test the orchestrator with sample queries.
    # Run with: python -m app.orchestrator
    import asyncio

    banner = "=" * 60

    print(banner)
    print("🧪 Testing Penny's Orchestrator")
    print(banner)

    # Show which backing services loaded before exercising any intents.
    print("\n📊 Service Availability Check:")
    for service, available in get_service_availability().items():
        marker = "✅" if available else "❌"
        print(f" {marker} {service}: {'Available' if available else 'Not loaded'}")

    print("\n" + banner)

    test_queries = [
        {
            "name": "Greeting",
            "message": "Hi Penny!",
            "context": {}
        },
        {
            "name": "Weather with location",
            "message": "What's the weather?",
            "context": {"lat": 33.7490, "lon": -84.3880}
        },
        {
            "name": "Events in city",
            "message": "Events in Atlanta",
            "context": {"tenant_id": "atlanta_ga"}
        },
        {
            "name": "Help request",
            "message": "I need help",
            "context": {}
        },
        {
            "name": "Translation",
            "message": "Translate hello",
            "context": {"source_lang": "eng_Latn", "target_lang": "spa_Latn"}
        }
    ]

    async def run_tests():
        for idx, case in enumerate(test_queries, 1):
            print(f"\n--- Test {idx}: {case['name']} ---")
            print(f"Query: {case['message']}")

            try:
                result = await run_orchestrator(case["message"], case["context"])
                print(f"Intent: {result['intent']}")
                print(f"Success: {result['success']}")
                print(f"Fallback: {result.get('fallback_used', False)}")

                # Truncate long replies for readable console output.
                reply = result["reply"]
                if len(reply) > 150:
                    reply = reply[:150] + "..."
                print(f"Reply: {reply}")

                if result.get("response_time_ms"):
                    print(f"Response time: {result['response_time_ms']:.0f}ms")

            except Exception as exc:
                print(f"❌ Error: {exc}")

    asyncio.run(run_tests())

    print("\n" + banner)
    print("📊 Final Statistics:")
    for key, value in get_orchestrator_stats().items():
        print(f" {key}: {value}")

    print("\n" + banner)
    print("✅ Tests complete")
    print(banner)