pythonprincess commited on
Commit
ce47b98
Β·
verified Β·
1 Parent(s): 4959b98

Upload orchestrator.py

Browse files
Files changed (1) hide show
  1. app/orchestrator.py +1382 -0
app/orchestrator.py ADDED
@@ -0,0 +1,1382 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ 🎭 PENNY Orchestrator - Request Routing & Coordination Engine
3
+
4
+ This is Penny's decision-making brain. She analyzes each request, determines
5
+ the best way to help, and coordinates between her specialized AI models and
6
+ civic data tools.
7
+
8
+ MISSION: Route every resident request to the right resource while maintaining
9
+ Penny's warm, helpful personality and ensuring fast, accurate responses.
10
+
11
+ FEATURES:
12
+ - Enhanced intent classification with confidence scoring
13
+ - Compound intent handling (weather + events)
14
+ - Graceful fallbacks when services are unavailable
15
+ - Performance tracking for all operations
16
+ - Context-aware responses
17
+ - Emergency routing with immediate escalation
18
+
19
+ ENHANCEMENTS (Phase 1):
20
+ - βœ… Structured logging with performance tracking
21
+ - βœ… Safe imports with availability flags
22
+ - βœ… Result format checking helper
23
+ - βœ… Enhanced error handling patterns
24
+ - βœ… Service availability tracking
25
+ - βœ… Fixed function signature mismatches
26
+ - βœ… Integration with enhanced modules
27
+ """
28
+
29
+ import logging
30
+ import time
31
+ from typing import Dict, Any, Optional, List, Tuple
32
+ from datetime import datetime
33
+ from dataclasses import dataclass, field
34
+ from enum import Enum
35
+
36
+ # --- ENHANCED MODULE IMPORTS ---
37
+ from app.intents import classify_intent_detailed, IntentType, IntentMatch
38
+ from app.location_utils import (
39
+ extract_location_detailed,
40
+ LocationMatch,
41
+ LocationStatus,
42
+ get_city_coordinates
43
+ )
44
+ from app.logging_utils import (
45
+ log_interaction,
46
+ sanitize_for_logging,
47
+ LogLevel
48
+ )
49
+
50
+ # --- AGENT IMPORTS (with availability tracking) ---
51
+ try:
52
+ from app.weather_agent import (
53
+ get_weather_for_location,
54
+ recommend_outfit,
55
+ weather_to_event_recommendations,
56
+ format_weather_summary
57
+ )
58
+ WEATHER_AGENT_AVAILABLE = True
59
+ except ImportError as e:
60
+ logger = logging.getLogger(__name__)
61
+ logger.warning(f"Weather agent not available: {e}")
62
+ WEATHER_AGENT_AVAILABLE = False
63
+
64
+ try:
65
+ from app.event_weather import get_event_recommendations_with_weather
66
+ EVENT_WEATHER_AVAILABLE = True
67
+ except ImportError as e:
68
+ logger = logging.getLogger(__name__)
69
+ logger.warning(f"Event weather integration not available: {e}")
70
+ EVENT_WEATHER_AVAILABLE = False
71
+
72
+ try:
73
+ from app.tool_agent import handle_tool_request
74
+ TOOL_AGENT_AVAILABLE = True
75
+ except ImportError as e:
76
+ logger = logging.getLogger(__name__)
77
+ logger.warning(f"Tool agent not available: {e}")
78
+ TOOL_AGENT_AVAILABLE = False
79
+
80
+ # --- MODEL IMPORTS (with availability tracking) ---
81
+ try:
82
+ from models.translation.translation_utils import translate_text
83
+ TRANSLATION_AVAILABLE = True
84
+ except ImportError as e:
85
+ logger = logging.getLogger(__name__)
86
+ logger.warning(f"Translation service not available: {e}")
87
+ TRANSLATION_AVAILABLE = False
88
+
89
+ try:
90
+ from models.sentiment.sentiment_utils import get_sentiment_analysis
91
+ SENTIMENT_AVAILABLE = True
92
+ except ImportError as e:
93
+ logger = logging.getLogger(__name__)
94
+ logger.warning(f"Sentiment service not available: {e}")
95
+ SENTIMENT_AVAILABLE = False
96
+
97
+ try:
98
+ from models.bias.bias_utils import check_bias
99
+ BIAS_AVAILABLE = True
100
+ except ImportError as e:
101
+ logger = logging.getLogger(__name__)
102
+ logger.warning(f"Bias detection service not available: {e}")
103
+ BIAS_AVAILABLE = False
104
+
105
+ try:
106
+ from models.gemma.gemma_utils import generate_response
107
+ LLM_AVAILABLE = True
108
+ except ImportError as e:
109
+ logger = logging.getLogger(__name__)
110
+ logger.warning(f"LLM service not available: {e}")
111
+ LLM_AVAILABLE = False
112
+
113
+ # --- LOGGING SETUP ---
114
+ logger = logging.getLogger(__name__)
115
+
116
+ # --- CONFIGURATION ---
117
+ CORE_MODEL_ID = "penny-core-agent"
118
+ MAX_RESPONSE_TIME_MS = 5000 # 5 seconds - log if exceeded
119
+
120
+ # --- TRACKING COUNTERS ---
121
+ _orchestration_count = 0
122
+ _emergency_count = 0
123
+
124
+
125
+ # ============================================================
126
+ # COMPATIBILITY HELPER - Result Format Checking
127
+ # ============================================================
128
+
129
+ def _check_result_success(
130
+ result: Dict[str, Any],
131
+ expected_keys: List[str]
132
+ ) -> Tuple[bool, Optional[str]]:
133
+ """
134
+ βœ… Check if a utility function result indicates success.
135
+
136
+ Handles multiple return format patterns:
137
+ - Explicit "success" key (preferred)
138
+ - Presence of expected data keys (implicit success)
139
+ - Presence of "error" key (explicit failure)
140
+
141
+ This helper fixes compatibility issues where different utility
142
+ functions return different result formats.
143
+
144
+ Args:
145
+ result: Dictionary returned from utility function
146
+ expected_keys: List of keys that indicate successful data
147
+
148
+ Returns:
149
+ Tuple of (is_success, error_message)
150
+
151
+ Example:
152
+ result = await translate_text(message, "en", "es")
153
+ success, error = _check_result_success(result, ["translated_text"])
154
+ if success:
155
+ text = result.get("translated_text")
156
+ """
157
+ # Check for explicit success key
158
+ if "success" in result:
159
+ return result["success"], result.get("error")
160
+
161
+ # Check for explicit error (presence = failure)
162
+ if "error" in result and result["error"]:
163
+ return False, result["error"]
164
+
165
+ # Check for expected data keys (implicit success)
166
+ has_data = any(key in result for key in expected_keys)
167
+ if has_data:
168
+ return True, None
169
+
170
+ # Unknown format - assume failure
171
+ return False, "Unexpected response format"
172
+
173
+
174
+ # ============================================================
175
+ # SERVICE AVAILABILITY CHECK
176
+ # ============================================================
177
+
178
def get_service_availability() -> Dict[str, bool]:
    """
    πŸ“Š Report which optional services loaded successfully at import time.

    Each flag mirrors the module-level *_AVAILABLE constant set during the
    guarded imports at the top of this file. Consumers (health checks,
    debugging endpoints, routing logic) use these flags to decide whether
    to call a service or fall back.

    Returns:
        Dictionary mapping service names to availability booleans.
    """
    return dict(
        translation=TRANSLATION_AVAILABLE,
        sentiment=SENTIMENT_AVAILABLE,
        bias_detection=BIAS_AVAILABLE,
        llm=LLM_AVAILABLE,
        tool_agent=TOOL_AGENT_AVAILABLE,
        weather=WEATHER_AGENT_AVAILABLE,
        event_weather=EVENT_WEATHER_AVAILABLE,
    )
197
+
198
+
199
+ # ============================================================
200
+ # ORCHESTRATION RESULT STRUCTURE
201
+ # ============================================================
202
+
203
@dataclass
class OrchestrationResult:
    """
    πŸ“¦ Uniform envelope for every orchestration outcome.

    Every handler in this module returns one of these, giving the API
    layer a single, predictable shape regardless of which intent or
    service produced the answer.
    """
    intent: str                             # Detected intent
    reply: str                              # User-facing response
    success: bool                           # Whether request succeeded
    tenant_id: Optional[str] = None         # City/location identifier
    data: Optional[Dict[str, Any]] = None   # Raw data from services
    model_id: Optional[str] = None          # Which model/service was used
    error: Optional[str] = None             # Error message if failed
    response_time_ms: Optional[float] = None
    confidence: Optional[float] = None      # Intent confidence score
    fallback_used: bool = False             # True if fallback logic triggered

    def to_dict(self) -> Dict[str, Any]:
        """Serialize all fields into a plain dict for API responses."""
        # Field order here matches the declaration order above so the
        # JSON payload layout stays stable for API consumers.
        ordered_fields = (
            "intent",
            "reply",
            "success",
            "tenant_id",
            "data",
            "model_id",
            "error",
            "response_time_ms",
            "confidence",
            "fallback_used",
        )
        return {name: getattr(self, name) for name in ordered_fields}
236
+
237
+
238
+ # ============================================================
239
+ # MAIN ORCHESTRATOR FUNCTION (ENHANCED)
240
+ # ============================================================
241
+
242
async def run_orchestrator(
    message: str,
    context: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    🧠 Main decision-making brain of Penny.

    This function:
    1. Analyzes the user's message to determine intent
    2. Extracts location/city information
    3. Routes to the appropriate specialized service
    4. Handles errors gracefully with helpful fallbacks
    5. Tracks performance and logs the interaction

    Args:
        message: User's input text
        context: Additional context (tenant_id, lat, lon, session_id, etc.)

    Returns:
        Dictionary with response and metadata (see OrchestrationResult.to_dict)

    Example:
        result = await run_orchestrator(
            message="What's the weather in Atlanta?",
            context={"lat": 33.7490, "lon": -84.3880}
        )
    """
    global _orchestration_count
    _orchestration_count += 1

    start_time = time.time()

    # Initialize context if not provided
    if context is None:
        context = {}

    # Sanitize message for logging (PII protection)
    safe_message = sanitize_for_logging(message)
    logger.info(f"🎭 Orchestrator processing: '{safe_message[:50]}...'")

    try:
        # === STEP 1: CLASSIFY INTENT (Enhanced) ===
        intent_result = classify_intent_detailed(message)
        intent = intent_result.intent
        confidence = intent_result.confidence

        logger.info(
            f"Intent detected: {intent.value} "
            f"(confidence: {confidence:.2f})"
        )

        # === STEP 2: EXTRACT LOCATION ===
        tenant_id = context.get("tenant_id")
        lat = context.get("lat")
        lon = context.get("lon")

        # If tenant_id not provided, try to extract from message
        if not tenant_id or tenant_id == "unknown":
            location_result = extract_location_detailed(message)

            if location_result.status == LocationStatus.FOUND:
                tenant_id = location_result.tenant_id
                logger.info(f"Location extracted: {tenant_id}")

                # Get coordinates for this tenant if available
                # (only fills in lat/lon when the caller supplied neither)
                coords = get_city_coordinates(tenant_id)
                if coords and lat is None and lon is None:
                    lat, lon = coords["lat"], coords["lon"]
                    logger.info(f"Coordinates loaded: {lat}, {lon}")

            elif location_result.status == LocationStatus.USER_LOCATION_NEEDED:
                logger.info("User location services needed")
            else:
                logger.info(f"No location detected: {location_result.status}")

        # === STEP 3: HANDLE EMERGENCY INTENTS (CRITICAL) ===
        # NOTE: emergency requests return early and skip STEP 5; their
        # compliance logging happens inside _handle_emergency itself.
        if intent == IntentType.EMERGENCY:
            result = await _handle_emergency(
                message=message,
                context=context,
                start_time=start_time
            )
            return result.to_dict()

        # === STEP 4: ROUTE TO APPROPRIATE HANDLER ===

        # Translation
        if intent == IntentType.TRANSLATION:
            result = await _handle_translation(message, context)

        # Sentiment Analysis
        elif intent == IntentType.SENTIMENT_ANALYSIS:
            result = await _handle_sentiment(message, context)

        # Bias Detection
        elif intent == IntentType.BIAS_DETECTION:
            result = await _handle_bias(message, context)

        # Document Processing
        elif intent == IntentType.DOCUMENT_PROCESSING:
            result = await _handle_document(message, context)

        # Weather (includes compound weather+events handling)
        elif intent == IntentType.WEATHER:
            result = await _handle_weather(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon,
                intent_result=intent_result
            )

        # Events
        elif intent == IntentType.EVENTS:
            result = await _handle_events(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon,
                intent_result=intent_result
            )

        # Local Resources
        elif intent == IntentType.LOCAL_RESOURCES:
            result = await _handle_local_resources(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon
            )

        # Greeting, Help, Unknown
        elif intent in [IntentType.GREETING, IntentType.HELP, IntentType.UNKNOWN]:
            result = await _handle_conversational(
                message=message,
                intent=intent,
                context=context
            )

        else:
            # Unhandled intent type (shouldn't happen, but safety net)
            result = await _handle_fallback(message, intent, context)

        # === STEP 5: ADD METADATA & LOG INTERACTION ===
        response_time = (time.time() - start_time) * 1000
        result.response_time_ms = round(response_time, 2)
        result.confidence = confidence
        result.tenant_id = tenant_id

        # Log the interaction with structured logging
        log_interaction(
            tenant_id=tenant_id or "unknown",
            interaction_type="orchestration",
            intent=intent.value,
            response_time_ms=response_time,
            success=result.success,
            metadata={
                "confidence": confidence,
                "fallback_used": result.fallback_used,
                "model_id": result.model_id,
                "orchestration_count": _orchestration_count
            }
        )

        # Log slow responses
        if response_time > MAX_RESPONSE_TIME_MS:
            logger.warning(
                f"⚠️ Slow response: {response_time:.0f}ms "
                f"(intent: {intent.value})"
            )

        logger.info(
            f"βœ… Orchestration complete: {intent.value} "
            f"({response_time:.0f}ms)"
        )

        return result.to_dict()

    except Exception as e:
        # === CATASTROPHIC FAILURE HANDLER ===
        # Any unhandled error from classification, routing, or a handler
        # lands here; the user always gets a friendly apology payload.
        response_time = (time.time() - start_time) * 1000
        logger.error(
            f"❌ Orchestrator error: {e} "
            f"(response_time: {response_time:.0f}ms)",
            exc_info=True
        )

        # Log failed interaction
        log_interaction(
            tenant_id=context.get("tenant_id", "unknown"),
            interaction_type="orchestration_error",
            intent="error",
            response_time_ms=response_time,
            success=False,
            metadata={
                "error": str(e),
                "error_type": type(e).__name__
            }
        )

        error_result = OrchestrationResult(
            intent="error",
            reply=(
                "I'm having trouble processing your request right now. "
                "Please try again in a moment, or let me know if you need "
                "immediate assistance! πŸ’›"
            ),
            success=False,
            error=str(e),
            model_id="orchestrator",
            fallback_used=True,
            response_time_ms=round(response_time, 2)
        )

        return error_result.to_dict()
460
+
461
+
462
+ # ============================================================
463
+ # SPECIALIZED INTENT HANDLERS (ENHANCED)
464
+ # ============================================================
465
+
466
async def _handle_emergency(
    message: str,
    context: Dict[str, Any],
    start_time: float
) -> OrchestrationResult:
    """
    🚨 CRITICAL: Emergency intent handler.

    Responds immediately with national crisis resources and records the
    interaction for compliance review. Every emergency request MUST be
    logged; this path never silently drops an interaction.

    Args:
        message: Original user text that triggered the emergency intent.
        context: Request context (tenant_id used for the compliance log).
        start_time: time.time() captured at orchestration start, used to
            compute response latency.
    """
    global _emergency_count
    _emergency_count += 1

    # Scrub PII before the message fragment reaches the logs.
    logger.warning(
        f"🚨 EMERGENCY INTENT DETECTED (#{_emergency_count}): "
        f"{sanitize_for_logging(message)[:100]}"
    )

    # TODO: Integrate with safety_utils.py when enhanced
    # from app.safety_utils import route_emergency
    # result = await route_emergency(message, context)

    # Until full routing exists, answer with national crisis resources.
    reply = (
        "🚨 **If this is a life-threatening emergency, please call 911 immediately.**\n\n"
        "For crisis support:\n"
        "- **National Suicide Prevention Lifeline:** 988\n"
        "- **Crisis Text Line:** Text HOME to 741741\n"
        "- **National Domestic Violence Hotline:** 1-800-799-7233\n\n"
        "I'm here to help connect you with local resources. "
        "What kind of support do you need right now?"
    )

    # Compliance-critical structured log entry.
    elapsed_ms = (time.time() - start_time) * 1000
    log_interaction(
        tenant_id=context.get("tenant_id", "emergency"),
        interaction_type="emergency",
        intent=IntentType.EMERGENCY.value,
        response_time_ms=elapsed_ms,
        success=True,
        metadata={
            "emergency_number": _emergency_count,
            "message_length": len(message),
            "timestamp": datetime.now().isoformat(),
            "action": "crisis_resources_provided"
        }
    )

    logger.critical(
        f"EMERGENCY LOG #{_emergency_count}: Resources provided ({elapsed_ms:.0f}ms)"
    )

    return OrchestrationResult(
        intent=IntentType.EMERGENCY.value,
        reply=reply,
        success=True,
        model_id="emergency_router",
        data={"crisis_resources_provided": True},
        response_time_ms=round(elapsed_ms, 2)
    )
532
+
533
+
534
async def _handle_translation(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    🌍 Translation handler - 27 languages supported.

    Delegates to the translation model, degrading gracefully both when
    the module never loaded and when the live service reports itself
    unavailable.
    """
    logger.info("🌍 Processing translation request")

    # Guard: module failed to import at startup.
    if not TRANSLATION_AVAILABLE:
        logger.warning("Translation service not available")
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply="Translation isn't available right now. Try again soon! 🌍",
            success=False,
            error="Service not loaded",
            fallback_used=True
        )

    try:
        # Language pair comes from context; defaults match prior behavior.
        src_lang = context.get("source_lang", "eng_Latn")
        tgt_lang = context.get("target_lang", "spa_Latn")

        # TODO: Parse languages from message when enhanced
        # Example: "Translate 'hello' to Spanish"

        outcome = await translate_text(message, src_lang, tgt_lang)

        # Guard: service loaded but reports itself down at runtime.
        if not outcome.get("available", True):
            unavailable_reason = outcome.get("error", "Translation service is temporarily unavailable.")
            logger.warning(f"Translation service unavailable: {unavailable_reason}")
            return OrchestrationResult(
                intent=IntentType.TRANSLATION.value,
                reply=(
                    "I'm having trouble accessing the translation service right now. "
                    "Please try again in a moment! 🌍"
                ),
                success=False,
                error=unavailable_reason,
                fallback_used=True
            )

        # Normalize the result shape via the shared compatibility helper.
        ok, failure = _check_result_success(outcome, ["translated_text"])
        if not ok:
            # Re-raised so the except block below builds the fallback reply.
            raise Exception(failure or "Translation failed")

        if outcome.get("skipped", False):
            # Source and target languages matched; nothing was translated.
            reply = f"The text is already in {tgt_lang}. No translation needed! 🌍"
        else:
            translated_text = outcome.get("translated_text", "")
            reply = (
                f"Here's the translation:\n\n"
                f"**{translated_text}**\n\n"
                f"(Translated from {src_lang} to {tgt_lang})"
            )

        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply=reply,
            success=True,
            data=outcome,
            model_id="penny-translate-agent"
        )

    except Exception as e:
        logger.error(f"Translation error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply="I had trouble translating that. Could you rephrase? πŸ’¬",
            success=False,
            error=str(e),
            fallback_used=True
        )
622
+
623
+
624
async def _handle_sentiment(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    😊 Sentiment analysis handler.

    Runs the sentiment model over the user's text and reports the
    detected label plus confidence, with a friendly fallback whenever
    the service is missing or errors out.
    """
    logger.info("😊 Processing sentiment analysis")

    # Guard: module failed to import at startup.
    if not SENTIMENT_AVAILABLE:
        logger.warning("Sentiment service not available")
        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply="Sentiment analysis isn't available right now. Try again soon! 😊",
            success=False,
            error="Service not loaded",
            fallback_used=True
        )

    try:
        outcome = await get_sentiment_analysis(message)

        # Normalize the result shape via the shared compatibility helper.
        ok, failure = _check_result_success(outcome, ["label", "score"])
        if not ok:
            # Re-raised so the except block below builds the fallback reply.
            raise Exception(failure or "Sentiment analysis failed")

        detected_label = outcome.get("label", "neutral")
        detected_score = outcome.get("score", 0.0)

        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply=(
                f"The overall sentiment detected is: **{detected_label}**\n"
                f"Confidence: {detected_score:.1%}"
            ),
            success=True,
            data=outcome,
            model_id="penny-sentiment-agent"
        )

    except Exception as e:
        logger.error(f"Sentiment analysis error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply="I couldn't analyze the sentiment right now. Try again? 😊",
            success=False,
            error=str(e),
            fallback_used=True
        )
681
+
682
async def _handle_bias(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    βš–οΈ Bias detection handler.

    Runs the bias classifier over the user's text and summarizes the
    top-ranked category, falling back gracefully when the service is
    missing or errors out.
    """
    logger.info("βš–οΈ Processing bias detection")

    # Guard: module failed to import at startup.
    if not BIAS_AVAILABLE:
        logger.warning("Bias detection service not available")
        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply="Bias detection isn't available right now. Try again soon! βš–οΈ",
            success=False,
            error="Service not loaded",
            fallback_used=True
        )

    try:
        outcome = await check_bias(message)

        # Normalize the result shape via the shared compatibility helper.
        ok, failure = _check_result_success(outcome, ["analysis"])
        if not ok:
            # Re-raised so the except block below builds the fallback reply.
            raise Exception(failure or "Bias detection failed")

        findings = outcome.get("analysis", [])
        if findings:
            # Only the highest-scoring category is surfaced to the user.
            primary = findings[0]
            reply = (
                f"Bias analysis complete:\n\n"
                f"**Most likely category:** {primary.get('label', 'unknown')}\n"
                f"**Confidence:** {primary.get('score', 0.0):.1%}"
            )
        else:
            reply = "The text appears relatively neutral. βš–οΈ"

        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply=reply,
            success=True,
            data=outcome,
            model_id="penny-bias-checker"
        )

    except Exception as e:
        logger.error(f"Bias detection error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply="I couldn't check for bias right now. Try again? βš–οΈ",
            success=False,
            error=str(e),
            fallback_used=True
        )
746
+
747
+
748
async def _handle_document(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    πŸ“„ Document processing handler.

    This handler only points the user at the upload flow; the actual
    file upload is served by router.py via FastAPI.
    """
    logger.info("πŸ“„ Document processing requested")

    upload_instructions = (
        "I can help you process documents! πŸ“„\n\n"
        "Please upload your document (PDF or image) using the "
        "`/upload-document` endpoint. I can extract text, analyze forms, "
        "and help you understand civic documents.\n\n"
        "What kind of document do you need help with?"
    )

    return OrchestrationResult(
        intent=IntentType.DOCUMENT_PROCESSING.value,
        reply=upload_instructions,
        success=True,
        model_id="document_router"
    )
774
+
775
+
776
+ async def _handle_weather(
777
+ message: str,
778
+ context: Dict[str, Any],
779
+ tenant_id: Optional[str],
780
+ lat: Optional[float],
781
+ lon: Optional[float],
782
+ intent_result: IntentMatch
783
+ ) -> OrchestrationResult:
784
+ """
785
+ 🌀️ Weather handler with compound intent support.
786
+
787
+ Handles both simple weather queries and compound weather+events queries.
788
+ Uses enhanced weather_agent.py with caching and performance tracking.
789
+ """
790
+ logger.info("🌀️ Processing weather request")
791
+
792
+ # Check service availability first
793
+ if not WEATHER_AGENT_AVAILABLE:
794
+ logger.warning("Weather agent not available")
795
+ return OrchestrationResult(
796
+ intent=IntentType.WEATHER.value,
797
+ reply="Weather service isn't available right now. Try again soon! 🌀️",
798
+ success=False,
799
+ error="Weather agent not loaded",
800
+ fallback_used=True
801
+ )
802
+
803
+ # Check for compound intent (weather + events)
804
+ is_compound = intent_result.is_compound or IntentType.EVENTS in intent_result.secondary_intents
805
+
806
+ # === ENHANCED LOCATION RESOLUTION ===
807
+ # Try multiple strategies to get coordinates
808
+
809
+ # Strategy 1: Use provided coordinates
810
+ if lat is not None and lon is not None:
811
+ logger.info(f"Using provided coordinates: {lat}, {lon}")
812
+
813
+ # Strategy 2: Get coordinates from tenant_id (try multiple formats)
814
+ elif tenant_id:
815
+ # Try tenant_id as-is first
816
+ coords = get_city_coordinates(tenant_id)
817
+
818
+ # If that fails and tenant_id doesn't have state suffix, try adding common suffixes
819
+ if not coords and "_" not in tenant_id:
820
+ # Try common state abbreviations for known cities
821
+ state_suffixes = ["_va", "_ga", "_al", "_tx", "_ri", "_wa"]
822
+ for suffix in state_suffixes:
823
+ test_tenant_id = tenant_id + suffix
824
+ coords = get_city_coordinates(test_tenant_id)
825
+ if coords:
826
+ tenant_id = test_tenant_id # Update tenant_id to normalized form
827
+ logger.info(f"Normalized tenant_id to {tenant_id}")
828
+ break
829
+
830
+ if coords:
831
+ lat, lon = coords["lat"], coords["lon"]
832
+ logger.info(f"βœ… Using city coordinates for {tenant_id}: {lat}, {lon}")
833
+
834
+ # Strategy 3: Extract location from message if still no coordinates
835
+ if lat is None or lon is None:
836
+ logger.info("No coordinates from tenant_id, trying to extract from message")
837
+ location_result = extract_location_detailed(message)
838
+
839
+ if location_result.status == LocationStatus.FOUND:
840
+ extracted_tenant_id = location_result.tenant_id
841
+ logger.info(f"πŸ“ Location extracted from message: {extracted_tenant_id}")
842
+
843
+ # Update tenant_id if we extracted a better one
844
+ if not tenant_id or tenant_id != extracted_tenant_id:
845
+ tenant_id = extracted_tenant_id
846
+ logger.info(f"Updated tenant_id to {tenant_id}")
847
+
848
+ # Get coordinates for extracted location
849
+ coords = get_city_coordinates(tenant_id)
850
+ if coords:
851
+ lat, lon = coords["lat"], coords["lon"]
852
+ logger.info(f"βœ… Coordinates found from message extraction: {lat}, {lon}")
853
+
854
+ # Final check: if still no coordinates, return error
855
+ if lat is None or lon is None:
856
+ logger.warning(f"❌ No coordinates available for weather request (tenant_id: {tenant_id})")
857
+ return OrchestrationResult(
858
+ intent=IntentType.WEATHER.value,
859
+ reply=(
860
+ "I need to know your location to check the weather! πŸ“ "
861
+ "You can tell me your city, or share your location."
862
+ ),
863
+ success=False,
864
+ error="Location required"
865
+ )
866
+
867
+ try:
868
+ # Use combined weather + events if compound intent detected
869
+ if is_compound and tenant_id and EVENT_WEATHER_AVAILABLE:
870
+ logger.info("Using weather+events combined handler")
871
+ result = await get_event_recommendations_with_weather(tenant_id, lat, lon)
872
+
873
+ # Build response
874
+ weather = result.get("weather", {})
875
+ weather_summary = result.get("weather_summary", "Weather unavailable")
876
+ suggestions = result.get("suggestions", [])
877
+
878
+ reply_lines = [f"🌀️ **Weather Update:**\n{weather_summary}\n"]
879
+
880
+ if suggestions:
881
+ reply_lines.append("\nπŸ“… **Event Suggestions Based on Weather:**")
882
+ for suggestion in suggestions[:5]: # Top 5 suggestions
883
+ reply_lines.append(f"β€’ {suggestion}")
884
+
885
+ reply = "\n".join(reply_lines)
886
+
887
+ return OrchestrationResult(
888
+ intent=IntentType.WEATHER.value,
889
+ reply=reply,
890
+ success=True,
891
+ data=result,
892
+ model_id="weather_events_combined"
893
+ )
894
+
895
+ else:
896
+ # Simple weather query using enhanced weather_agent
897
+ weather = await get_weather_for_location(lat, lon)
898
+
899
+ # Use enhanced weather_agent's format_weather_summary
900
+ if format_weather_summary:
901
+ weather_text = format_weather_summary(weather)
902
+ else:
903
+ # Fallback formatting
904
+ temp = weather.get("temperature", {}).get("value")
905
+ phrase = weather.get("phrase", "Conditions unavailable")
906
+ if temp:
907
+ weather_text = f"{phrase}, {int(temp)}Β°F"
908
+ else:
909
+ weather_text = phrase
910
+
911
+ # Get outfit recommendation from enhanced weather_agent
912
+ if recommend_outfit:
913
+ temp = weather.get("temperature", {}).get("value", 70)
914
+ condition = weather.get("phrase", "Clear")
915
+ outfit = recommend_outfit(temp, condition)
916
+ reply = f"🌀️ {weather_text}\n\nπŸ‘• {outfit}"
917
+ else:
918
+ reply = f"🌀️ {weather_text}"
919
+
920
+ return OrchestrationResult(
921
+ intent=IntentType.WEATHER.value,
922
+ reply=reply,
923
+ success=True,
924
+ data=weather,
925
+ model_id="azure-maps-weather"
926
+ )
927
+
928
+ except Exception as e:
929
+ logger.error(f"Weather error: {e}", exc_info=True)
930
+ return OrchestrationResult(
931
+ intent=IntentType.WEATHER.value,
932
+ reply=(
933
+ "I'm having trouble getting weather data right now. "
934
+ "Can I help you with something else? πŸ’›"
935
+ ),
936
+ success=False,
937
+ error=str(e),
938
+ fallback_used=True
939
+ )
940
+
941
+
942
async def _handle_events(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float],
    intent_result: IntentMatch
) -> OrchestrationResult:
    """
    πŸ“… Events handler.

    Delegates event lookups to the tool agent, degrading gracefully
    when the agent is unavailable or the lookup fails.

    Args:
        message: Raw user message.
        context: Request context (role, session info, ...).
        tenant_id: City identifier; required for event lookups.
        lat: Optional latitude forwarded to the tool agent.
        lon: Optional longitude forwarded to the tool agent.
        intent_result: Intent classification details (not used here).

    Returns:
        OrchestrationResult describing the outcome.
    """
    logger.info("πŸ“… Processing events request")

    # A city is mandatory — prompt the user for one if it's missing.
    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'd love to help you find events! πŸ“… "
                "Which city are you interested in? "
                "I have information for Atlanta, Birmingham, Chesterfield, "
                "El Paso, Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    # Degrade gracefully when the tool agent failed to load.
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "Event information isn't available right now. "
                "Try again soon! πŸ“…"
            ),
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        # Role defaults to "resident" for tool-agent compatibility.
        agent_payload = await handle_tool_request(
            user_input=message,
            role=context.get("role", "resident"),
            lat=lat,
            lon=lon,
            context=context
        )

        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=agent_payload.get("response", "Events information retrieved."),
            success=True,
            data=agent_payload,
            model_id="events_tool"
        )

    except Exception as exc:
        logger.error(f"Events error: {exc}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'm having trouble loading event information right now. "
                "Check back soon! πŸ“…"
            ),
            success=False,
            error=str(exc),
            fallback_used=True
        )
1017
+
1018
async def _handle_local_resources(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float]
) -> OrchestrationResult:
    """
    πŸ›οΈ Local resources handler (shelters, libraries, food banks, etc.).

    Delegates resource lookups to the tool agent, degrading gracefully
    when the agent is unavailable or the lookup fails.

    Args:
        message: Raw user message.
        context: Request context (role, session info, ...).
        tenant_id: City identifier; required for resource lookups.
        lat: Optional latitude forwarded to the tool agent.
        lon: Optional longitude forwarded to the tool agent.

    Returns:
        OrchestrationResult describing the outcome.
    """
    logger.info("πŸ›οΈ Processing local resources request")

    # A city is mandatory — prompt the user for one if it's missing.
    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I can help you find local resources! πŸ›οΈ "
                "Which city do you need help in? "
                "I cover Atlanta, Birmingham, Chesterfield, El Paso, "
                "Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    # Degrade gracefully when the tool agent failed to load.
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "Resource information isn't available right now. "
                "Try again soon! πŸ›οΈ"
            ),
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        # Role defaults to "resident" for tool-agent compatibility.
        agent_payload = await handle_tool_request(
            user_input=message,
            role=context.get("role", "resident"),
            lat=lat,
            lon=lon,
            context=context
        )

        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=agent_payload.get("response", "Resource information retrieved."),
            success=True,
            data=agent_payload,
            model_id="resources_tool"
        )

    except Exception as exc:
        logger.error(f"Resources error: {exc}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I'm having trouble finding resource information right now. "
                "Would you like to try a different search? πŸ’›"
            ),
            success=False,
            error=str(exc),
            fallback_used=True
        )
1091
+
1092
+
1093
async def _handle_conversational(
    message: str,
    intent: IntentType,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    πŸ’¬ Handles conversational intents (greeting, help, unknown).

    Prefers Penny's core LLM for a natural reply; falls back to
    hardcoded friendly responses when the LLM is unavailable or fails.

    Args:
        message: Raw user message.
        intent: Classified conversational intent.
        context: Request context (unused by this handler).

    Returns:
        OrchestrationResult with either an LLM-generated or canned reply.
    """
    logger.info(f"πŸ’¬ Processing conversational intent: {intent.value}")

    try:
        if not LLM_AVAILABLE:
            # No LLM loaded — drop straight into the fallback path below.
            logger.info("LLM not available, using fallback responses")
            raise Exception("LLM service not loaded")

        # Intent-specific prompts; any other intent gets the clarification prompt.
        clarification_prompt = (
            f"The user said: '{message}'\n\n"
            "You're not sure what they need help with. "
            "Respond kindly, acknowledge their request, and ask them to "
            "clarify or rephrase. Mention a few things you can help with."
        )
        prompts = {
            IntentType.GREETING: (
                f"The user greeted you with: '{message}'\n\n"
                "Respond warmly as Penny, introduce yourself briefly, "
                "and ask how you can help them with civic services today."
            ),
            IntentType.HELP: (
                f"The user asked for help: '{message}'\n\n"
                "Explain Penny's main features:\n"
                "- Finding local resources (shelters, libraries, food banks)\n"
                "- Community events and activities\n"
                "- Weather information\n"
                "- 27-language translation\n"
                "- Document processing help\n\n"
                "Ask which city they need assistance in."
            ),
        }
        prompt = prompts.get(intent, clarification_prompt)

        # Call Penny's core LLM.
        llm_result = await generate_response(prompt=prompt, max_new_tokens=200)

        # Compatibility helper normalizes success/error across result shapes.
        success, error = _check_result_success(llm_result, ["response"])
        if not success:
            raise Exception(error or "LLM generation failed")

        return OrchestrationResult(
            intent=intent.value,
            reply=llm_result.get("response", ""),
            success=True,
            data=llm_result,
            model_id=CORE_MODEL_ID
        )

    except Exception as exc:
        logger.warning(f"Conversational handler using fallback: {exc}")

        # Hardcoded fallback responses (Penny's friendly voice).
        canned_replies = {
            IntentType.GREETING: (
                "Hi there! πŸ‘‹ I'm Penny, your civic assistant. "
                "I can help you find local resources, events, weather, and more. "
                "What city are you in?"
            ),
            IntentType.HELP: (
                "I'm Penny! πŸ’› I can help you with:\n\n"
                "πŸ›οΈ Local resources (shelters, libraries, food banks)\n"
                "πŸ“… Community events\n"
                "🌀️ Weather updates\n"
                "🌍 Translation (27 languages)\n"
                "πŸ“„ Document help\n\n"
                "What would you like to know about?"
            ),
            IntentType.UNKNOWN: (
                "I'm not sure I understood that. Could you rephrase? "
                "I'm best at helping with local services, events, weather, "
                "and translation! πŸ’¬"
            )
        }

        return OrchestrationResult(
            intent=intent.value,
            reply=canned_replies.get(intent, "How can I help you today? πŸ’›"),
            success=True,
            model_id="fallback",
            fallback_used=True
        )
1194
+
1195
+
1196
async def _handle_fallback(
    message: str,
    intent: IntentType,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    πŸ†˜ Ultimate fallback handler for unhandled intents.

    Safety net that should rarely trigger; guarantees the user always
    receives a helpful response even for intents with no handler.

    Args:
        message: Raw user message (unused — kept for handler signature parity).
        intent: The intent that had no dedicated handler.
        context: Request context (unused).

    Returns:
        OrchestrationResult flagged as unsuccessful with fallback_used set.
    """
    logger.warning(f"⚠️ Fallback triggered for intent: {intent.value}")

    capability_menu = "\n".join([
        "I'm best at:",
        "πŸ›οΈ Finding local resources",
        "πŸ“… Community events",
        "🌀️ Weather updates",
        "🌍 Translation",
    ])

    return OrchestrationResult(
        intent=intent.value,
        reply=(
            "I've processed your request, but I'm not sure how to help with that yet. "
            "I'm still learning! πŸ€–\n\n"
            + capability_menu
            + "\n\nCould you rephrase your question? πŸ’›"
        ),
        success=False,
        error="Unhandled intent",
        fallback_used=True
    )
1227
+
1228
+
1229
+ # ============================================================
1230
+ # HEALTH CHECK & DIAGNOSTICS (ENHANCED)
1231
+ # ============================================================
1232
+
1233
def get_orchestrator_health() -> Dict[str, Any]:
    """
    πŸ“Š Returns comprehensive orchestrator health status.

    Consumed by the application-level health endpoint to monitor the
    orchestrator together with every service it depends on.

    Returns:
        Dict containing:
        - status: "operational" or "degraded"
        - service_availability: per-service load flags
        - statistics: orchestration counters
        - supported_intents: every known intent type
        - features: feature flags for this orchestrator build
    """
    availability = get_service_availability()

    # Graceful degradation: the orchestrator is still "operational" when
    # optional services are down, but weather and the tool agent are required.
    required_services = ("weather", "tool_agent")
    all_critical_up = all(
        availability.get(name, False) for name in required_services
    )
    status = "operational" if all_critical_up else "degraded"

    return {
        "status": status,
        "core_model": CORE_MODEL_ID,
        "max_response_time_ms": MAX_RESPONSE_TIME_MS,
        "statistics": {
            "total_orchestrations": _orchestration_count,
            "emergency_interactions": _emergency_count
        },
        "service_availability": availability,
        "supported_intents": [member.value for member in IntentType],
        "features": {
            "emergency_routing": True,
            "compound_intents": True,
            "fallback_handling": True,
            "performance_tracking": True,
            "context_aware": True,
            "multi_language": TRANSLATION_AVAILABLE,
            "sentiment_analysis": SENTIMENT_AVAILABLE,
            "bias_detection": BIAS_AVAILABLE,
            "weather_integration": WEATHER_AGENT_AVAILABLE,
            "event_recommendations": EVENT_WEATHER_AVAILABLE
        }
    }
1281
+
1282
+
1283
def get_orchestrator_stats() -> Dict[str, Any]:
    """
    πŸ“ˆ Returns orchestrator statistics.

    Useful for monitoring and analytics.

    Returns:
        Dict with orchestration/emergency counters plus how many of the
        registered services are currently available.
    """
    # Snapshot availability once: the original queried get_service_availability()
    # twice, doing the probe work twice and risking an inconsistent count/total
    # pair if availability changed between the two calls.
    services = get_service_availability()
    return {
        "total_orchestrations": _orchestration_count,
        "emergency_interactions": _emergency_count,
        "services_available": sum(1 for available in services.values() if available),
        "services_total": len(services)
    }
1295
+
1296
+
1297
+ # ============================================================
1298
+ # TESTING & DEBUGGING (ENHANCED)
1299
+ # ============================================================
1300
+
1301
if __name__ == "__main__":
    # πŸ§ͺ Test the orchestrator with sample queries.
    # Run with: python -m app.orchestrator
    import asyncio

    banner = "=" * 60

    print(banner)
    print("πŸ§ͺ Testing Penny's Orchestrator")
    print(banner)

    # Report which backing services loaded before exercising any queries.
    print("\nπŸ“Š Service Availability Check:")
    svc_status = get_service_availability()
    for service, available in svc_status.items():
        status = "βœ…" if available else "❌"
        print(f"   {status} {service}: {'Available' if available else 'Not loaded'}")

    print("\n" + banner)

    sample_queries = [
        {
            "name": "Greeting",
            "message": "Hi Penny!",
            "context": {}
        },
        {
            "name": "Weather with location",
            "message": "What's the weather?",
            "context": {"lat": 33.7490, "lon": -84.3880}
        },
        {
            "name": "Events in city",
            "message": "Events in Atlanta",
            "context": {"tenant_id": "atlanta_ga"}
        },
        {
            "name": "Help request",
            "message": "I need help",
            "context": {}
        },
        {
            "name": "Translation",
            "message": "Translate hello",
            "context": {"source_lang": "eng_Latn", "target_lang": "spa_Latn"}
        }
    ]

    async def run_tests():
        # Run each sample query through the orchestrator and print a summary.
        for idx, case in enumerate(sample_queries, 1):
            print(f"\n--- Test {idx}: {case['name']} ---")
            print(f"Query: {case['message']}")

            try:
                result = await run_orchestrator(case["message"], case["context"])
                print(f"Intent: {result['intent']}")
                print(f"Success: {result['success']}")
                print(f"Fallback: {result.get('fallback_used', False)}")

                # Truncate long replies for readable console output.
                reply = result['reply']
                reply = reply if len(reply) <= 150 else reply[:150] + "..."
                print(f"Reply: {reply}")

                if result.get('response_time_ms'):
                    print(f"Response time: {result['response_time_ms']:.0f}ms")

            except Exception as exc:
                print(f"❌ Error: {exc}")

    asyncio.run(run_tests())

    print("\n" + banner)
    print("πŸ“Š Final Statistics:")
    for key, value in get_orchestrator_stats().items():
        print(f"   {key}: {value}")

    print("\n" + banner)
    print("βœ… Tests complete")
    print(banner)