pythonprincess committed on
Commit
dd1c89f
Β·
verified Β·
1 Parent(s): e6068a1

Upload orchestrator.py

Browse files
Files changed (1) hide show
  1. app/orchestrator.py +1263 -0
app/orchestrator.py ADDED
@@ -0,0 +1,1263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ 🎭 PENNY Orchestrator - Request Routing & Coordination Engine
3
+
4
+ This is Penny's decision-making brain. She analyzes each request, determines
5
+ the best way to help, and coordinates between her specialized AI models and
6
+ civic data tools.
7
+
8
+ MISSION: Route every resident request to the right resource while maintaining
9
+ Penny's warm, helpful personality and ensuring fast, accurate responses.
10
+
11
+ FEATURES:
12
+ - Enhanced intent classification with confidence scoring
13
+ - Compound intent handling (weather + events)
14
+ - Graceful fallbacks when services are unavailable
15
+ - Performance tracking for all operations
16
+ - Context-aware responses
17
+ - Emergency routing with immediate escalation
18
+
19
+ ENHANCEMENTS (Phase 1):
20
+ - βœ… Structured logging with performance tracking
21
+ - βœ… Safe imports with availability flags
22
+ - βœ… Result format checking helper
23
+ - βœ… Enhanced error handling patterns
24
+ - βœ… Service availability tracking
25
+ - βœ… Fixed function signature mismatches
26
+ - βœ… Integration with enhanced modules
27
+ """
28
+
29
+ import logging
30
+ import time
31
+ from typing import Dict, Any, Optional, List, Tuple
32
+ from datetime import datetime
33
+ from dataclasses import dataclass, field
34
+ from enum import Enum
35
+
36
+ # --- ENHANCED MODULE IMPORTS ---
37
+ from app.intents import classify_intent_detailed, IntentType, IntentMatch
38
+ from app.location_utils import (
39
+ extract_location_detailed,
40
+ LocationMatch,
41
+ LocationStatus,
42
+ get_city_coordinates
43
+ )
44
+ from app.logging_utils import (
45
+ log_interaction,
46
+ sanitize_for_logging,
47
+ LogLevel
48
+ )
49
+
50
# --- AGENT IMPORTS (with availability tracking) ---
# The module logger is created once here so the import-guard blocks below can
# report failures. logging.getLogger(__name__) always returns the same object
# for a given name, so the original pattern of re-assigning `logger` inside
# every `except ImportError` branch was redundant; hoisting it changes nothing
# observable and removes seven duplicated assignments.
logger = logging.getLogger(__name__)

try:
    from app.weather_agent import (
        get_weather_for_location,
        recommend_outfit,
        weather_to_event_recommendations,
        format_weather_summary
    )
    WEATHER_AGENT_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Weather agent not available: {e}")
    WEATHER_AGENT_AVAILABLE = False

try:
    from app.event_weather import get_event_recommendations_with_weather
    EVENT_WEATHER_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Event weather integration not available: {e}")
    EVENT_WEATHER_AVAILABLE = False

try:
    from app.tool_agent import handle_tool_request
    TOOL_AGENT_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Tool agent not available: {e}")
    TOOL_AGENT_AVAILABLE = False

# --- MODEL IMPORTS (with availability tracking) ---
try:
    from models.translation.translation_utils import translate_text
    TRANSLATION_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Translation service not available: {e}")
    TRANSLATION_AVAILABLE = False

try:
    from models.sentiment.sentiment_utils import get_sentiment_analysis
    SENTIMENT_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Sentiment service not available: {e}")
    SENTIMENT_AVAILABLE = False

try:
    from models.bias.bias_utils import check_bias
    BIAS_AVAILABLE = True
except ImportError as e:
    logger.warning(f"Bias detection service not available: {e}")
    BIAS_AVAILABLE = False

try:
    from models.gemma.gemma_utils import generate_response
    LLM_AVAILABLE = True
except ImportError as e:
    logger.warning(f"LLM service not available: {e}")
    LLM_AVAILABLE = False
112
+
113
# --- LOGGING SETUP ---
logger = logging.getLogger(__name__)  # standard per-module logger

# --- CONFIGURATION ---
# NOTE(review): CORE_MODEL_ID is not referenced anywhere in the visible code —
# confirm it is used elsewhere or remove it.
CORE_MODEL_ID = "penny-core-agent"
MAX_RESPONSE_TIME_MS = 5000  # 5 seconds - log if exceeded

# --- TRACKING COUNTERS ---
# Process-local counters incremented via `global` in run_orchestrator and
# _handle_emergency. No lock guards the increments — fine for a
# single-threaded asyncio worker; revisit if threads are introduced.
_orchestration_count = 0
_emergency_count = 0
123
+
124
+
125
+ # ============================================================
126
+ # COMPATIBILITY HELPER - Result Format Checking
127
+ # ============================================================
128
+
129
+ def _check_result_success(
130
+ result: Dict[str, Any],
131
+ expected_keys: List[str]
132
+ ) -> Tuple[bool, Optional[str]]:
133
+ """
134
+ βœ… Check if a utility function result indicates success.
135
+
136
+ Handles multiple return format patterns:
137
+ - Explicit "success" key (preferred)
138
+ - Presence of expected data keys (implicit success)
139
+ - Presence of "error" key (explicit failure)
140
+
141
+ This helper fixes compatibility issues where different utility
142
+ functions return different result formats.
143
+
144
+ Args:
145
+ result: Dictionary returned from utility function
146
+ expected_keys: List of keys that indicate successful data
147
+
148
+ Returns:
149
+ Tuple of (is_success, error_message)
150
+
151
+ Example:
152
+ result = await translate_text(message, "en", "es")
153
+ success, error = _check_result_success(result, ["translated_text"])
154
+ if success:
155
+ text = result.get("translated_text")
156
+ """
157
+ # Check for explicit success key
158
+ if "success" in result:
159
+ return result["success"], result.get("error")
160
+
161
+ # Check for explicit error (presence = failure)
162
+ if "error" in result and result["error"]:
163
+ return False, result["error"]
164
+
165
+ # Check for expected data keys (implicit success)
166
+ has_data = any(key in result for key in expected_keys)
167
+ if has_data:
168
+ return True, None
169
+
170
+ # Unknown format - assume failure
171
+ return False, "Unexpected response format"
172
+
173
+
174
+ # ============================================================
175
+ # SERVICE AVAILABILITY CHECK
176
+ # ============================================================
177
+
178
def get_service_availability() -> Dict[str, bool]:
    """
    πŸ“Š Report which optional services loaded successfully.

    The *_AVAILABLE flags are fixed at import time by the guarded import
    blocks at the top of this module. Callers use this map for health
    checks, debugging, and for choosing between a live service call and
    a fallback reply.

    Returns:
        Mapping of service name -> availability flag.
    """
    service_flags = (
        ("translation", TRANSLATION_AVAILABLE),
        ("sentiment", SENTIMENT_AVAILABLE),
        ("bias_detection", BIAS_AVAILABLE),
        ("llm", LLM_AVAILABLE),
        ("tool_agent", TOOL_AGENT_AVAILABLE),
        ("weather", WEATHER_AGENT_AVAILABLE),
        ("event_weather", EVENT_WEATHER_AVAILABLE),
    )
    return dict(service_flags)
197
+
198
+
199
+ # ============================================================
200
+ # ORCHESTRATION RESULT STRUCTURE
201
+ # ============================================================
202
+
203
@dataclass
class OrchestrationResult:
    """
    πŸ“¦ Structured result produced by the orchestration pipeline.

    Every intent handler returns one of these so downstream code
    (logging, the API layer) sees a single consistent shape regardless
    of which service produced the answer.
    """
    intent: str                               # Detected intent
    reply: str                                # User-facing response
    success: bool                             # Whether request succeeded
    tenant_id: Optional[str] = None           # City/location identifier
    data: Optional[Dict[str, Any]] = None     # Raw data from services
    model_id: Optional[str] = None            # Which model/service was used
    error: Optional[str] = None               # Error message if failed
    response_time_ms: Optional[float] = None  # Wall-clock latency in ms
    confidence: Optional[float] = None        # Intent confidence score
    fallback_used: bool = False               # True if fallback logic triggered

    def to_dict(self) -> Dict[str, Any]:
        """Export as a plain dict (e.g. for a JSON API response)."""
        # Explicit field list keeps the output key order stable.
        exported = (
            "intent", "reply", "success", "tenant_id", "data",
            "model_id", "error", "response_time_ms", "confidence",
            "fallback_used",
        )
        return {name: getattr(self, name) for name in exported}
236
+
237
+
238
+ # ============================================================
239
+ # MAIN ORCHESTRATOR FUNCTION (ENHANCED)
240
+ # ============================================================
241
+
242
async def run_orchestrator(
    message: str,
    context: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    🧠 Main decision-making brain of Penny.

    This function:
    1. Analyzes the user's message to determine intent
    2. Extracts location/city information
    3. Routes to the appropriate specialized service
    4. Handles errors gracefully with helpful fallbacks
    5. Tracks performance and logs the interaction

    Args:
        message: User's input text
        context: Additional context (tenant_id, lat, lon, session_id, etc.)

    Returns:
        Dictionary with response and metadata (OrchestrationResult.to_dict()).

    Example:
        result = await run_orchestrator(
            message="What's the weather in Atlanta?",
            context={"lat": 33.7490, "lon": -84.3880}
        )
    """
    # Counter is module-global; incremented on every call for logging metadata.
    global _orchestration_count
    _orchestration_count += 1

    start_time = time.time()

    # Initialize context if not provided
    if context is None:
        context = {}

    # Sanitize message for logging (PII protection)
    safe_message = sanitize_for_logging(message)
    logger.info(f"🎭 Orchestrator processing: '{safe_message[:50]}...'")

    try:
        # === STEP 1: CLASSIFY INTENT (Enhanced) ===
        intent_result = classify_intent_detailed(message)
        intent = intent_result.intent
        confidence = intent_result.confidence

        logger.info(
            f"Intent detected: {intent.value} "
            f"(confidence: {confidence:.2f})"
        )

        # === STEP 2: EXTRACT LOCATION ===
        tenant_id = context.get("tenant_id")
        lat = context.get("lat")
        lon = context.get("lon")

        # If tenant_id not provided, try to extract from message
        if not tenant_id or tenant_id == "unknown":
            location_result = extract_location_detailed(message)

            if location_result.status == LocationStatus.FOUND:
                tenant_id = location_result.tenant_id
                logger.info(f"Location extracted: {tenant_id}")

                # Get coordinates for this tenant if available.
                # Caller-supplied lat/lon always win; only fill when BOTH
                # are missing.
                coords = get_city_coordinates(tenant_id)
                if coords and lat is None and lon is None:
                    lat, lon = coords["lat"], coords["lon"]
                    logger.info(f"Coordinates loaded: {lat}, {lon}")

            elif location_result.status == LocationStatus.USER_LOCATION_NEEDED:
                logger.info("User location services needed")
            else:
                logger.info(f"No location detected: {location_result.status}")

        # === STEP 3: HANDLE EMERGENCY INTENTS (CRITICAL) ===
        # Emergencies return immediately and skip the STEP 5 metadata/logging
        # below — _handle_emergency does its own compliance logging.
        if intent == IntentType.EMERGENCY:
            return await _handle_emergency(
                message=message,
                context=context,
                start_time=start_time
            )

        # === STEP 4: ROUTE TO APPROPRIATE HANDLER ===
        # NOTE(review): _handle_sentiment, _handle_bias, _handle_document,
        # _handle_weather and _handle_fallback are referenced here but not
        # visible in this portion of the file — confirm they are all defined
        # (the _handle_weather `def` header appears to be missing; see the
        # orphaned try-block further down).

        # Translation
        if intent == IntentType.TRANSLATION:
            result = await _handle_translation(message, context)

        # Sentiment Analysis
        elif intent == IntentType.SENTIMENT_ANALYSIS:
            result = await _handle_sentiment(message, context)

        # Bias Detection
        elif intent == IntentType.BIAS_DETECTION:
            result = await _handle_bias(message, context)

        # Document Processing
        elif intent == IntentType.DOCUMENT_PROCESSING:
            result = await _handle_document(message, context)

        # Weather (includes compound weather+events handling)
        elif intent == IntentType.WEATHER:
            result = await _handle_weather(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon,
                intent_result=intent_result
            )

        # Events
        elif intent == IntentType.EVENTS:
            result = await _handle_events(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon,
                intent_result=intent_result
            )

        # Local Resources
        elif intent == IntentType.LOCAL_RESOURCES:
            result = await _handle_local_resources(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon
            )

        # Greeting, Help, Unknown
        elif intent in [IntentType.GREETING, IntentType.HELP, IntentType.UNKNOWN]:
            result = await _handle_conversational(
                message=message,
                intent=intent,
                context=context
            )

        else:
            # Unhandled intent type (shouldn't happen, but safety net)
            result = await _handle_fallback(message, intent, context)

        # === STEP 5: ADD METADATA & LOG INTERACTION ===
        # Handlers leave these fields unset; the orchestrator owns them.
        response_time = (time.time() - start_time) * 1000
        result.response_time_ms = round(response_time, 2)
        result.confidence = confidence
        result.tenant_id = tenant_id

        # Log the interaction with structured logging
        log_interaction(
            tenant_id=tenant_id or "unknown",
            interaction_type="orchestration",
            intent=intent.value,
            response_time_ms=response_time,
            success=result.success,
            metadata={
                "confidence": confidence,
                "fallback_used": result.fallback_used,
                "model_id": result.model_id,
                "orchestration_count": _orchestration_count
            }
        )

        # Log slow responses
        if response_time > MAX_RESPONSE_TIME_MS:
            logger.warning(
                f"⚠️ Slow response: {response_time:.0f}ms "
                f"(intent: {intent.value})"
            )

        logger.info(
            f"βœ… Orchestration complete: {intent.value} "
            f"({response_time:.0f}ms)"
        )

        return result.to_dict()

    except Exception as e:
        # === CATASTROPHIC FAILURE HANDLER ===
        # Broad catch is deliberate here: this is the top-level boundary for
        # a user request, and it logs with full traceback before replying.
        response_time = (time.time() - start_time) * 1000
        logger.error(
            f"❌ Orchestrator error: {e} "
            f"(response_time: {response_time:.0f}ms)",
            exc_info=True
        )

        # Log failed interaction
        log_interaction(
            tenant_id=context.get("tenant_id", "unknown"),
            interaction_type="orchestration_error",
            intent="error",
            response_time_ms=response_time,
            success=False,
            metadata={
                "error": str(e),
                "error_type": type(e).__name__
            }
        )

        error_result = OrchestrationResult(
            intent="error",
            reply=(
                "I'm having trouble processing your request right now. "
                "Please try again in a moment, or let me know if you need "
                "immediate assistance! πŸ’›"
            ),
            success=False,
            error=str(e),
            model_id="orchestrator",
            fallback_used=True,
            response_time_ms=round(response_time, 2)
        )

        return error_result.to_dict()
459
+
460
+
461
+ # ============================================================
462
+ # SPECIALIZED INTENT HANDLERS (ENHANCED)
463
+ # ============================================================
464
+
465
async def _handle_emergency(
    message: str,
    context: Dict[str, Any],
    start_time: float
) -> OrchestrationResult:
    """
    🚨 CRITICAL: Emergency intent handler.

    This function handles crisis situations with immediate routing
    to appropriate services. All emergency interactions are logged
    for compliance and safety tracking.

    IMPORTANT: This is a compliance-critical function. All emergency
    interactions must be logged and handled with priority.

    Args:
        message: User's input text (sanitized before logging).
        context: Request context; only "tenant_id" is read here.
        start_time: time.time() captured at the start of orchestration,
            used to report end-to-end latency in the compliance log.

    Returns:
        OrchestrationResult with static crisis-resource text; success=True.
    """
    # Counter is module-global; gives each emergency a sequence number in logs.
    global _emergency_count
    _emergency_count += 1

    # Sanitize message for logging (but keep full context for safety review)
    safe_message = sanitize_for_logging(message)
    logger.warning(f"🚨 EMERGENCY INTENT DETECTED (#{_emergency_count}): {safe_message[:100]}")

    # TODO: Integrate with safety_utils.py when enhanced
    # from app.safety_utils import route_emergency
    # result = await route_emergency(message, context)

    # For now, provide crisis resources
    reply = (
        "🚨 **If this is a life-threatening emergency, please call 911 immediately.**\n\n"
        "For crisis support:\n"
        "- **National Suicide Prevention Lifeline:** 988\n"
        "- **Crisis Text Line:** Text HOME to 741741\n"
        "- **National Domestic Violence Hotline:** 1-800-799-7233\n\n"
        "I'm here to help connect you with local resources. "
        "What kind of support do you need right now?"
    )

    # Log emergency interaction for compliance (CRITICAL)
    response_time = (time.time() - start_time) * 1000
    log_interaction(
        tenant_id=context.get("tenant_id", "emergency"),
        interaction_type="emergency",
        intent=IntentType.EMERGENCY.value,
        response_time_ms=response_time,
        success=True,
        metadata={
            "emergency_number": _emergency_count,
            "message_length": len(message),
            "timestamp": datetime.now().isoformat(),
            "action": "crisis_resources_provided"
        }
    )

    logger.critical(
        f"EMERGENCY LOG #{_emergency_count}: Resources provided "
        f"({response_time:.0f}ms)"
    )

    # This result bypasses run_orchestrator's STEP 5 post-processing, so
    # response_time_ms is filled in here rather than by the caller.
    return OrchestrationResult(
        intent=IntentType.EMERGENCY.value,
        reply=reply,
        success=True,
        model_id="emergency_router",
        data={"crisis_resources_provided": True},
        response_time_ms=round(response_time, 2)
    )
531
+
532
+
533
async def _handle_translation(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    🌍 Translation handler - 27 languages supported.

    Handles translation requests with graceful fallback if service
    is unavailable.

    Args:
        message: Text to translate (currently the raw user message).
        context: May carry "source_lang" / "target_lang" codes; defaults
            to eng_Latn -> spa_Latn.

    Returns:
        OrchestrationResult with the translated text, or a failure result
        with fallback_used=True.
    """
    logger.info("🌍 Processing translation request")

    # Check service availability first
    if not TRANSLATION_AVAILABLE:
        logger.warning("Translation service not available")
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply="Translation isn't available right now. Try again soon! 🌍",
            success=False,
            error="Service not loaded",
            fallback_used=True
        )

    try:
        # Extract language parameters from context
        source_lang = context.get("source_lang", "eng_Latn")
        target_lang = context.get("target_lang", "spa_Latn")

        # TODO: Parse languages from message when enhanced
        # Example: "Translate 'hello' to Spanish"

        result = await translate_text(message, source_lang, target_lang)

        # Use compatibility helper to check result
        success, error = _check_result_success(result, ["translated_text"])

        if success:
            translated = result.get("translated_text", "")
            reply = (
                f"Here's the translation:\n\n"
                f"**{translated}**\n\n"
                f"(Translated from {source_lang} to {target_lang})"
            )

            return OrchestrationResult(
                intent=IntentType.TRANSLATION.value,
                reply=reply,
                success=True,
                data=result,
                model_id="penny-translate-agent"
            )

        # FIX: the original raised a bare Exception here only to catch it in
        # its own `except` below (control flow by exception). Handle the
        # reported failure directly instead; unexpected exceptions from
        # translate_text still hit the except clause with a traceback.
        failure_reason = error or "Translation failed"
        logger.error(f"Translation error: {failure_reason}")
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply=(
                "I had trouble translating that. Could you rephrase? πŸ’¬"
            ),
            success=False,
            error=failure_reason,
            fallback_used=True
        )

    except Exception as e:
        logger.error(f"Translation error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply=(
                "I had trouble translating that. Could you rephrase? πŸ’¬"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
598
+
599
+
600
async def _handle_events(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float],
    intent_result: IntentMatch
) -> OrchestrationResult:
    """
    πŸ“… Events handler.

    Routes event queries to tool_agent with proper error handling
    and graceful degradation.

    NOTE(review): a SECOND `_handle_events` definition appears later in
    this file (around source line 825) and shadows this one at import
    time. That later copy does NOT forward `context=` to
    handle_tool_request, so the "FIXED: Add context parameter" change
    below is currently dead code. Consolidate to a single definition.
    """
    logger.info("πŸ“… Processing events request")

    # Events are per-city: without a tenant we can only ask which city.
    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'd love to help you find events! πŸ“… "
                "Which city are you interested in? "
                "I have information for Atlanta, Birmingham, Chesterfield, "
                "El Paso, Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    # Check tool agent availability
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "Event information isn't available right now. "
                "Try again soon! πŸ“…"
            ),
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        # FIXED: Add context parameter
        tool_response = await handle_tool_request(
            user_input=message,
            role=context.get("role", "resident"),
            lat=lat,
            lon=lon,
            context=context  # ✨ ADDED THIS LINE
        )

        reply = tool_response.get("response", "Events information retrieved.")

        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=reply,
            success=True,
            data=tool_response,
            model_id="events_tool"
        )

    except Exception as e:
        logger.error(f"Events error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'm having trouble loading event information right now. "
                "Check back soon! πŸ“…"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
675
+
676
async def _handle_local_resources(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float]
) -> OrchestrationResult:
    """
    πŸ›οΈ Local resources handler (shelters, libraries, food banks, etc.).

    Routes resource queries to tool_agent with proper error handling.

    NOTE(review): a SECOND `_handle_local_resources` definition appears
    later in this file (around source line 900) and shadows this one at
    import time; that later copy does NOT forward `context=` to
    handle_tool_request. Consolidate to a single definition.
    """
    logger.info("πŸ›οΈ Processing local resources request")

    # Resource lookups are per-city: prompt for a city when none is known.
    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I can help you find local resources! πŸ›οΈ "
                "Which city do you need help in? "
                "I cover Atlanta, Birmingham, Chesterfield, El Paso, "
                "Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    # Check tool agent availability
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "Resource information isn't available right now. "
                "Try again soon! πŸ›οΈ"
            ),
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        # FIXED: Add context parameter
        tool_response = await handle_tool_request(
            user_input=message,
            role=context.get("role", "resident"),
            lat=lat,
            lon=lon,
            context=context  # ✨ ADDED THIS LINE
        )

        reply = tool_response.get("response", "Resource information retrieved.")

        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=reply,
            success=True,
            data=tool_response,
            model_id="resources_tool"
        )

    except Exception as e:
        logger.error(f"Resources error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I'm having trouble finding resource information right now. "
                "Would you like to try a different search? πŸ’›"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
749
+
750
    # NOTE(review): ORPHANED BLOCK. This try/except appears to be the body of
    # a `_handle_weather(message, context, tenant_id, lat, lon, intent_result)`
    # implementation whose `def` header (and the derivation of `is_compound`)
    # is missing from the file as committed — it directly follows the `except`
    # return of `_handle_local_resources` and references names (`is_compound`,
    # `tenant_id`, `lat`, `lon`) that nothing in scope defines. As written the
    # module will fail at import time or misbehave here. Restore the function
    # header from the original source before shipping.
    try:
        # Use combined weather + events if compound intent detected
        if is_compound and tenant_id and EVENT_WEATHER_AVAILABLE:
            logger.info("Using weather+events combined handler")
            result = await get_event_recommendations_with_weather(tenant_id, lat, lon)

            # Build response
            weather = result.get("weather", {})
            weather_summary = result.get("weather_summary", "Weather unavailable")
            suggestions = result.get("suggestions", [])

            reply_lines = [f"🌀️ **Weather Update:**\n{weather_summary}\n"]

            if suggestions:
                reply_lines.append("\nπŸ“… **Event Suggestions Based on Weather:**")
                for suggestion in suggestions[:5]:  # Top 5 suggestions
                    reply_lines.append(f"β€’ {suggestion}")

            reply = "\n".join(reply_lines)

            return OrchestrationResult(
                intent=IntentType.WEATHER.value,
                reply=reply,
                success=True,
                data=result,
                model_id="weather_events_combined"
            )

        else:
            # Simple weather query using enhanced weather_agent
            weather = await get_weather_for_location(lat, lon)

            # Use enhanced weather_agent's format_weather_summary
            if format_weather_summary:
                weather_text = format_weather_summary(weather)
            else:
                # Fallback formatting
                temp = weather.get("temperature", {}).get("value")
                phrase = weather.get("phrase", "Conditions unavailable")
                if temp:
                    weather_text = f"{phrase}, {int(temp)}Β°F"
                else:
                    weather_text = phrase

            # Get outfit recommendation from enhanced weather_agent
            if recommend_outfit:
                temp = weather.get("temperature", {}).get("value", 70)
                condition = weather.get("phrase", "Clear")
                outfit = recommend_outfit(temp, condition)
                reply = f"🌀️ {weather_text}\n\nπŸ‘• {outfit}"
            else:
                reply = f"🌀️ {weather_text}"

            return OrchestrationResult(
                intent=IntentType.WEATHER.value,
                reply=reply,
                success=True,
                data=weather,
                model_id="azure-maps-weather"
            )

    except Exception as e:
        logger.error(f"Weather error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.WEATHER.value,
            reply=(
                "I'm having trouble getting weather data right now. "
                "Can I help you with something else? πŸ’›"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
823
+
824
+
825
async def _handle_events(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float],
    intent_result: IntentMatch
) -> OrchestrationResult:
    """
    πŸ“… Events handler.

    Routes event queries to tool_agent with proper error handling
    and graceful degradation.

    FIX: this is the second definition of `_handle_events` in the file and
    the one Python keeps at import time. The earlier copy deliberately
    forwarded `context=` to handle_tool_request ("FIXED: Add context
    parameter"), but this copy had dropped it, silently reverting that fix —
    restored below. The duplicate definition itself should be removed in a
    follow-up so only one copy remains.

    Args:
        message: User's input text.
        context: Request context (role, session info, etc.).
        tenant_id: City identifier; required to look up events.
        lat: Optional latitude forwarded to the tool agent.
        lon: Optional longitude forwarded to the tool agent.
        intent_result: Full classification result (currently unused here).

    Returns:
        OrchestrationResult with the events reply, or a graceful fallback.
    """
    logger.info("πŸ“… Processing events request")

    # Events are per-city: without a tenant we can only ask which city.
    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'd love to help you find events! πŸ“… "
                "Which city are you interested in? "
                "I have information for Atlanta, Birmingham, Chesterfield, "
                "El Paso, Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    # Check tool agent availability
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "Event information isn't available right now. "
                "Try again soon! πŸ“…"
            ),
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        tool_response = await handle_tool_request(
            user_input=message,
            role=context.get("role", "resident"),
            lat=lat,
            lon=lon,
            context=context  # restored: matches the earlier, intended call
        )

        reply = tool_response.get("response", "Events information retrieved.")

        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=reply,
            success=True,
            data=tool_response,
            model_id="events_tool"
        )

    except Exception as e:
        logger.error(f"Events error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'm having trouble loading event information right now. "
                "Check back soon! πŸ“…"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
899
+
900
async def _handle_local_resources(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float]
) -> OrchestrationResult:
    """
    πŸ›οΈ Local resources handler (shelters, libraries, food banks, etc.).

    Routes resource queries to tool_agent with proper error handling.

    FIX: this is the second definition of `_handle_local_resources` in the
    file and the one Python keeps at import time. The earlier copy
    deliberately forwarded `context=` to handle_tool_request ("FIXED: Add
    context parameter"), but this copy had dropped it, silently reverting
    that fix — restored below. The duplicate definition itself should be
    removed in a follow-up so only one copy remains.

    Args:
        message: User's input text.
        context: Request context (role, session info, etc.).
        tenant_id: City identifier; required to look up resources.
        lat: Optional latitude forwarded to the tool agent.
        lon: Optional longitude forwarded to the tool agent.

    Returns:
        OrchestrationResult with the resources reply, or a graceful fallback.
    """
    logger.info("πŸ›οΈ Processing local resources request")

    # Resource lookups are per-city: prompt for a city when none is known.
    if not tenant_id:
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I can help you find local resources! πŸ›οΈ "
                "Which city do you need help in? "
                "I cover Atlanta, Birmingham, Chesterfield, El Paso, "
                "Providence, and Seattle."
            ),
            success=False,
            error="City required"
        )

    # Check tool agent availability
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "Resource information isn't available right now. "
                "Try again soon! πŸ›οΈ"
            ),
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        tool_response = await handle_tool_request(
            user_input=message,
            role=context.get("role", "resident"),
            lat=lat,
            lon=lon,
            context=context  # restored: matches the earlier, intended call
        )

        reply = tool_response.get("response", "Resource information retrieved.")

        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=reply,
            success=True,
            data=tool_response,
            model_id="resources_tool"
        )

    except Exception as e:
        logger.error(f"Resources error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I'm having trouble finding resource information right now. "
                "Would you like to try a different search? πŸ’›"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
972
+
973
+
974
async def _handle_conversational(
    message: str,
    intent: IntentType,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    💬 Handles conversational intents (greeting, help, unknown).
    Uses Penny's core LLM for natural responses with graceful fallback.
    """
    logger.info(f"💬 Processing conversational intent: {intent.value}")

    # Prompt templates keyed by intent; anything unexpected falls through
    # to the "unknown" clarification prompt below.
    llm_prompts = {
        IntentType.GREETING: (
            f"The user greeted you with: '{message}'\n\n"
            "Respond warmly as Penny, introduce yourself briefly, "
            "and ask how you can help them with civic services today."
        ),
        IntentType.HELP: (
            f"The user asked for help: '{message}'\n\n"
            "Explain Penny's main features:\n"
            "- Finding local resources (shelters, libraries, food banks)\n"
            "- Community events and activities\n"
            "- Weather information\n"
            "- 27-language translation\n"
            "- Document processing help\n\n"
            "Ask which city they need assistance in."
        ),
    }
    unknown_prompt = (
        f"The user said: '{message}'\n\n"
        "You're not sure what they need help with. "
        "Respond kindly, acknowledge their request, and ask them to "
        "clarify or rephrase. Mention a few things you can help with."
    )

    try:
        if not LLM_AVAILABLE:
            # No LLM loaded — jump straight to the canned replies below.
            logger.info("LLM not available, using fallback responses")
            raise Exception("LLM service not loaded")

        # Call Penny's core LLM with the intent-specific prompt.
        llm_result = await generate_response(
            prompt=llm_prompts.get(intent, unknown_prompt),
            max_new_tokens=200
        )

        # Compatibility helper validates the result shape.
        ok, failure = _check_result_success(llm_result, ["response"])
        if not ok:
            raise Exception(failure or "LLM generation failed")

        return OrchestrationResult(
            intent=intent.value,
            reply=llm_result.get("response", ""),
            success=True,
            data=llm_result,
            model_id=CORE_MODEL_ID
        )

    except Exception as exc:
        logger.warning(f"Conversational handler using fallback: {exc}")

        # Hardcoded fallback responses (Penny's friendly voice).
        canned_replies = {
            IntentType.GREETING: (
                "Hi there! 👋 I'm Penny, your civic assistant. "
                "I can help you find local resources, events, weather, and more. "
                "What city are you in?"
            ),
            IntentType.HELP: (
                "I'm Penny! 💛 I can help you with:\n\n"
                "🏛️ Local resources (shelters, libraries, food banks)\n"
                "📅 Community events\n"
                "🌤️ Weather updates\n"
                "🌍 Translation (27 languages)\n"
                "📄 Document help\n\n"
                "What would you like to know about?"
            ),
            IntentType.UNKNOWN: (
                "I'm not sure I understood that. Could you rephrase? "
                "I'm best at helping with local services, events, weather, "
                "and translation! 💬"
            )
        }

        return OrchestrationResult(
            intent=intent.value,
            reply=canned_replies.get(intent, "How can I help you today? 💛"),
            success=True,
            model_id="fallback",
            fallback_used=True
        )
1075
+
1076
+
1077
async def _handle_fallback(
    message: str,
    intent: IntentType,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    🆘 Ultimate fallback handler for unhandled intents.

    Safety net that should rarely trigger; guarantees the user always
    receives a helpful response even when no handler matched.
    """
    logger.warning(f"⚠️ Fallback triggered for intent: {intent.value}")

    return OrchestrationResult(
        intent=intent.value,
        reply=(
            "I've processed your request, but I'm not sure how to help with that yet. "
            "I'm still learning! 🤖\n\n"
            "I'm best at:\n"
            "🏛️ Finding local resources\n"
            "📅 Community events\n"
            "🌤️ Weather updates\n"
            "🌍 Translation\n\n"
            "Could you rephrase your question? 💛"
        ),
        success=False,
        error="Unhandled intent",
        fallback_used=True
    )
1108
+
1109
+
1110
+ # ============================================================
1111
+ # HEALTH CHECK & DIAGNOSTICS (ENHANCED)
1112
+ # ============================================================
1113
+
1114
def get_orchestrator_health() -> Dict[str, Any]:
    """
    📊 Returns comprehensive orchestrator health status.

    Used by the main application health check endpoint to monitor
    the orchestrator and all its service dependencies.

    Returns:
        Dictionary with health information including:
        - status: operational/degraded
        - service_availability: which services are loaded
        - statistics: orchestration counts
        - supported_intents: list of all intent types
        - features: available orchestrator features
    """
    availability = get_service_availability()

    # The orchestrator stays "operational" under partial outages (graceful
    # degradation); only a missing critical service marks it degraded.
    missing_critical = [
        name for name in ("weather", "tool_agent")
        if not availability.get(name, False)
    ]
    overall_status = "degraded" if missing_critical else "operational"

    return {
        "status": overall_status,
        "core_model": CORE_MODEL_ID,
        "max_response_time_ms": MAX_RESPONSE_TIME_MS,
        "statistics": {
            "total_orchestrations": _orchestration_count,
            "emergency_interactions": _emergency_count
        },
        "service_availability": availability,
        "supported_intents": [member.value for member in IntentType],
        "features": {
            "emergency_routing": True,
            "compound_intents": True,
            "fallback_handling": True,
            "performance_tracking": True,
            "context_aware": True,
            "multi_language": TRANSLATION_AVAILABLE,
            "sentiment_analysis": SENTIMENT_AVAILABLE,
            "bias_detection": BIAS_AVAILABLE,
            "weather_integration": WEATHER_AGENT_AVAILABLE,
            "event_recommendations": EVENT_WEATHER_AVAILABLE
        }
    }
1162
+
1163
+
1164
def get_orchestrator_stats() -> Dict[str, Any]:
    """
    📈 Returns orchestrator statistics.

    Useful for monitoring and analytics.

    Returns:
        Dictionary with total orchestration/emergency counters plus how
        many dependent services are currently loaded out of the total.
    """
    # FIXED: call get_service_availability() once — the original called it
    # twice, doing redundant work and risking two inconsistent snapshots
    # inside a single stats dict.
    availability = get_service_availability()

    return {
        "total_orchestrations": _orchestration_count,
        "emergency_interactions": _emergency_count,
        "services_available": sum(1 for loaded in availability.values() if loaded),
        "services_total": len(availability)
    }
1176
+
1177
+
1178
+ # ============================================================
1179
+ # TESTING & DEBUGGING (ENHANCED)
1180
+ # ============================================================
1181
+
1182
if __name__ == "__main__":
    """
    🧪 Test the orchestrator with sample queries.
    Run with: python -m app.orchestrator
    """
    import asyncio

    BANNER = "=" * 60

    print(BANNER)
    print("🧪 Testing Penny's Orchestrator")
    print(BANNER)

    # Show which backing services actually loaded before running queries.
    print("\n📊 Service Availability Check:")
    for svc_name, is_up in get_service_availability().items():
        marker = "✅" if is_up else "❌"
        print(f" {marker} {svc_name}: {'Available' if is_up else 'Not loaded'}")

    print("\n" + BANNER)

    scenarios = [
        {
            "name": "Greeting",
            "message": "Hi Penny!",
            "context": {}
        },
        {
            "name": "Weather with location",
            "message": "What's the weather?",
            "context": {"lat": 33.7490, "lon": -84.3880}
        },
        {
            "name": "Events in city",
            "message": "Events in Atlanta",
            "context": {"tenant_id": "atlanta_ga"}
        },
        {
            "name": "Help request",
            "message": "I need help",
            "context": {}
        },
        {
            "name": "Translation",
            "message": "Translate hello",
            "context": {"source_lang": "eng_Latn", "target_lang": "spa_Latn"}
        }
    ]

    async def run_all():
        # Run every scenario through the orchestrator and summarize each result.
        for idx, case in enumerate(scenarios, 1):
            print(f"\n--- Test {idx}: {case['name']} ---")
            print(f"Query: {case['message']}")

            try:
                outcome = await run_orchestrator(case["message"], case["context"])
                print(f"Intent: {outcome['intent']}")
                print(f"Success: {outcome['success']}")
                print(f"Fallback: {outcome.get('fallback_used', False)}")

                # Truncate long replies so the console stays readable.
                text = outcome['reply']
                if len(text) > 150:
                    text = text[:150] + "..."
                print(f"Reply: {text}")

                if outcome.get('response_time_ms'):
                    print(f"Response time: {outcome['response_time_ms']:.0f}ms")

            except Exception as exc:
                print(f"❌ Error: {exc}")

    asyncio.run(run_all())

    print("\n" + BANNER)
    print("📊 Final Statistics:")
    for stat_name, stat_value in get_orchestrator_stats().items():
        print(f" {stat_name}: {stat_value}")

    print("\n" + BANNER)
    print("✅ Tests complete")
    print(BANNER)