pythonprincess committed on
Commit
37a2956
·
verified ·
1 Parent(s): 1844ac6

Delete app/orchestrator.py

Browse files
Files changed (1) hide show
  1. app/orchestrator.py +0 -1442
app/orchestrator.py DELETED
@@ -1,1442 +0,0 @@
1
- """
2
- 🎭 PENNY Orchestrator - Request Routing & Coordination Engine
3
-
4
- This is Penny's decision-making brain. She analyzes each request, determines
5
- the best way to help, and coordinates between her specialized AI models and
6
- civic data tools.
7
-
8
- MISSION: Route every resident request to the right resource while maintaining
9
- Penny's warm, helpful personality and ensuring fast, accurate responses.
10
-
11
- FEATURES:
12
- - Enhanced intent classification with confidence scoring
13
- - Compound intent handling (weather + events)
14
- - Graceful fallbacks when services are unavailable
15
- - Performance tracking for all operations
16
- - Context-aware responses
17
- - Emergency routing with immediate escalation
18
-
19
- ENHANCEMENTS (Phase 1):
20
- - ✅ Structured logging with performance tracking
21
- - ✅ Safe imports with availability flags
22
- - ✅ Result format checking helper
23
- - ✅ Enhanced error handling patterns
24
- - ✅ Service availability tracking
25
- - ✅ Fixed function signature mismatches
26
- - ✅ Integration with enhanced modules
27
- """
28
-
29
- import logging
30
- import time
31
- from typing import Dict, Any, Optional, List, Tuple
32
- from datetime import datetime
33
- from dataclasses import dataclass, field
34
- from enum import Enum
35
-
36
- # --- ENHANCED MODULE IMPORTS ---
37
- from app.intents import classify_intent_detailed, IntentType, IntentMatch
38
- from app.location_utils import (
39
- extract_location_detailed,
40
- LocationMatch,
41
- LocationStatus,
42
- get_city_coordinates
43
- )
44
- from app.logging_utils import (
45
- log_interaction,
46
- sanitize_for_logging,
47
- LogLevel
48
- )
49
-
50
- # --- AGENT IMPORTS (with availability tracking) ---
51
- try:
52
- from app.weather_agent import (
53
- get_weather_for_location,
54
- recommend_outfit,
55
- weather_to_event_recommendations,
56
- format_weather_summary
57
- )
58
- WEATHER_AGENT_AVAILABLE = True
59
- except ImportError as e:
60
- logger = logging.getLogger(__name__)
61
- logger.warning(f"Weather agent not available: {e}")
62
- WEATHER_AGENT_AVAILABLE = False
63
-
64
- try:
65
- from app.event_weather import get_event_recommendations_with_weather
66
- EVENT_WEATHER_AVAILABLE = True
67
- except ImportError as e:
68
- logger = logging.getLogger(__name__)
69
- logger.warning(f"Event weather integration not available: {e}")
70
- EVENT_WEATHER_AVAILABLE = False
71
-
72
- try:
73
- from app.tool_agent import handle_tool_request
74
- TOOL_AGENT_AVAILABLE = True
75
- except ImportError as e:
76
- logger = logging.getLogger(__name__)
77
- logger.warning(f"Tool agent not available: {e}")
78
- TOOL_AGENT_AVAILABLE = False
79
-
80
- # --- MODEL IMPORTS (with availability tracking) ---
81
- try:
82
- from models.translation.translation_utils import translate_text
83
- TRANSLATION_AVAILABLE = True
84
- except ImportError as e:
85
- logger = logging.getLogger(__name__)
86
- logger.warning(f"Translation service not available: {e}")
87
- TRANSLATION_AVAILABLE = False
88
-
89
- try:
90
- from models.sentiment.sentiment_utils import get_sentiment_analysis
91
- SENTIMENT_AVAILABLE = True
92
- except ImportError as e:
93
- logger = logging.getLogger(__name__)
94
- logger.warning(f"Sentiment service not available: {e}")
95
- SENTIMENT_AVAILABLE = False
96
-
97
- try:
98
- from models.bias.bias_utils import check_bias
99
- BIAS_AVAILABLE = True
100
- except ImportError as e:
101
- logger = logging.getLogger(__name__)
102
- logger.warning(f"Bias detection service not available: {e}")
103
- BIAS_AVAILABLE = False
104
-
105
- try:
106
- from models.gemma.gemma_utils import generate_response
107
- LLM_AVAILABLE = True
108
- except ImportError as e:
109
- logger = logging.getLogger(__name__)
110
- logger.warning(f"LLM service not available: {e}")
111
- LLM_AVAILABLE = False
112
-
113
- # --- LOGGING SETUP ---
114
- logger = logging.getLogger(__name__)
115
-
116
- # --- CONFIGURATION ---
117
- CORE_MODEL_ID = "penny-core-agent"
118
- MAX_RESPONSE_TIME_MS = 5000 # 5 seconds - log if exceeded
119
-
120
- # --- TRACKING COUNTERS ---
121
- _orchestration_count = 0
122
- _emergency_count = 0
123
-
124
-
125
- # ============================================================
126
- # COMPATIBILITY HELPER - Result Format Checking
127
- # ============================================================
128
-
129
- def _check_result_success(
130
- result: Dict[str, Any],
131
- expected_keys: List[str]
132
- ) -> Tuple[bool, Optional[str]]:
133
- """
134
- ✅ Check if a utility function result indicates success.
135
-
136
- Handles multiple return format patterns:
137
- - Explicit "success" key (preferred)
138
- - Presence of expected data keys (implicit success)
139
- - Presence of "error" key (explicit failure)
140
-
141
- This helper fixes compatibility issues where different utility
142
- functions return different result formats.
143
-
144
- Args:
145
- result: Dictionary returned from utility function
146
- expected_keys: List of keys that indicate successful data
147
-
148
- Returns:
149
- Tuple of (is_success, error_message)
150
-
151
- Example:
152
- result = await translate_text(message, "en", "es")
153
- success, error = _check_result_success(result, ["translated_text"])
154
- if success:
155
- text = result.get("translated_text")
156
- """
157
- # Check for explicit success key
158
- if "success" in result:
159
- return result["success"], result.get("error")
160
-
161
- # Check for explicit error (presence = failure)
162
- if "error" in result and result["error"]:
163
- return False, result["error"]
164
-
165
- # Check for expected data keys (implicit success)
166
- has_data = any(key in result for key in expected_keys)
167
- if has_data:
168
- return True, None
169
-
170
- # Unknown format - assume failure
171
- return False, "Unexpected response format"
172
-
173
-
174
- # ============================================================
175
- # SERVICE AVAILABILITY CHECK
176
- # ============================================================
177
-
178
def get_service_availability() -> Dict[str, bool]:
    """
    📊 Snapshot of which backing services imported successfully.

    Consumed by health checks, debugging tools, and handlers deciding
    between a real service call and a fallback reply.

    Returns:
        Mapping of service name -> availability flag (flags are set once,
        at module import time).
    """
    availability: Dict[str, bool] = {
        "translation": TRANSLATION_AVAILABLE,
        "sentiment": SENTIMENT_AVAILABLE,
        "bias_detection": BIAS_AVAILABLE,
        "llm": LLM_AVAILABLE,
        "tool_agent": TOOL_AGENT_AVAILABLE,
        "weather": WEATHER_AGENT_AVAILABLE,
        "event_weather": EVENT_WEATHER_AVAILABLE,
    }
    return availability
197
-
198
-
199
- # ============================================================
200
- # ORCHESTRATION RESULT STRUCTURE
201
- # ============================================================
202
-
203
@dataclass
class OrchestrationResult:
    """
    📦 Structured result produced by the orchestration pipeline.

    Shared result shape used across handlers so it's easy to track what
    happened during request processing.
    """
    intent: str                                  # Detected intent
    reply: str                                   # User-facing response
    success: bool                                # Whether request succeeded
    tenant_id: Optional[str] = None              # City/location identifier
    data: Optional[Dict[str, Any]] = None        # Raw data from services
    model_id: Optional[str] = None               # Which model/service was used
    error: Optional[str] = None                  # Error message if failed
    response_time_ms: Optional[float] = None     # Wall-clock latency
    confidence: Optional[float] = None           # Intent confidence score
    fallback_used: bool = False                  # True if fallback logic triggered

    def to_dict(self) -> Dict[str, Any]:
        """Serialize all fields into a plain dict for API responses."""
        field_names = (
            "intent", "reply", "success", "tenant_id", "data",
            "model_id", "error", "response_time_ms", "confidence",
            "fallback_used",
        )
        return {name: getattr(self, name) for name in field_names}
236
-
237
-
238
- # ============================================================
239
- # MAIN ORCHESTRATOR FUNCTION (ENHANCED)
240
- # ============================================================
241
-
242
async def run_orchestrator(
    message: str,
    context: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    🧠 Main decision-making brain of Penny.

    Pipeline:
        1. Classify the message's intent (with confidence scoring).
        2. Resolve location (tenant_id / lat / lon) from context or message.
        3. Short-circuit emergencies to the crisis handler.
        4. Route every other intent to its specialized handler.
        5. Attach timing metadata and log the interaction.

    Args:
        message: User's input text.
        context: Additional context (tenant_id, lat, lon, session_id, etc.).

    Returns:
        Dictionary with the response and metadata
        (see OrchestrationResult.to_dict).

    Example:
        result = await run_orchestrator(
            message="What's the weather in Atlanta?",
            context={"lat": 33.7490, "lon": -84.3880}
        )

    FIXES:
        - Restored the WEATHER routing branch: a full copy of
          _handle_weather had been pasted into the middle of the if/elif
          routing chain, which broke the chain's syntax (`elif` with no
          preceding `if`) and left weather requests unrouted. The branch
          now delegates to the module-level _handle_weather like every
          other intent.
        - The EMERGENCY path now returns a dict (via .to_dict()) like all
          other paths, matching the declared return type.
    """
    global _orchestration_count
    _orchestration_count += 1

    start_time = time.time()

    # Initialize context if not provided
    if context is None:
        context = {}

    # Sanitize message for logging (PII protection)
    safe_message = sanitize_for_logging(message)
    logger.info(f"🎭 Orchestrator processing: '{safe_message[:50]}...'")

    try:
        # === STEP 1: CLASSIFY INTENT ===
        intent_result = classify_intent_detailed(message)
        intent = intent_result.intent
        confidence = intent_result.confidence

        logger.info(
            f"Intent detected: {intent.value} "
            f"(confidence: {confidence:.2f})"
        )

        # === STEP 2: EXTRACT LOCATION ===
        tenant_id = context.get("tenant_id")
        lat = context.get("lat")
        lon = context.get("lon")

        # If tenant_id not provided, try to extract it from the message
        if not tenant_id or tenant_id == "unknown":
            location_result = extract_location_detailed(message)

            if location_result.status == LocationStatus.FOUND:
                tenant_id = location_result.tenant_id
                logger.info(f"Location extracted: {tenant_id}")

                # Get coordinates for this tenant if available
                coords = get_city_coordinates(tenant_id)
                if coords and lat is None and lon is None:
                    lat, lon = coords["lat"], coords["lon"]
                    logger.info(f"Coordinates loaded: {lat}, {lon}")

            elif location_result.status == LocationStatus.USER_LOCATION_NEEDED:
                logger.info("User location services needed")
            else:
                logger.info(f"No location detected: {location_result.status}")

        # === STEP 3: HANDLE EMERGENCY INTENTS (CRITICAL) ===
        if intent == IntentType.EMERGENCY:
            # .to_dict() keeps the return type consistent with every
            # other path out of this function.
            emergency_result = await _handle_emergency(
                message=message,
                context=context,
                start_time=start_time
            )
            return emergency_result.to_dict()

        # === STEP 4: ROUTE TO APPROPRIATE HANDLER ===

        # Translation
        if intent == IntentType.TRANSLATION:
            result = await _handle_translation(message, context)

        # Sentiment Analysis
        elif intent == IntentType.SENTIMENT_ANALYSIS:
            result = await _handle_sentiment(message, context)

        # Bias Detection
        elif intent == IntentType.BIAS_DETECTION:
            result = await _handle_bias(message, context)

        # Document Processing
        elif intent == IntentType.DOCUMENT_PROCESSING:
            result = await _handle_document(message, context)

        # Weather (includes compound weather+events handling)
        elif intent == IntentType.WEATHER:
            result = await _handle_weather(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon,
                intent_result=intent_result
            )

        # Events
        elif intent == IntentType.EVENTS:
            result = await _handle_events(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon,
                intent_result=intent_result
            )

        # Local Resources
        elif intent == IntentType.LOCAL_RESOURCES:
            result = await _handle_local_resources(
                message=message,
                context=context,
                tenant_id=tenant_id,
                lat=lat,
                lon=lon
            )

        # Greeting, Help, Unknown
        elif intent in [IntentType.GREETING, IntentType.HELP, IntentType.UNKNOWN]:
            result = await _handle_conversational(
                message=message,
                intent=intent,
                context=context
            )

        else:
            # Unhandled intent type (shouldn't happen, but safety net)
            result = await _handle_fallback(message, intent, context)

        # === STEP 5: ADD METADATA & LOG INTERACTION ===
        response_time = (time.time() - start_time) * 1000
        result.response_time_ms = round(response_time, 2)
        result.confidence = confidence
        result.tenant_id = tenant_id

        # Log the interaction with structured logging
        log_interaction(
            tenant_id=tenant_id or "unknown",
            interaction_type="orchestration",
            intent=intent.value,
            response_time_ms=response_time,
            success=result.success,
            metadata={
                "confidence": confidence,
                "fallback_used": result.fallback_used,
                "model_id": result.model_id,
                "orchestration_count": _orchestration_count
            }
        )

        # Log slow responses
        if response_time > MAX_RESPONSE_TIME_MS:
            logger.warning(
                f"⚠️ Slow response: {response_time:.0f}ms "
                f"(intent: {intent.value})"
            )

        logger.info(
            f"✅ Orchestration complete: {intent.value} "
            f"({response_time:.0f}ms)"
        )

        return result.to_dict()

    except Exception as e:
        # === CATASTROPHIC FAILURE HANDLER ===
        response_time = (time.time() - start_time) * 1000
        logger.error(
            f"❌ Orchestrator error: {e} "
            f"(response_time: {response_time:.0f}ms)",
            exc_info=True
        )

        # Log failed interaction
        log_interaction(
            tenant_id=context.get("tenant_id", "unknown"),
            interaction_type="orchestration_error",
            intent="error",
            response_time_ms=response_time,
            success=False,
            metadata={
                "error": str(e),
                "error_type": type(e).__name__
            }
        )

        error_result = OrchestrationResult(
            intent="error",
            reply=(
                "I'm having trouble processing your request right now. "
                "Please try again in a moment, or let me know if you need "
                "immediate assistance! 💛"
            ),
            success=False,
            error=str(e),
            model_id="orchestrator",
            fallback_used=True,
            response_time_ms=round(response_time, 2)
        )

        return error_result.to_dict()
584
-
585
-
586
- # ============================================================
587
- # SPECIALIZED INTENT HANDLERS (ENHANCED)
588
- # ============================================================
589
-
590
async def _handle_emergency(
    message: str,
    context: Dict[str, Any],
    start_time: float
) -> OrchestrationResult:
    """
    🚨 CRITICAL: Crisis-situation handler.

    Provides immediate crisis resources and records a compliance log
    entry for every emergency interaction. This path is compliance
    critical: each emergency must be logged and handled with priority.
    """
    global _emergency_count
    _emergency_count += 1

    # Redact PII before the message text reaches the logs
    redacted = sanitize_for_logging(message)
    logger.warning(f"🚨 EMERGENCY INTENT DETECTED (#{_emergency_count}): {redacted[:100]}")

    # TODO: Integrate with safety_utils.py when enhanced
    # from app.safety_utils import route_emergency
    # result = await route_emergency(message, context)

    # Static crisis resources until the dedicated router exists
    crisis_reply = (
        "🚨 **If this is a life-threatening emergency, please call 911 immediately.**\n\n"
        "For crisis support:\n"
        "- **National Suicide Prevention Lifeline:** 988\n"
        "- **Crisis Text Line:** Text HOME to 741741\n"
        "- **National Domestic Violence Hotline:** 1-800-799-7233\n\n"
        "I'm here to help connect you with local resources. "
        "What kind of support do you need right now?"
    )

    elapsed_ms = (time.time() - start_time) * 1000

    # Compliance-critical: every emergency interaction must be recorded
    audit_metadata = {
        "emergency_number": _emergency_count,
        "message_length": len(message),
        "timestamp": datetime.now().isoformat(),
        "action": "crisis_resources_provided"
    }
    log_interaction(
        tenant_id=context.get("tenant_id", "emergency"),
        interaction_type="emergency",
        intent=IntentType.EMERGENCY.value,
        response_time_ms=elapsed_ms,
        success=True,
        metadata=audit_metadata
    )

    logger.critical(
        f"EMERGENCY LOG #{_emergency_count}: Resources provided "
        f"({elapsed_ms:.0f}ms)"
    )

    return OrchestrationResult(
        intent=IntentType.EMERGENCY.value,
        reply=crisis_reply,
        success=True,
        model_id="emergency_router",
        data={"crisis_resources_provided": True},
        response_time_ms=round(elapsed_ms, 2)
    )
656
-
657
-
658
async def _handle_translation(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    🌍 Translation handler (27 supported languages).

    Delegates to the translation model and degrades gracefully both when
    the service failed to load and when the translation call errors out.
    """
    logger.info("🌍 Processing translation request")

    # Bail out early if the model never loaded
    if not TRANSLATION_AVAILABLE:
        logger.warning("Translation service not available")
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply="Translation isn't available right now. Try again soon! 🌍",
            success=False,
            error="Service not loaded",
            fallback_used=True
        )

    try:
        # Language pair comes from context; defaults are English -> Spanish
        source_lang = context.get("source_lang", "eng_Latn")
        target_lang = context.get("target_lang", "spa_Latn")

        # TODO: Parse languages from message when enhanced
        # Example: "Translate 'hello' to Spanish"

        result = await translate_text(message, source_lang, target_lang)

        # Normalize the heterogeneous result formats via the shared helper
        ok, failure_reason = _check_result_success(result, ["translated_text"])
        if not ok:
            raise Exception(failure_reason or "Translation failed")

        translated = result.get("translated_text", "")
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply=(
                f"Here's the translation:\n\n"
                f"**{translated}**\n\n"
                f"(Translated from {source_lang} to {target_lang})"
            ),
            success=True,
            data=result,
            model_id="penny-translate-agent"
        )

    except Exception as e:
        logger.error(f"Translation error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.TRANSLATION.value,
            reply=(
                "I had trouble translating that. Could you rephrase? 💬"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
723
-
724
-
725
async def _handle_sentiment(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    😊 Sentiment analysis handler.

    Runs the sentiment model over the message and reports the detected
    label plus confidence, falling back gracefully on any failure.
    """
    logger.info("😊 Processing sentiment analysis")

    # Bail out early if the model never loaded
    if not SENTIMENT_AVAILABLE:
        logger.warning("Sentiment service not available")
        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply="Sentiment analysis isn't available right now. Try again soon! 😊",
            success=False,
            error="Service not loaded",
            fallback_used=True
        )

    try:
        result = await get_sentiment_analysis(message)

        # Normalize the heterogeneous result formats via the shared helper
        ok, failure_reason = _check_result_success(result, ["label", "score"])
        if not ok:
            raise Exception(failure_reason or "Sentiment analysis failed")

        sentiment = result.get("label", "neutral")
        confidence = result.get("score", 0.0)
        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply=(
                f"The overall sentiment detected is: **{sentiment}**\n"
                f"Confidence: {confidence:.1%}"
            ),
            success=True,
            data=result,
            model_id="penny-sentiment-agent"
        )

    except Exception as e:
        logger.error(f"Sentiment analysis error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.SENTIMENT_ANALYSIS.value,
            reply="I couldn't analyze the sentiment right now. Try again? 😊",
            success=False,
            error=str(e),
            fallback_used=True
        )
782
-
783
async def _handle_bias(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    ⚖️ Bias detection handler.

    Analyzes text for potential bias patterns and reports the
    highest-scoring category, falling back gracefully on any failure.
    """
    logger.info("⚖️ Processing bias detection")

    # Bail out early if the model never loaded
    if not BIAS_AVAILABLE:
        logger.warning("Bias detection service not available")
        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply="Bias detection isn't available right now. Try again soon! ⚖️",
            success=False,
            error="Service not loaded",
            fallback_used=True
        )

    try:
        result = await check_bias(message)

        # Normalize the heterogeneous result formats via the shared helper
        ok, failure_reason = _check_result_success(result, ["analysis"])
        if not ok:
            raise Exception(failure_reason or "Bias detection failed")

        analysis = result.get("analysis", [])
        if analysis:
            # Report only the top-ranked category
            label = analysis[0].get("label", "unknown")
            score = analysis[0].get("score", 0.0)
            reply = (
                f"Bias analysis complete:\n\n"
                f"**Most likely category:** {label}\n"
                f"**Confidence:** {score:.1%}"
            )
        else:
            reply = "The text appears relatively neutral. ⚖️"

        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply=reply,
            success=True,
            data=result,
            model_id="penny-bias-checker"
        )

    except Exception as e:
        logger.error(f"Bias detection error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.BIAS_DETECTION.value,
            reply="I couldn't check for bias right now. Try again? ⚖️",
            success=False,
            error=str(e),
            fallback_used=True
        )
847
-
848
-
849
async def _handle_document(
    message: str,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    📄 Document processing handler.

    File uploads themselves are handled by FastAPI in router.py; this
    handler only tells the user how to submit a document.
    """
    logger.info("📄 Document processing requested")

    return OrchestrationResult(
        intent=IntentType.DOCUMENT_PROCESSING.value,
        reply=(
            "I can help you process documents! 📄\n\n"
            "Please upload your document (PDF or image) using the "
            "`/upload-document` endpoint. I can extract text, analyze forms, "
            "and help you understand civic documents.\n\n"
            "What kind of document do you need help with?"
        ),
        success=True,
        model_id="document_router"
    )
875
-
876
-
877
async def _handle_weather(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float],
    intent_result: IntentMatch
) -> OrchestrationResult:
    """
    🌤️ Weather handler with compound intent support.

    Handles both simple weather queries and compound weather+events
    queries via the enhanced weather_agent / event_weather modules.

    Args:
        message: Original user text (kept for interface parity with the
            other handlers; not parsed here).
        context: Request context dict.
        tenant_id: City identifier, if already resolved upstream.
        lat, lon: Coordinates, if already resolved upstream.
        intent_result: Detailed classification; used to detect the
            compound weather+events case.

    Returns:
        OrchestrationResult with the weather reply, or a graceful
        fallback/location-request result on failure.

    FIXES:
        - City coordinates are now applied when EITHER lat or lon is
          missing. Previously both had to be None, so a partially
          populated pair fell through to the "location required" reply
          even when the city was known.
        - A temperature of exactly 0 is no longer treated as missing
          (`if temp:` -> `if temp is not None:`).
    """
    logger.info("🌤️ Processing weather request")

    # Check service availability first
    if not WEATHER_AGENT_AVAILABLE:
        logger.warning("Weather agent not available")
        return OrchestrationResult(
            intent=IntentType.WEATHER.value,
            reply="Weather service isn't available right now. Try again soon! 🌤️",
            success=False,
            error="Weather agent not loaded",
            fallback_used=True
        )

    # Compound intent = user asked about weather AND events in one message
    is_compound = intent_result.is_compound or IntentType.EVENTS in intent_result.secondary_intents

    # Fill any missing coordinate from the tenant's known city center
    if (lat is None or lon is None) and tenant_id:
        coords = get_city_coordinates(tenant_id)
        if coords:
            lat, lon = coords["lat"], coords["lon"]
            logger.info(f"Using city coordinates for {tenant_id}: {lat}, {lon}")

    # Still nothing to query with — ask the user for a location
    if lat is None or lon is None:
        return OrchestrationResult(
            intent=IntentType.WEATHER.value,
            reply=(
                "I need to know your location to check the weather! 📍 "
                "You can tell me your city, or share your location."
            ),
            success=False,
            error="Location required"
        )

    try:
        # Use combined weather + events if compound intent detected
        if is_compound and tenant_id and EVENT_WEATHER_AVAILABLE:
            logger.info("Using weather+events combined handler")
            result = await get_event_recommendations_with_weather(tenant_id, lat, lon)

            weather_summary = result.get("weather_summary", "Weather unavailable")
            suggestions = result.get("suggestions", [])

            reply_lines = [f"🌤️ **Weather Update:**\n{weather_summary}\n"]
            if suggestions:
                reply_lines.append("\n📅 **Event Suggestions Based on Weather:**")
                for suggestion in suggestions[:5]:  # Top 5 suggestions
                    reply_lines.append(f"• {suggestion}")

            return OrchestrationResult(
                intent=IntentType.WEATHER.value,
                reply="\n".join(reply_lines),
                success=True,
                data=result,
                model_id="weather_events_combined"
            )

        # Simple weather query using the enhanced weather_agent
        weather = await get_weather_for_location(lat, lon)

        if format_weather_summary:
            weather_text = format_weather_summary(weather)
        else:
            # Fallback formatting when the enhanced formatter isn't loaded
            temp = weather.get("temperature", {}).get("value")
            phrase = weather.get("phrase", "Conditions unavailable")
            # FIX: 0°F is a valid temperature — check for None explicitly
            if temp is not None:
                weather_text = f"{phrase}, {int(temp)}°F"
            else:
                weather_text = phrase

        # Add an outfit suggestion when the recommender is loaded
        if recommend_outfit:
            temp = weather.get("temperature", {}).get("value", 70)
            condition = weather.get("phrase", "Clear")
            outfit = recommend_outfit(temp, condition)
            reply = f"🌤️ {weather_text}\n\n👕 {outfit}"
        else:
            reply = f"🌤️ {weather_text}"

        return OrchestrationResult(
            intent=IntentType.WEATHER.value,
            reply=reply,
            success=True,
            data=weather,
            model_id="azure-maps-weather"
        )

    except Exception as e:
        logger.error(f"Weather error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.WEATHER.value,
            reply=(
                "I'm having trouble getting weather data right now. "
                "Can I help you with something else? 💛"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
1000
-
1001
-
1002
async def _handle_events(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float],
    intent_result: IntentMatch
) -> OrchestrationResult:
    """
    📅 Events handler.

    Delegates event lookups to the tool agent, returning friendly
    degraded responses when a city is missing, the agent never loaded,
    or the lookup raises.

    Args:
        message: Raw user message.
        context: Request context; ``role`` is forwarded to the tool agent.
        tenant_id: City identifier (required for event lookups).
        lat: Optional latitude forwarded to the tool agent.
        lon: Optional longitude forwarded to the tool agent.
        intent_result: Classified intent details (unused here, kept for
            handler-signature symmetry).

    Returns:
        OrchestrationResult describing either the event data or a
        graceful failure.
    """
    logger.info("📅 Processing events request")

    # Guard: without a city we cannot query any event source.
    if not tenant_id:
        city_prompt = (
            "I'd love to help you find events! 📅 "
            "Which city are you interested in? I have information for "
            "Atlanta, Birmingham, Chesterfield, El Paso, Providence, and Seattle."
        )
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=city_prompt,
            success=False,
            error="City required"
        )

    # Guard: the tool agent must have imported successfully at startup.
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply="Event information isn't available right now. Try again soon! 📅",
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        # Role is required by the tool agent's signature (compatibility fix).
        caller_role = context.get("role", "resident")
        agent_payload = await handle_tool_request(
            user_input=message,
            role=caller_role,
            lat=lat,
            lon=lon,
            context=context
        )
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=agent_payload.get("response", "Events information retrieved."),
            success=True,
            data=agent_payload,
            model_id="events_tool"
        )
    except Exception as e:
        logger.error(f"Events error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.EVENTS.value,
            reply=(
                "I'm having trouble loading event information right now. "
                "Check back soon! 📅"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
1077
-
1078
async def _handle_local_resources(
    message: str,
    context: Dict[str, Any],
    tenant_id: Optional[str],
    lat: Optional[float],
    lon: Optional[float]
) -> OrchestrationResult:
    """
    🏛️ Local resources handler (shelters, libraries, food banks, etc.).

    Delegates resource lookups to the tool agent and degrades gracefully
    when a city is missing, the agent never loaded, or the lookup raises.

    Args:
        message: Raw user message.
        context: Request context; ``role`` is forwarded to the tool agent.
        tenant_id: City identifier (required for resource lookups).
        lat: Optional latitude forwarded to the tool agent.
        lon: Optional longitude forwarded to the tool agent.

    Returns:
        OrchestrationResult describing either the resource data or a
        graceful failure.
    """
    logger.info("🏛️ Processing local resources request")

    # Guard: a city is needed before any resource search can run.
    if not tenant_id:
        city_prompt = (
            "I can help you find local resources! 🏛️ "
            "Which city do you need help in? I cover "
            "Atlanta, Birmingham, Chesterfield, El Paso, Providence, and Seattle."
        )
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=city_prompt,
            success=False,
            error="City required"
        )

    # Guard: the tool agent must have imported successfully at startup.
    if not TOOL_AGENT_AVAILABLE:
        logger.warning("Tool agent not available")
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply="Resource information isn't available right now. Try again soon! 🏛️",
            success=False,
            error="Tool agent not loaded",
            fallback_used=True
        )

    try:
        # Role is required by the tool agent's signature (compatibility fix).
        caller_role = context.get("role", "resident")
        agent_payload = await handle_tool_request(
            user_input=message,
            role=caller_role,
            lat=lat,
            lon=lon,
            context=context
        )
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=agent_payload.get("response", "Resource information retrieved."),
            success=True,
            data=agent_payload,
            model_id="resources_tool"
        )
    except Exception as e:
        logger.error(f"Resources error: {e}", exc_info=True)
        return OrchestrationResult(
            intent=IntentType.LOCAL_RESOURCES.value,
            reply=(
                "I'm having trouble finding resource information right now. "
                "Would you like to try a different search? 💛"
            ),
            success=False,
            error=str(e),
            fallback_used=True
        )
1151
-
1152
-
1153
# Hardcoded fallback responses (Penny's friendly voice), used when the LLM is
# unavailable or generation fails. Hoisted to module level so the dict is not
# rebuilt on every call.
_CONVERSATIONAL_FALLBACKS = {
    IntentType.GREETING: (
        "Hi there! 👋 I'm Penny, your civic assistant. "
        "I can help you find local resources, events, weather, and more. "
        "What city are you in?"
    ),
    IntentType.HELP: (
        "I'm Penny! 💛 I can help you with:\n\n"
        "🏛️ Local resources (shelters, libraries, food banks)\n"
        "📅 Community events\n"
        "🌤️ Weather updates\n"
        "🌍 Translation (27 languages)\n"
        "📄 Document help\n\n"
        "What would you like to know about?"
    ),
    IntentType.UNKNOWN: (
        "I'm not sure I understood that. Could you rephrase? "
        "I'm best at helping with local services, events, weather, "
        "and translation! 💬"
    )
}


def _build_conversational_prompt(message: str, intent: IntentType) -> str:
    """Build the LLM prompt for a conversational intent.

    Greeting and help get tailored prompts; any other conversational
    intent is treated as an "unknown" request asking for clarification.
    """
    if intent == IntentType.GREETING:
        return (
            f"The user greeted you with: '{message}'\n\n"
            "Respond warmly as Penny, introduce yourself briefly, "
            "and ask how you can help them with civic services today."
        )

    if intent == IntentType.HELP:
        return (
            f"The user asked for help: '{message}'\n\n"
            "Explain Penny's main features:\n"
            "- Finding local resources (shelters, libraries, food banks)\n"
            "- Community events and activities\n"
            "- Weather information\n"
            "- 27-language translation\n"
            "- Document processing help\n\n"
            "Ask which city they need assistance in."
        )

    # UNKNOWN (or any other conversational intent)
    return (
        f"The user said: '{message}'\n\n"
        "You're not sure what they need help with. "
        "Respond kindly, acknowledge their request, and ask them to "
        "clarify or rephrase. Mention a few things you can help with."
    )


async def _handle_conversational(
    message: str,
    intent: IntentType,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    💬 Handles conversational intents (greeting, help, unknown).
    Uses Penny's core LLM for natural responses with graceful fallback.

    FIX: previously the "LLM not loaded" and "bad result shape" paths
    raised a generic ``Exception`` purely to reach the fallback handler;
    expected control flow now falls through without raising.

    Args:
        message: Raw user message.
        intent: Classified conversational intent.
        context: Request context (unused here, kept for handler symmetry).

    Returns:
        OrchestrationResult with an LLM reply (model_id=CORE_MODEL_ID) or
        a canned fallback (model_id="fallback", fallback_used=True).
    """
    logger.info(f"💬 Processing conversational intent: {intent.value}")

    if LLM_AVAILABLE:
        try:
            prompt = _build_conversational_prompt(message, intent)
            llm_result = await generate_response(prompt=prompt, max_new_tokens=200)

            # Compatibility helper validates the result shape.
            success, error = _check_result_success(llm_result, ["response"])
            if success:
                return OrchestrationResult(
                    intent=intent.value,
                    reply=llm_result.get("response", ""),
                    success=True,
                    data=llm_result,
                    model_id=CORE_MODEL_ID
                )

            # Expected failure: fall through to canned replies, no raise.
            logger.warning(
                f"Conversational handler using fallback: "
                f"{error or 'LLM generation failed'}"
            )
        except Exception as e:
            # Unexpected failure in the LLM call itself.
            logger.warning(f"Conversational handler using fallback: {e}")
    else:
        logger.info("LLM not available, using fallback responses")

    return OrchestrationResult(
        intent=intent.value,
        reply=_CONVERSATIONAL_FALLBACKS.get(intent, "How can I help you today? 💛"),
        success=True,
        model_id="fallback",
        fallback_used=True
    )
1254
-
1255
-
1256
async def _handle_fallback(
    message: str,
    intent: IntentType,
    context: Dict[str, Any]
) -> OrchestrationResult:
    """
    🆘 Ultimate fallback handler for unhandled intents.

    This is a safety net that should rarely trigger, but ensures
    users always get a helpful response.

    Args:
        message: Raw user message (unused; kept for handler symmetry).
        intent: The intent that no other handler claimed.
        context: Request context (unused; kept for handler symmetry).

    Returns:
        OrchestrationResult with success=False and a capability summary.
    """
    logger.warning(f"⚠️ Fallback triggered for intent: {intent.value}")

    # Summarize Penny's core capabilities so the user can re-aim their question.
    capabilities = "\n".join([
        "I'm best at:",
        "🏛️ Finding local resources",
        "📅 Community events",
        "🌤️ Weather updates",
        "🌍 Translation",
    ])

    apology = (
        "I've processed your request, but I'm not sure how to help with that yet. "
        "I'm still learning! 🤖\n\n"
        f"{capabilities}\n\n"
        "Could you rephrase your question? 💛"
    )

    return OrchestrationResult(
        intent=intent.value,
        reply=apology,
        success=False,
        error="Unhandled intent",
        fallback_used=True
    )
1287
-
1288
-
1289
- # ============================================================
1290
- # HEALTH CHECK & DIAGNOSTICS (ENHANCED)
1291
- # ============================================================
1292
-
1293
def get_orchestrator_health() -> Dict[str, Any]:
    """
    📊 Returns comprehensive orchestrator health status.

    Used by the main application health check endpoint to monitor
    the orchestrator and all its service dependencies.

    Returns:
        Dictionary with health information including:
        - status: operational/degraded
        - service_availability: which services are loaded
        - statistics: orchestration counts
        - supported_intents: list of all intent types
        - features: available orchestrator features
    """
    services = get_service_availability()

    # The orchestrator stays "operational" with partial service loss
    # (graceful degradation); only missing critical services degrade it.
    critical_available = True
    for required in ("weather", "tool_agent"):
        if not services.get(required, False):
            critical_available = False
            break

    return {
        "status": "operational" if critical_available else "degraded",
        "core_model": CORE_MODEL_ID,
        "max_response_time_ms": MAX_RESPONSE_TIME_MS,
        "statistics": {
            "total_orchestrations": _orchestration_count,
            "emergency_interactions": _emergency_count
        },
        "service_availability": services,
        "supported_intents": [member.value for member in IntentType],
        "features": {
            "emergency_routing": True,
            "compound_intents": True,
            "fallback_handling": True,
            "performance_tracking": True,
            "context_aware": True,
            "multi_language": TRANSLATION_AVAILABLE,
            "sentiment_analysis": SENTIMENT_AVAILABLE,
            "bias_detection": BIAS_AVAILABLE,
            "weather_integration": WEATHER_AGENT_AVAILABLE,
            "event_recommendations": EVENT_WEATHER_AVAILABLE
        }
    }
1341
-
1342
-
1343
def get_orchestrator_stats() -> Dict[str, Any]:
    """
    📈 Returns orchestrator statistics.

    Useful for monitoring and analytics.

    FIX: ``get_service_availability()`` was called twice per invocation;
    the availability map is now snapshotted once and reused.

    Returns:
        Dict with total/emergency orchestration counts and the number of
        available vs. total services.
    """
    services = get_service_availability()
    return {
        "total_orchestrations": _orchestration_count,
        "emergency_interactions": _emergency_count,
        "services_available": sum(1 for available in services.values() if available),
        "services_total": len(services)
    }
1355
-
1356
-
1357
- # ============================================================
1358
- # TESTING & DEBUGGING (ENHANCED)
1359
- # ============================================================
1360
-
1361
if __name__ == "__main__":
    """
    🧪 Test the orchestrator with sample queries.
    Run with: python -m app.orchestrator
    """
    import asyncio

    banner = "=" * 60

    print(banner)
    print("🧪 Testing Penny's Orchestrator")
    print(banner)

    # Show which backing services loaded before exercising any queries.
    print("\n📊 Service Availability Check:")
    for service, available in get_service_availability().items():
        marker = "✅" if available else "❌"
        print(f" {marker} {service}: {'Available' if available else 'Not loaded'}")

    print("\n" + banner)

    # Representative scenarios covering the main intent routes.
    scenarios = [
        {
            "name": "Greeting",
            "message": "Hi Penny!",
            "context": {}
        },
        {
            "name": "Weather with location",
            "message": "What's the weather?",
            "context": {"lat": 33.7490, "lon": -84.3880}
        },
        {
            "name": "Events in city",
            "message": "Events in Atlanta",
            "context": {"tenant_id": "atlanta_ga"}
        },
        {
            "name": "Help request",
            "message": "I need help",
            "context": {}
        },
        {
            "name": "Translation",
            "message": "Translate hello",
            "context": {"source_lang": "eng_Latn", "target_lang": "spa_Latn"}
        }
    ]

    async def run_tests():
        # Run every scenario through the full orchestration pipeline.
        for idx, case in enumerate(scenarios, 1):
            print(f"\n--- Test {idx}: {case['name']} ---")
            print(f"Query: {case['message']}")

            try:
                result = await run_orchestrator(case["message"], case["context"])
                print(f"Intent: {result['intent']}")
                print(f"Success: {result['success']}")
                print(f"Fallback: {result.get('fallback_used', False)}")

                # Truncate long replies for readable console output.
                reply = result['reply']
                if len(reply) > 150:
                    reply = reply[:150] + "..."
                print(f"Reply: {reply}")

                if result.get('response_time_ms'):
                    print(f"Response time: {result['response_time_ms']:.0f}ms")

            except Exception as e:
                print(f"❌ Error: {e}")

    asyncio.run(run_tests())

    print("\n" + banner)
    print("📊 Final Statistics:")
    for key, value in get_orchestrator_stats().items():
        print(f" {key}: {value}")

    print("\n" + banner)
    print("✅ Tests complete")
    print(banner)