Upload 14 files
Browse files- prompts/__init__.py +18 -0
- prompts/__pycache__/__init__.cpython-312.pyc +0 -0
- prompts/__pycache__/prompt_templates.cpython-312.pyc +0 -0
- prompts/prompt_templates.py +391 -0
- services/__init__.py +9 -0
- services/__pycache__/__init__.cpython-312.pyc +0 -0
- services/__pycache__/firebase_service.cpython-312.pyc +0 -0
- services/__pycache__/gemini_service.cpython-312.pyc +0 -0
- services/firebase_service.py +515 -0
- services/gemini_service.py +401 -0
- utils/__init__.py +8 -0
- utils/__pycache__/__init__.cpython-312.pyc +0 -0
- utils/__pycache__/response_parser.cpython-312.pyc +0 -0
- utils/response_parser.py +375 -0
prompts/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Prompts module for CareFlow Nexus AI Agents
|
| 3 |
+
Contains all prompt templates for Gemini AI interactions
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from .prompt_templates import (
|
| 7 |
+
BedAllocatorPrompts,
|
| 8 |
+
CommonPrompts,
|
| 9 |
+
StateManagerPrompts,
|
| 10 |
+
TaskCoordinatorPrompts,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"StateManagerPrompts",
|
| 15 |
+
"BedAllocatorPrompts",
|
| 16 |
+
"TaskCoordinatorPrompts",
|
| 17 |
+
"CommonPrompts",
|
| 18 |
+
]
|
prompts/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (500 Bytes). View file
|
|
|
prompts/__pycache__/prompt_templates.cpython-312.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
prompts/prompt_templates.py
ADDED
|
@@ -0,0 +1,391 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Prompt Templates for CareFlow Nexus AI Agents
|
| 3 |
+
Contains all prompt templates for State Manager, Bed Allocator, and Task Coordinator agents
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class StateManagerPrompts:
    """Prompt templates for State Manager Agent.

    All templates are plain ``str.format`` templates; literal braces in the
    embedded JSON examples are escaped as ``{{`` / ``}}``.
    """

    # Hospital-wide situation analysis. Placeholders: total_beds,
    # available_beds, occupied_beds, cleaning_beds, maintenance_beds,
    # utilization_rate, total_patients, waiting_patients, admitted_patients,
    # nurses_count, cleaners_count, total_staff, active_tasks, pending_tasks,
    # in_progress_tasks, overdue_tasks, ward_summary.
    # NOTE(review): original in-string indentation was lost in the diff
    # rendering; content below is reconstructed flush-left — confirm against
    # the uploaded file.
    STATE_ANALYSIS = """
You are a State Manager AI for a hospital bed management system.

CURRENT HOSPITAL STATE:

Beds:
- Total Beds: {total_beds}
- Available: {available_beds}
- Occupied: {occupied_beds}
- In Cleaning: {cleaning_beds}
- In Maintenance: {maintenance_beds}
- Utilization Rate: {utilization_rate}%

Patients:
- Total Patients: {total_patients}
- Waiting for Admission: {waiting_patients}
- Currently Admitted: {admitted_patients}

Staff:
- Nurses On Shift: {nurses_count}
- Cleaners On Shift: {cleaners_count}
- Total Staff Available: {total_staff}

Tasks:
- Active Tasks: {active_tasks}
- Pending Tasks: {pending_tasks}
- In Progress Tasks: {in_progress_tasks}
- Overdue Tasks: {overdue_tasks}

Ward Breakdown:
{ward_summary}

TASK:
Analyze this hospital state and identify:
1. Critical issues requiring immediate attention
2. Operational bottlenecks
3. Capacity forecast for next 4-6 hours
4. Proactive recommendations

Respond ONLY with valid JSON in this exact format:
{{
  "critical_alerts": [
    {{
      "type": "alert type",
      "severity": "critical/high/medium",
      "message": "what's wrong",
      "action_needed": "what to do"
    }}
  ],
  "bottlenecks": [
    {{
      "area": "which area (cleaning, staffing, beds)",
      "description": "issue description",
      "impact": "how it affects operations",
      "recommendation": "suggested solution"
    }}
  ],
  "capacity_forecast": {{
    "next_4_hours": "forecast description",
    "bed_availability_trend": "increasing/stable/decreasing",
    "staffing_adequacy": "sufficient/stretched/insufficient"
  }},
  "recommendations": [
    "proactive action 1",
    "proactive action 2"
  ]
}}
"""

    # Focused bottleneck scan over a pre-serialized operations snapshot.
    # Placeholder: operational_data.
    BOTTLENECK_DETECTION = """
Analyze the following hospital operations data for bottlenecks:

{operational_data}

Identify specific bottlenecks and provide actionable recommendations.

Respond with JSON:
{{
  "bottlenecks": [
    {{
      "type": "cleaning_backlog/staff_overload/bed_shortage",
      "severity": "low/medium/high/critical",
      "count": 0,
      "description": "detailed issue",
      "recommendation": "action to take"
    }}
  ]
}}
"""
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class BedAllocatorPrompts:
    """Prompt templates for Bed Allocator Agent.

    Templates are ``str.format`` strings; JSON-example braces are escaped
    as ``{{`` / ``}}``.
    """

    # Ranks pre-filtered candidate beds for one patient. Placeholders:
    # patient_name, age, gender, diagnosis, severity, mobility_status,
    # needs_oxygen, needs_ventilator, needs_cardiac_monitor, needs_isolation,
    # preferred_ward, beds_json, current_time, day_of_week, occupancy_rate,
    # staff_summary.
    # NOTE(review): in-string indentation reconstructed — confirm against
    # the uploaded file.
    BED_ALLOCATION = """
You are a Bed Allocator AI for a hospital. Your job is to match patients with the most suitable beds.

PATIENT INFORMATION:
- Name: {patient_name}
- Age: {age}
- Gender: {gender}
- Diagnosis: {diagnosis}
- Severity: {severity}
- Mobility Status: {mobility_status}

EXTRACTED REQUIREMENTS:
- Needs Oxygen: {needs_oxygen}
- Needs Ventilator: {needs_ventilator}
- Needs Cardiac Monitor: {needs_cardiac_monitor}
- Needs Isolation: {needs_isolation}
- Preferred Ward: {preferred_ward}

AVAILABLE BEDS (Pre-filtered by requirements):
{beds_json}

CURRENT CONTEXT:
- Time of Day: {current_time}
- Day of Week: {day_of_week}
- Overall Hospital Occupancy: {occupancy_rate}%
- Staff Availability: {staff_summary}

SCORING CRITERIA:
1. Medical Appropriateness (40 points):
   - Equipment match (oxygen, ventilator, monitors)
   - Isolation capability if needed
   - Ward specialization

2. Patient Safety & Comfort (25 points):
   - Proximity to nursing station for monitoring
   - Appropriate ward environment
   - Infection control considerations

3. Operational Efficiency (20 points):
   - Current ward workload distribution
   - Staff availability in ward
   - Bed location logistics

4. Resource Optimization (15 points):
   - Avoid over-specification (don't use ICU for minor cases)
   - Balance ward occupancy
   - Equipment availability vs future needs

TASK:
Rank the available beds and recommend the TOP 3 most suitable options.
For each bed, provide:
- Match score (0-100)
- Detailed reasoning
- Pros (advantages)
- Cons (concerns or limitations)

Respond ONLY with valid JSON in this exact format:
{{
  "recommendations": [
    {{
      "bed_id": "bed_id_here",
      "bed_number": "bed number",
      "ward": "ward name",
      "score": 85,
      "reasoning": "This bed is ideal because it has oxygen equipment which is required for the pneumonia patient. It's located in the Respiratory ward with specialized staff and has high proximity to nursing station (8/10) for close monitoring.",
      "pros": [
        "Has required oxygen equipment",
        "In specialized Respiratory ward",
        "Close to nursing station for monitoring",
        "Currently available and clean"
      ],
      "cons": [
        "Ward is at 75% capacity - relatively busy",
        "No cardiac monitor (not required but could be useful)"
      ]
    }}
  ],
  "overall_confidence": 90,
  "considerations": "Patient has moderate severity pneumonia requiring oxygen support and close monitoring. All recommended beds meet core requirements."
}}
"""

    # Derives equipment/ward/isolation needs from raw patient data.
    # Placeholders: age, gender, diagnosis, severity, admission_type,
    # mobility_status.
    REQUIREMENT_EXTRACTION = """
You are a medical requirements analyzer for hospital bed allocation.

PATIENT DATA:
- Age: {age}
- Gender: {gender}
- Diagnosis: {diagnosis}
- Severity: {severity}
- Admission Type: {admission_type}
- Mobility Status: {mobility_status}

TASK:
Extract the medical care requirements needed for this patient.

Consider:
- What equipment is needed? (oxygen, ventilator, cardiac monitor)
- Is isolation required? (infectious diseases, immunocompromised)
- What ward is most appropriate? (ICU, General, Isolation, Cardiac, Respiratory)
- How much nursing attention is needed? (proximity to nursing station: 1-10)
- Any special considerations?

Respond ONLY with valid JSON:
{{
  "needs_oxygen": true,
  "needs_ventilator": false,
  "needs_cardiac_monitor": false,
  "needs_isolation": true,
  "preferred_ward": "Respiratory",
  "proximity_preference": 8,
  "special_considerations": [
    "Patient needs close monitoring due to moderate severity",
    "Infectious precautions required"
  ],
  "confidence": 95,
  "reasoning": "Pneumonia diagnosis requires oxygen support and isolation to prevent spread. Moderate severity indicates need for close monitoring."
}}
"""
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
class TaskCoordinatorPrompts:
    """Prompt templates for Task Coordinator Agent.

    Templates are ``str.format`` strings; JSON-example braces are escaped
    as ``{{`` / ``}}``.
    """

    # Picks one staff member for a task. Placeholders: task_id, task_type,
    # description, priority, ward, bed_number, duration, patient_name,
    # staff_json, current_time, activity_level, pending_tasks_count,
    # required_role.
    # NOTE(review): in-string indentation reconstructed — confirm against
    # the uploaded file.
    STAFF_ASSIGNMENT = """
You are a Task Coordinator AI for hospital operations.

TASK TO ASSIGN:
- Task ID: {task_id}
- Type: {task_type}
- Description: {description}
- Priority: {priority}
- Location: Ward {ward}, Bed {bed_number}
- Estimated Duration: {duration} minutes
- Patient: {patient_name} (if applicable)

AVAILABLE STAFF CANDIDATES:
{staff_json}

CURRENT CONTEXT:
- Current Time: {current_time}
- Ward Activity Level: {activity_level}
- Total Pending Tasks: {pending_tasks_count}

SELECTION CRITERIA:
1. Role Appropriateness (Must Match): {required_role}
2. Current Workload (Fair Distribution)
3. Ward Assignment (Preference for same ward)
4. Location Proximity (Efficiency)
5. Recent Task History (Avoid overloading)

TASK:
Select the most appropriate staff member for this task.

Consider:
- Who has the lowest current workload?
- Who is already in or near this ward?
- Who hasn't been assigned a task recently?
- Balance efficiency with fairness

Respond ONLY with valid JSON:
{{
  "recommended_staff_id": "staff_id_here",
  "staff_name": "Staff Name",
  "reasoning": "This staff member has the lowest current workload (2 tasks) and is already assigned to the same ward, making them the most efficient choice. They have the appropriate role and capacity.",
  "workload_impact": "Workload will increase from 2 to 3 tasks, still below maximum of 5.",
  "concerns": [
    "Ward is busy - staff may need support"
  ],
  "alternatives": [
    {{
      "staff_id": "alt_id",
      "staff_name": "Alt Name",
      "reason": "Second choice with 3 current tasks"
    }}
  ],
  "confidence": 85
}}
"""

    # Decides the next task(s) in a multi-step workflow. Placeholders:
    # workflow_type, context_json, current_step, completed_steps,
    # nurses_available, cleaners_available.
    WORKFLOW_ORCHESTRATION = """
You are orchestrating a multi-step hospital workflow.

WORKFLOW TYPE: {workflow_type}

CONTEXT:
{context_json}

CURRENT STEP: {current_step}
PREVIOUS STEPS COMPLETED: {completed_steps}

AVAILABLE STAFF:
- Nurses: {nurses_available}
- Cleaners: {cleaners_available}

TASK:
Determine the next task(s) to create and assign.

Respond with JSON:
{{
  "next_tasks": [
    {{
      "task_type": "cleaning",
      "priority": "high",
      "assigned_role": "cleaner",
      "description": "Task description",
      "estimated_duration": 30
    }}
  ],
  "workflow_status": "in_progress/completed",
  "reasoning": "Why these tasks next"
}}
"""

    # Chooses a remediation for an overdue task. Placeholders: task_json,
    # expected_duration, actual_elapsed, delay_minutes, status, ward,
    # priority, patient_waiting, available_staff_json.
    TASK_ESCALATION = """
You are handling a delayed hospital task that needs escalation.

TASK DETAILS:
{task_json}

DELAY INFORMATION:
- Expected Duration: {expected_duration} minutes
- Actual Time Elapsed: {actual_elapsed} minutes
- Delay: {delay_minutes} minutes
- Current Status: {status}

CONTEXT:
- Ward: {ward}
- Priority: {priority}
- Patient Waiting: {patient_waiting}

AVAILABLE OPTIONS:
1. Reassign to different staff
2. Escalate to supervisor
3. Increase priority
4. Request additional support

Available staff for reassignment:
{available_staff_json}

TASK:
Recommend the best course of action to resolve this delay.

Respond with JSON:
{{
  "action": "reassign/escalate/increase_priority/request_support",
  "reasoning": "Why this action is appropriate",
  "recommended_staff_id": "staff_id if reassigning",
  "escalation_message": "Message to supervisor if escalating",
  "priority_change": "new priority if increasing",
  "urgency": "low/medium/high/critical"
}}
"""
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
class CommonPrompts:
    """Common prompts used across multiple agents.

    Templates are ``str.format`` strings; JSON-example braces are escaped
    as ``{{`` / ``}}``.
    """

    # Turns a structured decision into a short human-readable explanation.
    # Placeholders: decision, factors_json.
    GENERATE_REASONING = """
Explain the following decision in simple, clear language:

DECISION: {decision}

FACTORS CONSIDERED:
{factors_json}

Provide a 2-3 sentence explanation suitable for hospital staff.
"""

    # Sanity-checks a proposed decision before it is acted on.
    # Placeholder: decision_json.
    VALIDATE_DECISION = """
Validate this decision for potential issues:

{decision_json}

Check for:
- Safety concerns
- Resource conflicts
- Policy violations
- Logic errors

Respond with JSON:
{{
  "is_valid": true/false,
  "concerns": ["list of issues if any"],
  "severity": "none/low/medium/high",
  "recommendation": "proceed/review/reject"
}}
"""
|
services/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Services module for CareFlow Nexus AI Agents
|
| 3 |
+
Provides Firebase and Gemini AI service integrations
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from .firebase_service import FirebaseService
|
| 7 |
+
from .gemini_service import GeminiService
|
| 8 |
+
|
| 9 |
+
__all__ = ["FirebaseService", "GeminiService"]
|
services/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (446 Bytes). View file
|
|
|
services/__pycache__/firebase_service.cpython-312.pyc
ADDED
|
Binary file (24.4 kB). View file
|
|
|
services/__pycache__/gemini_service.cpython-312.pyc
ADDED
|
Binary file (14.8 kB). View file
|
|
|
services/firebase_service.py
ADDED
|
@@ -0,0 +1,515 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Firebase Service for CareFlow Nexus
|
| 3 |
+
Handles all Firebase Firestore operations for beds, patients, staff, and tasks
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
import os
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
from typing import Any, Dict, List, Optional
|
| 10 |
+
|
| 11 |
+
import firebase_admin
|
| 12 |
+
from firebase_admin import credentials, firestore
|
| 13 |
+
from google.cloud.firestore_v1.base_query import FieldFilter
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class FirebaseService:
|
| 19 |
+
"""Service class for all Firebase Firestore operations"""
|
| 20 |
+
|
| 21 |
+
    def __init__(self, service_account_path: Optional[str] = None):
        """
        Initialize Firebase service.

        Args:
            service_account_path: Path to Firebase service account JSON file;
                when absent or missing on disk, application-default
                credentials are used instead.

        Raises:
            Exception: re-raised after logging if Firebase initialization
                or Firestore client creation fails.
        """
        try:
            # firebase_admin.initialize_app raises if an app already exists,
            # so only initialize when no app has been registered yet.
            if not firebase_admin._apps:
                if service_account_path and os.path.exists(service_account_path):
                    cred = credentials.Certificate(service_account_path)
                    firebase_admin.initialize_app(cred)
                else:
                    # Use default credentials
                    firebase_admin.initialize_app()

            self.db = firestore.client()
            logger.info("Firebase service initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize Firebase: {e}")
            raise
|
| 42 |
+
|
| 43 |
+
# ==================== BED OPERATIONS ====================
|
| 44 |
+
|
| 45 |
+
async def get_bed(self, bed_id: str) -> Optional[Dict]:
|
| 46 |
+
"""Get a specific bed by ID"""
|
| 47 |
+
try:
|
| 48 |
+
doc = self.db.collection("beds").document(bed_id).get()
|
| 49 |
+
if doc.exists:
|
| 50 |
+
data = doc.to_dict()
|
| 51 |
+
data["id"] = doc.id
|
| 52 |
+
return data
|
| 53 |
+
return None
|
| 54 |
+
except Exception as e:
|
| 55 |
+
logger.error(f"Error getting bed {bed_id}: {e}")
|
| 56 |
+
return None
|
| 57 |
+
|
| 58 |
+
async def get_all_beds(self, filters: Optional[Dict] = None) -> List[Dict]:
|
| 59 |
+
"""
|
| 60 |
+
Get all beds with optional filters
|
| 61 |
+
|
| 62 |
+
Args:
|
| 63 |
+
filters: Dict with keys like 'status', 'ward', 'has_oxygen', etc.
|
| 64 |
+
"""
|
| 65 |
+
try:
|
| 66 |
+
query = self.db.collection("beds")
|
| 67 |
+
|
| 68 |
+
if filters:
|
| 69 |
+
if "status" in filters:
|
| 70 |
+
query = query.where(
|
| 71 |
+
filter=FieldFilter("status", "==", filters["status"])
|
| 72 |
+
)
|
| 73 |
+
if "ward" in filters:
|
| 74 |
+
query = query.where(
|
| 75 |
+
filter=FieldFilter("ward", "==", filters["ward"])
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
docs = query.stream()
|
| 79 |
+
beds = []
|
| 80 |
+
for doc in docs:
|
| 81 |
+
data = doc.to_dict()
|
| 82 |
+
data["id"] = doc.id
|
| 83 |
+
|
| 84 |
+
# Apply equipment filters (not indexed in Firestore)
|
| 85 |
+
if filters:
|
| 86 |
+
equipment = data.get("equipment", {})
|
| 87 |
+
if (
|
| 88 |
+
"has_oxygen" in filters
|
| 89 |
+
and equipment.get("has_oxygen") != filters["has_oxygen"]
|
| 90 |
+
):
|
| 91 |
+
continue
|
| 92 |
+
if (
|
| 93 |
+
"has_ventilator" in filters
|
| 94 |
+
and equipment.get("has_ventilator") != filters["has_ventilator"]
|
| 95 |
+
):
|
| 96 |
+
continue
|
| 97 |
+
if (
|
| 98 |
+
"is_isolation" in filters
|
| 99 |
+
and equipment.get("is_isolation") != filters["is_isolation"]
|
| 100 |
+
):
|
| 101 |
+
continue
|
| 102 |
+
|
| 103 |
+
beds.append(data)
|
| 104 |
+
|
| 105 |
+
logger.info(f"Retrieved {len(beds)} beds")
|
| 106 |
+
return beds
|
| 107 |
+
except Exception as e:
|
| 108 |
+
logger.error(f"Error getting beds: {e}")
|
| 109 |
+
return []
|
| 110 |
+
|
| 111 |
+
async def get_available_beds(self, filters: Optional[Dict] = None) -> List[Dict]:
|
| 112 |
+
"""Get all beds with status 'ready'"""
|
| 113 |
+
filter_dict = filters or {}
|
| 114 |
+
filter_dict["status"] = "ready"
|
| 115 |
+
return await self.get_all_beds(filter_dict)
|
| 116 |
+
|
| 117 |
+
async def update_bed_status(
|
| 118 |
+
self, bed_id: str, status: str, notes: Optional[str] = None
|
| 119 |
+
) -> bool:
|
| 120 |
+
"""
|
| 121 |
+
Update bed status
|
| 122 |
+
|
| 123 |
+
Args:
|
| 124 |
+
bed_id: Bed document ID
|
| 125 |
+
status: New status (ready, reserved, occupied, cleaning, maintenance)
|
| 126 |
+
notes: Optional notes
|
| 127 |
+
"""
|
| 128 |
+
try:
|
| 129 |
+
update_data = {"status": status, "last_updated": firestore.SERVER_TIMESTAMP}
|
| 130 |
+
if notes:
|
| 131 |
+
update_data["notes"] = notes
|
| 132 |
+
|
| 133 |
+
self.db.collection("beds").document(bed_id).update(update_data)
|
| 134 |
+
logger.info(f"Updated bed {bed_id} status to {status}")
|
| 135 |
+
return True
|
| 136 |
+
except Exception as e:
|
| 137 |
+
logger.error(f"Error updating bed status: {e}")
|
| 138 |
+
return False
|
| 139 |
+
|
| 140 |
+
async def assign_bed_to_patient(self, bed_id: str, patient_id: str) -> bool:
|
| 141 |
+
"""Assign a bed to a patient"""
|
| 142 |
+
try:
|
| 143 |
+
self.db.collection("beds").document(bed_id).update(
|
| 144 |
+
{
|
| 145 |
+
"assigned_patient_id": patient_id,
|
| 146 |
+
"status": "reserved",
|
| 147 |
+
"last_updated": firestore.SERVER_TIMESTAMP,
|
| 148 |
+
}
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
self.db.collection("patients").document(patient_id).update(
|
| 152 |
+
{
|
| 153 |
+
"assigned_bed_id": bed_id,
|
| 154 |
+
"status": "admitted",
|
| 155 |
+
"admission_time": firestore.SERVER_TIMESTAMP,
|
| 156 |
+
}
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
logger.info(f"Assigned bed {bed_id} to patient {patient_id}")
|
| 160 |
+
return True
|
| 161 |
+
except Exception as e:
|
| 162 |
+
logger.error(f"Error assigning bed: {e}")
|
| 163 |
+
return False
|
| 164 |
+
|
| 165 |
+
# ==================== PATIENT OPERATIONS ====================
|
| 166 |
+
|
| 167 |
+
async def get_patient(self, patient_id: str) -> Optional[Dict]:
|
| 168 |
+
"""Get a specific patient by ID"""
|
| 169 |
+
try:
|
| 170 |
+
doc = self.db.collection("patients").document(patient_id).get()
|
| 171 |
+
if doc.exists:
|
| 172 |
+
data = doc.to_dict()
|
| 173 |
+
data["id"] = doc.id
|
| 174 |
+
return data
|
| 175 |
+
return None
|
| 176 |
+
except Exception as e:
|
| 177 |
+
logger.error(f"Error getting patient {patient_id}: {e}")
|
| 178 |
+
return None
|
| 179 |
+
|
| 180 |
+
async def get_all_patients(self, filters: Optional[Dict] = None) -> List[Dict]:
|
| 181 |
+
"""Get all patients with optional filters"""
|
| 182 |
+
try:
|
| 183 |
+
query = self.db.collection("patients")
|
| 184 |
+
|
| 185 |
+
if filters and "status" in filters:
|
| 186 |
+
query = query.where(
|
| 187 |
+
filter=FieldFilter("status", "==", filters["status"])
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
docs = query.stream()
|
| 191 |
+
patients = []
|
| 192 |
+
for doc in docs:
|
| 193 |
+
data = doc.to_dict()
|
| 194 |
+
data["id"] = doc.id
|
| 195 |
+
patients.append(data)
|
| 196 |
+
|
| 197 |
+
logger.info(f"Retrieved {len(patients)} patients")
|
| 198 |
+
return patients
|
| 199 |
+
except Exception as e:
|
| 200 |
+
logger.error(f"Error getting patients: {e}")
|
| 201 |
+
return []
|
| 202 |
+
|
| 203 |
+
async def create_patient(self, patient_data: Dict) -> Optional[str]:
|
| 204 |
+
"""Create a new patient record"""
|
| 205 |
+
try:
|
| 206 |
+
patient_data["created_at"] = firestore.SERVER_TIMESTAMP
|
| 207 |
+
patient_data["status"] = patient_data.get("status", "waiting")
|
| 208 |
+
|
| 209 |
+
doc_ref = self.db.collection("patients").add(patient_data)
|
| 210 |
+
patient_id = doc_ref[1].id
|
| 211 |
+
|
| 212 |
+
logger.info(f"Created patient with ID: {patient_id}")
|
| 213 |
+
return patient_id
|
| 214 |
+
except Exception as e:
|
| 215 |
+
logger.error(f"Error creating patient: {e}")
|
| 216 |
+
return None
|
| 217 |
+
|
| 218 |
+
async def update_patient(self, patient_id: str, updates: Dict) -> bool:
|
| 219 |
+
"""Update patient information"""
|
| 220 |
+
try:
|
| 221 |
+
updates["updated_at"] = firestore.SERVER_TIMESTAMP
|
| 222 |
+
self.db.collection("patients").document(patient_id).update(updates)
|
| 223 |
+
logger.info(f"Updated patient {patient_id}")
|
| 224 |
+
return True
|
| 225 |
+
except Exception as e:
|
| 226 |
+
logger.error(f"Error updating patient: {e}")
|
| 227 |
+
return False
|
| 228 |
+
|
| 229 |
+
# ==================== STAFF OPERATIONS ====================
|
| 230 |
+
|
| 231 |
+
async def get_staff(self, staff_id: str) -> Optional[Dict]:
|
| 232 |
+
"""Get a specific staff member by ID"""
|
| 233 |
+
try:
|
| 234 |
+
doc = self.db.collection("staff").document(staff_id).get()
|
| 235 |
+
if doc.exists:
|
| 236 |
+
data = doc.to_dict()
|
| 237 |
+
data["id"] = doc.id
|
| 238 |
+
return data
|
| 239 |
+
return None
|
| 240 |
+
except Exception as e:
|
| 241 |
+
logger.error(f"Error getting staff {staff_id}: {e}")
|
| 242 |
+
return None
|
| 243 |
+
|
| 244 |
+
async def get_all_staff(self, filters: Optional[Dict] = None) -> List[Dict]:
|
| 245 |
+
"""Get all staff with optional filters"""
|
| 246 |
+
try:
|
| 247 |
+
query = self.db.collection("staff")
|
| 248 |
+
|
| 249 |
+
if filters:
|
| 250 |
+
if "role" in filters:
|
| 251 |
+
query = query.where(
|
| 252 |
+
filter=FieldFilter("role", "==", filters["role"])
|
| 253 |
+
)
|
| 254 |
+
if "is_on_shift" in filters:
|
| 255 |
+
query = query.where(
|
| 256 |
+
filter=FieldFilter("is_on_shift", "==", filters["is_on_shift"])
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
docs = query.stream()
|
| 260 |
+
staff_list = []
|
| 261 |
+
for doc in docs:
|
| 262 |
+
data = doc.to_dict()
|
| 263 |
+
data["id"] = doc.id
|
| 264 |
+
|
| 265 |
+
# Apply ward filter if specified
|
| 266 |
+
if filters and "assigned_ward" in filters:
|
| 267 |
+
if data.get("assigned_ward") != filters["assigned_ward"]:
|
| 268 |
+
continue
|
| 269 |
+
|
| 270 |
+
staff_list.append(data)
|
| 271 |
+
|
| 272 |
+
logger.info(f"Retrieved {len(staff_list)} staff members")
|
| 273 |
+
return staff_list
|
| 274 |
+
except Exception as e:
|
| 275 |
+
logger.error(f"Error getting staff: {e}")
|
| 276 |
+
return []
|
| 277 |
+
|
| 278 |
+
async def get_available_staff(
    self, role: str, ward: Optional[str] = None, max_workload: int = 5
) -> List[Dict]:
    """Return on-shift staff of a given role with spare capacity.

    Args:
        role: Staff role to match (e.g. "nurse", "cleaner").
        ward: Optional ward restriction.
        max_workload: Staff at or above this current_load are excluded.

    Returns:
        Matching staff dicts sorted by current_load, least busy first.
    """
    criteria: Dict = {"role": role, "is_on_shift": True}
    if ward:
        criteria["assigned_ward"] = ward

    candidates = await self.get_all_staff(criteria)

    # Drop anyone already at capacity, then order by how busy they are.
    return sorted(
        (s for s in candidates if s.get("current_load", 0) < max_workload),
        key=lambda s: s.get("current_load", 0),
    )
|
| 295 |
+
|
| 296 |
+
async def get_staff_workload(self, staff_id: str) -> int:
    """Return a staff member's current_load; 0 when missing or on error."""
    try:
        snapshot = self.db.collection("staff").document(staff_id).get()
        if snapshot.exists:
            return snapshot.to_dict().get("current_load", 0)
        return 0
    except Exception as e:
        logger.error(f"Error getting staff workload: {e}")
        return 0
|
| 306 |
+
|
| 307 |
+
async def update_staff_workload(self, staff_id: str, increment: int) -> bool:
    """
    Adjust a staff member's current_load by a signed increment.

    Args:
        staff_id: Staff document ID
        increment: Amount to increment (positive or negative)

    Returns:
        True when the document existed and was updated, False otherwise.

    NOTE(review): this is a non-transactional read-modify-write; two
    concurrent updates can lose one increment — confirm callers are
    effectively serialized.
    """
    try:
        staff_ref = self.db.collection("staff").document(staff_id)
        snapshot = staff_ref.get()

        if not snapshot.exists:
            return False

        previous = snapshot.to_dict().get("current_load", 0)
        # Workload is clamped at zero even on over-decrement.
        adjusted = max(0, previous + increment)

        staff_ref.update(
            {"current_load": adjusted, "updated_at": firestore.SERVER_TIMESTAMP}
        )
        logger.info(
            f"Updated staff {staff_id} workload: {previous} -> {adjusted}"
        )
        return True
    except Exception as e:
        logger.error(f"Error updating staff workload: {e}")
        return False
|
| 334 |
+
|
| 335 |
+
# ==================== TASK OPERATIONS ====================
|
| 336 |
+
|
| 337 |
+
async def create_task(self, task_data: Dict) -> Optional[str]:
    """Insert a new task document.

    Stamps created_at, defaults status to "pending", and bumps the
    assignee's workload when the task is pre-assigned.

    Returns:
        The new task's document ID, or None on failure.
    """
    try:
        task_data["created_at"] = firestore.SERVER_TIMESTAMP
        task_data.setdefault("status", "pending")

        # Firestore add() returns (write_time, document_reference).
        _, task_ref = self.db.collection("tasks").add(task_data)
        task_id = task_ref.id

        # An assigned task immediately counts against that staff
        # member's workload.
        if "assigned_to" in task_data:
            await self.update_staff_workload(task_data["assigned_to"], 1)

        logger.info(f"Created task with ID: {task_id}")
        return task_id
    except Exception as e:
        logger.error(f"Error creating task: {e}")
        return None
|
| 355 |
+
|
| 356 |
+
async def get_task(self, task_id: str) -> Optional[Dict]:
    """Fetch a single task document by ID.

    Returns:
        The task dict (with "id" injected) or None if absent/errored.
    """
    try:
        snapshot = self.db.collection("tasks").document(task_id).get()
        if not snapshot.exists:
            return None
        task = snapshot.to_dict()
        task["id"] = snapshot.id
        return task
    except Exception as e:
        logger.error(f"Error getting task {task_id}: {e}")
        return None
|
| 368 |
+
|
| 369 |
+
async def get_tasks(self, filters: Optional[Dict] = None) -> List[Dict]:
    """Fetch tasks, newest first, with optional filtering.

    "status" may be a single value (server-side equality filter) or a
    list of values (matched client-side after streaming); "assigned_to"
    and "priority" are always pushed down to Firestore. Returns [] on
    any error.
    """
    filters = filters or {}
    try:
        query = self.db.collection("tasks")

        status_filter = filters.get("status")
        multi_status = isinstance(status_filter, list)

        if "status" in filters and not multi_status:
            query = query.where(
                filter=FieldFilter("status", "==", status_filter)
            )
        if "assigned_to" in filters:
            query = query.where(
                filter=FieldFilter("assigned_to", "==", filters["assigned_to"])
            )
        if "priority" in filters:
            query = query.where(
                filter=FieldFilter("priority", "==", filters["priority"])
            )

        # Newest tasks first.
        query = query.order_by("created_at", direction=firestore.Query.DESCENDING)

        tasks = []
        for doc in query.stream():
            data = doc.to_dict()
            data["id"] = doc.id

            # A list-valued "status" filter is applied here, in memory.
            if multi_status and data.get("status") not in status_filter:
                continue

            tasks.append(data)

        logger.info(f"Retrieved {len(tasks)} tasks")
        return tasks
    except Exception as e:
        logger.error(f"Error getting tasks: {e}")
        return []
|
| 419 |
+
|
| 420 |
+
async def update_task_status(
    self, task_id: str, status: str, notes: Optional[str] = None
) -> bool:
    """
    Update a task's status and bookkeeping timestamps.

    Args:
        task_id: Task document ID
        status: New status value (e.g. "in_progress", "completed")
        notes: Optional notes to attach to the task

    Returns:
        True on successful update, False on error.

    Side effects:
        - "in_progress" (with no notes) stamps started_at.
        - "completed" stamps completed_at and releases one unit of the
          assignee's workload.
    """
    try:
        update_data = {"status": status, "updated_at": firestore.SERVER_TIMESTAMP}

        if status == "in_progress" and notes is None:
            update_data["started_at"] = firestore.SERVER_TIMESTAMP
        elif status == "completed":
            update_data["completed_at"] = firestore.SERVER_TIMESTAMP

            # Only completion frees the assignee's capacity. Decrementing
            # on every status transition would repeatedly shrink
            # current_load for a single task and corrupt workload counts
            # (create_task increments exactly once per assignment).
            task = await self.get_task(task_id)
            if task and "assigned_to" in task:
                await self.update_staff_workload(task["assigned_to"], -1)

        if notes:
            update_data["notes"] = notes

        self.db.collection("tasks").document(task_id).update(update_data)
        logger.info(f"Updated task {task_id} status to {status}")
        return True
    except Exception as e:
        logger.error(f"Error updating task status: {e}")
        return False
|
| 446 |
+
|
| 447 |
+
# ==================== EVENT LOGGING ====================
|
| 448 |
+
|
| 449 |
+
async def log_event(self, event_data: Dict) -> Optional[str]:
    """Append an event to the event_logs collection.

    Returns:
        The new event document's ID, or None on failure.
    """
    try:
        event_data["timestamp"] = firestore.SERVER_TIMESTAMP

        # add() yields (write_time, document_reference).
        _, event_ref = self.db.collection("event_logs").add(event_data)
        event_id = event_ref.id

        logger.debug(f"Logged event with ID: {event_id}")
        return event_id
    except Exception as e:
        logger.error(f"Error logging event: {e}")
        return None
|
| 462 |
+
|
| 463 |
+
# ==================== ANALYTICS ====================
|
| 464 |
+
|
| 465 |
+
async def get_metrics(self) -> Dict[str, Any]:
    """Assemble a snapshot of bed, patient, staff, and task metrics.

    Returns:
        Nested metrics dict keyed by "beds", "patients", "staff",
        "tasks", plus an ISO "timestamp"; {} if any read fails.
    """
    try:
        beds = await self.get_all_beds()
        patients = await self.get_all_patients()
        staff = await self.get_all_staff({"is_on_shift": True})
        tasks = await self.get_tasks({"status": ["pending", "in_progress"]})

        def count_by(items: List[Dict], field: str, value: str) -> int:
            # .get() guards against documents missing the field entirely;
            # the original mixed d["key"] and d.get("key"), so one
            # malformed document could KeyError and wipe all metrics.
            return sum(1 for item in items if item.get(field) == value)

        total_beds = len(beds)
        available_beds = count_by(beds, "status", "ready")
        occupied_beds = count_by(beds, "status", "occupied")
        cleaning_beds = count_by(beds, "status", "cleaning")

        metrics = {
            "beds": {
                "total": total_beds,
                "available": available_beds,
                "occupied": occupied_beds,
                "cleaning": cleaning_beds,
                "utilization_rate": (occupied_beds / total_beds * 100)
                if total_beds > 0
                else 0,
            },
            "patients": {
                "total": len(patients),
                "waiting": count_by(patients, "status", "waiting"),
                "admitted": count_by(patients, "status", "admitted"),
            },
            "staff": {
                "on_shift": len(staff),
                "nurses": count_by(staff, "role", "nurse"),
                "cleaners": count_by(staff, "role", "cleaner"),
            },
            "tasks": {
                "active": len(tasks),
                "pending": count_by(tasks, "status", "pending"),
                "in_progress": count_by(tasks, "status", "in_progress"),
            },
            "timestamp": datetime.now().isoformat(),
        }

        return metrics
    except Exception as e:
        logger.error(f"Error getting metrics: {e}")
        return {}
|
services/gemini_service.py
ADDED
|
@@ -0,0 +1,401 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Gemini Service for CareFlow Nexus
|
| 3 |
+
Handles all Gemini AI API interactions using gemini-2.0-flash-exp model
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
import re
|
| 9 |
+
from typing import Any, Dict, List, Optional
|
| 10 |
+
|
| 11 |
+
import google.generativeai as genai
|
| 12 |
+
from tenacity import retry, stop_after_attempt, wait_exponential
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class GeminiService:
    """Service class for Gemini AI API operations.

    Thin wrapper around the google.generativeai SDK providing text
    generation, JSON-constrained generation, and domain-specific prompt
    helpers for the CareFlow Nexus agents.
    """

    def __init__(self, api_key: str, model_name: str = "gemini-2.0-flash-exp"):
        """
        Initialize Gemini service

        Args:
            api_key: Google API key for Gemini
            model_name: Model name (default: gemini-2.0-flash-exp)
        """
        try:
            genai.configure(api_key=api_key)
            self.model_name = model_name
            self.model = genai.GenerativeModel(model_name)
            logger.info(f"Gemini service initialized with model: {model_name}")
        except Exception as e:
            logger.error(f"Failed to initialize Gemini service: {e}")
            raise

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=2, max=10),
        reraise=True,
    )
    async def generate_response(
        self, prompt: str, temperature: float = 0.7, max_tokens: int = 2048
    ) -> str:
        """
        Generate a text response from Gemini

        Args:
            prompt: Input prompt
            temperature: Sampling temperature (0.0-1.0)
            max_tokens: Maximum tokens in response

        Returns:
            Generated text response ("" when the model returns nothing)
        """
        try:
            generation_config = genai.GenerationConfig(
                temperature=temperature,
                max_output_tokens=max_tokens,
                candidate_count=1,
            )

            # NOTE(review): generate_content is a blocking SDK call inside
            # an async method; confirm callers tolerate the event loop
            # stalling for the duration of generation.
            response = self.model.generate_content(
                prompt, generation_config=generation_config
            )

            if response.text:
                logger.debug(f"Generated response: {response.text[:100]}...")
                return response.text
            else:
                logger.warning("Empty response from Gemini")
                return ""

        except Exception as e:
            logger.error(f"Error generating response: {e}")
            raise

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=2, max=10),
        reraise=True,
    )
    async def generate_json_response(
        self, prompt: str, temperature: float = 0.5, max_tokens: int = 2048
    ) -> Dict[str, Any]:
        """
        Generate a JSON response from Gemini

        Args:
            prompt: Input prompt (should instruct to return JSON)
            temperature: Sampling temperature
            max_tokens: Maximum tokens in response

        Returns:
            Parsed JSON response as dictionary ({} on failure)
        """
        try:
            # Add JSON instruction to prompt if not present
            if "json" not in prompt.lower():
                prompt = f"{prompt}\n\nRespond ONLY with valid JSON."

            text_response = await self.generate_response(
                prompt, temperature=temperature, max_tokens=max_tokens
            )

            # Extract JSON from response
            json_data = self._extract_json(text_response)

            if json_data:
                logger.debug("Successfully extracted JSON from response")
                return json_data
            else:
                logger.warning("Failed to extract JSON, returning empty dict")
                return {}

        except Exception as e:
            logger.error(f"Error generating JSON response: {e}")
            return {}

    def _extract_json(self, text: str) -> Optional[Dict[str, Any]]:
        """
        Extract JSON from text response

        Tries, in order: direct parse, a ```json fenced block, then any
        brace-balanced fragment (one nesting level deep).

        Args:
            text: Text containing JSON

        Returns:
            Parsed JSON dictionary or None
        """
        try:
            # Try direct JSON parse first
            return json.loads(text)
        except json.JSONDecodeError:
            pass

        # Try to find JSON in markdown code blocks
        json_pattern = r"```(?:json)?\s*(\{.*?\})\s*```"
        matches = re.findall(json_pattern, text, re.DOTALL)

        if matches:
            try:
                return json.loads(matches[0])
            except json.JSONDecodeError:
                pass

        # Try to find any JSON object in the text (handles one level of
        # nested braces only)
        json_pattern = r"\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}"
        matches = re.findall(json_pattern, text, re.DOTALL)

        for match in matches:
            try:
                return json.loads(match)
            except json.JSONDecodeError:
                continue

        logger.warning("Could not extract valid JSON from response")
        return None

    async def generate_structured(
        self,
        system_instruction: str,
        user_prompt: str,
        temperature: float = 0.5,
        max_tokens: int = 2048,
    ) -> Dict[str, Any]:
        """
        Generate structured response with system instruction

        Args:
            system_instruction: System-level instruction/context
            user_prompt: User query/request
            temperature: Sampling temperature
            max_tokens: Maximum tokens

        Returns:
            Parsed JSON response ({} on failure)
        """
        try:
            # Create a per-call model carrying the system instruction
            model_with_system = genai.GenerativeModel(
                self.model_name, system_instruction=system_instruction
            )

            generation_config = genai.GenerationConfig(
                temperature=temperature,
                max_output_tokens=max_tokens,
                candidate_count=1,
            )

            response = model_with_system.generate_content(
                user_prompt, generation_config=generation_config
            )

            if response.text:
                return self._extract_json(response.text) or {}
            return {}

        except Exception as e:
            logger.error(f"Error generating structured response: {e}")
            return {}

    async def analyze_text(
        self, text: str, analysis_type: str, context: Optional[Dict] = None
    ) -> str:
        """
        Analyze text with specific analysis type

        Args:
            text: Text to analyze
            analysis_type: Type of analysis (e.g., "diagnosis", "requirements", "sentiment")
            context: Optional additional context

        Returns:
            Analysis result as text
        """
        context_str = ""
        if context:
            context_str = f"\n\nContext: {json.dumps(context, indent=2)}"

        prompt = f"""
Analyze the following text for {analysis_type}:

{text}
{context_str}

Provide a clear, concise analysis.
"""

        # Low temperature keeps analysis deterministic.
        return await self.generate_response(prompt, temperature=0.3)

    async def score_and_rank(
        self, items: List[Dict], criteria: str, context: Dict
    ) -> Dict[str, Any]:
        """
        Score and rank items based on criteria

        Args:
            items: List of items to rank
            criteria: Ranking criteria description
            context: Context information

        Returns:
            Dictionary with ranked items and scores
        """
        prompt = f"""
You are an expert ranking system.

CRITERIA: {criteria}

CONTEXT:
{json.dumps(context, indent=2)}

ITEMS TO RANK:
{json.dumps(items, indent=2)}

Score each item from 0-100 and rank them. Provide reasoning for each score.

Respond with JSON in this format:
{{
    "rankings": [
        {{
            "item_id": "id",
            "score": 0-100,
            "reasoning": "detailed explanation",
            "pros": ["advantage 1", "advantage 2"],
            "cons": ["concern 1", "concern 2"]
        }}
    ],
    "confidence": 0-100,
    "overall_recommendation": "summary"
}}
"""

        return await self.generate_json_response(prompt, temperature=0.5)

    async def extract_requirements(
        self, diagnosis: str, patient_info: Dict
    ) -> Dict[str, Any]:
        """
        Extract medical requirements from diagnosis

        Args:
            diagnosis: Patient diagnosis text
            patient_info: Patient information dictionary

        Returns:
            Dictionary of extracted requirements
        """
        prompt = f"""
You are a medical requirements analyzer.

PATIENT INFORMATION:
- Age: {patient_info.get("age", "Unknown")}
- Gender: {patient_info.get("gender", "Unknown")}
- Diagnosis: {diagnosis}
- Severity: {patient_info.get("severity", "moderate")}
- Mobility: {patient_info.get("mobility_status", "ambulatory")}

Extract the medical care requirements and respond with JSON:
{{
    "needs_oxygen": true/false,
    "needs_ventilator": true/false,
    "needs_cardiac_monitor": true/false,
    "needs_isolation": true/false,
    "preferred_ward": "ward name or null",
    "proximity_preference": 1-10,
    "special_considerations": ["list of special needs"],
    "reasoning": "brief explanation"
}}
"""

        return await self.generate_json_response(prompt, temperature=0.3)

    async def generate_task_assignment_reasoning(
        self, task: Dict, staff: Dict, context: Dict
    ) -> str:
        """
        Generate reasoning for task assignment

        Args:
            task: Task details
            staff: Staff member details
            context: Additional context

        Returns:
            Reasoning text
        """
        prompt = f"""
Explain why {staff.get("name")} is the best choice for this task:

TASK:
{json.dumps(task, indent=2)}

STAFF MEMBER:
{json.dumps(staff, indent=2)}

CONTEXT:
{json.dumps(context, indent=2)}

Provide a clear, concise explanation in 2-3 sentences.
"""

        return await self.generate_response(prompt, temperature=0.5)

    async def detect_bottlenecks(self, system_state: Dict) -> Dict[str, Any]:
        """
        Analyze system state for bottlenecks

        Args:
            system_state: Current system state metrics

        Returns:
            Bottleneck analysis
        """
        prompt = f"""
You are a hospital operations analyst.

CURRENT SYSTEM STATE:
{json.dumps(system_state, indent=2)}

Analyze for bottlenecks, inefficiencies, and potential issues.

Respond with JSON:
{{
    "bottlenecks": [
        {{
            "type": "bottleneck type",
            "severity": "low/medium/high/critical",
            "description": "what's wrong",
            "impact": "how it affects operations",
            "recommendation": "suggested action"
        }}
    ],
    "alerts": ["urgent issues requiring immediate attention"],
    "recommendations": ["proactive suggestions"],
    "capacity_forecast": "prediction for next 4-6 hours"
}}
"""

        return await self.generate_json_response(prompt, temperature=0.3)

    def validate_json_schema(self, data: Dict, required_keys: List[str]) -> bool:
        """
        Validate JSON data has required keys

        Args:
            data: JSON data to validate
            required_keys: List of required keys

        Returns:
            True if valid, False otherwise
        """
        return all(key in data for key in required_keys)

    async def get_model_info(self) -> Dict[str, Any]:
        """Get information about the current model"""
        # Derive the version from the configured model name instead of
        # hard-coding "2.0-flash-exp": a service constructed with a
        # different model would otherwise report the wrong version.
        return {
            "model_name": self.model_name,
            "provider": "Google Gemini",
            "version": self.model_name.removeprefix("gemini-"),
        }
|
utils/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Utils module for CareFlow Nexus AI Agents
|
| 3 |
+
Provides utility functions for response parsing and validation
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from .response_parser import ResponseParser
|
| 7 |
+
|
| 8 |
+
__all__ = ["ResponseParser"]
|
utils/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (386 Bytes). View file
|
|
|
utils/__pycache__/response_parser.cpython-312.pyc
ADDED
|
Binary file (13.9 kB). View file
|
|
|
utils/response_parser.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Response Parser Utility for CareFlow Nexus
|
| 3 |
+
Handles parsing and validation of Gemini AI responses
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
import re
|
| 9 |
+
from typing import Any, Dict, List, Optional
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ResponseParser:
|
| 15 |
+
"""Utility class for parsing and validating AI responses"""
|
| 16 |
+
|
| 17 |
+
@staticmethod
|
| 18 |
+
def extract_json(text: str) -> Optional[Dict[str, Any]]:
|
| 19 |
+
"""
|
| 20 |
+
Extract JSON from text response (handles various formats)
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
text: Text containing JSON
|
| 24 |
+
|
| 25 |
+
Returns:
|
| 26 |
+
Parsed JSON dictionary or None
|
| 27 |
+
"""
|
| 28 |
+
if not text:
|
| 29 |
+
return None
|
| 30 |
+
|
| 31 |
+
# Try direct JSON parse first
|
| 32 |
+
try:
|
| 33 |
+
return json.loads(text.strip())
|
| 34 |
+
except json.JSONDecodeError:
|
| 35 |
+
pass
|
| 36 |
+
|
| 37 |
+
# Try to find JSON in markdown code blocks
|
| 38 |
+
patterns = [
|
| 39 |
+
r"```json\s*(\{.*?\})\s*```", # ```json {...} ```
|
| 40 |
+
r"```\s*(\{.*?\})\s*```", # ``` {...} ```
|
| 41 |
+
r"```json\s*(\[.*?\])\s*```", # ```json [...] ```
|
| 42 |
+
r"```\s*(\[.*?\])\s*```", # ``` [...] ```
|
| 43 |
+
]
|
| 44 |
+
|
| 45 |
+
for pattern in patterns:
|
| 46 |
+
matches = re.findall(pattern, text, re.DOTALL)
|
| 47 |
+
if matches:
|
| 48 |
+
try:
|
| 49 |
+
return json.loads(matches[0])
|
| 50 |
+
except json.JSONDecodeError:
|
| 51 |
+
continue
|
| 52 |
+
|
| 53 |
+
# Try to find any JSON object or array in the text
|
| 54 |
+
json_object_pattern = r"\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}"
|
| 55 |
+
json_array_pattern = r"\[[^\[\]]*(?:\[[^\[\]]*\][^\[\]]*)*\]"
|
| 56 |
+
|
| 57 |
+
for pattern in [json_object_pattern, json_array_pattern]:
|
| 58 |
+
matches = re.findall(pattern, text, re.DOTALL)
|
| 59 |
+
for match in matches:
|
| 60 |
+
try:
|
| 61 |
+
parsed = json.loads(match)
|
| 62 |
+
# Verify it's a meaningful JSON (not just empty)
|
| 63 |
+
if parsed:
|
| 64 |
+
return parsed
|
| 65 |
+
except json.JSONDecodeError:
|
| 66 |
+
continue
|
| 67 |
+
|
| 68 |
+
logger.warning("Could not extract valid JSON from response")
|
| 69 |
+
return None
|
| 70 |
+
|
| 71 |
+
@staticmethod
|
| 72 |
+
def validate_required_fields(
|
| 73 |
+
data: Dict[str, Any], required_fields: List[str]
|
| 74 |
+
) -> tuple[bool, List[str]]:
|
| 75 |
+
"""
|
| 76 |
+
Validate that dictionary contains required fields
|
| 77 |
+
|
| 78 |
+
Args:
|
| 79 |
+
data: Dictionary to validate
|
| 80 |
+
required_fields: List of required field names
|
| 81 |
+
|
| 82 |
+
Returns:
|
| 83 |
+
Tuple of (is_valid, missing_fields)
|
| 84 |
+
"""
|
| 85 |
+
if not isinstance(data, dict):
|
| 86 |
+
return False, required_fields
|
| 87 |
+
|
| 88 |
+
missing = [field for field in required_fields if field not in data]
|
| 89 |
+
return len(missing) == 0, missing
|
| 90 |
+
|
| 91 |
+
@staticmethod
|
| 92 |
+
def sanitize_response(data: Dict[str, Any]) -> Dict[str, Any]:
|
| 93 |
+
"""
|
| 94 |
+
Clean and normalize response data
|
| 95 |
+
|
| 96 |
+
Args:
|
| 97 |
+
data: Raw response data
|
| 98 |
+
|
| 99 |
+
Returns:
|
| 100 |
+
Sanitized dictionary
|
| 101 |
+
"""
|
| 102 |
+
if not isinstance(data, dict):
|
| 103 |
+
return {}
|
| 104 |
+
|
| 105 |
+
sanitized = {}
|
| 106 |
+
for key, value in data.items():
|
| 107 |
+
# Clean key (remove special chars, lowercase)
|
| 108 |
+
clean_key = key.strip().lower().replace(" ", "_")
|
| 109 |
+
|
| 110 |
+
# Clean value based on type
|
| 111 |
+
if isinstance(value, str):
|
| 112 |
+
sanitized[clean_key] = value.strip()
|
| 113 |
+
elif isinstance(value, dict):
|
| 114 |
+
sanitized[clean_key] = ResponseParser.sanitize_response(value)
|
| 115 |
+
elif isinstance(value, list):
|
| 116 |
+
sanitized[clean_key] = [
|
| 117 |
+
ResponseParser.sanitize_response(item)
|
| 118 |
+
if isinstance(item, dict)
|
| 119 |
+
else item
|
| 120 |
+
for item in value
|
| 121 |
+
]
|
| 122 |
+
else:
|
| 123 |
+
sanitized[clean_key] = value
|
| 124 |
+
|
| 125 |
+
return sanitized
|
| 126 |
+
|
| 127 |
+
@staticmethod
|
| 128 |
+
def validate_score(score: Any, min_val: int = 0, max_val: int = 100) -> int:
|
| 129 |
+
"""
|
| 130 |
+
Validate and normalize score to range
|
| 131 |
+
|
| 132 |
+
Args:
|
| 133 |
+
score: Score value (any type)
|
| 134 |
+
min_val: Minimum valid score
|
| 135 |
+
max_val: Maximum valid score
|
| 136 |
+
|
| 137 |
+
Returns:
|
| 138 |
+
Validated score within range
|
| 139 |
+
"""
|
| 140 |
+
try:
|
| 141 |
+
score_int = int(float(score))
|
| 142 |
+
return max(min_val, min(max_val, score_int))
|
| 143 |
+
except (ValueError, TypeError):
|
| 144 |
+
logger.warning(f"Invalid score value: {score}, returning 0")
|
| 145 |
+
return 0
|
| 146 |
+
|
| 147 |
+
@staticmethod
|
| 148 |
+
def parse_bed_allocation_response(response: Dict[str, Any]) -> Dict[str, Any]:
|
| 149 |
+
"""
|
| 150 |
+
Parse and validate bed allocation response
|
| 151 |
+
|
| 152 |
+
Args:
|
| 153 |
+
response: Raw response from AI
|
| 154 |
+
|
| 155 |
+
Returns:
|
| 156 |
+
Validated and structured response
|
| 157 |
+
"""
|
| 158 |
+
try:
|
| 159 |
+
recommendations = response.get("recommendations", [])
|
| 160 |
+
if not isinstance(recommendations, list):
|
| 161 |
+
recommendations = []
|
| 162 |
+
|
| 163 |
+
parsed_recs = []
|
| 164 |
+
for rec in recommendations[:3]: # Top 3 only
|
| 165 |
+
if not isinstance(rec, dict):
|
| 166 |
+
continue
|
| 167 |
+
|
| 168 |
+
parsed_rec = {
|
| 169 |
+
"bed_id": rec.get("bed_id", ""),
|
| 170 |
+
"bed_number": rec.get("bed_number", ""),
|
| 171 |
+
"ward": rec.get("ward", ""),
|
| 172 |
+
"score": ResponseParser.validate_score(rec.get("score", 0)),
|
| 173 |
+
"reasoning": rec.get("reasoning", "No reasoning provided"),
|
| 174 |
+
"pros": rec.get("pros", [])
|
| 175 |
+
if isinstance(rec.get("pros"), list)
|
| 176 |
+
else [],
|
| 177 |
+
"cons": rec.get("cons", [])
|
| 178 |
+
if isinstance(rec.get("cons"), list)
|
| 179 |
+
else [],
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
parsed_recs.append(parsed_rec)
|
| 183 |
+
|
| 184 |
+
return {
|
| 185 |
+
"recommendations": parsed_recs,
|
| 186 |
+
"overall_confidence": ResponseParser.validate_score(
|
| 187 |
+
response.get("overall_confidence", 50)
|
| 188 |
+
),
|
| 189 |
+
"considerations": response.get("considerations", ""),
|
| 190 |
+
}
|
| 191 |
+
except Exception as e:
|
| 192 |
+
logger.error(f"Error parsing bed allocation response: {e}")
|
| 193 |
+
return {
|
| 194 |
+
"recommendations": [],
|
| 195 |
+
"overall_confidence": 0,
|
| 196 |
+
"considerations": "",
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
@staticmethod
|
| 200 |
+
def parse_requirement_extraction_response(
|
| 201 |
+
response: Dict[str, Any],
|
| 202 |
+
) -> Dict[str, Any]:
|
| 203 |
+
"""
|
| 204 |
+
Parse and validate requirement extraction response
|
| 205 |
+
|
| 206 |
+
Args:
|
| 207 |
+
response: Raw response from AI
|
| 208 |
+
|
| 209 |
+
Returns:
|
| 210 |
+
Validated requirements dictionary
|
| 211 |
+
"""
|
| 212 |
+
try:
|
| 213 |
+
return {
|
| 214 |
+
"needs_oxygen": bool(response.get("needs_oxygen", False)),
|
| 215 |
+
"needs_ventilator": bool(response.get("needs_ventilator", False)),
|
| 216 |
+
"needs_cardiac_monitor": bool(
|
| 217 |
+
response.get("needs_cardiac_monitor", False)
|
| 218 |
+
),
|
| 219 |
+
"needs_isolation": bool(response.get("needs_isolation", False)),
|
| 220 |
+
"preferred_ward": response.get("preferred_ward"),
|
| 221 |
+
"proximity_preference": ResponseParser.validate_score(
|
| 222 |
+
response.get("proximity_preference", 5), 1, 10
|
| 223 |
+
),
|
| 224 |
+
"special_considerations": response.get("special_considerations", [])
|
| 225 |
+
if isinstance(response.get("special_considerations"), list)
|
| 226 |
+
else [],
|
| 227 |
+
"confidence": ResponseParser.validate_score(
|
| 228 |
+
response.get("confidence", 50)
|
| 229 |
+
),
|
| 230 |
+
"reasoning": response.get("reasoning", ""),
|
| 231 |
+
}
|
| 232 |
+
except Exception as e:
|
| 233 |
+
logger.error(f"Error parsing requirement extraction response: {e}")
|
| 234 |
+
return {
|
| 235 |
+
"needs_oxygen": False,
|
| 236 |
+
"needs_ventilator": False,
|
| 237 |
+
"needs_cardiac_monitor": False,
|
| 238 |
+
"needs_isolation": False,
|
| 239 |
+
"preferred_ward": None,
|
| 240 |
+
"proximity_preference": 5,
|
| 241 |
+
"special_considerations": [],
|
| 242 |
+
"confidence": 0,
|
| 243 |
+
"reasoning": "Error parsing response",
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
@staticmethod
|
| 247 |
+
def parse_staff_assignment_response(response: Dict[str, Any]) -> Dict[str, Any]:
|
| 248 |
+
"""
|
| 249 |
+
Parse and validate staff assignment response
|
| 250 |
+
|
| 251 |
+
Args:
|
| 252 |
+
response: Raw response from AI
|
| 253 |
+
|
| 254 |
+
Returns:
|
| 255 |
+
Validated assignment dictionary
|
| 256 |
+
"""
|
| 257 |
+
try:
|
| 258 |
+
alternatives = response.get("alternatives", [])
|
| 259 |
+
if not isinstance(alternatives, list):
|
| 260 |
+
alternatives = []
|
| 261 |
+
|
| 262 |
+
return {
|
| 263 |
+
"recommended_staff_id": response.get("recommended_staff_id", ""),
|
| 264 |
+
"staff_name": response.get("staff_name", ""),
|
| 265 |
+
"reasoning": response.get("reasoning", "No reasoning provided"),
|
| 266 |
+
"workload_impact": response.get("workload_impact", ""),
|
| 267 |
+
"concerns": response.get("concerns", [])
|
| 268 |
+
if isinstance(response.get("concerns"), list)
|
| 269 |
+
else [],
|
| 270 |
+
"alternatives": alternatives[:2], # Top 2 alternatives
|
| 271 |
+
"confidence": ResponseParser.validate_score(
|
| 272 |
+
response.get("confidence", 50)
|
| 273 |
+
),
|
| 274 |
+
}
|
| 275 |
+
except Exception as e:
|
| 276 |
+
logger.error(f"Error parsing staff assignment response: {e}")
|
| 277 |
+
return {
|
| 278 |
+
"recommended_staff_id": "",
|
| 279 |
+
"staff_name": "",
|
| 280 |
+
"reasoning": "Error parsing response",
|
| 281 |
+
"workload_impact": "",
|
| 282 |
+
"concerns": [],
|
| 283 |
+
"alternatives": [],
|
| 284 |
+
"confidence": 0,
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
@staticmethod
|
| 288 |
+
def parse_state_analysis_response(response: Dict[str, Any]) -> Dict[str, Any]:
|
| 289 |
+
"""
|
| 290 |
+
Parse and validate state analysis response
|
| 291 |
+
|
| 292 |
+
Args:
|
| 293 |
+
response: Raw response from AI
|
| 294 |
+
|
| 295 |
+
Returns:
|
| 296 |
+
Validated analysis dictionary
|
| 297 |
+
"""
|
| 298 |
+
try:
|
| 299 |
+
return {
|
| 300 |
+
"critical_alerts": response.get("critical_alerts", [])
|
| 301 |
+
if isinstance(response.get("critical_alerts"), list)
|
| 302 |
+
else [],
|
| 303 |
+
"bottlenecks": response.get("bottlenecks", [])
|
| 304 |
+
if isinstance(response.get("bottlenecks"), list)
|
| 305 |
+
else [],
|
| 306 |
+
"capacity_forecast": response.get("capacity_forecast", {})
|
| 307 |
+
if isinstance(response.get("capacity_forecast"), dict)
|
| 308 |
+
else {},
|
| 309 |
+
"recommendations": response.get("recommendations", [])
|
| 310 |
+
if isinstance(response.get("recommendations"), list)
|
| 311 |
+
else [],
|
| 312 |
+
}
|
| 313 |
+
except Exception as e:
|
| 314 |
+
logger.error(f"Error parsing state analysis response: {e}")
|
| 315 |
+
return {
|
| 316 |
+
"critical_alerts": [],
|
| 317 |
+
"bottlenecks": [],
|
| 318 |
+
"capacity_forecast": {},
|
| 319 |
+
"recommendations": [],
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
@staticmethod
|
| 323 |
+
def combine_scores(
|
| 324 |
+
rule_score: float, ai_score: float, rule_weight: float = 0.5
|
| 325 |
+
) -> float:
|
| 326 |
+
"""
|
| 327 |
+
Combine rule-based and AI scores with weights
|
| 328 |
+
|
| 329 |
+
Args:
|
| 330 |
+
rule_score: Rule-based score (0-100)
|
| 331 |
+
ai_score: AI-generated score (0-100)
|
| 332 |
+
rule_weight: Weight for rule score (0-1), AI gets (1-rule_weight)
|
| 333 |
+
|
| 334 |
+
Returns:
|
| 335 |
+
Combined score
|
| 336 |
+
"""
|
| 337 |
+
ai_weight = 1.0 - rule_weight
|
| 338 |
+
combined = (rule_score * rule_weight) + (ai_score * ai_weight)
|
| 339 |
+
return round(combined, 2)
|
| 340 |
+
|
| 341 |
+
@staticmethod
|
| 342 |
+
def format_error_response(
|
| 343 |
+
error_message: str, error_type: str = "general"
|
| 344 |
+
) -> Dict[str, Any]:
|
| 345 |
+
"""
|
| 346 |
+
Format error into standard response structure
|
| 347 |
+
|
| 348 |
+
Args:
|
| 349 |
+
error_message: Error message
|
| 350 |
+
error_type: Type of error
|
| 351 |
+
|
| 352 |
+
Returns:
|
| 353 |
+
Error response dictionary
|
| 354 |
+
"""
|
| 355 |
+
return {
|
| 356 |
+
"success": False,
|
| 357 |
+
"error": True,
|
| 358 |
+
"error_type": error_type,
|
| 359 |
+
"message": error_message,
|
| 360 |
+
"data": None,
|
| 361 |
+
}
|
| 362 |
+
|
| 363 |
+
@staticmethod
|
| 364 |
+
def format_success_response(data: Any, message: str = "Success") -> Dict[str, Any]:
|
| 365 |
+
"""
|
| 366 |
+
Format success response
|
| 367 |
+
|
| 368 |
+
Args:
|
| 369 |
+
data: Response data
|
| 370 |
+
message: Success message
|
| 371 |
+
|
| 372 |
+
Returns:
|
| 373 |
+
Success response dictionary
|
| 374 |
+
"""
|
| 375 |
+
return {"success": True, "error": False, "message": message, "data": data}
|