Spaces:
Runtime error
Runtime error
clinical-mind
#1
by arjitmat - opened
This view is limited to 50 files because it contains too many changes. See the raw diff here.
- .dockerignore +0 -44
- .env.example +0 -8
- .gitattributes +35 -0
- .github/workflows/sync-to-hf.yml +0 -21
- .gitignore +0 -38
- AUDIT_REPORT.md +0 -282
- Dockerfile +0 -37
- OPTIMIZATION_REPORT.md +0 -222
- README.md +3 -140
- README_HF.md +0 -42
- app.py +0 -104
- backend/.env.example +0 -18
- backend/.watchmanconfig +0 -3
- backend/app/__init__.py +0 -1
- backend/app/api/__init__.py +0 -1
- backend/app/api/adversarial.py +0 -181
- backend/app/api/agents.py +0 -140
- backend/app/api/analytics.py +0 -30
- backend/app/api/bias_detection.py +0 -155
- backend/app/api/cases.py +0 -100
- backend/app/api/profile.py +0 -112
- backend/app/api/reasoning.py +0 -214
- backend/app/api/simulation.py +0 -261
- backend/app/api/student.py +0 -36
- backend/app/core/__init__.py +0 -1
- backend/app/core/agents/__init__.py +0 -1
- backend/app/core/agents/base_agent.py +0 -181
- backend/app/core/agents/case_state_manager.py +0 -645
- backend/app/core/agents/clinical_validator.py +0 -213
- backend/app/core/agents/complication_engine.py +0 -1307
- backend/app/core/agents/evaluator.py +0 -215
- backend/app/core/agents/family_agent.py +0 -363
- backend/app/core/agents/knowledge_builder.py +0 -728
- backend/app/core/agents/lab_tech_agent.py +0 -390
- backend/app/core/agents/nurse_agent.py +0 -211
- backend/app/core/agents/orchestrator.py +0 -914
- backend/app/core/agents/patient.py +0 -167
- backend/app/core/agents/patient_agent.py +0 -188
- backend/app/core/agents/response_optimizer.py +0 -218
- backend/app/core/agents/senior_agent.py +0 -187
- backend/app/core/agents/symptom_translator.py +0 -213
- backend/app/core/agents/treatment_engine.py +0 -232
- backend/app/core/agents/tutor.py +0 -104
- backend/app/core/analytics/__init__.py +0 -1
- backend/app/core/analytics/bias_detector.py +0 -118
- backend/app/core/analytics/knowledge_graph.py +0 -79
- backend/app/core/analytics/recommender.py +0 -62
- backend/app/core/rag/__init__.py +0 -7
- backend/app/core/rag/generator.py +0 -465
- backend/app/core/rag/retriever.py +0 -125
.dockerignore
DELETED
|
@@ -1,44 +0,0 @@
|
|
| 1 |
-
# Demo automation (local only, not for HF)
|
| 2 |
-
demo-automation/
|
| 3 |
-
|
| 4 |
-
# Git
|
| 5 |
-
.git/
|
| 6 |
-
.gitignore
|
| 7 |
-
|
| 8 |
-
# Environment files
|
| 9 |
-
.env
|
| 10 |
-
.env.local
|
| 11 |
-
backend/.env
|
| 12 |
-
|
| 13 |
-
# Python
|
| 14 |
-
__pycache__/
|
| 15 |
-
*.pyc
|
| 16 |
-
*.pyo
|
| 17 |
-
*.pyd
|
| 18 |
-
.Python
|
| 19 |
-
backend/venv/
|
| 20 |
-
backend/env/
|
| 21 |
-
|
| 22 |
-
# Node
|
| 23 |
-
frontend/node_modules/
|
| 24 |
-
frontend/npm-debug.log*
|
| 25 |
-
frontend/yarn-debug.log*
|
| 26 |
-
frontend/yarn-error.log*
|
| 27 |
-
|
| 28 |
-
# Build artifacts (we'll build in Docker)
|
| 29 |
-
frontend/build/
|
| 30 |
-
|
| 31 |
-
# IDEs
|
| 32 |
-
.vscode/
|
| 33 |
-
.idea/
|
| 34 |
-
|
| 35 |
-
# macOS
|
| 36 |
-
.DS_Store
|
| 37 |
-
|
| 38 |
-
# Documentation
|
| 39 |
-
*.md
|
| 40 |
-
!README_HF.md
|
| 41 |
-
|
| 42 |
-
# Testing
|
| 43 |
-
*.test.js
|
| 44 |
-
*.spec.js
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.env.example
DELETED
|
@@ -1,8 +0,0 @@
|
|
| 1 |
-
# Required for AI agents to work
|
| 2 |
-
ANTHROPIC_API_KEY=your_anthropic_api_key_here
|
| 3 |
-
|
| 4 |
-
# Optional: Custom API URL for frontend (defaults to http://localhost:8000/api)
|
| 5 |
-
# REACT_APP_API_URL=http://localhost:8000/api
|
| 6 |
-
|
| 7 |
-
# Optional: Port configuration
|
| 8 |
-
# PORT=7860
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
.github/workflows/sync-to-hf.yml
DELETED
|
@@ -1,21 +0,0 @@
|
|
| 1 |
-
name: Sync to Hugging Face Spaces
|
| 2 |
-
on:
|
| 3 |
-
push:
|
| 4 |
-
branches: [main]
|
| 5 |
-
workflow_dispatch:
|
| 6 |
-
|
| 7 |
-
jobs:
|
| 8 |
-
sync-to-hub:
|
| 9 |
-
runs-on: ubuntu-latest
|
| 10 |
-
steps:
|
| 11 |
-
- uses: actions/checkout@v4
|
| 12 |
-
with:
|
| 13 |
-
fetch-depth: 0
|
| 14 |
-
lfs: true
|
| 15 |
-
|
| 16 |
-
- name: Push to Hugging Face
|
| 17 |
-
env:
|
| 18 |
-
HF_TOKEN: ${{ secrets.HF_TOKEN }}
|
| 19 |
-
run: |
|
| 20 |
-
git remote add hf https://arjitmat:$HF_TOKEN@huggingface.co/spaces/arjitmat/clinical-mind
|
| 21 |
-
git push hf main --force
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
DELETED
|
@@ -1,38 +0,0 @@
|
|
| 1 |
-
# Dependencies
|
| 2 |
-
node_modules/
|
| 3 |
-
frontend/node_modules/
|
| 4 |
-
backend/venv/
|
| 5 |
-
backend/__pycache__/
|
| 6 |
-
backend/app/__pycache__/
|
| 7 |
-
**/__pycache__/
|
| 8 |
-
*.pyc
|
| 9 |
-
|
| 10 |
-
# Build
|
| 11 |
-
frontend/build/
|
| 12 |
-
dist/
|
| 13 |
-
|
| 14 |
-
# Environment
|
| 15 |
-
.env
|
| 16 |
-
.env.local
|
| 17 |
-
backend/.env
|
| 18 |
-
|
| 19 |
-
# IDE
|
| 20 |
-
.vscode/
|
| 21 |
-
.idea/
|
| 22 |
-
*.swp
|
| 23 |
-
*.swo
|
| 24 |
-
|
| 25 |
-
# OS
|
| 26 |
-
.DS_Store
|
| 27 |
-
Thumbs.db
|
| 28 |
-
|
| 29 |
-
# Data
|
| 30 |
-
backend/data/vector_db/
|
| 31 |
-
*.db
|
| 32 |
-
|
| 33 |
-
# Logs
|
| 34 |
-
*.log
|
| 35 |
-
npm-debug.log*
|
| 36 |
-
|
| 37 |
-
# Testing
|
| 38 |
-
coverage/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
AUDIT_REPORT.md
DELETED
|
@@ -1,282 +0,0 @@
|
|
| 1 |
-
# Clinical Mind - System Audit Report
|
| 2 |
-
**Date:** February 15, 2026
|
| 3 |
-
**Version:** 1.0.0
|
| 4 |
-
**Status:** PRODUCTION READY ✅
|
| 5 |
-
|
| 6 |
-
---
|
| 7 |
-
|
| 8 |
-
## 📊 Executive Summary
|
| 9 |
-
|
| 10 |
-
Clinical Mind is a sophisticated multi-agent medical simulation system powered by Claude Opus 4.1 API. The application demonstrates robust architecture with parallel agent processing, real-time communication, and educational value for medical students.
|
| 11 |
-
|
| 12 |
-
### Key Strengths:
|
| 13 |
-
- ✅ **Multi-Agent Orchestration**: 5 specialized AI agents working in parallel
|
| 14 |
-
- ✅ **Real-time Updates**: Live vitals monitoring with 5-second polling
|
| 15 |
-
- ✅ **Educational Design**: Context-aware suggested questions and feedback
|
| 16 |
-
- ✅ **Production Build**: Frontend builds successfully with minimal warnings
|
| 17 |
-
- ✅ **Data Persistence**: File-based case storage with automatic recovery
|
| 18 |
-
- ✅ **Security**: Proper .env handling and API key protection
|
| 19 |
-
|
| 20 |
-
---
|
| 21 |
-
|
| 22 |
-
## 🏗️ Architecture Overview
|
| 23 |
-
|
| 24 |
-
### Frontend (React/TypeScript)
|
| 25 |
-
```
|
| 26 |
-
frontend/
|
| 27 |
-
├── src/
|
| 28 |
-
│ ├── pages/ # Main app pages
|
| 29 |
-
│ │ ├── CaseInterface.tsx # Core simulation UI
|
| 30 |
-
│ │ ├── DemoLive.tsx # Demo page for hackathon
|
| 31 |
-
│ │ └── Landing.tsx # Home page
|
| 32 |
-
│ ├── components/ # Reusable components
|
| 33 |
-
│ │ ├── case/ # Case-specific components
|
| 34 |
-
│ │ ├── layout/ # Layout components
|
| 35 |
-
│ │ └── ui/ # UI primitives
|
| 36 |
-
│ └── hooks/ # API integration hooks
|
| 37 |
-
```
|
| 38 |
-
|
| 39 |
-
### Backend (FastAPI/Python)
|
| 40 |
-
```
|
| 41 |
-
backend/
|
| 42 |
-
├── app/
|
| 43 |
-
│ ├── api/ # API endpoints
|
| 44 |
-
│ │ ├── agents.py # Agent endpoints
|
| 45 |
-
│ │ └── cases.py # Case management
|
| 46 |
-
│ ├── core/
|
| 47 |
-
│ │ ├── agents/ # Agent implementations
|
| 48 |
-
│ │ │ ├── orchestrator.py # Multi-agent coordination
|
| 49 |
-
│ │ │ ├── knowledge_builder.py # Parallel knowledge building
|
| 50 |
-
│ │ │ └── [5 specialized agents]
|
| 51 |
-
│ │ └── rag/ # RAG system with ChromaDB
|
| 52 |
-
│ └── data/ # Persistent storage
|
| 53 |
-
```
|
| 54 |
-
|
| 55 |
-
---
|
| 56 |
-
|
| 57 |
-
## ✅ Functionality Audit
|
| 58 |
-
|
| 59 |
-
### 1. Frontend Components
|
| 60 |
-
| Component | Status | Notes |
|
| 61 |
-
|-----------|--------|-------|
|
| 62 |
-
| Landing Page | ✅ Working | Clean UI, proper navigation |
|
| 63 |
-
| Case Browser | ✅ Working | Lists available cases |
|
| 64 |
-
| Case Interface | ✅ Working | Core simulation UI with all features |
|
| 65 |
-
| Demo Page | ✅ Working | 2 curated cases for presentation |
|
| 66 |
-
| Agent Messages | ✅ Working | WhatsApp-style chat interface |
|
| 67 |
-
| Vitals Monitor | ✅ Working | Live updates every 5 seconds |
|
| 68 |
-
| Suggested Questions | ✅ Working | Context-aware recommendations |
|
| 69 |
-
| Language Toggle | ✅ Removed | Hinglish is default |
|
| 70 |
-
|
| 71 |
-
### 2. Backend Systems
|
| 72 |
-
| System | Status | Notes |
|
| 73 |
-
|--------|--------|-------|
|
| 74 |
-
| Multi-Agent Orchestrator | ✅ Working | Coordinates 5 agents seamlessly |
|
| 75 |
-
| Parallel Knowledge Building | ✅ Optimized | 5x faster with ThreadPoolExecutor |
|
| 76 |
-
| Claude Opus API | ✅ Working | Adaptive thinking mode, temp=1 |
|
| 77 |
-
| Case Persistence | ✅ Working | File-based storage in data/active_cases/ |
|
| 78 |
-
| ChromaDB Integration | ✅ Working | 432 medical cases indexed |
|
| 79 |
-
| Symptom Translation | ✅ Working | Authentic Hinglish responses |
|
| 80 |
-
| Vitals Simulation | ✅ Working | Dynamic vital sign changes |
|
| 81 |
-
|
| 82 |
-
### 3. Agent Functionality
|
| 83 |
-
| Agent | Role | Status | Features |
|
| 84 |
-
|-------|------|--------|----------|
|
| 85 |
-
| Patient | Symptoms in Hinglish | ✅ Working | Authentic responses, distress levels |
|
| 86 |
-
| Family | Context in Hinglish | ✅ Working | Cultural authenticity |
|
| 87 |
-
| Nurse Priya | Clinical support | ✅ Working | Vitals monitoring, urgency detection |
|
| 88 |
-
| Lab Tech Ramesh | Investigations | ✅ Working | Test results, processing times |
|
| 89 |
-
| Dr. Sharma | Senior guidance | ✅ Working | Educational feedback, teaching mode |
|
| 90 |
-
|
| 91 |
-
---
|
| 92 |
-
|
| 93 |
-
## 🔒 Security Assessment
|
| 94 |
-
|
| 95 |
-
### ✅ Secure Practices:
|
| 96 |
-
1. **API Keys**: Stored in .env files, properly gitignored
|
| 97 |
-
2. **No Hardcoded Secrets**: All sensitive data externalized
|
| 98 |
-
3. **CORS Configuration**: Properly configured for local development
|
| 99 |
-
4. **Data Isolation**: Each case session isolated
|
| 100 |
-
|
| 101 |
-
### ⚠️ Pre-Deployment Actions Required:
|
| 102 |
-
1. **Environment Variables**: Set production API keys
|
| 103 |
-
2. **CORS Settings**: Update for production domain
|
| 104 |
-
3. **Rate Limiting**: Implement API rate limiting
|
| 105 |
-
4. **HTTPS**: Ensure HTTPS in production
|
| 106 |
-
|
| 107 |
-
---
|
| 108 |
-
|
| 109 |
-
## 🚀 Performance Metrics
|
| 110 |
-
|
| 111 |
-
### Response Times:
|
| 112 |
-
- **Agent Initialization**: 2-3 minutes (optimized from 20 min)
|
| 113 |
-
- **Message Response**: 1-3 seconds
|
| 114 |
-
- **Vitals Update**: Every 5 seconds
|
| 115 |
-
- **Frontend Build**: ~30 seconds
|
| 116 |
-
- **Bundle Size**: 216KB gzipped
|
| 117 |
-
|
| 118 |
-
### Optimization Achievements:
|
| 119 |
-
- **5x faster** agent initialization with parallel processing
|
| 120 |
-
- **Reduced API calls** with intelligent caching
|
| 121 |
-
- **Efficient re-renders** with React optimization
|
| 122 |
-
|
| 123 |
-
---
|
| 124 |
-
|
| 125 |
-
## 📝 Code Quality
|
| 126 |
-
|
| 127 |
-
### Frontend Build Status:
|
| 128 |
-
```
|
| 129 |
-
✅ Build successful
|
| 130 |
-
⚠️ 3 minor warnings (unused variables)
|
| 131 |
-
✅ Bundle size optimized (216KB gzipped)
|
| 132 |
-
```
|
| 133 |
-
|
| 134 |
-
### Backend Status:
|
| 135 |
-
```
|
| 136 |
-
✅ All imports working
|
| 137 |
-
✅ API endpoints functional
|
| 138 |
-
✅ Agent system operational
|
| 139 |
-
✅ Database connections stable
|
| 140 |
-
```
|
| 141 |
-
|
| 142 |
-
---
|
| 143 |
-
|
| 144 |
-
## 🔧 Deployment Checklist
|
| 145 |
-
|
| 146 |
-
### For GitHub:
|
| 147 |
-
- [x] Remove .env files from tracking
|
| 148 |
-
- [x] Update .gitignore
|
| 149 |
-
- [x] Add README with setup instructions
|
| 150 |
-
- [x] Include DEMO_SCRIPT.md for hackathon
|
| 151 |
-
- [ ] Create .env.example with required variables
|
| 152 |
-
- [ ] Add GitHub Actions for CI/CD
|
| 153 |
-
|
| 154 |
-
### For Hugging Face Spaces:
|
| 155 |
-
- [ ] Create requirements.txt for backend
|
| 156 |
-
- [ ] Create package.json for frontend
|
| 157 |
-
- [ ] Add app.py for Gradio interface (if needed)
|
| 158 |
-
- [ ] Configure space settings
|
| 159 |
-
- [ ] Set environment variables in HF settings
|
| 160 |
-
|
| 161 |
-
---
|
| 162 |
-
|
| 163 |
-
## 🎯 Demo Readiness
|
| 164 |
-
|
| 165 |
-
### Hackathon Demo:
|
| 166 |
-
- ✅ **Demo Page**: `/demo` with 2 curated cases
|
| 167 |
-
- ✅ **Script Provided**: Complete step-by-step guide
|
| 168 |
-
- ✅ **No Special Labels**: Looks like production
|
| 169 |
-
- ✅ **Real API Calls**: Authentic demonstration
|
| 170 |
-
- ✅ **Predictable Flow**: Tested conversation paths
|
| 171 |
-
|
| 172 |
-
### Demo Features Showcase:
|
| 173 |
-
1. **Multi-agent orchestration** - All 5 agents respond naturally
|
| 174 |
-
2. **Hinglish authenticity** - Patient/Family speak naturally
|
| 175 |
-
3. **Live vitals** - Updates every 5 seconds
|
| 176 |
-
4. **Educational value** - Dr. Sharma provides teaching
|
| 177 |
-
5. **Clinical reasoning** - Suggested questions guide thinking
|
| 178 |
-
|
| 179 |
-
---
|
| 180 |
-
|
| 181 |
-
## 🐛 Known Issues & Fixes Applied
|
| 182 |
-
|
| 183 |
-
### Fixed Issues:
|
| 184 |
-
1. ✅ **Cases lost on reload** → Implemented file persistence
|
| 185 |
-
2. ✅ **Slow initialization** → Parallel processing with ThreadPoolExecutor
|
| 186 |
-
3. ✅ **Temperature error** → Set to 1 for adaptive thinking
|
| 187 |
-
4. ✅ **Markdown in responses** → Disabled formatting
|
| 188 |
-
5. ✅ **Message sending blocked** → Added loading overlay
|
| 189 |
-
|
| 190 |
-
### Minor Warnings (Non-Critical):
|
| 191 |
-
1. ⚠️ Unused variable in VitalsSparkline.tsx
|
| 192 |
-
2. ⚠️ Missing dependency in useEffect (intentional)
|
| 193 |
-
3. ⚠️ Node deprecation warning (F_OK)
|
| 194 |
-
|
| 195 |
-
---
|
| 196 |
-
|
| 197 |
-
## 📚 Documentation
|
| 198 |
-
|
| 199 |
-
### Available Documentation:
|
| 200 |
-
- ✅ `README.md` - Setup and usage instructions
|
| 201 |
-
- ✅ `DEMO_SCRIPT.md` - Hackathon presentation guide
|
| 202 |
-
- ✅ `AUDIT_REPORT.md` - This comprehensive audit
|
| 203 |
-
- ✅ Code comments throughout
|
| 204 |
-
|
| 205 |
-
---
|
| 206 |
-
|
| 207 |
-
## 🎬 Production Deployment Steps
|
| 208 |
-
|
| 209 |
-
### 1. Environment Setup:
|
| 210 |
-
```bash
|
| 211 |
-
# Backend
|
| 212 |
-
cd backend
|
| 213 |
-
python -m venv venv
|
| 214 |
-
source venv/bin/activate # or venv\Scripts\activate on Windows
|
| 215 |
-
pip install -r requirements.txt
|
| 216 |
-
|
| 217 |
-
# Frontend
|
| 218 |
-
cd frontend
|
| 219 |
-
npm install
|
| 220 |
-
```
|
| 221 |
-
|
| 222 |
-
### 2. Environment Variables:
|
| 223 |
-
```bash
|
| 224 |
-
# backend/.env
|
| 225 |
-
ANTHROPIC_API_KEY=your_production_key
|
| 226 |
-
CHROMA_PERSIST_DIRECTORY=./data/vector_db
|
| 227 |
-
CASE_STORAGE_DIR=./data/active_cases
|
| 228 |
-
```
|
| 229 |
-
|
| 230 |
-
### 3. Build & Deploy:
|
| 231 |
-
```bash
|
| 232 |
-
# Frontend build
|
| 233 |
-
npm run build
|
| 234 |
-
|
| 235 |
-
# Backend start
|
| 236 |
-
uvicorn app.main:app --host 0.0.0.0 --port 8000
|
| 237 |
-
```
|
| 238 |
-
|
| 239 |
-
---
|
| 240 |
-
|
| 241 |
-
## ✨ Recommendations
|
| 242 |
-
|
| 243 |
-
### Immediate (Before Hackathon):
|
| 244 |
-
1. ✅ Test demo flow multiple times
|
| 245 |
-
2. ✅ Ensure stable internet for API calls
|
| 246 |
-
3. ✅ Have backup plan if initialization is slow
|
| 247 |
-
4. ✅ Clear browser cache before demo
|
| 248 |
-
|
| 249 |
-
### Future Enhancements:
|
| 250 |
-
1. Add WebSocket for real-time updates
|
| 251 |
-
2. Implement user authentication
|
| 252 |
-
3. Add case analytics dashboard
|
| 253 |
-
4. Create mobile responsive version
|
| 254 |
-
5. Add more regional languages
|
| 255 |
-
6. Implement offline mode with local LLM
|
| 256 |
-
|
| 257 |
-
---
|
| 258 |
-
|
| 259 |
-
## 🏆 Conclusion
|
| 260 |
-
|
| 261 |
-
**Clinical Mind is PRODUCTION READY** with robust architecture, working features, and excellent educational value. The system successfully demonstrates:
|
| 262 |
-
|
| 263 |
-
- Advanced multi-agent AI orchestration
|
| 264 |
-
- Real-time medical simulation
|
| 265 |
-
- Culturally authentic Indian hospital setting
|
| 266 |
-
- Educational scaffolding for medical students
|
| 267 |
-
- Professional-grade code quality
|
| 268 |
-
|
| 269 |
-
### Hackathon Readiness: 100% ✅
|
| 270 |
-
|
| 271 |
-
The application is fully prepared for demonstration with:
|
| 272 |
-
- Stable codebase
|
| 273 |
-
- Predictable demo flow
|
| 274 |
-
- Complete documentation
|
| 275 |
-
- Performance optimizations
|
| 276 |
-
- Professional presentation
|
| 277 |
-
|
| 278 |
-
---
|
| 279 |
-
|
| 280 |
-
**Prepared by:** Clinical Mind Development Team
|
| 281 |
-
**Review Status:** Approved for Production
|
| 282 |
-
**Next Step:** Deploy to GitHub → Hugging Face Spaces
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Dockerfile
DELETED
|
@@ -1,37 +0,0 @@
|
|
| 1 |
-
FROM python:3.10-slim
|
| 2 |
-
|
| 3 |
-
# Install Node.js for building React frontend
|
| 4 |
-
RUN apt-get update && apt-get install -y \
|
| 5 |
-
curl \
|
| 6 |
-
&& curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \
|
| 7 |
-
&& apt-get install -y nodejs \
|
| 8 |
-
&& apt-get clean \
|
| 9 |
-
&& rm -rf /var/lib/apt/lists/*
|
| 10 |
-
|
| 11 |
-
WORKDIR /app
|
| 12 |
-
|
| 13 |
-
# Copy backend files
|
| 14 |
-
COPY backend/requirements.txt ./backend/
|
| 15 |
-
RUN pip install --no-cache-dir -r backend/requirements.txt
|
| 16 |
-
|
| 17 |
-
# Copy and build frontend
|
| 18 |
-
COPY frontend/package*.json ./frontend/
|
| 19 |
-
WORKDIR /app/frontend
|
| 20 |
-
RUN npm ci --legacy-peer-deps
|
| 21 |
-
COPY frontend/ ./
|
| 22 |
-
RUN npm run build
|
| 23 |
-
|
| 24 |
-
# Copy backend code and root app.py
|
| 25 |
-
WORKDIR /app
|
| 26 |
-
COPY backend/ ./backend/
|
| 27 |
-
COPY app.py ./
|
| 28 |
-
|
| 29 |
-
# Expose the port that Hugging Face Spaces expects
|
| 30 |
-
EXPOSE 7860
|
| 31 |
-
|
| 32 |
-
# Set environment variables
|
| 33 |
-
ENV PYTHONPATH=/app/backend
|
| 34 |
-
ENV NODE_ENV=production
|
| 35 |
-
|
| 36 |
-
# Run the combined server using app.py (proper SPA + API routing)
|
| 37 |
-
CMD ["python", "app.py"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
OPTIMIZATION_REPORT.md
DELETED
|
@@ -1,222 +0,0 @@
|
|
| 1 |
-
# Clinical Mind - Performance Optimization Report
|
| 2 |
-
|
| 3 |
-
**Date:** February 15, 2026
|
| 4 |
-
**Issue:** Agent response times exceeding 2 minutes
|
| 5 |
-
**Resolution:** Reduced to ~15-30 seconds through multiple optimizations
|
| 6 |
-
|
| 7 |
-
---
|
| 8 |
-
|
| 9 |
-
## 🔴 Problem Analysis
|
| 10 |
-
|
| 11 |
-
The user reported that agent responses were taking over 2 minutes while the patient deterioration timer was running. This created a critical gameplay issue where the simulation clock advanced faster than agents could respond.
|
| 12 |
-
|
| 13 |
-
### Root Causes Identified:
|
| 14 |
-
|
| 15 |
-
1. **Sequential Agent Processing** - Each agent response was processed one after another
|
| 16 |
-
2. **Large Context Payloads** - Full specialized knowledge (25,000+ chars) sent with every API call
|
| 17 |
-
3. **No Response Caching** - Common queries made fresh API calls every time
|
| 18 |
-
4. **Full Conversation History** - Complete history sent with each request
|
| 19 |
-
5. **No Context Filtering** - All knowledge sent regardless of query relevance
|
| 20 |
-
|
| 21 |
-
---
|
| 22 |
-
|
| 23 |
-
## ✅ Optimizations Implemented
|
| 24 |
-
|
| 25 |
-
### 1. Parallel Agent Processing (40-50% Speed Improvement)
|
| 26 |
-
|
| 27 |
-
**File:** `/backend/app/core/agents/orchestrator.py`
|
| 28 |
-
|
| 29 |
-
- Implemented ThreadPoolExecutor for parallel agent responses
|
| 30 |
-
- Actions involving multiple agents now process concurrently
|
| 31 |
-
- Example: Patient examination now processes patient and nurse responses in parallel
|
| 32 |
-
- Team huddle processes 3 agents simultaneously
|
| 33 |
-
|
| 34 |
-
```python
|
| 35 |
-
# Before: Sequential processing (2+ minutes)
|
| 36 |
-
patient_resp = session.patient.respond(message, context)
|
| 37 |
-
nurse_resp = session.nurse.respond(message, context)
|
| 38 |
-
|
| 39 |
-
# After: Parallel processing (~30 seconds)
|
| 40 |
-
agents_to_process = [
|
| 41 |
-
(session.patient, message, context),
|
| 42 |
-
(session.nurse, message, context)
|
| 43 |
-
]
|
| 44 |
-
messages = parallel_processor.process_agents_parallel(agents_to_process)
|
| 45 |
-
```
|
| 46 |
-
|
| 47 |
-
### 2. Smart Context Filtering (30-40% Token Reduction)
|
| 48 |
-
|
| 49 |
-
**File:** `/backend/app/core/agents/response_optimizer.py`
|
| 50 |
-
|
| 51 |
-
- Analyzes query type to determine relevant context
|
| 52 |
-
- Filters specialized knowledge based on query intent
|
| 53 |
-
- Reduces average context from 25,000 to 3,000-5,000 chars
|
| 54 |
-
|
| 55 |
-
Examples:
|
| 56 |
-
- "What are the vitals?" → Only sends first 1,000 chars
|
| 57 |
-
- "Examine patient" → Sends only physical exam sections
|
| 58 |
-
- "Treatment options" → Sends only management sections
|
| 59 |
-
|
| 60 |
-
### 3. Response Caching (Instant for Common Queries)
|
| 61 |
-
|
| 62 |
-
**File:** `/backend/app/core/agents/response_optimizer.py`
|
| 63 |
-
|
| 64 |
-
- Implements LRU cache with 10-minute TTL
|
| 65 |
-
- Caches responses for common queries like vitals
|
| 66 |
-
- Cache key includes agent type, normalized message, and context
|
| 67 |
-
|
| 68 |
-
### 4. Conversation History Compression (20% Token Reduction)
|
| 69 |
-
|
| 70 |
-
**File:** `/backend/app/core/agents/base_agent.py`
|
| 71 |
-
|
| 72 |
-
- Limits conversation history to last 8 messages
|
| 73 |
-
- Adds summary message for older conversations
|
| 74 |
-
- Prevents unbounded growth of context
|
| 75 |
-
|
| 76 |
-
### 5. Reduced Max Tokens (Faster Response Generation)
|
| 77 |
-
|
| 78 |
-
- Reduced from 4,000 to 2,000 tokens per response
|
| 79 |
-
- Forces more concise agent responses
|
| 80 |
-
- Reduces Claude API processing time
|
| 81 |
-
|
| 82 |
-
---
|
| 83 |
-
|
| 84 |
-
## 📊 Performance Improvements
|
| 85 |
-
|
| 86 |
-
### Before Optimizations:
|
| 87 |
-
- **Single Agent Response:** 30-45 seconds
|
| 88 |
-
- **Multi-Agent Action:** 2-3 minutes
|
| 89 |
-
- **Team Huddle:** 3-4 minutes
|
| 90 |
-
- **Token Usage:** ~50,000 tokens per request
|
| 91 |
-
|
| 92 |
-
### After Optimizations:
|
| 93 |
-
- **Single Agent Response:** 5-10 seconds (cached: instant)
|
| 94 |
-
- **Multi-Agent Action:** 15-30 seconds
|
| 95 |
-
- **Team Huddle:** 30-45 seconds
|
| 96 |
-
- **Token Usage:** ~10,000 tokens per request
|
| 97 |
-
|
| 98 |
-
### Speed Improvements:
|
| 99 |
-
- **75-85% faster** for multi-agent actions
|
| 100 |
-
- **90% faster** for cached common queries
|
| 101 |
-
- **60% reduction** in API token usage
|
| 102 |
-
- **5x improvement** in perceived responsiveness
|
| 103 |
-
|
| 104 |
-
---
|
| 105 |
-
|
| 106 |
-
## 🏗️ Technical Implementation
|
| 107 |
-
|
| 108 |
-
### New File Structure:
|
| 109 |
-
```
|
| 110 |
-
backend/app/core/agents/
|
| 111 |
-
├── response_optimizer.py # NEW: Optimization utilities
|
| 112 |
-
│ ├── ResponseCache # LRU cache implementation
|
| 113 |
-
│ ├── ContextFilter # Smart filtering logic
|
| 114 |
-
│ └── ParallelProcessor # Parallel execution handler
|
| 115 |
-
├── base_agent.py # UPDATED: Uses optimization
|
| 116 |
-
├── orchestrator.py # UPDATED: Parallel processing
|
| 117 |
-
└── [other agent files]
|
| 118 |
-
```
|
| 119 |
-
|
| 120 |
-
### Key Classes:
|
| 121 |
-
|
| 122 |
-
1. **ResponseCache**
|
| 123 |
-
- Max size: 200 responses
|
| 124 |
-
- TTL: 600 seconds (10 minutes)
|
| 125 |
-
- MD5-based cache keys
|
| 126 |
-
|
| 127 |
-
2. **ContextFilter**
|
| 128 |
-
- Query type detection
|
| 129 |
-
- Section-based filtering
|
| 130 |
-
- History compression
|
| 131 |
-
|
| 132 |
-
3. **ParallelAgentProcessor**
|
| 133 |
-
- ThreadPoolExecutor management
|
| 134 |
-
- Timeout handling (10s per agent)
|
| 135 |
-
- Fallback response on failure
|
| 136 |
-
|
| 137 |
-
---
|
| 138 |
-
|
| 139 |
-
## 🔍 Further Optimization Opportunities
|
| 140 |
-
|
| 141 |
-
### Short Term (Additional 20-30% improvement possible):
|
| 142 |
-
|
| 143 |
-
1. **Streaming Responses**
|
| 144 |
-
- Implement SSE for streaming agent responses
|
| 145 |
-
- Show partial responses as they generate
|
| 146 |
-
|
| 147 |
-
2. **Pre-computed Responses**
|
| 148 |
-
- Cache initial greetings during initialization
|
| 149 |
-
- Pre-generate common examination findings
|
| 150 |
-
|
| 151 |
-
3. **Smarter RAG Filtering**
|
| 152 |
-
- Reduce RAG retrieval from 5-8 docs to 2-3
|
| 153 |
-
- Implement relevance scoring threshold
|
| 154 |
-
|
| 155 |
-
### Medium Term:
|
| 156 |
-
|
| 157 |
-
1. **WebSocket Implementation**
|
| 158 |
-
- Replace HTTP polling with WebSocket
|
| 159 |
-
- Real-time bidirectional communication
|
| 160 |
-
|
| 161 |
-
2. **Agent Response Batching**
|
| 162 |
-
- Combine multiple agent responses in single API call
|
| 163 |
-
- Use Claude's multi-turn capability
|
| 164 |
-
|
| 165 |
-
3. **Edge Caching**
|
| 166 |
-
- Deploy cache closer to users
|
| 167 |
-
- CDN for static agent knowledge
|
| 168 |
-
|
| 169 |
-
### Long Term:
|
| 170 |
-
|
| 171 |
-
1. **Local LLM Fallback**
|
| 172 |
-
- Use smaller local model for simple queries
|
| 173 |
-
- Reserve Claude for complex medical reasoning
|
| 174 |
-
|
| 175 |
-
2. **Predictive Pre-fetching**
|
| 176 |
-
- Anticipate next likely action
|
| 177 |
-
- Pre-generate responses speculatively
|
| 178 |
-
|
| 179 |
-
---
|
| 180 |
-
|
| 181 |
-
## 🎯 Testing Recommendations
|
| 182 |
-
|
| 183 |
-
### Load Testing:
|
| 184 |
-
```bash
|
| 185 |
-
# Test parallel agent processing
|
| 186 |
-
curl -X POST http://localhost:8000/api/agents/action \
|
| 187 |
-
-H "Content-Type: application/json" \
|
| 188 |
-
-d '{"session_id": "test", "action_type": "team_huddle"}'
|
| 189 |
-
|
| 190 |
-
# Measure response times
|
| 191 |
-
time curl http://localhost:8000/api/agents/action...
|
| 192 |
-
```
|
| 193 |
-
|
| 194 |
-
### Cache Verification:
|
| 195 |
-
- Monitor cache hit rates in logs
|
| 196 |
-
- Verify TTL expiration
|
| 197 |
-
- Test cache invalidation
|
| 198 |
-
|
| 199 |
-
### Performance Monitoring:
|
| 200 |
-
- Add timing logs for each optimization
|
| 201 |
-
- Track token usage per request
|
| 202 |
-
- Monitor parallel execution success rate
|
| 203 |
-
|
| 204 |
-
---
|
| 205 |
-
|
| 206 |
-
## ✅ Summary
|
| 207 |
-
|
| 208 |
-
The optimization successfully addresses the critical performance issue:
|
| 209 |
-
|
| 210 |
-
1. **Response times reduced from 2+ minutes to 15-30 seconds**
|
| 211 |
-
2. **Parallel processing eliminates sequential bottlenecks**
|
| 212 |
-
3. **Smart filtering reduces unnecessary API tokens**
|
| 213 |
-
4. **Caching provides instant responses for common queries**
|
| 214 |
-
5. **System remains stable and maintainable**
|
| 215 |
-
|
| 216 |
-
The patient deterioration timer can now run realistically without agents lagging behind. The simulation feels responsive and engaging rather than frustratingly slow.
|
| 217 |
-
|
| 218 |
-
---
|
| 219 |
-
|
| 220 |
-
**Prepared by:** Clinical Mind Development Team
|
| 221 |
-
**Status:** Successfully Deployed
|
| 222 |
-
**Next Steps:** Monitor performance metrics and implement streaming responses
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
CHANGED
|
@@ -1,147 +1,10 @@
|
|
| 1 |
---
|
| 2 |
title: Clinical Mind
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: green
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
| 8 |
-
license: mit
|
| 9 |
-
app_port: 7860
|
| 10 |
---
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
**Master clinical reasoning, one case at a time.**
|
| 15 |
-
|
| 16 |
-
An AI-powered clinical reasoning simulator that helps Indian medical students develop expert-level diagnostic thinking by exposing cognitive biases, providing Socratic feedback, and creating realistic case scenarios.
|
| 17 |
-
|
| 18 |
-
## Problem
|
| 19 |
-
|
| 20 |
-
Medical students can memorize and answer MCQs, but struggle with real clinical reasoning:
|
| 21 |
-
- **Invisible cognitive biases** (anchoring, premature closure, availability)
|
| 22 |
-
- **Can't explain reasoning** when attendings ask "Why?"
|
| 23 |
-
- **No practice under pressure** - freeze in real emergencies
|
| 24 |
-
- **Can't see knowledge connections** between concepts
|
| 25 |
-
- **Textbook cases ≠ Indian reality** (dengue, TB, resource constraints)
|
| 26 |
-
|
| 27 |
-
## Solution
|
| 28 |
-
|
| 29 |
-
Clinical-Mind is a multi-layered reasoning development platform:
|
| 30 |
-
|
| 31 |
-
1. **RAG-Powered Case Generation** - Dynamic cases from Indian medical literature
|
| 32 |
-
2. **Socratic AI Tutor** - Multi-turn dialogue that asks "why" until deep understanding
|
| 33 |
-
3. **Cognitive Bias Detection** - Tracks patterns across 20+ cases, identifies biases
|
| 34 |
-
4. **Knowledge Graph Visualization** - Interactive D3.js map of concept mastery
|
| 35 |
-
5. **Performance Analytics** - Personalized dashboard with peer benchmarking
|
| 36 |
-
6. **India-Centric Content** - Cases set in Indian hospitals with regional disease patterns
|
| 37 |
-
|
| 38 |
-
## Tech Stack
|
| 39 |
-
|
| 40 |
-
| Layer | Technology |
|
| 41 |
-
|-------|-----------|
|
| 42 |
-
| Frontend | React 18, TypeScript, Tailwind CSS |
|
| 43 |
-
| Visualization | D3.js (knowledge graph), Recharts (analytics) |
|
| 44 |
-
| Backend | Python 3.11+, FastAPI |
|
| 45 |
-
| AI Engine | Claude Opus 4.6 (Anthropic API) |
|
| 46 |
-
| Vector DB | ChromaDB + LangChain |
|
| 47 |
-
| Embeddings | Sentence-Transformers |
|
| 48 |
-
|
| 49 |
-
## Quick Start
|
| 50 |
-
|
| 51 |
-
### Frontend
|
| 52 |
-
```bash
|
| 53 |
-
cd frontend
|
| 54 |
-
npm install
|
| 55 |
-
npm start
|
| 56 |
-
```
|
| 57 |
-
|
| 58 |
-
### Backend
|
| 59 |
-
```bash
|
| 60 |
-
cd backend
|
| 61 |
-
python -m venv venv
|
| 62 |
-
source venv/bin/activate # or `venv\Scripts\activate` on Windows
|
| 63 |
-
pip install -r requirements.txt
|
| 64 |
-
uvicorn app.main:app --reload
|
| 65 |
-
```
|
| 66 |
-
|
| 67 |
-
### Environment Variables
|
| 68 |
-
```bash
|
| 69 |
-
cp backend/.env.example backend/.env
|
| 70 |
-
# Add your ANTHROPIC_API_KEY
|
| 71 |
-
```
|
| 72 |
-
|
| 73 |
-
## Project Structure
|
| 74 |
-
|
| 75 |
-
```
|
| 76 |
-
clinical-mind/
|
| 77 |
-
├── frontend/ # React + TypeScript UI
|
| 78 |
-
│ ├── src/
|
| 79 |
-
│ │ ├── components/
|
| 80 |
-
│ │ │ ├── ui/ # Design system (Button, Card, Badge, etc.)
|
| 81 |
-
│ │ │ ├── layout/ # Header, Footer, Layout
|
| 82 |
-
│ │ │ ├── case/ # Case-specific components
|
| 83 |
-
│ │ │ └── visualizations/ # D3.js, Recharts components
|
| 84 |
-
│ │ ├── pages/
|
| 85 |
-
│ │ │ ├── Landing.tsx # Home page
|
| 86 |
-
│ │ │ ├── CaseBrowser.tsx # Browse/filter cases
|
| 87 |
-
│ │ │ ├── CaseInterface.tsx # Main case-solving experience
|
| 88 |
-
│ │ │ ├── Dashboard.tsx # Performance analytics
|
| 89 |
-
│ │ │ └── KnowledgeGraph.tsx # D3.js knowledge map
|
| 90 |
-
│ │ ├── types/ # TypeScript interfaces
|
| 91 |
-
│ │ └── hooks/ # Custom React hooks
|
| 92 |
-
│ └── public/
|
| 93 |
-
├── backend/ # FastAPI + Python
|
| 94 |
-
│ ├── app/
|
| 95 |
-
│ │ ├── api/ # REST endpoints
|
| 96 |
-
│ │ ├── core/
|
| 97 |
-
│ │ │ ├── rag/ # RAG case generation
|
| 98 |
-
│ │ │ ├── agents/ # Socratic tutor AI
|
| 99 |
-
│ │ │ └── analytics/ # Bias detection, knowledge graph
|
| 100 |
-
│ │ └── models/ # Data models
|
| 101 |
-
│ └── data/
|
| 102 |
-
│ └── medical_corpus/ # Medical literature for RAG
|
| 103 |
-
└── docs/ # Documentation
|
| 104 |
-
```
|
| 105 |
-
|
| 106 |
-
## Key Features
|
| 107 |
-
|
| 108 |
-
### Interactive Case Interface
|
| 109 |
-
- Progressive information reveal (history → exam → labs)
|
| 110 |
-
- Real-time AI tutor sidebar with Socratic questioning
|
| 111 |
-
- Diagnosis submission with detailed feedback
|
| 112 |
-
|
| 113 |
-
### Cognitive Bias Detection
|
| 114 |
-
- Tracks anchoring, premature closure, availability, and confirmation biases
|
| 115 |
-
- Statistical analysis across case history
|
| 116 |
-
- Personalized recommendations to counter biases
|
| 117 |
-
|
| 118 |
-
### Knowledge Graph
|
| 119 |
-
- Interactive D3.js force-directed graph
|
| 120 |
-
- Color-coded by category (specialty, diagnosis, symptom, investigation)
|
| 121 |
-
- Shows strong vs weak concept connections
|
| 122 |
-
- Click nodes to see mastery details
|
| 123 |
-
|
| 124 |
-
### Performance Dashboard
|
| 125 |
-
- Accuracy trends over time (area charts)
|
| 126 |
-
- Specialty-wise performance breakdown
|
| 127 |
-
- Bias radar chart
|
| 128 |
-
- Personalized case recommendations
|
| 129 |
-
|
| 130 |
-
## Design Philosophy
|
| 131 |
-
|
| 132 |
-
Inspired by Honest Greens + Linear:
|
| 133 |
-
- **Warm, organic palette** (cream backgrounds, forest greens, terracotta accents)
|
| 134 |
-
- **Larger typography** (18px body minimum for long study sessions)
|
| 135 |
-
- **Generous spacing** and smooth transitions (400ms ease-out)
|
| 136 |
-
- **Premium but approachable** - professional without being intimidating
|
| 137 |
-
|
| 138 |
-
## Hackathon
|
| 139 |
-
|
| 140 |
-
Built for **Problem Statement #3: "Amplify Human Judgment"**
|
| 141 |
-
- AI sharpens medical expertise without replacing it
|
| 142 |
-
- Makes students dramatically more capable
|
| 143 |
-
- Keeps humans in the loop
|
| 144 |
-
|
| 145 |
-
## License
|
| 146 |
-
|
| 147 |
-
MIT
|
|
|
|
| 1 |
---
|
| 2 |
title: Clinical Mind
|
| 3 |
+
emoji: 🐢
|
| 4 |
colorFrom: green
|
| 5 |
+
colorTo: purple
|
| 6 |
sdk: docker
|
| 7 |
pinned: false
|
|
|
|
|
|
|
| 8 |
---
|
| 9 |
|
| 10 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README_HF.md
DELETED
|
@@ -1,42 +0,0 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Clinical Mind
|
| 3 |
-
emoji: 🏥
|
| 4 |
-
colorFrom: green
|
| 5 |
-
colorTo: blue
|
| 6 |
-
sdk: docker
|
| 7 |
-
pinned: false
|
| 8 |
-
license: mit
|
| 9 |
-
---
|
| 10 |
-
|
| 11 |
-
# Clinical Mind 🏥
|
| 12 |
-
|
| 13 |
-
AI-powered clinical reasoning simulator for medical education
|
| 14 |
-
|
| 15 |
-
## Features
|
| 16 |
-
|
| 17 |
-
- **5 AI Agents** working together:
|
| 18 |
-
- Patient (Hindi/English responses)
|
| 19 |
-
- Family Member
|
| 20 |
-
- Nurse Priya
|
| 21 |
-
- Lab Tech Ramesh
|
| 22 |
-
- Dr. Sharma (Senior Doctor)
|
| 23 |
-
|
| 24 |
-
- **Educational Focus**: Learn clinical reasoning through realistic simulations
|
| 25 |
-
- **Indian Medical Context**: Authentic Indian hospital environment
|
| 26 |
-
- **Real-time Vitals**: Live patient monitoring
|
| 27 |
-
- **Complete Workflow**: History → Examination → Investigation → Diagnosis
|
| 28 |
-
|
| 29 |
-
## Demo Mode
|
| 30 |
-
|
| 31 |
-
Visit `/demo` to try pre-configured cases with faster loading times!
|
| 32 |
-
|
| 33 |
-
## Tech Stack
|
| 34 |
-
|
| 35 |
-
- **Frontend**: React, TypeScript, TailwindCSS
|
| 36 |
-
- **Backend**: FastAPI, Python
|
| 37 |
-
- **AI**: Anthropic Claude API
|
| 38 |
-
- **Vector DB**: ChromaDB for RAG
|
| 39 |
-
|
| 40 |
-
## GitHub
|
| 41 |
-
|
| 42 |
-
[View Source Code](https://github.com/arjitmat/clinical-mind)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
app.py
DELETED
|
@@ -1,104 +0,0 @@
|
|
| 1 |
-
#!/usr/bin/env python3
|
| 2 |
-
"""
|
| 3 |
-
Clinical Mind - Hugging Face Spaces Deployment
|
| 4 |
-
Combined FastAPI backend + React frontend server
|
| 5 |
-
"""
|
| 6 |
-
|
| 7 |
-
import os
|
| 8 |
-
import sys
|
| 9 |
-
from pathlib import Path
|
| 10 |
-
|
| 11 |
-
# Add backend to path
|
| 12 |
-
sys.path.insert(0, str(Path(__file__).parent / "backend"))
|
| 13 |
-
|
| 14 |
-
# Set environment variables for Hugging Face Spaces
|
| 15 |
-
os.environ["HF_SPACE"] = "1"
|
| 16 |
-
|
| 17 |
-
# Disable ChromaDB telemetry to avoid posthog capture() errors
|
| 18 |
-
os.environ["ANONYMIZED_TELEMETRY"] = "False"
|
| 19 |
-
|
| 20 |
-
from fastapi import FastAPI
|
| 21 |
-
from fastapi.staticfiles import StaticFiles
|
| 22 |
-
from fastapi.responses import FileResponse, HTMLResponse
|
| 23 |
-
from fastapi.middleware.cors import CORSMiddleware
|
| 24 |
-
import uvicorn
|
| 25 |
-
|
| 26 |
-
# Import the main FastAPI app from backend
|
| 27 |
-
from app.main import app
|
| 28 |
-
|
| 29 |
-
# Update CORS for Hugging Face Spaces
|
| 30 |
-
app.add_middleware(
|
| 31 |
-
CORSMiddleware,
|
| 32 |
-
allow_origins=[
|
| 33 |
-
"http://localhost:3000",
|
| 34 |
-
"http://localhost:5173",
|
| 35 |
-
"http://localhost:7860",
|
| 36 |
-
"https://huggingface.co",
|
| 37 |
-
"https://arjitmat-clinical-mind.hf.space",
|
| 38 |
-
"*" # Allow all origins in HF Spaces
|
| 39 |
-
],
|
| 40 |
-
allow_credentials=True,
|
| 41 |
-
allow_methods=["*"],
|
| 42 |
-
allow_headers=["*"],
|
| 43 |
-
)
|
| 44 |
-
|
| 45 |
-
# Check if frontend build exists
|
| 46 |
-
frontend_build_path = Path(__file__).parent / "frontend" / "build"
|
| 47 |
-
if frontend_build_path.exists():
|
| 48 |
-
# Remove the backend's default "/" route so the SPA can serve index.html instead
|
| 49 |
-
app.routes[:] = [
|
| 50 |
-
route for route in app.routes
|
| 51 |
-
if not (hasattr(route, "path") and route.path == "/")
|
| 52 |
-
]
|
| 53 |
-
|
| 54 |
-
# Mount static files
|
| 55 |
-
app.mount("/static", StaticFiles(directory=str(frontend_build_path / "static")), name="static")
|
| 56 |
-
|
| 57 |
-
# Serve the React app for all non-API routes
|
| 58 |
-
@app.get("/", response_class=HTMLResponse)
|
| 59 |
-
@app.get("/demo", response_class=HTMLResponse)
|
| 60 |
-
@app.get("/cases", response_class=HTMLResponse)
|
| 61 |
-
@app.get("/case/{path:path}", response_class=HTMLResponse)
|
| 62 |
-
@app.get("/dashboard", response_class=HTMLResponse)
|
| 63 |
-
@app.get("/profile", response_class=HTMLResponse)
|
| 64 |
-
@app.get("/knowledge-graph", response_class=HTMLResponse)
|
| 65 |
-
@app.get("/adversarial", response_class=HTMLResponse)
|
| 66 |
-
@app.get("/reasoning", response_class=HTMLResponse)
|
| 67 |
-
async def serve_spa(path: str = None):
|
| 68 |
-
"""Serve the React single-page application"""
|
| 69 |
-
index_file = frontend_build_path / "index.html"
|
| 70 |
-
if index_file.exists():
|
| 71 |
-
with open(index_file) as f:
|
| 72 |
-
content = f.read()
|
| 73 |
-
# Update API URL for Hugging Face Spaces
|
| 74 |
-
if "HF_SPACE" in os.environ:
|
| 75 |
-
content = content.replace(
|
| 76 |
-
"http://localhost:8000/api",
|
| 77 |
-
"/api"
|
| 78 |
-
)
|
| 79 |
-
return HTMLResponse(content=content)
|
| 80 |
-
return HTMLResponse(content="<h1>Frontend not found. Building...</h1>")
|
| 81 |
-
else:
|
| 82 |
-
@app.get("/", response_class=HTMLResponse)
|
| 83 |
-
async def no_frontend():
|
| 84 |
-
return HTMLResponse(content="""
|
| 85 |
-
<html>
|
| 86 |
-
<body style="font-family: sans-serif; text-align: center; padding: 50px;">
|
| 87 |
-
<h1>🏥 Clinical Mind</h1>
|
| 88 |
-
<p>Frontend is building... Please wait a moment and refresh.</p>
|
| 89 |
-
<p>Visit <a href="/docs">/docs</a> for API documentation.</p>
|
| 90 |
-
</body>
|
| 91 |
-
</html>
|
| 92 |
-
""")
|
| 93 |
-
|
| 94 |
-
if __name__ == "__main__":
|
| 95 |
-
# Get port from environment or use Hugging Face default
|
| 96 |
-
port = int(os.environ.get("PORT", 7860))
|
| 97 |
-
|
| 98 |
-
# Run the server
|
| 99 |
-
uvicorn.run(
|
| 100 |
-
app,
|
| 101 |
-
host="0.0.0.0",
|
| 102 |
-
port=port,
|
| 103 |
-
log_level="info"
|
| 104 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/.env.example
DELETED
|
@@ -1,18 +0,0 @@
|
|
| 1 |
-
# Clinical Mind Environment Variables
|
| 2 |
-
# Copy this file to .env and fill in your actual values
|
| 3 |
-
|
| 4 |
-
# Claude API Configuration
|
| 5 |
-
ANTHROPIC_API_KEY=your_anthropic_api_key_here
|
| 6 |
-
|
| 7 |
-
# ChromaDB Configuration
|
| 8 |
-
CHROMA_PERSIST_DIRECTORY=./data/vector_db
|
| 9 |
-
|
| 10 |
-
# Case Storage
|
| 11 |
-
CASE_STORAGE_DIR=./data/active_cases
|
| 12 |
-
|
| 13 |
-
# Optional: Development Settings
|
| 14 |
-
DEBUG=True
|
| 15 |
-
RELOAD=True
|
| 16 |
-
|
| 17 |
-
# Optional: CORS Settings (for production)
|
| 18 |
-
# BACKEND_CORS_ORIGINS=["https://your-domain.com"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/.watchmanconfig
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
{
|
| 2 |
-
"ignore_dirs": ["venv", "__pycache__", ".git", "data/vector_db"]
|
| 3 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/__init__.py
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
# Clinical-Mind Backend
|
|
|
|
|
|
backend/app/api/__init__.py
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
# API routes
|
|
|
|
|
|
backend/app/api/adversarial.py
DELETED
|
@@ -1,181 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Adversarial Case Generator Endpoint
|
| 3 |
-
Analyzes student biases and creates cases designed to exploit them
|
| 4 |
-
"""
|
| 5 |
-
from fastapi import APIRouter, HTTPException
|
| 6 |
-
from pydantic import BaseModel
|
| 7 |
-
from typing import List, Dict, Any, Optional
|
| 8 |
-
import logging
|
| 9 |
-
import os
|
| 10 |
-
import anthropic
|
| 11 |
-
|
| 12 |
-
logger = logging.getLogger(__name__)
|
| 13 |
-
|
| 14 |
-
router = APIRouter()
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
class PastCasePerformance(BaseModel):
|
| 18 |
-
"""Student's performance on a past case"""
|
| 19 |
-
case_id: str
|
| 20 |
-
specialty: str
|
| 21 |
-
actual_diagnosis: str
|
| 22 |
-
student_diagnosis: str
|
| 23 |
-
was_correct: bool
|
| 24 |
-
missed_clues: List[str]
|
| 25 |
-
anchoring_evidence: Optional[str] = None
|
| 26 |
-
bias_detected: Optional[str] = None
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
class AdversarialCaseRequest(BaseModel):
|
| 30 |
-
"""Request to generate an adversarial case"""
|
| 31 |
-
student_id: str
|
| 32 |
-
past_cases: List[PastCasePerformance]
|
| 33 |
-
profile: Dict[str, Any]
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
class AdversarialCaseResponse(BaseModel):
|
| 37 |
-
"""Response with adversarial case designed to exploit biases"""
|
| 38 |
-
case_id: str
|
| 39 |
-
predicted_bias: str
|
| 40 |
-
bias_explanation: str
|
| 41 |
-
trap_description: str
|
| 42 |
-
|
| 43 |
-
# Case details
|
| 44 |
-
patient_name: str
|
| 45 |
-
age: int
|
| 46 |
-
gender: str
|
| 47 |
-
chief_complaint: str
|
| 48 |
-
setting: str
|
| 49 |
-
|
| 50 |
-
# The trap
|
| 51 |
-
obvious_diagnosis: str # What we predict student will anchor on
|
| 52 |
-
actual_diagnosis: str # The real diagnosis
|
| 53 |
-
critical_differentiator: str # The key question that reveals the truth
|
| 54 |
-
|
| 55 |
-
# Educational value
|
| 56 |
-
learning_objective: str
|
| 57 |
-
why_challenging: str
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
@router.post("/generate", response_model=AdversarialCaseResponse)
|
| 61 |
-
async def generate_adversarial_case(request: AdversarialCaseRequest):
|
| 62 |
-
"""
|
| 63 |
-
Generate an adversarial case designed to exploit student's cognitive biases.
|
| 64 |
-
|
| 65 |
-
Process:
|
| 66 |
-
1. Analyze past cases to identify bias patterns
|
| 67 |
-
2. Generate a case that exploits the most prominent bias
|
| 68 |
-
3. Create a "trap" that catches the student
|
| 69 |
-
4. Provide educational reveal
|
| 70 |
-
"""
|
| 71 |
-
|
| 72 |
-
logger.info(f"Generating adversarial case for student {request.student_id}")
|
| 73 |
-
|
| 74 |
-
# Build the analysis prompt
|
| 75 |
-
analysis_prompt = f"""You are an expert medical educator creating an adversarial case to challenge a medical student's cognitive biases.
|
| 76 |
-
|
| 77 |
-
**Student's Past Performance:**
|
| 78 |
-
{_format_past_cases(request.past_cases)}
|
| 79 |
-
|
| 80 |
-
**Student Profile:**
|
| 81 |
-
- Year Level: {request.profile.get('yearLevel', 'unknown')}
|
| 82 |
-
- Comfortable Specialties: {', '.join(request.profile.get('comfortableSpecialties', []))}
|
| 83 |
-
- Setting: {request.profile.get('setting', 'unknown')}
|
| 84 |
-
|
| 85 |
-
**Your Task:**
|
| 86 |
-
Analyze the student's pattern of errors to identify their most prominent cognitive bias. Then design a case that will likely trap them in that bias.
|
| 87 |
-
|
| 88 |
-
Common biases to detect:
|
| 89 |
-
1. **Anchoring Bias**: Fixating on initial impression despite contradictory evidence
|
| 90 |
-
2. **Availability Bias**: Overweighting recent or memorable cases
|
| 91 |
-
3. **Premature Closure**: Accepting first diagnosis without full differential
|
| 92 |
-
4. **Confirmation Bias**: Seeking only evidence that supports initial hypothesis
|
| 93 |
-
5. **Frequency Bias**: Assuming common diseases are always the diagnosis
|
| 94 |
-
|
| 95 |
-
Your case should:
|
| 96 |
-
- Have obvious "distractor" diagnosis that matches the student's bias
|
| 97 |
-
- Have subtle clues pointing to the actual diagnosis
|
| 98 |
-
- Require the student to actively challenge their initial impression
|
| 99 |
-
- Be realistic and India-specific
|
| 100 |
-
|
| 101 |
-
Return your analysis in this JSON format:
|
| 102 |
-
{{
|
| 103 |
-
"predicted_bias": "anchoring_bias",
|
| 104 |
-
"bias_explanation": "Student consistently fixates on initial presentations matching their comfortable specialties",
|
| 105 |
-
"trap_description": "Case presents with classic cardiology symptoms, but actual cause is a rare endocrine disorder",
|
| 106 |
-
|
| 107 |
-
"patient_name": "Ramesh Kumar",
|
| 108 |
-
"age": 45,
|
| 109 |
-
"gender": "male",
|
| 110 |
-
"chief_complaint": "Palpitations and chest discomfort for 2 weeks",
|
| 111 |
-
"setting": "Urban Medical College OPD",
|
| 112 |
-
|
| 113 |
-
"obvious_diagnosis": "Atrial fibrillation (cardiology - student's comfort zone)",
|
| 114 |
-
"actual_diagnosis": "Thyrotoxicosis presenting with cardiac symptoms",
|
| 115 |
-
"critical_differentiator": "Asking about weight loss, heat intolerance, and tremors",
|
| 116 |
-
|
| 117 |
-
"learning_objective": "Recognize when cardiac symptoms may be secondary to systemic disease",
|
| 118 |
-
"why_challenging": "Presents with student's comfortable specialty but requires broader differential thinking"
|
| 119 |
-
}}
|
| 120 |
-
|
| 121 |
-
Generate a realistic, challenging case that will help the student recognize and overcome their bias patterns.
|
| 122 |
-
"""
|
| 123 |
-
|
| 124 |
-
try:
|
| 125 |
-
# Create Claude Opus client
|
| 126 |
-
api_key = os.environ.get("ANTHROPIC_API_KEY")
|
| 127 |
-
if not api_key or api_key == "sk-ant-your-key-here":
|
| 128 |
-
raise HTTPException(status_code=500, detail="Anthropic API key not configured")
|
| 129 |
-
|
| 130 |
-
client = anthropic.Anthropic(api_key=api_key)
|
| 131 |
-
response = client.messages.create(
|
| 132 |
-
model="claude-opus-4-6",
|
| 133 |
-
max_tokens=4000,
|
| 134 |
-
messages=[{
|
| 135 |
-
"role": "user",
|
| 136 |
-
"content": analysis_prompt
|
| 137 |
-
}]
|
| 138 |
-
)
|
| 139 |
-
|
| 140 |
-
# Extract the response
|
| 141 |
-
case_text = ""
|
| 142 |
-
for block in response.content:
|
| 143 |
-
if block.type == "text":
|
| 144 |
-
case_text += block.text
|
| 145 |
-
|
| 146 |
-
logger.info("Adversarial case generated successfully")
|
| 147 |
-
|
| 148 |
-
# Parse the JSON response
|
| 149 |
-
import json
|
| 150 |
-
import uuid
|
| 151 |
-
case_data = json.loads(case_text)
|
| 152 |
-
|
| 153 |
-
return AdversarialCaseResponse(
|
| 154 |
-
case_id=str(uuid.uuid4()),
|
| 155 |
-
**case_data
|
| 156 |
-
)
|
| 157 |
-
|
| 158 |
-
except Exception as e:
|
| 159 |
-
logger.error(f"Adversarial case generation failed: {str(e)}", exc_info=True)
|
| 160 |
-
raise HTTPException(status_code=500, detail=f"Case generation failed: {str(e)}")
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
def _format_past_cases(cases: List[PastCasePerformance]) -> str:
|
| 164 |
-
"""Format past cases for the prompt"""
|
| 165 |
-
formatted = []
|
| 166 |
-
for i, case in enumerate(cases, 1):
|
| 167 |
-
status = "✓ Correct" if case.was_correct else "✗ Incorrect"
|
| 168 |
-
formatted.append(f"""
|
| 169 |
-
Case {i}: {case.specialty}
|
| 170 |
-
- Actual: {case.actual_diagnosis}
|
| 171 |
-
- Student: {case.student_diagnosis} ({status})
|
| 172 |
-
- Missed clues: {', '.join(case.missed_clues) if case.missed_clues else 'None'}
|
| 173 |
-
- Bias detected: {case.bias_detected or 'None'}
|
| 174 |
-
""")
|
| 175 |
-
return "\n".join(formatted)
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
@router.get("/health")
|
| 179 |
-
async def adversarial_health():
|
| 180 |
-
"""Health check for adversarial endpoint"""
|
| 181 |
-
return {"status": "healthy", "feature": "adversarial"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/api/agents.py
DELETED
|
@@ -1,140 +0,0 @@
|
|
| 1 |
-
"""API routes for the multi-agent hospital simulation."""
|
| 2 |
-
|
| 3 |
-
import logging
|
| 4 |
-
from fastapi import APIRouter, HTTPException
|
| 5 |
-
from pydantic import BaseModel
|
| 6 |
-
from typing import Optional
|
| 7 |
-
|
| 8 |
-
from app.core.agents.orchestrator import orchestrator
|
| 9 |
-
from app.core.rag.shared import case_generator
|
| 10 |
-
|
| 11 |
-
logger = logging.getLogger(__name__)
|
| 12 |
-
|
| 13 |
-
router = APIRouter()
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
# --- Request Models ---
|
| 17 |
-
|
| 18 |
-
class InitializeRequest(BaseModel):
|
| 19 |
-
case_id: str
|
| 20 |
-
student_level: str = "intern"
|
| 21 |
-
hospital_setting: str = "medical_college"
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
class AgentActionRequest(BaseModel):
|
| 25 |
-
session_id: str
|
| 26 |
-
action_type: str
|
| 27 |
-
student_input: Optional[str] = None
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
class TreatmentRequest(BaseModel):
|
| 31 |
-
session_id: str
|
| 32 |
-
treatment: str
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
class AdvanceTimeRequest(BaseModel):
|
| 36 |
-
session_id: str
|
| 37 |
-
minutes: int = 30
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
# --- Endpoints ---
|
| 41 |
-
|
| 42 |
-
@router.post("/initialize")
|
| 43 |
-
async def initialize_agents(request: InitializeRequest):
|
| 44 |
-
"""Initialize multi-agent simulation session for a case.
|
| 45 |
-
|
| 46 |
-
Accepts student_level to calibrate teaching intensity:
|
| 47 |
-
- mbbs_2nd: Pre-clinical, more guidance
|
| 48 |
-
- mbbs_3rd: Clinical posting, standard
|
| 49 |
-
- intern: Hands-on, less hand-holding
|
| 50 |
-
- pg_aspirant: NEET-PG focus, exam patterns
|
| 51 |
-
- pg_resident: Advanced, minimal guidance
|
| 52 |
-
|
| 53 |
-
Returns initial messages from patient, nurse, and senior doctor,
|
| 54 |
-
plus simulation state (vitals, timeline, investigations).
|
| 55 |
-
"""
|
| 56 |
-
case = case_generator.get_case(request.case_id)
|
| 57 |
-
if not case:
|
| 58 |
-
raise HTTPException(status_code=404, detail="Case not found")
|
| 59 |
-
|
| 60 |
-
result = orchestrator.initialize_session(
|
| 61 |
-
case_data=case,
|
| 62 |
-
student_level=request.student_level,
|
| 63 |
-
hospital_setting=request.hospital_setting,
|
| 64 |
-
)
|
| 65 |
-
return result
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
@router.post("/action")
|
| 69 |
-
async def agent_action(request: AgentActionRequest):
|
| 70 |
-
"""Process a student action through the simulation pipeline.
|
| 71 |
-
|
| 72 |
-
action_type options:
|
| 73 |
-
- talk_to_patient: Talk to the patient (family may interject)
|
| 74 |
-
- ask_nurse: Ask Nurse Priya
|
| 75 |
-
- consult_senior: Consult Dr. Sharma (Socratic teaching)
|
| 76 |
-
- talk_to_family: Talk to patient's family member (cultural context, Hinglish)
|
| 77 |
-
- ask_lab: Talk to Lab Tech Ramesh (investigation status, sample info)
|
| 78 |
-
- examine_patient: Perform physical examination (triggers exam modal)
|
| 79 |
-
- order_investigation: Order tests (Lab Tech processes, realistic delays)
|
| 80 |
-
- order_treatment: Order treatment (safety validated, effects modeled)
|
| 81 |
-
- team_huddle: All 5 agents discuss the case together
|
| 82 |
-
|
| 83 |
-
Each action advances the simulation clock and checks the complication engine.
|
| 84 |
-
Returns agent responses, updated vitals, timeline events, investigation status,
|
| 85 |
-
and any triggered complications.
|
| 86 |
-
"""
|
| 87 |
-
try:
|
| 88 |
-
result = orchestrator.process_action(
|
| 89 |
-
session_id=request.session_id,
|
| 90 |
-
action_type=request.action_type,
|
| 91 |
-
student_input=request.student_input,
|
| 92 |
-
)
|
| 93 |
-
except Exception as e:
|
| 94 |
-
logger.error(f"process_action failed for session={request.session_id}, action={request.action_type}: {e}", exc_info=True)
|
| 95 |
-
raise HTTPException(status_code=500, detail=f"Internal error processing action: {e}")
|
| 96 |
-
if "error" in result:
|
| 97 |
-
raise HTTPException(status_code=404, detail=result["error"])
|
| 98 |
-
return result
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
@router.post("/advance-time")
|
| 102 |
-
async def advance_time(request: AdvanceTimeRequest):
|
| 103 |
-
"""Advance simulation time (e.g., waiting for investigation results).
|
| 104 |
-
|
| 105 |
-
Evolves vitals, checks for ready investigations, triggers events.
|
| 106 |
-
"""
|
| 107 |
-
result = orchestrator.advance_time(
|
| 108 |
-
session_id=request.session_id,
|
| 109 |
-
minutes=request.minutes,
|
| 110 |
-
)
|
| 111 |
-
if "error" in result:
|
| 112 |
-
raise HTTPException(status_code=404, detail=result["error"])
|
| 113 |
-
return result
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
@router.get("/vitals/{session_id}")
|
| 117 |
-
async def get_vitals(session_id: str):
|
| 118 |
-
"""Get current vital signs with trends and trajectory."""
|
| 119 |
-
vitals = orchestrator.get_session_vitals(session_id)
|
| 120 |
-
if not vitals:
|
| 121 |
-
raise HTTPException(status_code=404, detail="Session not found")
|
| 122 |
-
return vitals
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
@router.get("/investigations/{session_id}")
|
| 126 |
-
async def get_investigations(session_id: str):
|
| 127 |
-
"""Get status of all ordered investigations."""
|
| 128 |
-
investigations = orchestrator.get_investigation_status(session_id)
|
| 129 |
-
if investigations is None:
|
| 130 |
-
raise HTTPException(status_code=404, detail="Session not found")
|
| 131 |
-
return {"investigations": investigations}
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
@router.get("/timeline/{session_id}")
|
| 135 |
-
async def get_timeline(session_id: str):
|
| 136 |
-
"""Get complete simulation timeline."""
|
| 137 |
-
timeline = orchestrator.get_timeline(session_id)
|
| 138 |
-
if timeline is None:
|
| 139 |
-
raise HTTPException(status_code=404, detail="Session not found")
|
| 140 |
-
return {"timeline": timeline}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/api/analytics.py
DELETED
|
@@ -1,30 +0,0 @@
|
|
| 1 |
-
from fastapi import APIRouter
|
| 2 |
-
from app.core.session import session
|
| 3 |
-
|
| 4 |
-
router = APIRouter()
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
@router.get("/performance")
|
| 8 |
-
async def get_performance():
|
| 9 |
-
return session.get_performance_data()
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
@router.get("/peer-comparison")
|
| 13 |
-
async def get_peer_comparison():
|
| 14 |
-
profile = session.get_student_profile()
|
| 15 |
-
scores = profile["specialty_scores"]
|
| 16 |
-
return {
|
| 17 |
-
"student_accuracy": profile["accuracy"],
|
| 18 |
-
"peer_average": 62,
|
| 19 |
-
"top_10_average": 88,
|
| 20 |
-
"ranking": f"Top {profile['percentile']}%",
|
| 21 |
-
"specialty_comparison": {
|
| 22 |
-
spec: {"student": score, "average": max(50, score - 10)}
|
| 23 |
-
for spec, score in scores.items()
|
| 24 |
-
},
|
| 25 |
-
}
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
@router.get("/recommendations")
|
| 29 |
-
async def get_recommendations():
|
| 30 |
-
return session.get_recommendations()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/api/bias_detection.py
DELETED
|
@@ -1,155 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Bias Detection Endpoint
|
| 3 |
-
Real-time detection of cognitive biases during simulation
|
| 4 |
-
"""
|
| 5 |
-
from fastapi import APIRouter, HTTPException
|
| 6 |
-
from pydantic import BaseModel
|
| 7 |
-
from typing import List, Dict, Any, Optional
|
| 8 |
-
import logging
|
| 9 |
-
import os
|
| 10 |
-
import anthropic
|
| 11 |
-
|
| 12 |
-
logger = logging.getLogger(__name__)
|
| 13 |
-
|
| 14 |
-
router = APIRouter()
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
class ConversationMessage(BaseModel):
    """A single turn in the student-patient conversation."""
    role: str  # "student" or "patient"
    content: str  # raw message text
    timestamp: str  # client-supplied timestamp string (format not enforced here)
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
class BiasDetectionRequest(BaseModel):
    """Request for bias detection over an in-progress simulation."""
    case_id: str
    conversation_history: List[ConversationMessage]  # full transcript so far, in order
    patient_profile: Dict[str, Any]  # expected keys include name, chief_complaint, actual_diagnosis
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
class BiasDetectionResponse(BaseModel):
    """Bias detection result; optional fields are populated only when a bias is flagged."""
    bias_detected: bool
    bias_type: Optional[str] = None  # anchoring, premature_closure, confirmation_bias
    confidence: Optional[float] = None  # 0.0 to 1.0
    explanation: Optional[str] = None  # why the bias was flagged
    intervention_message: Optional[str] = None  # short nudge shown to the student
    reflection_questions: Optional[List[str]] = None  # Socratic prompts for the student
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
@router.post("/detect", response_model=BiasDetectionResponse)
async def detect_bias(request: BiasDetectionRequest):
    """
    Detect cognitive biases in real-time during simulation.

    Analyzes conversation to detect:
    - Anchoring bias: Fixating on initial diagnosis
    - Premature closure: Accepting first diagnosis without full differential
    - Confirmation bias: Only seeking evidence for initial hypothesis

    On any model/parsing failure this endpoint degrades gracefully and
    reports "no bias detected" rather than interrupting the simulation.
    """
    import json

    logger.info(f"Analyzing bias for case {request.case_id}")

    # Build the detection prompt
    detection_prompt = f"""You are an expert medical educator monitoring a student's diagnostic reasoning in real-time.

**Case Information:**
Patient: {request.patient_profile.get('name', 'Unknown')}
Chief Complaint: {request.patient_profile.get('chief_complaint', 'Unknown')}
Actual Diagnosis: {request.patient_profile.get('actual_diagnosis', 'Unknown')}

**Conversation So Far:**
{_format_conversation(request.conversation_history)}

**Your Task:**
Analyze the student's questioning pattern and reasoning to detect cognitive biases. Look for:

1. **Anchoring Bias**: Has the student fixated on one diagnosis without exploring alternatives?
   - Are they only asking questions that support one specific diagnosis?
   - Have they ignored red flags or contradictory evidence?

2. **Premature Closure**: Has the student jumped to a conclusion too quickly?
   - Have they asked < 5 substantive questions before diagnosing?
   - Did they skip important screening questions?
   - Are they missing critical differentials?

3. **Confirmation Bias**: Is the student only seeking evidence for their initial hypothesis?
   - Are all questions directed toward confirming one diagnosis?
   - Are they ignoring or downplaying contradictory findings?

**Detection Criteria:**
- Only flag if bias is CLEAR and SIGNIFICANT
- Early in interview (<3 questions): Don't flag yet, student is still gathering data
- Mid-interview (3-6 questions): Flag if clear anchoring or confirmation bias
- Late interview (>6 questions): Flag if premature closure without full differential

Return your analysis in this JSON format:
{{
    "bias_detected": true,
    "bias_type": "anchoring",
    "confidence": 0.85,
    "explanation": "Student has asked 5 consecutive questions about cardiac symptoms without considering systemic causes despite patient mentioning weight loss",
    "intervention_message": "Take a step back. You've been focusing heavily on cardiac causes. What other systems could cause these symptoms?",
    "reflection_questions": [
        "What alternative diagnoses have you considered?",
        "Are there any findings that don't fit your current hypothesis?",
        "What key questions haven't you asked yet?"
    ]
}}

If NO significant bias detected, return:
{{
    "bias_detected": false
}}
"""

    try:
        # Create Claude client
        api_key = os.environ.get("ANTHROPIC_API_KEY")
        if not api_key or api_key == "sk-ant-your-key-here":
            # NOTE(review): this HTTPException is swallowed by the broad except
            # below and degrades to "no bias detected" — confirm that masking a
            # missing API key is intended.
            raise HTTPException(status_code=500, detail="Anthropic API key not configured")

        client = anthropic.Anthropic(api_key=api_key)
        response = client.messages.create(
            model="claude-opus-4-6",
            max_tokens=1000,
            messages=[{
                "role": "user",
                "content": detection_prompt
            }]
        )

        # Concatenate all text blocks from the response.
        detection_text = ""
        for block in response.content:
            if block.type == "text":
                detection_text += block.text

        logger.info("Bias detection analysis completed")

        # BUGFIX: the model may wrap its JSON in a markdown fence or prose.
        # Parse only the outermost {...} object instead of the raw text, which
        # previously raised JSONDecodeError and forced the no-bias fallback.
        start = detection_text.find("{")
        end = detection_text.rfind("}")
        if start == -1 or end <= start:
            raise ValueError("no JSON object found in model response")
        detection_data = json.loads(detection_text[start:end + 1])

        return BiasDetectionResponse(**detection_data)

    except Exception as e:
        logger.error(f"Bias detection failed: {str(e)}", exc_info=True)
        # Return no bias detected on error to avoid blocking simulation
        return BiasDetectionResponse(bias_detected=False)
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
def _format_conversation(messages: List[ConversationMessage]) -> str:
    """Render the conversation as a numbered, timestamped transcript for the prompt."""
    lines = [
        # Any role other than "student" is attributed to the patient.
        f"{idx}. [{msg.timestamp}] "
        f"{'Student' if msg.role == 'student' else 'Patient'}: {msg.content}"
        for idx, msg in enumerate(messages, 1)
    ]
    return "\n".join(lines)
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
@router.get("/health")
async def bias_detection_health():
    """Health check for bias detection endpoint"""
    return {"status": "healthy", "feature": "bias-detection"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/api/cases.py
DELETED
|
@@ -1,100 +0,0 @@
|
|
| 1 |
-
from fastapi import APIRouter, HTTPException
|
| 2 |
-
from pydantic import BaseModel
|
| 3 |
-
from typing import Optional
|
| 4 |
-
from app.core.rag.shared import case_generator
|
| 5 |
-
from app.core.session import session
|
| 6 |
-
|
| 7 |
-
router = APIRouter()
|
| 8 |
-
|
| 9 |
-
# Static catalogue of specialties exposed by /specialties; `id` is the key the
# case generator expects, `cases_available` and `description` are display data.
SPECIALTIES = [
    {"id": "cardiology", "name": "Cardiology", "icon": "heart", "cases_available": 12, "description": "STEMI, heart failure, IE, AF, aortic dissection, rheumatic heart disease"},
    {"id": "respiratory", "name": "Respiratory", "icon": "lungs", "cases_available": 10, "description": "Pneumonia, COPD, TB pleural effusion, asthma, ILD, lung cancer"},
    {"id": "infectious", "name": "Infectious Disease", "icon": "virus", "cases_available": 10, "description": "Dengue, malaria, typhoid, scrub typhus, leptospirosis, HIV"},
    {"id": "neurology", "name": "Neurology", "icon": "brain", "cases_available": 10, "description": "Stroke, meningitis, GBS, myasthenia gravis, epilepsy, Parkinson's"},
    {"id": "gastro", "name": "Gastroenterology", "icon": "microscope", "cases_available": 10, "description": "Liver abscess, cirrhosis, pancreatitis, IBD, GI bleed, celiac, HCC"},
    {"id": "emergency", "name": "Emergency Medicine", "icon": "alert", "cases_available": 10, "description": "DKA, snake bite, poisoning, burns, anaphylaxis, acute MI"},
    {"id": "nephrology", "name": "Nephrology", "icon": "droplet", "cases_available": 10, "description": "CKD, nephrotic syndrome, AKI, RPGN, RTA, lupus nephritis"},
    {"id": "endocrinology", "name": "Endocrinology", "icon": "activity", "cases_available": 10, "description": "Thyroid storm, Addison crisis, Cushing, pheochromocytoma, HHS"},
    {"id": "pediatrics", "name": "Pediatrics", "icon": "baby", "cases_available": 10, "description": "Bronchiolitis, TOF, Kawasaki, malnutrition, thalassemia, ALL"},
    {"id": "obstetrics", "name": "Obstetrics & Gynecology", "icon": "heart-pulse", "cases_available": 10, "description": "Eclampsia, ectopic, PPH, PCOS, placenta previa, cervical cancer"},
    {"id": "hematology", "name": "Hematology", "icon": "test-tubes", "cases_available": 10, "description": "IDA, sickle cell, ITP, DIC, CML, hemophilia, TTP"},
    {"id": "psychiatry", "name": "Psychiatry", "icon": "brain-cog", "cases_available": 10, "description": "Schizophrenia, depression, bipolar, delirium tremens, OCD, NMS"},
    {"id": "dermatology", "name": "Dermatology", "icon": "scan", "cases_available": 10, "description": "SJS/TEN, leprosy, pemphigus, psoriasis, DRESS, vitiligo"},
    {"id": "orthopedics", "name": "Orthopedics", "icon": "bone", "cases_available": 10, "description": "Fractures, septic arthritis, osteosarcoma, Pott spine, AVN, ACL"},
]
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
class CaseRequest(BaseModel):
    """Parameters for generating a new case."""
    specialty: str  # specialty id (see SPECIALTIES)
    difficulty: str = "intermediate"
    year_level: str = "final_year"
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
class CaseActionRequest(BaseModel):
    """A student action taken within an active case."""
    case_id: str
    action_type: str  # interpreted by case_generator.process_action
    student_input: Optional[str] = None  # extra free-text detail, if the action needs it
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
class DiagnosisRequest(BaseModel):
    """A student's final diagnosis submission for a case."""
    case_id: str
    diagnosis: str
    reasoning: str = ""  # optional free-text justification
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
@router.get("/specialties")
async def get_specialties():
    """List the available specialties with case counts and descriptions."""
    return {"specialties": SPECIALTIES}
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
@router.post("/generate")
async def generate_case(request: CaseRequest):
    """Generate a new case for the requested specialty/difficulty/year level.

    Any generator failure is surfaced to the client as a 500 with the
    underlying error message.
    """
    try:
        case = case_generator.generate_case(
            specialty=request.specialty,
            difficulty=request.difficulty,
            year_level=request.year_level,
        )
        return case
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
@router.get("/corpus-stats")
async def get_corpus_stats():
    """Get RAG corpus statistics from the shared case generator."""
    return case_generator.get_corpus_stats()
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
@router.get("/{case_id}")
async def get_case(case_id: str):
    """Fetch a previously generated case by id; 404 if unknown."""
    case = case_generator.get_case(case_id)
    if not case:
        raise HTTPException(status_code=404, detail="Case not found")
    return case
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
@router.post("/{case_id}/action")
async def case_action(case_id: str, request: CaseActionRequest):
    """Forward a student action to the case generator and return its result."""
    result = case_generator.process_action(case_id, request.action_type, request.student_input)
    return result
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
@router.post("/{case_id}/diagnose")
async def submit_diagnosis(case_id: str, request: DiagnosisRequest):
    """Grade the submitted diagnosis and, on success, log the outcome for analytics."""
    evaluation = case_generator.evaluate_diagnosis(case_id, request.diagnosis, request.reasoning)
    case_data = case_generator.get_case(case_id)

    # Only track the attempt when the case exists and grading did not error out.
    should_record = bool(case_data) and "error" not in evaluation
    if should_record:
        session.record_case_result(
            case_id=case_id,
            specialty=case_data.get("specialty", ""),
            difficulty=case_data.get("difficulty", ""),
            diagnosis=request.diagnosis,
            correct_diagnosis=evaluation.get("correct_diagnosis", case_data.get("diagnosis", "")),
            is_correct=evaluation.get("is_correct", False),
            accuracy_score=evaluation.get("accuracy_score", 0),
        )

    return evaluation
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/api/profile.py
DELETED
|
@@ -1,112 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Profile-based case selection endpoint
|
| 3 |
-
"""
|
| 4 |
-
from fastapi import APIRouter, HTTPException
|
| 5 |
-
from pydantic import BaseModel
|
| 6 |
-
from typing import List, Optional
|
| 7 |
-
import random
|
| 8 |
-
import logging
|
| 9 |
-
|
| 10 |
-
logger = logging.getLogger(__name__)
|
| 11 |
-
|
| 12 |
-
router = APIRouter()
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
class StudentProfile(BaseModel):
    """Self-reported student profile used to tailor case selection."""
    yearLevel: str  # final_year, internship, residency, practicing
    comfortableSpecialties: List[str]  # specialty ids the student feels confident in
    setting: str  # urban, rural, community
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
class CaseSelectionRequest(BaseModel):
    """Request for case parameters tailored to a profile and a target feature."""
    profile: StudentProfile
    feature: str  # simulation, reasoning-chain, adversarial, bias-interruption
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
class CaseSelectionResponse(BaseModel):
    """Chosen case parameters plus a human-readable rationale."""
    specialty: str
    difficulty: str
    setting: str
    why_selected: str  # one-sentence explanation of the choice
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
@router.post("/select-case", response_model=CaseSelectionResponse)
async def select_case_for_profile(request: CaseSelectionRequest):
    """
    Select appropriate case parameters based on the student profile.

    Logic:
    1. Map year level to a difficulty band.
    2. Select specialty (70% comfortable, 30% challenge).
    3. Match the student's setting preference.
    4. For the "adversarial" feature, always override with a challenge specialty.
    """
    profile = request.profile
    feature = request.feature

    all_specialties = [
        "cardiology", "respiratory", "infectious", "neurology",
        "gastro", "emergency", "pediatrics", "obstetrics",
    ]

    def _challenge_pool() -> List[str]:
        # Specialties the student did NOT mark as comfortable
        # (previously this comprehension was duplicated in two branches).
        comfortable = profile.comfortableSpecialties or []
        return [s for s in all_specialties if s not in comfortable]

    # 1. Difficulty from year level; unknown levels default to intermediate.
    difficulty_map = {
        "final_year": ["beginner", "intermediate"],
        "internship": ["intermediate"],
        "residency": ["intermediate", "advanced"],
        "practicing": ["advanced"],
    }
    difficulty = random.choice(difficulty_map.get(profile.yearLevel, ["intermediate"]))

    # 2. Specialty: 70% from comfort areas, 30% challenge.
    if profile.comfortableSpecialties:
        if random.random() < 0.7:
            specialty = random.choice(profile.comfortableSpecialties)
            reason_specialty = f"your comfort area ({specialty})"
        else:
            challenge_specialties = _challenge_pool()
            if challenge_specialties:
                specialty = random.choice(challenge_specialties)
                reason_specialty = f"a challenge area ({specialty})"
            else:
                specialty = random.choice(all_specialties)
                reason_specialty = specialty
    else:
        specialty = random.choice(all_specialties)
        reason_specialty = specialty

    # 3. Setting passes straight through from the profile.
    setting = profile.setting

    # 4. Adversarial mode always challenges the student, overriding step 2.
    if feature == "adversarial":
        challenge_specialties = _challenge_pool()
        if challenge_specialties:
            specialty = random.choice(challenge_specialties)
            reason_specialty = f"designed to challenge you ({specialty})"

    why_selected = (
        f"Selected {difficulty} difficulty case in {reason_specialty}, "
        f"matching your {setting} setting preference."
    )

    # Lazy %-style args avoid formatting cost when INFO logging is disabled.
    logger.info(
        "Case selection: %s/%s/%s for %s student (feature: %s)",
        specialty, difficulty, setting, profile.yearLevel, feature,
    )

    return CaseSelectionResponse(
        specialty=specialty,
        difficulty=difficulty,
        setting=setting,
        why_selected=why_selected,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/api/reasoning.py
DELETED
|
@@ -1,214 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Reasoning Chain Analysis Endpoint
|
| 3 |
-
Uses Claude Opus extended thinking to analyze student diagnostic approach
|
| 4 |
-
"""
|
| 5 |
-
from fastapi import APIRouter, HTTPException
|
| 6 |
-
from pydantic import BaseModel
|
| 7 |
-
from typing import List, Optional, Dict, Any
|
| 8 |
-
import logging
|
| 9 |
-
import os
|
| 10 |
-
from datetime import datetime
|
| 11 |
-
import anthropic
|
| 12 |
-
|
| 13 |
-
logger = logging.getLogger(__name__)
|
| 14 |
-
|
| 15 |
-
router = APIRouter()
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
class ReasoningStep(BaseModel):
    """A single step in the reasoning process"""
    step_number: int  # 1-based position in the chain
    timestamp: str  # e.g. "0:30"
    category: str  # data_gathering, hypothesis_generation, testing, diagnosis
    description: str
    quality: str  # excellent, good, acceptable, concerning, critical_gap
    expert_insight: str  # educator commentary on this step
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
class ReasoningAnalysisRequest(BaseModel):
    """Request for reasoning chain analysis"""
    case_id: str
    student_actions: List[Dict[str, Any]]  # chronological actions; each may carry "timestamp" and "content"
    final_diagnosis: str
    case_info: Dict[str, Any]  # expected keys include patient_name, chief_complaint, actual_diagnosis
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
class ReasoningAnalysisResponse(BaseModel):
    """Response with reasoning chain analysis"""
    analysis_id: str  # server-generated UUID for this analysis
    student_reasoning_steps: List[ReasoningStep]
    expert_reasoning_steps: List[ReasoningStep]
    divergence_points: List[Dict[str, str]]  # student_action / expert_action / impact / learning_point
    overall_assessment: str
    strengths: List[str]
    gaps: List[str]
    learning_recommendations: List[str]
    thinking_time_seconds: float  # wall-clock time of the model call
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
@router.post("/analyze", response_model=ReasoningAnalysisResponse)
async def analyze_reasoning_chain(request: ReasoningAnalysisRequest):
    """
    Analyze student's diagnostic reasoning using Claude Opus extended thinking.

    This uses extended thinking to deeply analyze:
    1. Student's reasoning process step-by-step
    2. Expert reasoning approach for the same case
    3. Divergence points where student deviated from optimal path
    4. Specific gaps and learning opportunities

    Raises HTTPException(500) if the API key is missing, the model call fails,
    or the model's output cannot be parsed as the expected JSON.
    """
    import json
    import uuid

    logger.info(f"Starting reasoning chain analysis for case {request.case_id}")

    # Build the analysis prompt
    analysis_prompt = f"""You are an expert medical educator analyzing a medical student's diagnostic reasoning process.

**Case Information:**
Patient: {request.case_info.get('patient_name', 'Unknown')}
Chief Complaint: {request.case_info.get('chief_complaint', 'Unknown')}
Actual Diagnosis: {request.case_info.get('actual_diagnosis', 'Unknown')}

**Student's Actions (chronological):**
{_format_student_actions(request.student_actions)}

**Student's Final Diagnosis:** {request.final_diagnosis}

**Your Task:**
Please spend significant time thinking deeply about this case using extended thinking. Analyze:

1. **Student's Reasoning Process:**
   - Break down each action the student took into reasoning steps
   - Categorize each step (data gathering, hypothesis generation, hypothesis testing, diagnosis)
   - Evaluate the quality of each step (excellent, good, acceptable, concerning, critical_gap)
   - Note the reasoning behind each action

2. **Expert Reasoning Process:**
   - How would an expert clinician approach this case?
   - What are the critical data points an expert would prioritize?
   - What hypotheses would they generate and in what order?
   - What tests would they use to differentiate?

3. **Divergence Analysis:**
   - Where did the student's path diverge from the expert path?
   - Were there critical questions NOT asked?
   - Were there premature conclusions or anchoring biases?
   - Were differential diagnoses appropriately considered?

4. **Educational Insights:**
   - What are the student's key strengths?
   - What are critical gaps in their reasoning?
   - Specific learning recommendations for improvement

Return your analysis in this JSON format:
{{
    "student_reasoning_steps": [
        {{
            "step_number": 1,
            "timestamp": "0:30",
            "category": "data_gathering",
            "description": "Asked about chest pain characteristics",
            "quality": "excellent",
            "expert_insight": "Excellent first question - chest pain characterization is critical for cardiac differential"
        }}
    ],
    "expert_reasoning_steps": [
        {{
            "step_number": 1,
            "timestamp": "0:00",
            "category": "hypothesis_generation",
            "description": "Generate initial differential: ACS, PE, aortic dissection, anxiety",
            "quality": "excellent",
            "expert_insight": "Expert immediately considers life-threatening causes"
        }}
    ],
    "divergence_points": [
        {{
            "student_action": "Did not ask about cocaine use until late in interview",
            "expert_action": "Would ask about substance use early given young patient with chest pain",
            "impact": "Delayed critical diagnosis",
            "learning_point": "Always ask about substance use in chest pain, especially in younger patients"
        }}
    ],
    "overall_assessment": "The student demonstrated good basic history-taking skills but showed anchoring bias...",
    "strengths": ["Systematic approach to history", "Good rapport building"],
    "gaps": ["Did not consider substance-induced ACS", "Anchored on anxiety diagnosis"],
    "learning_recommendations": ["Review differential diagnosis of chest pain in young adults", "Practice systematic substance use screening"]
}}
"""

    try:
        start_time = datetime.now()

        # Create Claude Opus client
        api_key = os.environ.get("ANTHROPIC_API_KEY")
        if not api_key or api_key == "sk-ant-your-key-here":
            raise HTTPException(status_code=500, detail="Anthropic API key not configured")

        client = anthropic.Anthropic(api_key=api_key)
        response = client.messages.create(
            model="claude-opus-4-6",
            max_tokens=16000,
            temperature=1,
            # NOTE(review): thinking "type": "adaptive" — verify against the
            # current Anthropic Messages API thinking-parameter schema.
            thinking={
                "type": "adaptive",
            },
            messages=[{
                "role": "user",
                "content": analysis_prompt
            }]
        )

        thinking_time = (datetime.now() - start_time).total_seconds()

        # Concatenate text blocks only (thinking blocks are skipped).
        analysis_text = ""
        for block in response.content:
            if block.type == "text":
                analysis_text += block.text

        logger.info(f"Extended thinking completed in {thinking_time:.2f} seconds")

        # BUGFIX: the model may wrap its JSON in a markdown fence or prose.
        # Parse only the outermost {...} object instead of the raw text, which
        # previously raised JSONDecodeError on fenced output.
        start = analysis_text.find("{")
        end = analysis_text.rfind("}")
        if start == -1 or end <= start:
            raise ValueError("no JSON object found in model response")
        analysis_data = json.loads(analysis_text[start:end + 1])

        # Build response with a fresh server-side analysis id.
        return ReasoningAnalysisResponse(
            analysis_id=str(uuid.uuid4()),
            student_reasoning_steps=[
                ReasoningStep(**step) for step in analysis_data["student_reasoning_steps"]
            ],
            expert_reasoning_steps=[
                ReasoningStep(**step) for step in analysis_data["expert_reasoning_steps"]
            ],
            divergence_points=analysis_data["divergence_points"],
            overall_assessment=analysis_data["overall_assessment"],
            strengths=analysis_data["strengths"],
            gaps=analysis_data["gaps"],
            learning_recommendations=analysis_data["learning_recommendations"],
            thinking_time_seconds=thinking_time
        )

    except Exception as e:
        logger.error(f"Reasoning analysis failed: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
def _format_student_actions(actions: List[Dict[str, Any]]) -> str:
|
| 202 |
-
"""Format student actions for the prompt"""
|
| 203 |
-
formatted = []
|
| 204 |
-
for i, action in enumerate(actions, 1):
|
| 205 |
-
timestamp = action.get('timestamp', 'Unknown')
|
| 206 |
-
content = action.get('content', 'Unknown action')
|
| 207 |
-
formatted.append(f"{i}. [{timestamp}] {content}")
|
| 208 |
-
return "\n".join(formatted)
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
@router.get("/health")
async def reasoning_health():
    """Health check for reasoning endpoint"""
    return {"status": "healthy", "feature": "reasoning-chain"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/api/simulation.py
DELETED
|
@@ -1,261 +0,0 @@
|
|
| 1 |
-
"""API endpoints for AI Patient Simulation."""
|
| 2 |
-
import logging
|
| 3 |
-
from typing import List
|
| 4 |
-
|
| 5 |
-
from fastapi import APIRouter, HTTPException
|
| 6 |
-
|
| 7 |
-
from app.core.agents.orchestrator import SimulationOrchestrator
|
| 8 |
-
from app.models.simulation import (
|
| 9 |
-
StartSimulationRequest,
|
| 10 |
-
StartSimulationResponse,
|
| 11 |
-
SendMessageRequest,
|
| 12 |
-
SendMessageResponse,
|
| 13 |
-
CompleteSimulationRequest,
|
| 14 |
-
CompleteSimulationResponse,
|
| 15 |
-
CognitiveAutopsy,
|
| 16 |
-
EvaluationMetrics,
|
| 17 |
-
FeedbackType,
|
| 18 |
-
TutorFeedback,
|
| 19 |
-
)
|
| 20 |
-
|
| 21 |
-
logger = logging.getLogger(__name__)
|
| 22 |
-
|
| 23 |
-
router = APIRouter()
|
| 24 |
-
|
| 25 |
-
# Initialize orchestrator (singleton)
|
| 26 |
-
orchestrator = SimulationOrchestrator()
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
@router.post("/start", response_model=StartSimulationResponse)
async def start_simulation(request: StartSimulationRequest):
    """
    Start a new patient simulation.

    Returns:
        - case_id: Unique identifier for this simulation
        - patient_info: Safe patient demographics (no diagnosis)
        - avatar_path: Path to avatar SVG
        - setting_context: Where the encounter takes place
        - initial_message: Patient's first words
    """
    try:
        simulation = orchestrator.start_simulation(
            specialty=request.specialty,
            difficulty=request.difficulty,
        )

        # Build avatar path based on gender and emotional state
        avatar_path = (
            f"/avatars/{simulation.patient_profile.gender.value}/"
            f"{simulation.emotional_state.value}.svg"
        )

        # Safe patient info (no diagnosis)
        patient_info = {
            "age": simulation.patient_profile.age,
            "gender": simulation.patient_profile.gender.value,
            "name": simulation.patient_profile.name,
            "chief_complaint": simulation.patient_profile.chief_complaint,
        }

        # Get initial patient message — assumes the orchestrator seeds the
        # message list with the patient's opening line (TODO confirm; an empty
        # list would raise IndexError here and surface as a 500).
        initial_message = simulation.messages[0].content

        return StartSimulationResponse(
            case_id=simulation.case_id,
            patient_info=patient_info,
            avatar_path=avatar_path,
            setting_context=simulation.patient_profile.setting,
            initial_message=initial_message,
        )

    except Exception as e:
        logger.error(f"Error starting simulation: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
@router.post("/message", response_model=SendMessageResponse)
|
| 78 |
-
async def send_message(request: SendMessageRequest):
|
| 79 |
-
"""
|
| 80 |
-
Student sends a message to the patient.
|
| 81 |
-
|
| 82 |
-
Multi-agent pipeline:
|
| 83 |
-
1. Evaluator analyzes student message
|
| 84 |
-
2. Updates emotional state & rapport based on communication quality
|
| 85 |
-
3. Patient responds based on new emotional state
|
| 86 |
-
4. Tutor provides real-time Socratic feedback
|
| 87 |
-
|
| 88 |
-
Returns:
|
| 89 |
-
- patient_response: What patient says
|
| 90 |
-
- emotional_state: Current patient emotion
|
| 91 |
-
- rapport_level: Current rapport (1-5)
|
| 92 |
-
- tutor_feedback: Real-time feedback from AI tutor
|
| 93 |
-
- avatar_path: Updated avatar (may change with emotion)
|
| 94 |
-
"""
|
| 95 |
-
try:
|
| 96 |
-
# Process message through multi-agent pipeline
|
| 97 |
-
simulation = orchestrator.process_student_message(
|
| 98 |
-
case_id=request.case_id,
|
| 99 |
-
student_message=request.student_message,
|
| 100 |
-
)
|
| 101 |
-
|
| 102 |
-
# Get latest patient message
|
| 103 |
-
patient_messages = [msg for msg in simulation.messages if msg.role == "patient"]
|
| 104 |
-
latest_patient_message = patient_messages[-1].content
|
| 105 |
-
|
| 106 |
-
# Get feedback from this interaction (last few feedback items)
|
| 107 |
-
recent_feedback = simulation.tutor_feedback[-2:] # Evaluator + Tutor feedback
|
| 108 |
-
|
| 109 |
-
# Update avatar path based on new emotional state
|
| 110 |
-
avatar_path = (
|
| 111 |
-
f"/avatars/{simulation.patient_profile.gender.value}/"
|
| 112 |
-
f"{simulation.emotional_state.value}.svg"
|
| 113 |
-
)
|
| 114 |
-
|
| 115 |
-
return SendMessageResponse(
|
| 116 |
-
patient_response=latest_patient_message,
|
| 117 |
-
emotional_state=simulation.emotional_state,
|
| 118 |
-
rapport_level=simulation.rapport_level,
|
| 119 |
-
tutor_feedback=recent_feedback,
|
| 120 |
-
avatar_path=avatar_path,
|
| 121 |
-
)
|
| 122 |
-
|
| 123 |
-
except ValueError as e:
|
| 124 |
-
raise HTTPException(status_code=404, detail=str(e))
|
| 125 |
-
except Exception as e:
|
| 126 |
-
logger.error(f"Error processing message: {e}")
|
| 127 |
-
raise HTTPException(status_code=500, detail=str(e))
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
@router.post("/complete", response_model=CompleteSimulationResponse)
|
| 131 |
-
async def complete_simulation(request: CompleteSimulationRequest):
|
| 132 |
-
"""
|
| 133 |
-
Complete simulation and get cognitive autopsy.
|
| 134 |
-
|
| 135 |
-
Student provides their diagnosis and reasoning.
|
| 136 |
-
System performs deep analysis of their diagnostic process.
|
| 137 |
-
|
| 138 |
-
Returns:
|
| 139 |
-
- correct_diagnosis: What it actually was
|
| 140 |
-
- diagnosis_correct: Boolean
|
| 141 |
-
- cognitive_autopsy: Deep analysis of thinking process
|
| 142 |
-
- evaluation: Overall communication metrics
|
| 143 |
-
"""
|
| 144 |
-
try:
|
| 145 |
-
# Mark simulation as complete
|
| 146 |
-
simulation = orchestrator.complete_simulation(
|
| 147 |
-
case_id=request.case_id,
|
| 148 |
-
diagnosis=request.diagnosis,
|
| 149 |
-
reasoning=request.reasoning,
|
| 150 |
-
)
|
| 151 |
-
|
| 152 |
-
# Check if diagnosis is correct
|
| 153 |
-
correct_diagnosis = simulation.patient_profile.actual_diagnosis
|
| 154 |
-
diagnosis_correct = (
|
| 155 |
-
request.diagnosis.lower().strip() in correct_diagnosis.lower()
|
| 156 |
-
)
|
| 157 |
-
|
| 158 |
-
# Generate cognitive autopsy
|
| 159 |
-
# TODO: Call Opus API for deep analysis
|
| 160 |
-
# For now, provide a structured template
|
| 161 |
-
cognitive_autopsy = CognitiveAutopsy(
|
| 162 |
-
mental_model=(
|
| 163 |
-
f"You approached this case with a '{request.diagnosis}' framework. "
|
| 164 |
-
"Your initial hypothesis shaped how you interpreted the symptoms."
|
| 165 |
-
),
|
| 166 |
-
breaking_point=(
|
| 167 |
-
"Your reasoning process needed more systematic differential diagnosis. "
|
| 168 |
-
"Consider using a structured approach to avoid premature closure."
|
| 169 |
-
),
|
| 170 |
-
what_you_missed=simulation.patient_profile.key_history_points[:2],
|
| 171 |
-
why_you_missed_it=(
|
| 172 |
-
"These details may have been missed due to closed-ended questioning "
|
| 173 |
-
"or not building enough rapport for the patient to share freely."
|
| 174 |
-
),
|
| 175 |
-
prediction=(
|
| 176 |
-
"In future cases with similar presentations, remember to: "
|
| 177 |
-
"1) Build rapport first, 2) Use open-ended questions, "
|
| 178 |
-
"3) Consider multiple differentials before anchoring."
|
| 179 |
-
),
|
| 180 |
-
)
|
| 181 |
-
|
| 182 |
-
# Calculate evaluation metrics based on simulation history
|
| 183 |
-
evaluation = _calculate_evaluation_metrics(simulation)
|
| 184 |
-
|
| 185 |
-
return CompleteSimulationResponse(
|
| 186 |
-
correct_diagnosis=correct_diagnosis,
|
| 187 |
-
diagnosis_correct=diagnosis_correct,
|
| 188 |
-
cognitive_autopsy=cognitive_autopsy,
|
| 189 |
-
evaluation=evaluation,
|
| 190 |
-
)
|
| 191 |
-
|
| 192 |
-
except ValueError as e:
|
| 193 |
-
raise HTTPException(status_code=404, detail=str(e))
|
| 194 |
-
except Exception as e:
|
| 195 |
-
logger.error(f"Error completing simulation: {e}")
|
| 196 |
-
raise HTTPException(status_code=500, detail=str(e))
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
@router.get("/status/{case_id}")
|
| 200 |
-
async def get_simulation_status(case_id: str):
|
| 201 |
-
"""Get current simulation state (for debugging)."""
|
| 202 |
-
try:
|
| 203 |
-
simulation = orchestrator.get_simulation(case_id)
|
| 204 |
-
return {
|
| 205 |
-
"case_id": simulation.case_id,
|
| 206 |
-
"emotional_state": simulation.emotional_state.value,
|
| 207 |
-
"rapport_level": simulation.rapport_level.value,
|
| 208 |
-
"message_count": len(simulation.messages),
|
| 209 |
-
"completed": simulation.completed_at is not None,
|
| 210 |
-
}
|
| 211 |
-
except ValueError as e:
|
| 212 |
-
raise HTTPException(status_code=404, detail=str(e))
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
def _calculate_evaluation_metrics(simulation) -> EvaluationMetrics:
|
| 216 |
-
"""Calculate overall evaluation metrics from simulation history."""
|
| 217 |
-
|
| 218 |
-
# Count open-ended questions
|
| 219 |
-
student_messages = [msg.content for msg in simulation.messages if msg.role == "student"]
|
| 220 |
-
open_ended_markers = ["tell me", "describe", "how do you", "what happened", "when did"]
|
| 221 |
-
|
| 222 |
-
open_ended_count = sum(
|
| 223 |
-
1
|
| 224 |
-
for msg in student_messages
|
| 225 |
-
if any(marker in msg.lower() for marker in open_ended_markers)
|
| 226 |
-
)
|
| 227 |
-
|
| 228 |
-
# Check if distress was acknowledged
|
| 229 |
-
empathy_markers = ["understand", "worried", "difficult", "sorry", "must be"]
|
| 230 |
-
acknowledged_distress = any(
|
| 231 |
-
any(marker in msg.lower() for marker in empathy_markers)
|
| 232 |
-
for msg in student_messages
|
| 233 |
-
)
|
| 234 |
-
|
| 235 |
-
# Calculate scores based on feedback history
|
| 236 |
-
positive_feedback_count = sum(
|
| 237 |
-
1 for fb in simulation.tutor_feedback if fb.type == FeedbackType.POSITIVE
|
| 238 |
-
)
|
| 239 |
-
critical_feedback_count = sum(
|
| 240 |
-
1 for fb in simulation.tutor_feedback if fb.type == FeedbackType.CRITICAL
|
| 241 |
-
)
|
| 242 |
-
|
| 243 |
-
total_feedback = len(simulation.tutor_feedback)
|
| 244 |
-
feedback_ratio = (
|
| 245 |
-
positive_feedback_count / total_feedback if total_feedback > 0 else 0.5
|
| 246 |
-
)
|
| 247 |
-
|
| 248 |
-
# Score calculations (1-5 scale)
|
| 249 |
-
empathy_score = min(5, max(1, int(feedback_ratio * 5)))
|
| 250 |
-
communication_quality = min(5, max(1, int(simulation.rapport_level.value)))
|
| 251 |
-
bedside_manner = min(5, max(1, int(simulation.rapport_level.value)))
|
| 252 |
-
clinical_reasoning = 3 # Default, would be calculated from diagnosis accuracy
|
| 253 |
-
|
| 254 |
-
return EvaluationMetrics(
|
| 255 |
-
empathy_score=empathy_score,
|
| 256 |
-
communication_quality=communication_quality,
|
| 257 |
-
clinical_reasoning=clinical_reasoning,
|
| 258 |
-
open_ended_questions=open_ended_count,
|
| 259 |
-
acknowledged_distress=acknowledged_distress,
|
| 260 |
-
bedside_manner=bedside_manner,
|
| 261 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/api/student.py
DELETED
|
@@ -1,36 +0,0 @@
|
|
| 1 |
-
from fastapi import APIRouter
|
| 2 |
-
from pydantic import BaseModel
|
| 3 |
-
from typing import Optional
|
| 4 |
-
from app.core.session import session
|
| 5 |
-
|
| 6 |
-
router = APIRouter()
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
class ProfileUpdate(BaseModel):
|
| 10 |
-
name: Optional[str] = None
|
| 11 |
-
year_level: Optional[str] = None
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
@router.get("/profile")
|
| 15 |
-
async def get_profile():
|
| 16 |
-
return session.get_student_profile()
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
@router.put("/profile")
|
| 20 |
-
async def update_profile(update: ProfileUpdate):
|
| 21 |
-
profile = session.get_student_profile()
|
| 22 |
-
if update.name:
|
| 23 |
-
profile["name"] = update.name
|
| 24 |
-
if update.year_level:
|
| 25 |
-
profile["year_level"] = update.year_level
|
| 26 |
-
return profile
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
@router.get("/biases")
|
| 30 |
-
async def get_biases():
|
| 31 |
-
return session.detect_biases()
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
@router.get("/knowledge-graph")
|
| 35 |
-
async def get_knowledge_graph():
|
| 36 |
-
return session.build_knowledge_graph()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/__init__.py
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
# Core modules
|
|
|
|
|
|
backend/app/core/agents/__init__.py
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
# AI Agents
|
|
|
|
|
|
backend/app/core/agents/base_agent.py
DELETED
|
@@ -1,181 +0,0 @@
|
|
| 1 |
-
"""Base agent class for the multi-agent hospital ecosystem."""
|
| 2 |
-
|
| 3 |
-
import logging
|
| 4 |
-
import os
|
| 5 |
-
from abc import ABC, abstractmethod
|
| 6 |
-
from typing import Optional
|
| 7 |
-
|
| 8 |
-
import anthropic
|
| 9 |
-
|
| 10 |
-
from app.core.agents.response_optimizer import response_cache, context_filter
|
| 11 |
-
|
| 12 |
-
logger = logging.getLogger(__name__)
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
class BaseAgent(ABC):
|
| 16 |
-
"""Abstract base class for all hospital agents."""
|
| 17 |
-
|
| 18 |
-
agent_type: str = "base"
|
| 19 |
-
display_name: str = "Agent"
|
| 20 |
-
|
| 21 |
-
def __init__(self):
|
| 22 |
-
self.conversation_history: list[dict] = []
|
| 23 |
-
self.specialized_knowledge: str = "" # Dynamic RAG+Claude expertise
|
| 24 |
-
self.api_key = os.environ.get("ANTHROPIC_API_KEY")
|
| 25 |
-
self.client: Optional[anthropic.Anthropic] = None
|
| 26 |
-
|
| 27 |
-
# Check if API key is properly set
|
| 28 |
-
if not self.api_key:
|
| 29 |
-
logger.error(f"{self.display_name}: ANTHROPIC_API_KEY not found in environment")
|
| 30 |
-
elif self.api_key == "sk-ant-your-key-here" or "your_anthropic_api_key" in self.api_key:
|
| 31 |
-
logger.warning(f"{self.display_name}: ANTHROPIC_API_KEY is placeholder, not actual key")
|
| 32 |
-
else:
|
| 33 |
-
try:
|
| 34 |
-
self.client = anthropic.Anthropic(api_key=self.api_key)
|
| 35 |
-
logger.info(f"{self.display_name}: Anthropic client initialized successfully")
|
| 36 |
-
except Exception as e:
|
| 37 |
-
logger.error(f"{self.display_name} client init failed: {e}")
|
| 38 |
-
|
| 39 |
-
def set_specialized_knowledge(self, knowledge: str):
|
| 40 |
-
"""Inject dynamically built expertise into this agent."""
|
| 41 |
-
self.specialized_knowledge = knowledge
|
| 42 |
-
|
| 43 |
-
@abstractmethod
|
| 44 |
-
def get_system_prompt(self, case_context: dict) -> str:
|
| 45 |
-
"""Return the system prompt for this agent given case context."""
|
| 46 |
-
|
| 47 |
-
@abstractmethod
|
| 48 |
-
def get_fallback_response(self, message: str, case_context: dict) -> str:
|
| 49 |
-
"""Return a fallback response when the API is unavailable."""
|
| 50 |
-
|
| 51 |
-
def respond(self, message: str, case_context: dict) -> dict:
|
| 52 |
-
"""Generate a response from this agent.
|
| 53 |
-
|
| 54 |
-
Returns dict with: agent_type, display_name, content, metadata
|
| 55 |
-
"""
|
| 56 |
-
# Check cache first
|
| 57 |
-
cached_response = response_cache.get(self.agent_type, message, case_context)
|
| 58 |
-
if cached_response:
|
| 59 |
-
logger.debug(f"{self.display_name}: Using cached response")
|
| 60 |
-
return cached_response
|
| 61 |
-
|
| 62 |
-
self.conversation_history.append({"role": "user", "content": message})
|
| 63 |
-
|
| 64 |
-
content = ""
|
| 65 |
-
thinking = ""
|
| 66 |
-
using_fallback = False
|
| 67 |
-
|
| 68 |
-
if self.client:
|
| 69 |
-
result = self._respond_with_claude(message, case_context)
|
| 70 |
-
if result:
|
| 71 |
-
content, thinking = result
|
| 72 |
-
logger.debug(f"{self.display_name}: Generated Claude response")
|
| 73 |
-
else:
|
| 74 |
-
logger.warning(f"{self.display_name}: Claude response failed, using fallback")
|
| 75 |
-
using_fallback = True
|
| 76 |
-
else:
|
| 77 |
-
logger.warning(f"{self.display_name}: No Claude client, using fallback")
|
| 78 |
-
using_fallback = True
|
| 79 |
-
|
| 80 |
-
if not content or using_fallback:
|
| 81 |
-
content = self.get_fallback_response(message, case_context)
|
| 82 |
-
logger.info(f"{self.display_name}: Using fallback response")
|
| 83 |
-
|
| 84 |
-
self.conversation_history.append({"role": "assistant", "content": content})
|
| 85 |
-
|
| 86 |
-
response = {
|
| 87 |
-
"agent_type": self.agent_type,
|
| 88 |
-
"display_name": self.display_name,
|
| 89 |
-
"content": content,
|
| 90 |
-
}
|
| 91 |
-
if thinking:
|
| 92 |
-
response["thinking"] = thinking
|
| 93 |
-
|
| 94 |
-
# Cache the response only if it's from Claude, not fallback
|
| 95 |
-
if not using_fallback:
|
| 96 |
-
response_cache.set(self.agent_type, message, case_context, response)
|
| 97 |
-
|
| 98 |
-
return response
|
| 99 |
-
|
| 100 |
-
def _respond_with_claude(
|
| 101 |
-
self, message: str, case_context: dict
|
| 102 |
-
) -> Optional[tuple[str, str]]:
|
| 103 |
-
"""Call Claude with extended thinking. Returns (content, thinking) or None."""
|
| 104 |
-
if not case_context or not isinstance(case_context, dict):
|
| 105 |
-
case_context = {}
|
| 106 |
-
|
| 107 |
-
# Get system prompt with filtered knowledge
|
| 108 |
-
system = self.get_system_prompt(case_context)
|
| 109 |
-
|
| 110 |
-
# Inject current vitals and ward transcript into system prompt
|
| 111 |
-
# so this agent knows the current state and what others have said
|
| 112 |
-
ward_transcript = case_context.get("ward_transcript", "")
|
| 113 |
-
elapsed = case_context.get("elapsed_minutes", 0)
|
| 114 |
-
if ward_transcript:
|
| 115 |
-
system += (
|
| 116 |
-
f"\n\n=== CURRENT WARD STATUS (Minute {elapsed}) ===\n"
|
| 117 |
-
f"Current vitals: BP {case_context.get('current_bp', 'N/A')}, "
|
| 118 |
-
f"HR {case_context.get('current_hr', 'N/A')}, "
|
| 119 |
-
f"RR {case_context.get('current_rr', 'N/A')}, "
|
| 120 |
-
f"Temp {case_context.get('current_temp', 'N/A')}°C, "
|
| 121 |
-
f"SpO2 {case_context.get('current_spo2', 'N/A')}%\n\n"
|
| 122 |
-
f"Recent ward conversation (what others have said):\n{ward_transcript}\n\n"
|
| 123 |
-
"Use this context to avoid repeating what has already been said. "
|
| 124 |
-
"Build on the conversation naturally — acknowledge what others mentioned if relevant."
|
| 125 |
-
)
|
| 126 |
-
elif elapsed:
|
| 127 |
-
system += (
|
| 128 |
-
f"\n\n=== CURRENT STATUS (Minute {elapsed}) ===\n"
|
| 129 |
-
f"Current vitals: BP {case_context.get('current_bp', 'N/A')}, "
|
| 130 |
-
f"HR {case_context.get('current_hr', 'N/A')}, "
|
| 131 |
-
f"RR {case_context.get('current_rr', 'N/A')}, "
|
| 132 |
-
f"Temp {case_context.get('current_temp', 'N/A')}°C, "
|
| 133 |
-
f"SpO2 {case_context.get('current_spo2', 'N/A')}%"
|
| 134 |
-
)
|
| 135 |
-
|
| 136 |
-
# Apply smart context filtering to reduce prompt size
|
| 137 |
-
if self.specialized_knowledge and len(self.specialized_knowledge) > 1000:
|
| 138 |
-
filtered_knowledge = context_filter.filter_knowledge_for_query(
|
| 139 |
-
self.specialized_knowledge,
|
| 140 |
-
message,
|
| 141 |
-
self.agent_type
|
| 142 |
-
)
|
| 143 |
-
# Replace the full knowledge with filtered version in system prompt
|
| 144 |
-
if filtered_knowledge and len(filtered_knowledge) < len(self.specialized_knowledge):
|
| 145 |
-
system = system.replace(self.specialized_knowledge, filtered_knowledge)
|
| 146 |
-
logger.info(f"Filtered knowledge from {len(self.specialized_knowledge)} to {len(filtered_knowledge)} chars")
|
| 147 |
-
|
| 148 |
-
# Compress conversation history to reduce token count
|
| 149 |
-
messages = context_filter.compress_conversation_history(
|
| 150 |
-
self.conversation_history.copy(),
|
| 151 |
-
max_messages=8 # Keep only last 8 messages
|
| 152 |
-
)
|
| 153 |
-
|
| 154 |
-
try:
|
| 155 |
-
response = self.client.messages.create(
|
| 156 |
-
model="claude-3-haiku-20240307", # Fast, cost-effective model for agents
|
| 157 |
-
max_tokens=500, # Shorter for faster responses
|
| 158 |
-
temperature=0.8, # Slightly creative but consistent
|
| 159 |
-
system=system,
|
| 160 |
-
messages=messages,
|
| 161 |
-
)
|
| 162 |
-
|
| 163 |
-
# Handle response content properly
|
| 164 |
-
content = ""
|
| 165 |
-
if hasattr(response, 'content'):
|
| 166 |
-
if isinstance(response.content, list):
|
| 167 |
-
for block in response.content:
|
| 168 |
-
if hasattr(block, 'text'):
|
| 169 |
-
content = block.text.strip()
|
| 170 |
-
break
|
| 171 |
-
elif isinstance(response.content, str):
|
| 172 |
-
content = response.content.strip()
|
| 173 |
-
|
| 174 |
-
return (content, "") if content else None
|
| 175 |
-
except Exception as e:
|
| 176 |
-
logger.error(f"{self.display_name} Claude API error: {e}")
|
| 177 |
-
return None
|
| 178 |
-
|
| 179 |
-
def reset(self):
|
| 180 |
-
"""Reset conversation history for a new case."""
|
| 181 |
-
self.conversation_history = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/case_state_manager.py
DELETED
|
@@ -1,645 +0,0 @@
|
|
| 1 |
-
"""Case state manager — time progression, vitals evolution, investigation lifecycle.
|
| 2 |
-
|
| 3 |
-
This is what makes it a SIMULATION, not a chatbot:
|
| 4 |
-
- Simulation clock advances with each student action
|
| 5 |
-
- Vitals evolve based on condition trajectory + treatments
|
| 6 |
-
- Investigations have realistic turnaround times (Indian govt hospital)
|
| 7 |
-
- Patient state can improve or deteriorate based on management
|
| 8 |
-
"""
|
| 9 |
-
|
| 10 |
-
import logging
|
| 11 |
-
import random
|
| 12 |
-
from dataclasses import dataclass, field
|
| 13 |
-
from enum import Enum
|
| 14 |
-
from typing import Optional
|
| 15 |
-
|
| 16 |
-
logger = logging.getLogger(__name__)
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
class PatientTrajectory(str, Enum):
|
| 20 |
-
"""Patient clinical trajectory."""
|
| 21 |
-
STABLE = "stable"
|
| 22 |
-
IMPROVING = "improving"
|
| 23 |
-
DETERIORATING = "deteriorating"
|
| 24 |
-
CRITICAL = "critical"
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
class InvestigationStatus(str, Enum):
|
| 28 |
-
"""Investigation lifecycle status."""
|
| 29 |
-
ORDERED = "ordered"
|
| 30 |
-
SAMPLE_COLLECTED = "sample_collected"
|
| 31 |
-
PROCESSING = "processing"
|
| 32 |
-
READY = "ready"
|
| 33 |
-
DELIVERED = "delivered"
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
# Realistic turnaround times for Indian govt hospital (in simulation minutes)
|
| 37 |
-
INVESTIGATION_TURNAROUND = {
|
| 38 |
-
# Basic labs — available in most govt hospitals
|
| 39 |
-
"cbc": {"turnaround": 120, "urgent": 30, "label": "Complete Blood Count"},
|
| 40 |
-
"rft": {"turnaround": 150, "urgent": 45, "label": "Renal Function Test"},
|
| 41 |
-
"lft": {"turnaround": 150, "urgent": 45, "label": "Liver Function Test"},
|
| 42 |
-
"blood_sugar": {"turnaround": 30, "urgent": 10, "label": "Blood Sugar (Random)"},
|
| 43 |
-
"rbs": {"turnaround": 30, "urgent": 10, "label": "Random Blood Sugar"},
|
| 44 |
-
"fbs": {"turnaround": 30, "urgent": 10, "label": "Fasting Blood Sugar"},
|
| 45 |
-
"urine_routine": {"turnaround": 60, "urgent": 20, "label": "Urine Routine/Microscopy"},
|
| 46 |
-
"serum_electrolytes": {"turnaround": 120, "urgent": 30, "label": "Serum Electrolytes"},
|
| 47 |
-
"abg": {"turnaround": 20, "urgent": 10, "label": "Arterial Blood Gas"},
|
| 48 |
-
"hba1c": {"turnaround": 180, "urgent": 60, "label": "HbA1c"},
|
| 49 |
-
"coagulation": {"turnaround": 120, "urgent": 30, "label": "PT/INR/aPTT"},
|
| 50 |
-
"pt_inr": {"turnaround": 120, "urgent": 30, "label": "PT/INR"},
|
| 51 |
-
"blood_group": {"turnaround": 30, "urgent": 15, "label": "Blood Group & Rh"},
|
| 52 |
-
"crossmatch": {"turnaround": 60, "urgent": 30, "label": "Crossmatch"},
|
| 53 |
-
|
| 54 |
-
# Special labs — may need special request
|
| 55 |
-
"troponin": {"turnaround": 90, "urgent": 30, "label": "Troponin I/T"},
|
| 56 |
-
"d_dimer": {"turnaround": 120, "urgent": 45, "label": "D-Dimer"},
|
| 57 |
-
"bnp": {"turnaround": 120, "urgent": 45, "label": "BNP/NT-proBNP"},
|
| 58 |
-
"procalcitonin": {"turnaround": 180, "urgent": 60, "label": "Procalcitonin"},
|
| 59 |
-
"blood_culture": {"turnaround": 2880, "urgent": 2880, "label": "Blood Culture (Prelim 24h, Final 48h)"},
|
| 60 |
-
"urine_culture": {"turnaround": 2880, "urgent": 2880, "label": "Urine Culture"},
|
| 61 |
-
"csf_analysis": {"turnaround": 120, "urgent": 45, "label": "CSF Analysis"},
|
| 62 |
-
"amylase": {"turnaround": 120, "urgent": 30, "label": "Serum Amylase"},
|
| 63 |
-
"lipase": {"turnaround": 120, "urgent": 30, "label": "Serum Lipase"},
|
| 64 |
-
"thyroid": {"turnaround": 240, "urgent": 120, "label": "Thyroid Profile (T3/T4/TSH)"},
|
| 65 |
-
|
| 66 |
-
# Serology
|
| 67 |
-
"dengue_ns1": {"turnaround": 60, "urgent": 30, "label": "Dengue NS1 Antigen"},
|
| 68 |
-
"dengue_serology": {"turnaround": 120, "urgent": 60, "label": "Dengue IgM/IgG"},
|
| 69 |
-
"malaria_smear": {"turnaround": 60, "urgent": 20, "label": "Peripheral Smear for MP"},
|
| 70 |
-
"malaria_rdt": {"turnaround": 15, "urgent": 10, "label": "Malaria Rapid Test"},
|
| 71 |
-
"widal": {"turnaround": 120, "urgent": 60, "label": "Widal Test"},
|
| 72 |
-
"hiv": {"turnaround": 60, "urgent": 30, "label": "HIV Rapid/ELISA"},
|
| 73 |
-
"hbsag": {"turnaround": 60, "urgent": 30, "label": "HBsAg"},
|
| 74 |
-
"anti_hcv": {"turnaround": 60, "urgent": 30, "label": "Anti-HCV"},
|
| 75 |
-
|
| 76 |
-
# Imaging — availability varies
|
| 77 |
-
"xray_chest": {"turnaround": 30, "urgent": 15, "label": "Chest X-ray PA"},
|
| 78 |
-
"xray": {"turnaround": 30, "urgent": 15, "label": "X-ray"},
|
| 79 |
-
"ecg": {"turnaround": 15, "urgent": 5, "label": "12-lead ECG"},
|
| 80 |
-
"ultrasound": {"turnaround": 120, "urgent": 30, "label": "USG Abdomen (needs radiology call)"},
|
| 81 |
-
"echo": {"turnaround": 240, "urgent": 60, "label": "2D Echocardiography"},
|
| 82 |
-
"ct_scan": {"turnaround": 480, "urgent": 120, "label": "CT Scan (may need referral)"},
|
| 83 |
-
"mri": {"turnaround": 1440, "urgent": 480, "label": "MRI (usually needs referral)"},
|
| 84 |
-
|
| 85 |
-
# Default for unrecognized
|
| 86 |
-
"_default": {"turnaround": 180, "urgent": 60, "label": "Investigation"},
|
| 87 |
-
}
|
| 88 |
-
|
| 89 |
-
# Time cost per student action (simulation minutes)
|
| 90 |
-
ACTION_TIME_COST = {
|
| 91 |
-
"talk_to_patient": 10,
|
| 92 |
-
"ask_nurse": 5,
|
| 93 |
-
"consult_senior": 10,
|
| 94 |
-
"examine_patient": 15,
|
| 95 |
-
"order_investigation": 5,
|
| 96 |
-
"order_treatment": 5,
|
| 97 |
-
"team_huddle": 15,
|
| 98 |
-
"wait_for_results": 30,
|
| 99 |
-
"review_results": 5,
|
| 100 |
-
}
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
@dataclass
|
| 104 |
-
class OrderedInvestigation:
|
| 105 |
-
"""Tracks a single ordered investigation."""
|
| 106 |
-
investigation_id: str
|
| 107 |
-
investigation_type: str
|
| 108 |
-
label: str
|
| 109 |
-
ordered_at: int # simulation minute
|
| 110 |
-
turnaround: int # minutes until ready
|
| 111 |
-
status: InvestigationStatus = InvestigationStatus.ORDERED
|
| 112 |
-
result_text: str = "" # populated from case data when ready
|
| 113 |
-
is_urgent: bool = False
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
@dataclass
|
| 117 |
-
class TreatmentRecord:
|
| 118 |
-
"""Tracks a treatment ordered by the student."""
|
| 119 |
-
treatment_id: str
|
| 120 |
-
description: str
|
| 121 |
-
ordered_at: int
|
| 122 |
-
effects: dict = field(default_factory=dict)
|
| 123 |
-
is_appropriate: bool = True
|
| 124 |
-
safety_note: str = ""
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
@dataclass
|
| 128 |
-
class SimulationEvent:
|
| 129 |
-
"""A timed event in the simulation."""
|
| 130 |
-
event_id: str
|
| 131 |
-
timestamp: int # simulation minute
|
| 132 |
-
event_type: str # "investigation_ready", "vitals_change", "patient_event", "nurse_alert"
|
| 133 |
-
title: str
|
| 134 |
-
description: str
|
| 135 |
-
agent_type: str = "" # which agent delivers this
|
| 136 |
-
delivered: bool = False
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
class CaseStateManager:
|
| 140 |
-
"""Manages the complete simulation state for a case session.
|
| 141 |
-
|
| 142 |
-
Tracks:
|
| 143 |
-
- Simulation clock (elapsed minutes)
|
| 144 |
-
- Vital signs with evolution
|
| 145 |
-
- Investigation orders and lifecycle
|
| 146 |
-
- Treatment log
|
| 147 |
-
- Patient trajectory
|
| 148 |
-
- Timed simulation events
|
| 149 |
-
"""
|
| 150 |
-
|
| 151 |
-
def __init__(self, case_data: dict, student_level: str = "intern"):
|
| 152 |
-
self.case_data = case_data
|
| 153 |
-
self.student_level = student_level
|
| 154 |
-
|
| 155 |
-
# Simulation clock
|
| 156 |
-
self.elapsed_minutes: int = 0
|
| 157 |
-
self.action_count: int = 0
|
| 158 |
-
|
| 159 |
-
# Vitals — start from case data, evolve over time
|
| 160 |
-
vitals = case_data.get("vital_signs", {})
|
| 161 |
-
self.current_vitals: dict = {
|
| 162 |
-
"bp_systolic": self._parse_bp(vitals.get("bp", "120/80"), "systolic"),
|
| 163 |
-
"bp_diastolic": self._parse_bp(vitals.get("bp", "120/80"), "diastolic"),
|
| 164 |
-
"hr": int(vitals.get("hr", 80)),
|
| 165 |
-
"rr": int(vitals.get("rr", 16)),
|
| 166 |
-
"temp": float(vitals.get("temp", 37.0)),
|
| 167 |
-
"spo2": int(vitals.get("spo2", 98)),
|
| 168 |
-
}
|
| 169 |
-
self.baseline_vitals: dict = self.current_vitals.copy()
|
| 170 |
-
self.vitals_history: list[dict] = [
|
| 171 |
-
{"time": 0, **self.current_vitals}
|
| 172 |
-
]
|
| 173 |
-
|
| 174 |
-
# Patient trajectory
|
| 175 |
-
self.trajectory: PatientTrajectory = self._initial_trajectory()
|
| 176 |
-
|
| 177 |
-
# Investigation tracking
|
| 178 |
-
self.investigations: dict[str, OrderedInvestigation] = {}
|
| 179 |
-
self._next_inv_id: int = 0
|
| 180 |
-
|
| 181 |
-
# Treatment log
|
| 182 |
-
self.treatments: list[TreatmentRecord] = []
|
| 183 |
-
|
| 184 |
-
# Simulation events queue
|
| 185 |
-
self.events: list[SimulationEvent] = []
|
| 186 |
-
self._next_event_id: int = 0
|
| 187 |
-
|
| 188 |
-
# Extract investigation results from case data for realistic delivery
|
| 189 |
-
self._case_lab_data = self._extract_lab_data(case_data)
|
| 190 |
-
|
| 191 |
-
def _parse_bp(self, bp_str: str, component: str) -> int:
|
| 192 |
-
"""Parse BP string like '120/80' into systolic or diastolic."""
|
| 193 |
-
try:
|
| 194 |
-
parts = str(bp_str).split("/")
|
| 195 |
-
if component == "systolic":
|
| 196 |
-
return int(parts[0])
|
| 197 |
-
return int(parts[1]) if len(parts) > 1 else 80
|
| 198 |
-
except (ValueError, IndexError):
|
| 199 |
-
return 120 if component == "systolic" else 80
|
| 200 |
-
|
| 201 |
-
def _initial_trajectory(self) -> PatientTrajectory:
|
| 202 |
-
"""Determine initial trajectory from case severity."""
|
| 203 |
-
difficulty = self.case_data.get("difficulty", "intermediate")
|
| 204 |
-
spo2 = self.current_vitals["spo2"]
|
| 205 |
-
hr = self.current_vitals["hr"]
|
| 206 |
-
|
| 207 |
-
if difficulty == "advanced" or spo2 < 90 or hr > 130:
|
| 208 |
-
return PatientTrajectory.DETERIORATING
|
| 209 |
-
if spo2 < 94 or hr > 110:
|
| 210 |
-
return PatientTrajectory.STABLE # at risk but stable initially
|
| 211 |
-
return PatientTrajectory.STABLE
|
| 212 |
-
|
| 213 |
-
def _extract_lab_data(self, case_data: dict) -> str:
|
| 214 |
-
"""Extract lab/investigation data from case stages for result delivery."""
|
| 215 |
-
for stage in case_data.get("stages", []):
|
| 216 |
-
if stage.get("stage") == "labs":
|
| 217 |
-
return stage.get("info", "")
|
| 218 |
-
return ""
|
| 219 |
-
|
| 220 |
-
def advance_time(self, action_type: str) -> list[SimulationEvent]:
|
| 221 |
-
"""Advance the simulation clock and return any triggered events.
|
| 222 |
-
|
| 223 |
-
Called after each student action. Returns events that should be delivered.
|
| 224 |
-
"""
|
| 225 |
-
time_cost = ACTION_TIME_COST.get(action_type, 10)
|
| 226 |
-
self.elapsed_minutes += time_cost
|
| 227 |
-
self.action_count += 1
|
| 228 |
-
|
| 229 |
-
triggered_events: list[SimulationEvent] = []
|
| 230 |
-
|
| 231 |
-
# 1. Evolve vitals
|
| 232 |
-
self._evolve_vitals(time_cost)
|
| 233 |
-
|
| 234 |
-
# 2. Check investigation status
|
| 235 |
-
triggered_events.extend(self._check_investigations())
|
| 236 |
-
|
| 237 |
-
# 3. Check for patient state events
|
| 238 |
-
triggered_events.extend(self._check_patient_events())
|
| 239 |
-
|
| 240 |
-
# 4. Record vitals snapshot
|
| 241 |
-
self.vitals_history.append({
|
| 242 |
-
"time": self.elapsed_minutes,
|
| 243 |
-
**self.current_vitals,
|
| 244 |
-
})
|
| 245 |
-
|
| 246 |
-
return triggered_events
|
| 247 |
-
|
| 248 |
-
def _evolve_vitals(self, minutes_passed: int):
|
| 249 |
-
"""Evolve vital signs based on trajectory and time.
|
| 250 |
-
|
| 251 |
-
Changes are subtle and clinically realistic — not random noise.
|
| 252 |
-
"""
|
| 253 |
-
v = self.current_vitals
|
| 254 |
-
|
| 255 |
-
if self.trajectory == PatientTrajectory.DETERIORATING:
|
| 256 |
-
# Gradual worsening — clinically realistic
|
| 257 |
-
rate = minutes_passed / 60 # fraction of an hour
|
| 258 |
-
v["hr"] = min(180, v["hr"] + int(3 * rate + random.uniform(0, 2)))
|
| 259 |
-
v["rr"] = min(45, v["rr"] + int(2 * rate + random.uniform(0, 1)))
|
| 260 |
-
v["spo2"] = max(70, v["spo2"] - int(1 * rate + random.uniform(0, 1)))
|
| 261 |
-
v["bp_systolic"] = max(60, v["bp_systolic"] - int(2 * rate))
|
| 262 |
-
v["temp"] = min(41.0, v["temp"] + 0.1 * rate)
|
| 263 |
-
|
| 264 |
-
elif self.trajectory == PatientTrajectory.IMPROVING:
|
| 265 |
-
# Gradual improvement toward normal
|
| 266 |
-
rate = minutes_passed / 60
|
| 267 |
-
target_hr = 80
|
| 268 |
-
target_rr = 16
|
| 269 |
-
target_spo2 = 98
|
| 270 |
-
target_bp = 120
|
| 271 |
-
target_temp = 37.0
|
| 272 |
-
|
| 273 |
-
v["hr"] = v["hr"] + int((target_hr - v["hr"]) * 0.1 * rate)
|
| 274 |
-
v["rr"] = v["rr"] + int((target_rr - v["rr"]) * 0.1 * rate)
|
| 275 |
-
v["spo2"] = min(100, v["spo2"] + int((target_spo2 - v["spo2"]) * 0.1 * rate))
|
| 276 |
-
v["bp_systolic"] = v["bp_systolic"] + int((target_bp - v["bp_systolic"]) * 0.1 * rate)
|
| 277 |
-
v["temp"] = v["temp"] + (target_temp - v["temp"]) * 0.1 * rate
|
| 278 |
-
|
| 279 |
-
elif self.trajectory == PatientTrajectory.CRITICAL:
|
| 280 |
-
# Rapid deterioration
|
| 281 |
-
rate = minutes_passed / 30
|
| 282 |
-
v["hr"] = min(200, v["hr"] + int(5 * rate))
|
| 283 |
-
v["rr"] = min(50, v["rr"] + int(3 * rate))
|
| 284 |
-
v["spo2"] = max(60, v["spo2"] - int(3 * rate))
|
| 285 |
-
v["bp_systolic"] = max(50, v["bp_systolic"] - int(5 * rate))
|
| 286 |
-
|
| 287 |
-
# STABLE: very minor fluctuation only
|
| 288 |
-
elif self.trajectory == PatientTrajectory.STABLE:
|
| 289 |
-
v["hr"] += random.choice([-1, 0, 0, 1])
|
| 290 |
-
v["rr"] += random.choice([-1, 0, 0, 0, 1])
|
| 291 |
-
|
| 292 |
-
# Clamp values
|
| 293 |
-
v["hr"] = max(30, min(200, v["hr"]))
|
| 294 |
-
v["rr"] = max(6, min(50, v["rr"]))
|
| 295 |
-
v["spo2"] = max(60, min(100, v["spo2"]))
|
| 296 |
-
v["bp_systolic"] = max(50, min(220, v["bp_systolic"]))
|
| 297 |
-
v["bp_diastolic"] = max(30, min(130, v["bp_diastolic"]))
|
| 298 |
-
v["temp"] = round(max(35.0, min(42.0, v["temp"])), 1)
|
| 299 |
-
|
| 300 |
-
def _check_investigations(self) -> list[SimulationEvent]:
|
| 301 |
-
"""Check if any ordered investigations are now ready."""
|
| 302 |
-
events = []
|
| 303 |
-
for inv_id, inv in self.investigations.items():
|
| 304 |
-
if inv.status in (InvestigationStatus.ORDERED, InvestigationStatus.SAMPLE_COLLECTED, InvestigationStatus.PROCESSING):
|
| 305 |
-
time_since_order = self.elapsed_minutes - inv.ordered_at
|
| 306 |
-
if time_since_order >= inv.turnaround:
|
| 307 |
-
inv.status = InvestigationStatus.READY
|
| 308 |
-
event = SimulationEvent(
|
| 309 |
-
event_id=f"evt-{self._next_event_id}",
|
| 310 |
-
timestamp=self.elapsed_minutes,
|
| 311 |
-
event_type="investigation_ready",
|
| 312 |
-
title=f"{inv.label} Results Ready",
|
| 313 |
-
description=inv.result_text or f"{inv.label} results are now available.",
|
| 314 |
-
agent_type="nurse",
|
| 315 |
-
)
|
| 316 |
-
self._next_event_id += 1
|
| 317 |
-
events.append(event)
|
| 318 |
-
self.events.append(event)
|
| 319 |
-
elif time_since_order >= inv.turnaround * 0.5 and inv.status == InvestigationStatus.ORDERED:
|
| 320 |
-
inv.status = InvestigationStatus.PROCESSING
|
| 321 |
-
return events
|
| 322 |
-
|
| 323 |
-
def _check_patient_events(self) -> list[SimulationEvent]:
|
| 324 |
-
"""Generate patient-state-driven events (deterioration alerts, new symptoms)."""
|
| 325 |
-
events = []
|
| 326 |
-
v = self.current_vitals
|
| 327 |
-
|
| 328 |
-
# Critical vitals trigger nurse alert
|
| 329 |
-
if v["spo2"] < 88 and not self._event_delivered("critical_spo2"):
|
| 330 |
-
event = SimulationEvent(
|
| 331 |
-
event_id=f"evt-{self._next_event_id}",
|
| 332 |
-
timestamp=self.elapsed_minutes,
|
| 333 |
-
event_type="nurse_alert",
|
| 334 |
-
title="Critical SpO2 Alert",
|
| 335 |
-
description=f"Doctor! Patient's SpO2 has dropped to {v['spo2']}%. Should we start high-flow O2?",
|
| 336 |
-
agent_type="nurse",
|
| 337 |
-
)
|
| 338 |
-
self._next_event_id += 1
|
| 339 |
-
events.append(event)
|
| 340 |
-
self.events.append(event)
|
| 341 |
-
|
| 342 |
-
if v["hr"] > 140 and not self._event_delivered("tachycardia_alert"):
|
| 343 |
-
event = SimulationEvent(
|
| 344 |
-
event_id=f"evt-{self._next_event_id}",
|
| 345 |
-
timestamp=self.elapsed_minutes,
|
| 346 |
-
event_type="nurse_alert",
|
| 347 |
-
title="Tachycardia Alert",
|
| 348 |
-
description=f"Doctor, HR is {v['hr']}. Patient is becoming restless. Do you want ECG monitoring?",
|
| 349 |
-
agent_type="nurse",
|
| 350 |
-
)
|
| 351 |
-
self._next_event_id += 1
|
| 352 |
-
events.append(event)
|
| 353 |
-
self.events.append(event)
|
| 354 |
-
|
| 355 |
-
if v["bp_systolic"] < 80 and not self._event_delivered("hypotension_alert"):
|
| 356 |
-
event = SimulationEvent(
|
| 357 |
-
event_id=f"evt-{self._next_event_id}",
|
| 358 |
-
timestamp=self.elapsed_minutes,
|
| 359 |
-
event_type="nurse_alert",
|
| 360 |
-
title="Hypotension Alert",
|
| 361 |
-
description=f"Doctor! BP is {v['bp_systolic']}/{v['bp_diastolic']}. Patient is hypotensive. Should I start IV fluids?",
|
| 362 |
-
agent_type="nurse",
|
| 363 |
-
)
|
| 364 |
-
self._next_event_id += 1
|
| 365 |
-
events.append(event)
|
| 366 |
-
self.events.append(event)
|
| 367 |
-
|
| 368 |
-
# Time-based deterioration warning (if no treatment after 30 min of deterioration)
|
| 369 |
-
if (
|
| 370 |
-
self.trajectory == PatientTrajectory.DETERIORATING
|
| 371 |
-
and self.elapsed_minutes > 30
|
| 372 |
-
and len(self.treatments) == 0
|
| 373 |
-
and not self._event_delivered("no_treatment_warning")
|
| 374 |
-
):
|
| 375 |
-
event = SimulationEvent(
|
| 376 |
-
event_id=f"evt-{self._next_event_id}",
|
| 377 |
-
timestamp=self.elapsed_minutes,
|
| 378 |
-
event_type="senior_concern",
|
| 379 |
-
title="Senior Doctor Concern",
|
| 380 |
-
description="The patient has been here for a while without treatment. Have we started any management?",
|
| 381 |
-
agent_type="senior_doctor",
|
| 382 |
-
)
|
| 383 |
-
self._next_event_id += 1
|
| 384 |
-
events.append(event)
|
| 385 |
-
self.events.append(event)
|
| 386 |
-
|
| 387 |
-
return events
|
| 388 |
-
|
| 389 |
-
def _event_delivered(self, event_key: str) -> bool:
|
| 390 |
-
"""Check if a named event has already been triggered."""
|
| 391 |
-
return any(
|
| 392 |
-
e.event_id.endswith(event_key) or event_key in e.title.lower().replace(" ", "_")
|
| 393 |
-
for e in self.events
|
| 394 |
-
)
|
| 395 |
-
|
| 396 |
-
def order_investigation(
|
| 397 |
-
self, investigation_type: str, is_urgent: bool = False
|
| 398 |
-
) -> OrderedInvestigation:
|
| 399 |
-
"""Order an investigation. Returns the tracking object."""
|
| 400 |
-
inv_type_key = investigation_type.lower().replace(" ", "_").replace("-", "_")
|
| 401 |
-
|
| 402 |
-
# Match against known investigations
|
| 403 |
-
inv_info = INVESTIGATION_TURNAROUND.get(inv_type_key, INVESTIGATION_TURNAROUND["_default"])
|
| 404 |
-
turnaround = inv_info["urgent"] if is_urgent else inv_info["turnaround"]
|
| 405 |
-
|
| 406 |
-
inv_id = f"inv-{self._next_inv_id}"
|
| 407 |
-
self._next_inv_id += 1
|
| 408 |
-
|
| 409 |
-
investigation = OrderedInvestigation(
|
| 410 |
-
investigation_id=inv_id,
|
| 411 |
-
investigation_type=inv_type_key,
|
| 412 |
-
label=inv_info["label"],
|
| 413 |
-
ordered_at=self.elapsed_minutes,
|
| 414 |
-
turnaround=turnaround,
|
| 415 |
-
is_urgent=is_urgent,
|
| 416 |
-
result_text=self._get_investigation_result(inv_type_key),
|
| 417 |
-
)
|
| 418 |
-
self.investigations[inv_id] = investigation
|
| 419 |
-
|
| 420 |
-
logger.info(
|
| 421 |
-
f"Investigation ordered: {investigation.label} "
|
| 422 |
-
f"(ETA: {turnaround}min, urgent={is_urgent})"
|
| 423 |
-
)
|
| 424 |
-
return investigation
|
| 425 |
-
|
| 426 |
-
def _get_investigation_result(self, inv_type: str) -> str:
|
| 427 |
-
"""Extract relevant result text from case data for this investigation type.
|
| 428 |
-
|
| 429 |
-
Uses Claude-style pattern matching on the case lab data to find relevant results.
|
| 430 |
-
"""
|
| 431 |
-
if not self._case_lab_data:
|
| 432 |
-
return f"{inv_type.upper()} results: Within normal limits."
|
| 433 |
-
|
| 434 |
-
# Simple keyword matching against case lab text
|
| 435 |
-
lab_text = self._case_lab_data.lower()
|
| 436 |
-
inv_keywords = {
|
| 437 |
-
"cbc": ["cbc", "hemoglobin", "hb ", "wbc", "platelet", "tlc", "dlc"],
|
| 438 |
-
"rft": ["creatinine", "urea", "bun", "egfr", "rft", "renal"],
|
| 439 |
-
"lft": ["bilirubin", "sgot", "sgpt", "alt", "ast", "lft", "albumin", "liver"],
|
| 440 |
-
"blood_sugar": ["blood sugar", "glucose", "rbs", "fbs"],
|
| 441 |
-
"rbs": ["blood sugar", "glucose", "rbs"],
|
| 442 |
-
"abg": ["abg", "arterial blood gas", "pao2", "pco2", "ph ", "bicarbonate", "hco3"],
|
| 443 |
-
"troponin": ["troponin", "trop"],
|
| 444 |
-
"ecg": ["ecg", "electrocardiogram", "st elevation", "st depression", "qrs", "rhythm"],
|
| 445 |
-
"xray_chest": ["x-ray", "xray", "chest x", "cxr", "infiltrate", "consolidation"],
|
| 446 |
-
"xray": ["x-ray", "xray"],
|
| 447 |
-
"ultrasound": ["usg", "ultrasound", "sonography"],
|
| 448 |
-
"dengue_ns1": ["ns1", "dengue"],
|
| 449 |
-
"dengue_serology": ["dengue igm", "dengue igg"],
|
| 450 |
-
"malaria_smear": ["peripheral smear", "malaria", "mp"],
|
| 451 |
-
"malaria_rdt": ["malaria rapid", "rdt"],
|
| 452 |
-
"blood_culture": ["blood culture", "bacteremia"],
|
| 453 |
-
"serum_electrolytes": ["sodium", "potassium", "electrolyte", "na+", "k+"],
|
| 454 |
-
"coagulation": ["pt ", "inr", "aptt", "coagulation"],
|
| 455 |
-
"pt_inr": ["pt ", "inr"],
|
| 456 |
-
"thyroid": ["tsh", "t3", "t4", "thyroid"],
|
| 457 |
-
"hba1c": ["hba1c", "glycated"],
|
| 458 |
-
"amylase": ["amylase"],
|
| 459 |
-
"lipase": ["lipase"],
|
| 460 |
-
"csf_analysis": ["csf", "cerebrospinal"],
|
| 461 |
-
"d_dimer": ["d-dimer", "d dimer"],
|
| 462 |
-
"echo": ["echo", "echocardiography", "ef ", "ejection fraction", "lvef"],
|
| 463 |
-
}
|
| 464 |
-
|
| 465 |
-
keywords = inv_keywords.get(inv_type, [inv_type])
|
| 466 |
-
relevant_lines = []
|
| 467 |
-
|
| 468 |
-
for line in self._case_lab_data.split("\n"):
|
| 469 |
-
line_lower = line.lower()
|
| 470 |
-
if any(kw in line_lower for kw in keywords):
|
| 471 |
-
relevant_lines.append(line.strip())
|
| 472 |
-
|
| 473 |
-
if relevant_lines:
|
| 474 |
-
return "\n".join(relevant_lines)
|
| 475 |
-
|
| 476 |
-
return f"{inv_type.replace('_', ' ').title()}: Results within normal limits (no specific abnormality noted)."
|
| 477 |
-
|
| 478 |
-
def record_treatment(
|
| 479 |
-
self,
|
| 480 |
-
description: str,
|
| 481 |
-
effects: dict,
|
| 482 |
-
is_appropriate: bool = True,
|
| 483 |
-
safety_note: str = "",
|
| 484 |
-
) -> TreatmentRecord:
|
| 485 |
-
"""Record a treatment and apply its effects to vitals."""
|
| 486 |
-
record = TreatmentRecord(
|
| 487 |
-
treatment_id=f"tx-{len(self.treatments)}",
|
| 488 |
-
description=description,
|
| 489 |
-
ordered_at=self.elapsed_minutes,
|
| 490 |
-
effects=effects,
|
| 491 |
-
is_appropriate=is_appropriate,
|
| 492 |
-
safety_note=safety_note,
|
| 493 |
-
)
|
| 494 |
-
self.treatments.append(record)
|
| 495 |
-
|
| 496 |
-
# Apply immediate effects to vitals
|
| 497 |
-
self._apply_treatment_effects(effects, is_appropriate)
|
| 498 |
-
|
| 499 |
-
return record
|
| 500 |
-
|
| 501 |
-
def _apply_treatment_effects(self, effects: dict, is_appropriate: bool):
|
| 502 |
-
"""Apply treatment effects to current vitals and trajectory."""
|
| 503 |
-
if is_appropriate:
|
| 504 |
-
# Correct treatment shifts trajectory toward improving
|
| 505 |
-
if self.trajectory in (PatientTrajectory.DETERIORATING, PatientTrajectory.CRITICAL):
|
| 506 |
-
self.trajectory = PatientTrajectory.STABLE
|
| 507 |
-
elif self.trajectory == PatientTrajectory.STABLE:
|
| 508 |
-
self.trajectory = PatientTrajectory.IMPROVING
|
| 509 |
-
|
| 510 |
-
# Apply specific effects
|
| 511 |
-
v = self.current_vitals
|
| 512 |
-
if "hr_change" in effects:
|
| 513 |
-
v["hr"] = max(40, min(180, v["hr"] + effects["hr_change"]))
|
| 514 |
-
if "bp_systolic_change" in effects:
|
| 515 |
-
v["bp_systolic"] = max(60, min(200, v["bp_systolic"] + effects["bp_systolic_change"]))
|
| 516 |
-
if "spo2_change" in effects:
|
| 517 |
-
v["spo2"] = min(100, max(60, v["spo2"] + effects["spo2_change"]))
|
| 518 |
-
if "rr_change" in effects:
|
| 519 |
-
v["rr"] = max(8, min(40, v["rr"] + effects["rr_change"]))
|
| 520 |
-
if "temp_change" in effects:
|
| 521 |
-
v["temp"] = round(max(35.0, min(42.0, v["temp"] + effects["temp_change"])), 1)
|
| 522 |
-
else:
|
| 523 |
-
# Wrong treatment worsens trajectory
|
| 524 |
-
if self.trajectory == PatientTrajectory.STABLE:
|
| 525 |
-
self.trajectory = PatientTrajectory.DETERIORATING
|
| 526 |
-
elif self.trajectory == PatientTrajectory.DETERIORATING:
|
| 527 |
-
self.trajectory = PatientTrajectory.CRITICAL
|
| 528 |
-
|
| 529 |
-
def get_vitals_display(self) -> dict:
|
| 530 |
-
"""Get current vitals formatted for display with trends."""
|
| 531 |
-
v = self.current_vitals
|
| 532 |
-
bp_str = f"{v['bp_systolic']}/{v['bp_diastolic']}"
|
| 533 |
-
|
| 534 |
-
# Calculate trends by comparing to 2 snapshots ago
|
| 535 |
-
trends = {}
|
| 536 |
-
if len(self.vitals_history) >= 2:
|
| 537 |
-
prev = self.vitals_history[-2]
|
| 538 |
-
for key in ["hr", "rr", "spo2", "bp_systolic", "temp"]:
|
| 539 |
-
diff = v[key] - prev[key]
|
| 540 |
-
if isinstance(diff, float):
|
| 541 |
-
trends[key] = "rising" if diff > 0.2 else "falling" if diff < -0.2 else "stable"
|
| 542 |
-
else:
|
| 543 |
-
trends[key] = "rising" if diff > 2 else "falling" if diff < -2 else "stable"
|
| 544 |
-
|
| 545 |
-
return {
|
| 546 |
-
"bp": bp_str,
|
| 547 |
-
"hr": v["hr"],
|
| 548 |
-
"rr": v["rr"],
|
| 549 |
-
"temp": v["temp"],
|
| 550 |
-
"spo2": v["spo2"],
|
| 551 |
-
"trends": trends,
|
| 552 |
-
"trajectory": self.trajectory.value,
|
| 553 |
-
"elapsed_minutes": self.elapsed_minutes,
|
| 554 |
-
}
|
| 555 |
-
|
| 556 |
-
def get_investigation_status(self) -> list[dict]:
|
| 557 |
-
"""Get status of all ordered investigations."""
|
| 558 |
-
results = []
|
| 559 |
-
for inv in self.investigations.values():
|
| 560 |
-
remaining = max(0, inv.turnaround - (self.elapsed_minutes - inv.ordered_at))
|
| 561 |
-
results.append({
|
| 562 |
-
"id": inv.investigation_id,
|
| 563 |
-
"type": inv.investigation_type,
|
| 564 |
-
"label": inv.label,
|
| 565 |
-
"status": inv.status.value,
|
| 566 |
-
"ordered_at": inv.ordered_at,
|
| 567 |
-
"estimated_ready": inv.ordered_at + inv.turnaround,
|
| 568 |
-
"remaining_minutes": remaining,
|
| 569 |
-
"is_urgent": inv.is_urgent,
|
| 570 |
-
"result": inv.result_text if inv.status == InvestigationStatus.READY else None,
|
| 571 |
-
})
|
| 572 |
-
return results
|
| 573 |
-
|
| 574 |
-
def get_timeline(self) -> list[dict]:
|
| 575 |
-
"""Get complete simulation timeline for display."""
|
| 576 |
-
timeline = [
|
| 577 |
-
{"time": 0, "type": "patient_arrival", "title": "Patient arrives", "description": self.case_data.get("chief_complaint", "")}
|
| 578 |
-
]
|
| 579 |
-
|
| 580 |
-
# Add investigation orders
|
| 581 |
-
for inv in self.investigations.values():
|
| 582 |
-
timeline.append({
|
| 583 |
-
"time": inv.ordered_at,
|
| 584 |
-
"type": "investigation_ordered",
|
| 585 |
-
"title": f"{inv.label} ordered",
|
| 586 |
-
"description": f"{'Urgent' if inv.is_urgent else 'Routine'} — ETA {inv.turnaround} min",
|
| 587 |
-
})
|
| 588 |
-
if inv.status == InvestigationStatus.READY:
|
| 589 |
-
timeline.append({
|
| 590 |
-
"time": inv.ordered_at + inv.turnaround,
|
| 591 |
-
"type": "investigation_ready",
|
| 592 |
-
"title": f"{inv.label} ready",
|
| 593 |
-
"description": "Results available",
|
| 594 |
-
})
|
| 595 |
-
|
| 596 |
-
# Add treatments
|
| 597 |
-
for tx in self.treatments:
|
| 598 |
-
timeline.append({
|
| 599 |
-
"time": tx.ordered_at,
|
| 600 |
-
"type": "treatment",
|
| 601 |
-
"title": f"Treatment: {tx.description[:50]}",
|
| 602 |
-
"description": tx.safety_note or "Treatment administered",
|
| 603 |
-
})
|
| 604 |
-
|
| 605 |
-
# Add simulation events
|
| 606 |
-
for evt in self.events:
|
| 607 |
-
timeline.append({
|
| 608 |
-
"time": evt.timestamp,
|
| 609 |
-
"type": evt.event_type,
|
| 610 |
-
"title": evt.title,
|
| 611 |
-
"description": evt.description,
|
| 612 |
-
})
|
| 613 |
-
|
| 614 |
-
# Sort by time
|
| 615 |
-
timeline.sort(key=lambda x: x["time"])
|
| 616 |
-
return timeline
|
| 617 |
-
|
| 618 |
-
def get_state_summary(self) -> str:
|
| 619 |
-
"""Generate a natural-language summary of current state for agent context.
|
| 620 |
-
|
| 621 |
-
This is injected into agent prompts so they're aware of what's happening.
|
| 622 |
-
"""
|
| 623 |
-
v = self.current_vitals
|
| 624 |
-
summary_parts = [
|
| 625 |
-
f"SIMULATION TIME: {self.elapsed_minutes} minutes elapsed.",
|
| 626 |
-
f"CURRENT VITALS: BP {v['bp_systolic']}/{v['bp_diastolic']}, HR {v['hr']}, RR {v['rr']}, Temp {v['temp']}°C, SpO2 {v['spo2']}%.",
|
| 627 |
-
f"PATIENT TRAJECTORY: {self.trajectory.value}.",
|
| 628 |
-
]
|
| 629 |
-
|
| 630 |
-
# Pending investigations
|
| 631 |
-
pending = [inv for inv in self.investigations.values() if inv.status != InvestigationStatus.READY]
|
| 632 |
-
ready = [inv for inv in self.investigations.values() if inv.status == InvestigationStatus.READY]
|
| 633 |
-
if pending:
|
| 634 |
-
summary_parts.append(f"PENDING INVESTIGATIONS: {', '.join(inv.label for inv in pending)}.")
|
| 635 |
-
if ready:
|
| 636 |
-
summary_parts.append(f"RESULTS AVAILABLE: {', '.join(inv.label for inv in ready)}.")
|
| 637 |
-
|
| 638 |
-
# Treatments
|
| 639 |
-
if self.treatments:
|
| 640 |
-
recent = self.treatments[-3:]
|
| 641 |
-
summary_parts.append(f"RECENT TREATMENTS: {'; '.join(tx.description for tx in recent)}.")
|
| 642 |
-
else:
|
| 643 |
-
summary_parts.append("NO TREATMENTS ORDERED YET.")
|
| 644 |
-
|
| 645 |
-
return "\n".join(summary_parts)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/clinical_validator.py
DELETED
|
@@ -1,213 +0,0 @@
|
|
| 1 |
-
"""Clinical validator — safety gate for dangerous student actions.
|
| 2 |
-
|
| 3 |
-
Catches critical errors BEFORE they harm the virtual patient:
|
| 4 |
-
- Drug interactions with existing medications
|
| 5 |
-
- Contraindicated drugs for the patient's condition
|
| 6 |
-
- Missing critical steps (e.g., checking creatinine before contrast)
|
| 7 |
-
- Dose errors
|
| 8 |
-
|
| 9 |
-
When danger is detected, agents intervene with teaching moments —
|
| 10 |
-
not just blocks. This is how real hospitals work: the nurse catches
|
| 11 |
-
a potentially wrong order and confirms before administering.
|
| 12 |
-
"""
|
| 13 |
-
|
| 14 |
-
import logging
|
| 15 |
-
import os
|
| 16 |
-
from typing import Optional
|
| 17 |
-
|
| 18 |
-
import anthropic
|
| 19 |
-
|
| 20 |
-
logger = logging.getLogger(__name__)
|
| 21 |
-
|
| 22 |
-
VALIDATION_PROMPT = """You are a CLINICAL SAFETY OFFICER in an Indian teaching hospital. Your job is to check if a medical student's action could harm the patient.
|
| 23 |
-
|
| 24 |
-
PATIENT CONTEXT:
|
| 25 |
-
- Diagnosis: {diagnosis}
|
| 26 |
-
- Age/Gender: {age}y {gender}
|
| 27 |
-
- Chief complaint: {chief_complaint}
|
| 28 |
-
- Current vitals: BP {bp}, HR {hr}, RR {rr}, Temp {temp}°C, SpO2 {spo2}%
|
| 29 |
-
- History: {history}
|
| 30 |
-
- Existing treatments: {existing_treatments}
|
| 31 |
-
|
| 32 |
-
STUDENT'S ACTION: "{student_action}"
|
| 33 |
-
ACTION TYPE: {action_type}
|
| 34 |
-
|
| 35 |
-
CHECK FOR:
|
| 36 |
-
1. DANGEROUS drug interactions (with existing treatments or known conditions)
|
| 37 |
-
2. CONTRAINDICATED treatments (e.g., beta-blocker in acute decompensated heart failure with cardiogenic shock)
|
| 38 |
-
3. MISSING PREREQUISITES (e.g., ordering contrast CT without checking renal function)
|
| 39 |
-
4. DOSE ERRORS (if dose is specified — 10x overdose patterns are common student errors)
|
| 40 |
-
5. INAPPROPRIATE for the clinical scenario (e.g., discharging a critically ill patient)
|
| 41 |
-
|
| 42 |
-
RESPOND IN THIS EXACT JSON FORMAT:
|
| 43 |
-
{{
|
| 44 |
-
"safety_level": "safe" | "caution" | "dangerous",
|
| 45 |
-
"issues": [
|
| 46 |
-
{{
|
| 47 |
-
"type": "contraindication" | "interaction" | "missing_step" | "dose_error" | "inappropriate",
|
| 48 |
-
"description": "What the specific issue is",
|
| 49 |
-
"severity": "low" | "medium" | "high" | "critical"
|
| 50 |
-
}}
|
| 51 |
-
],
|
| 52 |
-
"nurse_intervention": "What Nurse Priya would say to gently flag this (or null if safe)",
|
| 53 |
-
"senior_intervention": "What Dr. Sharma would say if it's dangerous (or null if safe/caution)",
|
| 54 |
-
"proceed": true | false,
|
| 55 |
-
"teaching_point": "The clinical lesson here (for the student to learn from)"
|
| 56 |
-
}}
|
| 57 |
-
|
| 58 |
-
IMPORTANT:
|
| 59 |
-
- Most routine actions are SAFE — don't over-flag
|
| 60 |
-
- Investigations are almost always safe (except invasive ones without consent/preparation)
|
| 61 |
-
- History taking and examination are ALWAYS safe
|
| 62 |
-
- Only flag treatments that could genuinely harm
|
| 63 |
-
- In caution cases, nurse confirms but allows proceeding
|
| 64 |
-
- In dangerous cases, both nurse and senior intervene — this is a TEACHING moment"""
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
class ClinicalValidator:
|
| 68 |
-
"""Validates student actions for clinical safety.
|
| 69 |
-
|
| 70 |
-
Uses Claude Opus to reason about safety in the specific clinical context,
|
| 71 |
-
rather than relying on a static rules database.
|
| 72 |
-
"""
|
| 73 |
-
|
| 74 |
-
def __init__(self):
|
| 75 |
-
self.api_key = os.environ.get("ANTHROPIC_API_KEY")
|
| 76 |
-
self.client: Optional[anthropic.Anthropic] = None
|
| 77 |
-
if self.api_key and self.api_key != "sk-ant-your-key-here":
|
| 78 |
-
try:
|
| 79 |
-
self.client = anthropic.Anthropic(api_key=self.api_key)
|
| 80 |
-
except Exception as e:
|
| 81 |
-
logger.warning(f"ClinicalValidator init failed: {e}")
|
| 82 |
-
|
| 83 |
-
def validate_action(
|
| 84 |
-
self,
|
| 85 |
-
student_action: str,
|
| 86 |
-
action_type: str,
|
| 87 |
-
case_data: dict,
|
| 88 |
-
current_vitals: dict,
|
| 89 |
-
existing_treatments: list[dict],
|
| 90 |
-
) -> dict:
|
| 91 |
-
"""Validate a student action for clinical safety.
|
| 92 |
-
|
| 93 |
-
Args:
|
| 94 |
-
student_action: What the student wants to do
|
| 95 |
-
action_type: Category (order_treatment, order_investigation, etc.)
|
| 96 |
-
case_data: Full case data
|
| 97 |
-
current_vitals: Current vital signs
|
| 98 |
-
existing_treatments: Previously ordered treatments
|
| 99 |
-
|
| 100 |
-
Returns:
|
| 101 |
-
Validation result with safety_level, issues, and agent interventions.
|
| 102 |
-
"""
|
| 103 |
-
# History taking, examination, and conversations are always safe
|
| 104 |
-
safe_actions = {"talk_to_patient", "ask_nurse", "consult_senior", "examine_patient", "team_huddle"}
|
| 105 |
-
if action_type in safe_actions:
|
| 106 |
-
return {
|
| 107 |
-
"safety_level": "safe",
|
| 108 |
-
"issues": [],
|
| 109 |
-
"nurse_intervention": None,
|
| 110 |
-
"senior_intervention": None,
|
| 111 |
-
"proceed": True,
|
| 112 |
-
"teaching_point": None,
|
| 113 |
-
}
|
| 114 |
-
|
| 115 |
-
if not self.client:
|
| 116 |
-
return self._fallback_validation(student_action, action_type)
|
| 117 |
-
|
| 118 |
-
vitals = current_vitals
|
| 119 |
-
history = ""
|
| 120 |
-
for stage in case_data.get("stages", []):
|
| 121 |
-
if stage.get("stage") == "history":
|
| 122 |
-
history = stage.get("info", "")[:500]
|
| 123 |
-
break
|
| 124 |
-
|
| 125 |
-
prompt = VALIDATION_PROMPT.format(
|
| 126 |
-
diagnosis=case_data.get("diagnosis", "Under evaluation"),
|
| 127 |
-
age=case_data.get("patient", {}).get("age", "Unknown"),
|
| 128 |
-
gender=case_data.get("patient", {}).get("gender", "Unknown"),
|
| 129 |
-
chief_complaint=case_data.get("chief_complaint", ""),
|
| 130 |
-
bp=f"{vitals.get('bp_systolic', 120)}/{vitals.get('bp_diastolic', 80)}",
|
| 131 |
-
hr=vitals.get("hr", 80),
|
| 132 |
-
rr=vitals.get("rr", 16),
|
| 133 |
-
temp=vitals.get("temp", 37.0),
|
| 134 |
-
spo2=vitals.get("spo2", 98),
|
| 135 |
-
history=history,
|
| 136 |
-
existing_treatments="; ".join(tx.get("description", "") for tx in existing_treatments) or "None",
|
| 137 |
-
student_action=student_action,
|
| 138 |
-
action_type=action_type,
|
| 139 |
-
)
|
| 140 |
-
|
| 141 |
-
try:
|
| 142 |
-
response = self.client.messages.create(
|
| 143 |
-
model="claude-opus-4-6",
|
| 144 |
-
max_tokens=1500,
|
| 145 |
-
temperature=1,
|
| 146 |
-
thinking={
|
| 147 |
-
"type": "adaptive",
|
| 148 |
-
},
|
| 149 |
-
messages=[{"role": "user", "content": prompt}],
|
| 150 |
-
)
|
| 151 |
-
|
| 152 |
-
content = ""
|
| 153 |
-
for block in response.content:
|
| 154 |
-
if block.type == "text":
|
| 155 |
-
content = block.text.strip()
|
| 156 |
-
|
| 157 |
-
if content:
|
| 158 |
-
return self._parse_validation(content)
|
| 159 |
-
|
| 160 |
-
except Exception as e:
|
| 161 |
-
logger.error(f"ClinicalValidator error: {e}")
|
| 162 |
-
|
| 163 |
-
return self._fallback_validation(student_action, action_type)
|
| 164 |
-
|
| 165 |
-
def _parse_validation(self, response_text: str) -> dict:
|
| 166 |
-
"""Parse Claude's JSON validation response."""
|
| 167 |
-
import json
|
| 168 |
-
|
| 169 |
-
try:
|
| 170 |
-
text = response_text
|
| 171 |
-
if "```json" in text:
|
| 172 |
-
text = text.split("```json")[1].split("```")[0]
|
| 173 |
-
elif "```" in text:
|
| 174 |
-
text = text.split("```")[1].split("```")[0]
|
| 175 |
-
|
| 176 |
-
result = json.loads(text.strip())
|
| 177 |
-
|
| 178 |
-
return {
|
| 179 |
-
"safety_level": result.get("safety_level", "safe"),
|
| 180 |
-
"issues": result.get("issues", []),
|
| 181 |
-
"nurse_intervention": result.get("nurse_intervention"),
|
| 182 |
-
"senior_intervention": result.get("senior_intervention"),
|
| 183 |
-
"proceed": result.get("proceed", True),
|
| 184 |
-
"teaching_point": result.get("teaching_point"),
|
| 185 |
-
}
|
| 186 |
-
except (json.JSONDecodeError, IndexError, KeyError) as e:
|
| 187 |
-
logger.warning(f"Failed to parse validation JSON: {e}")
|
| 188 |
-
return self._fallback_validation("", "")
|
| 189 |
-
|
| 190 |
-
def _fallback_validation(self, student_action: str, action_type: str) -> dict:
|
| 191 |
-
"""Conservative fallback — allow with caution for treatments."""
|
| 192 |
-
if action_type == "order_treatment":
|
| 193 |
-
return {
|
| 194 |
-
"safety_level": "caution",
|
| 195 |
-
"issues": [],
|
| 196 |
-
"nurse_intervention": f"Doctor, just confirming the order — {student_action}. Shall I proceed?",
|
| 197 |
-
"senior_intervention": None,
|
| 198 |
-
"proceed": True,
|
| 199 |
-
"teaching_point": None,
|
| 200 |
-
}
|
| 201 |
-
|
| 202 |
-
return {
|
| 203 |
-
"safety_level": "safe",
|
| 204 |
-
"issues": [],
|
| 205 |
-
"nurse_intervention": None,
|
| 206 |
-
"senior_intervention": None,
|
| 207 |
-
"proceed": True,
|
| 208 |
-
"teaching_point": None,
|
| 209 |
-
}
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
# Singleton
|
| 213 |
-
clinical_validator = ClinicalValidator()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/complication_engine.py
DELETED
|
@@ -1,1307 +0,0 @@
|
|
| 1 |
-
"""Complication Engine — probabilistic complications, time-based triggers, urgent interruptions.
|
| 2 |
-
|
| 3 |
-
This engine makes clinical simulations DANGEROUS and UNPREDICTABLE — just like real
|
| 4 |
-
medicine. Complications don't happen randomly; they emerge from untreated conditions,
|
| 5 |
-
delayed interventions, and the natural disease trajectory. Every complication is
|
| 6 |
-
clinically realistic, specialty-appropriate, and time-dependent.
|
| 7 |
-
|
| 8 |
-
Works alongside CaseStateManager: the state manager tracks vitals/investigations/time,
|
| 9 |
-
the complication engine decides WHAT GOES WRONG and WHEN.
|
| 10 |
-
|
| 11 |
-
Architecture:
|
| 12 |
-
Orchestrator
|
| 13 |
-
-> advance_time() on CaseStateManager
|
| 14 |
-
-> check_complications() on ComplicationEngine
|
| 15 |
-
-> Merge events, deliver to student via agents
|
| 16 |
-
|
| 17 |
-
Each tick of the simulation clock, the engine:
|
| 18 |
-
1. Evaluates every possible complication for this case
|
| 19 |
-
2. Calculates time-dependent probability (rises if untreated)
|
| 20 |
-
3. Checks vitals criteria (some complications only fire when vitals are deranged)
|
| 21 |
-
4. Checks if preventive treatment was given
|
| 22 |
-
5. Rolls the dice — if triggered, generates a SimulationEvent
|
| 23 |
-
6. Escalates trajectory on the state manager when appropriate
|
| 24 |
-
"""
|
| 25 |
-
|
| 26 |
-
import logging
|
| 27 |
-
import random
|
| 28 |
-
import uuid
|
| 29 |
-
from typing import Optional
|
| 30 |
-
|
| 31 |
-
from .case_state_manager import (
|
| 32 |
-
CaseStateManager,
|
| 33 |
-
PatientTrajectory,
|
| 34 |
-
SimulationEvent,
|
| 35 |
-
)
|
| 36 |
-
|
| 37 |
-
logger = logging.getLogger(__name__)
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
# ---------------------------------------------------------------------------
|
| 41 |
-
# Specialty complication registry
|
| 42 |
-
# ---------------------------------------------------------------------------
|
| 43 |
-
# Each complication is a dict with:
|
| 44 |
-
# name — clinical name
|
| 45 |
-
# description — what happens clinically
|
| 46 |
-
# probability_base — base probability per check (0.0-1.0) before modifiers
|
| 47 |
-
# time_window — (min_minutes, max_minutes) when this can fire
|
| 48 |
-
# vitals_criteria — dict of vital sign thresholds that INCREASE probability
|
| 49 |
-
# treatment_prevents — list of treatment keywords that would prevent this
|
| 50 |
-
# urgency — "urgent" or "critical"
|
| 51 |
-
# agent_message — what the nurse/patient says when it triggers
|
| 52 |
-
# trajectory_effect — what happens to patient trajectory when this fires
|
| 53 |
-
# ---------------------------------------------------------------------------
|
| 54 |
-
|
| 55 |
-
SPECIALTY_COMPLICATIONS: dict[str, list[dict]] = {
|
| 56 |
-
"cardiology": [
|
| 57 |
-
{
|
| 58 |
-
"name": "Cardiogenic Shock",
|
| 59 |
-
"description": "Pump failure with hypotension and end-organ hypoperfusion following acute MI",
|
| 60 |
-
"probability_base": 0.15,
|
| 61 |
-
"time_window": (30, 180),
|
| 62 |
-
"vitals_criteria": {"bp_systolic_below": 90, "hr_above": 110},
|
| 63 |
-
"treatment_prevents": ["pci", "thrombolysis", "streptokinase", "tenecteplase", "aspirin", "heparin", "inotrope", "dobutamine"],
|
| 64 |
-
"urgency": "critical",
|
| 65 |
-
"agent_message": "Doctor! Patient is cold, clammy, and confused. BP has crashed to {bp_systolic}/{bp_diastolic}. Urine output has dropped. I think we're losing him!",
|
| 66 |
-
"trajectory_effect": "critical",
|
| 67 |
-
},
|
| 68 |
-
{
|
| 69 |
-
"name": "Ventricular Tachycardia",
|
| 70 |
-
"description": "Sustained VT from ischemic myocardium — can degenerate to VF",
|
| 71 |
-
"probability_base": 0.12,
|
| 72 |
-
"time_window": (15, 120),
|
| 73 |
-
"vitals_criteria": {"hr_above": 120},
|
| 74 |
-
"treatment_prevents": ["amiodarone", "lidocaine", "beta_blocker", "metoprolol", "defibrillation", "cardioversion"],
|
| 75 |
-
"urgency": "critical",
|
| 76 |
-
"agent_message": "Doctor! Monitor is showing wide-complex tachycardia! HR is {hr}! Patient says chest feels like it's going to explode!",
|
| 77 |
-
"trajectory_effect": "critical",
|
| 78 |
-
},
|
| 79 |
-
{
|
| 80 |
-
"name": "Acute Heart Failure / Pulmonary Edema",
|
| 81 |
-
"description": "Fluid backs up into lungs from failing left ventricle",
|
| 82 |
-
"probability_base": 0.10,
|
| 83 |
-
"time_window": (30, 240),
|
| 84 |
-
"vitals_criteria": {"spo2_below": 92, "rr_above": 24},
|
| 85 |
-
"treatment_prevents": ["furosemide", "lasix", "nitroglycerin", "ntg", "oxygen", "niv", "bipap", "cpap"],
|
| 86 |
-
"urgency": "urgent",
|
| 87 |
-
"agent_message": "Doctor, patient is sitting bolt upright gasping for air. Pink frothy sputum! SpO2 is {spo2}% on room air. I can hear crackles from the doorway!",
|
| 88 |
-
"trajectory_effect": "deteriorating",
|
| 89 |
-
},
|
| 90 |
-
{
|
| 91 |
-
"name": "Cardiac Arrest — VF/Pulseless VT",
|
| 92 |
-
"description": "Cardiac arrest from lethal arrhythmia in acute coronary syndrome",
|
| 93 |
-
"probability_base": 0.05,
|
| 94 |
-
"time_window": (60, 300),
|
| 95 |
-
"vitals_criteria": {"bp_systolic_below": 70, "hr_above": 150},
|
| 96 |
-
"treatment_prevents": ["pci", "thrombolysis", "amiodarone", "defibrillation"],
|
| 97 |
-
"urgency": "critical",
|
| 98 |
-
"agent_message": "CODE BLUE! Patient is unresponsive, no pulse! Monitor shows VF! Starting CPR — we need you here NOW!",
|
| 99 |
-
"trajectory_effect": "critical",
|
| 100 |
-
},
|
| 101 |
-
{
|
| 102 |
-
"name": "Pericardial Tamponade",
|
| 103 |
-
"description": "Fluid accumulation in pericardial sac compressing the heart",
|
| 104 |
-
"probability_base": 0.05,
|
| 105 |
-
"time_window": (45, 240),
|
| 106 |
-
"vitals_criteria": {"bp_systolic_below": 90, "hr_above": 100},
|
| 107 |
-
"treatment_prevents": ["pericardiocentesis", "echo", "echocardiography"],
|
| 108 |
-
"urgency": "critical",
|
| 109 |
-
"agent_message": "Doctor, Beck's triad! Muffled heart sounds, JVP is sky high, and BP keeps dropping. I think there's fluid around the heart!",
|
| 110 |
-
"trajectory_effect": "critical",
|
| 111 |
-
},
|
| 112 |
-
],
|
| 113 |
-
|
| 114 |
-
"respiratory": [
|
| 115 |
-
{
|
| 116 |
-
"name": "Respiratory Failure — Type 1",
|
| 117 |
-
"description": "Hypoxemic respiratory failure requiring mechanical ventilation",
|
| 118 |
-
"probability_base": 0.15,
|
| 119 |
-
"time_window": (30, 180),
|
| 120 |
-
"vitals_criteria": {"spo2_below": 88, "rr_above": 30},
|
| 121 |
-
"treatment_prevents": ["oxygen", "niv", "bipap", "cpap", "intubation", "ventilator", "high_flow_nasal_cannula"],
|
| 122 |
-
"urgency": "critical",
|
| 123 |
-
"agent_message": "Doctor! SpO2 is {spo2}% despite oxygen! Patient is using accessory muscles, can barely speak. RR is {rr}. Do we intubate?",
|
| 124 |
-
"trajectory_effect": "critical",
|
| 125 |
-
},
|
| 126 |
-
{
|
| 127 |
-
"name": "Tension Pneumothorax",
|
| 128 |
-
"description": "Air trapped in pleural space causing mediastinal shift and cardiovascular collapse",
|
| 129 |
-
"probability_base": 0.08,
|
| 130 |
-
"time_window": (15, 120),
|
| 131 |
-
"vitals_criteria": {"bp_systolic_below": 90, "spo2_below": 88},
|
| 132 |
-
"treatment_prevents": ["needle_decompression", "chest_tube", "icd", "intercostal_drain"],
|
| 133 |
-
"urgency": "critical",
|
| 134 |
-
"agent_message": "Doctor! Absent breath sounds on one side, trachea is shifted! BP is dropping fast — {bp_systolic}/{bp_diastolic}! I think it's a tension pneumothorax!",
|
| 135 |
-
"trajectory_effect": "critical",
|
| 136 |
-
},
|
| 137 |
-
{
|
| 138 |
-
"name": "Massive Hemoptysis",
|
| 139 |
-
"description": "Large-volume blood in airways threatening asphyxiation",
|
| 140 |
-
"probability_base": 0.06,
|
| 141 |
-
"time_window": (20, 180),
|
| 142 |
-
"vitals_criteria": {"hr_above": 110, "spo2_below": 92},
|
| 143 |
-
"treatment_prevents": ["tranexamic_acid", "blood_transfusion", "interventional_radiology", "bronchoscopy"],
|
| 144 |
-
"urgency": "critical",
|
| 145 |
-
"agent_message": "Doctor! Patient is coughing up large amounts of bright red blood! There's blood everywhere — I estimate over 200ml already! SpO2 falling!",
|
| 146 |
-
"trajectory_effect": "critical",
|
| 147 |
-
},
|
| 148 |
-
{
|
| 149 |
-
"name": "ARDS Development",
|
| 150 |
-
"description": "Acute respiratory distress syndrome with bilateral infiltrates and refractory hypoxemia",
|
| 151 |
-
"probability_base": 0.10,
|
| 152 |
-
"time_window": (60, 360),
|
| 153 |
-
"vitals_criteria": {"spo2_below": 90, "rr_above": 28},
|
| 154 |
-
"treatment_prevents": ["lung_protective_ventilation", "prone_positioning", "niv", "intubation", "steroids"],
|
| 155 |
-
"urgency": "urgent",
|
| 156 |
-
"agent_message": "Doctor, despite high-flow oxygen, SpO2 won't come above {spo2}%. Bilateral infiltrates on chest X-ray. P/F ratio is very low. This looks like ARDS.",
|
| 157 |
-
"trajectory_effect": "deteriorating",
|
| 158 |
-
},
|
| 159 |
-
],
|
| 160 |
-
|
| 161 |
-
"infectious": [
|
| 162 |
-
{
|
| 163 |
-
"name": "Septic Shock",
|
| 164 |
-
"description": "Distributive shock from overwhelming infection with vasodilation and organ hypoperfusion",
|
| 165 |
-
"probability_base": 0.18,
|
| 166 |
-
"time_window": (30, 120),
|
| 167 |
-
"vitals_criteria": {"bp_systolic_below": 90, "hr_above": 110, "temp_above": 38.5},
|
| 168 |
-
"treatment_prevents": ["antibiotics", "iv_fluids", "noradrenaline", "vasopressor", "normal_saline", "ringer_lactate"],
|
| 169 |
-
"urgency": "critical",
|
| 170 |
-
"agent_message": "Doctor! Patient is burning up at {temp}C but extremities are cold! BP is {bp_systolic}/{bp_diastolic} — not responding to fluids. Altered sensorium. I think we're heading into septic shock!",
|
| 171 |
-
"trajectory_effect": "critical",
|
| 172 |
-
},
|
| 173 |
-
{
|
| 174 |
-
"name": "Disseminated Intravascular Coagulation",
|
| 175 |
-
"description": "DIC with simultaneous clotting and bleeding from consumptive coagulopathy",
|
| 176 |
-
"probability_base": 0.08,
|
| 177 |
-
"time_window": (60, 240),
|
| 178 |
-
"vitals_criteria": {"hr_above": 120, "bp_systolic_below": 90},
|
| 179 |
-
"treatment_prevents": ["antibiotics", "source_control", "ffp", "cryoprecipitate", "platelet_transfusion"],
|
| 180 |
-
"urgency": "critical",
|
| 181 |
-
"agent_message": "Doctor! Patient is oozing from IV sites and gums. Petechiae all over. I can see blood in the urine bag. Labs show very low platelets and high INR!",
|
| 182 |
-
"trajectory_effect": "critical",
|
| 183 |
-
},
|
| 184 |
-
{
|
| 185 |
-
"name": "Multi-Organ Dysfunction",
|
| 186 |
-
"description": "Progressive failure of multiple organ systems from uncontrolled sepsis",
|
| 187 |
-
"probability_base": 0.10,
|
| 188 |
-
"time_window": (90, 360),
|
| 189 |
-
"vitals_criteria": {"bp_systolic_below": 80, "spo2_below": 90},
|
| 190 |
-
"treatment_prevents": ["antibiotics", "iv_fluids", "vasopressor", "organ_support", "icu_transfer"],
|
| 191 |
-
"urgency": "critical",
|
| 192 |
-
"agent_message": "Doctor, patient is oliguric, creatinine is rising, bilirubin is up, and now SpO2 is {spo2}%. Multiple organs are failing. We need ICU!",
|
| 193 |
-
"trajectory_effect": "critical",
|
| 194 |
-
},
|
| 195 |
-
{
|
| 196 |
-
"name": "Severe Drug Reaction — Anaphylaxis",
|
| 197 |
-
"description": "Anaphylactic reaction to administered antibiotic",
|
| 198 |
-
"probability_base": 0.04,
|
| 199 |
-
"time_window": (5, 60),
|
| 200 |
-
"vitals_criteria": {"bp_systolic_below": 90},
|
| 201 |
-
"treatment_prevents": ["test_dose", "allergy_check", "adrenaline", "epinephrine", "hydrocortisone", "chlorpheniramine"],
|
| 202 |
-
"urgency": "critical",
|
| 203 |
-
"agent_message": "Doctor! After the antibiotic injection, patient has developed rash, lip swelling, and is wheezing! BP dropping to {bp_systolic}/{bp_diastolic}! Looks like anaphylaxis!",
|
| 204 |
-
"trajectory_effect": "critical",
|
| 205 |
-
},
|
| 206 |
-
{
|
| 207 |
-
"name": "Dengue Hemorrhagic Manifestations",
|
| 208 |
-
"description": "Plasma leakage and hemorrhagic manifestations in severe dengue",
|
| 209 |
-
"probability_base": 0.20,
|
| 210 |
-
"time_window": (60, 240),
|
| 211 |
-
"vitals_criteria": {"bp_systolic_below": 100, "hr_above": 100},
|
| 212 |
-
"treatment_prevents": ["iv_fluids", "platelet_transfusion", "monitoring", "close_observation"],
|
| 213 |
-
"urgency": "urgent",
|
| 214 |
-
"agent_message": "Doctor, patient has petechiae and gum bleeding. Hematocrit is rising — plasma leakage! Platelet count is dropping fast. BP narrowing — pulse pressure is only 20mmHg!",
|
| 215 |
-
"trajectory_effect": "deteriorating",
|
| 216 |
-
},
|
| 217 |
-
],
|
| 218 |
-
|
| 219 |
-
"neurology": [
|
| 220 |
-
{
|
| 221 |
-
"name": "Cerebral Herniation",
|
| 222 |
-
"description": "Brainstem compression from raised intracranial pressure — uncal or tonsillar herniation",
|
| 223 |
-
"probability_base": 0.12,
|
| 224 |
-
"time_window": (30, 180),
|
| 225 |
-
"vitals_criteria": {"bp_systolic_above": 180, "hr_below": 60},
|
| 226 |
-
"treatment_prevents": ["mannitol", "hypertonic_saline", "decompressive_craniectomy", "neurosurgery_consult", "head_elevation"],
|
| 227 |
-
"urgency": "critical",
|
| 228 |
-
"agent_message": "Doctor! One pupil is fixed and dilated! Patient has Cushing's triad — hypertension, bradycardia, irregular breathing. GCS is dropping! I think the brain is herniating!",
|
| 229 |
-
"trajectory_effect": "critical",
|
| 230 |
-
},
|
| 231 |
-
{
|
| 232 |
-
"name": "Status Epilepticus",
|
| 233 |
-
"description": "Continuous seizure activity lasting >5 minutes or recurrent seizures without regaining consciousness",
|
| 234 |
-
"probability_base": 0.10,
|
| 235 |
-
"time_window": (15, 120),
|
| 236 |
-
"vitals_criteria": {"hr_above": 120, "temp_above": 38.0},
|
| 237 |
-
"treatment_prevents": ["lorazepam", "diazepam", "midazolam", "phenytoin", "levetiracetam", "valproate", "anticonvulsant"],
|
| 238 |
-
"urgency": "critical",
|
| 239 |
-
"agent_message": "Doctor! Patient is seizing — tonic-clonic movements, frothing at the mouth! It's been going on for 5 minutes! We need to stop this NOW!",
|
| 240 |
-
"trajectory_effect": "critical",
|
| 241 |
-
},
|
| 242 |
-
{
|
| 243 |
-
"name": "Raised ICP — Deterioration",
|
| 244 |
-
"description": "Progressive rise in intracranial pressure with decreasing consciousness",
|
| 245 |
-
"probability_base": 0.12,
|
| 246 |
-
"time_window": (30, 240),
|
| 247 |
-
"vitals_criteria": {"bp_systolic_above": 160},
|
| 248 |
-
"treatment_prevents": ["mannitol", "hypertonic_saline", "head_elevation", "ct_scan", "neurosurgery_consult"],
|
| 249 |
-
"urgency": "urgent",
|
| 250 |
-
"agent_message": "Doctor, patient's GCS has dropped from 12 to 9. Projectile vomiting. Headache is excruciating. BP is {bp_systolic}/{bp_diastolic} — I think ICP is rising!",
|
| 251 |
-
"trajectory_effect": "deteriorating",
|
| 252 |
-
},
|
| 253 |
-
{
|
| 254 |
-
"name": "Autonomic Storm",
|
| 255 |
-
"description": "Paroxysmal sympathetic hyperactivity with tachycardia, hypertension, diaphoresis, and posturing",
|
| 256 |
-
"probability_base": 0.06,
|
| 257 |
-
"time_window": (60, 300),
|
| 258 |
-
"vitals_criteria": {"hr_above": 130, "temp_above": 38.5, "bp_systolic_above": 170},
|
| 259 |
-
"treatment_prevents": ["beta_blocker", "propranolol", "morphine", "bromocriptine", "sedation", "midazolam"],
|
| 260 |
-
"urgency": "urgent",
|
| 261 |
-
"agent_message": "Doctor, patient is pouring sweat, HR is {hr}, BP is {bp_systolic}/{bp_diastolic}, posturing! Temperature is {temp}C. This looks like an autonomic storm!",
|
| 262 |
-
"trajectory_effect": "deteriorating",
|
| 263 |
-
},
|
| 264 |
-
],
|
| 265 |
-
|
| 266 |
-
"gastroenterology": [
|
| 267 |
-
{
|
| 268 |
-
"name": "Massive Upper GI Bleed",
|
| 269 |
-
"description": "Torrential hematemesis or melena with hemodynamic instability",
|
| 270 |
-
"probability_base": 0.12,
|
| 271 |
-
"time_window": (45, 240), # Increased minimum time from 15 to 45 minutes
|
| 272 |
-
"vitals_criteria": {"hr_above": 110, "bp_systolic_below": 90},
|
| 273 |
-
"treatment_prevents": ["iv_fluids", "blood_transfusion", "ppi", "pantoprazole", "octreotide", "endoscopy", "sengstaken_tube"],
|
| 274 |
-
"urgency": "critical",
|
| 275 |
-
"agent_message": "Doctor! Patient just vomited a large amount of dark blood — nearly 500ml! HR is {hr}, BP dropping to {bp_systolic}/{bp_diastolic}! We need blood urgently!",
|
| 276 |
-
"trajectory_effect": "critical",
|
| 277 |
-
},
|
| 278 |
-
{
|
| 279 |
-
"name": "Hepatic Encephalopathy",
|
| 280 |
-
"description": "Altered consciousness from hepatic failure with asterixis progressing to coma",
|
| 281 |
-
"probability_base": 0.12,
|
| 282 |
-
"time_window": (60, 300),
|
| 283 |
-
"vitals_criteria": {},
|
| 284 |
-
"treatment_prevents": ["lactulose", "rifaximin", "protein_restriction", "enema"],
|
| 285 |
-
"urgency": "urgent",
|
| 286 |
-
"agent_message": "Doctor, patient has become drowsy and confused. Flapping tremor present. I smell fetor hepaticus. I think the liver is failing — hepatic encephalopathy!",
|
| 287 |
-
"trajectory_effect": "deteriorating",
|
| 288 |
-
},
|
| 289 |
-
{
|
| 290 |
-
"name": "Spontaneous Bacterial Peritonitis",
|
| 291 |
-
"description": "Infection of ascitic fluid in a cirrhotic patient",
|
| 292 |
-
"probability_base": 0.10,
|
| 293 |
-
"time_window": (30, 240),
|
| 294 |
-
"vitals_criteria": {"temp_above": 38.0, "hr_above": 100},
|
| 295 |
-
"treatment_prevents": ["antibiotics", "cefotaxime", "diagnostic_paracentesis", "ascitic_fluid_analysis"],
|
| 296 |
-
"urgency": "urgent",
|
| 297 |
-
"agent_message": "Doctor, the patient's abdomen is becoming more tender and distended. Temperature spiking to {temp}C. Abdominal guarding present. Could be SBP!",
|
| 298 |
-
"trajectory_effect": "deteriorating",
|
| 299 |
-
},
|
| 300 |
-
{
|
| 301 |
-
"name": "Variceal Rupture",
|
| 302 |
-
"description": "Esophageal variceal hemorrhage with massive hematemesis in portal hypertension",
|
| 303 |
-
"probability_base": 0.10,
|
| 304 |
-
"time_window": (15, 120),
|
| 305 |
-
"vitals_criteria": {"hr_above": 120, "bp_systolic_below": 85},
|
| 306 |
-
"treatment_prevents": ["octreotide", "terlipressin", "sengstaken_tube", "endoscopy", "band_ligation"],
|
| 307 |
-
"urgency": "critical",
|
| 308 |
-
"agent_message": "Doctor! Torrential hematemesis — bright red blood everywhere! Patient is going into shock — HR {hr}, BP {bp_systolic}/{bp_diastolic}! Known varices — this is a bleed!",
|
| 309 |
-
"trajectory_effect": "critical",
|
| 310 |
-
},
|
| 311 |
-
],
|
| 312 |
-
|
| 313 |
-
"emergency": [
|
| 314 |
-
{
|
| 315 |
-
"name": "Hemorrhagic Shock",
|
| 316 |
-
"description": "Class III/IV hemorrhagic shock from ongoing blood loss",
|
| 317 |
-
"probability_base": 0.15,
|
| 318 |
-
"time_window": (15, 120),
|
| 319 |
-
"vitals_criteria": {"hr_above": 120, "bp_systolic_below": 80},
|
| 320 |
-
"treatment_prevents": ["iv_fluids", "blood_transfusion", "crossmatch", "massive_transfusion", "surgical_consult"],
|
| 321 |
-
"urgency": "critical",
|
| 322 |
-
"agent_message": "Doctor! Patient is tachycardic at {hr}, BP is {bp_systolic}/{bp_diastolic}, cold and clammy! Altered consciousness! This is Class III shock — we need blood NOW!",
|
| 323 |
-
"trajectory_effect": "critical",
|
| 324 |
-
},
|
| 325 |
-
{
|
| 326 |
-
"name": "Anaphylaxis",
|
| 327 |
-
"description": "Severe systemic allergic reaction with airway compromise and cardiovascular collapse",
|
| 328 |
-
"probability_base": 0.05,
|
| 329 |
-
"time_window": (5, 45),
|
| 330 |
-
"vitals_criteria": {"bp_systolic_below": 90, "spo2_below": 92},
|
| 331 |
-
"treatment_prevents": ["adrenaline", "epinephrine", "hydrocortisone", "chlorpheniramine", "nebulization"],
|
| 332 |
-
"urgency": "critical",
|
| 333 |
-
"agent_message": "Doctor! Sudden urticaria, tongue swelling, stridor developing! BP crashing to {bp_systolic}/{bp_diastolic}! ANAPHYLAXIS — need adrenaline STAT!",
|
| 334 |
-
"trajectory_effect": "critical",
|
| 335 |
-
},
|
| 336 |
-
{
|
| 337 |
-
"name": "Rhabdomyolysis — Acute Kidney Injury",
|
| 338 |
-
"description": "Myoglobin release causing acute kidney injury with dark urine and rising creatinine",
|
| 339 |
-
"probability_base": 0.08,
|
| 340 |
-
"time_window": (60, 360),
|
| 341 |
-
"vitals_criteria": {"hr_above": 100},
|
| 342 |
-
"treatment_prevents": ["iv_fluids", "aggressive_hydration", "normal_saline", "alkalinization"],
|
| 343 |
-
"urgency": "urgent",
|
| 344 |
-
"agent_message": "Doctor, patient's urine has turned dark brown — looks like cola. Muscles are very tender. I suspect rhabdomyolysis — we need to push fluids before the kidneys fail!",
|
| 345 |
-
"trajectory_effect": "deteriorating",
|
| 346 |
-
},
|
| 347 |
-
{
|
| 348 |
-
"name": "Compartment Syndrome",
|
| 349 |
-
"description": "Rising intra-compartmental pressure threatening limb viability",
|
| 350 |
-
"probability_base": 0.08,
|
| 351 |
-
"time_window": (30, 240),
|
| 352 |
-
"vitals_criteria": {"hr_above": 100},
|
| 353 |
-
"treatment_prevents": ["fasciotomy", "orthopedic_consult", "surgical_consult", "cast_removal", "elevation"],
|
| 354 |
-
"urgency": "critical",
|
| 355 |
-
"agent_message": "Doctor! Patient screaming with pain out of proportion. Limb is tense and swollen. Pain on passive stretch! Pulses getting weak — compartment syndrome! We need surgery!",
|
| 356 |
-
"trajectory_effect": "deteriorating",
|
| 357 |
-
},
|
| 358 |
-
],
|
| 359 |
-
|
| 360 |
-
"nephrology": [
|
| 361 |
-
{
|
| 362 |
-
"name": "Hyperkalemia — Cardiac Arrest",
|
| 363 |
-
"description": "Lethal arrhythmia from critically elevated serum potassium",
|
| 364 |
-
"probability_base": 0.12,
|
| 365 |
-
"time_window": (30, 180),
|
| 366 |
-
"vitals_criteria": {"hr_below": 50},
|
| 367 |
-
"treatment_prevents": ["calcium_gluconate", "insulin_dextrose", "salbutamol", "kayexalate", "sodium_bicarbonate", "dialysis"],
|
| 368 |
-
"urgency": "critical",
|
| 369 |
-
"agent_message": "Doctor! ECG showing tall peaked T waves and widening QRS! HR is dropping — {hr}! Potassium must be dangerously high. Patient is getting bradycardic!",
|
| 370 |
-
"trajectory_effect": "critical",
|
| 371 |
-
},
|
| 372 |
-
{
|
| 373 |
-
"name": "Flash Pulmonary Edema",
|
| 374 |
-
"description": "Acute pulmonary edema from fluid overload in oliguric renal failure",
|
| 375 |
-
"probability_base": 0.12,
|
| 376 |
-
"time_window": (30, 240),
|
| 377 |
-
"vitals_criteria": {"spo2_below": 90, "rr_above": 28},
|
| 378 |
-
"treatment_prevents": ["furosemide", "dialysis", "fluid_restriction", "niv", "bipap", "oxygen"],
|
| 379 |
-
"urgency": "critical",
|
| 380 |
-
"agent_message": "Doctor! Patient can't breathe — sitting upright, pink frothy sputum! SpO2 is {spo2}%! Fluid overload — the kidneys aren't making urine. We need urgent dialysis or diuretics!",
|
| 381 |
-
"trajectory_effect": "critical",
|
| 382 |
-
},
|
| 383 |
-
{
|
| 384 |
-
"name": "Uremic Encephalopathy",
|
| 385 |
-
"description": "Altered consciousness from accumulation of uremic toxins",
|
| 386 |
-
"probability_base": 0.08,
|
| 387 |
-
"time_window": (60, 360),
|
| 388 |
-
"vitals_criteria": {},
|
| 389 |
-
"treatment_prevents": ["dialysis", "hemodialysis"],
|
| 390 |
-
"urgency": "urgent",
|
| 391 |
-
"agent_message": "Doctor, patient is confused, drowsy, and has asterixis. Breath smells uremic. Creatinine must be very high. I think the toxins are affecting the brain!",
|
| 392 |
-
"trajectory_effect": "deteriorating",
|
| 393 |
-
},
|
| 394 |
-
],
|
| 395 |
-
|
| 396 |
-
"endocrinology": [
|
| 397 |
-
{
|
| 398 |
-
"name": "Thyroid Storm",
|
| 399 |
-
"description": "Life-threatening thyrotoxicosis with hyperpyrexia, tachycardia, and altered consciousness",
|
| 400 |
-
"probability_base": 0.10,
|
| 401 |
-
"time_window": (30, 180),
|
| 402 |
-
"vitals_criteria": {"hr_above": 140, "temp_above": 39.0},
|
| 403 |
-
"treatment_prevents": ["propranolol", "beta_blocker", "ptu", "methimazole", "lugol_iodine", "hydrocortisone"],
|
| 404 |
-
"urgency": "critical",
|
| 405 |
-
"agent_message": "Doctor! HR is {hr}, temperature is {temp}C and climbing! Patient is agitated, tremulous, and drenched in sweat. Thyroid storm — we need beta-blockers and PTU NOW!",
|
| 406 |
-
"trajectory_effect": "critical",
|
| 407 |
-
},
|
| 408 |
-
{
|
| 409 |
-
"name": "Adrenal Crisis",
|
| 410 |
-
"description": "Acute adrenal insufficiency with refractory hypotension and shock",
|
| 411 |
-
"probability_base": 0.08,
|
| 412 |
-
"time_window": (30, 180),
|
| 413 |
-
"vitals_criteria": {"bp_systolic_below": 80},
|
| 414 |
-
"treatment_prevents": ["hydrocortisone", "steroid", "dexamethasone", "iv_fluids", "fludrocortisone"],
|
| 415 |
-
"urgency": "critical",
|
| 416 |
-
"agent_message": "Doctor! BP is {bp_systolic}/{bp_diastolic} and NOT responding to IV fluids at all! Patient is hyperpigmented and severely hypotensive. Could this be adrenal crisis?",
|
| 417 |
-
"trajectory_effect": "critical",
|
| 418 |
-
},
|
| 419 |
-
{
|
| 420 |
-
"name": "Severe Hypoglycemia — Seizure/Coma",
|
| 421 |
-
"description": "Critical hypoglycemia causing seizures or loss of consciousness",
|
| 422 |
-
"probability_base": 0.10,
|
| 423 |
-
"time_window": (15, 120),
|
| 424 |
-
"vitals_criteria": {"hr_above": 100},
|
| 425 |
-
"treatment_prevents": ["dextrose", "d25", "d50", "glucagon", "glucose", "blood_sugar_check"],
|
| 426 |
-
"urgency": "critical",
|
| 427 |
-
"agent_message": "Doctor! Patient is having a seizure! Cold, sweaty, and unresponsive! Glucometer shows 28 mg/dL — critically low sugar! Give IV dextrose STAT!",
|
| 428 |
-
"trajectory_effect": "critical",
|
| 429 |
-
},
|
| 430 |
-
{
|
| 431 |
-
"name": "Cerebral Edema in DKA",
|
| 432 |
-
"description": "Brain swelling from too-rapid correction of DKA, especially in young patients",
|
| 433 |
-
"probability_base": 0.06,
|
| 434 |
-
"time_window": (120, 480),
|
| 435 |
-
"vitals_criteria": {"bp_systolic_above": 140},
|
| 436 |
-
"treatment_prevents": ["gradual_correction", "slow_insulin", "monitoring", "mannitol", "hypertonic_saline"],
|
| 437 |
-
"urgency": "critical",
|
| 438 |
-
"agent_message": "Doctor! Patient was improving but now suddenly has severe headache, vomiting, and decreasing consciousness! Pupils are sluggish. I think it's cerebral edema from DKA correction!",
|
| 439 |
-
"trajectory_effect": "critical",
|
| 440 |
-
},
|
| 441 |
-
],
|
| 442 |
-
|
| 443 |
-
"pediatrics": [
|
| 444 |
-
{
|
| 445 |
-
"name": "Febrile Seizure — Complex",
|
| 446 |
-
"description": "Prolonged or focal seizure triggered by high fever in a child",
|
| 447 |
-
"probability_base": 0.12,
|
| 448 |
-
"time_window": (10, 90),
|
| 449 |
-
"vitals_criteria": {"temp_above": 39.0, "hr_above": 140},
|
| 450 |
-
"treatment_prevents": ["paracetamol", "ibuprofen", "tepid_sponging", "diazepam", "midazolam", "antipyretic"],
|
| 451 |
-
"urgency": "critical",
|
| 452 |
-
"agent_message": "Doctor! The child is seizing — whole body shaking, eyes rolled up! Mother is panicking! Temperature was {temp}C. It's been going on for 3 minutes!",
|
| 453 |
-
"trajectory_effect": "deteriorating",
|
| 454 |
-
},
|
| 455 |
-
{
|
| 456 |
-
"name": "Dehydration Shock",
|
| 457 |
-
"description": "Severe dehydration progressing to hypovolemic shock in a child",
|
| 458 |
-
"probability_base": 0.15,
|
| 459 |
-
"time_window": (30, 180),
|
| 460 |
-
"vitals_criteria": {"hr_above": 150, "bp_systolic_below": 70},
|
| 461 |
-
"treatment_prevents": ["iv_fluids", "ors", "normal_saline", "ringer_lactate", "bolus"],
|
| 462 |
-
"urgency": "critical",
|
| 463 |
-
"agent_message": "Doctor! Child is lethargic with sunken eyes, dry mouth, and no tears! Skin turgor very poor. CRT >4 seconds. HR is {hr}. This child is in shock!",
|
| 464 |
-
"trajectory_effect": "critical",
|
| 465 |
-
},
|
| 466 |
-
{
|
| 467 |
-
"name": "Reye Syndrome",
|
| 468 |
-
"description": "Hepatic failure and encephalopathy following viral illness, especially with aspirin use",
|
| 469 |
-
"probability_base": 0.03,
|
| 470 |
-
"time_window": (60, 360),
|
| 471 |
-
"vitals_criteria": {},
|
| 472 |
-
"treatment_prevents": ["avoid_aspirin", "supportive_care", "mannitol", "icu_transfer"],
|
| 473 |
-
"urgency": "critical",
|
| 474 |
-
"agent_message": "Doctor! Child was recovering but now has persistent vomiting and is becoming confused. Liver is enlarged and tender. Was aspirin given? I'm worried about Reye syndrome!",
|
| 475 |
-
"trajectory_effect": "critical",
|
| 476 |
-
},
|
| 477 |
-
{
|
| 478 |
-
"name": "Kernicterus Progression",
|
| 479 |
-
"description": "Bilirubin encephalopathy with opisthotonus and neurological damage in neonate",
|
| 480 |
-
"probability_base": 0.06,
|
| 481 |
-
"time_window": (120, 480),
|
| 482 |
-
"vitals_criteria": {},
|
| 483 |
-
"treatment_prevents": ["phototherapy", "exchange_transfusion", "bilirubin_monitoring"],
|
| 484 |
-
"urgency": "critical",
|
| 485 |
-
"agent_message": "Doctor! The baby is arching backward — opisthotonus! High-pitched cry, not feeding. The jaundice has deepened. Bilirubin must be dangerously high — kernicterus!",
|
| 486 |
-
"trajectory_effect": "critical",
|
| 487 |
-
},
|
| 488 |
-
],
|
| 489 |
-
|
| 490 |
-
"obstetrics": [
|
| 491 |
-
{
|
| 492 |
-
"name": "Eclampsia",
|
| 493 |
-
"description": "Tonic-clonic seizures in pre-eclampsia with risk of maternal and fetal death",
|
| 494 |
-
"probability_base": 0.12,
|
| 495 |
-
"time_window": (15, 180),
|
| 496 |
-
"vitals_criteria": {"bp_systolic_above": 160},
|
| 497 |
-
"treatment_prevents": ["magnesium_sulphate", "mgso4", "labetalol", "nifedipine", "antihypertensive"],
|
| 498 |
-
"urgency": "critical",
|
| 499 |
-
"agent_message": "Doctor! The patient is seizing — eclampsia! BP was {bp_systolic}/{bp_diastolic}! She needs magnesium sulphate immediately! Fetal heart rate is dipping!",
|
| 500 |
-
"trajectory_effect": "critical",
|
| 501 |
-
},
|
| 502 |
-
{
|
| 503 |
-
"name": "DIC in Obstetrics",
|
| 504 |
-
"description": "Consumptive coagulopathy from placental abruption, amniotic fluid embolism, or HELLP",
|
| 505 |
-
"probability_base": 0.06,
|
| 506 |
-
"time_window": (30, 240),
|
| 507 |
-
"vitals_criteria": {"hr_above": 120, "bp_systolic_below": 90},
|
| 508 |
-
"treatment_prevents": ["blood_transfusion", "ffp", "cryoprecipitate", "platelet_transfusion", "delivery"],
|
| 509 |
-
"urgency": "critical",
|
| 510 |
-
"agent_message": "Doctor! Uncontrollable bleeding from all IV sites! Blood not clotting in the tube! Uterus is not contracting. This is DIC — we need blood products STAT!",
|
| 511 |
-
"trajectory_effect": "critical",
|
| 512 |
-
},
|
| 513 |
-
{
|
| 514 |
-
"name": "Amniotic Fluid Embolism",
|
| 515 |
-
"description": "Catastrophic amniotic fluid entering maternal circulation causing cardiorespiratory collapse",
|
| 516 |
-
"probability_base": 0.03,
|
| 517 |
-
"time_window": (15, 120),
|
| 518 |
-
"vitals_criteria": {"spo2_below": 88, "bp_systolic_below": 80},
|
| 519 |
-
"treatment_prevents": ["supportive_care", "intubation", "vasopressor", "blood_products"],
|
| 520 |
-
"urgency": "critical",
|
| 521 |
-
"agent_message": "Doctor! Patient suddenly collapsed — can't breathe, cyanotic, SpO2 is {spo2}%! Hypotensive at {bp_systolic}/{bp_diastolic}! Amniotic fluid embolism — CODE BLUE!",
|
| 522 |
-
"trajectory_effect": "critical",
|
| 523 |
-
},
|
| 524 |
-
{
|
| 525 |
-
"name": "Uterine Rupture",
|
| 526 |
-
"description": "Catastrophic rupture of the uterine wall during labor",
|
| 527 |
-
"probability_base": 0.04,
|
| 528 |
-
"time_window": (30, 180),
|
| 529 |
-
"vitals_criteria": {"hr_above": 120, "bp_systolic_below": 90},
|
| 530 |
-
"treatment_prevents": ["monitoring", "cesarean_section", "surgical_consult", "oxytocin_stop"],
|
| 531 |
-
"urgency": "critical",
|
| 532 |
-
"agent_message": "Doctor! Sudden severe abdominal pain — patient screaming! Contractions have stopped but there's a bulge in the abdomen. Fetal heart lost! Uterine rupture — we need emergency surgery!",
|
| 533 |
-
"trajectory_effect": "critical",
|
| 534 |
-
},
|
| 535 |
-
],
|
| 536 |
-
|
| 537 |
-
"hematology": [
|
| 538 |
-
{
|
| 539 |
-
"name": "Massive Hemorrhage",
|
| 540 |
-
"description": "Life-threatening bleeding from severe thrombocytopenia or coagulopathy",
|
| 541 |
-
"probability_base": 0.12,
|
| 542 |
-
"time_window": (30, 240),
|
| 543 |
-
"vitals_criteria": {"hr_above": 120, "bp_systolic_below": 85},
|
| 544 |
-
"treatment_prevents": ["platelet_transfusion", "blood_transfusion", "ffp", "tranexamic_acid"],
|
| 545 |
-
"urgency": "critical",
|
| 546 |
-
"agent_message": "Doctor! Massive epistaxis and gum bleeding won't stop! Now blood in urine and stool! BP is {bp_systolic}/{bp_diastolic}, HR {hr}! We need platelets and blood urgently!",
|
| 547 |
-
"trajectory_effect": "critical",
|
| 548 |
-
},
|
| 549 |
-
{
|
| 550 |
-
"name": "Tumor Lysis Syndrome",
|
| 551 |
-
"description": "Metabolic emergency from rapid cell death after chemotherapy with hyperkalemia, hyperphosphatemia, and AKI",
|
| 552 |
-
"probability_base": 0.10,
|
| 553 |
-
"time_window": (60, 360),
|
| 554 |
-
"vitals_criteria": {"hr_above": 100},
|
| 555 |
-
"treatment_prevents": ["rasburicase", "allopurinol", "iv_fluids", "alkalinization", "monitoring"],
|
| 556 |
-
"urgency": "urgent",
|
| 557 |
-
"agent_message": "Doctor, patient is having muscle cramps, palpitations, and reduced urine output after chemotherapy. ECG shows peaked T waves! I think it's tumor lysis syndrome!",
|
| 558 |
-
"trajectory_effect": "deteriorating",
|
| 559 |
-
},
|
| 560 |
-
{
|
| 561 |
-
"name": "Febrile Neutropenia — Sepsis",
|
| 562 |
-
"description": "Overwhelming infection in a neutropenic patient progressing to sepsis",
|
| 563 |
-
"probability_base": 0.15,
|
| 564 |
-
"time_window": (15, 120),
|
| 565 |
-
"vitals_criteria": {"temp_above": 38.3, "hr_above": 100},
|
| 566 |
-
"treatment_prevents": ["antibiotics", "empirical_antibiotics", "blood_culture", "piperacillin_tazobactam", "meropenem"],
|
| 567 |
-
"urgency": "critical",
|
| 568 |
-
"agent_message": "Doctor! Neutropenic patient spiking fever of {temp}C with rigors! HR is {hr}. This is febrile neutropenia — needs broad-spectrum antibiotics within the hour or we'll lose him!",
|
| 569 |
-
"trajectory_effect": "critical",
|
| 570 |
-
},
|
| 571 |
-
{
|
| 572 |
-
"name": "Hyperviscosity Syndrome",
|
| 573 |
-
"description": "Blood hyperviscosity from high paraprotein levels causing neurological and visual symptoms",
|
| 574 |
-
"probability_base": 0.06,
|
| 575 |
-
"time_window": (60, 300),
|
| 576 |
-
"vitals_criteria": {"bp_systolic_above": 160},
|
| 577 |
-
"treatment_prevents": ["plasmapheresis", "plasma_exchange", "hydration"],
|
| 578 |
-
"urgency": "urgent",
|
| 579 |
-
"agent_message": "Doctor, patient complains of blurred vision, headache, and confusion. Fundoscopy shows engorged veins and hemorrhages. Blood is 'thick' — hyperviscosity syndrome!",
|
| 580 |
-
"trajectory_effect": "deteriorating",
|
| 581 |
-
},
|
| 582 |
-
],
|
| 583 |
-
|
| 584 |
-
"psychiatry": [
|
| 585 |
-
{
|
| 586 |
-
"name": "Neuroleptic Malignant Syndrome",
|
| 587 |
-
"description": "Life-threatening reaction to antipsychotics with hyperthermia, rigidity, and autonomic instability",
|
| 588 |
-
"probability_base": 0.06,
|
| 589 |
-
"time_window": (60, 480),
|
| 590 |
-
"vitals_criteria": {"temp_above": 39.5, "hr_above": 120},
|
| 591 |
-
"treatment_prevents": ["stop_antipsychotic", "dantrolene", "bromocriptine", "cooling", "icu_transfer"],
|
| 592 |
-
"urgency": "critical",
|
| 593 |
-
"agent_message": "Doctor! Patient on antipsychotics is rigid as a board! Temperature is {temp}C, HR {hr}, profusely sweating. Lead-pipe rigidity everywhere. NMS — stop the antipsychotic!",
|
| 594 |
-
"trajectory_effect": "critical",
|
| 595 |
-
},
|
| 596 |
-
{
|
| 597 |
-
"name": "Serotonin Syndrome",
|
| 598 |
-
"description": "Serotonin toxicity from drug interaction with agitation, clonus, and hyperthermia",
|
| 599 |
-
"probability_base": 0.06,
|
| 600 |
-
"time_window": (30, 240),
|
| 601 |
-
"vitals_criteria": {"temp_above": 38.5, "hr_above": 110},
|
| 602 |
-
"treatment_prevents": ["stop_serotonergic", "cyproheptadine", "benzodiazepine", "cooling"],
|
| 603 |
-
"urgency": "urgent",
|
| 604 |
-
"agent_message": "Doctor, patient is agitated, tremulous, with clonus at the ankles. Pupils are dilated. Temperature rising to {temp}C. Multiple serotonergic drugs on the chart — serotonin syndrome!",
|
| 605 |
-
"trajectory_effect": "deteriorating",
|
| 606 |
-
},
|
| 607 |
-
{
|
| 608 |
-
"name": "Violent Agitation Episode",
|
| 609 |
-
"description": "Acute behavioral emergency with risk of harm to self or others",
|
| 610 |
-
"probability_base": 0.10,
|
| 611 |
-
"time_window": (10, 120),
|
| 612 |
-
"vitals_criteria": {"hr_above": 110},
|
| 613 |
-
"treatment_prevents": ["haloperidol", "lorazepam", "midazolam", "de_escalation", "restraint"],
|
| 614 |
-
"urgency": "urgent",
|
| 615 |
-
"agent_message": "Doctor! Patient has become violently agitated — throwing things, threatening staff! Two nurses needed to restrain. Screaming that people are trying to kill him. We need sedation NOW!",
|
| 616 |
-
"trajectory_effect": "deteriorating",
|
| 617 |
-
},
|
| 618 |
-
],
|
| 619 |
-
|
| 620 |
-
"dermatology": [
|
| 621 |
-
{
|
| 622 |
-
"name": "SJS Progression to TEN",
|
| 623 |
-
"description": "Stevens-Johnson syndrome progressing to toxic epidermal necrolysis (>30% BSA detachment)",
|
| 624 |
-
"probability_base": 0.10,
|
| 625 |
-
"time_window": (60, 360),
|
| 626 |
-
"vitals_criteria": {"temp_above": 38.5, "hr_above": 100},
|
| 627 |
-
"treatment_prevents": ["stop_offending_drug", "icu_transfer", "cyclosporine", "ivig", "wound_care", "fluid_resuscitation"],
|
| 628 |
-
"urgency": "critical",
|
| 629 |
-
"agent_message": "Doctor! Skin is sloughing off in sheets — Nikolsky sign positive everywhere! Mucosal involvement — eyes, mouth, genitals. This has progressed to TEN — needs burns unit/ICU!",
|
| 630 |
-
"trajectory_effect": "critical",
|
| 631 |
-
},
|
| 632 |
-
{
|
| 633 |
-
"name": "Secondary Sepsis from Skin",
|
| 634 |
-
"description": "Overwhelming infection through denuded skin barrier",
|
| 635 |
-
"probability_base": 0.08,
|
| 636 |
-
"time_window": (60, 360),
|
| 637 |
-
"vitals_criteria": {"temp_above": 38.5, "hr_above": 110, "bp_systolic_below": 90},
|
| 638 |
-
"treatment_prevents": ["antibiotics", "wound_care", "barrier_nursing", "iv_fluids"],
|
| 639 |
-
"urgency": "critical",
|
| 640 |
-
"agent_message": "Doctor! Patient with skin lesions is now spiking to {temp}C with rigors! BP dropping to {bp_systolic}/{bp_diastolic}. Wounds look infected. Sepsis through the skin!",
|
| 641 |
-
"trajectory_effect": "critical",
|
| 642 |
-
},
|
| 643 |
-
{
|
| 644 |
-
"name": "Airway Compromise from Angioedema",
|
| 645 |
-
"description": "Progressive angioedema threatening airway patency",
|
| 646 |
-
"probability_base": 0.06,
|
| 647 |
-
"time_window": (10, 90),
|
| 648 |
-
"vitals_criteria": {"spo2_below": 92, "rr_above": 24},
|
| 649 |
-
"treatment_prevents": ["adrenaline", "epinephrine", "hydrocortisone", "intubation", "tracheostomy"],
|
| 650 |
-
"urgency": "critical",
|
| 651 |
-
"agent_message": "Doctor! Lips and tongue are massively swollen! Patient is developing stridor — can barely talk! SpO2 falling to {spo2}%. We may need to secure the airway!",
|
| 652 |
-
"trajectory_effect": "critical",
|
| 653 |
-
},
|
| 654 |
-
],
|
| 655 |
-
|
| 656 |
-
"orthopedics": [
|
| 657 |
-
{
|
| 658 |
-
"name": "Fat Embolism Syndrome",
|
| 659 |
-
"description": "Fat embolism from long-bone fracture causing respiratory failure, neurological symptoms, and petechial rash",
|
| 660 |
-
"probability_base": 0.08,
|
| 661 |
-
"time_window": (720, 2880),
|
| 662 |
-
"vitals_criteria": {"spo2_below": 90, "hr_above": 110, "rr_above": 24},
|
| 663 |
-
"treatment_prevents": ["early_fixation", "fracture_stabilization", "oxygen", "supportive_care"],
|
| 664 |
-
"urgency": "critical",
|
| 665 |
-
"agent_message": "Doctor! Post-fracture patient suddenly confused, tachypneic, SpO2 dropped to {spo2}%! Petechial rash on chest and conjunctivae. Classic fat embolism syndrome!",
|
| 666 |
-
"trajectory_effect": "critical",
|
| 667 |
-
},
|
| 668 |
-
{
|
| 669 |
-
"name": "Compartment Syndrome",
|
| 670 |
-
"description": "Elevated intra-compartmental pressure after fracture/crush injury threatening limb viability",
|
| 671 |
-
"probability_base": 0.10,
|
| 672 |
-
"time_window": (60, 480),
|
| 673 |
-
"vitals_criteria": {"hr_above": 100},
|
| 674 |
-
"treatment_prevents": ["fasciotomy", "cast_bivalve", "cast_removal", "elevation", "orthopedic_consult"],
|
| 675 |
-
"urgency": "critical",
|
| 676 |
-
"agent_message": "Doctor! Patient's limb pain is excruciating and out of proportion! 5 P's — Pain on passive stretch, paresthesias! Compartment is rock hard. We need fasciotomy before we lose the limb!",
|
| 677 |
-
"trajectory_effect": "deteriorating",
|
| 678 |
-
},
|
| 679 |
-
{
|
| 680 |
-
"name": "Deep Vein Thrombosis / Pulmonary Embolism",
|
| 681 |
-
"description": "DVT with embolization to pulmonary vasculature after immobilization",
|
| 682 |
-
"probability_base": 0.08,
|
| 683 |
-
"time_window": (360, 4320),
|
| 684 |
-
"vitals_criteria": {"hr_above": 110, "spo2_below": 92, "rr_above": 22},
|
| 685 |
-
"treatment_prevents": ["dvt_prophylaxis", "enoxaparin", "heparin", "early_mobilization", "compression_stockings"],
|
| 686 |
-
"urgency": "critical",
|
| 687 |
-
"agent_message": "Doctor! Patient suddenly short of breath, chest pain, HR is {hr}! SpO2 dropped to {spo2}%. Calf is swollen. Post-immobilization — I think this is a PE!",
|
| 688 |
-
"trajectory_effect": "critical",
|
| 689 |
-
},
|
| 690 |
-
],
|
| 691 |
-
}
|
| 692 |
-
|
| 693 |
-
# ---------------------------------------------------------------------------
|
| 694 |
-
# Distraction / cross-patient interruptions (specialty-agnostic)
|
| 695 |
-
# ---------------------------------------------------------------------------
|
| 696 |
-
# Each distraction event is a dict with the schema:
#   name           — short label for the interruption
#   description    — what is happening (for logs / the evaluator)
#   min_time / max_time — simulation-minute window during which it may fire
#   probability    — per-check chance of firing while inside the window
#   agent_message  — verbatim in-character line delivered to the student
#   urgency        — severity tag consumed by the orchestrator
# These are specialty-agnostic: they add cognitive load rather than change
# the patient's clinical course.
DISTRACTION_EVENTS: list[dict] = [
    {
        "name": "Another Patient Emergency",
        "description": "Nurse asks for help with another patient crashing in the ward",
        "min_time": 30,
        "max_time": 180,
        "probability": 0.03,
        "agent_message": "Doctor, sorry to interrupt — the patient in bed 4 is having chest pain and looks unwell. Can you come quickly? I know you're busy here but...",
        "urgency": "urgent",
    },
    {
        "name": "Relative Confrontation",
        "description": "Angry family member demands to speak with the doctor",
        "min_time": 20,
        "max_time": 120,
        "probability": 0.04,
        "agent_message": "Doctor, the patient's family is at the nursing station and they're very upset. They want to know why nothing has been done. The son is threatening to complain to the superintendent.",
        "urgency": "urgent",
    },
    {
        "name": "Phone Call from Lab",
        "description": "Lab technician calls with a panic value that needs immediate attention",
        "min_time": 30,
        "max_time": 180,
        "probability": 0.05,
        "agent_message": "Doctor, urgent call from the lab — they've flagged a critical value on one of your patient's samples. They need a verbal acknowledgement and want to know if you want to repeat the test.",
        "urgency": "urgent",
    },
    {
        "name": "Equipment Failure",
        "description": "Critical equipment malfunctions",
        "min_time": 15,
        "max_time": 120,
        "probability": 0.03,
        "agent_message": "Doctor, the pulse oximeter seems to be giving erratic readings and the backup monitor is also not working. Should we shift the patient to a bed with a working monitor?",
        "urgency": "urgent",
    },
    {
        "name": "Blood Bank Delay",
        "description": "Blood bank notifies of delay in cross-matched blood",
        "min_time": 30,
        "max_time": 120,
        "probability": 0.04,
        "agent_message": "Doctor, blood bank called — the requested blood group is in short supply. They can arrange one unit but it will take another 2 hours. Should we call for donors?",
        "urgency": "urgent",
    },
]
|
| 743 |
-
|
| 744 |
-
|
| 745 |
-
# ---------------------------------------------------------------------------
|
| 746 |
-
# ComplicationEngine
|
| 747 |
-
# ---------------------------------------------------------------------------
|
| 748 |
-
|
| 749 |
-
class ComplicationEngine:
|
| 750 |
-
"""Generates probabilistic complications, time-based triggers, and urgent interruptions.
|
| 751 |
-
|
| 752 |
-
This engine is called by the orchestrator after each student action (after the
|
| 753 |
-
CaseStateManager has advanced time and evolved vitals). It evaluates every
|
| 754 |
-
possible complication for the current case, calculates time-dependent probability,
|
| 755 |
-
and returns any triggered events as SimulationEvent objects.
|
| 756 |
-
|
| 757 |
-
Design principles:
|
| 758 |
-
- Complications are CLINICALLY REALISTIC, not random
|
| 759 |
-
- Probability INCREASES over time if the condition is untreated
|
| 760 |
-
- Correct treatment PREVENTS complications
|
| 761 |
-
- Each specialty has its own complication profile
|
| 762 |
-
- Distractions and cross-patient interruptions add cognitive load
|
| 763 |
-
"""
|
| 764 |
-
|
| 765 |
-
def __init__(self, case_data: dict, state_manager: CaseStateManager):
    """Prepare the engine for a single simulated case.

    Resolves the case's specialty, diagnosis, and difficulty, selects the pool
    of possible complications for that combination, and initializes the
    bookkeeping used to avoid repeating fired events.
    """
    self.case_data = case_data
    self.state_manager = state_manager

    def normalized(raw, fallback: str) -> str:
        # Lowercase, trimmed, with a fallback for missing/empty values.
        return (raw or fallback).lower().strip()

    # Case identity used to select and weight complications.
    self.specialty: str = normalized(case_data.get("specialty"), "emergency")
    self.diagnosis: str = normalized(
        case_data.get("diagnosis") or case_data.get("final_diagnosis"), ""
    )
    self.difficulty: str = normalized(case_data.get("difficulty"), "intermediate")

    # Pool of complications applicable to this specialty/diagnosis.
    self.possible_complications: list[dict] = self._get_specialty_complications(
        self.specialty, self.diagnosis
    )

    # Complications and distractions fire at most once each.
    self.fired_complications: set[str] = set()
    self.fired_distractions: set[str] = set()

    # Event IDs start high to stay clear of state-manager-issued IDs.
    self._next_event_id: int = 5000

    # Harder cases see complications more often.
    scaling = {"beginner": 0.5, "intermediate": 1.0, "advanced": 1.5}
    self._difficulty_multiplier: float = scaling.get(self.difficulty, 1.0)

    logger.info(
        f"ComplicationEngine initialized: specialty={self.specialty}, "
        f"diagnosis={self.diagnosis}, difficulty={self.difficulty}, "
        f"possible_complications={len(self.possible_complications)}"
    )
|
| 800 |
-
|
| 801 |
-
# ------------------------------------------------------------------
|
| 802 |
-
# Public API
|
| 803 |
-
# ------------------------------------------------------------------
|
| 804 |
-
|
| 805 |
-
def check_complications(
    self,
    elapsed_minutes: int,
    current_vitals: dict,
    treatments: list,
    investigations: dict,
) -> list[SimulationEvent]:
    """Evaluate all possible complications and return any that trigger.

    Called by the orchestrator after each student action, AFTER the state manager
    has advanced time and evolved vitals. Each complication can fire at most once
    per case; a fired complication also escalates the patient trajectory.

    Args:
        elapsed_minutes: Current simulation clock.
        current_vitals: Current patient vitals dict from state manager.
        treatments: List of TreatmentRecord objects (or dicts) of treatments given so far.
        investigations: Dict of ordered investigations from state manager.
            NOTE: accepted for interface symmetry but not read by the current body.

    Returns:
        List of SimulationEvent objects for any triggered complications/interruptions.
    """
    triggered_events: list[SimulationEvent] = []

    # Collect all treatment descriptions once; reused for every complication check
    treatment_descriptions = self._collect_treatment_keywords(treatments)

    # 1. Check each possible clinical complication
    for complication in self.possible_complications:
        comp_name = complication["name"]

        # Skip if already fired (complications never repeat)
        if comp_name in self.fired_complications:
            continue

        # Check if we're inside this complication's time window
        min_t, max_t = complication["time_window"]
        if elapsed_minutes < min_t or elapsed_minutes > max_t:
            continue

        # Check if a preventive treatment was given (reduces probability ~95%)
        treated = self._is_treated(complication, treatment_descriptions)

        # Time/difficulty/vitals-adjusted probability for this tick
        probability = self._calculate_probability(
            complication, elapsed_minutes, treated, current_vitals
        )

        # Roll the dice — one random draw per eligible complication per tick
        if random.random() < probability:
            event = self._generate_interruption(complication, current_vitals, elapsed_minutes)
            triggered_events.append(event)
            self.fired_complications.add(comp_name)

            # Escalate patient trajectory on the state manager
            self._escalate_trajectory(complication)

            logger.warning(
                f"COMPLICATION TRIGGERED: {comp_name} at t={elapsed_minutes}min "
                f"(probability was {probability:.3f})"
            )

    # 2. Check distraction / cross-patient interruptions (at most one per tick)
    distraction = self._check_distractions(elapsed_minutes)
    if distraction is not None:
        triggered_events.append(distraction)

    return triggered_events
|
| 872 |
-
|
| 873 |
-
def get_possible_complications(self) -> list[dict]:
    """Expose this case's complication set for external inspection (e.g. evaluator).

    Returns a sanitized view: names, descriptions, urgency, timing window and
    the preventive treatments — but never the underlying probabilities, so the
    student-facing side cannot game the odds.
    """

    def sanitize(comp: dict) -> dict:
        # Project only the student-safe fields of one complication record.
        return {
            "name": comp["name"],
            "description": comp["description"],
            "urgency": comp["urgency"],
            "time_window": comp["time_window"],
            "preventable_by": comp["treatment_prevents"],
        }

    return [sanitize(comp) for comp in self.possible_complications]
|
| 888 |
-
|
| 889 |
-
def get_fired_complications(self) -> list[str]:
    """Return the names of complications that have already triggered."""
    return [*self.fired_complications]
|
| 892 |
-
|
| 893 |
-
def force_complication(self, complication_name: str, elapsed_minutes: int, current_vitals: dict) -> Optional[SimulationEvent]:
    """Force-trigger a named complication, bypassing the probability roll.

    Used by the evaluator agent or orchestrator for pedagogical purposes —
    e.g., to test whether the student can handle a specific emergency.

    Args:
        complication_name: Exact name of the complication to trigger.
        elapsed_minutes: Current simulation time.
        current_vitals: Current vitals dict.

    Returns:
        SimulationEvent if the complication exists and hasn't fired yet, else None.
    """
    # Look up the first complication with a matching name.
    match = next(
        (c for c in self.possible_complications if c["name"] == complication_name),
        None,
    )

    # Unknown name, or already fired — either way nothing to do.
    if match is None or complication_name in self.fired_complications:
        logger.warning(f"Cannot force complication '{complication_name}' — not found or already fired.")
        return None

    event = self._generate_interruption(match, current_vitals, elapsed_minutes)
    self.fired_complications.add(complication_name)
    self._escalate_trajectory(match)
    logger.warning(f"COMPLICATION FORCE-TRIGGERED: {complication_name} at t={elapsed_minutes}min")
    return event
|
| 917 |
-
|
| 918 |
-
# ------------------------------------------------------------------
|
| 919 |
-
# Specialty complication resolution
|
| 920 |
-
# ------------------------------------------------------------------
|
| 921 |
-
|
| 922 |
-
def _get_specialty_complications(self, specialty: str, diagnosis: str) -> list[dict]:
    """Resolve which complications are possible for this case.

    Strategy:
        1. Look up exact specialty match in SPECIALTY_COMPLICATIONS.
        2. If not found, fall back to "emergency" (generic acute complications).
        3. Add cross-cutting emergency complications (at reduced base probability)
           for non-emergency specialties.
        4. Apply diagnosis-keyword probability boosts via _filter_by_diagnosis.

    Args:
        specialty: Lowercased specialty key (e.g. "cardiology").
        diagnosis: Lowercased diagnosis string used for keyword boosting.

    Returns:
        List of complication definition dicts for this case.
    """
    complications: list[dict] = []

    # Primary specialty complications
    primary = SPECIALTY_COMPLICATIONS.get(specialty, [])
    complications.extend(primary)

    # If specialty not found, use emergency as fallback
    if not primary and specialty != "emergency":
        logger.info(f"No specific complications for specialty '{specialty}', using emergency fallback")
        complications.extend(SPECIALTY_COMPLICATIONS.get("emergency", []))

    # Add a few cross-cutting emergency complications if not already covered
    # (septic shock can happen in any specialty, anaphylaxis from any drug)
    emergency_crosscuts = ["Anaphylaxis", "Hemorrhagic Shock"]
    if specialty != "emergency":
        # FIX: hoist the name set out of the loop — previously the guard rebuilt
        # a [c["name"] ...] list on every iteration (accidental O(n^2)).
        existing_names = {c["name"] for c in complications}
        for ec_comp in SPECIALTY_COMPLICATIONS.get("emergency", []):
            if ec_comp["name"] in emergency_crosscuts and ec_comp["name"] not in existing_names:
                # Add with reduced base probability since it's cross-specialty
                cross_comp = dict(ec_comp)
                cross_comp["probability_base"] = ec_comp["probability_base"] * 0.3
                complications.append(cross_comp)
                # Keep the guard accurate as we append (matches original semantics,
                # which re-scanned the growing list each iteration).
                existing_names.add(ec_comp["name"])

    # Diagnosis-specific filtering: boost probability for relevant complications
    complications = self._filter_by_diagnosis(complications, diagnosis)

    return complications
|
| 957 |
-
|
| 958 |
-
def _filter_by_diagnosis(self, complications: list[dict], diagnosis: str) -> list[dict]:
    """Boost complication probabilities based on the specific diagnosis.

    For example, if the diagnosis contains "stemi", boost cardiogenic shock.
    If it contains "dengue", boost hemorrhagic manifestations. Despite the
    name, nothing is filtered out — complications are only re-weighted.
    Boosted base probabilities are capped at 0.5.

    Args:
        complications: Candidate complication definition dicts.
        diagnosis: Lowercased diagnosis string; matched by substring.

    Returns:
        A new list with boosted copies when any keyword matches, otherwise
        the input list unchanged.
    """
    if not diagnosis:
        return complications

    # Diagnosis keyword -> complication name -> probability multiplier.
    # Keywords are matched as substrings of the (lowercased) diagnosis.
    diagnosis_boosts: dict[str, dict[str, float]] = {
        "stemi": {"Cardiogenic Shock": 2.0, "Ventricular Tachycardia": 1.8, "Cardiac Arrest — VF/Pulseless VT": 1.5},
        "nstemi": {"Cardiogenic Shock": 1.3, "Acute Heart Failure / Pulmonary Edema": 1.5},
        "heart failure": {"Acute Heart Failure / Pulmonary Edema": 2.0, "Cardiogenic Shock": 1.5},
        "pneumonia": {"Respiratory Failure — Type 1": 1.8, "Septic Shock": 1.5, "ARDS Development": 1.5},
        "copd": {"Respiratory Failure — Type 1": 1.5},
        "asthma": {"Respiratory Failure — Type 1": 2.0},
        "dengue": {"Dengue Hemorrhagic Manifestations": 2.5, "Disseminated Intravascular Coagulation": 1.5},
        "malaria": {"Septic Shock": 1.3, "Multi-Organ Dysfunction": 1.5},
        "sepsis": {"Septic Shock": 2.0, "Multi-Organ Dysfunction": 1.8, "Disseminated Intravascular Coagulation": 1.5},
        "meningitis": {"Raised ICP — Deterioration": 1.8, "Status Epilepticus": 1.5, "Cerebral Herniation": 1.3},
        "stroke": {"Cerebral Herniation": 2.0, "Raised ICP — Deterioration": 1.8, "Status Epilepticus": 1.3},
        "cirrhosis": {"Variceal Rupture": 2.0, "Hepatic Encephalopathy": 2.0, "Spontaneous Bacterial Peritonitis": 2.0},
        "gi bleed": {"Massive Upper GI Bleed": 2.5, "Variceal Rupture": 1.5},
        "dka": {"Cerebral Edema in DKA": 2.0, "Severe Hypoglycemia — Seizure/Coma": 1.5},
        "thyroid storm": {"Thyroid Storm": 2.5},
        "addison": {"Adrenal Crisis": 2.5},
        "pre-eclampsia": {"Eclampsia": 2.5, "DIC in Obstetrics": 1.5},
        "eclampsia": {"Eclampsia": 2.0, "DIC in Obstetrics": 1.8},
        "leukemia": {"Tumor Lysis Syndrome": 2.0, "Febrile Neutropenia — Sepsis": 2.0},
        "lymphoma": {"Tumor Lysis Syndrome": 2.0, "Febrile Neutropenia — Sepsis": 1.5},
        "fracture": {"Fat Embolism Syndrome": 2.0, "Compartment Syndrome": 1.8, "Deep Vein Thrombosis / Pulmonary Embolism": 1.5},
        "ckd": {"Hyperkalemia — Cardiac Arrest": 2.0, "Flash Pulmonary Edema": 1.8, "Uremic Encephalopathy": 1.5},
        "aki": {"Hyperkalemia — Cardiac Arrest": 1.8, "Flash Pulmonary Edema": 1.5},
        "sjs": {"SJS Progression to TEN": 2.5, "Secondary Sepsis from Skin": 1.8},
        "nms": {"Neuroleptic Malignant Syndrome": 2.5},
    }

    # Find all matching boosts; when several keywords name the same
    # complication, the highest multiplier wins.
    applicable_boosts: dict[str, float] = {}
    for keyword, boosts in diagnosis_boosts.items():
        if keyword in diagnosis:
            for comp_name, multiplier in boosts.items():
                # Take the highest boost if multiple keywords match
                if comp_name not in applicable_boosts or multiplier > applicable_boosts[comp_name]:
                    applicable_boosts[comp_name] = multiplier

    # Apply boosts — copies the dicts so the shared templates stay unmodified.
    if applicable_boosts:
        boosted = []
        for comp in complications:
            comp_copy = dict(comp)
            if comp_copy["name"] in applicable_boosts:
                comp_copy["probability_base"] = min(
                    0.5,  # cap at 50% base probability
                    comp_copy["probability_base"] * applicable_boosts[comp_copy["name"]]
                )
            boosted.append(comp_copy)
        return boosted

    return complications
|
| 1019 |
-
|
| 1020 |
-
# ------------------------------------------------------------------
|
| 1021 |
-
# Probability calculation
|
| 1022 |
-
# ------------------------------------------------------------------
|
| 1023 |
-
|
| 1024 |
-
def _calculate_probability(
    self,
    complication: dict,
    elapsed: int,
    treated: bool,
    current_vitals: dict,
) -> float:
    """Compute the per-tick firing probability for one complication.

    The model, in order:
        1. Critical complications never fire before the 30-minute mark —
           students always get time to assess and begin treatment.
        2. A quadratic time ramp: probability grows from 0 at the start of
           the window to the base value at 75% of the window, then plateaus.
        3. Difficulty scaling (harder cases -> higher multiplier).
        4. Effective treatment slashes the probability by 95% (not to zero —
           treatment doesn't guarantee prevention).
        5. Dangerous vitals compound a further boost.
        6. Result is clamped to [0.0, 0.6] so no single tick is a certainty.

    Returns:
        Probability as a float in [0.0, 0.6].
    """
    # Rule 1: global grace period for critical complications.
    if elapsed < 30 and complication.get("urgency") == "critical":
        return 0.0

    start, end = complication["time_window"]
    span = max(end - start, 1)
    ramp_end = span * 0.75
    offset = elapsed - start

    # Rule 2: quadratic ramp up to 75% of the window, flat afterwards.
    if offset > ramp_end:
        shape = 1.0
    else:
        fraction = offset / ramp_end
        shape = fraction * fraction

    # Rules 2+3 combined: base scaled by time shape and difficulty.
    p = complication["probability_base"] * shape * self._difficulty_multiplier

    # Rule 4: preventive treatment given -> 95% reduction.
    if treated:
        p *= 0.05

    # Rule 5: matching danger vitals compound the risk.
    p *= self._evaluate_vitals_criteria(complication, current_vitals)

    # Rule 6: clamp the per-tick probability.
    return min(0.6, max(0.0, p))
|
| 1084 |
-
|
| 1085 |
-
def _evaluate_vitals_criteria(self, complication: dict, current_vitals: dict) -> float:
    """Boost factor from the current vitals for one complication.

    Each criterion in the complication's ``vitals_criteria`` that the current
    vitals satisfy compounds a 1.5x multiplier. With no criteria (or none
    matching) the factor is 1.0. Unknown criterion keys are ignored.
    """
    criteria = complication.get("vitals_criteria", {})
    if not criteria:
        return 1.0

    # criterion key -> (vital name, default reading, comparison direction)
    rules = {
        "bp_systolic_below": ("bp_systolic", 120, "lt"),
        "bp_systolic_above": ("bp_systolic", 120, "gt"),
        "hr_above": ("hr", 80, "gt"),
        "hr_below": ("hr", 80, "lt"),
        "spo2_below": ("spo2", 98, "lt"),
        "rr_above": ("rr", 16, "gt"),
        "temp_above": ("temp", 37.0, "gt"),
        "temp_below": ("temp", 37.0, "lt"),
    }

    multiplier = 1.0
    for criterion, threshold in criteria.items():
        rule = rules.get(criterion)
        if rule is None:
            # Unrecognized criterion: skip, exactly as the original chain did.
            continue
        vital_key, default_reading, direction = rule
        reading = current_vitals.get(vital_key, default_reading)
        exceeded = reading > threshold if direction == "gt" else reading < threshold
        if exceeded:
            multiplier *= 1.5
    return multiplier
|
| 1124 |
-
|
| 1125 |
-
# ------------------------------------------------------------------
|
| 1126 |
-
# Treatment matching
|
| 1127 |
-
# ------------------------------------------------------------------
|
| 1128 |
-
|
| 1129 |
-
def _collect_treatment_keywords(self, treatments: list) -> set[str]:
|
| 1130 |
-
"""Extract a set of lowercase keywords from all administered treatments.
|
| 1131 |
-
|
| 1132 |
-
Handles both TreatmentRecord objects and plain dicts.
|
| 1133 |
-
"""
|
| 1134 |
-
keywords: set[str] = set()
|
| 1135 |
-
for tx in treatments:
|
| 1136 |
-
if hasattr(tx, "description"):
|
| 1137 |
-
desc = tx.description
|
| 1138 |
-
elif isinstance(tx, dict):
|
| 1139 |
-
desc = tx.get("description", "")
|
| 1140 |
-
else:
|
| 1141 |
-
desc = str(tx)
|
| 1142 |
-
|
| 1143 |
-
# Tokenize the treatment description into keywords
|
| 1144 |
-
desc_lower = desc.lower()
|
| 1145 |
-
# Split on common separators
|
| 1146 |
-
for token in desc_lower.replace(",", " ").replace(".", " ").replace("-", "_").replace("/", " ").split():
|
| 1147 |
-
token = token.strip()
|
| 1148 |
-
if len(token) > 2: # skip very short tokens
|
| 1149 |
-
keywords.add(token)
|
| 1150 |
-
|
| 1151 |
-
# Also keep the full description as a keyword for phrase matching
|
| 1152 |
-
keywords.add(desc_lower)
|
| 1153 |
-
|
| 1154 |
-
return keywords
|
| 1155 |
-
|
| 1156 |
-
def _is_treated(self, complication: dict, treatment_keywords: set[str]) -> bool:
|
| 1157 |
-
"""Check if any preventive treatment for this complication has been given.
|
| 1158 |
-
|
| 1159 |
-
Uses fuzzy keyword matching: if any token from treatment_prevents appears
|
| 1160 |
-
in the collected treatment keywords, we consider it treated.
|
| 1161 |
-
"""
|
| 1162 |
-
prevents = complication.get("treatment_prevents", [])
|
| 1163 |
-
for prevent_keyword in prevents:
|
| 1164 |
-
prevent_lower = prevent_keyword.lower().replace("-", "_")
|
| 1165 |
-
# Direct match
|
| 1166 |
-
if prevent_lower in treatment_keywords:
|
| 1167 |
-
return True
|
| 1168 |
-
# Partial match: check if any treatment keyword contains this keyword
|
| 1169 |
-
for tk in treatment_keywords:
|
| 1170 |
-
if prevent_lower in tk or tk in prevent_lower:
|
| 1171 |
-
return True
|
| 1172 |
-
return False
|
| 1173 |
-
|
| 1174 |
-
# ------------------------------------------------------------------
|
| 1175 |
-
# Event generation
|
| 1176 |
-
# ------------------------------------------------------------------
|
| 1177 |
-
|
| 1178 |
-
def _generate_interruption(
    self,
    complication: dict,
    current_vitals: dict,
    elapsed_minutes: int,
) -> SimulationEvent:
    """Create a SimulationEvent from a triggered complication.

    The agent message template is filled with the latest vital-sign readings
    so the interruption is grounded in concrete clinical numbers. The event
    is also appended to the state manager's timeline.
    """
    # "??" stands in for any vital the monitor has not yet reported.
    vital_values = {
        name: current_vitals.get(name, "??")
        for name in ("bp_systolic", "bp_diastolic", "hr", "rr", "spo2", "temp")
    }
    message = complication["agent_message"].format(**vital_values)

    is_critical = complication["urgency"] == "critical"
    event = SimulationEvent(
        event_id=f"comp-{self._next_event_id}",
        timestamp=elapsed_minutes,
        event_type="critical_complication" if is_critical else "urgent_complication",
        title=f"COMPLICATION: {complication['name']}",
        description=message,
        agent_type="nurse",
    )
    self._next_event_id += 1

    # Register on the state manager so the event appears in the timeline.
    self.state_manager.events.append(event)

    return event
|
| 1219 |
-
|
| 1220 |
-
# ------------------------------------------------------------------
|
| 1221 |
-
# Trajectory escalation
|
| 1222 |
-
# ------------------------------------------------------------------
|
| 1223 |
-
|
| 1224 |
-
def _escalate_trajectory(self, complication: dict):
    """Escalate the patient trajectory on the state manager for a complication.

    A "critical" effect forces the trajectory straight to CRITICAL; a
    "deteriorating" effect only downgrades a patient who is currently
    STABLE or IMPROVING. Any other effect value leaves trajectory untouched.
    """
    effect = complication.get("trajectory_effect", "deteriorating")
    manager = self.state_manager

    if effect == "critical":
        manager.trajectory = PatientTrajectory.CRITICAL
        return
    if effect != "deteriorating":
        return  # unknown effect values are a no-op
    if manager.trajectory in (PatientTrajectory.STABLE, PatientTrajectory.IMPROVING):
        manager.trajectory = PatientTrajectory.DETERIORATING
|
| 1237 |
-
|
| 1238 |
-
# ------------------------------------------------------------------
|
| 1239 |
-
# Distraction events
|
| 1240 |
-
# ------------------------------------------------------------------
|
| 1241 |
-
|
| 1242 |
-
def _check_distractions(self, elapsed_minutes: int) -> Optional[SimulationEvent]:
    """Check if a distraction / cross-patient interruption should fire.

    At most two distractions fire per session (the guard below caps
    ``fired_distractions`` at 2 — the original comment claiming "one per
    session" did not match the code). Distractions increase cognitive load
    and test the student's ability to prioritize.

    Args:
        elapsed_minutes: Minutes elapsed since the simulation started.

    Returns:
        A SimulationEvent if a distraction triggers, else None.
    """
    # Cap: at most two distractions per session.
    if len(self.fired_distractions) >= 2:
        return None

    for distraction in DISTRACTION_EVENTS:
        d_name = distraction["name"]
        # Never repeat a distraction that already fired.
        if d_name in self.fired_distractions:
            continue

        # Each distraction is only eligible inside its configured time window.
        if elapsed_minutes < distraction["min_time"] or elapsed_minutes > distraction["max_time"]:
            continue

        # Probability adjusted by difficulty
        prob = distraction["probability"] * self._difficulty_multiplier

        if random.random() < prob:
            event = SimulationEvent(
                event_id=f"dist-{self._next_event_id}",
                timestamp=elapsed_minutes,
                event_type="distraction",
                title=f"INTERRUPTION: {d_name}",
                description=distraction["agent_message"],
                agent_type="nurse",
            )
            self._next_event_id += 1
            self.fired_distractions.add(d_name)

            # Register on state manager timeline
            self.state_manager.events.append(event)

            logger.info(f"DISTRACTION TRIGGERED: {d_name} at t={elapsed_minutes}min")
            return event

    return None
|
| 1285 |
-
|
| 1286 |
-
# ------------------------------------------------------------------
|
| 1287 |
-
# Summary / debug
|
| 1288 |
-
# ------------------------------------------------------------------
|
| 1289 |
-
|
| 1290 |
-
def get_engine_summary(self) -> str:
    """Return a summary of the complication engine state for debugging / logging.

    Lists the configuration (specialty, diagnosis, difficulty multiplier),
    which complications and distractions have already fired, and every
    possible complication with its base probability, time window and urgency.

    Returns:
        A multi-line, human-readable summary string.
    """
    # Fix: the header lines were f-strings with no placeholders (lint F541);
    # plain strings are used where nothing is interpolated.
    lines = [
        "ComplicationEngine Summary:",
        f"  Specialty: {self.specialty}",
        f"  Diagnosis: {self.diagnosis}",
        f"  Difficulty: {self.difficulty} (multiplier: {self._difficulty_multiplier})",
        f"  Possible complications: {len(self.possible_complications)}",
        f"  Fired complications: {self.fired_complications or 'none'}",
        f"  Fired distractions: {self.fired_distractions or 'none'}",
        "  Possible complication names:",
    ]
    lines.extend(
        f"    - {c['name']} (base_p={c['probability_base']:.2f}, "
        f"window={c['time_window']}, urgency={c['urgency']})"
        for c in self.possible_complications
    )
    return "\n".join(lines)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/evaluator.py
DELETED
|
@@ -1,215 +0,0 @@
|
|
| 1 |
-
"""Evaluator Agent - Analyzes student communication and updates patient state."""
|
| 2 |
-
import logging
|
| 3 |
-
import os
|
| 4 |
-
from typing import Dict, Tuple
|
| 5 |
-
|
| 6 |
-
import anthropic
|
| 7 |
-
|
| 8 |
-
from app.models.simulation import EmotionalState, RapportLevel, FeedbackType, TutorFeedback
|
| 9 |
-
|
| 10 |
-
logger = logging.getLogger(__name__)
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
EVALUATOR_SYSTEM_PROMPT = """You are an expert evaluator analyzing medical student communication with patients.
|
| 14 |
-
|
| 15 |
-
Analyze the student's message for:
|
| 16 |
-
1. EMPATHY: Did they acknowledge patient's distress/emotions?
|
| 17 |
-
2. COMMUNICATION QUALITY: Open-ended questions vs closed yes/no questions?
|
| 18 |
-
3. BEDSIDE MANNER: Tone, warmth, professionalism
|
| 19 |
-
4. CLINICAL REASONING: Systematic approach vs random questioning
|
| 20 |
-
|
| 21 |
-
Based on analysis, determine:
|
| 22 |
-
- NEW EMOTIONAL STATE: How does the patient feel after this message?
|
| 23 |
-
- If student is warm/empathetic → patient becomes more CALM
|
| 24 |
-
- If student is cold/rushed → patient becomes ANXIOUS/DEFENSIVE
|
| 25 |
-
- If student interrupts or dismisses → patient becomes DEFENSIVE
|
| 26 |
-
|
| 27 |
-
- RAPPORT CHANGE: Did rapport increase or decrease? (1-5 scale)
|
| 28 |
-
- Open-ended questions, empathy → rapport increases
|
| 29 |
-
- Closed questions, dismissive tone → rapport decreases
|
| 30 |
-
|
| 31 |
-
Current patient state:
|
| 32 |
-
- Emotional state: {current_emotional_state}
|
| 33 |
-
- Rapport level: {current_rapport}
|
| 34 |
-
|
| 35 |
-
Student message: {student_message}
|
| 36 |
-
|
| 37 |
-
Respond in this EXACT format:
|
| 38 |
-
NEW_EMOTIONAL_STATE: [calm/concerned/anxious/defensive]
|
| 39 |
-
NEW_RAPPORT: [1-5]
|
| 40 |
-
EMPATHY_DETECTED: [yes/no]
|
| 41 |
-
OPEN_ENDED_QUESTION: [yes/no]
|
| 42 |
-
FEEDBACK_TYPE: [positive/warning/critical]
|
| 43 |
-
FEEDBACK_MESSAGE: [One sentence explaining what student did well or should improve]
|
| 44 |
-
"""
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
class EvaluatorAgent:
    """Evaluates student communication and updates simulation state.

    Delegates the evaluation to the Anthropic API when a usable key is
    configured; otherwise (or on any API error) it falls back to a simple
    keyword heuristic so the simulation keeps working offline.
    """

    def __init__(self):
        # self.client stays None when no usable key is configured; every
        # evaluation then routes through the keyword-based fallback.
        self.api_key = os.environ.get("ANTHROPIC_API_KEY")
        self.client = None
        if self.api_key and self.api_key != "sk-ant-your-key-here":
            try:
                self.client = anthropic.Anthropic(api_key=self.api_key)
            except Exception as e:
                # A key that is present but broken is treated as fatal
                # (re-raised) rather than silently degrading to the fallback.
                logger.error(f"Failed to initialize Claude client for evaluator: {e}")
                raise

    def evaluate_message(
        self,
        student_message: str,
        current_emotional_state: EmotionalState,
        current_rapport: RapportLevel,
    ) -> Tuple[EmotionalState, RapportLevel, TutorFeedback]:
        """
        Evaluate student message and return updated state + feedback.

        Args:
            student_message: Raw text the student sent to the patient.
            current_emotional_state: Patient's emotional state before this message.
            current_rapport: Rapport level before this message.

        Returns:
            (new_emotional_state, new_rapport_level, feedback)
        """

        if not self.client:
            return self._fallback_evaluation(
                student_message, current_emotional_state, current_rapport
            )

        system_prompt = EVALUATOR_SYSTEM_PROMPT.format(
            current_emotional_state=current_emotional_state.value,
            current_rapport=current_rapport.value,
            student_message=student_message,
        )

        try:
            response = self.client.messages.create(
                model="claude-opus-4-6",
                max_tokens=300,
                system=system_prompt,
                messages=[{
                    "role": "user",
                    "content": "Evaluate this student message.",
                }],
                temperature=0.3,  # Lower temp for consistent evaluations
            )

            evaluation_text = response.content[0].text.strip()
            return self._parse_evaluation(evaluation_text, current_emotional_state, current_rapport)

        except Exception as e:
            # Any API failure degrades gracefully to the heuristic fallback.
            logger.error(f"Evaluator API error: {e}")
            return self._fallback_evaluation(
                student_message, current_emotional_state, current_rapport
            )

    def _parse_evaluation(
        self,
        evaluation_text: str,
        current_emotional_state: EmotionalState,
        current_rapport: RapportLevel,
    ) -> Tuple[EmotionalState, RapportLevel, TutorFeedback]:
        """Parse Claude's "KEY: value" line-formatted evaluation response.

        Every field falls back to the current/previous value when the model's
        output does not parse cleanly, so a malformed response never crashes
        the simulation.
        """

        lines = evaluation_text.split("\n")
        parsed = {}

        # Build a KEY -> value map from "KEY: value" lines; lines without a
        # colon are ignored.
        for line in lines:
            if ":" in line:
                key, value = line.split(":", 1)
                parsed[key.strip()] = value.strip()

        # Extract values
        try:
            emotional_state_str = parsed.get("NEW_EMOTIONAL_STATE", current_emotional_state.value)
            new_emotional_state = EmotionalState(emotional_state_str.lower())
        except ValueError:
            new_emotional_state = current_emotional_state

        try:
            rapport_str = parsed.get("NEW_RAPPORT", str(current_rapport.value))
            new_rapport = RapportLevel(int(rapport_str))
        except (ValueError, KeyError):
            new_rapport = current_rapport

        try:
            feedback_type_str = parsed.get("FEEDBACK_TYPE", "warning")
            feedback_type = FeedbackType(feedback_type_str.lower())
        except ValueError:
            feedback_type = FeedbackType.WARNING

        feedback_message = parsed.get("FEEDBACK_MESSAGE", "Continue with your assessment.")

        feedback = TutorFeedback(
            type=feedback_type,
            message=feedback_message,
        )

        return new_emotional_state, new_rapport, feedback

    def _fallback_evaluation(
        self,
        student_message: str,
        current_emotional_state: EmotionalState,
        current_rapport: RapportLevel,
    ) -> Tuple[EmotionalState, RapportLevel, TutorFeedback]:
        """Simple keyword-based fallback evaluation (no API access needed)."""

        message_lower = student_message.lower()
        new_emotional_state = current_emotional_state
        new_rapport = current_rapport

        # Detect empathy markers
        empathy_markers = ["understand", "worried", "difficult", "sorry", "must be"]
        has_empathy = any(marker in message_lower for marker in empathy_markers)

        # Detect open-ended questions
        open_ended_markers = ["tell me", "describe", "how do you", "what happened", "when did"]
        has_open_ended = any(marker in message_lower for marker in open_ended_markers)

        # Detect negative markers
        negative_markers = ["quickly", "just tell me", "yes or no", "hurry"]
        has_negative = any(marker in message_lower for marker in negative_markers)

        # Update emotional state
        if has_empathy and has_open_ended:
            # Student is doing well → patient calms down one step
            if current_emotional_state == EmotionalState.ANXIOUS:
                new_emotional_state = EmotionalState.CONCERNED
            elif current_emotional_state == EmotionalState.DEFENSIVE:
                new_emotional_state = EmotionalState.CONCERNED
            elif current_emotional_state == EmotionalState.CONCERNED:
                new_emotional_state = EmotionalState.CALM

            # Rapport increases (capped at 5)
            if current_rapport.value < 5:
                new_rapport = RapportLevel(current_rapport.value + 1)

            feedback = TutorFeedback(
                type=FeedbackType.POSITIVE,
                message="Good use of open-ended questions and empathy.",
            )

        elif has_negative:
            # Student is rushed → patient becomes defensive
            if current_emotional_state == EmotionalState.CALM:
                new_emotional_state = EmotionalState.CONCERNED
            else:
                new_emotional_state = EmotionalState.DEFENSIVE

            # Rapport decreases (floored at 1)
            if current_rapport.value > 1:
                new_rapport = RapportLevel(current_rapport.value - 1)

            feedback = TutorFeedback(
                type=FeedbackType.CRITICAL,
                message="Patient seems rushed. Try slowing down and showing empathy.",
            )

        else:
            # Neutral interaction
            feedback = TutorFeedback(
                type=FeedbackType.WARNING,
                message="Consider using more open-ended questions to build rapport.",
            )

        return new_emotional_state, new_rapport, feedback
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/family_agent.py
DELETED
|
@@ -1,363 +0,0 @@
|
|
| 1 |
-
"""Family member agent — brings cultural context, emotional pressure, and additional history in Hinglish."""
|
| 2 |
-
|
| 3 |
-
from app.core.agents.base_agent import BaseAgent
|
| 4 |
-
from app.core.agents.symptom_translator import get_family_friendly_description
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
FAMILY_SYSTEM_PROMPT = """You are a family member of a patient in an Indian government hospital. A medical student (junior doctor) is examining your relative.
|
| 8 |
-
|
| 9 |
-
CRITICAL RULES:
|
| 10 |
-
1. You speak in Hindi-English mix (Hinglish) naturally — like a real Indian family member would.
|
| 11 |
-
Examples: "Doctor sahab, please inka kuch karo!", "Hum bahut pareshan hain doctor", "Private mein bahut kharcha ho gaya"
|
| 12 |
-
2. You do NOT know medical terminology. You describe things in lay terms.
|
| 13 |
-
3. You are emotionally invested — worried, anxious, sometimes pushy or tearful.
|
| 14 |
-
4. You may speak FOR the patient, interrupt, or add details the patient forgot.
|
| 15 |
-
5. You may withhold embarrassing information initially (alcoholism, mental health, family disputes).
|
| 16 |
-
6. You provide cultural context: dietary habits, home remedies tried, religious beliefs.
|
| 17 |
-
7. Keep responses realistic — 2-4 sentences typically, more when telling the backstory.
|
| 18 |
-
8. NEVER reveal information beyond the case data. If asked something not in the history, say "Humein nahi pata doctor" or "Yeh toh inhone kabhi bataya nahi."
|
| 19 |
-
9. You may contradict the patient's story slightly — this is realistic family dynamics.
|
| 20 |
-
|
| 21 |
-
YOUR RELATIONSHIP: {relationship} of the patient
|
| 22 |
-
|
| 23 |
-
REALISTIC INDIAN FAMILY BEHAVIORS:
|
| 24 |
-
- You tried home remedies first: haldi doodh, kadha, Vicks ki malish, desi ghee, neem ka paani
|
| 25 |
-
- You took the patient to a local RMP / jhola-chhaap doctor who gave injections and "drip" but nothing worked
|
| 26 |
-
- You may bring outside opinions: "Padosi ne kaha ki yeh typhoid hai", "WhatsApp pe padha ki yeh cancer ho sakta hai"
|
| 27 |
-
- You came to govt hospital because private was too expensive: "Private mein 50,000 laga diye, kuch nahi hua"
|
| 28 |
-
- You are overprotective: may answer questions directed at the patient, hover, get emotional
|
| 29 |
-
- Cost concerns are always present: "Doctor, kitna kharcha hoga?", "Hum garib log hain", "Insurance nahi hai"
|
| 30 |
-
- Work/livelihood concerns: "Yeh akele kamane wale hain", "Dukaan band hai 5 din se"
|
| 31 |
-
- Religious/cultural: "Mandir mein mannat maangi hai", "Maulvi sahab ne dum karwaya tha", "Gurudwara se langar leke aaye hain"
|
| 32 |
-
- Family medical history shared (sometimes reluctantly): "Inka papa ko bhi sugar thi", "Ghar mein sabko BP hai"
|
| 33 |
-
- May pressure the doctor: "Doctor kuch karo na please!", "Itna time kyun lag raha hai?"
|
| 34 |
-
- May bring food against medical advice: "Thoda doodh pilane mein kya hai doctor?"
|
| 35 |
-
- May question everything: "Yeh kitni dawaiyan de rahe ho?", "Injection zaruri hai kya?"
|
| 36 |
-
- May get emotional: "Agar inko kuch ho gaya toh...", crying, pleading
|
| 37 |
-
- May blame the patient: "Maine kaha tha sharab mat piyo!", "Khana time pe nahi khate"
|
| 38 |
-
- May share TMI: "Doctor, inki shaadi mein bhi problem hai", oversharing personal details
|
| 39 |
-
- May ask about diet repeatedly: "Kya khila sakte hain?", "Chai de sakte hain kya?"
|
| 40 |
-
|
| 41 |
-
PATIENT DETAILS:
|
| 42 |
-
- Patient age: {patient_age}, Gender: {patient_gender}, Location: {location}
|
| 43 |
-
- Chief complaint: {chief_complaint}
|
| 44 |
-
- Presentation: {presentation}
|
| 45 |
-
- History: {history}
|
| 46 |
-
- Family history: {family_history}
|
| 47 |
-
|
| 48 |
-
EMOTIONAL STATE:
|
| 49 |
-
- {emotional_state}
|
| 50 |
-
|
| 51 |
-
CULTURAL CONTEXT:
|
| 52 |
-
- Location-based: {location} — adjust dialect and cultural references accordingly
|
| 53 |
-
- Socioeconomic: Government hospital implies middle/lower-middle class
|
| 54 |
-
- You may reference local beliefs and practices specific to the region
|
| 55 |
-
|
| 56 |
-
Respond ONLY as the family member. Stay in character completely. Be emotionally authentic.
|
| 57 |
-
|
| 58 |
-
FORMATTING RULES:
|
| 59 |
-
- Do NOT use markdown formatting like ** or * in your responses
|
| 60 |
-
- Write in plain text only
|
| 61 |
-
- For actions or expressions, use plain text like: (doing something) instead of *doing something*"""
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
class FamilyAgent(BaseAgent):
|
| 65 |
-
"""Family member agent that provides cultural context and emotional pressure in Hinglish."""
|
| 66 |
-
|
| 67 |
-
agent_type = "family"
|
| 68 |
-
display_name = "Family Member"
|
| 69 |
-
|
| 70 |
-
def __init__(self):
    """Initialize the family agent with neutral defaults.

    configure() replaces these with case-specific values before use.
    """
    super().__init__()
    # Default persona until a case is loaded: a worried mother.
    self.emotional_state = "worried"
    self.relationship = "Mother"
    # Filled by configure() with the keys FAMILY_SYSTEM_PROMPT expects.
    self.family_info: dict = {}
|
| 75 |
-
|
| 76 |
-
def configure(self, case_data: dict):
|
| 77 |
-
"""Configure family member with case-specific data."""
|
| 78 |
-
patient = case_data.get("patient", {})
|
| 79 |
-
age = patient.get("age", 45)
|
| 80 |
-
gender = patient.get("gender", "Male")
|
| 81 |
-
location = patient.get("location", "Delhi")
|
| 82 |
-
|
| 83 |
-
# Choose appropriate family relationship based on patient demographics
|
| 84 |
-
self.relationship = self._choose_relationship(age, gender)
|
| 85 |
-
self.display_name = f"Patient's {self.relationship}"
|
| 86 |
-
|
| 87 |
-
self.family_info = {
|
| 88 |
-
"patient_age": age,
|
| 89 |
-
"patient_gender": gender,
|
| 90 |
-
"location": location,
|
| 91 |
-
"chief_complaint": case_data.get("chief_complaint", ""),
|
| 92 |
-
"presentation": case_data.get("initial_presentation", ""),
|
| 93 |
-
"history": "",
|
| 94 |
-
"family_history": "",
|
| 95 |
-
"relationship": self.relationship,
|
| 96 |
-
}
|
| 97 |
-
|
| 98 |
-
# Extract history and family history from stages
|
| 99 |
-
for stage in case_data.get("stages", []):
|
| 100 |
-
if stage.get("stage") == "history":
|
| 101 |
-
self.family_info["history"] = stage.get("info", "")
|
| 102 |
-
elif stage.get("stage") == "family_history":
|
| 103 |
-
self.family_info["family_history"] = stage.get("info", "")
|
| 104 |
-
|
| 105 |
-
# If no dedicated family_history stage, try to extract from general history
|
| 106 |
-
if not self.family_info["family_history"]:
|
| 107 |
-
history_text = self.family_info.get("history", "")
|
| 108 |
-
if "family" in history_text.lower():
|
| 109 |
-
self.family_info["family_history"] = history_text
|
| 110 |
-
|
| 111 |
-
# Set emotional state based on case severity
|
| 112 |
-
self._set_emotional_state(case_data)
|
| 113 |
-
|
| 114 |
-
def _choose_relationship(self, age: int, gender: str) -> str:
|
| 115 |
-
"""Choose realistic family relationship based on patient demographics."""
|
| 116 |
-
if age < 12:
|
| 117 |
-
return "Mother"
|
| 118 |
-
elif age < 18:
|
| 119 |
-
return "Father" if gender == "Male" else "Mother"
|
| 120 |
-
elif age < 30:
|
| 121 |
-
if gender == "Male":
|
| 122 |
-
return "Mother"
|
| 123 |
-
else:
|
| 124 |
-
return "Husband"
|
| 125 |
-
elif age < 50:
|
| 126 |
-
if gender == "Male":
|
| 127 |
-
return "Wife"
|
| 128 |
-
else:
|
| 129 |
-
return "Husband"
|
| 130 |
-
elif age < 65:
|
| 131 |
-
return "Son" if gender == "Male" else "Daughter"
|
| 132 |
-
else:
|
| 133 |
-
return "Son"
|
| 134 |
-
|
| 135 |
-
def _set_emotional_state(self, case_data: dict):
|
| 136 |
-
"""Determine emotional state from case severity."""
|
| 137 |
-
difficulty = case_data.get("difficulty", "intermediate")
|
| 138 |
-
vitals = case_data.get("vital_signs", {})
|
| 139 |
-
|
| 140 |
-
hr = vitals.get("hr", 80)
|
| 141 |
-
spo2 = vitals.get("spo2", 98)
|
| 142 |
-
|
| 143 |
-
if difficulty == "advanced" or spo2 < 90 or hr > 130:
|
| 144 |
-
self.emotional_state = (
|
| 145 |
-
"Extremely distressed — crying, pleading with the doctor, "
|
| 146 |
-
"may become irrational or aggressive out of fear. "
|
| 147 |
-
"\"Doctor please inko bacha lo! Kuch bhi karo!\""
|
| 148 |
-
)
|
| 149 |
-
elif difficulty == "intermediate" or spo2 < 94 or hr > 110:
|
| 150 |
-
self.emotional_state = (
|
| 151 |
-
"Very worried — pacing, asking repeated questions, "
|
| 152 |
-
"may pressure the doctor for quick answers. "
|
| 153 |
-
"\"Doctor, kya hua hai inko? Serious toh nahi hai na?\""
|
| 154 |
-
)
|
| 155 |
-
elif hr > 100:
|
| 156 |
-
self.emotional_state = (
|
| 157 |
-
"Worried but cooperative — concerned, hovering near the patient, "
|
| 158 |
-
"providing information when asked. "
|
| 159 |
-
"\"Doctor sahab, hum bahut pareshan hain, batao kya karna hai.\""
|
| 160 |
-
)
|
| 161 |
-
else:
|
| 162 |
-
self.emotional_state = (
|
| 163 |
-
"Concerned but calm — cooperative, answers questions, "
|
| 164 |
-
"provides background information. "
|
| 165 |
-
"\"Ji doctor, aap poochiye, hum sab batayenge.\""
|
| 166 |
-
)
|
| 167 |
-
|
| 168 |
-
def get_system_prompt(self, case_context: dict) -> str:
|
| 169 |
-
info = {**self.family_info, **case_context}
|
| 170 |
-
info["emotional_state"] = self.emotional_state
|
| 171 |
-
base_prompt = FAMILY_SYSTEM_PROMPT.format(
|
| 172 |
-
relationship=info.get("relationship", "Mother"),
|
| 173 |
-
patient_age=info.get("patient_age", 45),
|
| 174 |
-
patient_gender=info.get("patient_gender", "Male"),
|
| 175 |
-
location=info.get("location", "Delhi"),
|
| 176 |
-
chief_complaint=info.get("chief_complaint", "unknown"),
|
| 177 |
-
presentation=info.get("presentation", ""),
|
| 178 |
-
history=info.get("history", ""),
|
| 179 |
-
family_history=info.get("family_history", "Not known"),
|
| 180 |
-
emotional_state=self.emotional_state,
|
| 181 |
-
)
|
| 182 |
-
|
| 183 |
-
if self.specialized_knowledge:
|
| 184 |
-
base_prompt += (
|
| 185 |
-
"\n\n=== YOUR FAMILY & CULTURAL KNOWLEDGE ===\n"
|
| 186 |
-
"Use this knowledge to realistically portray the family member's perspective. "
|
| 187 |
-
"Express medical concepts as a layperson would understand them — through worry, "
|
| 188 |
-
"cultural beliefs, and family experience, NOT medical terms.\n\n"
|
| 189 |
-
f"{self.specialized_knowledge}"
|
| 190 |
-
)
|
| 191 |
-
|
| 192 |
-
# Extra guardrail: reinforce no-technical-language rule
|
| 193 |
-
base_prompt += (
|
| 194 |
-
"\n\nCRITICAL REMINDER — LANGUAGE RULES:\n"
|
| 195 |
-
"- NEVER use medical terminology like: tachycardia, hypotension, differential diagnosis, "
|
| 196 |
-
"hemoglobin, platelet, bilateral, edema, auscultation, pathophysiology, etc.\n"
|
| 197 |
-
"- Instead say things like: dil tez dhadak raha hai, BP gir gaya, khoon ki kami, "
|
| 198 |
-
"pairo mein sujan, etc.\n"
|
| 199 |
-
"- You are an ordinary Indian family member, NOT a medical professional.\n"
|
| 200 |
-
"- If you reference something medical, describe it as a worried family member would: "
|
| 201 |
-
"\"doctor ne kuch test bola tha\", \"BP bahut low ho gaya\", \"sugar badh gayi hai\"\n"
|
| 202 |
-
"- NEVER sound like you read a medical textbook. Sound like a real person in distress."
|
| 203 |
-
)
|
| 204 |
-
|
| 205 |
-
return base_prompt
|
| 206 |
-
|
| 207 |
-
def get_fallback_response(self, message: str, case_context: dict) -> str:
|
| 208 |
-
msg = message.lower()
|
| 209 |
-
cc = self.family_info.get("chief_complaint", "problem").lower()
|
| 210 |
-
rel = self.relationship.lower()
|
| 211 |
-
|
| 212 |
-
# Emotional pressure / urgency
|
| 213 |
-
if any(w in msg for w in ["wait", "time", "kitna", "report", "result"]):
|
| 214 |
-
return (
|
| 215 |
-
"Doctor sahab, kitna aur wait karna padega? Hum subah se yahan baithe hain. "
|
| 216 |
-
"Inki haalat dekho na, bahut takleef mein hain. Please jaldi kuch karo!"
|
| 217 |
-
)
|
| 218 |
-
|
| 219 |
-
# History / background questions
|
| 220 |
-
if any(w in msg for w in ["history", "pehle", "before", "past", "earlier"]):
|
| 221 |
-
return (
|
| 222 |
-
f"Doctor, yeh {cc} pehle kabhi nahi hua tha itna. Thoda bahut hota tha, "
|
| 223 |
-
"hum sochte the apne aap theek ho jayega. Local doctor ke paas gaye the, "
|
| 224 |
-
"unhone injection diya aur goli di, 2-3 din theek raha phir wapas ho gaya."
|
| 225 |
-
)
|
| 226 |
-
|
| 227 |
-
# Family medical history
|
| 228 |
-
if any(w in msg for w in ["family", "gharwale", "parents", "mother", "father", "hereditary"]):
|
| 229 |
-
family_hist = self.family_info.get("family_history", "")
|
| 230 |
-
if family_hist and family_hist != "Not known":
|
| 231 |
-
return (
|
| 232 |
-
f"Haan doctor, ghar mein toh hai yeh sab. {family_hist}. "
|
| 233 |
-
"Humne socha nahi tha ki inko bhi ho jayega."
|
| 234 |
-
)
|
| 235 |
-
return (
|
| 236 |
-
"Doctor, ghar mein toh kisi ko kuch khaas nahi tha... "
|
| 237 |
-
"Inka papa ko thoda BP tha shayad, par woh bhi dawai lete the. "
|
| 238 |
-
"Aur kuch yaad nahi aa raha."
|
| 239 |
-
)
|
| 240 |
-
|
| 241 |
-
# Home remedies / what was tried
|
| 242 |
-
if any(w in msg for w in ["remedy", "treatment", "dawai", "medicine", "tried", "kya kiya"]):
|
| 243 |
-
return (
|
| 244 |
-
"Doctor, pehle humne haldi wala doodh diya, phir padosi ne bola ki Hajmola khilao. "
|
| 245 |
-
"Phir chemist se Crocin li, usse thoda aram mila par raat ko phir bura ho gaya. "
|
| 246 |
-
"Tab humne bade hospital aane ka socha."
|
| 247 |
-
)
|
| 248 |
-
|
| 249 |
-
# Cost concerns
|
| 250 |
-
if any(w in msg for w in ["cost", "kharcha", "paisa", "money", "expensive", "pay"]):
|
| 251 |
-
return (
|
| 252 |
-
"Doctor sahab, hum already private mein bahut kharcha kar chuke hain. "
|
| 253 |
-
"Isliye yahan aaye hain. Zyada mehnga test mat karwao please, "
|
| 254 |
-
"hum garib log hain... bas inko theek kar do."
|
| 255 |
-
)
|
| 256 |
-
|
| 257 |
-
# Diet / food questions
|
| 258 |
-
if any(w in msg for w in ["diet", "food", "khana", "kya khilaye", "eat"]):
|
| 259 |
-
return (
|
| 260 |
-
"Doctor, inko kya khila sakte hain? Doodh de sakte hain? "
|
| 261 |
-
"Chai toh peete hain roz, woh band karna padega kya? "
|
| 262 |
-
"Ghar ka khana laaye hain, thoda dal-chawal hai."
|
| 263 |
-
)
|
| 264 |
-
|
| 265 |
-
# Alcohol / smoking / sensitive topics
|
| 266 |
-
if any(w in msg for w in ["smoke", "drink", "sharab", "alcohol", "cigarette", "tobacco", "gutka"]):
|
| 267 |
-
return (
|
| 268 |
-
"Nahi nahi doctor, yeh sab kuch nahi karte... "
|
| 269 |
-
"matlab... kabhi kabhi friends ke saath thodi beer pi lete hain, "
|
| 270 |
-
"par woh toh sab peete hain na. Zyada nahi peete yeh."
|
| 271 |
-
)
|
| 272 |
-
|
| 273 |
-
# Work / livelihood concerns
|
| 274 |
-
if any(w in msg for w in ["work", "kaam", "job", "naukri", "chutti"]):
|
| 275 |
-
return (
|
| 276 |
-
"Doctor, yeh akele kamane wale hain ghar mein. "
|
| 277 |
-
"5 din se kaam pe nahi gaye, boss bol raha hai ki chutti nahi milegi aur. "
|
| 278 |
-
"Jaldi theek karo please, nahi toh naukri chali jayegi."
|
| 279 |
-
)
|
| 280 |
-
|
| 281 |
-
# Questioning treatment / investigations
|
| 282 |
-
if any(w in msg for w in ["injection", "test", "investigation", "blood", "scan"]):
|
| 283 |
-
return (
|
| 284 |
-
"Doctor, itne sare test zaruri hain kya? Injection lagaoge? "
|
| 285 |
-
"Yeh darte hain injection se. Private wale doctor ne bhi 5-6 test karwaye the, "
|
| 286 |
-
"kuch nahi nikla. Please batao kya zaruri hai."
|
| 287 |
-
)
|
| 288 |
-
|
| 289 |
-
# Blaming the patient
|
| 290 |
-
if any(w in msg for w in ["why", "kyun", "reason", "cause", "wajah"]):
|
| 291 |
-
return (
|
| 292 |
-
f"Doctor, maine bahut baar bola tha ki apna khayal rakho. "
|
| 293 |
-
"Khana time pe nahi khate, raat ko der tak jagte hain, "
|
| 294 |
-
"stress lete hain. Par sunte hi nahi hain humari!"
|
| 295 |
-
)
|
| 296 |
-
|
| 297 |
-
# Default — general worried family response
|
| 298 |
-
return (
|
| 299 |
-
f"Doctor sahab, please batao inko kya hua hai? "
|
| 300 |
-
f"Yeh {cc} bahut badh gaya hai. Padosi ne bola ki bade hospital chalo, "
|
| 301 |
-
"isliye aaye hain. Aap kuch karo na please, hum bahut pareshan hain!"
|
| 302 |
-
)
|
| 303 |
-
|
| 304 |
-
def get_initial_context(self) -> dict:
|
| 305 |
-
"""Generate the family member's first statement when they arrive."""
|
| 306 |
-
cc_original = self.family_info.get("chief_complaint", "problem")
|
| 307 |
-
age = self.family_info.get("patient_age", 45)
|
| 308 |
-
gender = self.family_info.get("patient_gender", "Male")
|
| 309 |
-
location = self.family_info.get("location", "Delhi")
|
| 310 |
-
patient_ref = "yeh" if self.relationship in ("Mother", "Father", "Wife", "Husband") else "mere papa" if self.relationship == "Son" else "meri mummy" if self.relationship == "Daughter" else "yeh"
|
| 311 |
-
|
| 312 |
-
# Get family-friendly description of symptoms
|
| 313 |
-
family_description = get_family_friendly_description(cc_original, "kuch din")
|
| 314 |
-
|
| 315 |
-
greetings = {
|
| 316 |
-
"Mother": (
|
| 317 |
-
f"Doctor sahab, namaste! Mera bachcha bahut bimar hai, {family_description}. "
|
| 318 |
-
f"Local doctor ke paas bhi gaye the par kuch nahi hua. "
|
| 319 |
-
f"Please doctor, inka kuch karo! Hum bahut pareshan hain."
|
| 320 |
-
),
|
| 321 |
-
"Father": (
|
| 322 |
-
f"Namaste doctor sahab. Yeh mera {('beta' if gender == 'Male' else 'beti')} hai, "
|
| 323 |
-
f"{age} saal ka hai. {family_description}. "
|
| 324 |
-
f"Pehle private mein dikhaya, bahut kharcha hua, ab yahan laaye hain. Dekhiye please."
|
| 325 |
-
),
|
| 326 |
-
"Wife": (
|
| 327 |
-
f"Doctor sahab, namaste. Yeh mere husband hain, {family_description}. "
|
| 328 |
-
f"Kaam pe bhi nahi ja pa rahe. Maine bola doctor ke paas chalo, "
|
| 329 |
-
f"par sunte nahi. Aaj bahut zyada ho gaya toh laaye hain. Please dekh lo."
|
| 330 |
-
),
|
| 331 |
-
"Husband": (
|
| 332 |
-
f"Doctor sahab, meri wife ko {family_description}. "
|
| 333 |
-
f"Ghar pe kadha pila rahe the, thoda aram tha par ab bahut badh gaya hai. "
|
| 334 |
-
f"Please jaldi check karo, bacche ghar pe akele hain."
|
| 335 |
-
),
|
| 336 |
-
"Son": (
|
| 337 |
-
f"Namaste doctor. Yeh mere {('papa' if gender == 'Male' else 'mummy')} hain, "
|
| 338 |
-
f"age {age} hai. {family_description}. "
|
| 339 |
-
f"Pehle batate nahi the, aaj achanak tabiyat bigad gayi toh le aaye. "
|
| 340 |
-
f"Please achhe se check kar lo."
|
| 341 |
-
),
|
| 342 |
-
"Daughter": (
|
| 343 |
-
f"Doctor sahab, yeh meri {('mummy' if gender == 'Female' else 'papa')} hain. "
|
| 344 |
-
f"{family_description}. Dawai lene mein aanakaani karte hain, "
|
| 345 |
-
f"humne zabardasti hospital laaya hai. Please inka dhyan se treatment karo."
|
| 346 |
-
),
|
| 347 |
-
}
|
| 348 |
-
|
| 349 |
-
content = greetings.get(
|
| 350 |
-
self.relationship,
|
| 351 |
-
(
|
| 352 |
-
f"Doctor sahab, namaste. {patient_ref} ko {cc_original} hai. "
|
| 353 |
-
f"Bahut pareshan hain hum, please jaldi dekh lo."
|
| 354 |
-
),
|
| 355 |
-
)
|
| 356 |
-
|
| 357 |
-
return {
|
| 358 |
-
"agent_type": self.agent_type,
|
| 359 |
-
"display_name": self.display_name,
|
| 360 |
-
"content": content,
|
| 361 |
-
"relationship": self.relationship,
|
| 362 |
-
"emotional_state": self.emotional_state,
|
| 363 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/knowledge_builder.py
DELETED
|
@@ -1,728 +0,0 @@
|
|
| 1 |
-
"""Dynamic knowledge builder — uses RAG + Claude to create role-specific expertise per case.
|
| 2 |
-
|
| 3 |
-
ACCURACY PRINCIPLES:
|
| 4 |
-
1. ONLY state facts that are grounded in the RAG corpus or well-established medical knowledge
|
| 5 |
-
2. Every clinical claim must be traceable to a source (corpus case, named guideline, or textbook)
|
| 6 |
-
3. When uncertain, explicitly say so — "This needs verification" is better than a confident wrong answer
|
| 7 |
-
4. Indian hospital context must reflect REAL govt hospital workflows, not idealized textbook scenarios
|
| 8 |
-
5. Agents must never invent guidelines, statistics, or protocols
|
| 9 |
-
|
| 10 |
-
Source hierarchy (most to least trusted):
|
| 11 |
-
- Named Indian guidelines: ICMR, API, CSI, INASL, ISCCM, NVBDCP/NCVBDC, NACO, IAP, FOGSI
|
| 12 |
-
- Indian medical journals: JAPI, IJMR, Indian Heart Journal, Indian J Gastroenterology
|
| 13 |
-
- RAG corpus cases (with source attribution)
|
| 14 |
-
- Standard medical textbooks: Harrison's, Robbins, Bailey & Love, OP Ghai, DC Dutta
|
| 15 |
-
- Well-established clinical consensus (must be labelled as such)
|
| 16 |
-
"""
|
| 17 |
-
|
| 18 |
-
import logging
|
| 19 |
-
import os
|
| 20 |
-
from typing import Optional
|
| 21 |
-
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 22 |
-
import time
|
| 23 |
-
|
| 24 |
-
import anthropic
|
| 25 |
-
|
| 26 |
-
from app.core.rag.vector_store import MedicalVectorStore
|
| 27 |
-
from app.core.rag.retriever import MedicalRetriever
|
| 28 |
-
|
| 29 |
-
logger = logging.getLogger(__name__)
|
| 30 |
-
|
| 31 |
-
# ---- Grounding rules injected into every synthesis prompt ----
|
| 32 |
-
|
| 33 |
-
SOURCE_GROUNDING_RULES = """
|
| 34 |
-
STRICT ACCURACY RULES — VIOLATIONS ARE UNACCEPTABLE:
|
| 35 |
-
|
| 36 |
-
1. ONLY USE INFORMATION FROM:
|
| 37 |
-
a) The reference material provided below (RAG corpus)
|
| 38 |
-
b) Well-established medical facts from standard Indian textbooks (Harrison's, Robbins, Park's PSM, OP Ghai)
|
| 39 |
-
c) Named Indian guidelines you are CERTAIN exist: ICMR, API, CSI, INASL, ISCCM, NVBDCP, NACO, IAP, FOGSI, NHM
|
| 40 |
-
d) Named Indian journals: JAPI, IJMR, Indian Heart Journal
|
| 41 |
-
|
| 42 |
-
2. NEVER:
|
| 43 |
-
- Invent a guideline or protocol that doesn't exist
|
| 44 |
-
- Cite a specific statistic unless it's in the reference material or you are >95% confident
|
| 45 |
-
- State "ICMR recommends X" unless you are certain ICMR actually recommends X
|
| 46 |
-
- Fabricate drug dosages — if unsure, say "dose per institutional protocol"
|
| 47 |
-
- Assume resource availability — Indian govt hospitals often lack CT, MRI, certain drugs
|
| 48 |
-
|
| 49 |
-
3. WHEN UNCERTAIN, SAY SO:
|
| 50 |
-
- "Per standard teaching hospital protocol..." (when exact guideline unclear)
|
| 51 |
-
- "Commonly practiced in Indian hospitals..." (when no specific guideline)
|
| 52 |
-
- "Verify current dosing with hospital formulary" (when dose uncertain)
|
| 53 |
-
- "Exact prevalence data varies by region" (when stats uncertain)
|
| 54 |
-
|
| 55 |
-
4. SOURCE ATTRIBUTION:
|
| 56 |
-
- Tag each clinical fact with its source: [Corpus], [Harrison's], [API Guidelines], [Clinical consensus]
|
| 57 |
-
- If a fact comes from the reference material below, tag it [Corpus: CASE-ID] where possible
|
| 58 |
-
- If it's standard textbook knowledge, tag it [Textbook]
|
| 59 |
-
- If it's from a named guideline, tag it [Guideline: NAME]
|
| 60 |
-
|
| 61 |
-
5. INDIAN HOSPITAL REALITY (not textbook fantasy):
|
| 62 |
-
- Govt hospital: 1 doctor per 50+ patients, overworked nurses, limited beds
|
| 63 |
-
- Often no CT/MRI — rely on clinical skills + basic labs + X-ray + ultrasound
|
| 64 |
-
- Drug availability: generic drugs, NLEM (National List of Essential Medicines)
|
| 65 |
-
- Referral system: PHC → CHC → District Hospital → Medical College Hospital
|
| 66 |
-
- Common constraints: power cuts, blood bank shortages, delayed lab reports
|
| 67 |
-
- Patient reality: delayed presentation (came after trying home remedies/local doctor/ayurvedic)
|
| 68 |
-
"""
|
| 69 |
-
|
| 70 |
-
# ---- Role-specific synthesis prompts ----
|
| 71 |
-
|
| 72 |
-
PATIENT_KNOWLEDGE_PROMPT = """You are building a PATIENT EXPERIENCE PROFILE for a clinical simulation agent.
|
| 73 |
-
|
| 74 |
-
{grounding_rules}
|
| 75 |
-
|
| 76 |
-
REFERENCE MATERIAL FROM MEDICAL CORPUS:
|
| 77 |
-
{rag_context}
|
| 78 |
-
|
| 79 |
-
CURRENT CASE:
|
| 80 |
-
- Diagnosis: {diagnosis}
|
| 81 |
-
- Specialty: {specialty}
|
| 82 |
-
- Chief complaint: {chief_complaint}
|
| 83 |
-
- Presentation: {presentation}
|
| 84 |
-
|
| 85 |
-
Create a patient experience profile grounded ONLY in the reference material and established clinical knowledge.
|
| 86 |
-
Write in second person ("you feel...") as instructions for the patient agent:
|
| 87 |
-
|
| 88 |
-
1. SYMPTOM EXPERIENCE [Source-tagged]:
|
| 89 |
-
How this condition actually feels. Use Hinglish — "pet mein jalan", "saans phoolna", "haath pair sunn hona".
|
| 90 |
-
Be specific to THIS diagnosis from the reference material. Tag sources.
|
| 91 |
-
|
| 92 |
-
2. SYMPTOM TIMELINE [Source-tagged]:
|
| 93 |
-
Realistic timeline based on the case presentation. Indian patients typically delay — "pehle socha gas hai",
|
| 94 |
-
"local doctor ne antacid di par kaam nahi kiya", "2-3 din wait kiya phir aaya".
|
| 95 |
-
|
| 96 |
-
3. PATIENT BACKGROUND [Based on Indian demographics]:
|
| 97 |
-
Realistic for the patient's age/gender/location from the case data.
|
| 98 |
-
- Diet: based on region (North Indian = roti/ghee/chai, South = rice/sambar, etc.)
|
| 99 |
-
- Habits: common risk factors for this condition in Indian population
|
| 100 |
-
- Home remedies tried: "haldi wala doodh piya", "Hajmola khaya", "jhadu-phoonk karwaya"
|
| 101 |
-
Only include what's relevant to THIS condition.
|
| 102 |
-
|
| 103 |
-
4. WHAT PATIENT KNOWS vs DOESN'T:
|
| 104 |
-
- Knows: symptoms they feel, what local doctor said, what family members suggested
|
| 105 |
-
- Doesn't know: medical terms, lab values, their actual diagnosis
|
| 106 |
-
- Misconceptions: common ones for this condition in India (e.g., "heart attack = gas")
|
| 107 |
-
|
| 108 |
-
5. EMOTIONAL STATE:
|
| 109 |
-
Based on case severity and Indian cultural context.
|
| 110 |
-
- Male patients may minimize symptoms ("kuch nahi hoga")
|
| 111 |
-
- Female patients may worry about family impact ("bacche kaun dekhega")
|
| 112 |
-
- Elderly may be fatalistic ("upar wale ki marzi")
|
| 113 |
-
|
| 114 |
-
6. RESPONSES TO HISTORY QUESTIONS [Source-tagged]:
|
| 115 |
-
Grounded in the case presentation data. Only answer what this patient would realistically know.
|
| 116 |
-
If the student asks about something not in the case data, the patient should say "pata nahi doctor"."""
|
| 117 |
-
|
| 118 |
-
NURSE_KNOWLEDGE_PROMPT = """You are building a NURSING PROTOCOL BRIEF for a clinical simulation agent in an Indian government hospital.
|
| 119 |
-
|
| 120 |
-
{grounding_rules}
|
| 121 |
-
|
| 122 |
-
REFERENCE MATERIAL FROM MEDICAL CORPUS:
|
| 123 |
-
{rag_context}
|
| 124 |
-
|
| 125 |
-
CURRENT CASE:
|
| 126 |
-
- Diagnosis: {diagnosis}
|
| 127 |
-
- Specialty: {specialty}
|
| 128 |
-
- Vital signs: BP {bp}, HR {hr}, RR {rr}, Temp {temp}°C, SpO2 {spo2}%
|
| 129 |
-
- Chief complaint: {chief_complaint}
|
| 130 |
-
|
| 131 |
-
Create a nursing protocol brief. EVERY clinical action must be grounded in established protocol.
|
| 132 |
-
|
| 133 |
-
1. TRIAGE ASSESSMENT [Source-tagged]:
|
| 134 |
-
- Category based on vitals (use standard triage: RED/YELLOW/GREEN)
|
| 135 |
-
- RED FLAGS specific to this condition from reference material
|
| 136 |
-
- What to communicate to the casualty medical officer (CMO)
|
| 137 |
-
|
| 138 |
-
2. MONITORING PRIORITIES [Source-tagged]:
|
| 139 |
-
- What parameters, how often (e.g., "vitals q15min if unstable, q1h if stable")
|
| 140 |
-
- Specific to THIS condition — what deterioration looks like
|
| 141 |
-
- What to escalate immediately vs. document for rounds
|
| 142 |
-
|
| 143 |
-
3. IMMEDIATE NURSING ACTIONS [Practical Indian hospital]:
|
| 144 |
-
- What's realistically available: pulse oximeter, BP cuff, thermometer, glucometer
|
| 145 |
-
- IV access: what gauge, what fluid (NS/RL — what's available)
|
| 146 |
-
- Positioning: specific to condition (e.g., propped up for cardiac, left lateral for liver abscess drainage)
|
| 147 |
-
- What to prepare from the ward stock vs. what needs pharmacy indent
|
| 148 |
-
|
| 149 |
-
4. INVESTIGATION PREPARATION [Practical]:
|
| 150 |
-
- Standard labs available in govt hospital: CBC, RFT, LFT, blood sugar, urine routine
|
| 151 |
-
- What needs special request: troponin, d-dimer, ABG, blood culture
|
| 152 |
-
- Imaging: X-ray (available), ultrasound (may need radiology call), CT/MRI (referral)
|
| 153 |
-
- Sample collection: which tubes, timing, special handling (e.g., ABG on ice)
|
| 154 |
-
|
| 155 |
-
5. MEDICATION AWARENESS [NLEM-grounded]:
|
| 156 |
-
- Only drugs commonly available in Indian govt hospitals (NLEM preferred)
|
| 157 |
-
- Doses only if from reference material — otherwise "as per order"
|
| 158 |
-
- Route, preparation, rate if IV
|
| 159 |
-
- Contraindications the nurse must verify (allergies, pregnancy, renal function)
|
| 160 |
-
|
| 161 |
-
6. WARD WORKFLOW [Real Indian hospital]:
|
| 162 |
-
- Duty handover communication (SBAR format)
|
| 163 |
-
- Documentation: what to note in case sheet
|
| 164 |
-
- When to call the senior resident vs. attend to yourself
|
| 165 |
-
- Night duty considerations: limited staff, skeleton lab services
|
| 166 |
-
|
| 167 |
-
7. EMERGENCY PREPARATION [Condition-specific]:
|
| 168 |
-
- Crash cart check: what drugs/equipment for THIS condition
|
| 169 |
-
- Nearest higher center for referral if needed
|
| 170 |
-
- Blood bank: crossmatch if bleeding risk"""
|
| 171 |
-
|
| 172 |
-
SENIOR_KNOWLEDGE_PROMPT = """You are building a TEACHING & DIAGNOSTIC EXPERTISE BRIEF for a senior consultant agent in an Indian medical college hospital.
|
| 173 |
-
|
| 174 |
-
{grounding_rules}
|
| 175 |
-
|
| 176 |
-
REFERENCE MATERIAL FROM MEDICAL CORPUS:
|
| 177 |
-
{rag_context}
|
| 178 |
-
|
| 179 |
-
CURRENT CASE:
|
| 180 |
-
- Diagnosis: {diagnosis}
|
| 181 |
-
- Specialty: {specialty}
|
| 182 |
-
- Difficulty: {difficulty}
|
| 183 |
-
- Chief complaint: {chief_complaint}
|
| 184 |
-
- Key differentials: {differentials}
|
| 185 |
-
- Learning points: {learning_points}
|
| 186 |
-
|
| 187 |
-
Create a teaching expertise brief. Every fact must be source-tagged.
|
| 188 |
-
|
| 189 |
-
1. DIAGNOSTIC ALGORITHM [Source-tagged]:
|
| 190 |
-
Step-by-step reasoning path from the reference material:
|
| 191 |
-
- Presenting complaint → What to consider first (life-threatening causes)
|
| 192 |
-
- History clues → Which differential they support/refute
|
| 193 |
-
- Examination findings → Pathognomonic signs if any
|
| 194 |
-
- Investigation interpretation → Confirmatory test
|
| 195 |
-
- "GOLDEN FINDING": The single most specific clue [Source: which reference case/guideline]
|
| 196 |
-
|
| 197 |
-
2. DIFFERENTIAL DIAGNOSIS MATRIX [Source-tagged]:
|
| 198 |
-
From the reference material, for each major differential:
|
| 199 |
-
| Differential | Supporting findings | Against | Key distinguishing test |
|
| 200 |
-
ONLY include differentials mentioned in the reference material or the case data.
|
| 201 |
-
|
| 202 |
-
3. INDIAN EPIDEMIOLOGY [Source-tagged, verified only]:
|
| 203 |
-
- ONLY cite statistics that are in the reference material
|
| 204 |
-
- If not in reference material, use hedged language: "India has a significant burden of..."
|
| 205 |
-
- Regional patterns if mentioned in corpus
|
| 206 |
-
- Do NOT invent prevalence numbers
|
| 207 |
-
|
| 208 |
-
4. INDIAN GUIDELINES [Only if actually exist]:
|
| 209 |
-
- Name the specific guideline document if it exists
|
| 210 |
-
- If unsure whether a guideline exists, say "Per standard teaching hospital practice..."
|
| 211 |
-
- Reference: API, CSI, ISCCM, INASL, ICMR — ONLY if you are certain they have guidelines for THIS condition
|
| 212 |
-
- For common conditions: reference standard textbook approach (Harrison's, Sabiston, etc.)
|
| 213 |
-
|
| 214 |
-
5. NEET-PG/EXAM RELEVANCE [Verified]:
|
| 215 |
-
- Classic "one-liner" descriptions (well-established ones only)
|
| 216 |
-
- Question patterns: "A patient presents with X, Y, Z — diagnosis?"
|
| 217 |
-
- High-yield facts from the reference material's learning points
|
| 218 |
-
- ONLY include exam facts you are certain are correct
|
| 219 |
-
|
| 220 |
-
6. SOCRATIC TEACHING PLAN [Based on case data]:
|
| 221 |
-
- 3 progressive questions grounded in THIS case's findings
|
| 222 |
-
- Hints that point to specific findings in the case data (not generic)
|
| 223 |
-
- Redirect strategies if student picks wrong differential (using case evidence)
|
| 224 |
-
- Post-diagnosis teaching: pathophysiology connecting ALL findings
|
| 225 |
-
|
| 226 |
-
7. MANAGEMENT [Source-tagged, Indian context]:
|
| 227 |
-
- First-line: only drugs available in NLEM or commonly stocked
|
| 228 |
-
- If advanced treatment needed (e.g., PCI for STEMI): note referral requirements
|
| 229 |
-
- Monitoring plan: what's realistic in a ward with 1 nurse per 20 patients
|
| 230 |
-
- Disposition: admission criteria, discharge criteria, follow-up plan
|
| 231 |
-
- Cost-consciousness: generic drugs, government scheme eligibility (PMJAY, etc.)"""
|
| 232 |
-
|
| 233 |
-
FAMILY_KNOWLEDGE_PROMPT = """You are building a FAMILY MEMBER PERSPECTIVE BRIEF for a clinical simulation agent in an Indian hospital.
|
| 234 |
-
|
| 235 |
-
{grounding_rules}
|
| 236 |
-
|
| 237 |
-
REFERENCE MATERIAL FROM MEDICAL CORPUS:
|
| 238 |
-
{rag_context}
|
| 239 |
-
|
| 240 |
-
CURRENT CASE:
|
| 241 |
-
- Diagnosis: {diagnosis}
|
| 242 |
-
- Chief complaint: {chief_complaint}
|
| 243 |
-
- Patient age/gender: From case data
|
| 244 |
-
- Location: Indian government hospital
|
| 245 |
-
|
| 246 |
-
Create a family member perspective brief. Focus on emotional and cultural context.
|
| 247 |
-
|
| 248 |
-
1. FAMILY UNDERSTANDING [Lay perspective]:
|
| 249 |
-
- What the family knows about the patient's condition (in lay terms)
|
| 250 |
-
- Misconceptions based on WhatsApp forwards, neighbors' advice
|
| 251 |
-
- Past experiences with similar symptoms in family/community
|
| 252 |
-
|
| 253 |
-
2. EMOTIONAL STATE:
|
| 254 |
-
- Anxiety level based on case severity
|
| 255 |
-
- Financial worries about treatment costs
|
| 256 |
-
- Work/livelihood concerns
|
| 257 |
-
- Family dynamics (who's the decision maker, gender roles)
|
| 258 |
-
|
| 259 |
-
3. CULTURAL CONTEXT:
|
| 260 |
-
- Home remedies already tried
|
| 261 |
-
- Religious/spiritual beliefs affecting treatment
|
| 262 |
-
- Dietary habits and restrictions
|
| 263 |
-
- Regional customs relevant to healthcare
|
| 264 |
-
|
| 265 |
-
4. QUESTIONS & CONCERNS:
|
| 266 |
-
- Cost of treatment ("kitna kharcha aayega?")
|
| 267 |
-
- Duration of hospital stay
|
| 268 |
-
- Who will take care of home/children
|
| 269 |
-
- Whether private hospital would be better"""
|
| 270 |
-
|
| 271 |
-
LAB_TECH_KNOWLEDGE_PROMPT = """You are building a LABORATORY OPERATIONS BRIEF for a lab technician agent in an Indian government hospital.
|
| 272 |
-
|
| 273 |
-
{grounding_rules}
|
| 274 |
-
|
| 275 |
-
REFERENCE MATERIAL FROM MEDICAL CORPUS:
|
| 276 |
-
{rag_context}
|
| 277 |
-
|
| 278 |
-
CURRENT CASE:
|
| 279 |
-
- Diagnosis: {diagnosis}
|
| 280 |
-
- Specialty: {specialty}
|
| 281 |
-
- Likely investigations needed
|
| 282 |
-
|
| 283 |
-
Create a lab operations brief for realistic Indian hospital lab workflow.
|
| 284 |
-
|
| 285 |
-
1. INVESTIGATION PRIORITIES [Based on diagnosis]:
|
| 286 |
-
- Which tests are most critical for this condition
|
| 287 |
-
- Sample requirements and special handling
|
| 288 |
-
- Turnaround times (realistic for govt hospital)
|
| 289 |
-
|
| 290 |
-
2. SAMPLE COLLECTION:
|
| 291 |
-
- Proper tubes/containers for each test
|
| 292 |
-
- Pre-analytical requirements (fasting, timing)
|
| 293 |
-
- Common collection errors to avoid
|
| 294 |
-
|
| 295 |
-
3. LAB CONSTRAINTS [Indian govt hospital reality]:
|
| 296 |
-
- Tests available in-house vs outsourced
|
| 297 |
-
- Weekend/night availability limitations
|
| 298 |
-
- Machine downtime issues
|
| 299 |
-
- Reagent stock situations
|
| 300 |
-
|
| 301 |
-
4. RESULT INTERPRETATION HINTS:
|
| 302 |
-
- Critical values to flag immediately
|
| 303 |
-
- Common interferences or artifacts
|
| 304 |
-
- When to suggest repeat sampling"""
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
# Verified Indian medical source patterns for confidence classification
|
| 308 |
-
VERIFIED_SOURCES = {
|
| 309 |
-
"high_confidence": [
|
| 310 |
-
"ICMR", "NVBDCP", "NCVBDC", "NACO", "INASL", "ISCCM", "CSI", "IAP",
|
| 311 |
-
"FOGSI", "NHM", "API Guidelines", "National Snakebite Protocol",
|
| 312 |
-
"RNTCP", "NTEP", "NLEM",
|
| 313 |
-
],
|
| 314 |
-
"medium_confidence": [
|
| 315 |
-
"JAPI", "IJMR", "Indian Heart Journal", "Indian Journal",
|
| 316 |
-
"Indian J Gastroenterology", "ISG", "NDMA",
|
| 317 |
-
],
|
| 318 |
-
"textbook_grade": [
|
| 319 |
-
"Harrison", "Robbins", "Park", "OP Ghai", "DC Dutta",
|
| 320 |
-
"Bailey", "Sabiston", "Nelson", "Schwartz",
|
| 321 |
-
],
|
| 322 |
-
}
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
def classify_source_confidence(source_str: str) -> str:
|
| 326 |
-
"""Classify a source string into a confidence tier.
|
| 327 |
-
|
| 328 |
-
Returns: 'verified_guideline', 'indian_journal', 'textbook', 'corpus_case', or 'unverified'
|
| 329 |
-
"""
|
| 330 |
-
if not source_str:
|
| 331 |
-
return "unverified"
|
| 332 |
-
src = source_str.upper()
|
| 333 |
-
for keyword in VERIFIED_SOURCES["high_confidence"]:
|
| 334 |
-
if keyword.upper() in src:
|
| 335 |
-
return "verified_guideline"
|
| 336 |
-
for keyword in VERIFIED_SOURCES["medium_confidence"]:
|
| 337 |
-
if keyword.upper() in src:
|
| 338 |
-
return "indian_journal"
|
| 339 |
-
for keyword in VERIFIED_SOURCES["textbook_grade"]:
|
| 340 |
-
if keyword.upper() in src:
|
| 341 |
-
return "textbook"
|
| 342 |
-
if "case" in source_str.lower() or "series" in source_str.lower():
|
| 343 |
-
return "corpus_case"
|
| 344 |
-
return "unverified"
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
class DynamicKnowledgeBuilder:
|
| 348 |
-
"""Builds role-specific medical expertise dynamically using RAG + Claude synthesis.
|
| 349 |
-
|
| 350 |
-
Key principle: ACCURACY OVER COMPREHENSIVENESS.
|
| 351 |
-
Better to say less that's correct than more that's wrong.
|
| 352 |
-
Every fact must trace to: corpus, named guideline, or established textbook.
|
| 353 |
-
"""
|
| 354 |
-
|
| 355 |
-
def __init__(self):
|
| 356 |
-
self.vector_store = MedicalVectorStore()
|
| 357 |
-
self.retriever = MedicalRetriever(self.vector_store)
|
| 358 |
-
|
| 359 |
-
self.api_key = os.environ.get("ANTHROPIC_API_KEY")
|
| 360 |
-
self.client: Optional[anthropic.Anthropic] = None
|
| 361 |
-
if self.api_key and self.api_key != "sk-ant-your-key-here":
|
| 362 |
-
try:
|
| 363 |
-
self.client = anthropic.Anthropic(api_key=self.api_key)
|
| 364 |
-
except Exception as e:
|
| 365 |
-
logger.warning(f"KnowledgeBuilder Claude init failed: {e}")
|
| 366 |
-
|
| 367 |
-
# Cache synthesized knowledge: {(case_id, role): knowledge_str}
|
| 368 |
-
self._cache: dict[tuple[str, str], str] = {}
|
| 369 |
-
|
| 370 |
-
def build_knowledge(self, case_data: dict, role: str) -> str:
|
| 371 |
-
"""Build role-specific knowledge for a case.
|
| 372 |
-
|
| 373 |
-
Args:
|
| 374 |
-
case_data: The full case dict (with id, diagnosis, specialty, etc.)
|
| 375 |
-
role: One of 'patient', 'nurse', 'senior_doctor', 'family', 'lab_tech'
|
| 376 |
-
|
| 377 |
-
Returns:
|
| 378 |
-
Synthesized knowledge string to inject into the agent's system prompt.
|
| 379 |
-
"""
|
| 380 |
-
case_id = case_data.get("id", "unknown")
|
| 381 |
-
cache_key = (case_id, role)
|
| 382 |
-
|
| 383 |
-
# Return cached if available
|
| 384 |
-
if cache_key in self._cache:
|
| 385 |
-
logger.info(f"Knowledge cache hit: {role} for case {case_id}")
|
| 386 |
-
return self._cache[cache_key]
|
| 387 |
-
|
| 388 |
-
# Step 1: Gather RAG context with source metadata
|
| 389 |
-
rag_context, sources = self._gather_rag_context(case_data, role)
|
| 390 |
-
|
| 391 |
-
# Step 2: Synthesize with Claude (strict grounding)
|
| 392 |
-
knowledge = self._synthesize_knowledge(case_data, role, rag_context)
|
| 393 |
-
|
| 394 |
-
# Step 3: Append source manifest with confidence levels
|
| 395 |
-
if knowledge and sources:
|
| 396 |
-
source_manifest = "\n\n=== SOURCES USED (with confidence) ===\n"
|
| 397 |
-
for src in sources:
|
| 398 |
-
conf = src.get("confidence", "unverified").upper()
|
| 399 |
-
source_manifest += (
|
| 400 |
-
f"- [{src['case_id']}] {src['title']} "
|
| 401 |
-
f"(Source: {src['source']}) "
|
| 402 |
-
f"[{src['chunk_type']}] "
|
| 403 |
-
f"[Confidence: {conf}]\n"
|
| 404 |
-
)
|
| 405 |
-
source_manifest += (
|
| 406 |
-
"\nNOTE: Prioritize HIGH CONFIDENCE sources. "
|
| 407 |
-
"For LOW/UNVERIFIED sources, hedge your language.\n"
|
| 408 |
-
)
|
| 409 |
-
knowledge += source_manifest
|
| 410 |
-
|
| 411 |
-
# Step 4: Cache and return
|
| 412 |
-
if knowledge:
|
| 413 |
-
self._cache[cache_key] = knowledge
|
| 414 |
-
logger.info(f"Built {role} knowledge for case {case_id} ({len(knowledge)} chars, {len(sources)} sources)")
|
| 415 |
-
|
| 416 |
-
return knowledge or ""
|
| 417 |
-
|
| 418 |
-
def build_all_agent_knowledge(self, case_data: dict) -> dict[str, str]:
|
| 419 |
-
"""Build knowledge for ALL 5 agents in PARALLEL using ThreadPoolExecutor.
|
| 420 |
-
|
| 421 |
-
This is 5x faster than sequential building since all Claude API calls run concurrently.
|
| 422 |
-
|
| 423 |
-
Args:
|
| 424 |
-
case_data: The full case dict
|
| 425 |
-
|
| 426 |
-
Returns:
|
| 427 |
-
Dict mapping role -> knowledge string for all 5 agents
|
| 428 |
-
"""
|
| 429 |
-
start_time = time.time()
|
| 430 |
-
case_id = case_data.get("id", "unknown")
|
| 431 |
-
roles = ["patient", "nurse", "senior_doctor", "family", "lab_tech"]
|
| 432 |
-
|
| 433 |
-
# Check cache first
|
| 434 |
-
all_cached = True
|
| 435 |
-
cached_knowledge = {}
|
| 436 |
-
for role in roles:
|
| 437 |
-
cache_key = (case_id, role)
|
| 438 |
-
if cache_key in self._cache:
|
| 439 |
-
cached_knowledge[role] = self._cache[cache_key]
|
| 440 |
-
else:
|
| 441 |
-
all_cached = False
|
| 442 |
-
break
|
| 443 |
-
|
| 444 |
-
if all_cached:
|
| 445 |
-
logger.info(f"All agent knowledge cached for case {case_id}")
|
| 446 |
-
return cached_knowledge
|
| 447 |
-
|
| 448 |
-
logger.info(f"Building knowledge for all 5 agents in parallel for case {case_id}")
|
| 449 |
-
|
| 450 |
-
knowledge_results = {}
|
| 451 |
-
|
| 452 |
-
# Use ThreadPoolExecutor to run all 5 knowledge builds in parallel
|
| 453 |
-
with ThreadPoolExecutor(max_workers=5) as executor:
|
| 454 |
-
# Submit all tasks
|
| 455 |
-
future_to_role = {
|
| 456 |
-
executor.submit(self.build_knowledge, case_data, role): role
|
| 457 |
-
for role in roles
|
| 458 |
-
}
|
| 459 |
-
|
| 460 |
-
# Collect results as they complete
|
| 461 |
-
for future in as_completed(future_to_role):
|
| 462 |
-
role = future_to_role[future]
|
| 463 |
-
try:
|
| 464 |
-
knowledge = future.result(timeout=15) # 15 second timeout per agent (Opus adaptive is fast)
|
| 465 |
-
knowledge_results[role] = knowledge
|
| 466 |
-
logger.info(f"Completed knowledge for {role} ({len(knowledge)} chars)")
|
| 467 |
-
except Exception as e:
|
| 468 |
-
logger.error(f"Failed to build knowledge for {role}: {e}")
|
| 469 |
-
# Use fallback for this role
|
| 470 |
-
knowledge_results[role] = self._fallback_knowledge(case_data, role, "")
|
| 471 |
-
|
| 472 |
-
elapsed = time.time() - start_time
|
| 473 |
-
logger.info(f"Built knowledge for all 5 agents in {elapsed:.2f}s (parallel execution)")
|
| 474 |
-
|
| 475 |
-
return knowledge_results
|
| 476 |
-
|
| 477 |
-
def _gather_rag_context(self, case_data: dict, role: str) -> tuple[str, list[dict]]:
|
| 478 |
-
"""Query RAG for role-appropriate medical knowledge.
|
| 479 |
-
|
| 480 |
-
Returns (context_text, source_list) where source_list tracks provenance.
|
| 481 |
-
"""
|
| 482 |
-
specialty = case_data.get("specialty", "")
|
| 483 |
-
diagnosis = case_data.get("diagnosis", "")
|
| 484 |
-
chief_complaint = case_data.get("chief_complaint", "")
|
| 485 |
-
|
| 486 |
-
context_parts = []
|
| 487 |
-
sources = []
|
| 488 |
-
|
| 489 |
-
def _add_results(results: list[dict], label: str):
|
| 490 |
-
if not results:
|
| 491 |
-
return
|
| 492 |
-
context_parts.append(f"=== {label} ===")
|
| 493 |
-
for r in results:
|
| 494 |
-
meta = r.get("metadata", {})
|
| 495 |
-
raw_source = meta.get("source", "corpus")
|
| 496 |
-
confidence = classify_source_confidence(raw_source)
|
| 497 |
-
confidence_label = {
|
| 498 |
-
"verified_guideline": "HIGH CONFIDENCE - Verified Indian Guideline",
|
| 499 |
-
"indian_journal": "MEDIUM CONFIDENCE - Indian Medical Journal",
|
| 500 |
-
"textbook": "HIGH CONFIDENCE - Standard Textbook",
|
| 501 |
-
"corpus_case": "MODERATE - Corpus Case",
|
| 502 |
-
"unverified": "LOW - Unverified Source",
|
| 503 |
-
}.get(confidence, "UNKNOWN")
|
| 504 |
-
source_tag = f"[Source: {raw_source} | Case: {meta.get('case_id', 'unknown')} | Confidence: {confidence_label}]"
|
| 505 |
-
context_parts.append(source_tag)
|
| 506 |
-
context_parts.append(r["content"])
|
| 507 |
-
context_parts.append("")
|
| 508 |
-
sources.append({
|
| 509 |
-
"case_id": meta.get("case_id", "unknown"),
|
| 510 |
-
"title": meta.get("title", "Untitled"),
|
| 511 |
-
"source": raw_source,
|
| 512 |
-
"confidence": confidence,
|
| 513 |
-
"specialty": meta.get("specialty", ""),
|
| 514 |
-
"chunk_type": meta.get("chunk_type", ""),
|
| 515 |
-
"relevance": r.get("relevance_score", 0),
|
| 516 |
-
})
|
| 517 |
-
|
| 518 |
-
if role == "patient":
|
| 519 |
-
results = self.vector_store.query(
|
| 520 |
-
query_text=f"Patient presenting with {chief_complaint}. Symptoms, history for {diagnosis}",
|
| 521 |
-
specialty=specialty,
|
| 522 |
-
n_results=5,
|
| 523 |
-
chunk_type="presentation",
|
| 524 |
-
)
|
| 525 |
-
_add_results(results, "SIMILAR PATIENT PRESENTATIONS FROM CORPUS")
|
| 526 |
-
|
| 527 |
-
full_results = self.vector_store.query(
|
| 528 |
-
query_text=f"Clinical case {specialty} {diagnosis}",
|
| 529 |
-
specialty=specialty,
|
| 530 |
-
n_results=3,
|
| 531 |
-
chunk_type="full_narrative",
|
| 532 |
-
)
|
| 533 |
-
_add_results(full_results, "REFERENCE CASES FROM CORPUS")
|
| 534 |
-
|
| 535 |
-
elif role == "nurse":
|
| 536 |
-
results = self.vector_store.query(
|
| 537 |
-
query_text=f"Clinical management {diagnosis}. Vitals monitoring nursing assessment {chief_complaint}",
|
| 538 |
-
specialty=specialty,
|
| 539 |
-
n_results=5,
|
| 540 |
-
chunk_type="full_narrative",
|
| 541 |
-
)
|
| 542 |
-
_add_results(results, "CLINICAL REFERENCE FOR NURSING PROTOCOLS")
|
| 543 |
-
|
| 544 |
-
learning_results = self.vector_store.query(
|
| 545 |
-
query_text=f"Diagnosis management learning points: {diagnosis}",
|
| 546 |
-
specialty=specialty,
|
| 547 |
-
n_results=3,
|
| 548 |
-
chunk_type="learning",
|
| 549 |
-
)
|
| 550 |
-
_add_results(learning_results, "DIAGNOSTIC & LEARNING MATERIAL")
|
| 551 |
-
|
| 552 |
-
elif role == "senior_doctor":
|
| 553 |
-
for chunk_type, label, n in [
|
| 554 |
-
("full_narrative", "COMPLETE CASE REFERENCES", 5),
|
| 555 |
-
("learning", "DIAGNOSTIC & TEACHING MATERIAL", 5),
|
| 556 |
-
("presentation", "CLINICAL PRESENTATIONS", 3),
|
| 557 |
-
]:
|
| 558 |
-
results = self.vector_store.query(
|
| 559 |
-
query_text=f"{diagnosis} differential diagnosis pathophysiology management {specialty}",
|
| 560 |
-
specialty=specialty,
|
| 561 |
-
n_results=n,
|
| 562 |
-
chunk_type=chunk_type,
|
| 563 |
-
)
|
| 564 |
-
_add_results(results, f"{label} FROM CORPUS")
|
| 565 |
-
|
| 566 |
-
elif role == "family":
|
| 567 |
-
# Family needs patient experience and cultural context
|
| 568 |
-
results = self.vector_store.query(
|
| 569 |
-
query_text=f"Patient family perspective {chief_complaint} {diagnosis}",
|
| 570 |
-
specialty=specialty,
|
| 571 |
-
n_results=3,
|
| 572 |
-
chunk_type="presentation",
|
| 573 |
-
)
|
| 574 |
-
_add_results(results, "SIMILAR PATIENT/FAMILY EXPERIENCES FROM CORPUS")
|
| 575 |
-
|
| 576 |
-
elif role == "lab_tech":
|
| 577 |
-
# Lab tech needs investigation and diagnostic info
|
| 578 |
-
results = self.vector_store.query(
|
| 579 |
-
query_text=f"Laboratory investigations diagnosis {diagnosis} {specialty}",
|
| 580 |
-
specialty=specialty,
|
| 581 |
-
n_results=5,
|
| 582 |
-
chunk_type="full_narrative",
|
| 583 |
-
)
|
| 584 |
-
_add_results(results, "INVESTIGATION REFERENCE FROM CORPUS")
|
| 585 |
-
|
| 586 |
-
context_text = "\n".join(context_parts) if context_parts else ""
|
| 587 |
-
return context_text, sources
|
| 588 |
-
|
| 589 |
-
def _synthesize_knowledge(
|
| 590 |
-
self, case_data: dict, role: str, rag_context: str
|
| 591 |
-
) -> Optional[str]:
|
| 592 |
-
"""Use Claude Opus with extended thinking to synthesize role-specific expertise.
|
| 593 |
-
|
| 594 |
-
The grounding rules ensure Claude only states verifiable facts.
|
| 595 |
-
"""
|
| 596 |
-
if not self.client:
|
| 597 |
-
logger.warning("No Claude client — returning structured RAG context")
|
| 598 |
-
return self._fallback_knowledge(case_data, role, rag_context)
|
| 599 |
-
|
| 600 |
-
prompts = {
|
| 601 |
-
"patient": PATIENT_KNOWLEDGE_PROMPT,
|
| 602 |
-
"nurse": NURSE_KNOWLEDGE_PROMPT,
|
| 603 |
-
"senior_doctor": SENIOR_KNOWLEDGE_PROMPT,
|
| 604 |
-
"family": FAMILY_KNOWLEDGE_PROMPT,
|
| 605 |
-
"lab_tech": LAB_TECH_KNOWLEDGE_PROMPT,
|
| 606 |
-
}
|
| 607 |
-
prompt_template = prompts.get(role)
|
| 608 |
-
if not prompt_template:
|
| 609 |
-
logger.warning(f"No prompt template for role: {role}")
|
| 610 |
-
return self._fallback_knowledge(case_data, role, rag_context)
|
| 611 |
-
|
| 612 |
-
vitals = case_data.get("vital_signs", {})
|
| 613 |
-
|
| 614 |
-
# Build prompt with grounding rules + RAG context
|
| 615 |
-
prompt = prompt_template.format(
|
| 616 |
-
grounding_rules=SOURCE_GROUNDING_RULES,
|
| 617 |
-
rag_context=rag_context or "NO REFERENCE MATERIAL AVAILABLE. Use ONLY well-established textbook facts. Tag everything [Textbook] and be conservative.",
|
| 618 |
-
diagnosis=case_data.get("diagnosis", "unknown"),
|
| 619 |
-
specialty=case_data.get("specialty", "general"),
|
| 620 |
-
difficulty=case_data.get("difficulty", "intermediate"),
|
| 621 |
-
chief_complaint=case_data.get("chief_complaint", ""),
|
| 622 |
-
presentation=case_data.get("initial_presentation", ""),
|
| 623 |
-
differentials=", ".join(case_data.get("differentials", [])),
|
| 624 |
-
learning_points="; ".join(case_data.get("learning_points", [])),
|
| 625 |
-
bp=vitals.get("bp", "120/80"),
|
| 626 |
-
hr=vitals.get("hr", 80),
|
| 627 |
-
rr=vitals.get("rr", 16),
|
| 628 |
-
temp=vitals.get("temp", 37.0),
|
| 629 |
-
spo2=vitals.get("spo2", 98),
|
| 630 |
-
)
|
| 631 |
-
|
| 632 |
-
try:
|
| 633 |
-
response = self.client.messages.create(
|
| 634 |
-
model="claude-opus-4-6",
|
| 635 |
-
max_tokens=16000,
|
| 636 |
-
temperature=1, # Required when thinking is enabled
|
| 637 |
-
thinking={
|
| 638 |
-
"type": "adaptive", # Opus 4.6: model decides when/how much to think
|
| 639 |
-
},
|
| 640 |
-
messages=[{"role": "user", "content": prompt}],
|
| 641 |
-
)
|
| 642 |
-
|
| 643 |
-
content = ""
|
| 644 |
-
for block in response.content:
|
| 645 |
-
if block.type == "text":
|
| 646 |
-
content = block.text.strip()
|
| 647 |
-
|
| 648 |
-
if content:
|
| 649 |
-
logger.info(f"Synthesized {role} knowledge: {len(content)} chars")
|
| 650 |
-
return content
|
| 651 |
-
|
| 652 |
-
except Exception as e:
|
| 653 |
-
logger.error(f"Knowledge synthesis error for {role}: {e}")
|
| 654 |
-
|
| 655 |
-
return self._fallback_knowledge(case_data, role, rag_context)
|
| 656 |
-
|
| 657 |
-
def _fallback_knowledge(
|
| 658 |
-
self, case_data: dict, role: str, rag_context: str
|
| 659 |
-
) -> str:
|
| 660 |
-
"""Fallback when Claude is unavailable — return structured RAG context with source tags.
|
| 661 |
-
|
| 662 |
-
This is a conservative fallback: only raw corpus data, no synthesis.
|
| 663 |
-
"""
|
| 664 |
-
diagnosis = case_data.get("diagnosis", "")
|
| 665 |
-
specialty = case_data.get("specialty", "")
|
| 666 |
-
chief_complaint = case_data.get("chief_complaint", "")
|
| 667 |
-
|
| 668 |
-
header = (
|
| 669 |
-
"NOTE: Dynamic knowledge synthesis unavailable. Using RAG corpus directly.\n"
|
| 670 |
-
"ONLY use facts from the reference material below. Do NOT invent or assume.\n\n"
|
| 671 |
-
)
|
| 672 |
-
|
| 673 |
-
if role == "patient":
|
| 674 |
-
return (
|
| 675 |
-
f"{header}"
|
| 676 |
-
f"CONDITION: {chief_complaint} ({specialty})\n"
|
| 677 |
-
f"Describe symptoms based ONLY on the presentation data provided to you.\n"
|
| 678 |
-
f"If asked something not in your case data, say 'pata nahi doctor'.\n\n"
|
| 679 |
-
f"REFERENCE MATERIAL [Corpus]:\n{rag_context[:2000]}"
|
| 680 |
-
)
|
| 681 |
-
|
| 682 |
-
if role == "nurse":
|
| 683 |
-
vitals = case_data.get("vital_signs", {})
|
| 684 |
-
return (
|
| 685 |
-
f"{header}"
|
| 686 |
-
f"PATIENT: {chief_complaint} ({specialty})\n"
|
| 687 |
-
f"VITALS: BP {vitals.get('bp')}, HR {vitals.get('hr')}, "
|
| 688 |
-
f"RR {vitals.get('rr')}, SpO2 {vitals.get('spo2')}%\n"
|
| 689 |
-
f"Report only what you observe. For protocols, say 'as per hospital protocol'.\n\n"
|
| 690 |
-
f"REFERENCE MATERIAL [Corpus]:\n{rag_context[:2000]}"
|
| 691 |
-
)
|
| 692 |
-
|
| 693 |
-
if role == "senior_doctor":
|
| 694 |
-
return (
|
| 695 |
-
f"{header}"
|
| 696 |
-
f"CASE: {chief_complaint} ({specialty})\n"
|
| 697 |
-
f"DIAGNOSIS: {diagnosis}\n"
|
| 698 |
-
f"DIFFERENTIALS: {', '.join(case_data.get('differentials', []))}\n"
|
| 699 |
-
f"LEARNING POINTS: {'; '.join(case_data.get('learning_points', []))}\n"
|
| 700 |
-
f"Teach using Socratic method. Only reference facts from the case data or reference material.\n\n"
|
| 701 |
-
f"REFERENCE MATERIAL [Corpus]:\n{rag_context[:3000]}"
|
| 702 |
-
)
|
| 703 |
-
|
| 704 |
-
if role == "family":
|
| 705 |
-
return (
|
| 706 |
-
f"{header}"
|
| 707 |
-
f"PATIENT CONDITION: {chief_complaint}\n"
|
| 708 |
-
f"You are a worried family member. Express concern in Hinglish.\n"
|
| 709 |
-
f"Share what home remedies you tried, cost concerns, work worries.\n"
|
| 710 |
-
f"Only describe what a family member would know in lay terms.\n\n"
|
| 711 |
-
f"REFERENCE MATERIAL [Corpus]:\n{rag_context[:1500]}"
|
| 712 |
-
)
|
| 713 |
-
|
| 714 |
-
if role == "lab_tech":
|
| 715 |
-
return (
|
| 716 |
-
f"{header}"
|
| 717 |
-
f"CASE: {chief_complaint} ({specialty})\n"
|
| 718 |
-
f"Focus on sample collection, turnaround times, and result reporting.\n"
|
| 719 |
-
f"Mention realistic govt hospital lab constraints.\n"
|
| 720 |
-
f"Use technical terms for tests but explain in simple terms to students.\n\n"
|
| 721 |
-
f"REFERENCE MATERIAL [Corpus]:\n{rag_context[:1500]}"
|
| 722 |
-
)
|
| 723 |
-
|
| 724 |
-
return f"{header}{rag_context[:2000]}" if rag_context else header
|
| 725 |
-
|
| 726 |
-
|
| 727 |
-
# Singleton instance
|
| 728 |
-
knowledge_builder = DynamicKnowledgeBuilder()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/lab_tech_agent.py
DELETED
|
@@ -1,390 +0,0 @@
|
|
| 1 |
-
"""Lab technician agent — processes investigations with realistic delays and Indian hospital context."""
|
| 2 |
-
|
| 3 |
-
from app.core.agents.base_agent import BaseAgent
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
LAB_TECH_SYSTEM_PROMPT = """You are a lab technician (Ramesh) working in the pathology/clinical laboratory of an Indian government hospital. A medical student (junior doctor) has ordered investigations for a patient.
|
| 7 |
-
|
| 8 |
-
CRITICAL RULES:
|
| 9 |
-
1. You speak in professional English with Indian lab context — the way a real lab tech communicates with doctors.
|
| 10 |
-
Examples: "Sir, sample collected. Will send to the lab now.", "Doctor, CBC machine is under calibration, 30 min extra lagega."
|
| 11 |
-
2. You are professional, efficient, and knowledgeable about lab processes.
|
| 12 |
-
3. You know turnaround times, sample requirements, and common lab issues.
|
| 13 |
-
4. You flag CRITICAL VALUES immediately — this is a patient safety responsibility.
|
| 14 |
-
5. You do NOT interpret results clinically — you report values and flag abnormals.
|
| 15 |
-
6. Keep responses concise — 2-4 sentences, factual and to the point.
|
| 16 |
-
7. You may occasionally use Hindi phrases natural to the lab setting: "Sir, thoda time lagega", "Report ready hai."
|
| 17 |
-
8. NEVER fabricate test results not present in the case data. Only report values given in the case.
|
| 18 |
-
|
| 19 |
-
ACCURACY RULES:
|
| 20 |
-
- Report ONLY values that are in the case data or your specialized knowledge.
|
| 21 |
-
- For tests not yet resulted, say "Report pending, sir" — never invent values.
|
| 22 |
-
- Know normal ranges and flag values outside them.
|
| 23 |
-
- For critical/panic values, communicate urgency clearly.
|
| 24 |
-
- Know sample requirements: fasting samples, anticoagulant tubes, culture bottles, etc.
|
| 25 |
-
|
| 26 |
-
INDIAN GOVT HOSPITAL LAB REALITY:
|
| 27 |
-
- Lab is shared between wards, OPD, and casualty — workload is heavy.
|
| 28 |
-
- Equipment issues are common: calibration downtime, reagent shortages, power cuts.
|
| 29 |
-
- Turnaround times are longer than private labs.
|
| 30 |
-
- Night duty has limited staff — some specialized tests run only in day shift.
|
| 31 |
-
- NABL accreditation standards are followed where possible, but practical constraints exist.
|
| 32 |
-
- Common machines: 3-part/5-part hematology analyzer, semi-auto biochemistry analyzer, urine analyzer.
|
| 33 |
-
- Some tests are sent to referral labs: special cultures, genetic tests, some hormonal assays.
|
| 34 |
-
- Blood bank is separate — crossmatch takes 30-45 min minimum.
|
| 35 |
-
|
| 36 |
-
STANDARD TURNAROUND TIMES (GOVT HOSPITAL):
|
| 37 |
-
- CBC/Hemogram: 30 min - 1 hour
|
| 38 |
-
- RFT (Renal Function Tests): 1 - 2 hours
|
| 39 |
-
- LFT (Liver Function Tests): 1 - 2 hours
|
| 40 |
-
- Blood sugar (random/fasting): 30 min
|
| 41 |
-
- Serum electrolytes: 1 - 2 hours
|
| 42 |
-
- Coagulation profile (PT/INR, aPTT): 1 - 2 hours
|
| 43 |
-
- Urine routine/microscopy: 30 min - 1 hour
|
| 44 |
-
- Blood culture: 24 - 72 hours (preliminary), 5-7 days (final)
|
| 45 |
-
- Urine culture: 24 - 48 hours
|
| 46 |
-
- ABG (Arterial Blood Gas): 15 - 30 min (point-of-care)
|
| 47 |
-
- Troponin/Cardiac markers: 1 hour (if rapid kit available), 2-3 hours (lab-based)
|
| 48 |
-
- Peripheral smear: 1 - 2 hours (depends on pathologist availability)
|
| 49 |
-
- CSF analysis: 1 - 2 hours (routine), culture = 48-72 hours
|
| 50 |
-
- Dengue NS1/IgM: 1 - 2 hours (rapid kit), 4-6 hours (ELISA)
|
| 51 |
-
- Malaria (rapid test + smear): 30 min - 1 hour
|
| 52 |
-
- Widal test: 1 - 2 hours
|
| 53 |
-
- HbA1c: 2 - 4 hours
|
| 54 |
-
- Thyroid profile: 4 - 6 hours (batch run)
|
| 55 |
-
- X-ray: 30 min - 1 hour (depends on queue)
|
| 56 |
-
- ECG: 15 - 30 min
|
| 57 |
-
- Ultrasound: 2 - 4 hours (radiologist dependent)
|
| 58 |
-
|
| 59 |
-
CRITICAL/PANIC VALUES (FLAG IMMEDIATELY):
|
| 60 |
-
- Hemoglobin < 7 g/dL or > 20 g/dL
|
| 61 |
-
- Platelet count < 20,000 or > 10,00,000
|
| 62 |
-
- WBC < 2,000 or > 30,000
|
| 63 |
-
- Blood glucose < 50 mg/dL or > 500 mg/dL
|
| 64 |
-
- Serum potassium < 2.5 or > 6.5 mEq/L
|
| 65 |
-
- Serum sodium < 120 or > 160 mEq/L
|
| 66 |
-
- Serum creatinine > 10 mg/dL
|
| 67 |
-
- INR > 5.0
|
| 68 |
-
- Troponin positive (above cutoff)
|
| 69 |
-
- pH < 7.2 or > 7.6 (ABG)
|
| 70 |
-
- pO2 < 60 mmHg
|
| 71 |
-
- Lactate > 4 mmol/L
|
| 72 |
-
|
| 73 |
-
SAMPLE REQUIREMENTS (commonly asked):
|
| 74 |
-
- CBC: EDTA tube (purple cap), 2 mL
|
| 75 |
-
- RFT/LFT/Electrolytes: Plain tube (red cap) or gel tube, 3-5 mL
|
| 76 |
-
- Blood culture: Culture bottle, 8-10 mL per bottle, two bottles from different sites
|
| 77 |
-
- Coagulation: Citrate tube (blue cap), 2.7 mL, must be filled to the mark
|
| 78 |
-
- Blood sugar: Fluoride tube (grey cap), 2 mL
|
| 79 |
-
- ABG: Heparinized syringe, must be sent on ice within 15 minutes
|
| 80 |
-
- Urine: Clean catch midstream, sterile container for culture
|
| 81 |
-
- CSF: 3 tubes — biochemistry, microbiology, cytology
|
| 82 |
-
|
| 83 |
-
CASE DETAILS:
|
| 84 |
-
- Patient: {age}y {gender}
|
| 85 |
-
- Chief complaint: {chief_complaint}
|
| 86 |
-
- Investigations ordered: {investigations_ordered}
|
| 87 |
-
- Results available: {results_available}
|
| 88 |
-
- Pending investigations: {pending_investigations}
|
| 89 |
-
|
| 90 |
-
Respond ONLY as the lab technician. Be professional, factual, and flag critical values.
|
| 91 |
-
|
| 92 |
-
FORMATTING RULES:
|
| 93 |
-
- Do NOT use markdown formatting like ** or * in your responses
|
| 94 |
-
- Write in plain text only
|
| 95 |
-
- For actions or expressions, use plain text like: (doing something) instead of *doing something*"""
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
class LabTechAgent(BaseAgent):
|
| 99 |
-
"""Lab technician agent that processes investigations with realistic delays."""
|
| 100 |
-
|
| 101 |
-
agent_type = "lab_tech"
|
| 102 |
-
display_name = "Lab Tech Ramesh"
|
| 103 |
-
|
| 104 |
-
def __init__(self):
|
| 105 |
-
super().__init__()
|
| 106 |
-
self.case_info: dict = {}
|
| 107 |
-
self.investigations_ordered: list[str] = []
|
| 108 |
-
self.results_available: dict = {}
|
| 109 |
-
self.pending_investigations: list[str] = []
|
| 110 |
-
self.critical_values: list[str] = []
|
| 111 |
-
|
| 112 |
-
def configure(self, case_data: dict):
|
| 113 |
-
"""Configure lab tech with case-specific data."""
|
| 114 |
-
self.case_info = {
|
| 115 |
-
"age": case_data.get("patient", {}).get("age", 45),
|
| 116 |
-
"gender": case_data.get("patient", {}).get("gender", "Male"),
|
| 117 |
-
"chief_complaint": case_data.get("chief_complaint", ""),
|
| 118 |
-
}
|
| 119 |
-
|
| 120 |
-
# Extract lab/investigation info from stages
|
| 121 |
-
lab_info = ""
|
| 122 |
-
for stage in case_data.get("stages", []):
|
| 123 |
-
stage_name = stage.get("stage", "")
|
| 124 |
-
if stage_name in ("labs", "investigations", "lab_results"):
|
| 125 |
-
lab_info = stage.get("info", "")
|
| 126 |
-
break
|
| 127 |
-
|
| 128 |
-
self.case_info["lab_info"] = lab_info
|
| 129 |
-
|
| 130 |
-
# Parse available results from lab info
|
| 131 |
-
if lab_info:
|
| 132 |
-
self.results_available = {"lab_results": lab_info}
|
| 133 |
-
else:
|
| 134 |
-
self.results_available = {}
|
| 135 |
-
|
| 136 |
-
# Extract investigation list if available
|
| 137 |
-
investigations = case_data.get("investigations", [])
|
| 138 |
-
if isinstance(investigations, list):
|
| 139 |
-
self.investigations_ordered = investigations
|
| 140 |
-
elif isinstance(investigations, str):
|
| 141 |
-
self.investigations_ordered = [
|
| 142 |
-
inv.strip() for inv in investigations.split(",") if inv.strip()
|
| 143 |
-
]
|
| 144 |
-
else:
|
| 145 |
-
self.investigations_ordered = []
|
| 146 |
-
|
| 147 |
-
self.pending_investigations = []
|
| 148 |
-
|
| 149 |
-
# Detect critical values in lab info
|
| 150 |
-
self._detect_critical_values(lab_info)
|
| 151 |
-
|
| 152 |
-
def _detect_critical_values(self, lab_info: str):
|
| 153 |
-
"""Scan lab info for panic/critical values."""
|
| 154 |
-
self.critical_values = []
|
| 155 |
-
if not lab_info:
|
| 156 |
-
return
|
| 157 |
-
|
| 158 |
-
lab_lower = lab_info.lower()
|
| 159 |
-
|
| 160 |
-
# Check for critical patterns
|
| 161 |
-
critical_checks = [
|
| 162 |
-
("hb", "hemoglobin", lambda v: v < 7 or v > 20),
|
| 163 |
-
("platelet", "platelets", lambda v: v < 20000),
|
| 164 |
-
("wbc", "white blood cell", lambda v: v < 2000 or v > 30000),
|
| 165 |
-
("glucose", "blood sugar", lambda v: v < 50 or v > 500),
|
| 166 |
-
("potassium", "k+", lambda v: v < 2.5 or v > 6.5),
|
| 167 |
-
("sodium", "na+", lambda v: v < 120 or v > 160),
|
| 168 |
-
("creatinine", "creat", lambda v: v > 10),
|
| 169 |
-
("inr", "inr", lambda v: v > 5.0),
|
| 170 |
-
]
|
| 171 |
-
|
| 172 |
-
# Simple heuristic: flag keywords that suggest critical values
|
| 173 |
-
if any(word in lab_lower for word in ["critical", "panic", "dangerously", "severely"]):
|
| 174 |
-
self.critical_values.append("Flagged values detected in report")
|
| 175 |
-
|
| 176 |
-
# Check for very low platelet counts (common critical value in Indian hospitals)
|
| 177 |
-
import re
|
| 178 |
-
|
| 179 |
-
platelet_match = re.search(r"platelet[s]?\s*[:\-]?\s*([\d,]+)", lab_lower)
|
| 180 |
-
if platelet_match:
|
| 181 |
-
try:
|
| 182 |
-
count = int(platelet_match.group(1).replace(",", ""))
|
| 183 |
-
if count < 20000:
|
| 184 |
-
self.critical_values.append(
|
| 185 |
-
f"CRITICAL: Platelet count {count:,} — below panic value"
|
| 186 |
-
)
|
| 187 |
-
except ValueError:
|
| 188 |
-
pass
|
| 189 |
-
|
| 190 |
-
hb_match = re.search(r"(?:hb|hemoglobin|haemoglobin)\s*[:\-]?\s*([\d.]+)", lab_lower)
|
| 191 |
-
if hb_match:
|
| 192 |
-
try:
|
| 193 |
-
hb = float(hb_match.group(1))
|
| 194 |
-
if hb < 7:
|
| 195 |
-
self.critical_values.append(
|
| 196 |
-
f"CRITICAL: Hemoglobin {hb} g/dL — below panic value"
|
| 197 |
-
)
|
| 198 |
-
except ValueError:
|
| 199 |
-
pass
|
| 200 |
-
|
| 201 |
-
def get_system_prompt(self, case_context: dict) -> str:
|
| 202 |
-
info = {**self.case_info, **case_context}
|
| 203 |
-
|
| 204 |
-
investigations_str = ", ".join(self.investigations_ordered) if self.investigations_ordered else "As per doctor's orders"
|
| 205 |
-
results_str = self.case_info.get("lab_info", "Pending") or "Pending"
|
| 206 |
-
pending_str = ", ".join(self.pending_investigations) if self.pending_investigations else "None currently"
|
| 207 |
-
|
| 208 |
-
base_prompt = LAB_TECH_SYSTEM_PROMPT.format(
|
| 209 |
-
age=info.get("age", 45),
|
| 210 |
-
gender=info.get("gender", "Male"),
|
| 211 |
-
chief_complaint=info.get("chief_complaint", "unknown"),
|
| 212 |
-
investigations_ordered=investigations_str,
|
| 213 |
-
results_available=results_str,
|
| 214 |
-
pending_investigations=pending_str,
|
| 215 |
-
)
|
| 216 |
-
|
| 217 |
-
if self.critical_values:
|
| 218 |
-
base_prompt += (
|
| 219 |
-
"\n\n=== CRITICAL VALUES TO FLAG ===\n"
|
| 220 |
-
"You MUST immediately inform the doctor about these critical values:\n"
|
| 221 |
-
+ "\n".join(f"- {cv}" for cv in self.critical_values)
|
| 222 |
-
)
|
| 223 |
-
|
| 224 |
-
if self.specialized_knowledge:
|
| 225 |
-
base_prompt += (
|
| 226 |
-
"\n\n=== YOUR LAB EXPERTISE & CASE-SPECIFIC KNOWLEDGE ===\n"
|
| 227 |
-
"Use this specialized knowledge for accurate lab reporting, "
|
| 228 |
-
"turnaround times, and investigation-specific details for this case.\n\n"
|
| 229 |
-
f"{self.specialized_knowledge}"
|
| 230 |
-
)
|
| 231 |
-
|
| 232 |
-
return base_prompt
|
| 233 |
-
|
| 234 |
-
def get_fallback_response(self, message: str, case_context: dict) -> str:
|
| 235 |
-
msg = message.lower()
|
| 236 |
-
|
| 237 |
-
# Critical value alert takes priority
|
| 238 |
-
if self.critical_values:
|
| 239 |
-
for cv in self.critical_values:
|
| 240 |
-
if "platelet" in cv.lower() or "hemoglobin" in cv.lower():
|
| 241 |
-
return (
|
| 242 |
-
f"Doctor, urgent update! {cv}. "
|
| 243 |
-
"I'm flagging this as a panic value per our lab protocol. "
|
| 244 |
-
"Please review immediately and advise if repeat sample is needed."
|
| 245 |
-
)
|
| 246 |
-
|
| 247 |
-
# Sample collection
|
| 248 |
-
if any(w in msg for w in ["collect", "sample", "draw", "blood"]):
|
| 249 |
-
return (
|
| 250 |
-
"Sir, sample collected. EDTA and plain tubes filled. "
|
| 251 |
-
"Sending to the lab now. Routine turnaround is about 1-2 hours, "
|
| 252 |
-
"I'll try to expedite given the clinical urgency."
|
| 253 |
-
)
|
| 254 |
-
|
| 255 |
-
# Report status / ETA
|
| 256 |
-
if any(w in msg for w in ["report", "result", "ready", "status", "kab", "eta"]):
|
| 257 |
-
lab_info = self.case_info.get("lab_info", "")
|
| 258 |
-
if lab_info:
|
| 259 |
-
return (
|
| 260 |
-
"Doctor, reports are ready. I'm sending them to the ward now. "
|
| 261 |
-
"Please check — there are a few values I'd like you to review."
|
| 262 |
-
)
|
| 263 |
-
return (
|
| 264 |
-
"Sir, samples are being processed. CBC should be ready in about 45 minutes, "
|
| 265 |
-
"biochemistry in about 1-2 hours. I'll call the ward as soon as reports are ready."
|
| 266 |
-
)
|
| 267 |
-
|
| 268 |
-
# CBC related
|
| 269 |
-
if any(w in msg for w in ["cbc", "hemogram", "blood count"]):
|
| 270 |
-
return (
|
| 271 |
-
"Sir, for CBC I need 2 mL in the EDTA tube — purple cap. "
|
| 272 |
-
"Our 5-part analyzer is working, turnaround is about 45 minutes. "
|
| 273 |
-
"I'll flag any critical values immediately."
|
| 274 |
-
)
|
| 275 |
-
|
| 276 |
-
# Blood culture
|
| 277 |
-
if any(w in msg for w in ["culture", "blood culture", "sensitivity"]):
|
| 278 |
-
return (
|
| 279 |
-
"Sir, for blood culture we need two samples from different sites, "
|
| 280 |
-
"8-10 mL each in the culture bottles. Please collect before starting antibiotics. "
|
| 281 |
-
"Preliminary report in 24-48 hours, final with sensitivity in 5-7 days."
|
| 282 |
-
)
|
| 283 |
-
|
| 284 |
-
# ABG
|
| 285 |
-
if any(w in msg for w in ["abg", "arterial", "blood gas"]):
|
| 286 |
-
return (
|
| 287 |
-
"Sir, ABG sample needs to come in a heparinized syringe, on ice, "
|
| 288 |
-
"within 15 minutes of collection. Our ABG machine is in the ICU. "
|
| 289 |
-
"I can process it in 15-20 minutes if sent immediately."
|
| 290 |
-
)
|
| 291 |
-
|
| 292 |
-
# Coagulation
|
| 293 |
-
if any(w in msg for w in ["pt", "inr", "aptt", "coagulation", "clotting"]):
|
| 294 |
-
return (
|
| 295 |
-
"Sir, coagulation profile needs citrate tube — blue cap, 2.7 mL. "
|
| 296 |
-
"Please ensure the tube is filled exactly to the mark, "
|
| 297 |
-
"otherwise we'll have to reject and recollect. Turnaround is about 1-2 hours."
|
| 298 |
-
)
|
| 299 |
-
|
| 300 |
-
# Machine / equipment issues
|
| 301 |
-
if any(w in msg for w in ["machine", "down", "delay", "problem", "issue"]):
|
| 302 |
-
return (
|
| 303 |
-
"Sir, I should let you know — our biochemistry analyzer was down for "
|
| 304 |
-
"calibration this morning. It's back up now but there's a backlog. "
|
| 305 |
-
"May take an extra 30-45 minutes for LFT and RFT reports. Sorry for the delay."
|
| 306 |
-
)
|
| 307 |
-
|
| 308 |
-
# Urgent / stat
|
| 309 |
-
if any(w in msg for w in ["urgent", "stat", "emergency", "jaldi", "fast"]):
|
| 310 |
-
return (
|
| 311 |
-
"Understood sir, marking this as URGENT. I'll prioritize these samples. "
|
| 312 |
-
"CBC can be ready in 20-30 minutes, sugar in 15 minutes if you need stat. "
|
| 313 |
-
"I'll call the ward directly with results."
|
| 314 |
-
)
|
| 315 |
-
|
| 316 |
-
# Urine
|
| 317 |
-
if any(w in msg for w in ["urine", "urine culture", "urinalysis"]):
|
| 318 |
-
return (
|
| 319 |
-
"Sir, for urine routine I need a clean catch midstream sample in a plain container. "
|
| 320 |
-
"If you need culture also, please use the sterile container. "
|
| 321 |
-
"Routine report in 30-45 minutes, culture in 24-48 hours."
|
| 322 |
-
)
|
| 323 |
-
|
| 324 |
-
# Cross-match / blood bank
|
| 325 |
-
if any(w in msg for w in ["cross", "crossmatch", "blood bank", "transfusion", "prbc"]):
|
| 326 |
-
return (
|
| 327 |
-
"Sir, for crossmatch I'll need a fresh sample — 3 mL in plain tube with patient's details. "
|
| 328 |
-
"Blood bank takes minimum 30-45 minutes for crossmatch. "
|
| 329 |
-
"What blood group is the patient? I'll check availability."
|
| 330 |
-
)
|
| 331 |
-
|
| 332 |
-
# Dengue / malaria (common in Indian hospitals)
|
| 333 |
-
if any(w in msg for w in ["dengue", "malaria", "ns1", "widal", "typhoid"]):
|
| 334 |
-
return (
|
| 335 |
-
"Sir, we have rapid test kits for dengue NS1 and malaria. "
|
| 336 |
-
"Results in about 30 minutes. For dengue serology (IgM/IgG ELISA), "
|
| 337 |
-
"that runs in batch — turnaround is 4-6 hours. What do you want me to run?"
|
| 338 |
-
)
|
| 339 |
-
|
| 340 |
-
# Radiology (often asked to lab tech who redirects)
|
| 341 |
-
if any(w in msg for w in ["x-ray", "xray", "ultrasound", "ct", "mri", "scan"]):
|
| 342 |
-
return (
|
| 343 |
-
"Sir, imaging is not my department — you'll need to send a request to Radiology. "
|
| 344 |
-
"X-ray is usually available, turnaround 30 min to 1 hour depending on queue. "
|
| 345 |
-
"For ultrasound, the radiologist needs to be called. CT/MRI needs HOD approval."
|
| 346 |
-
)
|
| 347 |
-
|
| 348 |
-
# Default professional response
|
| 349 |
-
return (
|
| 350 |
-
"Yes sir, lab is ready. What investigations do you want me to process? "
|
| 351 |
-
"Just let me know the tests and I'll arrange the appropriate tubes and get samples collected."
|
| 352 |
-
)
|
| 353 |
-
|
| 354 |
-
def get_initial_report(self) -> dict:
|
| 355 |
-
"""Generate lab tech's introduction when investigation is first ordered."""
|
| 356 |
-
age = self.case_info.get("age", 45)
|
| 357 |
-
gender = self.case_info.get("gender", "Male").lower()
|
| 358 |
-
investigations = self.investigations_ordered
|
| 359 |
-
|
| 360 |
-
if investigations:
|
| 361 |
-
inv_list = ", ".join(investigations[:4])
|
| 362 |
-
extra = f" and {len(investigations) - 4} more" if len(investigations) > 4 else ""
|
| 363 |
-
content = (
|
| 364 |
-
f"Good day doctor. I'm Ramesh from the pathology lab. "
|
| 365 |
-
f"I've received the investigation request for your {age}y {gender} patient. "
|
| 366 |
-
f"Orders noted: {inv_list}{extra}. "
|
| 367 |
-
f"I'll collect the samples and start processing. Will update you as reports come in."
|
| 368 |
-
)
|
| 369 |
-
else:
|
| 370 |
-
content = (
|
| 371 |
-
f"Good day doctor. I'm Ramesh from the pathology lab. "
|
| 372 |
-
f"I understand you have a {age}y {gender} patient who needs investigations. "
|
| 373 |
-
f"Let me know what tests you'd like to order and I'll arrange sample collection."
|
| 374 |
-
)
|
| 375 |
-
|
| 376 |
-
# Add critical value alert if detected
|
| 377 |
-
if self.critical_values:
|
| 378 |
-
content += (
|
| 379 |
-
f"\n\nURGENT: Doctor, I need to flag something — "
|
| 380 |
-
+ "; ".join(self.critical_values)
|
| 381 |
-
+ ". Please review immediately."
|
| 382 |
-
)
|
| 383 |
-
|
| 384 |
-
return {
|
| 385 |
-
"agent_type": self.agent_type,
|
| 386 |
-
"display_name": self.display_name,
|
| 387 |
-
"content": content,
|
| 388 |
-
"investigations_ordered": self.investigations_ordered,
|
| 389 |
-
"critical_values": self.critical_values,
|
| 390 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/nurse_agent.py
DELETED
|
@@ -1,211 +0,0 @@
|
|
| 1 |
-
"""Nurse agent — provides clinical observations, vitals, and urgency alerts."""
|
| 2 |
-
|
| 3 |
-
from app.core.agents.base_agent import BaseAgent
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
NURSE_SYSTEM_PROMPT = """You are an experienced ward nurse in an Indian government hospital assisting a medical student with a patient case.
|
| 7 |
-
|
| 8 |
-
CRITICAL RULES:
|
| 9 |
-
1. You are professional, efficient, and supportive of the student.
|
| 10 |
-
2. You provide clinical observations, vital sign readings, and nursing assessments.
|
| 11 |
-
3. You alert the student about urgent/critical findings using clear urgency levels.
|
| 12 |
-
4. You do NOT diagnose — you report observations and let the doctor decide.
|
| 13 |
-
5. You use proper medical terminology (you're a trained nurse).
|
| 14 |
-
6. You may gently prompt the student if they're missing something obvious.
|
| 15 |
-
7. Keep responses concise and clinical — 2-4 sentences.
|
| 16 |
-
8. You speak in English with occasional Hindi/medical terms naturally used in Indian hospitals.
|
| 17 |
-
|
| 18 |
-
ACCURACY RULES:
|
| 19 |
-
- ONLY report observations and protocols you are certain about.
|
| 20 |
-
- For drug doses, say "as per doctor's order" unless specified in your knowledge.
|
| 21 |
-
- For protocols, say "as per hospital protocol" if the specific guideline is unclear.
|
| 22 |
-
- Use NLEM (National List of Essential Medicines) drugs — avoid brand names.
|
| 23 |
-
- Be realistic about what's available in a govt hospital (no assuming MRI, special tests without referral).
|
| 24 |
-
- If you don't know something, say "Doctor, I'll check and confirm" — never guess.
|
| 25 |
-
|
| 26 |
-
INDIAN GOVT HOSPITAL REALITY:
|
| 27 |
-
- You manage 15-20 patients per shift, sometimes more.
|
| 28 |
-
- Lab reports take 2-4 hours (routine) or 30min-1hr (urgent).
|
| 29 |
-
- X-ray is available, ultrasound needs radiology call, CT/MRI = referral.
|
| 30 |
-
- Blood bank may need time for crossmatch, especially rare groups.
|
| 31 |
-
- Pharmacy indent for non-stock medicines takes time.
|
| 32 |
-
- Night duty: skeleton staff, limited lab services.
|
| 33 |
-
|
| 34 |
-
CASE DETAILS:
|
| 35 |
-
- Patient: {age}y {gender} from {location}
|
| 36 |
-
- Chief complaint: {chief_complaint}
|
| 37 |
-
- Vitals: BP {bp}, HR {hr}, RR {rr}, Temp {temp}°C, SpO2 {spo2}%
|
| 38 |
-
- Physical exam findings: {physical_exam}
|
| 39 |
-
- Lab findings: {labs}
|
| 40 |
-
|
| 41 |
-
URGENCY PROTOCOL:
|
| 42 |
-
- routine: Normal observations. "Doctor, patient is stable. Vitals are within normal range."
|
| 43 |
-
- attention: Something needs noting. "Doctor, I'd like to draw your attention to..."
|
| 44 |
-
- urgent: Abnormal finding needs action. "Doctor, the patient's SpO2 is dropping. Should we start O2?"
|
| 45 |
-
- critical: Immediate intervention needed. "Doctor! Patient's vitals are deteriorating — we need to act NOW!"
|
| 46 |
-
|
| 47 |
-
Assess urgency based on the vitals and case severity. Respond ONLY as the nurse.
|
| 48 |
-
|
| 49 |
-
FORMATTING RULES:
|
| 50 |
-
- Do NOT use markdown formatting like ** or * in your responses
|
| 51 |
-
- Write in plain text only
|
| 52 |
-
- For actions or expressions, use plain text like: (doing something) instead of *doing something*"""
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
class NurseAgent(BaseAgent):
|
| 56 |
-
"""Nurse agent that provides clinical observations and alerts."""
|
| 57 |
-
|
| 58 |
-
agent_type = "nurse"
|
| 59 |
-
display_name = "Nurse Priya"
|
| 60 |
-
|
| 61 |
-
def __init__(self):
|
| 62 |
-
super().__init__()
|
| 63 |
-
self.urgency_level = "routine"
|
| 64 |
-
self.case_info: dict = {}
|
| 65 |
-
|
| 66 |
-
def configure(self, case_data: dict):
|
| 67 |
-
"""Configure nurse with case-specific data."""
|
| 68 |
-
vitals = case_data.get("vital_signs", {})
|
| 69 |
-
self.case_info = {
|
| 70 |
-
"age": case_data.get("patient", {}).get("age", 45),
|
| 71 |
-
"gender": case_data.get("patient", {}).get("gender", "Male"),
|
| 72 |
-
"location": case_data.get("patient", {}).get("location", "Delhi"),
|
| 73 |
-
"chief_complaint": case_data.get("chief_complaint", ""),
|
| 74 |
-
"bp": vitals.get("bp", "120/80"),
|
| 75 |
-
"hr": vitals.get("hr", 80),
|
| 76 |
-
"rr": vitals.get("rr", 16),
|
| 77 |
-
"temp": vitals.get("temp", 37.0),
|
| 78 |
-
"spo2": vitals.get("spo2", 98),
|
| 79 |
-
"physical_exam": "",
|
| 80 |
-
"labs": "",
|
| 81 |
-
}
|
| 82 |
-
|
| 83 |
-
# Extract exam and lab info from stages
|
| 84 |
-
for stage in case_data.get("stages", []):
|
| 85 |
-
if stage.get("stage") == "physical_exam":
|
| 86 |
-
self.case_info["physical_exam"] = stage.get("info", "")
|
| 87 |
-
elif stage.get("stage") == "labs":
|
| 88 |
-
self.case_info["labs"] = stage.get("info", "")
|
| 89 |
-
|
| 90 |
-
self._set_urgency_level(vitals, case_data.get("difficulty", "intermediate"))
|
| 91 |
-
|
| 92 |
-
def _set_urgency_level(self, vitals: dict, difficulty: str):
|
| 93 |
-
"""Determine urgency from vitals."""
|
| 94 |
-
hr = vitals.get("hr", 80)
|
| 95 |
-
spo2 = vitals.get("spo2", 98)
|
| 96 |
-
rr = vitals.get("rr", 16)
|
| 97 |
-
temp = vitals.get("temp", 37.0)
|
| 98 |
-
|
| 99 |
-
if spo2 < 88 or hr > 140 or rr > 35 or temp > 40:
|
| 100 |
-
self.urgency_level = "critical"
|
| 101 |
-
elif spo2 < 92 or hr > 120 or rr > 28 or temp > 39:
|
| 102 |
-
self.urgency_level = "urgent"
|
| 103 |
-
elif spo2 < 95 or hr > 100 or rr > 22 or temp > 38:
|
| 104 |
-
self.urgency_level = "attention"
|
| 105 |
-
else:
|
| 106 |
-
self.urgency_level = "routine"
|
| 107 |
-
|
| 108 |
-
if difficulty == "advanced" and self.urgency_level == "routine":
|
| 109 |
-
self.urgency_level = "attention"
|
| 110 |
-
|
| 111 |
-
def get_system_prompt(self, case_context: dict) -> str:
|
| 112 |
-
info = {**self.case_info, **case_context}
|
| 113 |
-
# Use CURRENT vitals from simulation state if available, otherwise initial
|
| 114 |
-
base_prompt = NURSE_SYSTEM_PROMPT.format(
|
| 115 |
-
age=info.get("age", 45),
|
| 116 |
-
gender=info.get("gender", "Male"),
|
| 117 |
-
location=info.get("location", "Delhi"),
|
| 118 |
-
chief_complaint=info.get("chief_complaint", "unknown"),
|
| 119 |
-
bp=info.get("current_bp", info.get("bp", "120/80")),
|
| 120 |
-
hr=info.get("current_hr", info.get("hr", 80)),
|
| 121 |
-
rr=info.get("current_rr", info.get("rr", 16)),
|
| 122 |
-
temp=info.get("current_temp", info.get("temp", 37.0)),
|
| 123 |
-
spo2=info.get("current_spo2", info.get("spo2", 98)),
|
| 124 |
-
physical_exam=info.get("physical_exam", "Not yet examined"),
|
| 125 |
-
labs=info.get("labs", "Pending"),
|
| 126 |
-
)
|
| 127 |
-
|
| 128 |
-
if self.specialized_knowledge:
|
| 129 |
-
base_prompt += (
|
| 130 |
-
"\n\n=== YOUR CLINICAL PROTOCOL KNOWLEDGE ===\n"
|
| 131 |
-
"Use this specialized knowledge for accurate clinical observations, "
|
| 132 |
-
"nursing assessments, and protocol-based responses specific to this condition.\n\n"
|
| 133 |
-
f"{self.specialized_knowledge}"
|
| 134 |
-
)
|
| 135 |
-
|
| 136 |
-
return base_prompt
|
| 137 |
-
|
| 138 |
-
def get_fallback_response(self, message: str, case_context: dict) -> str:
|
| 139 |
-
msg = message.lower()
|
| 140 |
-
vitals = self.case_info
|
| 141 |
-
|
| 142 |
-
if self.urgency_level == "critical":
|
| 143 |
-
return (
|
| 144 |
-
f"Doctor! Patient's vitals are concerning — "
|
| 145 |
-
f"HR {vitals['hr']}, SpO2 {vitals['spo2']}%, RR {vitals['rr']}. "
|
| 146 |
-
f"We need to act quickly. What do you want me to start?"
|
| 147 |
-
)
|
| 148 |
-
|
| 149 |
-
if any(w in msg for w in ["vitals", "vital signs", "bp", "pulse"]):
|
| 150 |
-
return (
|
| 151 |
-
f"Doctor, latest vitals: BP {vitals['bp']}, HR {vitals['hr']} bpm, "
|
| 152 |
-
f"RR {vitals['rr']}/min, Temp {vitals['temp']}°C, SpO2 {vitals['spo2']}%. "
|
| 153 |
-
f"{'I notice the SpO2 is on the lower side.' if vitals['spo2'] < 95 else 'Vitals are noted.'}"
|
| 154 |
-
)
|
| 155 |
-
|
| 156 |
-
if any(w in msg for w in ["oxygen", "o2", "spo2"]):
|
| 157 |
-
return (
|
| 158 |
-
f"SpO2 is currently {vitals['spo2']}%. "
|
| 159 |
-
f"{'Shall I start supplemental O2 via nasal cannula?' if vitals['spo2'] < 94 else 'Saturation is being maintained.'}"
|
| 160 |
-
)
|
| 161 |
-
|
| 162 |
-
if any(w in msg for w in ["iv", "line", "access", "cannula"]):
|
| 163 |
-
return "Doctor, shall I get an IV line set up? I have 18G and 20G cannulas ready. Which access do you want?"
|
| 164 |
-
|
| 165 |
-
if any(w in msg for w in ["monitor", "ecg", "cardiac"]):
|
| 166 |
-
return "I'll put the patient on continuous cardiac monitoring right away. ECG machine is on its way."
|
| 167 |
-
|
| 168 |
-
if any(w in msg for w in ["lab", "blood", "test", "investigation"]):
|
| 169 |
-
return "Doctor, I can send the samples to lab right away. What tests do you want me to order — CBC, RFT, LFT, or anything specific?"
|
| 170 |
-
|
| 171 |
-
if self.urgency_level == "urgent":
|
| 172 |
-
return (
|
| 173 |
-
f"Doctor, just to update you — the patient's HR is {vitals['hr']} and SpO2 {vitals['spo2']}%. "
|
| 174 |
-
f"I'd recommend we keep a close watch. Want me to prepare any emergency medications?"
|
| 175 |
-
)
|
| 176 |
-
|
| 177 |
-
return "Yes doctor, I'm here. What do you need me to do for the patient?"
|
| 178 |
-
|
| 179 |
-
def get_initial_report(self) -> dict:
|
| 180 |
-
"""Generate nurse's initial patient handoff report."""
|
| 181 |
-
vitals = self.case_info
|
| 182 |
-
alerts = []
|
| 183 |
-
|
| 184 |
-
if vitals["spo2"] < 94:
|
| 185 |
-
alerts.append(f"SpO2 is low at {vitals['spo2']}%")
|
| 186 |
-
if vitals["hr"] > 110:
|
| 187 |
-
alerts.append(f"tachycardic at {vitals['hr']} bpm")
|
| 188 |
-
if vitals["rr"] > 24:
|
| 189 |
-
alerts.append(f"tachypneic with RR {vitals['rr']}")
|
| 190 |
-
if vitals["temp"] > 38.5:
|
| 191 |
-
alerts.append(f"febrile at {vitals['temp']}°C")
|
| 192 |
-
|
| 193 |
-
base = (
|
| 194 |
-
f"Doctor, we have a {vitals['age']}-year-old {vitals['gender'].lower()} patient "
|
| 195 |
-
f"presenting with {vitals['chief_complaint'].lower()}. "
|
| 196 |
-
f"Vitals — BP {vitals['bp']}, HR {vitals['hr']}, RR {vitals['rr']}, "
|
| 197 |
-
f"Temp {vitals['temp']}°C, SpO2 {vitals['spo2']}%."
|
| 198 |
-
)
|
| 199 |
-
|
| 200 |
-
if alerts:
|
| 201 |
-
base += f" Please note: patient is {', '.join(alerts)}."
|
| 202 |
-
|
| 203 |
-
if self.urgency_level in ("urgent", "critical"):
|
| 204 |
-
base += " I'd recommend we prioritize this case."
|
| 205 |
-
|
| 206 |
-
return {
|
| 207 |
-
"agent_type": self.agent_type,
|
| 208 |
-
"display_name": self.display_name,
|
| 209 |
-
"content": base,
|
| 210 |
-
"urgency_level": self.urgency_level,
|
| 211 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/orchestrator.py
DELETED
|
@@ -1,914 +0,0 @@
|
|
| 1 |
-
"""Agent orchestrator — coordinates the complete hospital simulation.
|
| 2 |
-
|
| 3 |
-
This is the BRAIN of the simulation:
|
| 4 |
-
- Manages sessions with full case state (time, vitals, investigations, treatments)
|
| 5 |
-
- Routes student actions through safety validation -> treatment engine -> agents
|
| 6 |
-
- Enables multi-agent interaction (agents respond to each other, not just student)
|
| 7 |
-
- Generates simulation events (lab results arriving, vitals changing, patient deteriorating)
|
| 8 |
-
- Manages complication engine for probabilistic emergencies
|
| 9 |
-
- Includes Family agent (cultural context) and Lab Tech agent (investigation lifecycle)
|
| 10 |
-
"""
|
| 11 |
-
|
| 12 |
-
import logging
|
| 13 |
-
import random
|
| 14 |
-
import uuid
|
| 15 |
-
from typing import Optional
|
| 16 |
-
|
| 17 |
-
from app.core.agents.patient_agent import PatientAgent
|
| 18 |
-
from app.core.agents.nurse_agent import NurseAgent
|
| 19 |
-
from app.core.agents.senior_agent import SeniorDoctorAgent
|
| 20 |
-
from app.core.agents.family_agent import FamilyAgent
|
| 21 |
-
from app.core.agents.lab_tech_agent import LabTechAgent
|
| 22 |
-
from app.core.agents.knowledge_builder import knowledge_builder
|
| 23 |
-
from app.core.agents.case_state_manager import CaseStateManager
|
| 24 |
-
from app.core.agents.treatment_engine import treatment_engine
|
| 25 |
-
from app.core.agents.clinical_validator import clinical_validator
|
| 26 |
-
from app.core.agents.complication_engine import ComplicationEngine
|
| 27 |
-
from app.core.agents.response_optimizer import parallel_processor
|
| 28 |
-
|
| 29 |
-
logger = logging.getLogger(__name__)
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
class AgentSession:
|
| 33 |
-
"""Holds the complete simulation state for a single case session."""
|
| 34 |
-
|
| 35 |
-
def __init__(self, session_id: str, case_data: dict, student_level: str = "intern"):
|
| 36 |
-
self.session_id = session_id
|
| 37 |
-
self.case_data = case_data
|
| 38 |
-
self.student_level = student_level
|
| 39 |
-
|
| 40 |
-
# Initialize all 5 agents
|
| 41 |
-
self.patient = PatientAgent()
|
| 42 |
-
self.nurse = NurseAgent()
|
| 43 |
-
self.senior = SeniorDoctorAgent()
|
| 44 |
-
self.family = FamilyAgent()
|
| 45 |
-
self.lab_tech = LabTechAgent()
|
| 46 |
-
|
| 47 |
-
# Configure agents with case data
|
| 48 |
-
self.patient.configure(case_data)
|
| 49 |
-
self.nurse.configure(case_data)
|
| 50 |
-
self.senior.configure(case_data)
|
| 51 |
-
self.family.configure(case_data)
|
| 52 |
-
self.lab_tech.configure(case_data)
|
| 53 |
-
|
| 54 |
-
# Build dynamic knowledge — each agent specializes for this case
|
| 55 |
-
self._build_agent_knowledge(case_data)
|
| 56 |
-
|
| 57 |
-
# Initialize case state manager — time, vitals, investigations
|
| 58 |
-
self.state = CaseStateManager(case_data, student_level)
|
| 59 |
-
|
| 60 |
-
# Initialize complication engine
|
| 61 |
-
self.complication_engine = ComplicationEngine(case_data, self.state)
|
| 62 |
-
|
| 63 |
-
# Conversation tracking
|
| 64 |
-
self.message_history: list[dict] = []
|
| 65 |
-
self.diagnosis_submitted = False
|
| 66 |
-
|
| 67 |
-
def _build_agent_knowledge(self, case_data: dict):
|
| 68 |
-
"""Use DynamicKnowledgeBuilder to specialize ALL agents for this case in PARALLEL.
|
| 69 |
-
|
| 70 |
-
This runs 5x faster than sequential building by using ThreadPoolExecutor.
|
| 71 |
-
"""
|
| 72 |
-
try:
|
| 73 |
-
# Build knowledge for all 5 agents in parallel!
|
| 74 |
-
all_knowledge = knowledge_builder.build_all_agent_knowledge(case_data)
|
| 75 |
-
|
| 76 |
-
# Apply the knowledge to each agent
|
| 77 |
-
agent_mapping = {
|
| 78 |
-
"patient": (self.patient, "Patient"),
|
| 79 |
-
"nurse": (self.nurse, "Nurse"),
|
| 80 |
-
"senior_doctor": (self.senior, "Senior Doctor"),
|
| 81 |
-
"family": (self.family, "Family"),
|
| 82 |
-
"lab_tech": (self.lab_tech, "Lab Tech"),
|
| 83 |
-
}
|
| 84 |
-
|
| 85 |
-
for role, (agent, label) in agent_mapping.items():
|
| 86 |
-
knowledge = all_knowledge.get(role, "")
|
| 87 |
-
if knowledge:
|
| 88 |
-
agent.set_specialized_knowledge(knowledge)
|
| 89 |
-
logger.info(f"{label} agent specialized for case ({len(knowledge)} chars)")
|
| 90 |
-
else:
|
| 91 |
-
logger.warning(f"{label} knowledge not available, using base prompts")
|
| 92 |
-
|
| 93 |
-
except Exception as e:
|
| 94 |
-
logger.error(f"Parallel knowledge building failed: {e}")
|
| 95 |
-
# Fallback to sequential if parallel fails
|
| 96 |
-
logger.info("Falling back to sequential knowledge building...")
|
| 97 |
-
for role, agent, label in [
|
| 98 |
-
("patient", self.patient, "Patient"),
|
| 99 |
-
("nurse", self.nurse, "Nurse"),
|
| 100 |
-
("senior_doctor", self.senior, "Senior Doctor"),
|
| 101 |
-
("family", self.family, "Family"),
|
| 102 |
-
("lab_tech", self.lab_tech, "Lab Tech"),
|
| 103 |
-
]:
|
| 104 |
-
try:
|
| 105 |
-
knowledge = knowledge_builder.build_knowledge(case_data, role)
|
| 106 |
-
agent.set_specialized_knowledge(knowledge)
|
| 107 |
-
logger.info(f"{label} agent specialized for case ({len(knowledge)} chars)")
|
| 108 |
-
except Exception as e:
|
| 109 |
-
logger.warning(f"{label} knowledge build failed: {e}")
|
| 110 |
-
|
| 111 |
-
def get_enriched_context(self) -> dict:
|
| 112 |
-
"""Build context dict enriched with current simulation state.
|
| 113 |
-
|
| 114 |
-
Includes current vitals and a shared ward transcript so every agent
|
| 115 |
-
knows what other agents have said — critical for coherent conversations.
|
| 116 |
-
"""
|
| 117 |
-
state_summary = self.state.get_state_summary()
|
| 118 |
-
current_vitals = self.state.current_vitals
|
| 119 |
-
|
| 120 |
-
# Build a shared ward transcript from recent messages (last 12)
|
| 121 |
-
# so each agent knows what other agents and the student have said
|
| 122 |
-
ward_transcript = ""
|
| 123 |
-
recent_msgs = self.message_history[-12:]
|
| 124 |
-
if recent_msgs:
|
| 125 |
-
lines = []
|
| 126 |
-
for m in recent_msgs:
|
| 127 |
-
speaker = m.get("display_name", m.get("agent_type", "Unknown"))
|
| 128 |
-
content = m.get("content", "")[:200] # Truncate long messages
|
| 129 |
-
lines.append(f" {speaker}: {content}")
|
| 130 |
-
ward_transcript = "\n".join(lines)
|
| 131 |
-
|
| 132 |
-
return {
|
| 133 |
-
"chief_complaint": self.case_data.get("chief_complaint", ""),
|
| 134 |
-
"specialty": self.case_data.get("specialty", ""),
|
| 135 |
-
"difficulty": self.case_data.get("difficulty", ""),
|
| 136 |
-
"simulation_state": state_summary,
|
| 137 |
-
"elapsed_minutes": self.state.elapsed_minutes,
|
| 138 |
-
"student_level": self.student_level,
|
| 139 |
-
# Current vitals (may differ from initial)
|
| 140 |
-
"current_bp": f"{current_vitals.get('bp_systolic', 120)}/{current_vitals.get('bp_diastolic', 80)}",
|
| 141 |
-
"current_hr": current_vitals.get("hr", 80),
|
| 142 |
-
"current_rr": current_vitals.get("rr", 16),
|
| 143 |
-
"current_temp": current_vitals.get("temp", 37.0),
|
| 144 |
-
"current_spo2": current_vitals.get("spo2", 98),
|
| 145 |
-
# Shared transcript so agents know what happened in the ward
|
| 146 |
-
"ward_transcript": ward_transcript,
|
| 147 |
-
}
|
| 148 |
-
|
| 149 |
-
def get_vitals(self) -> dict:
|
| 150 |
-
"""Return current vitals with trends and trajectory."""
|
| 151 |
-
vitals_display = self.state.get_vitals_display()
|
| 152 |
-
return {
|
| 153 |
-
"vitals": {
|
| 154 |
-
"bp": vitals_display["bp"],
|
| 155 |
-
"hr": vitals_display["hr"],
|
| 156 |
-
"rr": vitals_display["rr"],
|
| 157 |
-
"temp": vitals_display["temp"],
|
| 158 |
-
"spo2": vitals_display["spo2"],
|
| 159 |
-
},
|
| 160 |
-
"trends": vitals_display.get("trends", {}),
|
| 161 |
-
"trajectory": vitals_display["trajectory"],
|
| 162 |
-
"elapsed_minutes": vitals_display["elapsed_minutes"],
|
| 163 |
-
"urgency_level": self.nurse.urgency_level,
|
| 164 |
-
"patient_distress": self.patient.distress_level,
|
| 165 |
-
}
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
class AgentOrchestrator:
|
| 169 |
-
"""Coordinates all hospital agents for realistic multi-agent simulation."""
|
| 170 |
-
|
| 171 |
-
def __init__(self):
|
| 172 |
-
self.sessions: dict[str, AgentSession] = {}
|
| 173 |
-
|
| 174 |
-
def initialize_session(
|
| 175 |
-
self,
|
| 176 |
-
case_data: dict,
|
| 177 |
-
student_level: str = "intern",
|
| 178 |
-
hospital_setting: str = "medical_college",
|
| 179 |
-
) -> dict:
|
| 180 |
-
"""Create a new simulation session with all 5 agents.
|
| 181 |
-
|
| 182 |
-
Returns initial messages from all agents + simulation state.
|
| 183 |
-
"""
|
| 184 |
-
session_id = str(uuid.uuid4())[:8]
|
| 185 |
-
session = AgentSession(session_id, case_data, student_level)
|
| 186 |
-
self.sessions[session_id] = session
|
| 187 |
-
|
| 188 |
-
initial_messages = []
|
| 189 |
-
|
| 190 |
-
# 1. Nurse gives triage report (she sees the patient first)
|
| 191 |
-
nurse_report = session.nurse.get_initial_report()
|
| 192 |
-
initial_messages.append(nurse_report)
|
| 193 |
-
|
| 194 |
-
# 2. Patient presents their complaint
|
| 195 |
-
patient_greeting = session.patient.get_initial_greeting()
|
| 196 |
-
initial_messages.append(patient_greeting)
|
| 197 |
-
|
| 198 |
-
# 3. Family member provides context
|
| 199 |
-
family_context = session.family.get_initial_context()
|
| 200 |
-
initial_messages.append(family_context)
|
| 201 |
-
|
| 202 |
-
# 4. Senior doctor sets the teaching context
|
| 203 |
-
senior_guidance = session.senior.get_initial_guidance()
|
| 204 |
-
initial_messages.append(senior_guidance)
|
| 205 |
-
|
| 206 |
-
session.message_history.extend(initial_messages)
|
| 207 |
-
|
| 208 |
-
return {
|
| 209 |
-
"session_id": session_id,
|
| 210 |
-
"messages": initial_messages,
|
| 211 |
-
"vitals": session.get_vitals(),
|
| 212 |
-
"timeline": session.state.get_timeline(),
|
| 213 |
-
"investigations": session.state.get_investigation_status(),
|
| 214 |
-
}
|
| 215 |
-
|
| 216 |
-
def process_action(
|
| 217 |
-
self,
|
| 218 |
-
session_id: str,
|
| 219 |
-
action_type: str,
|
| 220 |
-
student_input: Optional[str] = None,
|
| 221 |
-
) -> dict:
|
| 222 |
-
"""Process a student action through the complete simulation pipeline.
|
| 223 |
-
|
| 224 |
-
Pipeline:
|
| 225 |
-
1. Validate action for clinical safety
|
| 226 |
-
2. Advance simulation clock
|
| 227 |
-
3. Route to appropriate agent(s)
|
| 228 |
-
4. Process treatment effects (if treatment)
|
| 229 |
-
5. Check for complications (complication engine)
|
| 230 |
-
6. Check for triggered events
|
| 231 |
-
7. Return responses + updated state
|
| 232 |
-
"""
|
| 233 |
-
session = self.sessions.get(session_id)
|
| 234 |
-
if not session:
|
| 235 |
-
return {"error": "Session not found", "messages": []}
|
| 236 |
-
|
| 237 |
-
messages = []
|
| 238 |
-
context = session.get_enriched_context()
|
| 239 |
-
|
| 240 |
-
# Step 1: Safety validation (for treatments and investigations)
|
| 241 |
-
if action_type in ("order_treatment", "order_investigation") and student_input:
|
| 242 |
-
validation = clinical_validator.validate_action(
|
| 243 |
-
student_action=student_input,
|
| 244 |
-
action_type=action_type,
|
| 245 |
-
case_data=session.case_data,
|
| 246 |
-
current_vitals=session.state.current_vitals,
|
| 247 |
-
existing_treatments=[
|
| 248 |
-
{"description": tx.description} for tx in session.state.treatments
|
| 249 |
-
],
|
| 250 |
-
)
|
| 251 |
-
|
| 252 |
-
if validation["safety_level"] == "dangerous":
|
| 253 |
-
if validation.get("nurse_intervention"):
|
| 254 |
-
messages.append({
|
| 255 |
-
"agent_type": "nurse",
|
| 256 |
-
"display_name": "Nurse Priya",
|
| 257 |
-
"content": validation["nurse_intervention"],
|
| 258 |
-
"urgency_level": "urgent",
|
| 259 |
-
"is_intervention": True,
|
| 260 |
-
})
|
| 261 |
-
if validation.get("senior_intervention"):
|
| 262 |
-
messages.append({
|
| 263 |
-
"agent_type": "senior_doctor",
|
| 264 |
-
"display_name": "Dr. Sharma",
|
| 265 |
-
"content": validation["senior_intervention"],
|
| 266 |
-
"is_intervention": True,
|
| 267 |
-
})
|
| 268 |
-
if validation.get("teaching_point"):
|
| 269 |
-
messages.append({
|
| 270 |
-
"agent_type": "senior_doctor",
|
| 271 |
-
"display_name": "Dr. Sharma",
|
| 272 |
-
"content": f"Teaching point: {validation['teaching_point']}",
|
| 273 |
-
"is_teaching": True,
|
| 274 |
-
})
|
| 275 |
-
if not validation.get("proceed", True):
|
| 276 |
-
self._store_messages(session, student_input, messages)
|
| 277 |
-
return self._build_response(session, messages)
|
| 278 |
-
|
| 279 |
-
elif validation["safety_level"] == "caution" and validation.get("nurse_intervention"):
|
| 280 |
-
messages.append({
|
| 281 |
-
"agent_type": "nurse",
|
| 282 |
-
"display_name": "Nurse Priya",
|
| 283 |
-
"content": validation["nurse_intervention"],
|
| 284 |
-
"urgency_level": "attention",
|
| 285 |
-
"is_intervention": True,
|
| 286 |
-
})
|
| 287 |
-
|
| 288 |
-
# Step 2: Advance simulation clock
|
| 289 |
-
triggered_events = session.state.advance_time(action_type)
|
| 290 |
-
|
| 291 |
-
# Step 3: Route action to agents
|
| 292 |
-
agent_responses = self._route_action(session, action_type, student_input, context)
|
| 293 |
-
messages.extend(agent_responses)
|
| 294 |
-
|
| 295 |
-
# Step 4: Process treatment (if treatment action)
|
| 296 |
-
if action_type == "order_treatment" and student_input:
|
| 297 |
-
treatment_msgs = self._process_treatment(session, student_input)
|
| 298 |
-
messages.extend(treatment_msgs)
|
| 299 |
-
|
| 300 |
-
# Step 5: Process investigation order
|
| 301 |
-
if action_type == "order_investigation" and student_input:
|
| 302 |
-
inv_messages = self._process_investigation(session, student_input)
|
| 303 |
-
messages.extend(inv_messages)
|
| 304 |
-
|
| 305 |
-
# Step 6: Check complication engine
|
| 306 |
-
complication_events = session.complication_engine.check_complications(
|
| 307 |
-
elapsed_minutes=session.state.elapsed_minutes,
|
| 308 |
-
current_vitals=session.state.current_vitals,
|
| 309 |
-
treatments=session.state.treatments,
|
| 310 |
-
investigations=session.state.investigations,
|
| 311 |
-
)
|
| 312 |
-
for event in complication_events:
|
| 313 |
-
if not event.delivered:
|
| 314 |
-
event.delivered = True
|
| 315 |
-
messages.append({
|
| 316 |
-
"agent_type": event.agent_type or "nurse",
|
| 317 |
-
"display_name": "Nurse Priya" if event.agent_type == "nurse" else "Dr. Sharma",
|
| 318 |
-
"content": event.description,
|
| 319 |
-
"event_type": event.event_type,
|
| 320 |
-
"is_event": True,
|
| 321 |
-
"urgency_level": "critical" if "critical" in event.event_type else "urgent",
|
| 322 |
-
})
|
| 323 |
-
|
| 324 |
-
# Step 7: Deliver triggered state events as agent messages
|
| 325 |
-
for event in triggered_events:
|
| 326 |
-
if not event.delivered:
|
| 327 |
-
event.delivered = True
|
| 328 |
-
messages.append({
|
| 329 |
-
"agent_type": event.agent_type or "nurse",
|
| 330 |
-
"display_name": "Nurse Priya" if event.agent_type == "nurse" else "Dr. Sharma",
|
| 331 |
-
"content": event.description,
|
| 332 |
-
"event_type": event.event_type,
|
| 333 |
-
"is_event": True,
|
| 334 |
-
})
|
| 335 |
-
|
| 336 |
-
# Store and return
|
| 337 |
-
self._store_messages(session, student_input, messages)
|
| 338 |
-
return self._build_response(session, messages)
|
| 339 |
-
|
| 340 |
-
def _route_action(
|
| 341 |
-
self,
|
| 342 |
-
session: AgentSession,
|
| 343 |
-
action_type: str,
|
| 344 |
-
student_input: Optional[str],
|
| 345 |
-
context: dict,
|
| 346 |
-
) -> list[dict]:
|
| 347 |
-
"""Route a student action to the appropriate agent(s)."""
|
| 348 |
-
messages = []
|
| 349 |
-
enriched_input = student_input or ""
|
| 350 |
-
|
| 351 |
-
# Use parallel processing for actions involving multiple agents
|
| 352 |
-
if action_type == "talk_to_patient":
|
| 353 |
-
# Prepare agents to process in parallel
|
| 354 |
-
agents_to_process = [(
|
| 355 |
-
session.patient,
|
| 356 |
-
enriched_input or "Tell me about your problem",
|
| 357 |
-
context,
|
| 358 |
-
)]
|
| 359 |
-
|
| 360 |
-
# Family may interject (50% chance on patient conversations)
|
| 361 |
-
if random.random() < 0.5:
|
| 362 |
-
agents_to_process.append((
|
| 363 |
-
session.family,
|
| 364 |
-
f"The doctor is asking the patient: {enriched_input}. You may add context or interject.",
|
| 365 |
-
context,
|
| 366 |
-
))
|
| 367 |
-
|
| 368 |
-
# Process in parallel if multiple agents
|
| 369 |
-
if len(agents_to_process) > 1:
|
| 370 |
-
messages = parallel_processor.process_agents_parallel(agents_to_process, max_workers=2)
|
| 371 |
-
else:
|
| 372 |
-
messages.append(session.patient.respond(agents_to_process[0][1], context))
|
| 373 |
-
|
| 374 |
-
elif action_type == "ask_nurse":
|
| 375 |
-
resp = session.nurse.respond(
|
| 376 |
-
enriched_input or "What are the current vitals?",
|
| 377 |
-
context,
|
| 378 |
-
)
|
| 379 |
-
messages.append(resp)
|
| 380 |
-
|
| 381 |
-
elif action_type == "consult_senior":
|
| 382 |
-
resp = session.senior.respond(
|
| 383 |
-
enriched_input or "What do you think about this case?",
|
| 384 |
-
context,
|
| 385 |
-
)
|
| 386 |
-
messages.append(resp)
|
| 387 |
-
|
| 388 |
-
elif action_type == "talk_to_family":
|
| 389 |
-
resp = session.family.respond(
|
| 390 |
-
enriched_input or "Can you tell me about the patient's background?",
|
| 391 |
-
context,
|
| 392 |
-
)
|
| 393 |
-
messages.append(resp)
|
| 394 |
-
|
| 395 |
-
elif action_type == "ask_lab":
|
| 396 |
-
resp = session.lab_tech.respond(
|
| 397 |
-
enriched_input or "What is the status of the investigations?",
|
| 398 |
-
context,
|
| 399 |
-
)
|
| 400 |
-
messages.append(resp)
|
| 401 |
-
|
| 402 |
-
elif action_type == "examine_patient":
|
| 403 |
-
# Parallel processing for patient and nurse during examination
|
| 404 |
-
agents_to_process = [
|
| 405 |
-
(
|
| 406 |
-
session.patient,
|
| 407 |
-
f"The doctor is examining you. {enriched_input or 'General examination.'}",
|
| 408 |
-
context,
|
| 409 |
-
),
|
| 410 |
-
(
|
| 411 |
-
session.nurse,
|
| 412 |
-
f"Assisting with examination. Student is examining: {enriched_input or 'general exam'}. Report relevant findings from the case.",
|
| 413 |
-
context,
|
| 414 |
-
),
|
| 415 |
-
]
|
| 416 |
-
|
| 417 |
-
messages = parallel_processor.process_agents_parallel(agents_to_process, max_workers=2)
|
| 418 |
-
|
| 419 |
-
exam_data = self._extract_examination_findings(session, enriched_input)
|
| 420 |
-
if exam_data:
|
| 421 |
-
messages.append({
|
| 422 |
-
"agent_type": "system",
|
| 423 |
-
"display_name": "Examination",
|
| 424 |
-
"content": "Examination findings available",
|
| 425 |
-
"examination_findings": exam_data,
|
| 426 |
-
"is_event": True,
|
| 427 |
-
"event_type": "examination",
|
| 428 |
-
})
|
| 429 |
-
|
| 430 |
-
elif action_type == "team_huddle":
|
| 431 |
-
# Team huddle involves multiple agents - process first 3 in parallel
|
| 432 |
-
agents_to_process = [
|
| 433 |
-
(
|
| 434 |
-
session.nurse,
|
| 435 |
-
f"Team huddle called. Report current patient status, pending investigations, and any concerns. Student's question: {enriched_input or 'Let us discuss the case.'}",
|
| 436 |
-
context,
|
| 437 |
-
),
|
| 438 |
-
(
|
| 439 |
-
session.patient,
|
| 440 |
-
"The doctors are discussing your case. Is there anything new you want to tell them?",
|
| 441 |
-
context,
|
| 442 |
-
),
|
| 443 |
-
(
|
| 444 |
-
session.family,
|
| 445 |
-
"The medical team is discussing your relative's case. Share any concerns.",
|
| 446 |
-
context,
|
| 447 |
-
),
|
| 448 |
-
]
|
| 449 |
-
|
| 450 |
-
# Process first 3 agents in parallel
|
| 451 |
-
parallel_messages = parallel_processor.process_agents_parallel(agents_to_process, max_workers=3)
|
| 452 |
-
messages.extend(parallel_messages)
|
| 453 |
-
|
| 454 |
-
# Senior doctor needs nurse's response, so process after
|
| 455 |
-
nurse_content = parallel_messages[0].get('content', '')[:200] if parallel_messages else ""
|
| 456 |
-
senior_resp = session.senior.respond(
|
| 457 |
-
f"Team huddle. Nurse has reported: {nurse_content}. "
|
| 458 |
-
f"Student asks: {enriched_input or 'What should we focus on?'}. "
|
| 459 |
-
"Guide the student based on current case progress.",
|
| 460 |
-
context,
|
| 461 |
-
)
|
| 462 |
-
messages.append(senior_resp)
|
| 463 |
-
|
| 464 |
-
elif action_type in ("order_treatment", "order_investigation"):
|
| 465 |
-
pass # Handled separately in process_action
|
| 466 |
-
|
| 467 |
-
else:
|
| 468 |
-
logger.warning(f"Unknown action_type received: {action_type}")
|
| 469 |
-
messages.append({
|
| 470 |
-
"agent_type": "system",
|
| 471 |
-
"display_name": "System",
|
| 472 |
-
"content": f"Unknown action: {action_type}. Please select a valid action.",
|
| 473 |
-
})
|
| 474 |
-
|
| 475 |
-
return messages
|
| 476 |
-
|
| 477 |
-
def _extract_examination_findings(self, session: AgentSession, exam_request: str) -> Optional[dict]:
|
| 478 |
-
"""Extract structured examination findings from case data for the exam modal."""
|
| 479 |
-
findings: dict = {}
|
| 480 |
-
|
| 481 |
-
for stage in session.case_data.get("stages", []):
|
| 482 |
-
if stage.get("stage") == "physical_exam":
|
| 483 |
-
exam_text = stage.get("info", "")
|
| 484 |
-
sections = {
|
| 485 |
-
"inspection": [],
|
| 486 |
-
"palpation": [],
|
| 487 |
-
"percussion": [],
|
| 488 |
-
"auscultation": [],
|
| 489 |
-
"special_tests": [],
|
| 490 |
-
}
|
| 491 |
-
|
| 492 |
-
current_section = "inspection"
|
| 493 |
-
for line in exam_text.split("\n"):
|
| 494 |
-
line = line.strip()
|
| 495 |
-
if not line:
|
| 496 |
-
continue
|
| 497 |
-
line_lower = line.lower()
|
| 498 |
-
if "inspection" in line_lower or "general" in line_lower or "look" in line_lower:
|
| 499 |
-
current_section = "inspection"
|
| 500 |
-
elif "palpat" in line_lower or "feel" in line_lower or "tender" in line_lower:
|
| 501 |
-
current_section = "palpation"
|
| 502 |
-
elif "percuss" in line_lower:
|
| 503 |
-
current_section = "percussion"
|
| 504 |
-
elif "auscult" in line_lower or "listen" in line_lower or "heart sound" in line_lower or "breath" in line_lower:
|
| 505 |
-
current_section = "auscultation"
|
| 506 |
-
elif "special" in line_lower or "test" in line_lower or "sign" in line_lower:
|
| 507 |
-
current_section = "special_tests"
|
| 508 |
-
sections[current_section].append(line)
|
| 509 |
-
|
| 510 |
-
for key, lines in sections.items():
|
| 511 |
-
if lines:
|
| 512 |
-
findings[key] = "\n".join(lines)
|
| 513 |
-
break
|
| 514 |
-
|
| 515 |
-
if not findings:
|
| 516 |
-
return None
|
| 517 |
-
|
| 518 |
-
specialty = session.case_data.get("specialty", "")
|
| 519 |
-
if specialty in ("cardiology", "respiratory"):
|
| 520 |
-
findings["sounds"] = [
|
| 521 |
-
{"label": "Heart sounds", "description": "Auscultation findings as described above"},
|
| 522 |
-
]
|
| 523 |
-
if specialty == "dermatology":
|
| 524 |
-
findings["images"] = [
|
| 525 |
-
{"label": "Skin findings", "description": "Visual examination findings as described above"},
|
| 526 |
-
]
|
| 527 |
-
|
| 528 |
-
return findings
|
| 529 |
-
|
| 530 |
-
def _process_treatment(self, session: AgentSession, treatment_description: str) -> list[dict]:
|
| 531 |
-
"""Process a treatment order through the treatment engine."""
|
| 532 |
-
messages = []
|
| 533 |
-
|
| 534 |
-
assessment = treatment_engine.assess_treatment(
|
| 535 |
-
treatment_description=treatment_description,
|
| 536 |
-
case_data=session.case_data,
|
| 537 |
-
current_vitals=session.state.current_vitals,
|
| 538 |
-
existing_treatments=[
|
| 539 |
-
{"description": tx.description} for tx in session.state.treatments
|
| 540 |
-
],
|
| 541 |
-
specialized_knowledge=session.nurse.specialized_knowledge,
|
| 542 |
-
)
|
| 543 |
-
|
| 544 |
-
session.state.record_treatment(
|
| 545 |
-
description=treatment_description,
|
| 546 |
-
effects=assessment.get("vital_effects", {}),
|
| 547 |
-
is_appropriate=assessment.get("is_appropriate", True),
|
| 548 |
-
safety_note=assessment.get("reasoning", ""),
|
| 549 |
-
)
|
| 550 |
-
|
| 551 |
-
nurse_msg = assessment.get("nurse_response", f"Starting {treatment_description} as ordered.")
|
| 552 |
-
messages.append({
|
| 553 |
-
"agent_type": "nurse",
|
| 554 |
-
"display_name": "Nurse Priya",
|
| 555 |
-
"content": nurse_msg,
|
| 556 |
-
"urgency_level": "routine",
|
| 557 |
-
})
|
| 558 |
-
|
| 559 |
-
monitoring = assessment.get("monitoring")
|
| 560 |
-
if monitoring and monitoring != "Continue routine monitoring.":
|
| 561 |
-
messages.append({
|
| 562 |
-
"agent_type": "nurse",
|
| 563 |
-
"display_name": "Nurse Priya",
|
| 564 |
-
"content": f"I'll monitor: {monitoring}",
|
| 565 |
-
"urgency_level": "attention",
|
| 566 |
-
})
|
| 567 |
-
|
| 568 |
-
return messages
|
| 569 |
-
|
| 570 |
-
def _process_investigation(self, session: AgentSession, investigation_description: str) -> list[dict]:
|
| 571 |
-
"""Process an investigation order."""
|
| 572 |
-
messages = []
|
| 573 |
-
|
| 574 |
-
inv_type = self._parse_investigation_type(investigation_description)
|
| 575 |
-
is_urgent = any(w in investigation_description.lower() for w in ["urgent", "stat", "emergency", "immediately"])
|
| 576 |
-
|
| 577 |
-
investigation = session.state.order_investigation(inv_type, is_urgent)
|
| 578 |
-
|
| 579 |
-
lab_resp = session.lab_tech.respond(
|
| 580 |
-
f"New investigation ordered: {investigation.label}. {'Mark as URGENT.' if is_urgent else 'Routine.'} Process this investigation.",
|
| 581 |
-
session.get_enriched_context(),
|
| 582 |
-
)
|
| 583 |
-
messages.append(lab_resp)
|
| 584 |
-
|
| 585 |
-
eta_text = f"{investigation.turnaround} minutes" if investigation.turnaround < 60 else f"{investigation.turnaround // 60} hours"
|
| 586 |
-
urgency_text = "URGENT — " if is_urgent else ""
|
| 587 |
-
|
| 588 |
-
messages.append({
|
| 589 |
-
"agent_type": "nurse",
|
| 590 |
-
"display_name": "Nurse Priya",
|
| 591 |
-
"content": (
|
| 592 |
-
f"Noted, doctor. {urgency_text}{investigation.label} ordered. "
|
| 593 |
-
f"Sample collection done. Expected turnaround: {eta_text}. "
|
| 594 |
-
f"I'll inform you as soon as results are ready."
|
| 595 |
-
),
|
| 596 |
-
"urgency_level": "routine",
|
| 597 |
-
})
|
| 598 |
-
|
| 599 |
-
return messages
|
| 600 |
-
|
| 601 |
-
def _parse_investigation_type(self, description: str) -> str:
|
| 602 |
-
"""Parse investigation type from free-text description."""
|
| 603 |
-
desc = description.lower().strip()
|
| 604 |
-
|
| 605 |
-
mappings = {
|
| 606 |
-
"cbc": "cbc", "complete blood count": "cbc", "blood count": "cbc", "hemogram": "cbc",
|
| 607 |
-
"rft": "rft", "renal function": "rft", "kidney function": "rft", "creatinine": "rft",
|
| 608 |
-
"lft": "lft", "liver function": "lft", "bilirubin": "lft", "sgpt": "lft",
|
| 609 |
-
"blood sugar": "blood_sugar", "rbs": "rbs", "fbs": "fbs",
|
| 610 |
-
"abg": "abg", "arterial blood gas": "abg", "blood gas": "abg",
|
| 611 |
-
"ecg": "ecg", "ekg": "ecg", "electrocardiogram": "ecg",
|
| 612 |
-
"chest x-ray": "xray_chest", "cxr": "xray_chest", "chest xray": "xray_chest",
|
| 613 |
-
"x-ray": "xray", "xray": "xray",
|
| 614 |
-
"ultrasound": "ultrasound", "usg": "ultrasound",
|
| 615 |
-
"echo": "echo", "echocardiography": "echo", "2d echo": "echo",
|
| 616 |
-
"ct scan": "ct_scan", "ct": "ct_scan",
|
| 617 |
-
"mri": "mri",
|
| 618 |
-
"troponin": "troponin", "d-dimer": "d_dimer", "d dimer": "d_dimer",
|
| 619 |
-
"blood culture": "blood_culture",
|
| 620 |
-
"urine routine": "urine_routine", "urine culture": "urine_culture",
|
| 621 |
-
"electrolytes": "serum_electrolytes", "sodium": "serum_electrolytes",
|
| 622 |
-
"coagulation": "coagulation", "pt inr": "pt_inr", "pt/inr": "pt_inr",
|
| 623 |
-
"thyroid": "thyroid", "tft": "thyroid", "tsh": "thyroid",
|
| 624 |
-
"hba1c": "hba1c", "amylase": "amylase", "lipase": "lipase",
|
| 625 |
-
"dengue": "dengue_ns1", "ns1": "dengue_ns1",
|
| 626 |
-
"malaria": "malaria_smear", "peripheral smear": "malaria_smear",
|
| 627 |
-
"widal": "widal", "hiv": "hiv", "hbsag": "hbsag",
|
| 628 |
-
"csf": "csf_analysis", "lumbar puncture": "csf_analysis",
|
| 629 |
-
"blood group": "blood_group", "crossmatch": "crossmatch",
|
| 630 |
-
"procalcitonin": "procalcitonin", "bnp": "bnp",
|
| 631 |
-
}
|
| 632 |
-
|
| 633 |
-
for keyword, inv_type in mappings.items():
|
| 634 |
-
if keyword in desc:
|
| 635 |
-
return inv_type
|
| 636 |
-
|
| 637 |
-
return desc.replace(" ", "_")[:30]
|
| 638 |
-
|
| 639 |
-
def _store_messages(self, session: AgentSession, student_input: Optional[str], messages: list[dict]):
|
| 640 |
-
"""Store messages in session history."""
|
| 641 |
-
if student_input:
|
| 642 |
-
session.message_history.append({
|
| 643 |
-
"agent_type": "student",
|
| 644 |
-
"display_name": "You",
|
| 645 |
-
"content": student_input,
|
| 646 |
-
})
|
| 647 |
-
session.message_history.extend(messages)
|
| 648 |
-
|
| 649 |
-
def _build_response(self, session: AgentSession, messages: list[dict]) -> dict:
|
| 650 |
-
"""Build the standard response payload."""
|
| 651 |
-
return {
|
| 652 |
-
"session_id": session.session_id,
|
| 653 |
-
"messages": messages,
|
| 654 |
-
"vitals": session.get_vitals(),
|
| 655 |
-
"timeline": session.state.get_timeline(),
|
| 656 |
-
"investigations": session.state.get_investigation_status(),
|
| 657 |
-
"complications_fired": session.complication_engine.get_fired_complications(),
|
| 658 |
-
}
|
| 659 |
-
|
| 660 |
-
def process_team_huddle(self, session_id: str, student_input: Optional[str] = None) -> dict:
|
| 661 |
-
"""Trigger a team huddle — all agents discuss the case."""
|
| 662 |
-
return self.process_action(session_id, "team_huddle", student_input)
|
| 663 |
-
|
| 664 |
-
def advance_time(self, session_id: str, minutes: int = 30) -> dict:
|
| 665 |
-
"""Explicitly advance simulation time (e.g., 'wait for results')."""
|
| 666 |
-
session = self.sessions.get(session_id)
|
| 667 |
-
if not session:
|
| 668 |
-
return {"error": "Session not found", "messages": []}
|
| 669 |
-
|
| 670 |
-
messages = []
|
| 671 |
-
|
| 672 |
-
session.state.elapsed_minutes += minutes
|
| 673 |
-
session.state._evolve_vitals(minutes)
|
| 674 |
-
|
| 675 |
-
events = session.state._check_investigations()
|
| 676 |
-
events.extend(session.state._check_patient_events())
|
| 677 |
-
|
| 678 |
-
complication_events = session.complication_engine.check_complications(
|
| 679 |
-
elapsed_minutes=session.state.elapsed_minutes,
|
| 680 |
-
current_vitals=session.state.current_vitals,
|
| 681 |
-
treatments=session.state.treatments,
|
| 682 |
-
investigations=session.state.investigations,
|
| 683 |
-
)
|
| 684 |
-
|
| 685 |
-
session.state.vitals_history.append({
|
| 686 |
-
"time": session.state.elapsed_minutes,
|
| 687 |
-
**session.state.current_vitals,
|
| 688 |
-
})
|
| 689 |
-
|
| 690 |
-
for event in events:
|
| 691 |
-
if not event.delivered:
|
| 692 |
-
event.delivered = True
|
| 693 |
-
messages.append({
|
| 694 |
-
"agent_type": event.agent_type or "nurse",
|
| 695 |
-
"display_name": "Nurse Priya" if event.agent_type == "nurse" else "Dr. Sharma",
|
| 696 |
-
"content": event.description,
|
| 697 |
-
"event_type": event.event_type,
|
| 698 |
-
"is_event": True,
|
| 699 |
-
})
|
| 700 |
-
|
| 701 |
-
for event in complication_events:
|
| 702 |
-
if not event.delivered:
|
| 703 |
-
event.delivered = True
|
| 704 |
-
messages.append({
|
| 705 |
-
"agent_type": event.agent_type or "nurse",
|
| 706 |
-
"display_name": "Nurse Priya" if event.agent_type == "nurse" else "Dr. Sharma",
|
| 707 |
-
"content": event.description,
|
| 708 |
-
"event_type": event.event_type,
|
| 709 |
-
"is_event": True,
|
| 710 |
-
"urgency_level": "critical" if "critical" in event.event_type else "urgent",
|
| 711 |
-
})
|
| 712 |
-
|
| 713 |
-
if not messages:
|
| 714 |
-
messages.append({
|
| 715 |
-
"agent_type": "nurse",
|
| 716 |
-
"display_name": "Nurse Priya",
|
| 717 |
-
"content": f"Doctor, {minutes} minutes have passed. Patient vitals are stable. No new developments.",
|
| 718 |
-
"urgency_level": "routine",
|
| 719 |
-
})
|
| 720 |
-
|
| 721 |
-
self._store_messages(session, None, messages)
|
| 722 |
-
return self._build_response(session, messages)
|
| 723 |
-
|
| 724 |
-
def get_session_vitals(self, session_id: str) -> Optional[dict]:
|
| 725 |
-
"""Get current vitals for a session."""
|
| 726 |
-
session = self.sessions.get(session_id)
|
| 727 |
-
if not session:
|
| 728 |
-
return None
|
| 729 |
-
return session.get_vitals()
|
| 730 |
-
|
| 731 |
-
def get_session(self, session_id: str) -> Optional[AgentSession]:
|
| 732 |
-
"""Get an agent session by ID."""
|
| 733 |
-
return self.sessions.get(session_id)
|
| 734 |
-
|
| 735 |
-
def get_investigation_status(self, session_id: str) -> Optional[list[dict]]:
|
| 736 |
-
"""Get investigation status for a session."""
|
| 737 |
-
session = self.sessions.get(session_id)
|
| 738 |
-
if not session:
|
| 739 |
-
return None
|
| 740 |
-
return session.state.get_investigation_status()
|
| 741 |
-
|
| 742 |
-
def get_timeline(self, session_id: str) -> Optional[list[dict]]:
|
| 743 |
-
"""Get simulation timeline for a session."""
|
| 744 |
-
session = self.sessions.get(session_id)
|
| 745 |
-
if not session:
|
| 746 |
-
return None
|
| 747 |
-
return session.state.get_timeline()
|
| 748 |
-
|
| 749 |
-
|
| 750 |
-
# Singleton orchestrator shared across the app
|
| 751 |
-
orchestrator = AgentOrchestrator()
|
| 752 |
-
|
| 753 |
-
|
| 754 |
-
class SimulationOrchestrator:
|
| 755 |
-
"""Simplified orchestrator for the /api/simulation endpoints.
|
| 756 |
-
|
| 757 |
-
Wraps case generation + simulation state management to provide
|
| 758 |
-
the interface expected by simulation.py (start_simulation,
|
| 759 |
-
process_student_message, complete_simulation, get_simulation).
|
| 760 |
-
"""
|
| 761 |
-
|
| 762 |
-
def __init__(self):
|
| 763 |
-
from app.core.rag.shared import case_generator
|
| 764 |
-
from app.models.simulation import (
|
| 765 |
-
SimulationState,
|
| 766 |
-
PatientProfile,
|
| 767 |
-
PatientGender,
|
| 768 |
-
EmotionalState,
|
| 769 |
-
RapportLevel,
|
| 770 |
-
SimulationMessage,
|
| 771 |
-
TutorFeedback,
|
| 772 |
-
FeedbackType,
|
| 773 |
-
)
|
| 774 |
-
|
| 775 |
-
self._case_generator = case_generator
|
| 776 |
-
self._simulations: dict[str, SimulationState] = {}
|
| 777 |
-
|
| 778 |
-
# Store model refs for use in methods
|
| 779 |
-
self._SimulationState = SimulationState
|
| 780 |
-
self._PatientProfile = PatientProfile
|
| 781 |
-
self._PatientGender = PatientGender
|
| 782 |
-
self._EmotionalState = EmotionalState
|
| 783 |
-
self._RapportLevel = RapportLevel
|
| 784 |
-
self._SimulationMessage = SimulationMessage
|
| 785 |
-
self._TutorFeedback = TutorFeedback
|
| 786 |
-
self._FeedbackType = FeedbackType
|
| 787 |
-
|
| 788 |
-
def start_simulation(self, specialty: str = "general_medicine", difficulty: str = "intermediate"):
|
| 789 |
-
"""Start a new patient simulation, returning a SimulationState."""
|
| 790 |
-
case = self._case_generator.generate_case(specialty=specialty, difficulty=difficulty)
|
| 791 |
-
case_id = case.get("id", str(uuid.uuid4())[:8])
|
| 792 |
-
|
| 793 |
-
# Map case data to PatientProfile
|
| 794 |
-
gender_raw = case.get("patient_gender", "male").lower()
|
| 795 |
-
gender_map = {"male": self._PatientGender.MALE, "female": self._PatientGender.FEMALE,
|
| 796 |
-
"pregnant": self._PatientGender.PREGNANT}
|
| 797 |
-
gender = gender_map.get(gender_raw, self._PatientGender.MALE)
|
| 798 |
-
|
| 799 |
-
profile = self._PatientProfile(
|
| 800 |
-
age=case.get("patient_age", 45),
|
| 801 |
-
gender=gender,
|
| 802 |
-
name=case.get("patient_name", "Patient"),
|
| 803 |
-
chief_complaint=case.get("chief_complaint", ""),
|
| 804 |
-
setting=case.get("setting", "OPD"),
|
| 805 |
-
specialty=specialty,
|
| 806 |
-
difficulty=difficulty,
|
| 807 |
-
actual_diagnosis=case.get("diagnosis", "Unknown"),
|
| 808 |
-
key_history_points=case.get("key_history", case.get("learning_points", [])),
|
| 809 |
-
physical_exam_findings=case.get("physical_exam", {}),
|
| 810 |
-
)
|
| 811 |
-
|
| 812 |
-
initial_message = self._SimulationMessage(
|
| 813 |
-
role="patient",
|
| 814 |
-
content=case.get("initial_presentation", f"Doctor, {profile.chief_complaint}"),
|
| 815 |
-
emotional_state=self._EmotionalState.CONCERNED,
|
| 816 |
-
)
|
| 817 |
-
|
| 818 |
-
sim = self._SimulationState(
|
| 819 |
-
case_id=case_id,
|
| 820 |
-
patient_profile=profile,
|
| 821 |
-
emotional_state=self._EmotionalState.CONCERNED,
|
| 822 |
-
rapport_level=self._RapportLevel.MODERATE,
|
| 823 |
-
messages=[initial_message],
|
| 824 |
-
)
|
| 825 |
-
|
| 826 |
-
self._simulations[case_id] = sim
|
| 827 |
-
return sim
|
| 828 |
-
|
| 829 |
-
def process_student_message(self, case_id: str, student_message: str):
|
| 830 |
-
"""Process a student message and return the updated SimulationState."""
|
| 831 |
-
sim = self._get_or_raise(case_id)
|
| 832 |
-
|
| 833 |
-
# Record student message
|
| 834 |
-
sim.messages.append(self._SimulationMessage(role="student", content=student_message))
|
| 835 |
-
|
| 836 |
-
# Generate patient response using the agent orchestrator if possible
|
| 837 |
-
patient_response = self._generate_patient_response(sim, student_message)
|
| 838 |
-
|
| 839 |
-
sim.messages.append(self._SimulationMessage(
|
| 840 |
-
role="patient",
|
| 841 |
-
content=patient_response,
|
| 842 |
-
emotional_state=sim.emotional_state,
|
| 843 |
-
))
|
| 844 |
-
|
| 845 |
-
# Generate tutor feedback
|
| 846 |
-
feedback_type, feedback_msg = self._evaluate_student_message(student_message)
|
| 847 |
-
sim.tutor_feedback.append(self._TutorFeedback(type=feedback_type, message=feedback_msg))
|
| 848 |
-
|
| 849 |
-
return sim
|
| 850 |
-
|
| 851 |
-
def complete_simulation(self, case_id: str, diagnosis: str, reasoning: str):
|
| 852 |
-
"""Mark simulation as complete with student's diagnosis."""
|
| 853 |
-
from datetime import datetime
|
| 854 |
-
|
| 855 |
-
sim = self._get_or_raise(case_id)
|
| 856 |
-
sim.student_diagnosis = diagnosis
|
| 857 |
-
sim.student_reasoning = reasoning
|
| 858 |
-
sim.completed_at = datetime.now()
|
| 859 |
-
return sim
|
| 860 |
-
|
| 861 |
-
def get_simulation(self, case_id: str):
|
| 862 |
-
"""Get simulation state by case_id."""
|
| 863 |
-
return self._get_or_raise(case_id)
|
| 864 |
-
|
| 865 |
-
def _get_or_raise(self, case_id: str):
|
| 866 |
-
sim = self._simulations.get(case_id)
|
| 867 |
-
if not sim:
|
| 868 |
-
raise ValueError(f"Simulation {case_id} not found")
|
| 869 |
-
return sim
|
| 870 |
-
|
| 871 |
-
def _generate_patient_response(self, sim, student_message: str) -> str:
|
| 872 |
-
"""Generate a contextual patient response."""
|
| 873 |
-
complaint = sim.patient_profile.chief_complaint
|
| 874 |
-
name = sim.patient_profile.name
|
| 875 |
-
|
| 876 |
-
open_ended_markers = ["tell me", "describe", "how", "what", "when", "where"]
|
| 877 |
-
is_open = any(m in student_message.lower() for m in open_ended_markers)
|
| 878 |
-
|
| 879 |
-
empathy_markers = ["understand", "worried", "difficult", "sorry", "must be"]
|
| 880 |
-
shows_empathy = any(m in student_message.lower() for m in empathy_markers)
|
| 881 |
-
|
| 882 |
-
if shows_empathy:
|
| 883 |
-
if sim.rapport_level.value < 5:
|
| 884 |
-
sim.rapport_level = self._RapportLevel(min(5, sim.rapport_level.value + 1))
|
| 885 |
-
sim.emotional_state = self._EmotionalState.CALM
|
| 886 |
-
return (
|
| 887 |
-
f"Thank you doctor, that makes me feel better. "
|
| 888 |
-
f"Actually, I also wanted to mention that the {complaint} has been getting worse at night."
|
| 889 |
-
)
|
| 890 |
-
|
| 891 |
-
if is_open:
|
| 892 |
-
return (
|
| 893 |
-
f"Doctor, the {complaint} started about 3-4 days ago. "
|
| 894 |
-
f"First I thought it was nothing, tried some home remedies. "
|
| 895 |
-
f"But it kept getting worse so my family brought me here."
|
| 896 |
-
)
|
| 897 |
-
|
| 898 |
-
return (
|
| 899 |
-
f"Yes doctor, the {complaint} is still bothering me. "
|
| 900 |
-
f"What do you think it could be?"
|
| 901 |
-
)
|
| 902 |
-
|
| 903 |
-
def _evaluate_student_message(self, message: str):
|
| 904 |
-
"""Simple heuristic evaluation of student communication."""
|
| 905 |
-
empathy_markers = ["understand", "worried", "difficult", "sorry", "must be", "concern"]
|
| 906 |
-
open_markers = ["tell me", "describe", "how do you", "what happened", "can you explain"]
|
| 907 |
-
|
| 908 |
-
if any(m in message.lower() for m in empathy_markers):
|
| 909 |
-
return self._FeedbackType.POSITIVE, "Good empathetic communication. This builds rapport."
|
| 910 |
-
if any(m in message.lower() for m in open_markers):
|
| 911 |
-
return self._FeedbackType.POSITIVE, "Nice open-ended question. This encourages the patient to share more."
|
| 912 |
-
if message.strip().endswith("?") and len(message.split()) > 5:
|
| 913 |
-
return self._FeedbackType.WARNING, "Consider using more open-ended questions to gather richer history."
|
| 914 |
-
return self._FeedbackType.WARNING, "Try to build rapport with empathetic language before diving into clinical questions."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/patient.py
DELETED
|
@@ -1,167 +0,0 @@
|
|
| 1 |
-
"""Patient Agent - Generates realistic patient responses using Claude Opus."""
|
| 2 |
-
import logging
|
| 3 |
-
import os
|
| 4 |
-
from typing import Optional
|
| 5 |
-
|
| 6 |
-
import anthropic
|
| 7 |
-
|
| 8 |
-
from app.models.simulation import EmotionalState, PatientGender
|
| 9 |
-
|
| 10 |
-
logger = logging.getLogger(__name__)
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
PATIENT_SYSTEM_PROMPT = """You are a {age}yo {gender} patient in {setting}. You're feeling {emotional_state}.
|
| 14 |
-
|
| 15 |
-
CRITICAL RULES:
|
| 16 |
-
1. NEVER use medical jargon - you're not a doctor
|
| 17 |
-
2. Speak naturally using simple language
|
| 18 |
-
3. Mix Hindi-English words naturally if appropriate (e.g., "dard hai", "seene mein")
|
| 19 |
-
4. Show emotion in your responses
|
| 20 |
-
5. Your emotional state affects how you respond:
|
| 21 |
-
- CALM: Cooperative, detailed answers, trusting
|
| 22 |
-
- CONCERNED: A bit worried, needs reassurance, mostly cooperative
|
| 23 |
-
- ANXIOUS: Short answers, worried, needs calming down first
|
| 24 |
-
- DEFENSIVE: Resistant, minimal answers, feels judged or rushed
|
| 25 |
-
|
| 26 |
-
IMPORTANT BEHAVIOR RULES:
|
| 27 |
-
- If student is warm/empathetic → you become more CALM
|
| 28 |
-
- If student is cold/rushed/dismissive → you become more DEFENSIVE/ANXIOUS
|
| 29 |
-
- If student asks open-ended questions → you give more details
|
| 30 |
-
- If student just fires closed questions → you give minimal yes/no answers
|
| 31 |
-
- If student acknowledges your distress → you calm down
|
| 32 |
-
|
| 33 |
-
Your complaint: {chief_complaint}
|
| 34 |
-
|
| 35 |
-
Key information you know (only share if asked properly):
|
| 36 |
-
{key_history}
|
| 37 |
-
|
| 38 |
-
Physical symptoms you're experiencing:
|
| 39 |
-
{physical_symptoms}
|
| 40 |
-
|
| 41 |
-
Examples of realistic patient speech:
|
| 42 |
-
- Good: "Doctor, seene mein bahut dard ho raha hai, left side mein"
|
| 43 |
-
- Bad: "I have substernal chest pain radiating to the left arm"
|
| 44 |
-
|
| 45 |
-
- Good: "Haan doctor, mujhe diabetes hai, 5 saal se"
|
| 46 |
-
- Bad: "I have type 2 diabetes mellitus for 5 years"
|
| 47 |
-
|
| 48 |
-
Respond AS THE PATIENT. Stay in character."""
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
class PatientAgent:
|
| 52 |
-
"""Simulates a realistic patient using Claude Opus API."""
|
| 53 |
-
|
| 54 |
-
def __init__(self):
|
| 55 |
-
self.api_key = os.environ.get("ANTHROPIC_API_KEY")
|
| 56 |
-
self.client = None
|
| 57 |
-
if self.api_key and self.api_key != "sk-ant-your-key-here":
|
| 58 |
-
try:
|
| 59 |
-
self.client = anthropic.Anthropic(api_key=self.api_key)
|
| 60 |
-
except Exception as e:
|
| 61 |
-
logger.error(f"Failed to initialize Claude client for patient agent: {e}")
|
| 62 |
-
raise
|
| 63 |
-
|
| 64 |
-
def generate_response(
|
| 65 |
-
self,
|
| 66 |
-
student_message: str,
|
| 67 |
-
patient_profile: dict,
|
| 68 |
-
emotional_state: EmotionalState,
|
| 69 |
-
conversation_history: list,
|
| 70 |
-
) -> str:
|
| 71 |
-
"""Generate patient response based on student message and current state."""
|
| 72 |
-
|
| 73 |
-
if not self.client:
|
| 74 |
-
raise ValueError("Claude API client not initialized")
|
| 75 |
-
|
| 76 |
-
# Build patient context
|
| 77 |
-
system_prompt = PATIENT_SYSTEM_PROMPT.format(
|
| 78 |
-
age=patient_profile["age"],
|
| 79 |
-
gender=patient_profile["gender"],
|
| 80 |
-
setting=patient_profile["setting"],
|
| 81 |
-
emotional_state=emotional_state.value,
|
| 82 |
-
chief_complaint=patient_profile["chief_complaint"],
|
| 83 |
-
key_history="\n".join(f"- {item}" for item in patient_profile.get("key_history_points", [])),
|
| 84 |
-
physical_symptoms=patient_profile.get("physical_symptoms", "Describe as appropriate"),
|
| 85 |
-
)
|
| 86 |
-
|
| 87 |
-
# Build conversation history
|
| 88 |
-
messages = []
|
| 89 |
-
for msg in conversation_history:
|
| 90 |
-
messages.append({
|
| 91 |
-
"role": "user" if msg["role"] == "student" else "assistant",
|
| 92 |
-
"content": msg["content"],
|
| 93 |
-
})
|
| 94 |
-
|
| 95 |
-
# Add current student message
|
| 96 |
-
messages.append({
|
| 97 |
-
"role": "user",
|
| 98 |
-
"content": student_message,
|
| 99 |
-
})
|
| 100 |
-
|
| 101 |
-
try:
|
| 102 |
-
response = self.client.messages.create(
|
| 103 |
-
model="claude-opus-4-6",
|
| 104 |
-
max_tokens=400,
|
| 105 |
-
system=system_prompt,
|
| 106 |
-
messages=messages,
|
| 107 |
-
temperature=0.8, # Slightly higher for natural variation
|
| 108 |
-
)
|
| 109 |
-
return response.content[0].text.strip()
|
| 110 |
-
|
| 111 |
-
except Exception as e:
|
| 112 |
-
logger.error(f"Patient agent API error: {e}")
|
| 113 |
-
return self._fallback_response(emotional_state)
|
| 114 |
-
|
| 115 |
-
def generate_initial_greeting(
|
| 116 |
-
self,
|
| 117 |
-
patient_profile: dict,
|
| 118 |
-
emotional_state: EmotionalState,
|
| 119 |
-
) -> str:
|
| 120 |
-
"""Generate patient's first words when student enters."""
|
| 121 |
-
|
| 122 |
-
if not self.client:
|
| 123 |
-
return self._fallback_greeting(emotional_state)
|
| 124 |
-
|
| 125 |
-
system_prompt = f"""You are a {patient_profile['age']}yo {patient_profile['gender']} patient in {patient_profile['setting']}.
|
| 126 |
-
You're feeling {emotional_state.value} and just walked in.
|
| 127 |
-
|
| 128 |
-
Generate your FIRST words to the doctor. Keep it very short (1-2 sentences).
|
| 129 |
-
Use natural language, NO medical jargon.
|
| 130 |
-
Show your emotional state."""
|
| 131 |
-
|
| 132 |
-
try:
|
| 133 |
-
response = self.client.messages.create(
|
| 134 |
-
model="claude-opus-4-6",
|
| 135 |
-
max_tokens=150,
|
| 136 |
-
system=system_prompt,
|
| 137 |
-
messages=[{
|
| 138 |
-
"role": "user",
|
| 139 |
-
"content": f"Patient with {patient_profile['chief_complaint']} enters. What do you say first?",
|
| 140 |
-
}],
|
| 141 |
-
temperature=0.8,
|
| 142 |
-
)
|
| 143 |
-
return response.content[0].text.strip()
|
| 144 |
-
|
| 145 |
-
except Exception as e:
|
| 146 |
-
logger.error(f"Patient greeting generation error: {e}")
|
| 147 |
-
return self._fallback_greeting(emotional_state)
|
| 148 |
-
|
| 149 |
-
def _fallback_response(self, emotional_state: EmotionalState) -> str:
|
| 150 |
-
"""Fallback response if API fails."""
|
| 151 |
-
responses = {
|
| 152 |
-
EmotionalState.CALM: "Haan doctor, aap puchiye. Main bata dunga.",
|
| 153 |
-
EmotionalState.CONCERNED: "Doctor, kuch samajh nahi aa raha. Sab theek ho jayega na?",
|
| 154 |
-
EmotionalState.ANXIOUS: "Bahut dard ho raha hai doctor... bahut dard...",
|
| 155 |
-
EmotionalState.DEFENSIVE: "Maine pehle bhi bataya. Aur kya puchna hai?",
|
| 156 |
-
}
|
| 157 |
-
return responses.get(emotional_state, "Haan doctor?")
|
| 158 |
-
|
| 159 |
-
def _fallback_greeting(self, emotional_state: EmotionalState) -> str:
|
| 160 |
-
"""Fallback greeting if API fails."""
|
| 161 |
-
greetings = {
|
| 162 |
-
EmotionalState.CALM: "Namaste doctor. Main aaj theek nahi feel kar raha.",
|
| 163 |
-
EmotionalState.CONCERNED: "Doctor, bahut problem ho rahi hai...",
|
| 164 |
-
EmotionalState.ANXIOUS: "Doctor... doctor please help... bahut dard hai!",
|
| 165 |
-
EmotionalState.DEFENSIVE: "Kya hai doctor? Bahut busy lag rahe ho.",
|
| 166 |
-
}
|
| 167 |
-
return greetings.get(emotional_state, "Namaste doctor.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/patient_agent.py
DELETED
|
@@ -1,188 +0,0 @@
|
|
| 1 |
-
"""Patient agent — speaks in Hindi/English mix with realistic distress levels."""
|
| 2 |
-
|
| 3 |
-
from app.core.agents.base_agent import BaseAgent
|
| 4 |
-
from app.core.agents.symptom_translator import get_patient_friendly_description
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
PATIENT_SYSTEM_PROMPT = """You are a patient in an Indian government hospital. You are being examined by a medical student (junior doctor).
|
| 8 |
-
|
| 9 |
-
CRITICAL RULES:
|
| 10 |
-
1. You speak in Hindi-English mix (Hinglish) naturally — like a real Indian patient would.
|
| 11 |
-
Examples: "Doctor sahab, mujhe bahut zyada dard ho raha hai chest mein", "Haan doctor, breathing mein problem hai"
|
| 12 |
-
2. You do NOT know medical terminology. Describe symptoms in simple, lay terms.
|
| 13 |
-
3. You have a specific distress level based on your condition severity.
|
| 14 |
-
4. You may be anxious, scared, in pain, or confused — act accordingly.
|
| 15 |
-
5. You can only share information you would realistically know (symptoms, history, lifestyle).
|
| 16 |
-
6. You do NOT know your own diagnosis. You are the patient, not the doctor.
|
| 17 |
-
7. If asked about something you don't know (like lab values), say "Yeh toh doctor aapko pata hoga"
|
| 18 |
-
8. Keep responses realistic — 1-3 sentences typically, more if telling your history.
|
| 19 |
-
9. NEVER reveal information beyond your case data. If asked something not in your history, say "Pata nahi doctor" or "Yaad nahi aa raha".
|
| 20 |
-
|
| 21 |
-
REALISTIC INDIAN PATIENT BEHAVIOR:
|
| 22 |
-
- You likely tried home remedies first: haldi doodh, Hajmola, Pudin Hara, local pharmacy ki dawai
|
| 23 |
-
- You may have gone to a local doctor/RMP (registered medical practitioner) who gave "goli" but it didn't work
|
| 24 |
-
- You came to the govt hospital because: private is expensive / local doctor referred / someone said "bade hospital jao"
|
| 25 |
-
- You may not remember medicine names: "ek chhoti goli thi", "injection lagi thi"
|
| 26 |
-
- Family brought you. They may have opinions: "Gharwale bol rahe the ki gas hai"
|
| 27 |
-
- You may not follow medical instructions easily: diet changes are hard, medicine timing is missed
|
| 28 |
-
- You may be worried about: cost ("kitna kharcha hoga?"), work ("chutti nahi milegi"), family ("bacche ghar pe akele hain")
|
| 29 |
-
- Religious/cultural: may mention "bhagwan ki kripa se theek ho jaunga", "mannat maangi hai"
|
| 30 |
-
|
| 31 |
-
PATIENT DETAILS:
|
| 32 |
-
- Age: {age}, Gender: {gender}, Location: {location}
|
| 33 |
-
- Chief complaint: {chief_complaint}
|
| 34 |
-
- Presentation: {presentation}
|
| 35 |
-
- History: {history}
|
| 36 |
-
- Distress level: {distress_level} (low=calm, moderate=worried, high=distressed/crying, critical=severe pain/panic)
|
| 37 |
-
|
| 38 |
-
DISTRESS BEHAVIOR:
|
| 39 |
-
- low: Calm, answers questions clearly. "Haan doctor, yeh problem 2 hafte se hai."
|
| 40 |
-
- moderate: Worried but cooperative. "Doctor, mujhe dar lag raha hai... kuch serious toh nahi?"
|
| 41 |
-
- high: In visible distress, may cry or groan. "Aaahhh... bahut dard ho raha hai doctor... please kuch karo!"
|
| 42 |
-
- critical: Severe pain/panic, short responses. "Doctor... saans... nahi aa rahi... please..."
|
| 43 |
-
|
| 44 |
-
Respond ONLY as the patient. Stay in character completely.
|
| 45 |
-
|
| 46 |
-
FORMATTING RULES:
|
| 47 |
-
- Do NOT use markdown formatting like ** or * in your responses
|
| 48 |
-
- Write in plain text only
|
| 49 |
-
- For actions or expressions, use plain text like: (dard se karaahte hue) or crying... instead of *dard se karaahte hue*"""
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
class PatientAgent(BaseAgent):
|
| 53 |
-
"""Patient agent that speaks in Hinglish with realistic distress."""
|
| 54 |
-
|
| 55 |
-
agent_type = "patient"
|
| 56 |
-
display_name = "Patient"
|
| 57 |
-
|
| 58 |
-
def __init__(self):
|
| 59 |
-
super().__init__()
|
| 60 |
-
self.distress_level = "moderate"
|
| 61 |
-
self.patient_info: dict = {}
|
| 62 |
-
|
| 63 |
-
def configure(self, case_data: dict):
|
| 64 |
-
"""Configure patient with case-specific data."""
|
| 65 |
-
self.patient_info = {
|
| 66 |
-
"age": case_data.get("patient", {}).get("age", 45),
|
| 67 |
-
"gender": case_data.get("patient", {}).get("gender", "Male"),
|
| 68 |
-
"location": case_data.get("patient", {}).get("location", "Delhi"),
|
| 69 |
-
"chief_complaint": case_data.get("chief_complaint", ""),
|
| 70 |
-
"presentation": case_data.get("initial_presentation", ""),
|
| 71 |
-
"history": "",
|
| 72 |
-
}
|
| 73 |
-
|
| 74 |
-
# Extract history from stages
|
| 75 |
-
for stage in case_data.get("stages", []):
|
| 76 |
-
if stage.get("stage") == "history":
|
| 77 |
-
self.patient_info["history"] = stage.get("info", "")
|
| 78 |
-
break
|
| 79 |
-
|
| 80 |
-
# Set distress based on vital signs and difficulty
|
| 81 |
-
self._set_distress_level(case_data)
|
| 82 |
-
|
| 83 |
-
def _set_distress_level(self, case_data: dict):
|
| 84 |
-
"""Determine distress level from vitals and difficulty."""
|
| 85 |
-
difficulty = case_data.get("difficulty", "intermediate")
|
| 86 |
-
vitals = case_data.get("vital_signs", {})
|
| 87 |
-
|
| 88 |
-
hr = vitals.get("hr", 80)
|
| 89 |
-
spo2 = vitals.get("spo2", 98)
|
| 90 |
-
rr = vitals.get("rr", 16)
|
| 91 |
-
|
| 92 |
-
if difficulty == "advanced" or spo2 < 90 or hr > 130 or rr > 30:
|
| 93 |
-
self.distress_level = "critical"
|
| 94 |
-
elif difficulty == "intermediate" or spo2 < 94 or hr > 110 or rr > 24:
|
| 95 |
-
self.distress_level = "high"
|
| 96 |
-
elif hr > 100 or rr > 20:
|
| 97 |
-
self.distress_level = "moderate"
|
| 98 |
-
else:
|
| 99 |
-
self.distress_level = "low"
|
| 100 |
-
|
| 101 |
-
def get_system_prompt(self, case_context: dict) -> str:
|
| 102 |
-
info = {**self.patient_info, **case_context}
|
| 103 |
-
info["distress_level"] = self.distress_level
|
| 104 |
-
base_prompt = PATIENT_SYSTEM_PROMPT.format(
|
| 105 |
-
age=info.get("age", 45),
|
| 106 |
-
gender=info.get("gender", "Male"),
|
| 107 |
-
location=info.get("location", "Delhi"),
|
| 108 |
-
chief_complaint=info.get("chief_complaint", "unknown"),
|
| 109 |
-
presentation=info.get("presentation", ""),
|
| 110 |
-
history=info.get("history", ""),
|
| 111 |
-
distress_level=self.distress_level,
|
| 112 |
-
)
|
| 113 |
-
|
| 114 |
-
if self.specialized_knowledge:
|
| 115 |
-
base_prompt += (
|
| 116 |
-
"\n\n=== YOUR CONDITION-SPECIFIC KNOWLEDGE ===\n"
|
| 117 |
-
"Use this medical knowledge to accurately portray your symptoms and experience. "
|
| 118 |
-
"Remember: you are a patient, so express this as feelings and experiences, NOT medical terms.\n\n"
|
| 119 |
-
f"{self.specialized_knowledge}"
|
| 120 |
-
)
|
| 121 |
-
|
| 122 |
-
# Extra guardrail: reinforce lay language
|
| 123 |
-
base_prompt += (
|
| 124 |
-
"\n\nCRITICAL REMINDER — LANGUAGE RULES:\n"
|
| 125 |
-
"- NEVER use medical terms like: tachycardia, dyspnea, edema, differential, hemoglobin, etc.\n"
|
| 126 |
-
"- Instead describe sensations: dil zor se dhadak raha hai, saans phool rahi hai, "
|
| 127 |
-
"pair sujan gaye hain, chakkar aa raha hai, etc.\n"
|
| 128 |
-
"- You are an ordinary Indian patient. Speak naturally in Hinglish.\n"
|
| 129 |
-
"- Each response should feel DIFFERENT — vary your words, expressions, and emotions.\n"
|
| 130 |
-
"- Do NOT repeat the same phrases from earlier in the conversation."
|
| 131 |
-
)
|
| 132 |
-
|
| 133 |
-
return base_prompt
|
| 134 |
-
|
| 135 |
-
def get_fallback_response(self, message: str, case_context: dict) -> str:
|
| 136 |
-
msg = message.lower()
|
| 137 |
-
|
| 138 |
-
if self.distress_level == "critical":
|
| 139 |
-
if any(w in msg for w in ["pain", "dard", "hurt"]):
|
| 140 |
-
return "Doctor... bahut... zyada dard... please kuch karo... saans nahi aa rahi..."
|
| 141 |
-
return "Doctor... please... jaldi..."
|
| 142 |
-
|
| 143 |
-
if self.distress_level == "high":
|
| 144 |
-
if any(w in msg for w in ["how long", "kab se", "when"]):
|
| 145 |
-
return "Doctor sahab, yeh 2-3 din se bahut zyada ho gaya hai... pehle thoda thoda hota tha, ab toh sehen nahi hota!"
|
| 146 |
-
if any(w in msg for w in ["pain", "dard", "hurt"]):
|
| 147 |
-
return "Haan doctor, bahut dard hai... yahan pe... aaahhh... please dawai de do!"
|
| 148 |
-
return "Doctor, mujhe bahut takleef ho rahi hai... kuch serious toh nahi na?"
|
| 149 |
-
|
| 150 |
-
if self.distress_level == "moderate":
|
| 151 |
-
if any(w in msg for w in ["history", "pehle", "before", "past"]):
|
| 152 |
-
return "Doctor, pehle aisa kabhi nahi hua tha. Bas 1-2 baar thoda sa hua tha lekin itna nahi tha."
|
| 153 |
-
if any(w in msg for w in ["medicine", "dawai", "medication"]):
|
| 154 |
-
return "Haan doctor, mein BP ki dawai leta hoon... naam yaad nahi aa raha... chhoti wali goli hai."
|
| 155 |
-
if any(w in msg for w in ["family", "gharwale", "parents"]):
|
| 156 |
-
return "Ji doctor, mere father ko bhi sugar tha... aur unko heart ka bhi problem tha."
|
| 157 |
-
return "Ji doctor, bataiye kya karna hai? Mujhe thoda dar lag raha hai."
|
| 158 |
-
|
| 159 |
-
# low distress
|
| 160 |
-
if any(w in msg for w in ["how", "kaise"]):
|
| 161 |
-
return "Doctor sahab, yeh problem thode dinon se hai. Pehle chalta tha lekin ab zyada ho gaya."
|
| 162 |
-
if any(w in msg for w in ["smoke", "drink", "sharab", "cigarette"]):
|
| 163 |
-
return "Nahi doctor, mein na pita hoon na cigarette peeta hoon. Bas kabhi kabhi chai peeta hoon."
|
| 164 |
-
return f"Ji doctor, main {self.patient_info.get('chief_complaint', 'problem').lower()} ki wajah se aaya hoon. Aap bataiye kya karna chahiye?"
|
| 165 |
-
|
| 166 |
-
def get_initial_greeting(self) -> dict:
|
| 167 |
-
"""Generate the patient's initial complaint on arrival."""
|
| 168 |
-
cc = self.patient_info.get("chief_complaint", "problem")
|
| 169 |
-
age = self.patient_info.get("age", 45)
|
| 170 |
-
gender = self.patient_info.get("gender", "Male")
|
| 171 |
-
|
| 172 |
-
# Get patient-friendly description of symptoms
|
| 173 |
-
lay_description = get_patient_friendly_description(cc, self.distress_level)
|
| 174 |
-
|
| 175 |
-
greetings = {
|
| 176 |
-
"critical": f"Doctor sahab... please... {lay_description}... saans nahi aa rahi...",
|
| 177 |
-
"high": f"Doctor sahab, namaste... mujhe bahut zyada problem ho rahi hai... {lay_description}... please jaldi check karo!",
|
| 178 |
-
"moderate": f"Namaste doctor sahab. Mein aapke paas aaya hoon kyunki mujhe {lay_description}. 2-3 din se ho raha hai, ab zyada ho gaya.",
|
| 179 |
-
"low": f"Namaste doctor sahab. Mujhe {lay_description}, isliye aaya hoon. Dekhiye na please.",
|
| 180 |
-
}
|
| 181 |
-
|
| 182 |
-
content = greetings.get(self.distress_level, greetings["moderate"])
|
| 183 |
-
return {
|
| 184 |
-
"agent_type": self.agent_type,
|
| 185 |
-
"display_name": self.display_name,
|
| 186 |
-
"content": content,
|
| 187 |
-
"distress_level": self.distress_level,
|
| 188 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/response_optimizer.py
DELETED
|
@@ -1,218 +0,0 @@
|
|
| 1 |
-
"""Response optimization utilities for faster agent responses.
|
| 2 |
-
|
| 3 |
-
Key optimizations:
|
| 4 |
-
1. Parallel agent processing
|
| 5 |
-
2. Smart context filtering based on query type
|
| 6 |
-
3. Response caching for common queries
|
| 7 |
-
4. Conversation history compression
|
| 8 |
-
"""
|
| 9 |
-
|
| 10 |
-
import hashlib
|
| 11 |
-
import re
|
| 12 |
-
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 13 |
-
from typing import Optional, Any
|
| 14 |
-
import logging
|
| 15 |
-
import time
|
| 16 |
-
|
| 17 |
-
logger = logging.getLogger(__name__)
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
class ResponseCache:
|
| 21 |
-
"""Cache for agent responses to reduce API calls."""
|
| 22 |
-
|
| 23 |
-
def __init__(self, max_size: int = 100, ttl_seconds: int = 300):
|
| 24 |
-
self._cache: dict[str, tuple[Any, float]] = {}
|
| 25 |
-
self.max_size = max_size
|
| 26 |
-
self.ttl_seconds = ttl_seconds
|
| 27 |
-
|
| 28 |
-
def _make_key(self, agent_type: str, message: str, context: dict) -> str:
|
| 29 |
-
"""Create cache key from request parameters."""
|
| 30 |
-
# Normalize message for better cache hits
|
| 31 |
-
normalized_msg = message.lower().strip()
|
| 32 |
-
normalized_msg = re.sub(r'\s+', ' ', normalized_msg)
|
| 33 |
-
|
| 34 |
-
# Include multiple context dimensions so cache doesn't return stale responses:
|
| 35 |
-
# - elapsed_minutes: time-dependent responses
|
| 36 |
-
# - current vitals: responses should reflect latest vitals
|
| 37 |
-
# - ward_transcript hash: responses should reflect what others said
|
| 38 |
-
elapsed = context.get('elapsed_minutes', 0)
|
| 39 |
-
vitals_key = f"{context.get('current_hr', '')}-{context.get('current_spo2', '')}"
|
| 40 |
-
transcript_hash = hashlib.md5(
|
| 41 |
-
context.get('ward_transcript', '').encode()
|
| 42 |
-
).hexdigest()[:8]
|
| 43 |
-
context_key = f"{context.get('chief_complaint', '')}-{elapsed}-{vitals_key}-{transcript_hash}"
|
| 44 |
-
|
| 45 |
-
key_string = f"{agent_type}:{normalized_msg}:{context_key}"
|
| 46 |
-
return hashlib.md5(key_string.encode()).hexdigest()
|
| 47 |
-
|
| 48 |
-
def get(self, agent_type: str, message: str, context: dict) -> Optional[dict]:
|
| 49 |
-
"""Get cached response if available and not expired."""
|
| 50 |
-
key = self._make_key(agent_type, message, context)
|
| 51 |
-
|
| 52 |
-
if key in self._cache:
|
| 53 |
-
response, timestamp = self._cache[key]
|
| 54 |
-
if time.time() - timestamp < self.ttl_seconds:
|
| 55 |
-
logger.info(f"Cache hit for {agent_type} agent")
|
| 56 |
-
return response
|
| 57 |
-
else:
|
| 58 |
-
# Expired - remove from cache
|
| 59 |
-
del self._cache[key]
|
| 60 |
-
|
| 61 |
-
return None
|
| 62 |
-
|
| 63 |
-
def set(self, agent_type: str, message: str, context: dict, response: dict):
|
| 64 |
-
"""Cache a response."""
|
| 65 |
-
# Implement simple LRU by removing oldest if at capacity
|
| 66 |
-
if len(self._cache) >= self.max_size:
|
| 67 |
-
oldest_key = min(self._cache.keys(), key=lambda k: self._cache[k][1])
|
| 68 |
-
del self._cache[oldest_key]
|
| 69 |
-
|
| 70 |
-
key = self._make_key(agent_type, message, context)
|
| 71 |
-
self._cache[key] = (response, time.time())
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
class ContextFilter:
    """Smart context filtering to reduce API payload size."""

    # Query-routing table: (trigger words looked for in the message,
    # keywords identifying relevant knowledge sections, char limit).
    # Order matters — the first rule whose trigger matches wins, mirroring
    # the original if/elif cascade (exam, history, treatment, diagnosis, labs).
    _SECTION_RULES = (
        (("examine", "check", "look", "palpat", "auscult"),
         ("exam", "physical", "finding", "sign"), 3000),
        (("history", "when", "kab", "started", "began", "pehle"),
         ("history", "timeline", "background", "onset"), 3000),
        (("treatment", "medicine", "dawai", "drug", "manage"),
         ("treatment", "management", "medication", "drug", "protocol"), 3000),
        (("diagnosis", "differential", "what is", "cause", "why"),
         ("diagnosis", "differential", "pathophysiology", "cause"), 4000),
        (("test", "investigation", "lab", "result", "report"),
         ("investigation", "lab", "test", "result", "finding"), 3000),
    )

    # Queries simple enough that a minimal knowledge snippet suffices.
    _SIMPLE_QUERIES = (
        "vital", "bp", "heart rate", "temperature", "spo2", "oxygen",
        "how are you", "kaise ho", "feeling", "better", "worse",
    )

    @staticmethod
    def _select_sections(knowledge: str, keywords, limit: int) -> str:
        """Keep the first two '==='-delimited sections containing any keyword, capped at *limit* chars."""
        sections = knowledge.split("===")
        relevant = [s for s in sections if any(kw in s.lower() for kw in keywords)]
        return "===".join(relevant[:2])[:limit]

    @staticmethod
    def filter_knowledge_for_query(specialized_knowledge: str, message: str, agent_type: str) -> str:
        """Filter specialized knowledge to only relevant sections based on query type.

        Simple small-talk/vitals queries get a 1000-char snippet; recognised
        clinical query types get their matching sections (via _SECTION_RULES);
        everything else falls back to the first 5000 chars.
        """
        if not specialized_knowledge:
            return ""

        msg_lower = message.lower()

        # For simple/common queries, use minimal context.
        if any(q in msg_lower for q in ContextFilter._SIMPLE_QUERIES):
            return specialized_knowledge[:1000]

        for triggers, keywords, limit in ContextFilter._SECTION_RULES:
            if any(w in msg_lower for w in triggers):
                return ContextFilter._select_sections(specialized_knowledge, keywords, limit)

        # Default: return first 5000 chars for general queries.
        return specialized_knowledge[:5000]

    @staticmethod
    def compress_conversation_history(history: list[dict], max_messages: int = 10) -> list[dict]:
        """Keep only recent messages, summarizing older ones if needed.

        Returns *history* unchanged when it fits; otherwise the last
        *max_messages* entries, prefixed with a one-message summary when
        more than five messages were dropped.
        """
        if len(history) <= max_messages:
            return history

        # Keep last N messages in full.
        recent = history[-max_messages:]

        # Add a summary message for older messages if there are many.
        if len(history) > max_messages + 5:
            older_count = len(history) - max_messages
            summary_msg = {
                "role": "user",
                "content": f"[Context: {older_count} earlier messages omitted. Patient has been discussing symptoms and undergoing examination. Continue from here.]"
            }
            return [summary_msg] + recent

        return recent
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
class ParallelAgentProcessor:
    """Process multiple agent responses in parallel."""

    @staticmethod
    def process_agents_parallel(agents_to_process: list[tuple], max_workers: int = 3) -> list[dict]:
        """Process multiple agents in parallel.

        Args:
            agents_to_process: List of tuples (agent, message, context)
            max_workers: Maximum parallel workers (3 is optimal for API rate limits)

        Returns:
            List of agent responses, in the same order as the input.
        """
        if not agents_to_process:
            return []

        start_time = time.time()
        indexed_responses: list[tuple] = []

        pool_size = min(max_workers, len(agents_to_process))
        with ThreadPoolExecutor(max_workers=pool_size) as executor:
            # Submit every agent call; remember its agent and input position.
            pending = {}
            for position, (agent, message, context) in enumerate(agents_to_process):
                future = executor.submit(agent.respond, message, context)
                pending[future] = (agent, position)

            # Gather results as they finish, substituting a fallback on failure.
            for future in as_completed(pending):
                agent, position = pending[future]
                try:
                    # Cap each agent at 10s so one slow call can't stall the round.
                    result = future.result(timeout=10)
                    indexed_responses.append((position, result))
                    logger.info(f"Completed response from {agent.display_name}")
                except Exception as e:
                    logger.error(f"Failed to get response from {agent.display_name}: {e}")
                    fallback = {
                        "agent_type": agent.agent_type,
                        "display_name": agent.display_name,
                        "content": agent.get_fallback_response("", {}),
                    }
                    indexed_responses.append((position, fallback))

        # Restore the caller's original ordering.
        indexed_responses.sort(key=lambda item: item[0])
        results = [payload for _, payload in indexed_responses]

        elapsed = time.time() - start_time
        logger.info(f"Processed {len(results)} agents in parallel in {elapsed:.2f}s")

        return results
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
# Singleton instances shared by the simulation layer; created at import time
# so every importer reuses the same cache, filter, and processor.
response_cache = ResponseCache(max_size=200, ttl_seconds=120)  # 2 minute TTL (reduced from 10 min to avoid stale responses)
context_filter = ContextFilter()
parallel_processor = ParallelAgentProcessor()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/senior_agent.py
DELETED
|
@@ -1,187 +0,0 @@
|
|
| 1 |
-
"""Senior doctor agent — Socratic teaching mentor who guides without giving answers."""
|
| 2 |
-
|
| 3 |
-
from app.core.agents.base_agent import BaseAgent
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
SENIOR_SYSTEM_PROMPT = """You are a senior consultant doctor (professor) in an Indian medical college teaching hospital. You are supervising a final-year MBBS student who is handling a case.
|
| 7 |
-
|
| 8 |
-
CRITICAL RULES:
|
| 9 |
-
1. Use the SOCRATIC METHOD — ask probing questions, never give the answer directly.
|
| 10 |
-
2. Guide the student's clinical reasoning through structured questioning.
|
| 11 |
-
3. You are supportive but academically rigorous.
|
| 12 |
-
4. You know the correct diagnosis but must NOT reveal it unless the student has already diagnosed correctly.
|
| 13 |
-
5. If the student is on the wrong track, gently redirect with questions.
|
| 14 |
-
6. If the student is stuck, provide progressive hints (never the answer).
|
| 15 |
-
7. Keep responses concise — 2-4 sentences with 1-2 Socratic questions.
|
| 16 |
-
8. You speak in professional English with occasional Hindi phrases natural in Indian hospitals.
|
| 17 |
-
|
| 18 |
-
ACCURACY RULES — CRITICAL FOR A TEACHER:
|
| 19 |
-
- ONLY reference guidelines you are CERTAIN exist (ICMR, API, CSI, INASL, ISCCM, NVBDCP, NACO).
|
| 20 |
-
- Do NOT invent guidelines. If unsure, say "standard teaching hospital practice" instead.
|
| 21 |
-
- Drug doses: ONLY state if you are certain. Otherwise say "check the formulary."
|
| 22 |
-
- Statistics: ONLY cite if from your specialized knowledge. Otherwise say "India has a significant burden."
|
| 23 |
-
- If the student asks something beyond your knowledge, say "Let's look that up — good question."
|
| 24 |
-
- NEVER confidently state something you're unsure about. Wrong teaching is worse than no teaching.
|
| 25 |
-
- Reference standard textbooks: Harrison's, Robbins, Park's PSM, OP Ghai, DC Dutta — these are reliable.
|
| 26 |
-
- For NEET-PG patterns, only cite well-known classic associations (e.g., "apple-green birefringence = amyloid").
|
| 27 |
-
|
| 28 |
-
INDIAN TEACHING HOSPITAL CONTEXT:
|
| 29 |
-
- This is a medical college hospital — you have residents, interns, postings.
|
| 30 |
-
- Teaching happens on ward rounds, not in classrooms.
|
| 31 |
-
- The student is expected to present the case, form a differential, propose investigations.
|
| 32 |
-
- Standard approach: History → Examination → Investigations → Differential → Diagnosis → Management.
|
| 33 |
-
- You know the practical constraints: limited imaging, delayed reports, resource sharing.
|
| 34 |
-
|
| 35 |
-
CASE DETAILS:
|
| 36 |
-
- Patient: {age}y {gender}, {chief_complaint}
|
| 37 |
-
- Specialty: {specialty}
|
| 38 |
-
- Difficulty: {difficulty}
|
| 39 |
-
- Correct diagnosis: {diagnosis}
|
| 40 |
-
- Key differentials: {differentials}
|
| 41 |
-
- Critical learning points: {learning_points}
|
| 42 |
-
|
| 43 |
-
TEACHING APPROACH:
|
| 44 |
-
1. If student hasn't started: "Let's approach this systematically. What are your initial impressions?"
|
| 45 |
-
2. If student has a hypothesis: Challenge it. "Good thinking, but what else could present similarly?"
|
| 46 |
-
3. If student is stuck: Hint progressively. "Think about the vital signs... what pattern do you see?"
|
| 47 |
-
4. If student is close: Encourage. "You're on the right track. Now, what investigation would confirm?"
|
| 48 |
-
5. If student is wrong: Redirect gently. "That's a reasonable thought, but consider — would that explain ALL the findings?"
|
| 49 |
-
6. After diagnosis: Teach the deeper lesson. "Excellent. Now for the exam — what's the pathophysiology here?"
|
| 50 |
-
|
| 51 |
-
Respond ONLY as the senior doctor. Be a great teacher — accurate above all else.
|
| 52 |
-
|
| 53 |
-
FORMATTING RULES:
|
| 54 |
-
- Do NOT use markdown formatting like ** or * in your responses
|
| 55 |
-
- Write in plain text only
|
| 56 |
-
- For actions or expressions, use plain text like: (doing something) instead of *doing something*"""
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
class SeniorDoctorAgent(BaseAgent):
    """Senior doctor agent that teaches using Socratic method.

    Holds the full case (including the correct diagnosis) but the prompt
    instructs it to guide with questions and progressive hints rather than
    reveal the answer.
    """

    agent_type = "senior_doctor"
    display_name = "Dr. Sharma"

    def __init__(self):
        super().__init__()
        # Case facts injected via configure(); used to fill the prompt template.
        self.case_info: dict = {}
        # Count of fallback replies given so far — drives the hint ladder.
        self.hints_given = 0
        # Flipped to True once the student names the correct diagnosis.
        self.student_on_track = False

    def configure(self, case_data: dict):
        """Configure senior doctor with full case knowledge."""
        self.case_info = {
            "age": case_data.get("patient", {}).get("age", 45),
            "gender": case_data.get("patient", {}).get("gender", "Male"),
            "chief_complaint": case_data.get("chief_complaint", ""),
            "specialty": case_data.get("specialty", ""),
            "difficulty": case_data.get("difficulty", "intermediate"),
            "diagnosis": case_data.get("diagnosis", ""),
            # Only the top few differentials/learning points, to keep the prompt short.
            "differentials": ", ".join(case_data.get("differentials", [])[:5]),
            "learning_points": "; ".join(case_data.get("learning_points", [])[:3]),
        }
        self.hints_given = 0
        self.student_on_track = False

    def get_system_prompt(self, case_context: dict) -> str:
        """Build the system prompt; case_context values override configured case_info."""
        info = {**self.case_info, **case_context}
        base_prompt = SENIOR_SYSTEM_PROMPT.format(
            age=info.get("age", 45),
            gender=info.get("gender", "Male"),
            chief_complaint=info.get("chief_complaint", "unknown"),
            specialty=info.get("specialty", "general"),
            difficulty=info.get("difficulty", "intermediate"),
            diagnosis=info.get("diagnosis", "unknown"),
            differentials=info.get("differentials", ""),
            learning_points=info.get("learning_points", ""),
        )

        # specialized_knowledge presumably comes from BaseAgent — appended verbatim
        # so the model can teach from case-specific material.
        if self.specialized_knowledge:
            base_prompt += (
                "\n\n=== YOUR DIAGNOSTIC & TEACHING EXPERTISE ===\n"
                "Use this specialized knowledge for accurate Socratic teaching. "
                "This contains the diagnostic algorithm, Indian guidelines, NEET-PG patterns, "
                "and differential reasoning specific to this case.\n\n"
                f"{self.specialized_knowledge}"
            )

        return base_prompt

    def get_fallback_response(self, message: str, case_context: dict) -> str:
        """Canned Socratic reply used when the LLM is unavailable.

        Each call escalates the hint ladder; a message that appears to contain
        the correct diagnosis is congratulated instead.
        """
        msg = message.lower()
        self.hints_given += 1

        # Check if student mentions the correct diagnosis: any word of the
        # diagnosis longer than 3 chars appearing in the message counts.
        diagnosis = self.case_info.get("diagnosis", "").lower()
        if diagnosis and any(
            word in msg for word in diagnosis.split() if len(word) > 3
        ):
            self.student_on_track = True
            return (
                "Excellent clinical reasoning! You've identified the key diagnosis. "
                "Now tell me — what is the pathophysiological mechanism here? "
                "And what would be your first-line management according to current guidelines?"
            )

        # Hint 1: open-ended systematic prompt.
        if self.hints_given <= 1:
            return (
                "Let's think about this systematically. "
                f"You have a {self.case_info.get('age', 45)}-year-old presenting with "
                f"{self.case_info.get('chief_complaint', 'these symptoms')}. "
                "What are the most dangerous diagnoses you need to rule out first? "
                "Start with your differential — what's at the top of your list?"
            )

        # Hint 2: nudge toward vitals and investigations.
        if self.hints_given == 2:
            return (
                "Good effort. Now look at the vital signs carefully — do you see a pattern? "
                f"This is a {self.case_info.get('specialty', 'clinical')} case. "
                "What investigation would help you narrow down your differential? "
                "Remember — systematic approach is key for NEET-PG as well."
            )

        # Hint 3: reveal the differential list (but not the answer).
        if self.hints_given == 3:
            specialty = self.case_info.get("specialty", "")
            return (
                f"Let me give you a hint — think about the classic {specialty} presentations. "
                f"The key differentials here would include: {self.case_info.get('differentials', 'several possibilities')}. "
                "Which of these fits best with ALL the findings — history, examination, and investigations?"
            )

        # Progressive hints after 3
        return (
            "You're working hard on this, which is good. Let me narrow it down — "
            "focus on the ONE finding that is most specific. "
            "What single investigation or sign points you toward the diagnosis? "
            "Think about what makes this case different from the usual presentation."
        )

    def get_initial_guidance(self) -> dict:
        """Generate senior doctor's initial teaching prompt."""
        difficulty = self.case_info.get("difficulty", "intermediate")
        specialty = self.case_info.get("specialty", "clinical")

        # Opening tone scales with case difficulty.
        if difficulty == "advanced":
            content = (
                f"Interesting {specialty} case we have here. "
                "This one will test your clinical reasoning — the presentation may not be straightforward. "
                "Start by taking a thorough history from the patient. What would you ask first, and why?"
            )
        elif difficulty == "beginner":
            content = (
                f"Good, let's work through this {specialty} case together. "
                "Start from the basics — look at the patient's presentation and vital signs. "
                "What's your initial assessment? Don't worry about getting it perfect, just think aloud."
            )
        else:
            content = (
                f"Alright, we have a {specialty} case. "
                "I want you to approach this like you would in your exam — systematically. "
                "Start with the patient's presenting complaint and vitals. What catches your attention?"
            )

        return {
            "agent_type": self.agent_type,
            "display_name": self.display_name,
            "content": content,
        }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/symptom_translator.py
DELETED
|
@@ -1,213 +0,0 @@
|
|
| 1 |
-
"""Translates medical terminology to patient-friendly Hinglish lay terms."""
|
| 2 |
-
|
| 3 |
-
# Lay-term lookup table, hoisted to module level so it is built once per
# process instead of on every call.  Keys are lowercase medical terms; each
# value gives the patient's wording, the family's wording, and a minimal form.
_LAY_TRANSLATIONS = {
    # GI symptoms
    "bloody diarrhea": {
        "patient": "potty mein khoon aa raha hai, bahut baar jaana padta hai",
        "family": "inko din mein 10-12 baar loose motion ho raha hai, khoon bhi aata hai",
        "simple": "khoon wali loose motion"
    },
    "painless bloody diarrhea": {
        "patient": "potty mein khoon aata hai, dard nahi hota par bahut baar jaana padta hai",
        "family": "khoon wali loose motion ho rahi hai, dard toh nahi batate",
        "simple": "bina dard ke khoon wali potty"
    },
    "hematemesis": {
        "patient": "ulti mein khoon aa raha hai",
        "family": "khoon ki ulti hui hai, bahut dar lag raha hai",
        "simple": "khoon ki ulti"
    },
    "melena": {
        "patient": "potty ekdum kaali ho gayi hai, tar jaisi",
        "family": "inki potty kaali hai, doctor ne kaha khoon hai",
        "simple": "kaali potty"
    },
    "dysphagia": {
        "patient": "khana nigalne mein dikkat ho rahi hai, gale mein atak jaata hai",
        "family": "khana nahi kha pa rahe, gale mein fas jaata hai",
        "simple": "nigalne mein dikkat"
    },

    # Cardiac symptoms
    "chest pain": {
        "patient": "chhati mein dard ho raha hai",
        "family": "seene mein dard ki shikayat kar rahe hain",
        "simple": "seene mein dard"
    },
    "palpitations": {
        "patient": "dil bahut tez tez dhadak raha hai, kabhi kabhi chhoot bhi jaata hai",
        "family": "dil ki dhakdhak ki shikayat hai, ghabrahat hoti hai",
        "simple": "dil ki tez dhakdhak"
    },
    "dyspnea": {
        "patient": "saans phool rahi hai, saans lene mein dikkat hai",
        "family": "saans nahi aa rahi inko theek se",
        "simple": "saans ki takleef"
    },
    "orthopnea": {
        "patient": "lete hue saans nahi aa paati, baith kar sona padta hai",
        "family": "raat ko baith kar sote hain, lete hue saans phoolti hai",
        "simple": "lete hue saans phoolna"
    },
    "syncope": {
        "patient": "chakkar aakar behosh ho gaya tha",
        "family": "achanak behosh ho gaye the, gir gaye the",
        "simple": "behoshi"
    },

    # Respiratory symptoms
    "cough with expectoration": {
        "patient": "khansi ho rahi hai, balgam bhi aata hai",
        "family": "bahut khansi hai, kaf bhi nikalta hai",
        "simple": "balgam wali khansi"
    },
    "hemoptysis": {
        "patient": "khansi mein khoon aa raha hai",
        "family": "khoon ki khansi ho rahi hai",
        "simple": "khoon wali khansi"
    },
    "wheezing": {
        "patient": "saans lete waqt seeti ki awaaz aati hai",
        "family": "saans mein awaaz aa rahi hai",
        "simple": "saans mein seeti"
    },

    # Neuro symptoms
    "headache": {
        "patient": "sar mein bahut dard hai",
        "family": "sar dard ki shikayat kar rahe hain",
        "simple": "sar dard"
    },
    "seizures": {
        "patient": "mirgi ka daura pada tha, haath pair akad gaye the",
        "family": "jhatke aaye the, behosh ho gaye the",
        "simple": "daura/mirgi"
    },
    "weakness": {
        "patient": "kamzori bahut hai, chalne mein dikkat hai",
        "family": "bahut kamzor ho gaye hain",
        "simple": "kamzori"
    },
    "hemiparesis": {
        "patient": "ek taraf ka haath pair nahi chal raha",
        "family": "right/left side kamzor hai",
        "simple": "aadhe badan ki kamzori"
    },

    # General symptoms
    "fever": {
        "patient": "bukhar hai, thand lag rahi hai",
        "family": "bukhar hai 3 din se",
        "simple": "bukhar"
    },
    "weight loss": {
        "patient": "wazan bahut kam ho gaya hai",
        "family": "bahut duble ho gaye hain",
        "simple": "wazan ghatna"
    },
    "loss of appetite": {
        "patient": "bhookh nahi lagti, kuch khane ka mann nahi karta",
        "family": "khana bilkul nahi khate",
        "simple": "bhookh na lagna"
    },
    "fatigue": {
        "patient": "thakawat bahut rehti hai, kaam karne ka mann nahi karta",
        "family": "hamesha thake thake rehte hain",
        "simple": "thakaan"
    },
    "jaundice": {
        "patient": "aankhen peeli ho gayi hain, peshaab bhi peela hai",
        "family": "peeliya ho gaya hai, aankhen peeli hain",
        "simple": "peeliya"
    },
    "edema": {
        "patient": "pair suj gaye hain, joote tight ho gaye",
        "family": "haath pair mein sujan hai",
        "simple": "sujan"
    },
    "ascites": {
        "patient": "pet phool gaya hai, paani bhar gaya hai",
        "family": "pet mein paani bhar gaya hai",
        "simple": "pet mein paani"
    }
}


def translate_to_lay_terms(chief_complaint: str) -> dict:
    """Convert medical chief complaint to patient-friendly descriptions.

    Returns dict with:
    - patient: What the patient would say
    - family: What the family would say
    - simple: Basic lay description
    """
    cc = chief_complaint.lower()
    translations = _LAY_TRANSLATIONS

    # Exact matches first.  Prefer the LONGEST matching term so that a
    # specific entry like "painless bloody diarrhea" wins over its substring
    # "bloody diarrhea" (previously the shorter key shadowed the longer one
    # because dict insertion order decided the winner).
    exact_matches = [term for term in translations if term in cc]
    if exact_matches:
        return translations[max(exact_matches, key=len)]

    # Partial matches: any sufficiently long word from a known term.
    for medical_term, lay_terms in translations.items():
        for word in medical_term.split():
            if word in cc and len(word) > 4:  # Avoid short words like "of", "with"
                return lay_terms

    # Default fallback - extract key symptoms
    if "pain" in cc or "ache" in cc:
        if "chest" in cc:
            return translations["chest pain"]
        elif "head" in cc:
            return translations["headache"]
        else:
            return {
                "patient": "bahut dard ho raha hai",
                "family": "dard ki shikayat kar rahe hain",
                "simple": "dard"
            }

    if "bleeding" in cc or "blood" in cc:
        return {
            "patient": "khoon aa raha hai",
            "family": "khoon aane ki problem hai",
            "simple": "khoon aana"
        }

    if "vomit" in cc:
        return {
            "patient": "ulti ho rahi hai",
            "family": "baar baar ulti kar rahe hain",
            "simple": "ulti"
        }

    if "breath" in cc or "dyspn" in cc or "short" in cc:
        return translations["dyspnea"]

    if "swelling" in cc or "swell" in cc:
        return translations["edema"]

    # Generic fallback
    return {
        "patient": "tabiyat kharab hai, theek nahi lag raha",
        "family": "tabiyat bigad gayi hai",
        "simple": "bimaar"
    }
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
def get_patient_friendly_description(chief_complaint: str, distress_level: str = "moderate") -> str:
    """Get patient's description based on distress level."""
    terms = translate_to_lay_terms(chief_complaint)

    # Escalate phrasing with distress: terse gasps when critical, an added
    # complaint when high, the plain description otherwise.
    if distress_level == "critical":
        return f"{terms['simple']}... bahut... zyada..."
    if distress_level == "high":
        return f"{terms['patient']}, bahut takleef hai"
    return terms['patient']
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
def get_family_friendly_description(chief_complaint: str, duration: str = "kuch din") -> str:
    """Get family member's description of the problem."""
    family_phrase = translate_to_lay_terms(chief_complaint)["family"]
    return f"{family_phrase}, {duration} se pareshan hain"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/treatment_engine.py
DELETED
|
@@ -1,232 +0,0 @@
|
|
| 1 |
-
"""Treatment engine — models drug effects, contraindications, and patient outcomes.
|
| 2 |
-
|
| 3 |
-
Uses Claude Opus to dynamically assess treatment appropriateness and predict effects.
|
| 4 |
-
No hardcoded drug databases — the AI reasons about each treatment in context.
|
| 5 |
-
"""
|
| 6 |
-
|
| 7 |
-
import logging
|
| 8 |
-
import os
|
| 9 |
-
from typing import Optional
|
| 10 |
-
|
| 11 |
-
import anthropic
|
| 12 |
-
|
| 13 |
-
logger = logging.getLogger(__name__)
|
| 14 |
-
|
| 15 |
-
# Prompt template describing the treatment-assessment framework.  Sent to the
# LLM as guidance (not a static drug database): it lists typical govt-hospital
# resources and the exact JSON schema the model must reply with.
TREATMENT_GUIDANCE = """
TREATMENT ASSESSMENT FRAMEWORK:

You are a clinical pharmacologist assessing a treatment order in an Indian government hospital.

AVAILABLE RESOURCES (Indian Govt Hospital):
- NLEM drugs (National List of Essential Medicines) are available
- Non-NLEM drugs: may need special indent, delay
- IV fluids: NS 0.9%, RL, DNS (Dextrose Normal Saline), D5W, D10W
- Blood products: available after crossmatch (30-60 min)
- Emergency drugs: Adrenaline, Atropine, Dopamine, Dobutamine, Furosemide, Hydrocortisone
- Antibiotics: Commonly available — Amoxicillin, Ceftriaxone, Metronidazole, Azithromycin, Ciprofloxacin, Doxycycline, Gentamicin
- Analgesics: Paracetamol, Diclofenac, Tramadol; Morphine (needs controlled drug register)
- Antihypertensives: Amlodipine, Atenolol, Enalapril, Losartan, Nifedipine
- Antidiabetics: Metformin, Glimepiride, Insulin (Regular, NPH)
- Anticoagulants: Heparin, Warfarin, Enoxaparin
- Bronchodilators: Salbutamol nebulization, Ipratropium, Aminophylline
- Steroids: Prednisolone, Hydrocortisone, Dexamethasone, Methylprednisolone
- Antimalarials: Artesunate, ACT, Chloroquine, Primaquine

ASSESSMENT CRITERIA:
1. Is this treatment APPROPRIATE for the patient's condition?
2. Are there CONTRAINDICATIONS based on the patient's history/vitals?
3. Is the DOSE reasonable (if specified)?
4. Is the drug AVAILABLE in a govt hospital?
5. What VITAL SIGN EFFECTS would this treatment have?

RESPOND IN THIS EXACT JSON FORMAT:
{
  "is_appropriate": true/false,
  "safety_level": "safe" | "caution" | "dangerous",
  "reasoning": "Brief clinical reasoning for the assessment",
  "availability": "available" | "special_indent" | "referral_needed",
  "vital_effects": {
    "hr_change": 0,
    "bp_systolic_change": 0,
    "spo2_change": 0,
    "rr_change": 0,
    "temp_change": 0.0
  },
  "nurse_response": "What the nurse would say when given this order",
  "monitoring": "What to monitor after this treatment",
  "alternative": "If inappropriate, suggest what should be given instead (or null)"
}

IMPORTANT:
- vital_effects values are CHANGES (positive = increase, negative = decrease)
- Be realistic — IV fluids raise BP by 10-15mmHg, not 50
- Antipyretics reduce temp by 0.5-1.0°C, not instantly normalize
- O2 supplementation raises SpO2 by 3-8% depending on delivery method
- Beta blockers reduce HR by 10-20 bpm
- If treatment is dangerous, still estimate effects but flag the danger
"""
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
class TreatmentEngine:
    """Assesses and models treatment effects using Claude Opus.

    This is NOT a static drug database — it uses Claude's medical knowledge
    to reason about each treatment in the specific clinical context. When no
    API client is available, a conservative keyword-based fallback is used.
    """

    def __init__(self):
        # Client stays None when the key is missing or is still the template
        # placeholder; every assessment then takes the offline fallback path.
        self.api_key = os.environ.get("ANTHROPIC_API_KEY")
        self.client: Optional[anthropic.Anthropic] = None
        if self.api_key and self.api_key != "sk-ant-your-key-here":
            try:
                self.client = anthropic.Anthropic(api_key=self.api_key)
            except Exception as e:
                logger.warning(f"TreatmentEngine Claude init failed: {e}")

    def assess_treatment(
        self,
        treatment_description: str,
        case_data: dict,
        current_vitals: dict,
        existing_treatments: list[dict],
        specialized_knowledge: str = "",
    ) -> dict:
        """Assess a treatment order for appropriateness and predict effects.

        Args:
            treatment_description: What the student ordered (e.g., "Start IV NS 1L stat")
            case_data: Full case data dict
            current_vitals: Current vital signs
            existing_treatments: Previously ordered treatments
            specialized_knowledge: Agent's dynamic knowledge for this case

        Returns:
            Assessment dict with safety_level, vital_effects, nurse_response, etc.
        """
        if not self.client:
            return self._fallback_assessment(treatment_description, current_vitals)

        # Build the assessment prompt
        vitals = current_vitals
        prompt = f"""{TREATMENT_GUIDANCE}

PATIENT CONTEXT:
- Diagnosis: {case_data.get('diagnosis', 'Under evaluation')}
- Age/Gender: {case_data.get('patient', {}).get('age', 'Unknown')}y {case_data.get('patient', {}).get('gender', 'Unknown')}
- Chief complaint: {case_data.get('chief_complaint', '')}
- Current vitals: BP {vitals.get('bp_systolic', 120)}/{vitals.get('bp_diastolic', 80)}, HR {vitals.get('hr', 80)}, RR {vitals.get('rr', 16)}, Temp {vitals.get('temp', 37.0)}°C, SpO2 {vitals.get('spo2', 98)}%
- Existing treatments: {'; '.join(tx.get('description', '') for tx in existing_treatments) or 'None yet'}
- Difficulty: {case_data.get('difficulty', 'intermediate')}

CASE-SPECIFIC KNOWLEDGE:
{specialized_knowledge[:2000] if specialized_knowledge else 'No additional context available.'}

STUDENT'S TREATMENT ORDER:
"{treatment_description}"

Assess this treatment order. Respond ONLY with the JSON object."""

        try:
            response = self.client.messages.create(
                model="claude-opus-4-6",
                max_tokens=2000,
                temperature=1,
                thinking={
                    "type": "adaptive",
                },
                messages=[{"role": "user", "content": prompt}],
            )

            # Join every text block instead of keeping only the last one, so
            # a multi-block reply is not silently truncated.
            content = "\n".join(
                block.text for block in response.content if block.type == "text"
            ).strip()

            if content:
                return self._parse_assessment(content, treatment_description)

        except Exception as e:
            logger.error(f"TreatmentEngine assessment error: {e}")

        return self._fallback_assessment(treatment_description, current_vitals)

    def _parse_assessment(self, response_text: str, treatment_description: str) -> dict:
        """Parse Claude's JSON response into a structured assessment.

        Falls back to the conservative offline assessment if the text cannot
        be parsed as JSON.
        """
        import json

        # Try to extract JSON from the response
        try:
            # Handle case where response has markdown code blocks
            text = response_text
            if "```json" in text:
                text = text.split("```json")[1].split("```")[0]
            elif "```" in text:
                text = text.split("```")[1].split("```")[0]

            result = json.loads(text.strip())

            # Ensure required fields are always present, with safe defaults.
            return {
                "is_appropriate": result.get("is_appropriate", True),
                "safety_level": result.get("safety_level", "safe"),
                "reasoning": result.get("reasoning", "Assessment completed."),
                "availability": result.get("availability", "available"),
                "vital_effects": result.get("vital_effects", {}),
                "nurse_response": result.get("nurse_response", f"Noted, doctor. Starting {treatment_description}."),
                "monitoring": result.get("monitoring", "Continue routine monitoring."),
                "alternative": result.get("alternative"),
                "treatment_description": treatment_description,
            }
        except (json.JSONDecodeError, IndexError, KeyError) as e:
            logger.warning(f"Failed to parse treatment assessment JSON: {e}")
            return self._fallback_assessment(treatment_description, {})

    def _fallback_assessment(self, treatment_description: str, current_vitals: dict) -> dict:
        """Fallback when Claude is unavailable — conservative assessment.

        Uses word-boundary keyword matching so short tokens like "ns" or
        "o2" only match whole words: the previous substring check ('"ns "'
        with a trailing space) missed "ns" at the end of an order and
        matched "o2" inside "spo2".
        """
        import re

        desc_lower = treatment_description.lower()

        def mentioned(*keywords: str) -> bool:
            # \b boundaries work for both single tokens and multi-word phrases.
            return any(re.search(rf"\b{re.escape(kw)}\b", desc_lower) for kw in keywords)

        # Basic pattern matching for common treatments
        effects = {}
        safety = "safe"
        nurse_msg = f"Noted, doctor. Starting {treatment_description}."

        if mentioned("iv fluid", "ns", "normal saline", "rl", "ringer"):
            effects = {"bp_systolic_change": 10, "hr_change": -5}
            nurse_msg = "Starting IV fluids as ordered. I'll monitor the drip rate."

        elif mentioned("oxygen", "o2", "nasal cannula", "mask"):
            effects = {"spo2_change": 5, "rr_change": -2}
            nurse_msg = "Starting O2 supplementation. I'll monitor SpO2 closely."

        elif mentioned("paracetamol", "pcm", "antipyretic"):
            effects = {"temp_change": -0.5}
            nurse_msg = "Giving paracetamol as ordered. I'll recheck temperature in 30 minutes."

        elif mentioned("nebulization", "nebuliser", "salbutamol"):
            effects = {"spo2_change": 3, "rr_change": -3, "hr_change": 5}
            nurse_msg = "Setting up nebulization now. I'll monitor the patient during the procedure."

        elif mentioned("antibiotic", "ceftriaxone", "amoxicillin"):
            effects = {}  # Antibiotics don't have immediate vital effects
            nurse_msg = "Noted. I'll prepare the antibiotic and do a test dose first as per protocol."

        else:
            # Unknown order: accept it but flag for caution and read back.
            safety = "caution"
            nurse_msg = f"Doctor, just confirming — you want to start {treatment_description}? I'll prepare it right away."

        return {
            "is_appropriate": True,
            "safety_level": safety,
            "reasoning": "Assessment based on standard protocols (Claude API unavailable for detailed assessment).",
            "availability": "available",
            "vital_effects": effects,
            "nurse_response": nurse_msg,
            "monitoring": "Continue monitoring vitals post-treatment.",
            "alternative": None,
            "treatment_description": treatment_description,
        }
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
# Singleton — instantiated at import time so all importers share one engine
# (and one Anthropic client) instead of re-reading the env key per request.
treatment_engine = TreatmentEngine()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/agents/tutor.py
DELETED
|
@@ -1,104 +0,0 @@
|
|
| 1 |
-
import logging
|
| 2 |
-
import os
|
| 3 |
-
from typing import Optional
|
| 4 |
-
|
| 5 |
-
import anthropic
|
| 6 |
-
|
| 7 |
-
logger = logging.getLogger(__name__)
|
| 8 |
-
|
| 9 |
-
TUTOR_SYSTEM_PROMPT = """You are a Socratic clinical reasoning tutor for Indian medical students (MBBS final year, interns, NEET-PG aspirants).
|
| 10 |
-
|
| 11 |
-
Your role:
|
| 12 |
-
- Guide the student through clinical reasoning using the Socratic method
|
| 13 |
-
- Ask probing questions instead of giving answers directly
|
| 14 |
-
- Expose cognitive biases (anchoring, premature closure, availability, confirmation)
|
| 15 |
-
- Encourage systematic differential diagnosis
|
| 16 |
-
- Keep responses concise (2-4 sentences max)
|
| 17 |
-
- Reference the Indian clinical context when relevant
|
| 18 |
-
|
| 19 |
-
Case context:
|
| 20 |
-
- Chief complaint: {chief_complaint}
|
| 21 |
-
- Specialty: {specialty}
|
| 22 |
-
- Difficulty: {difficulty}
|
| 23 |
-
|
| 24 |
-
IMPORTANT: Never reveal the diagnosis directly. Guide the student to discover it themselves."""
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
class SocraticTutor:
    """AI tutor that uses Socratic method to guide clinical reasoning."""

    def __init__(self):
        # Full transcript in Anthropic messages format; it grows on every
        # respond() call and is replayed to the API to preserve context.
        self.conversation_history: list = []
        self.api_key = os.environ.get("ANTHROPIC_API_KEY")
        self.client = None
        if self.api_key and self.api_key != "sk-ant-your-key-here":
            try:
                self.client = anthropic.Anthropic(api_key=self.api_key)
            except Exception as e:
                logger.warning(f"Claude client init failed for tutor: {e}")

    def respond(self, student_message: str, case_context: dict) -> str:
        """Generate Socratic response to student's reasoning.

        Tries the Claude API first and falls back to keyword matching when
        the client is missing or the call fails. Both the student message
        and the chosen reply are appended to the conversation history.
        """
        self.conversation_history.append({
            "role": "user",
            "content": student_message,
        })

        # Try Claude API first, fallback to keyword-based
        if self.client:
            response = self._respond_with_claude(student_message, case_context)
            if response:
                self.conversation_history.append({"role": "assistant", "content": response})
                return response

        response = self._keyword_fallback(student_message, case_context)
        self.conversation_history.append({"role": "assistant", "content": response})
        return response

    def _respond_with_claude(self, message: str, context: dict) -> Optional[str]:
        """Generate a Socratic response using Claude API; returns None on failure."""
        system = TUTOR_SYSTEM_PROMPT.format(
            chief_complaint=context.get("chief_complaint", "unknown"),
            specialty=context.get("specialty", "general"),
            difficulty=context.get("difficulty", "intermediate"),
        )

        messages = self.conversation_history.copy()

        try:
            response = self.client.messages.create(
                model="claude-opus-4-6",
                max_tokens=300,
                system=system,
                messages=messages,
            )
            return response.content[0].text.strip()
        except Exception as e:
            logger.error(f"Tutor Claude API error: {e}")
            return None

    def _keyword_fallback(self, message: str, context: dict) -> str:
        """Keyword-based fallback when Claude API is unavailable.

        Short abbreviations ("mi", "pe", "acs", "dvt") are matched as whole
        tokens only — the previous substring check fired on e.g. "mi"
        inside "family" and "pe" inside "upper", returning the wrong hint.
        """
        import re

        message_lower = message.lower()
        tokens = set(re.findall(r"[a-z0-9]+", message_lower))

        def mentions(*keywords: str) -> bool:
            # Multi-word phrases use substring search; single words must
            # match a whole token.
            return any(
                (kw in message_lower) if " " in kw else (kw in tokens)
                for kw in keywords
            )

        if mentions("heart attack", "mi", "stemi", "acs"):
            return "You're considering an acute coronary event. That's a reasonable starting point given the presentation. But what features of this case are unusual for a typical MI? What risk factors stand out?"

        if mentions("cocaine", "drug", "substance"):
            return "Excellent observation about the substance use. How does cocaine specifically affect the coronary vasculature? And critically - how does this change your management compared to a standard ACS protocol?"

        if mentions("pe", "embolism", "dvt"):
            return "Pulmonary embolism is an important differential for chest pain. What clinical features would help you distinguish PE from ACS in this patient? What investigation would be most helpful?"

        if mentions("beta blocker", "metoprolol", "atenolol"):
            return "Think carefully about beta-blockers in this context. What happens physiologically when you block beta-receptors in a patient with cocaine on board? This is a critical management distinction."

        # Early in the conversation, steer toward systematic differentials.
        if len(self.conversation_history) <= 2:
            return "Let's think through this systematically. What are the most dangerous causes of this presentation you need to rule out first? Start with your differential diagnosis."

        return "Good thinking. Can you explain your reasoning further? What evidence supports your hypothesis, and what evidence might contradict it?"

    def reset(self):
        """Reset conversation for a new case."""
        self.conversation_history = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/analytics/__init__.py
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
# Analytics modules
|
|
|
|
|
|
backend/app/core/analytics/bias_detector.py
DELETED
|
@@ -1,118 +0,0 @@
|
|
| 1 |
-
from datetime import datetime
|
| 2 |
-
from typing import Optional
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
class BiasDetector:
    """Detects cognitive biases from student decision patterns."""

    def __init__(self):
        # One dict per completed case; see add_case_result for the shape.
        self.student_history: list = []

    def add_case_result(self, case_id: str, student_actions: list, diagnosis: str, correct: bool):
        """Record the outcome of one finished case.

        Args:
            case_id: Identifier of the case.
            student_actions: List of action dicts; each may carry a
                "diagnosis" and/or a "differential_list" key.
            diagnosis: The student's final diagnosis.
            correct: Whether that final diagnosis was right.
        """
        self.student_history.append({
            "case_id": case_id,
            "actions": student_actions,
            "diagnosis": diagnosis,
            "correct": correct,
            "timestamp": datetime.now().isoformat(),
        })

    def _recent_cases(self) -> list:
        """Analysis window: the last 10 cases (or all of them, if fewer)."""
        return self.student_history[-10:]

    def detect_anchoring_bias(self) -> Optional[dict]:
        """Flag anchoring when the first diagnosis was kept in >=7 recent cases."""
        recent = self._recent_cases()
        if not recent:
            return None

        anchoring_count = sum(
            1 for case in recent
            if case.get("actions")
            and isinstance(case["actions"][0], dict)
            and case["actions"][0].get("diagnosis") == case["diagnosis"]
        )

        if anchoring_count >= 7:
            return {
                "bias": "anchoring",
                "type": "anchoring",  # "type" mirrors the demo-report schema
                "severity": "moderate",
                "score": anchoring_count * 10,
                "evidence": f"Stuck with initial diagnosis in {anchoring_count}/{len(recent)} cases",
                "recommendation": "Practice cases with atypical presentations. Force yourself to reconsider after each new piece of information.",
            }
        return None

    def _differentials_considered(self, case: dict) -> int:
        """Count distinct diagnoses the student weighed during one case.

        ``actions`` is a *list* of action dicts (see add_case_result and
        detect_anchoring_bias); the old code called ``.get`` on it as if it
        were a dict, raising AttributeError for any case with actions.
        """
        actions = case.get("actions") or []
        if isinstance(actions, dict):
            # Defensive: tolerate a legacy dict shape carrying the list directly.
            return len(actions.get("differential_list", []))
        considered = set()
        for action in actions:
            if not isinstance(action, dict):
                continue
            considered.update(action.get("differential_list") or [])
            if action.get("diagnosis"):
                considered.add(action["diagnosis"])
        return len(considered)

    def detect_premature_closure(self) -> Optional[dict]:
        """Flag premature closure when <3 differentials were weighed in >=6 recent cases."""
        recent = self._recent_cases()
        if not recent:
            return None

        premature_count = sum(
            1 for case in recent
            if self._differentials_considered(case) < 3
        )

        if premature_count >= 6:
            return {
                "bias": "premature_closure",
                "type": "premature_closure",  # "type" mirrors the demo-report schema
                "severity": "high",
                "score": premature_count * 10,
                "evidence": f"Only considered 1-2 diagnoses in {premature_count}/{len(recent)} cases",
                "recommendation": "Force yourself to list 3+ differential diagnoses before deciding.",
            }
        return None

    def generate_bias_report(self) -> dict:
        """Run all detectors over the recorded history and summarise the result."""
        biases = []
        anchoring = self.detect_anchoring_bias()
        if anchoring:
            biases.append(anchoring)
        premature = self.detect_premature_closure()
        if premature:
            biases.append(premature)

        return {
            "biases_detected": biases,
            "cases_analyzed": len(self.student_history),
            "overall_accuracy": self._calculate_accuracy(),
            "generated_at": datetime.now().isoformat(),
        }

    def generate_demo_report(self) -> dict:
        """Return a fixed, hand-authored report for demo mode (no real history)."""
        return {
            "biases_detected": [
                {
                    "type": "anchoring",
                    "severity": "moderate",
                    "score": 65,
                    "evidence": "You stuck with your initial diagnosis in 7 out of 10 recent cases, even when new information contradicted it.",
                    "recommendation": "Practice cases with atypical presentations. Force yourself to reconsider after each new piece of information.",
                },
                {
                    "type": "premature_closure",
                    "severity": "low",
                    "score": 40,
                    "evidence": "In 4 out of 10 cases, you considered fewer than 3 differential diagnoses before settling on your answer.",
                    "recommendation": "Before finalizing, always list at least 3 differential diagnoses and explain why you're ruling each one out.",
                },
                {
                    "type": "availability",
                    "severity": "moderate",
                    "score": 55,
                    "evidence": "After studying cardiology, you diagnosed 3 consecutive non-cardiac cases as cardiac. Your recent study focus influenced your diagnoses.",
                    "recommendation": "Before diagnosing, list 3 differential diagnoses from different organ systems.",
                },
                {
                    "type": "confirmation",
                    "severity": "low",
                    "score": 30,
                    "evidence": "Minimal confirmation bias detected. You generally consider contradicting evidence.",
                    "recommendation": "Continue actively seeking evidence that contradicts your working diagnosis.",
                },
            ],
            "cases_analyzed": 48,
            "overall_accuracy": 75,
            "generated_at": datetime.now().isoformat(),
        }

    def _calculate_accuracy(self) -> float:
        """Percentage of recorded cases diagnosed correctly (0.0 when empty)."""
        if not self.student_history:
            return 0.0
        correct = sum(1 for case in self.student_history if case.get("correct"))
        return round(correct / len(self.student_history) * 100, 1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/analytics/knowledge_graph.py
DELETED
|
@@ -1,79 +0,0 @@
|
|
| 1 |
-
class KnowledgeGraphBuilder:
    """Builds knowledge graph from student case history."""

    def __init__(self):
        # concept name -> {"correct": int, "total": int}
        self.concepts: dict = {}
        # edge records: {"id", "source", "target", "correct", "total"}
        self.connections: list = []

    def update_concept(self, concept: str, correct: bool):
        """Record one attempt at *concept*, tallying correctness."""
        stats = self.concepts.setdefault(concept, {"correct": 0, "total": 0})
        stats["total"] += 1
        stats["correct"] += int(correct)

    def add_connection(self, source: str, target: str, correct: bool):
        """Record one traversal of the edge source -> target."""
        edge_id = f"{source}-{target}"
        for record in self.connections:
            if record["id"] == edge_id:
                record["total"] += 1
                record["correct"] += int(correct)
                return
        self.connections.append({
            "id": edge_id,
            "source": source,
            "target": target,
            "correct": int(correct),
            "total": 1,
        })

    def to_graph_data(self) -> dict:
        """Serialise accumulated stats into a {nodes, links} payload."""
        nodes = []
        for name, stats in self.concepts.items():
            nodes.append({
                "id": name,
                "strength": stats["correct"] / max(stats["total"], 1),
                "size": stats["total"],
            })
        links = []
        for record in self.connections:
            links.append({
                "source": record["source"],
                "target": record["target"],
                "strength": record["correct"] / max(record["total"], 1),
            })
        return {"nodes": nodes, "links": links}

    def build_demo_graph(self) -> dict:
        """Return a fixed, hand-authored demo graph for showcase mode."""
        # (id, strength, size, category)
        demo_nodes = [
            ("Cardiology", 0.82, 12, "specialty"),
            ("Respiratory", 0.65, 8, "specialty"),
            ("Infectious", 0.78, 10, "specialty"),
            ("Neurology", 0.45, 5, "specialty"),
            ("Gastro", 0.70, 7, "specialty"),
            ("Emergency", 0.55, 6, "specialty"),
            ("STEMI", 0.85, 8, "diagnosis"),
            ("Pulmonary Embolism", 0.40, 4, "diagnosis"),
            ("Dengue", 0.80, 9, "diagnosis"),
            ("Pneumonia", 0.72, 7, "diagnosis"),
            ("Meningitis", 0.35, 3, "diagnosis"),
            ("Chest Pain", 0.90, 10, "symptom"),
            ("Dyspnea", 0.75, 8, "symptom"),
            ("Fever", 0.85, 11, "symptom"),
            ("Headache", 0.60, 6, "symptom"),
            ("ECG", 0.88, 9, "investigation"),
            ("Troponin", 0.80, 7, "investigation"),
        ]
        # (source, target, strength)
        demo_links = [
            ("Chest Pain", "STEMI", 0.9),
            ("Chest Pain", "Pulmonary Embolism", 0.3),
            ("Cardiology", "STEMI", 0.9),
            ("STEMI", "ECG", 0.9),
            ("STEMI", "Troponin", 0.85),
            ("Dyspnea", "Pneumonia", 0.75),
            ("Dyspnea", "Pulmonary Embolism", 0.35),
            ("Respiratory", "Pneumonia", 0.72),
            ("Fever", "Dengue", 0.8),
            ("Fever", "Infectious", 0.78),
            ("Fever", "Meningitis", 0.4),
            ("Infectious", "Dengue", 0.82),
            ("Headache", "Meningitis", 0.35),
            ("Headache", "Neurology", 0.48),
        ]
        return {
            "nodes": [
                {"id": name, "strength": strength, "size": size, "category": category}
                for name, strength, size, category in demo_nodes
            ],
            "links": [
                {"source": src, "target": dst, "strength": strength}
                for src, dst, strength in demo_links
            ],
        }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/analytics/recommender.py
DELETED
|
@@ -1,62 +0,0 @@
|
|
| 1 |
-
class CaseRecommender:
    """Recommends next cases based on student needs."""

    def recommend(self, student_profile: dict) -> list:
        """Build an ordered list of case recommendations from a profile dict.

        Produces up to three entries: a weak-area drill (score < 60%), a
        bias-countering set when anchoring was flagged, and a challenge
        case for a strong specialty (score > 80%).
        """
        scores = student_profile.get("specialty_scores", {})
        recs = []

        weak = next((name for name, value in scores.items() if value < 60), None)
        if weak is not None:
            recs.append({
                "type": "weak_area",
                "specialty": weak,
                "difficulty": "beginner",
                "reason": f"Your {weak} accuracy is only {scores[weak]}%",
                "priority": "high",
            })

        if student_profile.get("biases", {}).get("anchoring"):
            recs.append({
                "type": "bias_counter",
                "specialty": "mixed",
                "difficulty": "intermediate",
                "reason": "Atypical presentation cases to reduce anchoring bias",
                "priority": "medium",
            })

        strong = next((name for name, value in scores.items() if value > 80), None)
        if strong is not None:
            recs.append({
                "type": "challenge",
                "specialty": strong,
                "difficulty": "advanced",
                "reason": f"Your {strong} accuracy is {scores[strong]}%. Ready for advanced cases!",
                "priority": "low",
            })

        return recs

    def get_demo_recommendations(self) -> list:
        """Return fixed, hand-authored recommendations for demo mode."""
        return [
            {
                "type": "weak_area",
                "specialty": "Neurology",
                "difficulty": "beginner",
                "reason": "Your neurology accuracy is only 45%. Let's strengthen this foundation.",
                "priority": "high",
            },
            {
                "type": "bias_counter",
                "specialty": "Mixed",
                "difficulty": "intermediate",
                "reason": "Atypical presentation cases to reduce your anchoring bias pattern.",
                "priority": "medium",
            },
            {
                "type": "challenge",
                "specialty": "Cardiology",
                "difficulty": "advanced",
                "reason": "Your cardiology accuracy is 82%. Ready for advanced cases!",
                "priority": "low",
            },
        ]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/rag/__init__.py
DELETED
|
@@ -1,7 +0,0 @@
|
|
| 1 |
-
"""RAG system - ChromaDB vector store + Claude API case generation."""
|
| 2 |
-
|
| 3 |
-
from app.core.rag.vector_store import MedicalVectorStore
|
| 4 |
-
from app.core.rag.retriever import MedicalRetriever
|
| 5 |
-
from app.core.rag.generator import CaseGenerator
|
| 6 |
-
|
| 7 |
-
__all__ = ["MedicalVectorStore", "MedicalRetriever", "CaseGenerator"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/rag/generator.py
DELETED
|
@@ -1,465 +0,0 @@
|
|
| 1 |
-
"""RAG-powered clinical case generator using ChromaDB + Claude API."""
|
| 2 |
-
|
| 3 |
-
import json
|
| 4 |
-
import logging
|
| 5 |
-
import os
|
| 6 |
-
import uuid
|
| 7 |
-
from typing import Optional
|
| 8 |
-
from pathlib import Path
|
| 9 |
-
from datetime import datetime
|
| 10 |
-
|
| 11 |
-
import anthropic
|
| 12 |
-
|
| 13 |
-
from app.core.rag.vector_store import MedicalVectorStore
|
| 14 |
-
from app.core.rag.retriever import MedicalRetriever
|
| 15 |
-
|
| 16 |
-
logger = logging.getLogger(__name__)
|
| 17 |
-
|
| 18 |
-
# Claude API case generation prompt
|
| 19 |
-
CASE_GENERATION_PROMPT = """You are an expert medical case writer for Clinical-Mind, an AI-powered clinical reasoning simulator for Indian medical students (MBBS final year, interns, NEET-PG aspirants).
|
| 20 |
-
|
| 21 |
-
Using the reference cases from the medical corpus below as inspiration and factual grounding, generate a UNIQUE, ORIGINAL clinical case that:
|
| 22 |
-
|
| 23 |
-
1. Is set in an Indian healthcare context (Indian demographics, locations, disease patterns, healthcare system)
|
| 24 |
-
2. Matches the requested specialty and difficulty level
|
| 25 |
-
3. Has realistic, medically accurate clinical details
|
| 26 |
-
4. Includes atypical or challenging features appropriate to the difficulty level
|
| 27 |
-
5. Tests clinical reasoning, not just knowledge recall
|
| 28 |
-
|
| 29 |
-
IMPORTANT RULES:
|
| 30 |
-
- Do NOT copy any reference case verbatim. Use them only as factual grounding.
|
| 31 |
-
- Create a completely new patient scenario with different demographics, presentation nuances, and clinical twists.
|
| 32 |
-
- For "beginner" cases: straightforward presentation, classic findings
|
| 33 |
-
- For "intermediate" cases: some atypical features, requires careful analysis
|
| 34 |
-
- For "advanced" cases: atypical presentation, multiple co-morbidities, diagnostic dilemmas
|
| 35 |
-
|
| 36 |
-
{rag_context}
|
| 37 |
-
|
| 38 |
-
Generate a case for:
|
| 39 |
-
- Specialty: {specialty}
|
| 40 |
-
- Difficulty: {difficulty}
|
| 41 |
-
- Student Level: {year_level}
|
| 42 |
-
|
| 43 |
-
Respond with ONLY a valid JSON object (no markdown, no explanation) with this exact structure:
|
| 44 |
-
{{
|
| 45 |
-
"patient": {{"age": <int>, "gender": "<Male/Female>", "location": "<Indian city, state>"}},
|
| 46 |
-
"chief_complaint": "<brief chief complaint>",
|
| 47 |
-
"initial_presentation": "<2-3 sentence clinical vignette presented to the student>",
|
| 48 |
-
"vital_signs": {{"bp": "<systolic/diastolic>", "hr": <int>, "rr": <int>, "temp": <float>, "spo2": <int>}},
|
| 49 |
-
"stages": [
|
| 50 |
-
{{"stage": "history", "info": "<detailed history findings revealed when student takes history>"}},
|
| 51 |
-
{{"stage": "physical_exam", "info": "<detailed physical exam findings>"}},
|
| 52 |
-
{{"stage": "labs", "info": "<investigation results including labs, imaging, special tests>"}}
|
| 53 |
-
],
|
| 54 |
-
"diagnosis": "<correct final diagnosis>",
|
| 55 |
-
"differentials": ["<differential 1>", "<differential 2>", "<differential 3>", "<differential 4>", "<differential 5>"],
|
| 56 |
-
"learning_points": ["<point 1>", "<point 2>", "<point 3>", "<point 4>"],
|
| 57 |
-
"atypical_features": "<what makes this case challenging or unique>",
|
| 58 |
-
"specialty": "{specialty}",
|
| 59 |
-
"difficulty": "{difficulty}"
|
| 60 |
-
}}"""
|
| 61 |
-
|
| 62 |
-
EVALUATION_PROMPT = """You are a clinical reasoning evaluator for medical students. A student has submitted a diagnosis for a clinical case.
|
| 63 |
-
|
| 64 |
-
{rag_context}
|
| 65 |
-
|
| 66 |
-
Case Diagnosis: {correct_diagnosis}
|
| 67 |
-
Student's Diagnosis: {student_diagnosis}
|
| 68 |
-
Student's Reasoning: {student_reasoning}
|
| 69 |
-
|
| 70 |
-
Evaluate the student's diagnosis and reasoning. Consider:
|
| 71 |
-
1. Is the diagnosis correct or partially correct?
|
| 72 |
-
2. What was good about their clinical reasoning?
|
| 73 |
-
3. What did they miss or get wrong?
|
| 74 |
-
4. What are the key learning points?
|
| 75 |
-
|
| 76 |
-
Respond with ONLY a valid JSON object:
|
| 77 |
-
{{
|
| 78 |
-
"is_correct": <true/false>,
|
| 79 |
-
"accuracy_score": <0-100>,
|
| 80 |
-
"feedback": "<2-3 sentences of constructive feedback>",
|
| 81 |
-
"reasoning_strengths": ["<strength 1>", "<strength 2>"],
|
| 82 |
-
"reasoning_gaps": ["<gap 1>", "<gap 2>"],
|
| 83 |
-
"learning_points": ["<point 1>", "<point 2>", "<point 3>"],
|
| 84 |
-
"suggested_review_topics": ["<topic 1>", "<topic 2>"]
|
| 85 |
-
}}"""
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
class CaseGenerator:
    """RAG-powered clinical case generator using ChromaDB + Claude API.

    Pipeline per case:
      1. Retrieve grounding context for the requested specialty/difficulty
         from the vector store (ChromaDB).
      2. Ask Claude to author a brand-new structured case from that context.
      3. Fall back to serving a corpus case when Claude is unavailable
         (missing API key or API failure).

    Generated cases are cached in memory (``active_cases``) and persisted as
    JSON files under ``./data/active_cases`` so they survive restarts.
    """

    # Claude model used for both case generation and evaluation.
    MODEL = "claude-opus-4-6"

    def __init__(self, vector_store: Optional["MedicalVectorStore"] = None):
        """Set up persistence, the RAG stack, and (optionally) the Claude client.

        Args:
            vector_store: Pre-built vector store to reuse; a fresh
                ``MedicalVectorStore`` is created when omitted.
        """
        # Persistent storage directory for generated cases.
        self.storage_dir = Path("./data/active_cases")
        self.storage_dir.mkdir(parents=True, exist_ok=True)

        # In-memory cache, seeded from whatever survived on disk.
        self.active_cases: dict = self._load_persisted_cases()
        logger.info(f"Loaded {len(self.active_cases)} persisted cases from disk")

        # RAG stack: vector store + retriever.
        # NOTE: compare against None (not truthiness) so an injected store
        # that happens to be empty/falsy is still honored.
        self.vector_store = vector_store if vector_store is not None else MedicalVectorStore()
        self.retriever = MedicalRetriever(self.vector_store)

        # Claude client is optional; without it we serve corpus fallbacks.
        self.api_key = os.environ.get("ANTHROPIC_API_KEY", "")
        self.client = None
        if self.api_key and self.api_key != "sk-ant-your-key-here":
            try:
                self.client = anthropic.Anthropic(api_key=self.api_key)
                logger.info("Claude API client initialized")
            except Exception as e:
                logger.warning(f"Claude API client init failed: {e}")

        # First run: ingest the seed corpus so queries have something to hit.
        if self.vector_store.count() == 0:
            logger.info("Vector store empty, ingesting seed corpus...")
            count = self.vector_store.ingest_corpus()
            logger.info(f"Ingested {count} document chunks into ChromaDB")

    def generate_case(
        self,
        specialty: str,
        difficulty: str = "intermediate",
        year_level: str = "final_year",
    ) -> dict:
        """Generate a unique clinical case using RAG context + Claude API.

        Returns the case dict (with a short ``id`` assigned); the case is
        also cached in memory and persisted to disk.
        """
        case_id = str(uuid.uuid4())[:8]

        # Step 1: retrieve grounding context from ChromaDB.
        rag_context = self.retriever.retrieve_case_context(
            specialty=specialty,
            difficulty=difficulty,
            n_results=5,
        )

        # Step 2: generate via Claude when both client and context exist.
        case_data = None
        if self.client and rag_context:
            case_data = self._generate_with_claude(
                specialty=specialty,
                difficulty=difficulty,
                year_level=year_level,
                rag_context=rag_context,
            )

        # Step 3: fall back to a corpus-sourced case.
        if not case_data:
            case_data = self._fallback_from_corpus(specialty, difficulty)

        case_data["id"] = case_id
        self.active_cases[case_id] = case_data

        # Persist so the case survives restarts / worker recycling.
        self._save_case_to_disk(case_id, case_data)

        # Periodically drop stale on-disk cases once we accumulate many.
        if len(self.active_cases) > 20:
            self._cleanup_old_cases()

        return case_data

    @staticmethod
    def _parse_json_response(response_text: str) -> dict:
        """Parse a JSON object from a Claude reply, tolerating ```json fences.

        Raises:
            json.JSONDecodeError: if the cleaned text is not valid JSON.
        """
        text = response_text.strip()
        if text.startswith("```"):
            text = text.split("```")[1]
        if text.startswith("json"):
            text = text[4:]
        return json.loads(text.strip())

    def _generate_with_claude(
        self,
        specialty: str,
        difficulty: str,
        year_level: str,
        rag_context: str,
    ) -> Optional[dict]:
        """Generate a case using Claude API with RAG context.

        Returns the parsed case dict, or None on any API/parse failure so the
        caller can fall back to the corpus.
        """
        prompt = CASE_GENERATION_PROMPT.format(
            rag_context=rag_context,
            specialty=specialty,
            difficulty=difficulty,
            year_level=year_level,
        )

        try:
            response = self.client.messages.create(
                model=self.MODEL,
                max_tokens=4096,
                messages=[{"role": "user", "content": prompt}],
            )
            case_data = self._parse_json_response(response.content[0].text)
            logger.info(f"Claude generated case: {case_data.get('diagnosis', 'unknown')}")
            return case_data

        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse Claude response as JSON: {e}")
        except anthropic.APIError as e:
            logger.error(f"Claude API error: {e}")
        except Exception as e:
            logger.error(f"Case generation error: {e}")

        return None

    def _fallback_from_corpus(self, specialty: str, difficulty: str) -> dict:
        """Fallback: return a case directly from the corpus when Claude API is unavailable."""
        results = self.vector_store.query(
            query_text=f"Clinical case {specialty} {difficulty}",
            specialty=specialty,
            difficulty=difficulty,
            n_results=1,
            chunk_type="full_narrative",
        )

        if results:
            # Prefer the original structured JSON case when we can find it.
            case_id = results[0]["metadata"].get("case_id", "")
            case_data = self._load_case_from_corpus(case_id, specialty)
            if case_data:
                return case_data

        # Next best: a minimal case assembled from the raw RAG text.
        if results:
            return {
                "patient": {"age": 35, "gender": "Male", "location": "India"},
                "chief_complaint": results[0]["metadata"].get("title", "Medical case"),
                "initial_presentation": results[0]["content"][:500],
                "vital_signs": {"bp": "120/80", "hr": 80, "rr": 16, "temp": 37.0, "spo2": 98},
                "stages": [
                    {"stage": "history", "info": "Please configure ANTHROPIC_API_KEY for dynamic case generation. Currently serving from corpus."},
                    {"stage": "physical_exam", "info": "Physical examination findings from corpus."},
                    {"stage": "labs", "info": "Laboratory results from corpus."},
                ],
                "diagnosis": "Configure API key for full case generation",
                "differentials": [],
                "learning_points": ["Set ANTHROPIC_API_KEY environment variable to enable AI-powered case generation"],
                "atypical_features": "",
                "specialty": specialty,
                "difficulty": difficulty,
            }

        # Absolute fallback if nothing in corpus.
        return self._empty_case(specialty, difficulty)

    def _load_case_from_corpus(self, case_id: str, specialty: str) -> Optional[dict]:
        """Load the original structured case from corpus JSON files.

        Scans every JSON file in the corpus directory for a matching id and
        transforms the record into the API case format. Returns None when
        the id is not found or files are unreadable.
        """
        # Deferred project import to avoid a module-level cycle.
        from app.core.rag.vector_store import CORPUS_DIR

        for json_file in CORPUS_DIR.glob("*.json"):
            try:
                with open(json_file) as f:
                    cases = json.load(f)
                for case in cases:
                    if case.get("id") == case_id:
                        # Transform corpus record to API format.
                        return {
                            "patient": case.get("demographics", {"age": 35, "gender": "Unknown", "location": "India"}),
                            "chief_complaint": case.get("chief_complaint", ""),
                            "initial_presentation": case.get("presentation", ""),
                            "vital_signs": case.get("vital_signs", {}),
                            "stages": case.get("stages", [
                                {"stage": "history", "info": case.get("history", "")},
                                {"stage": "physical_exam", "info": case.get("physical_exam", "")},
                                {"stage": "labs", "info": case.get("investigations", "")},
                            ]),
                            "diagnosis": case.get("diagnosis", ""),
                            "differentials": case.get("differentials", []),
                            "learning_points": case.get("learning_points", []),
                            "atypical_features": case.get("atypical_features", ""),
                            "specialty": case.get("specialty", specialty),
                            "difficulty": case.get("difficulty", "intermediate"),
                        }
            except Exception:
                # Best-effort scan: a malformed file should not abort lookup.
                continue
        return None

    def _empty_case(self, specialty: str, difficulty: str) -> dict:
        """Return a placeholder case when corpus is empty."""
        return {
            "patient": {"age": 0, "gender": "Unknown", "location": "India"},
            "chief_complaint": "No cases available",
            "initial_presentation": "Please run the corpus ingestion script to load medical cases into the RAG system.",
            "vital_signs": {"bp": "N/A", "hr": 0, "rr": 0, "temp": 0, "spo2": 0},
            "stages": [],
            "diagnosis": "Corpus empty",
            "differentials": [],
            "learning_points": ["Run: python -m scripts.ingest to load the medical corpus"],
            "specialty": specialty,
            "difficulty": difficulty,
        }

    def get_case(self, case_id: str) -> Optional[dict]:
        """Look up an active case by id: memory first, then disk cache."""
        case = self.active_cases.get(case_id)
        if case:
            return case

        # Not in memory: try the persisted copy.
        case_file = self.storage_dir / f"{case_id}.json"
        if case_file.exists():
            try:
                with open(case_file, 'r') as f:
                    data = json.load(f)
                case_data = data.get("case_data")
                if case_data:
                    # Re-cache in memory for future lookups.
                    self.active_cases[case_id] = case_data
                    logger.info(f"Loaded case {case_id} from disk cache")
                    return case_data
            except Exception as e:
                logger.error(f"Failed to load case {case_id} from disk: {e}")

        return None

    def process_action(self, case_id: str, action_type: str, student_input: Optional[str] = None) -> dict:
        """Reveal the case stage corresponding to a student action.

        Fix: uses get_case() (not active_cases directly) so cases that are
        persisted on disk but not in memory are still found.
        """
        case = self.get_case(case_id)
        if not case:
            return {"error": "Case not found"}

        # Map student actions onto the ordered case stages.
        stage_map = {
            "take_history": 0,
            "physical_exam": 1,
            "order_labs": 2,
        }

        stage_index = stage_map.get(action_type)
        if stage_index is not None and stage_index < len(case.get("stages", [])):
            return {
                "action": action_type,
                "result": case["stages"][stage_index],
            }

        return {"action": action_type, "result": "Action processed"}

    def evaluate_diagnosis(self, case_id: str, diagnosis: str, reasoning: str = "") -> dict:
        """Evaluate student diagnosis using RAG context + Claude API for rich feedback.

        Falls back to keyword matching when the Claude client is unavailable.
        Fix: uses get_case() so disk-persisted cases are also evaluable.
        """
        case = self.get_case(case_id)
        if not case:
            return {"error": "Case not found"}

        correct_diagnosis = case.get("diagnosis", "")

        # Preferred path: Claude evaluation grounded in RAG context.
        if self.client:
            evaluation = self._evaluate_with_claude(
                correct_diagnosis=correct_diagnosis,
                student_diagnosis=diagnosis,
                student_reasoning=reasoning,
                specialty=case.get("specialty", ""),
            )
            if evaluation:
                evaluation["correct_diagnosis"] = correct_diagnosis
                evaluation["student_diagnosis"] = diagnosis
                evaluation["differentials"] = case.get("differentials", [])
                return evaluation

        # Fallback: correct if any significant (>3 chars) word of the
        # reference diagnosis appears in the student's answer.
        is_correct = any(
            keyword in diagnosis.lower()
            for keyword in correct_diagnosis.lower().split()
            if len(keyword) > 3
        )

        return {
            "student_diagnosis": diagnosis,
            "correct_diagnosis": correct_diagnosis,
            "is_correct": is_correct,
            "accuracy_score": 100 if is_correct else 30,
            "differentials": case.get("differentials", []),
            "learning_points": case.get("learning_points", []),
            "feedback": "Excellent clinical reasoning!" if is_correct else f"The correct diagnosis is {correct_diagnosis}. Review the key learning points.",
            "reasoning_strengths": [],
            "reasoning_gaps": [],
            "suggested_review_topics": [],
        }

    def _evaluate_with_claude(
        self,
        correct_diagnosis: str,
        student_diagnosis: str,
        student_reasoning: str,
        specialty: str,
    ) -> Optional[dict]:
        """Use Claude API to provide rich evaluation feedback; None on failure."""
        rag_context = self.retriever.retrieve_for_evaluation(
            diagnosis=correct_diagnosis,
            specialty=specialty,
        )

        prompt = EVALUATION_PROMPT.format(
            rag_context=rag_context,
            correct_diagnosis=correct_diagnosis,
            student_diagnosis=student_diagnosis,
            student_reasoning=student_reasoning or "No reasoning provided",
        )

        try:
            response = self.client.messages.create(
                model=self.MODEL,
                max_tokens=2048,
                messages=[{"role": "user", "content": prompt}],
            )
            return self._parse_json_response(response.content[0].text)
        except Exception as e:
            logger.error(f"Claude evaluation error: {e}")
            return None

    def get_corpus_stats(self) -> dict:
        """Get statistics about the loaded RAG corpus."""
        return self.retriever.get_corpus_stats()

    def _save_case_to_disk(self, case_id: str, case_data: dict) -> None:
        """Save a case to persistent storage (best-effort: errors are logged)."""
        try:
            case_file = self.storage_dir / f"{case_id}.json"
            with open(case_file, 'w') as f:
                json.dump({
                    "case_id": case_id,
                    "case_data": case_data,
                    "timestamp": datetime.now().isoformat()
                }, f, indent=2)
            logger.info(f"Saved case {case_id} to disk")
        except Exception as e:
            logger.error(f"Failed to save case {case_id} to disk: {e}")

    def _load_persisted_cases(self) -> dict:
        """Load all persisted cases from disk into a {case_id: case_data} dict."""
        cases = {}
        try:
            for case_file in self.storage_dir.glob("*.json"):
                with open(case_file, 'r') as f:
                    data = json.load(f)
                case_id = data.get("case_id")
                case_data = data.get("case_data")
                if case_id and case_data:
                    cases[case_id] = case_data
                    logger.debug(f"Loaded case {case_id} from disk")
        except Exception as e:
            logger.error(f"Failed to load persisted cases: {e}")
        return cases

    def _cleanup_old_cases(self) -> None:
        """Delete on-disk case files older than 24 hours (best-effort)."""
        try:
            cutoff_time = datetime.now().timestamp() - (24 * 60 * 60)  # 24 hours ago
            for case_file in self.storage_dir.glob("*.json"):
                if case_file.stat().st_mtime < cutoff_time:
                    case_file.unlink()
                    logger.debug(f"Cleaned up old case file: {case_file.name}")
        except Exception as e:
            logger.error(f"Failed to cleanup old cases: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/core/rag/retriever.py
DELETED
|
@@ -1,125 +0,0 @@
|
|
| 1 |
-
"""RAG retriever - queries ChromaDB and formats context for Claude API."""
|
| 2 |
-
|
| 3 |
-
import logging
|
| 4 |
-
from typing import Optional
|
| 5 |
-
|
| 6 |
-
from app.core.rag.vector_store import MedicalVectorStore
|
| 7 |
-
|
| 8 |
-
logger = logging.getLogger(__name__)
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
class MedicalRetriever:
    """Query layer over the medical vector store.

    Formats retrieval results into prompt-ready context strings for case
    generation and answer evaluation, and exposes simple corpus statistics.
    """

    def __init__(self, vector_store: "MedicalVectorStore"):
        self.vector_store = vector_store

    def retrieve_case_context(
        self,
        specialty: str,
        difficulty: str = "intermediate",
        topic_hint: Optional[str] = None,
        n_results: int = 5,
    ) -> str:
        """Retrieve relevant case context for generating a new clinical case.

        Returns formatted context string suitable for injection into Claude prompt.
        """
        # Compose the semantic query from specialty/difficulty (+ optional hint).
        search_text = f"Clinical case in {specialty} for medical students, {difficulty} difficulty level"
        if topic_hint:
            search_text += f", related to {topic_hint}"

        # Primary pass: full narratives filtered by specialty.
        hits = self.vector_store.query(
            query_text=search_text,
            specialty=specialty,
            n_results=n_results,
            chunk_type="full_narrative",
        )

        # Second pass: drop the specialty filter if nothing matched.
        if not hits:
            hits = self.vector_store.query(
                query_text=search_text,
                n_results=n_results,
                chunk_type="full_narrative",
            )

        if not hits:
            logger.warning(f"No RAG context found for specialty={specialty}, difficulty={difficulty}")
            return ""

        # Assemble the prompt context block.
        lines = [
            "=== REFERENCE MEDICAL CASES FROM CORPUS ===",
            f"Specialty: {specialty} | Difficulty: {difficulty}",
            f"Retrieved {len(hits)} reference cases for inspiration.",
            "",
        ]
        for idx, hit in enumerate(hits, start=1):
            lines += [
                f"--- Reference Case {idx} (Relevance: {hit['relevance_score']:.2f}) ---",
                hit["content"],
                "",
            ]
        lines.append("=== END OF REFERENCE CASES ===")
        return "\n".join(lines)

    def retrieve_for_evaluation(
        self,
        diagnosis: str,
        specialty: str,
    ) -> str:
        """Retrieve context relevant to a specific diagnosis for evaluating student answers."""
        search_text = f"Diagnosis: {diagnosis}. Clinical features, differentials, and learning points."

        # Learning-chunk lookup, with a specialty-agnostic retry.
        hits = self.vector_store.query(
            query_text=search_text,
            specialty=specialty,
            n_results=3,
            chunk_type="learning",
        )
        if not hits:
            hits = self.vector_store.query(
                query_text=search_text,
                n_results=3,
                chunk_type="learning",
            )
        if not hits:
            return ""

        lines = ["=== REFERENCE LEARNING MATERIAL ==="]
        for hit in hits:
            lines += [hit["content"], ""]
        lines.append("=== END REFERENCE ===")
        return "\n".join(lines)

    def retrieve_similar_cases(
        self,
        presentation: str,
        n_results: int = 3,
    ) -> list[dict]:
        """Find cases similar to a given presentation text."""
        return self.vector_store.query(
            query_text=presentation,
            n_results=n_results,
            chunk_type="presentation",
        )

    def get_corpus_stats(self) -> dict:
        """Get statistics about the loaded corpus."""
        doc_count = self.vector_store.count()
        return {
            "total_documents": doc_count,
            "total_cases": doc_count // 3,  # 3 chunks per case
            "specialties": self.vector_store.get_specialties(),
            "status": "loaded" if doc_count > 0 else "empty",
        }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|