Update app.py
app.py CHANGED
@@ -9,7 +9,6 @@ import base64
 from datetime import datetime
 from typing import List, Dict, Optional, Tuple
 from enum import Enum
-
 from fastapi import FastAPI, HTTPException, UploadFile, File, Query, Form
 from fastapi.responses import StreamingResponse, JSONResponse
 from fastapi.middleware.cors import CORSMiddleware
@@ -19,7 +18,7 @@ from bson import ObjectId
 import speech_recognition as sr
 from gtts import gTTS
 from pydub import AudioSegment
-
+import PyPDF2
 from txagent.txagent import TxAgent
 from db.mongo import get_mongo_client
 
@@ -28,12 +27,14 @@ logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(
 logger = logging.getLogger("TxAgentAPI")
 
 # App
-app = FastAPI(title="TxAgent API", version="2.
+app = FastAPI(title="TxAgent API", version="2.4.0")  # Updated version for file upload support
 
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["*"],
-
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"]
 )
 
 # Pydantic Models
@@ -54,6 +55,10 @@ class VoiceOutputRequest(BaseModel):
     slow: bool = False
     return_format: str = "mp3"  # mp3 or base64
 
+class PatientReportAnalysisRequest(BaseModel):
+    patient_id: str
+    file_type: str = "text"  # text, pdf, audio
+
 # Enums
 class RiskLevel(str, Enum):
     NONE = "none"
@@ -122,13 +127,11 @@ def detect_suicide_risk(text: str) -> Tuple[RiskLevel, float, List[str]]:
         'no reason to live', 'plan to die'
     ]
 
-    # Check for explicit mentions
    explicit_mentions = [kw for kw in suicide_keywords if kw in text.lower()]
 
     if not explicit_mentions:
         return RiskLevel.NONE, 0.0, []
 
-    # If found, ask AI for detailed assessment
     assessment_prompt = (
         "Assess the suicide risk level based on this text. "
         "Consider frequency, specificity, and severity of statements. "
@@ -141,11 +144,10 @@ def detect_suicide_risk(text: str) -> Tuple[RiskLevel, float, List[str]]:
         response = agent.chat(
             message=assessment_prompt,
             history=[],
-            temperature=0.2,
+            temperature=0.2,
             max_new_tokens=256
         )
 
-        # Extract JSON from response
         json_match = re.search(r'\{.*\}', response, re.DOTALL)
         if json_match:
             assessment = json.loads(json_match.group())
@@ -157,8 +159,7 @@ def detect_suicide_risk(text: str) -> Tuple[RiskLevel, float, List[str]]:
     except Exception as e:
         logger.error(f"Error in suicide risk assessment: {e}")
 
-
-    risk_score = min(0.1 * len(explicit_mentions), 0.9)  # Cap at 0.9 for fallback
+    risk_score = min(0.1 * len(explicit_mentions), 0.9)
     if risk_score > 0.7:
         return RiskLevel.HIGH, risk_score, explicit_mentions
     elif risk_score > 0.4:
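Note: the fallback branch above scores 0.1 per matched keyword, capped at 0.9, then maps the score onto risk bands. A quick standalone illustration (not part of the commit; the branch below 0.4 sits outside this hunk, so its exact level is an assumption):

```python
# Fallback heuristic from detect_suicide_risk, in isolation.
mentions = ["want to die", "end my life", "suicide"]  # keywords matched in the text
risk_score = min(0.1 * len(mentions), 0.9)            # 0.3; capped at 0.9
# Visible thresholds: > 0.7 -> RiskLevel.HIGH, > 0.4 -> the next band down
# (presumably MODERATE; that branch's body is outside this hunk).
```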
@@ -185,25 +186,112 @@ def serialize_patient(patient: dict) -> dict:
     patient_copy["_id"] = str(patient_copy["_id"])
     return patient_copy
 
-def compute_patient_data_hash(
-    """Compute SHA-256 hash of patient data."""
-    serialized = json.dumps(
+def compute_patient_data_hash(data: dict) -> str:
+    """Compute SHA-256 hash of patient data or report."""
+    serialized = json.dumps(data, sort_keys=True)
     return hashlib.sha256(serialized.encode()).hexdigest()
 
+def extract_text_from_pdf(pdf_data: bytes) -> str:
+    """Extract text from a PDF file."""
+    try:
+        pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_data))
+        text = ""
+        for page in pdf_reader.pages:
+            text += page.extract_text() or ""
+        return clean_text_response(text)
+    except Exception as e:
+        logger.error(f"Error extracting text from PDF: {e}")
+        raise HTTPException(status_code=400, detail="Failed to extract text from PDF")
+
+async def analyze_patient_report(patient_id: str, report_content: str, file_type: str):
+    """Analyze a patient report and store results."""
+    try:
+        # Compute hash of report content
+        report_data = {"patient_id": patient_id, "content": report_content, "file_type": file_type}
+        report_hash = compute_patient_data_hash(report_data)
+        logger.info(f"🧾 Analyzing report for patient: {patient_id}")
+
+        # Check if analysis exists and hash matches
+        existing_analysis = await analysis_collection.find_one({"patient_id": patient_id, "report_hash": report_hash})
+        if existing_analysis:
+            logger.info(f"✅ No changes in report data for {patient_id}, skipping analysis")
+            return existing_analysis
+
+        # Construct analysis prompt
+        prompt = (
+            "You are a clinical decision support AI. Analyze the following patient report:\n"
+            "1. Summarize the patient's medical history.\n"
+            "2. Identify risks or red flags (including mental health and suicide risk).\n"
+            "3. Highlight missed diagnoses or treatments.\n"
+            "4. Suggest next clinical steps.\n"
+            f"\nPatient Report ({file_type}):\n{'-'*40}\n{report_content[:10000]}"
+        )
+
+        # Perform analysis
+        raw_response = agent.chat(
+            message=prompt,
+            history=[],
+            temperature=0.7,
+            max_new_tokens=1024
+        )
+        structured_response = structure_medical_response(raw_response)
+
+        # Suicide risk assessment
+        risk_level, risk_score, risk_factors = detect_suicide_risk(raw_response)
+        suicide_risk = {
+            "level": risk_level.value,
+            "score": risk_score,
+            "factors": risk_factors
+        }
+
+        # Store analysis
+        analysis_doc = {
+            "patient_id": patient_id,
+            "timestamp": datetime.utcnow(),
+            "summary": structured_response,
+            "suicide_risk": suicide_risk,
+            "raw": raw_response,
+            "report_hash": report_hash,
+            "file_type": file_type
+        }
+
+        await analysis_collection.update_one(
+            {"patient_id": patient_id, "report_hash": report_hash},
+            {"$set": analysis_doc},
+            upsert=True
+        )
+
+        # Create alert for high-risk cases
+        if risk_level in [RiskLevel.MODERATE, RiskLevel.HIGH, RiskLevel.SEVERE]:
+            await create_alert(patient_id, suicide_risk)
+
+        logger.info(f"✅ Stored analysis for patient report {patient_id}")
+        return analysis_doc
+
+    except Exception as e:
+        logger.error(f"Error analyzing patient report: {e}")
+        raise HTTPException(status_code=500, detail="Failed to analyze patient report")
+
+async def analyze_all_patients():
+    """Analyze all patients in the database."""
+    patients = await patients_collection.find({}).to_list(length=None)
+    for patient in patients:
+        await analyze_patient(patient)
+        await asyncio.sleep(0.1)
+
 async def analyze_patient(patient: dict):
+    """Analyze patient data (existing logic for patient records)."""
     try:
         serialized = serialize_patient(patient)
         patient_id = serialized.get("fhir_id")
         patient_hash = compute_patient_data_hash(serialized)
         logger.info(f"🧾 Analyzing patient: {patient_id}")
 
-        # Check if analysis exists and hash matches
         existing_analysis = await analysis_collection.find_one({"patient_id": patient_id})
         if existing_analysis and existing_analysis.get("data_hash") == patient_hash:
             logger.info(f"✅ No changes in patient data for {patient_id}, skipping analysis")
-            return
+            return
 
-        # Main clinical analysis
         doc = json.dumps(serialized, indent=2)
         message = (
             "You are a clinical decision support AI.\n\n"
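Note: the skip-if-unchanged logic in both analysis paths relies on `json.dumps(..., sort_keys=True)` being deterministic, so the same document always yields the same SHA-256 digest. A minimal standalone sketch of that property (assuming plain JSON-serializable values; non-JSON types such as `datetime` or `ObjectId` would need stringifying first, as `serialize_patient` does for `_id`):

```python
import hashlib
import json

def compute_patient_data_hash(data: dict) -> str:
    """Same helper as in the diff: stable serialization, then SHA-256."""
    serialized = json.dumps(data, sort_keys=True)
    return hashlib.sha256(serialized.encode()).hexdigest()

a = {"fhir_id": "123", "full_name": "Jane Doe"}
b = {"full_name": "Jane Doe", "fhir_id": "123"}  # same content, different key order
assert compute_patient_data_hash(a) == compute_patient_data_hash(b)
```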
@@ -218,7 +306,6 @@ async def analyze_patient(patient: dict):
         raw = agent.chat(message=message, history=[], temperature=0.7, max_new_tokens=1024)
         structured = structure_medical_response(raw)
 
-        # Suicide risk assessment
         risk_level, risk_score, risk_factors = detect_suicide_risk(raw)
         suicide_risk = {
             "level": risk_level.value,
@@ -226,14 +313,13 @@ async def analyze_patient(patient: dict):
             "factors": risk_factors
         }
 
-        # Store analysis with data hash
         analysis_doc = {
             "patient_id": patient_id,
             "timestamp": datetime.utcnow(),
             "summary": structured,
             "suicide_risk": suicide_risk,
             "raw": raw,
-            "data_hash": patient_hash
+            "data_hash": patient_hash
         }
 
         await analysis_collection.update_one(
@@ -242,7 +328,6 @@ async def analyze_patient(patient: dict):
             upsert=True
         )
 
-        # Create alert if risk is above threshold
         if risk_level in [RiskLevel.MODERATE, RiskLevel.HIGH, RiskLevel.SEVERE]:
             await create_alert(patient_id, suicide_risk)
 
@@ -251,18 +336,11 @@ async def analyze_patient(patient: dict):
     except Exception as e:
         logger.error(f"Error analyzing patient: {e}")
 
-async def analyze_all_patients():
-    patients = await patients_collection.find({}).to_list(length=None)
-    for patient in patients:
-        await analyze_patient(patient)
-        await asyncio.sleep(0.1)
-
 def recognize_speech(audio_data: bytes, language: str = "en-US") -> str:
-    """Convert speech to text using Google's speech recognition"""
+    """Convert speech to text using Google's speech recognition."""
     recognizer = sr.Recognizer()
 
     try:
-        # Convert bytes to AudioFile
         with io.BytesIO(audio_data) as audio_file:
             with sr.AudioFile(audio_file) as source:
                 audio = recognizer.record(source)
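One caveat worth noting here: `sr.AudioFile` only parses WAV/AIFF/FLAC, while the endpoints also accept `.mp3` and `.ogg` uploads. Since `pydub` is already imported, a normalization step could bridge that gap. A hedged sketch (assumes ffmpeg is available to pydub; `to_wav_bytes` is a hypothetical helper, not part of this commit):

```python
import io
from pydub import AudioSegment

def to_wav_bytes(audio_data: bytes) -> bytes:
    """Convert arbitrary audio bytes to WAV so sr.AudioFile can read them."""
    segment = AudioSegment.from_file(io.BytesIO(audio_data))  # format auto-detected
    wav_fp = io.BytesIO()
    segment.export(wav_fp, format="wav")
    return wav_fp.getvalue()
```

recognize_speech could then call `to_wav_bytes(audio_data)` before opening the stream.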
@@ -279,7 +357,7 @@ def recognize_speech(audio_data: bytes, language: str = "en-US") -> str:
         raise HTTPException(status_code=500, detail="Error processing speech")
 
 def text_to_speech(text: str, language: str = "en", slow: bool = False) -> bytes:
-    """Convert text to speech using gTTS and return as MP3 bytes"""
+    """Convert text to speech using gTTS and return as MP3 bytes."""
     try:
         tts = gTTS(text=text, lang=language, slow=slow)
         mp3_fp = io.BytesIO()
@@ -323,16 +401,14 @@ async def status():
     return {
         "status": "running",
         "timestamp": datetime.utcnow().isoformat(),
-        "version": "2.
-        "features": ["chat", "voice-input", "voice-output", "patient-analysis"]
+        "version": "2.4.0",
+        "features": ["chat", "voice-input", "voice-output", "patient-analysis", "report-upload"]
     }
 
 @app.get("/patients/analysis-results")
 async def get_patient_analysis_results(name: Optional[str] = Query(None)):
     try:
         query = {}
-
-        # If a name filter is provided, we search the patients collection first
         if name:
             name_regex = re.compile(name, re.IGNORECASE)
             matching_patients = await patients_collection.find({"full_name": name_regex}).to_list(length=None)
@@ -341,10 +417,7 @@ async def get_patient_analysis_results(name: Optional[str] = Query(None)):
                 return []
             query = {"patient_id": {"$in": patient_ids}}
 
-        # Find analysis results based on patient_ids (or all if no filter)
         analyses = await analysis_collection.find(query).sort("timestamp", -1).to_list(length=100)
-
-        # Attach full_name to each analysis result
         enriched_results = []
         for analysis in analyses:
             patient = await patients_collection.find_one({"fhir_id": analysis["patient_id"]})
@@ -396,18 +469,13 @@ async def transcribe_voice(
     audio: UploadFile = File(...),
     language: str = Query("en-US", description="Language code for speech recognition")
 ):
-    """Convert speech to text"""
+    """Convert speech to text."""
     try:
-        # Read audio file
         audio_data = await audio.read()
-
-        # Validate audio format
         if not audio.filename.lower().endswith(('.wav', '.mp3', '.ogg', '.flac')):
             raise HTTPException(status_code=400, detail="Unsupported audio format")
 
-        # Convert speech to text
         text = recognize_speech(audio_data, language)
-
         return {"text": text}
 
     except HTTPException:
@@ -418,16 +486,13 @@ async def transcribe_voice(
 
 @app.post("/voice/synthesize")
 async def synthesize_voice(request: VoiceOutputRequest):
-    """Convert text to speech"""
+    """Convert text to speech."""
     try:
-        # Generate speech from text
         audio_data = text_to_speech(request.text, request.language, request.slow)
 
         if request.return_format == "base64":
-            # Return as base64 encoded string
             return {"audio": base64.b64encode(audio_data).decode('utf-8')}
         else:
-            # Return as MP3 file
             return StreamingResponse(
                 io.BytesIO(audio_data),
                 media_type="audio/mpeg",
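For reference, a client call against the synthesize endpoint might look like this (a sketch assuming a local instance on port 8000 and the `requests` library; the URL and text are placeholders):

```python
import base64
import requests

resp = requests.post(
    "http://localhost:8000/voice/synthesize",
    json={"text": "Hello, this is TxAgent.", "language": "en",
          "slow": False, "return_format": "base64"},
)
audio_bytes = base64.b64decode(resp.json()["audio"])  # base64 branch returns JSON
with open("reply.mp3", "wb") as f:
    f.write(audio_bytes)
```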
@@ -440,9 +505,6 @@ async def synthesize_voice(request: VoiceOutputRequest):
         logger.error(f"Error in voice synthesis: {e}")
         raise HTTPException(status_code=500, detail="Error generating voice output")
 
-
-
-
 @app.post("/voice/chat")
 async def voice_chat_endpoint(
     audio: UploadFile = File(...),
@@ -450,13 +512,11 @@ async def voice_chat_endpoint(
     temperature: float = Query(0.7, ge=0.1, le=1.0),
     max_new_tokens: int = Query(512, ge=50, le=1024)
 ):
-    """Complete voice chat interaction (speech-to-text -> AI -> text-to-speech)"""
+    """Complete voice chat interaction (speech-to-text -> AI -> text-to-speech)."""
     try:
-        # Step 1: Convert speech to text
         audio_data = await audio.read()
         user_message = recognize_speech(audio_data, language)
 
-        # Step 2: Get AI response
         chat_response = agent.chat(
             message=user_message,
             history=[],
@@ -464,10 +524,8 @@ async def voice_chat_endpoint(
             max_new_tokens=max_new_tokens
         )
 
-        # Step 3: Convert response to speech
         audio_data = text_to_speech(chat_response, language.split('-')[0])
 
-        # Return as MP3 file
         return StreamingResponse(
             io.BytesIO(audio_data),
             media_type="audio/mpeg",
@@ -478,4 +536,47 @@ async def voice_chat_endpoint(
         raise
     except Exception as e:
         logger.error(f"Error in voice chat: {e}")
-        raise HTTPException(status_code=500, detail="Error processing voice chat")
+        raise HTTPException(status_code=500, detail="Error processing voice chat")
+
+@app.post("/patient/upload-report")
+async def upload_patient_report(
+    patient_id: str = Form(...),
+    file_type: str = Form("text", description="Type of file: text, pdf, audio"),
+    file: UploadFile = File(...)
+):
+    """Upload and analyze a patient report (text, PDF, or audio)."""
+    try:
+        # Validate patient ID
+        patient = await patients_collection.find_one({"fhir_id": patient_id})
+        if not patient:
+            raise HTTPException(status_code=404, detail=f"Patient {patient_id} not found")
+
+        # Validate file type
+        if file_type not in ["text", "pdf", "audio"]:
+            raise HTTPException(status_code=400, detail="Invalid file type. Supported: text, pdf, audio")
+
+        # Read and process file
+        file_content = await file.read()
+
+        if file_type == "text":
+            report_content = file_content.decode('utf-8')
+        elif file_type == "pdf":
+            report_content = extract_text_from_pdf(file_content)
+        elif file_type == "audio":
+            report_content = recognize_speech(file_content, language="en-US")
+        else:
+            raise HTTPException(status_code=400, detail="Unsupported file type")
+
+        # Analyze the report
+        analysis = await analyze_patient_report(patient_id, report_content, file_type)
+        return JSONResponse(content=analysis)
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Error processing patient report upload: {e}")
+        raise HTTPException(status_code=500, detail="Error processing patient report")
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8000)
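The new upload endpoint takes multipart form data rather than the `PatientReportAnalysisRequest` JSON model defined earlier in the diff. A hedged usage sketch (assumes a running instance on localhost:8000, an existing `fhir_id`, and the `requests` library; the file name and ID are placeholders):

```python
import requests

with open("discharge_summary.pdf", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/patient/upload-report",
        data={"patient_id": "example-fhir-id", "file_type": "pdf"},
        files={"file": ("discharge_summary.pdf", f, "application/pdf")},
    )
# On success the endpoint returns the stored analysis document,
# including the "suicide_risk" block.
print(resp.json().get("suicide_risk"))
```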