dpv007 committed on
Commit
61ff318
·
verified ·
1 Parent(s): 328d99b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +330 -201
app.py CHANGED
@@ -1,30 +1,35 @@
1
  # app.py
2
  """
3
  Elderly HealthWatch AI Backend (FastAPI)
4
- This variant uses:
5
- - facenet-pytorch or mtcnn if available
6
- - otherwise falls back to OpenCV Haar cascades (fast, CPU-only, lightweight)
7
- - integrates a remote VLM via gradio_client to get JSON feature vectors
 
 
 
 
8
  """
9
 
10
  import io
 
11
  import uuid
 
12
  import asyncio
 
 
13
  from typing import Dict, Any, Optional
14
  from datetime import datetime
 
15
  from fastapi import FastAPI, UploadFile, File, BackgroundTasks, HTTPException
16
  from fastapi.middleware.cors import CORSMiddleware
17
  from PIL import Image
18
  import numpy as np
19
- import os
20
- import traceback
21
  import cv2 # opencv-python-headless expected installed
22
- import json
23
- import logging
24
 
25
- # Optional gradio client import (for VLM)
26
  try:
27
- from gradio_client import Client, handle_file
28
  GRADIO_AVAILABLE = True
29
  except Exception:
30
  GRADIO_AVAILABLE = False
@@ -32,41 +37,40 @@ except Exception:
32
  # Configure logging
33
  logging.basicConfig(level=logging.INFO)
34
 
35
- # Configuration for remote VLM (change to your target Space)
36
- GRADIO_SPACE = os.getenv("GRADIO_SPACE", "developer0hye/Qwen3-VL-8B-Instruct")
37
- # If your space is private, set HF_TOKEN as a secret / env var in Spaces
38
  HF_TOKEN = os.getenv("HF_TOKEN", None)
 
 
39
  DEFAULT_VLM_PROMPT = (
40
  "From the provided face/eye images, compute the required screening features "
41
  "(pallor, sclera yellowness, redness, mobility metrics, quality checks) "
42
  "and output a clean JSON feature vector only."
43
  )
44
 
45
- # Attempt to import facenet-pytorch MTCNN first (recommended)
 
 
 
 
 
 
46
  try:
47
- from facenet_pytorch import MTCNN as FacenetMTCNN
48
  _MTCNN_IMPL = "facenet_pytorch"
49
  except Exception:
50
  FacenetMTCNN = None
51
  _MTCNN_IMPL = None
52
 
53
- # Fallback to the classic mtcnn package
54
  if _MTCNN_IMPL is None:
55
  try:
56
- from mtcnn import MTCNN as ClassicMTCNN
57
  _MTCNN_IMPL = "mtcnn"
58
  except Exception:
59
  ClassicMTCNN = None
60
 
61
- # We'll create a fallback "opencv" detector if neither is present.
62
  def create_mtcnn_or_fallback():
63
- """
64
- Return:
65
- - facenet_pytorch.MTCNN instance if available
66
- - classic mtcnn instance if available
67
- - dict with OpenCV cascade detector if neither available
68
- - None if something unexpected happened
69
- """
70
  if _MTCNN_IMPL == "facenet_pytorch" and FacenetMTCNN is not None:
71
  try:
72
  return FacenetMTCNN(keep_all=False, device="cpu")
@@ -77,29 +81,23 @@ def create_mtcnn_or_fallback():
77
  return ClassicMTCNN()
78
  except Exception:
79
  pass
80
-
81
- # OpenCV fallback: use Haar cascades (bundled with cv2)
82
  try:
83
  face_cascade_path = os.path.join(cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
84
  eye_cascade_path = os.path.join(cv2.data.haarcascades, "haarcascade_eye.xml")
85
  if os.path.exists(face_cascade_path) and os.path.exists(eye_cascade_path):
86
- face_cascade = cv2.CascadeClassifier(face_cascade_path)
87
- eye_cascade = cv2.CascadeClassifier(eye_cascade_path)
88
- return {"impl": "opencv", "face_cascade": face_cascade, "eye_cascade": eye_cascade}
 
 
89
  except Exception:
90
  pass
91
-
92
  return None
93
 
94
  mtcnn = create_mtcnn_or_fallback()
95
- # mtcnn may now be:
96
- # - FacenetMTCNN instance (facenet_pytorch)
97
- # - ClassicMTCNN instance (mtcnn)
98
- # - dict {"impl":"opencv", "face_cascade":..., "eye_cascade":...}
99
- # - None
100
 
101
  app = FastAPI(title="Elderly HealthWatch AI Backend")
102
-
103
  app.add_middleware(
104
  CORSMiddleware,
105
  allow_origins=["*"],
@@ -108,16 +106,16 @@ app.add_middleware(
108
  allow_headers=["*"],
109
  )
110
 
 
111
  screenings_db: Dict[str, Dict[str, Any]] = {}
112
 
 
 
 
113
  def load_image_from_bytes(bytes_data: bytes) -> Image.Image:
114
  return Image.open(io.BytesIO(bytes_data)).convert("RGB")
115
 
116
  def estimate_eye_openness_from_detection(confidence: float) -> float:
117
- """
118
- Simple mapping from detection confidence to an "eye_openness" heuristic in [0,1].
119
- (Used by facenet/mtcnn flows)
120
- """
121
  try:
122
  conf = float(confidence)
123
  openness = min(max((conf * 1.15), 0.0), 1.0)
@@ -125,70 +123,59 @@ def estimate_eye_openness_from_detection(confidence: float) -> float:
125
  except Exception:
126
  return 0.0
127
 
128
- # --------------------------
129
- # VLM client helper
130
- # --------------------------
131
- def get_gradio_client():
132
- """Return a configured gradio Client or raise if not available."""
133
  if not GRADIO_AVAILABLE:
134
- raise RuntimeError("gradio_client not installed in this environment.")
135
  if HF_TOKEN:
136
- return Client(GRADIO_SPACE, hf_token=HF_TOKEN)
137
- return Client(GRADIO_SPACE)
138
 
139
  def run_vlm_and_get_features(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Dict[str, Any]:
140
  """
141
- Synchronous call to the remote VLM (gradio / chat_fn).
142
- Expects the VLM to return a JSON string only (we parse it).
143
- On success returns a dict (parsed JSON).
144
- On failure raises RuntimeError or ValueError.
145
  """
146
  prompt = prompt or DEFAULT_VLM_PROMPT
147
-
148
  if not os.path.exists(face_path) or not os.path.exists(eye_path):
149
  raise FileNotFoundError("Face or eye image path missing for VLM call.")
150
-
151
  if not GRADIO_AVAILABLE:
152
- raise RuntimeError("gradio_client is not available in this environment.")
153
-
154
- client = get_gradio_client()
155
 
156
- message = {
157
- "text": prompt,
158
- "files": [handle_file(face_path), handle_file(eye_path)]
159
- }
160
 
161
- # Call the remote API
162
  try:
163
- logging.info("Calling remote VLM at %s", GRADIO_SPACE)
164
- # result is typically a tuple: (output_dict, new_history)
165
  result = client.predict(message=message, history=[], api_name="/chat_fn")
166
  except Exception as e:
167
  logging.exception("VLM call failed")
168
  raise RuntimeError(f"VLM call failed: {e}")
169
 
170
- # Extract text output
171
- if not result or not isinstance(result, (list, tuple)):
172
- # some spaces return just a dict too
173
- if isinstance(result, dict):
174
- out = result
175
- else:
176
- raise RuntimeError("Unexpected VLM response shape")
177
- else:
178
  out = result[0]
 
 
 
 
179
 
180
  if not isinstance(out, dict):
181
  raise RuntimeError("Unexpected VLM output format (expected dict with 'text' key)")
182
 
183
  text_out = out.get("text") or out.get("output") or None
184
  if not text_out:
185
- raise RuntimeError("VLM returned empty text output")
186
 
187
- # The model was instructed to return JSON only. Try to parse it.
188
  try:
189
  features = json.loads(text_out)
190
  except Exception:
191
- # attempt a forgiving extraction: find the first { ... } block
192
  try:
193
  s = text_out
194
  first = s.find("{")
@@ -207,10 +194,150 @@ def run_vlm_and_get_features(face_path: str, eye_path: str, prompt: Optional[str
207
 
208
  return features
209
 
210
- # --------------------------
211
- # End VLM helper
212
- # --------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
213
 
 
 
 
214
  @app.get("/")
215
  async def read_root():
216
  return {"message": "Elderly HealthWatch AI Backend"}
@@ -224,16 +351,21 @@ async def health_check():
224
  impl = "opencv_haar_fallback"
225
  else:
226
  impl = _MTCNN_IMPL
227
- return {"status": "healthy", "detector": impl, "vlm_available": GRADIO_AVAILABLE}
 
 
 
 
 
 
228
 
229
  @app.post("/api/v1/validate-eye-photo")
230
  async def validate_eye_photo(image: UploadFile = File(...)):
231
  """
232
- Validate an eye photo: detects a face and returns eye_openness_score & landmarks.
233
- Uses facenet/mtcnn if available; otherwise OpenCV haar cascades.
234
  """
235
  if mtcnn is None:
236
- # No detector at all
237
  raise HTTPException(status_code=500, detail="No face detector available in this deployment.")
238
 
239
  try:
@@ -248,13 +380,9 @@ async def validate_eye_photo(image: UploadFile = File(...)):
248
  try:
249
  boxes, probs, landmarks = mtcnn.detect(pil_img, landmarks=True)
250
  if boxes is None or len(boxes) == 0:
251
- return {
252
- "valid": False,
253
- "face_detected": False,
254
- "eye_openness_score": 0.0,
255
- "message_english": "No face detected. Please ensure your face is clearly visible in the frame.",
256
- "message_hindi": "कोई चेहरा नहीं मिला। कृपया सुनिश्चित करें कि आपका चेहरा फ्रेम में स्पष्ट रूप से दिखाई दे रहा है।"
257
- }
258
  prob = float(probs[0]) if probs is not None else 0.0
259
  lm = landmarks[0] if landmarks is not None else None
260
  if lm is not None and len(lm) >= 2:
@@ -264,14 +392,10 @@ async def validate_eye_photo(image: UploadFile = File(...)):
264
  left_eye = right_eye = None
265
  eye_openness_score = estimate_eye_openness_from_detection(prob)
266
  is_valid = eye_openness_score >= 0.3
267
- return {
268
- "valid": bool(is_valid),
269
- "face_detected": True,
270
- "eye_openness_score": round(eye_openness_score, 2),
271
- "message_english": "Photo looks good! Eyes are properly open." if is_valid else "Eyes appear to be closed or partially closed. Please open your eyes wide and try again.",
272
- "message_hindi": "फोटो अच्छी है! आंखें ठीक से खुली हैं।" if is_valid else "आंखें बंद या आंशिक रूप से बंद दिखाई दे रही हैं। कृपया अपनी आंखें चौड़ी खोलें और पुनः प्रयास करें।",
273
- "eye_landmarks": {"left_eye": left_eye, "right_eye": right_eye}
274
- }
275
  except Exception:
276
  traceback.print_exc()
277
  raise HTTPException(status_code=500, detail="Face detector failed during inference.")
@@ -283,13 +407,9 @@ async def validate_eye_photo(image: UploadFile = File(...)):
283
  except Exception:
284
  detections = mtcnn.detect_faces(pil_img)
285
  if not detections:
286
- return {
287
- "valid": False,
288
- "face_detected": False,
289
- "eye_openness_score": 0.0,
290
- "message_english": "No face detected. Please ensure your face is clearly visible in the frame.",
291
- "message_hindi": "कोई चेहरा नहीं मिला। कृपया सुनिश्चित करें कि आपका चेहरा फ्रेम में स्पष्ट रूप से दिखाई दे रहा है।"
292
- }
293
  face = detections[0]
294
  keypoints = face.get("keypoints", {})
295
  left_eye = keypoints.get("left_eye")
@@ -297,14 +417,10 @@ async def validate_eye_photo(image: UploadFile = File(...)):
297
  confidence = float(face.get("confidence", 0.0))
298
  eye_openness_score = estimate_eye_openness_from_detection(confidence)
299
  is_valid = eye_openness_score >= 0.3
300
- return {
301
- "valid": bool(is_valid),
302
- "face_detected": True,
303
- "eye_openness_score": round(eye_openness_score, 2),
304
- "message_english": "Photo looks good! Eyes are properly open." if is_valid else "Eyes appear to be closed or partially closed. Please open your eyes wide and try again.",
305
- "message_hindi": "फोटो अच्छी है! आंखें ठीक से खुली हैं।" if is_valid else "आंखें बंद या आंशिक रूप से बंद दिखाई दे रही हैं। कृपया अपनी आंखें चौड़ी खोलें और पुनः प्रयास करें।",
306
- "eye_landmarks": {"left_eye": left_eye, "right_eye": right_eye}
307
- }
308
 
309
  # OpenCV Haar cascade fallback
310
  if isinstance(mtcnn, dict) and mtcnn.get("impl") == "opencv":
@@ -314,54 +430,38 @@ async def validate_eye_photo(image: UploadFile = File(...)):
314
  eye_cascade = mtcnn["eye_cascade"]
315
  faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4, minSize=(60, 60))
316
  if len(faces) == 0:
317
- return {
318
- "valid": False,
319
- "face_detected": False,
320
- "eye_openness_score": 0.0,
321
- "message_english": "No face detected. Please ensure your face is clearly visible in the frame.",
322
- "message_hindi": "कोई चेहरा नहीं मिला। कृपया सुनिश्चित करें कि आपका चेहरा फ्रेम में स्पष्ट रूप से दिखाई दे रहा है।"
323
- }
324
- # Use first face
325
  (x, y, w, h) = faces[0]
326
  roi_gray = gray[y:y+h, x:x+w]
327
  eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=1.1, minNeighbors=5, minSize=(20, 10))
328
- # Heuristic openness: if eyes detected => open
329
  eye_openness_score = 1.0 if len(eyes) >= 1 else 0.0
330
  is_valid = eye_openness_score >= 0.3
331
- # estimate coordinates relative to full image
332
  left_eye = None
333
  right_eye = None
334
  if len(eyes) >= 1:
335
  ex, ey, ew, eh = eyes[0]
336
- # convert to image coords center
337
  cx = float(x + ex + ew/2)
338
  cy = float(y + ey + eh/2)
339
  left_eye = {"x": cx, "y": cy}
340
- return {
341
- "valid": bool(is_valid),
342
- "face_detected": True,
343
- "eye_openness_score": round(eye_openness_score, 2),
344
- "message_english": "Photo looks good! Eyes are detected." if is_valid else "Eyes not detected. Please open your eyes wide and try again.",
345
- "message_hindi": "फोटो अच्छी है! आंखें मिलीं।" if is_valid else "आंखें नहीं मिलीं। कृपया अपनी आंखें चौड़ी खोलें और पुनः प्रयास करें।",
346
- "eye_landmarks": {"left_eye": left_eye, "right_eye": right_eye}
347
- }
348
  except Exception:
349
  traceback.print_exc()
350
  raise HTTPException(status_code=500, detail="OpenCV fallback detector failed.")
351
- # Should not reach here
352
  raise HTTPException(status_code=500, detail="Invalid detector configuration.")
353
  except HTTPException:
354
  raise
355
  except Exception as e:
356
  traceback.print_exc()
357
- return {
358
- "valid": False,
359
- "face_detected": False,
360
- "eye_openness_score": 0.0,
361
- "message_english": "Error processing image. Please try again.",
362
- "message_hindi": "छवि प्रोसेस करने में त्रुटि। कृपया पुनः प्रयास करें।",
363
- "error": str(e)
364
- }
365
 
366
  @app.post("/api/v1/upload")
367
  async def upload_images(
@@ -369,6 +469,9 @@ async def upload_images(
369
  face_image: UploadFile = File(...),
370
  eye_image: UploadFile = File(...)
371
  ):
 
 
 
372
  try:
373
  screening_id = str(uuid.uuid4())
374
  now = datetime.utcnow().isoformat() + "Z"
@@ -428,30 +531,43 @@ async def get_history(user_id: str):
428
  history = [s for s in screenings_db.values() if s.get("user_id") == user_id]
429
  return {"screenings": history}
430
 
 
 
 
431
  async def process_screening(screening_id: str):
 
 
 
 
 
 
 
 
432
  try:
433
  if screening_id not in screenings_db:
434
- print(f"[process_screening] screening {screening_id} not found")
435
  return
436
  screenings_db[screening_id]["status"] = "processing"
437
- print(f"[process_screening] Starting {screening_id}")
 
438
  entry = screenings_db[screening_id]
439
  face_path = entry.get("face_image_path")
440
  eye_path = entry.get("eye_image_path")
 
441
  if not (face_path and os.path.exists(face_path)):
442
  raise RuntimeError("Face image missing")
443
  if not (eye_path and os.path.exists(eye_path)):
444
  raise RuntimeError("Eye image missing")
 
445
  face_img = Image.open(face_path).convert("RGB")
446
  eye_img = Image.open(eye_path).convert("RGB")
447
 
448
- # Basic detection using whichever detector is available; populate quality_metrics
449
  face_detected = False
450
  face_confidence = 0.0
451
  left_eye_coord = right_eye_coord = None
452
 
453
- # facenet/mtcnn path
454
- if not isinstance(mtcnn, dict) and (_MTCNN_IMPL == "facenet_pytorch" or _MTCNN_IMPL == "mtcnn"):
455
  try:
456
  if _MTCNN_IMPL == "facenet_pytorch":
457
  boxes, probs, landmarks = mtcnn.detect(face_img, landmarks=True)
@@ -475,7 +591,6 @@ async def process_screening(screening_id: str):
475
  except Exception:
476
  traceback.print_exc()
477
 
478
- # OpenCV fallback path
479
  if isinstance(mtcnn, dict) and mtcnn.get("impl") == "opencv":
480
  try:
481
  arr = np.asarray(face_img)
@@ -485,7 +600,6 @@ async def process_screening(screening_id: str):
485
  faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4, minSize=(60, 60))
486
  if len(faces) > 0:
487
  face_detected = True
488
- # crude confidence proxy by face size ratio
489
  (x, y, w, h) = faces[0]
490
  face_confidence = min(1.0, (w*h) / (arr.shape[0]*arr.shape[1]) * 4.0)
491
  roi_gray = gray[y:y+h, x:x+w]
@@ -507,74 +621,86 @@ async def process_screening(screening_id: str):
507
  }
508
  screenings_db[screening_id]["quality_metrics"] = quality_metrics
509
 
510
- # Attempt VLM call to compute multimodal features (pallor, sclera yellowness, etc.)
 
 
 
511
  try:
512
  vlm_features = run_vlm_and_get_features(face_path, eye_path)
513
- # attach under ai_results
514
  screenings_db[screening_id].setdefault("ai_results", {})
515
- screenings_db[screening_id]["ai_results"].update({
516
- "vlm_features": vlm_features
517
- })
518
  except Exception as e:
519
- # Don't fail the entire pipeline for VLM errors; record them
520
  logging.exception("VLM feature extraction failed")
521
  screenings_db[screening_id].setdefault("ai_results", {})
522
- screenings_db[screening_id]["ai_results"].update({
523
- "vlm_error": str(e)
524
- })
525
-
526
- # Simulate Medical model steps (kept short)
527
- await asyncio.sleep(1)
528
- vlm_face_desc = "Patient appears to have normal facial tone; no severe jaundice visible."
529
- vlm_eye_desc = "Sclera shows mild yellowing."
530
-
531
- await asyncio.sleep(1)
532
- medical_insights = {
533
- "hemoglobin_estimate": 11.2,
534
- "bilirubin_estimate": 1.8,
535
- "anemia_indicators": ["pale skin"],
536
- "jaundice_indicators": ["mild scleral yellowing"],
537
- "confidence": 0.82
538
- }
539
 
540
- hem = medical_insights["hemoglobin_estimate"]
541
- bil = medical_insights["bilirubin_estimate"]
542
-
543
- ai_results = {
544
- "hemoglobin_g_dl": hem,
545
- "anemia_status": "Mild Anemia" if hem < 12 else "Normal",
546
- "anemia_confidence": medical_insights["confidence"],
547
- "bilirubin_mg_dl": bil,
548
- "jaundice_status": "Normal" if bil < 2.5 else "Elevated",
549
- "jaundice_confidence": medical_insights["confidence"],
550
- "vlm_face_description": vlm_face_desc,
551
- "vlm_eye_description": vlm_eye_desc,
552
- "medical_insights": medical_insights,
553
- "processing_time_ms": 1200
554
- }
555
- # Merge ai_results while preserving vlm_features if present
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
556
  screenings_db[screening_id].setdefault("ai_results", {})
557
- screenings_db[screening_id]["ai_results"].update(ai_results)
 
 
558
 
 
559
  disease_predictions = [
560
  {
561
- "condition": "Iron Deficiency Anemia",
562
- "risk_level": "Medium" if hem < 12 else "Low",
563
- "probability": 0.76 if hem < 12 else 0.23,
564
- "confidence": medical_insights["confidence"]
565
  },
566
  {
567
- "condition": "Jaundice",
568
- "risk_level": "Low" if bil < 2.5 else "Medium",
569
- "probability": 0.23 if bil < 2.5 else 0.45,
570
- "confidence": medical_insights["confidence"]
571
  }
572
  ]
573
 
574
  recommendations = {
575
- "action_needed": "consult" if hem < 12 else "monitor",
576
- "message_english": f"Your hemoglobin is {hem} g/dL. Please consult a doctor within 2 weeks for blood tests.",
577
- "message_hindi": f"आपका हीमोग्लोबिन {hem} g/dL है। कृपया 2 सप्ताह में डॉक्टर से परामर्श करें।"
578
  }
579
 
580
  screenings_db[screening_id].update({
@@ -583,15 +709,18 @@ async def process_screening(screening_id: str):
583
  "recommendations": recommendations
584
  })
585
 
586
- print(f"[process_screening] Completed {screening_id}")
587
  except Exception as e:
588
  traceback.print_exc()
589
  if screening_id in screenings_db:
590
  screenings_db[screening_id]["status"] = "failed"
591
  screenings_db[screening_id]["error"] = str(e)
592
  else:
593
- print(f"[process_screening] Failed for unknown screening {screening_id}: {e}")
594
 
 
 
 
595
  if __name__ == "__main__":
596
  import uvicorn
597
  uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=False)
 
1
  # app.py
2
  """
3
  Elderly HealthWatch AI Backend (FastAPI)
4
+ Pipeline:
5
+ - receive images
6
+ - run VLM (remote gradio / chat_fn) -> JSON feature vector
7
+ - run LLM (remote gradio /chat) -> structured risk JSON (per requested schema)
8
+ - continue rest of processing and store results
9
+ Notes:
10
+ - Add gradio_client==1.13.2 (or another compatible 1.x) to requirements.txt
11
+ - If VLM/LLM Spaces are private, set HF_TOKEN in the environment for authentication.
12
  """
13
 
14
  import io
15
+ import os
16
  import uuid
17
+ import json
18
  import asyncio
19
+ import logging
20
+ import traceback
21
  from typing import Dict, Any, Optional
22
  from datetime import datetime
23
+
24
  from fastapi import FastAPI, UploadFile, File, BackgroundTasks, HTTPException
25
  from fastapi.middleware.cors import CORSMiddleware
26
  from PIL import Image
27
  import numpy as np
 
 
28
  import cv2 # opencv-python-headless expected installed
 
 
29
 
30
+ # Optional gradio client (for VLM + LLM calls)
31
  try:
32
+ from gradio_client import Client, handle_file # type: ignore
33
  GRADIO_AVAILABLE = True
34
  except Exception:
35
  GRADIO_AVAILABLE = False
 
37
  # Configure logging
38
  logging.basicConfig(level=logging.INFO)
39
 
40
+ # Configuration for remote VLM and LLM spaces (change to your target Space names)
41
+ GRADIO_VLM_SPACE = os.getenv("GRADIO_SPACE", "developer0hye/Qwen3-VL-8B-Instruct")
42
+ LLM_GRADIO_SPACE = os.getenv("LLM_GRADIO_SPACE", "Tonic/med-gpt-oss-20b-demo")
43
  HF_TOKEN = os.getenv("HF_TOKEN", None)
44
+
45
+ # Default VLM prompt
46
  DEFAULT_VLM_PROMPT = (
47
  "From the provided face/eye images, compute the required screening features "
48
  "(pallor, sclera yellowness, redness, mobility metrics, quality checks) "
49
  "and output a clean JSON feature vector only."
50
  )
51
 
52
+ # Default LLM prompts / metadata
53
+ LLM_MODEL_IDENTITY = os.getenv("LLM_MODEL_IDENTITY", "You are GPT-Tonic, a large language model trained by TonicAI for clinical reasoning.")
54
+ LLM_SYSTEM_PROMPT = os.getenv("LLM_SYSTEM_PROMPT", "You are GPT-Tonic, a medically-oriented assistant. Answer concisely and provide structured JSON only.")
55
+ LLM_DEVELOPER_PROMPT = os.getenv("LLM_DEVELOPER_PROMPT", "Provide structured JSON with keys: risk_score, jaundice_probability, anemia_probability, hydration_issue_probability, neurological_issue_probability, summary, recommendation, confidence. Output JSON only.")
56
+
57
+ # Try MTCNN libs; fallback to OpenCV haar cascades
58
+ _MTCNN_IMPL = None
59
  try:
60
+ from facenet_pytorch import MTCNN as FacenetMTCNN # type: ignore
61
  _MTCNN_IMPL = "facenet_pytorch"
62
  except Exception:
63
  FacenetMTCNN = None
64
  _MTCNN_IMPL = None
65
 
 
66
  if _MTCNN_IMPL is None:
67
  try:
68
+ from mtcnn import MTCNN as ClassicMTCNN # type: ignore
69
  _MTCNN_IMPL = "mtcnn"
70
  except Exception:
71
  ClassicMTCNN = None
72
 
 
73
  def create_mtcnn_or_fallback():
 
 
 
 
 
 
 
74
  if _MTCNN_IMPL == "facenet_pytorch" and FacenetMTCNN is not None:
75
  try:
76
  return FacenetMTCNN(keep_all=False, device="cpu")
 
81
  return ClassicMTCNN()
82
  except Exception:
83
  pass
84
+ # OpenCV Haar fallback
 
85
  try:
86
  face_cascade_path = os.path.join(cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
87
  eye_cascade_path = os.path.join(cv2.data.haarcascades, "haarcascade_eye.xml")
88
  if os.path.exists(face_cascade_path) and os.path.exists(eye_cascade_path):
89
+ return {
90
+ "impl": "opencv",
91
+ "face_cascade": cv2.CascadeClassifier(face_cascade_path),
92
+ "eye_cascade": cv2.CascadeClassifier(eye_cascade_path)
93
+ }
94
  except Exception:
95
  pass
 
96
  return None
97
 
98
  mtcnn = create_mtcnn_or_fallback()
 
 
 
 
 
99
 
100
  app = FastAPI(title="Elderly HealthWatch AI Backend")
 
101
  app.add_middleware(
102
  CORSMiddleware,
103
  allow_origins=["*"],
 
106
  allow_headers=["*"],
107
  )
108
 
109
+ # In-memory DB for demo
110
  screenings_db: Dict[str, Dict[str, Any]] = {}
111
 
112
+ # -----------------------
113
+ # Utility helpers
114
+ # -----------------------
115
  def load_image_from_bytes(bytes_data: bytes) -> Image.Image:
116
  return Image.open(io.BytesIO(bytes_data)).convert("RGB")
117
 
118
  def estimate_eye_openness_from_detection(confidence: float) -> float:
 
 
 
 
119
  try:
120
  conf = float(confidence)
121
  openness = min(max((conf * 1.15), 0.0), 1.0)
 
123
  except Exception:
124
  return 0.0
125
 
126
+ # -----------------------
127
+ # Gradio / VLM helper
128
+ # -----------------------
129
def get_gradio_client_for_space(space: str) -> Client:
    """Build a gradio Client for *space*, authenticating with HF_TOKEN when it is set.

    Raises:
        RuntimeError: if the gradio_client package could not be imported.
    """
    if not GRADIO_AVAILABLE:
        raise RuntimeError("gradio_client not installed in this environment. Add gradio_client to requirements.txt.")
    # Pass the token only when one was configured; public Spaces need none.
    kwargs = {"hf_token": HF_TOKEN} if HF_TOKEN else {}
    return Client(space, **kwargs)
135
 
136
  def run_vlm_and_get_features(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Dict[str, Any]:
137
  """
138
+ Synchronous call to remote VLM (gradio /chat_fn). Expects a JSON feature vector in response.
139
+ Returns parsed dict or raises.
 
 
140
  """
141
  prompt = prompt or DEFAULT_VLM_PROMPT
 
142
  if not os.path.exists(face_path) or not os.path.exists(eye_path):
143
  raise FileNotFoundError("Face or eye image path missing for VLM call.")
 
144
  if not GRADIO_AVAILABLE:
145
+ raise RuntimeError("gradio_client not available in this environment.")
 
 
146
 
147
+ client = get_gradio_client_for_space(GRADIO_VLM_SPACE)
148
+ message = {"text": prompt, "files": [handle_file(face_path), handle_file(eye_path)]}
 
 
149
 
 
150
  try:
151
+ logging.info("Calling VLM Space %s", GRADIO_VLM_SPACE)
 
152
  result = client.predict(message=message, history=[], api_name="/chat_fn")
153
  except Exception as e:
154
  logging.exception("VLM call failed")
155
  raise RuntimeError(f"VLM call failed: {e}")
156
 
157
+ # Normalize result
158
+ if not result:
159
+ raise RuntimeError("Empty response from VLM")
160
+
161
+ if isinstance(result, (list, tuple)):
 
 
 
162
  out = result[0]
163
+ elif isinstance(result, dict):
164
+ out = result
165
+ else:
166
+ out = {"text": str(result)}
167
 
168
  if not isinstance(out, dict):
169
  raise RuntimeError("Unexpected VLM output format (expected dict with 'text' key)")
170
 
171
  text_out = out.get("text") or out.get("output") or None
172
  if not text_out:
173
+ text_out = json.dumps(out)
174
 
175
+ # Parse JSON, forgiving extraction if needed
176
  try:
177
  features = json.loads(text_out)
178
  except Exception:
 
179
  try:
180
  s = text_out
181
  first = s.find("{")
 
194
 
195
  return features
196
 
197
+ # -----------------------
198
+ # Gradio / LLM helper (always prompts with VLM output + strict instruction)
199
+ # -----------------------
200
def run_llm_on_vlm(vlm_features: Dict[str, Any],
                   max_new_tokens: int = 1024,
                   temperature: float = 0.0,
                   reasoning_effort: str = "medium",
                   model_identity: Optional[str] = None,
                   system_prompt: Optional[str] = None,
                   developer_prompt: Optional[str] = None) -> Dict[str, Any]:
    """
    Call the remote LLM Space's /chat endpoint with vlm_features embedded in the prompt.

    The prompt is always the VLM feature JSON immediately followed by a fixed
    instruction asking for a JSON object with the keys: risk_score,
    jaundice_probability, anemia_probability, hydration_issue_probability,
    neurological_issue_probability, summary, recommendation, confidence —
    with probabilities based on the input features and no disease names in
    the summary/recommendation text.

    Args:
        vlm_features: Feature dict produced by the VLM step; serialized with
            json.dumps(default=str), so non-JSON values are stringified.
        max_new_tokens / temperature / reasoning_effort: Generation knobs
            forwarded verbatim to the Space's /chat endpoint.
        model_identity / system_prompt / developer_prompt: Override the
            module-level LLM_* defaults when given.

    Returns:
        The parsed LLM JSON with normalized fields: the four *_probability
        keys coerced to floats in [0, 1] (or None), risk_score coerced to
        [0, 100] (derived from the probabilities if missing), confidence in
        [0, 1] or None, and summary/recommendation as stripped strings.

    Raises:
        RuntimeError: gradio_client missing, the remote call failed, or the
            LLM returned an empty response.
        ValueError: no parseable JSON object in the LLM output, or the
            parsed value is not a dict.
    """
    if not GRADIO_AVAILABLE:
        raise RuntimeError("gradio_client not installed. Add gradio_client to requirements.txt")

    client = get_gradio_client_for_space(LLM_GRADIO_SPACE)

    # Fall back to module-level defaults for any prompt pieces not supplied.
    model_identity = model_identity or LLM_MODEL_IDENTITY
    system_prompt = system_prompt or LLM_SYSTEM_PROMPT
    developer_prompt = developer_prompt or LLM_DEVELOPER_PROMPT

    # Prepare the exact combined prompt: "<vlm json><instruction>".
    # The leading comma in the instruction is intentional — it splices the
    # schema request directly onto the serialized feature JSON.
    vlm_json_str = json.dumps(vlm_features, default=str)
    instruction = (
        ",Generate a JSON with risk_score, jaundice_probability, anemia_probability, "
        "hydration_issue_probability, neurological_issue_probability, summary, recommendation, confidence. \n"
        "Base probabilities logically on the input features. \n"
        "Do NOT mention any disease names in summary or recommendation; use neutral wording only."
    )
    input_payload_str = vlm_json_str + instruction

    try:
        logging.info("Calling LLM Space %s with strict schema prompt", LLM_GRADIO_SPACE)
        # NOTE(review): keyword names below must match the target Space's
        # /chat signature — confirm against the Space's API page if changed.
        result = client.predict(
            input_data=input_payload_str,
            max_new_tokens=float(max_new_tokens),
            model_identity=model_identity,
            system_prompt=system_prompt,
            developer_prompt=developer_prompt,
            reasoning_effort=reasoning_effort,
            temperature=float(temperature),
            top_p=0.9,
            top_k=50,
            repetition_penalty=1.0,
            api_name="/chat"
        )
    except Exception as e:
        logging.exception("LLM call failed")
        raise RuntimeError(f"LLM call failed: {e}")

    # Normalize result to string (some client versions return structures).
    if isinstance(result, (dict, list)):
        text_out = json.dumps(result)
    else:
        text_out = str(result)

    if not text_out or len(text_out.strip()) == 0:
        raise RuntimeError("LLM returned empty response")

    # Parse JSON; if the model wrapped it in prose, fall back to extracting
    # the outermost {...} span before giving up.
    try:
        parsed = json.loads(text_out)
    except Exception:
        try:
            s = text_out
            first = s.find("{")
            last = s.rfind("}")
            if first != -1 and last != -1 and last > first:
                maybe = s[first:last+1]
                parsed = json.loads(maybe)
            else:
                raise ValueError("No JSON object found in LLM output")
        except Exception as e:
            logging.exception("Failed to parse JSON from LLM output")
            raise ValueError(f"Failed to parse JSON from LLM output: {e}\nRaw output: {text_out}")

    if not isinstance(parsed, dict):
        raise ValueError("Parsed LLM output is not a JSON object/dict")

    # Validate and coerce expected probability fields to floats in 0..1
    # and risk_score to 0..100.
    def safe_prob(val):
        # Coerce to a probability in [0, 1]; None if not numeric.
        try:
            v = float(val)
            if v > 1:
                # if model returned 0-100 percentage, convert
                if v <= 100:
                    v = v / 100.0
            return max(0.0, min(1.0, v))
        except Exception:
            return None

    expected_prob_keys = [
        "jaundice_probability",
        "anemia_probability",
        "hydration_issue_probability",
        "neurological_issue_probability",
    ]
    for k in expected_prob_keys:
        if k in parsed:
            parsed[k] = safe_prob(parsed[k])
        else:
            # Always present in the output, even if the model omitted it.
            parsed[k] = None

    # risk_score: coerce to 0..100 (values <= 1 are treated as a fraction).
    if "risk_score" in parsed:
        try:
            rs = float(parsed["risk_score"])
            if rs <= 1:
                rs = rs * 100.0
            parsed["risk_score"] = round(max(0.0, min(100.0, rs)), 2)
        except Exception:
            parsed["risk_score"] = None
    else:
        # derive a simple aggregated risk_score (mean of known probabilities)
        probs = [p for p in (parsed.get(k) for k in expected_prob_keys) if isinstance(p, (int, float))]
        parsed["risk_score"] = round((sum(probs) / len(probs) * 100.0) if probs else 0.0, 2)

    # Ensure confidence exists and is 0..1 (percentages are rescaled).
    if "confidence" in parsed:
        try:
            c = float(parsed["confidence"])
            if c > 1 and c <= 100:
                c = c / 100.0
            parsed["confidence"] = max(0.0, min(1.0, c))
        except Exception:
            parsed["confidence"] = None
    else:
        parsed["confidence"] = None

    # summary and recommendation must be strings (neutral wording)
    parsed["summary"] = str(parsed.get("summary", "")).strip()
    parsed["recommendation"] = str(parsed.get("recommendation", "")).strip()

    return parsed
337
 
338
# -----------------------
# API endpoints
# -----------------------
@app.get("/")
async def read_root():
    """Liveness/root endpoint: returns a short service-identification message."""
    return {"message": "Elderly HealthWatch AI Backend"}
 
351
  impl = "opencv_haar_fallback"
352
  else:
353
  impl = _MTCNN_IMPL
354
+ return {
355
+ "status": "healthy",
356
+ "detector": impl,
357
+ "vlm_available": GRADIO_AVAILABLE,
358
+ "vlm_space": GRADIO_VLM_SPACE,
359
+ "llm_space": LLM_GRADIO_SPACE
360
+ }
361
 
362
  @app.post("/api/v1/validate-eye-photo")
363
  async def validate_eye_photo(image: UploadFile = File(...)):
364
  """
365
+ Lightweight validation endpoint. Uses available detector (facenet/mtcnn/opencv) to check face/eye detection.
366
+ For full pipeline, use /api/v1/upload which invokes VLM+LLM in background.
367
  """
368
  if mtcnn is None:
 
369
  raise HTTPException(status_code=500, detail="No face detector available in this deployment.")
370
 
371
  try:
 
380
  try:
381
  boxes, probs, landmarks = mtcnn.detect(pil_img, landmarks=True)
382
  if boxes is None or len(boxes) == 0:
383
+ return {"valid": False, "face_detected": False, "eye_openness_score": 0.0,
384
+ "message_english": "No face detected. Please ensure your face is clearly visible in the frame.",
385
+ "message_hindi": "कोई चेहरा नहीं मिला। कृपया सुनिश्चित करें कि आपका चेहरा फ्रेम में स्पष्ट रूप से दिखाई दे रहा है।"}
 
 
 
 
386
  prob = float(probs[0]) if probs is not None else 0.0
387
  lm = landmarks[0] if landmarks is not None else None
388
  if lm is not None and len(lm) >= 2:
 
392
  left_eye = right_eye = None
393
  eye_openness_score = estimate_eye_openness_from_detection(prob)
394
  is_valid = eye_openness_score >= 0.3
395
+ return {"valid": bool(is_valid), "face_detected": True, "eye_openness_score": round(eye_openness_score, 2),
396
+ "message_english": "Photo looks good! Eyes are properly open." if is_valid else "Eyes appear to be closed or partially closed. Please open your eyes wide and try again.",
397
+ "message_hindi": "फोटो अच्छी है! आंखें ठीक से खुली हैं।" if is_valid else "आंखें बंद या आंशिक रूप से बंद दिखाई दे रही हैं। कृपया अपनी आंखें चौड़ी खोलें और पुनः प्रयास करें।",
398
+ "eye_landmarks": {"left_eye": left_eye, "right_eye": right_eye}}
 
 
 
 
399
  except Exception:
400
  traceback.print_exc()
401
  raise HTTPException(status_code=500, detail="Face detector failed during inference.")
 
407
  except Exception:
408
  detections = mtcnn.detect_faces(pil_img)
409
  if not detections:
410
+ return {"valid": False, "face_detected": False, "eye_openness_score": 0.0,
411
+ "message_english": "No face detected. Please ensure your face is clearly visible in the frame.",
412
+ "message_hindi": "कोई चेहरा नहीं मिला। कृपया सुनिश्चित करें कि आपका चेहरा फ्रेम में स्पष्ट रूप से दिखाई दे रहा है।"}
 
 
 
 
413
  face = detections[0]
414
  keypoints = face.get("keypoints", {})
415
  left_eye = keypoints.get("left_eye")
 
417
  confidence = float(face.get("confidence", 0.0))
418
  eye_openness_score = estimate_eye_openness_from_detection(confidence)
419
  is_valid = eye_openness_score >= 0.3
420
+ return {"valid": bool(is_valid), "face_detected": True, "eye_openness_score": round(eye_openness_score, 2),
421
+ "message_english": "Photo looks good! Eyes are properly open." if is_valid else "Eyes appear to be closed or partially closed. Please open your eyes wide and try again.",
422
+ "message_hindi": "फोटो अच्छी है! आंखें ठीक से खुली हैं।" if is_valid else "आंखें बंद या आंशिक रूप से बंद दिखाई दे रही हैं। कृपया अपनी आंखें चौड़ी खोलें और पुनः प्रयास करें।",
423
+ "eye_landmarks": {"left_eye": left_eye, "right_eye": right_eye}}
 
 
 
 
424
 
425
  # OpenCV Haar cascade fallback
426
  if isinstance(mtcnn, dict) and mtcnn.get("impl") == "opencv":
 
430
  eye_cascade = mtcnn["eye_cascade"]
431
  faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4, minSize=(60, 60))
432
  if len(faces) == 0:
433
+ return {"valid": False, "face_detected": False, "eye_openness_score": 0.0,
434
+ "message_english": "No face detected. Please ensure your face is clearly visible in the frame.",
435
+ "message_hindi": "कोई चेहरा नहीं मिला। कृपया सुनिश्चित करें कि आपका चेहरा फ्रेम में स्पष्ट रूप से दिखाई दे रहा है।"}
 
 
 
 
 
436
  (x, y, w, h) = faces[0]
437
  roi_gray = gray[y:y+h, x:x+w]
438
  eyes = eye_cascade.detectMultiScale(roi_gray, scaleFactor=1.1, minNeighbors=5, minSize=(20, 10))
 
439
  eye_openness_score = 1.0 if len(eyes) >= 1 else 0.0
440
  is_valid = eye_openness_score >= 0.3
 
441
  left_eye = None
442
  right_eye = None
443
  if len(eyes) >= 1:
444
  ex, ey, ew, eh = eyes[0]
 
445
  cx = float(x + ex + ew/2)
446
  cy = float(y + ey + eh/2)
447
  left_eye = {"x": cx, "y": cy}
448
+ return {"valid": bool(is_valid), "face_detected": True, "eye_openness_score": round(eye_openness_score, 2),
449
+ "message_english": "Photo looks good! Eyes are detected." if is_valid else "Eyes not detected. Please open your eyes wide and try again.",
450
+ "message_hindi": "फोटो अच्छी है! आंखें मिलीं।" if is_valid else "आंखें नहीं मिलीं। कृपया अपनी आंखें चौड़ी खोलें और पुनः प्रयास करें।",
451
+ "eye_landmarks": {"left_eye": left_eye, "right_eye": right_eye}}
 
 
 
 
452
  except Exception:
453
  traceback.print_exc()
454
  raise HTTPException(status_code=500, detail="OpenCV fallback detector failed.")
455
+
456
  raise HTTPException(status_code=500, detail="Invalid detector configuration.")
457
  except HTTPException:
458
  raise
459
  except Exception as e:
460
  traceback.print_exc()
461
+ return {"valid": False, "face_detected": False, "eye_openness_score": 0.0,
462
+ "message_english": "Error processing image. Please try again.",
463
+ "message_hindi": "छवि प्रोसेस करने में त्रुटि। कृपया पुनः प्रयास करें।",
464
+ "error": str(e)}
 
 
 
 
465
 
466
  @app.post("/api/v1/upload")
467
  async def upload_images(
 
469
  face_image: UploadFile = File(...),
470
  eye_image: UploadFile = File(...)
471
  ):
472
+ """
473
+ Save images and enqueue background processing. VLM -> LLM runs inside process_screening.
474
+ """
475
  try:
476
  screening_id = str(uuid.uuid4())
477
  now = datetime.utcnow().isoformat() + "Z"
 
531
  history = [s for s in screenings_db.values() if s.get("user_id") == user_id]
532
  return {"screenings": history}
533
 
534
+ # -----------------------
535
+ # Main processing pipeline
536
+ # -----------------------
537
  async def process_screening(screening_id: str):
538
+ """
539
+ Main pipeline:
540
+ - load images
541
+ - quick detector-based quality metrics
542
+ - run VLM -> vlm_features
543
+ - run LLM on vlm_features -> structured risk JSON
544
+ - merge results into ai_results and finish
545
+ """
546
  try:
547
  if screening_id not in screenings_db:
548
+ logging.error("[process_screening] screening %s not found", screening_id)
549
  return
550
  screenings_db[screening_id]["status"] = "processing"
551
+ logging.info("[process_screening] Starting %s", screening_id)
552
+
553
  entry = screenings_db[screening_id]
554
  face_path = entry.get("face_image_path")
555
  eye_path = entry.get("eye_image_path")
556
+
557
  if not (face_path and os.path.exists(face_path)):
558
  raise RuntimeError("Face image missing")
559
  if not (eye_path and os.path.exists(eye_path)):
560
  raise RuntimeError("Eye image missing")
561
+
562
  face_img = Image.open(face_path).convert("RGB")
563
  eye_img = Image.open(eye_path).convert("RGB")
564
 
565
+ # Basic detection + quality metrics (facenet/mtcnn/opencv)
566
  face_detected = False
567
  face_confidence = 0.0
568
  left_eye_coord = right_eye_coord = None
569
 
570
+ if mtcnn is not None and not isinstance(mtcnn, dict) and (_MTCNN_IMPL == "facenet_pytorch" or _MTCNN_IMPL == "mtcnn"):
 
571
  try:
572
  if _MTCNN_IMPL == "facenet_pytorch":
573
  boxes, probs, landmarks = mtcnn.detect(face_img, landmarks=True)
 
591
  except Exception:
592
  traceback.print_exc()
593
 
 
594
  if isinstance(mtcnn, dict) and mtcnn.get("impl") == "opencv":
595
  try:
596
  arr = np.asarray(face_img)
 
600
  faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4, minSize=(60, 60))
601
  if len(faces) > 0:
602
  face_detected = True
 
603
  (x, y, w, h) = faces[0]
604
  face_confidence = min(1.0, (w*h) / (arr.shape[0]*arr.shape[1]) * 4.0)
605
  roi_gray = gray[y:y+h, x:x+w]
 
621
  }
622
  screenings_db[screening_id]["quality_metrics"] = quality_metrics
623
 
624
+ # --------------------------
625
+ # RUN VLM -> get vlm_features
626
+ # --------------------------
627
+ vlm_features = None
628
  try:
629
  vlm_features = run_vlm_and_get_features(face_path, eye_path)
 
630
  screenings_db[screening_id].setdefault("ai_results", {})
631
+ screenings_db[screening_id]["ai_results"].update({"vlm_features": vlm_features})
 
 
632
  except Exception as e:
 
633
  logging.exception("VLM feature extraction failed")
634
  screenings_db[screening_id].setdefault("ai_results", {})
635
+ screenings_db[screening_id]["ai_results"].update({"vlm_error": str(e)})
636
+ vlm_features = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
637
 
638
+ # --------------------------
639
+ # RUN LLM on vlm_features -> structured risk JSON
640
+ # --------------------------
641
+ structured_risk = None
642
+ try:
643
+ if vlm_features:
644
+ structured_risk = run_llm_on_vlm(vlm_features)
645
+ else:
646
+ # Fallback if VLM failed: produce conservative defaults
647
+ structured_risk = {
648
+ "risk_score": 0.0,
649
+ "jaundice_probability": 0.0,
650
+ "anemia_probability": 0.0,
651
+ "hydration_issue_probability": 0.0,
652
+ "neurological_issue_probability": 0.0,
653
+ "summary": "",
654
+ "recommendation": "",
655
+ "confidence": 0.0
656
+ }
657
+ screenings_db[screening_id].setdefault("ai_results", {})
658
+ screenings_db[screening_id]["ai_results"].update({"structured_risk": structured_risk})
659
+ except Exception as e:
660
+ logging.exception("LLM processing failed")
661
+ screenings_db[screening_id].setdefault("ai_results", {})
662
+ screenings_db[screening_id]["ai_results"].update({"llm_error": str(e)})
663
+ structured_risk = {
664
+ "risk_score": 0.0,
665
+ "jaundice_probability": 0.0,
666
+ "anemia_probability": 0.0,
667
+ "hydration_issue_probability": 0.0,
668
+ "neurological_issue_probability": 0.0,
669
+ "summary": "",
670
+ "recommendation": "",
671
+ "confidence": 0.0
672
+ }
673
+
674
+ # Use structured_risk for summary recommendations & simple disease inference placeholders
675
+ hem = screenings_db[screening_id]["ai_results"].get("medical_insights", {}).get("hemoglobin_estimate", None)
676
+ bil = screenings_db[screening_id]["ai_results"].get("medical_insights", {}).get("bilirubin_estimate", None)
677
+
678
+ # Keep older ai_results shape for backward compatibility (if you want)
679
  screenings_db[screening_id].setdefault("ai_results", {})
680
+ screenings_db[screening_id]["ai_results"].update({
681
+ "processing_time_ms": 1200
682
+ })
683
 
684
+ # disease_predictions & recommendations can be built from structured_risk if needed
685
  disease_predictions = [
686
  {
687
+ "condition": "Anemia-like-signs", # internal tag (not surfaced in LLM summary)
688
+ "risk_level": "Medium" if structured_risk.get("anemia_probability", 0.0) > 0.5 else "Low",
689
+ "probability": structured_risk.get("anemia_probability", 0.0),
690
+ "confidence": structured_risk.get("confidence", 0.0)
691
  },
692
  {
693
+ "condition": "Jaundice-like-signs",
694
+ "risk_level": "Medium" if structured_risk.get("jaundice_probability", 0.0) > 0.5 else "Low",
695
+ "probability": structured_risk.get("jaundice_probability", 0.0),
696
+ "confidence": structured_risk.get("confidence", 0.0)
697
  }
698
  ]
699
 
700
  recommendations = {
701
+ "action_needed": "consult" if structured_risk.get("risk_score", 0.0) > 30.0 else "monitor",
702
+ "message_english": structured_risk.get("recommendation", "") or f"Please follow up with a health professional if concerns persist.",
703
+ "message_hindi": "" # could be auto-translated if desired
704
  }
705
 
706
  screenings_db[screening_id].update({
 
709
  "recommendations": recommendations
710
  })
711
 
712
+ logging.info("[process_screening] Completed %s", screening_id)
713
  except Exception as e:
714
  traceback.print_exc()
715
  if screening_id in screenings_db:
716
  screenings_db[screening_id]["status"] = "failed"
717
  screenings_db[screening_id]["error"] = str(e)
718
  else:
719
+ logging.error("[process_screening] Failed for unknown screening %s: %s", screening_id, str(e))
720
 
721
# -----------------------
# Run server (for local debugging)
# -----------------------
if __name__ == "__main__":
    import uvicorn

    # Port 7860 is the conventional port for Hugging Face Spaces; reload is
    # disabled since this entry point is only for direct local runs.
    uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=False)