Update app.py
app.py CHANGED
@@ -10,12 +10,10 @@ Pipeline:
 Notes:
 - Add gradio_client==1.13.2 (or another compatible 1.x) to requirements.txt
 - If VLM/LLM Spaces are private, set HF_TOKEN in the environment for authentication.
-- This
-
-
-
-  * sends either cleaned JSON or raw VLM string into LLM (and logs which was used).
-- VLM calls were simplified to a single call (no retries).
+- This version includes a robust regex-based extractor that finds the outermost {...} block
+  in the LLM output, extracts numeric values for the required keys, and always returns
+  numeric defaults (no NaN) so frontends will not receive null/None for numeric fields.
+- This variant logs raw LLM output and the parsed JSON using Python logging.
 """
 
 import io
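The "numeric defaults" guarantee described in the notes above can be sanity-checked in isolation; a minimal sketch (the sample string is hypothetical, and importing this module as app is an assumption):

    from app import extract_json_via_regex  # assumption: app.py is importable as a module

    out = extract_json_via_regex('Model said: {"summary": "all good"}')
    # No numeric keys are present, so numeric fields fall back to 0.0 rather than None/NaN:
    # out["risk_score"] == 0.0, out["anemia_probability"] == 0.0, out["summary"] == "all good"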
@@ -26,7 +24,6 @@ import asyncio
 import logging
 import traceback
 import re
-import time
 from typing import Dict, Any, Optional, Tuple
 from datetime import datetime
 
@@ -56,7 +53,7 @@ HF_TOKEN = os.getenv("HF_TOKEN", None)
 DEFAULT_VLM_PROMPT = (
     "From the provided face/eye images, compute the required screening features "
     "(pallor, sclera yellowness, redness, mobility metrics, quality checks) "
-    "and output a clean JSON feature vector only
+    "and output a clean JSON feature vector only."
 )
 
 # Default LLM prompts / metadata (stricter: force JSON-only output)
@@ -143,32 +140,34 @@ def estimate_eye_openness_from_detection(confidence: float) -> float:
     return 0.0
 
 # -----------------------
-# Regex-based robust extractor
+# Regex-based robust extractor
 # -----------------------
 def extract_json_via_regex(raw_text: str) -> Dict[str, Any]:
     """
-
-
-
-
-    - anemia_probability (0..1)
-    - hydration_issue_probability (0..1)
-    - neurological_issue_probability (0..1)
-    - confidence (0..1)
-    - summary (string)
-    - recommendation (string)
+    1) Finds the outermost { ... } block in raw_text.
+    2) Extracts numeric values after the listed keys using regex, tolerating:
+       - quotes, spaces, percent signs, percent numbers like "55%", strings like "0.12", integers, or numbers in quotes.
+    3) Returns a dict with numeric fields GUARANTEED to be floats (no None/NaN), and string fields for summary/recommendation.
     """
+    # Find the first {...} block (outermost approximation)
     match = re.search(r"\{[\s\S]*\}", raw_text)
     if not match:
-        raise ValueError("No JSON-like block found in
+        raise ValueError("No JSON-like block found in LLM output")
+
     block = match.group(0)
 
     def find_number_for_key(key: str) -> Optional[float]:
+        """
+        Returns a float in range 0..1 for probabilities, and raw numeric for other keys depending on usage.
+        This helper returns None if not found; caller will replace with defaults (0.0).
+        """
+        # Try multiple patterns to be robust
+        # Pattern captures numbers possibly with % and optional quotes, e.g. "45%", '0.12', 0.5, " 87 "
         patterns = [
-            rf'"{key}"\s*:\s*["\']?\s*([-+]?\d+(\.\d+)?)\s*%?\s*["\']?',
+            rf'"{key}"\s*:\s*["\']?\s*([-+]?\d+(\.\d+)?)\s*%?\s*["\']?',  # "key": "45%" or "key": 0.45
             rf"'{key}'\s*:\s*['\"]?\s*([-+]?\d+(\.\d+)?)\s*%?\s*['\"]?",
-            rf'\b{key}\b\s*:\s*["\']?\s*([-+]?\d+(\.\d+)?)\s*%?\s*["\']?',
-            rf'"{key}"\s*:\s*["\']([^"\']+)["\']',
+            rf'\b{key}\b\s*:\s*["\']?\s*([-+]?\d+(\.\d+)?)\s*%?\s*["\']?',  # key: 45%
+            rf'"{key}"\s*:\s*["\']([^"\']+)["\']',  # capture quoted text (for non-numeric attempts)
             rf"'{key}'\s*:\s*['\"]([^'\"]+)['\"]"
         ]
        for pat in patterns:
@@ -178,25 +177,33 @@ def extract_json_via_regex(raw_text: str) -> Dict[str, Any]:
                 g = m.group(1)
                 if g is None:
                     continue
-                s = str(g).strip()
+                s = str(g).strip()
+                # Remove percent sign if present
+                s = s.replace("%", "").strip()
+                # Try to coerce to float
                 try:
-
+                    val = float(s)
+                    return val
                 except Exception:
+                    # not numeric
                     return None
         return None
 
     def find_text_for_key(key: str) -> str:
+        # capture "key": "some text" allowing single/double quotes and also unquoted until comma/}
         m = re.search(rf'"{key}"\s*:\s*"([^"]*)"', block, flags=re.IGNORECASE)
         if m:
             return m.group(1).strip()
         m = re.search(rf"'{key}'\s*:\s*'([^']*)'", block, flags=re.IGNORECASE)
         if m:
             return m.group(1).strip()
+        # fallback: key: some text (unquoted) up to comma or }
         m = re.search(rf'\b{key}\b\s*:\s*([^\n,}}]+)', block, flags=re.IGNORECASE)
         if m:
             return m.group(1).strip().strip('",')
         return ""
 
+    # Extract raw numeric candidates
     raw_risk = find_number_for_key("risk_score")
     raw_jaundice = find_number_for_key("jaundice_probability")
     raw_anemia = find_number_for_key("anemia_probability")
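To make the pattern list above concrete: a standalone sketch of how the first (quoted, percent-tolerant) pattern matches, with a hypothetical block:

    import re

    block = '{"risk_score": "45%", "summary": "mild pallor"}'
    pat = r'"risk_score"\s*:\s*["\']?\s*([-+]?\d+(\.\d+)?)\s*%?\s*["\']?'
    m = re.search(pat, block, flags=re.IGNORECASE)
    print(m.group(1))  # '45'; the caller then strips any '%' and coerces with float()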
@@ -204,13 +211,17 @@ def extract_json_via_regex(raw_text: str) -> Dict[str, Any]:
     raw_neuro = find_number_for_key("neurological_issue_probability")
     raw_conf = find_number_for_key("confidence")
 
+    # Normalize:
+    # - For probabilities: if value > 1 and <=100 => treat as percent -> divide by 100. If <=1 treat as fraction.
     def normalize_prob(v: Optional[float]) -> float:
         if v is None:
             return 0.0
         if v > 1.0 and v <= 100.0:
             return max(0.0, min(1.0, v / 100.0))
+        # if v is large >100, clamp to 1.0
         if v > 100.0:
             return 1.0
+        # otherwise assume already 0..1
         return max(0.0, min(1.0, v))
 
     jaundice_probability = normalize_prob(raw_jaundice)
@@ -219,13 +230,17 @@ def extract_json_via_regex(raw_text: str) -> Dict[str, Any]:
     neurological_issue_probability = normalize_prob(raw_neuro)
     confidence = normalize_prob(raw_conf)
 
+    # risk_score: return in 0..100
     def normalize_risk(v: Optional[float]) -> float:
         if v is None:
             return 0.0
         if v <= 1.0:
+            # fraction given -> scale to 0..100
             return round(max(0.0, min(100.0, v * 100.0)), 2)
+        # if between 1 and 100, assume it's already 0..100
         if v > 1.0 and v <= 100.0:
             return round(max(0.0, min(100.0, v)), 2)
+        # clamp anything insane
         return round(max(0.0, min(100.0, v if v < float('inf') else 100.0)), 2)
 
     risk_score = normalize_risk(raw_risk)
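Worked examples of the two normalizers above (hypothetical inputs):

    normalize_prob(55)      # 1 < v <= 100: treated as a percent -> 0.55
    normalize_prob(0.55)    # already a fraction                 -> 0.55
    normalize_prob(250)     # v > 100: clamped                   -> 1.0
    normalize_risk(0.45)    # fraction, scaled to 0..100         -> 45.0
    normalize_risk(45)      # already on the 0..100 scale        -> 45.0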
@@ -246,7 +261,7 @@ def extract_json_via_regex(raw_text: str) -> Dict[str, Any]:
     return out
 
 # -----------------------
-# Gradio / VLM helper (
+# Gradio / VLM helper (returns parsed dict OR None, plus raw text)
 # -----------------------
 def get_gradio_client_for_space(space: str) -> Client:
     if not GRADIO_AVAILABLE:
@@ -260,7 +275,8 @@ def run_vlm_and_get_features(face_path: str, eye_path: str, prompt: Optional[str
     Synchronous call to remote VLM (gradio /chat_fn). Returns tuple:
     (parsed_features_dict_or_None, raw_text_response_str)
 
-
+    We attempt to parse JSON as before, but always return the original raw text so it can be
+    forwarded verbatim to the LLM if desired.
     """
     prompt = prompt or DEFAULT_VLM_PROMPT
     if not os.path.exists(face_path) or not os.path.exists(eye_path):
@@ -271,63 +287,53 @@ def run_vlm_and_get_features(face_path: str, eye_path: str, prompt: Optional[str
     client = get_gradio_client_for_space(GRADIO_VLM_SPACE)
     message = {"text": prompt, "files": [handle_file(face_path), handle_file(eye_path)]}
 
-    # SINGLE CALL (no retries)
     try:
         logger.info("Calling VLM Space %s", GRADIO_VLM_SPACE)
         result = client.predict(message=message, history=[], api_name="/chat_fn")
     except Exception as e:
-        logger.exception("VLM call failed
+        logger.exception("VLM call failed")
         raise RuntimeError(f"VLM call failed: {e}")
 
-    # Normalize result
-    raw_text = ""
     if not result:
-
-        raw_text = ""
-    else:
-        if isinstance(result, (list, tuple)):
-            out = result[0]
-        elif isinstance(result, dict):
-            out = result
-        else:
-            out = {"text": str(result)}
-
-
-
-
-            logger.warning("VLM returned no text AND files: %s", out.get("files"))
-
-
+        raise RuntimeError("Empty response from VLM")
 
-    # Try to parse JSON
+    # Normalize result
+    if isinstance(result, (list, tuple)):
+        out = result[0]
+    elif isinstance(result, dict):
+        out = result
+    else:
+        out = {"text": str(result)}
+
+    if not isinstance(out, dict):
+        raise RuntimeError("Unexpected VLM output format (expected dict with 'text' key)")
+
+    text_out = out.get("text") or out.get("output") or None
+    if not text_out:
+        text_out = json.dumps(out)
+
+    # Try to parse JSON but remember raw text always
     parsed_features = None
     try:
-        parsed_features = json.loads(
-        if
+        parsed_features = json.loads(text_out)
+        if not isinstance(parsed_features, dict):
             parsed_features = None
     except Exception:
-        parsed_features = None
-
-    # If json.loads failed or returned None, try regex-based extraction
-    if parsed_features is None and raw_text and raw_text.strip():
         try:
-
-
-
+            s = text_out
+            first = s.find("{")
+            last = s.rfind("}")
+            if first != -1 and last != -1 and last > first:
+                maybe = s[first:last+1]
+                parsed_features = json.loads(maybe)
+                if not isinstance(parsed_features, dict):
+                    parsed_features = None
+            else:
+                parsed_features = None
+        except Exception:
             parsed_features = None
 
-
-        logger.info("VLM parsed features: None (will fallback to sending '{}' or raw string to LLM).")
-    else:
-        logger.info("VLM parsed features (final): %s", json.dumps(parsed_features, ensure_ascii=False))
-
-    # Always return raw_text (may be empty string) and parsed_features (or None)
-    return parsed_features, (raw_text or "")
+    return parsed_features, text_out
 
 # -----------------------
 # Gradio / LLM helper (defensive, with retry + clamps)
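The except-branch above recovers JSON embedded in chatty VLM replies by slicing from the first '{' to the last '}' before retrying json.loads. A self-contained sketch of that fallback (sample text hypothetical):

    import json

    s = 'Sure! Here are the features: {"pallor": 0.3, "sclera_yellowness": 0.1} Hope this helps.'
    first, last = s.find("{"), s.rfind("}")
    maybe = s[first:last + 1] if (first != -1 and last > first) else None
    print(json.loads(maybe))  # {'pallor': 0.3, 'sclera_yellowness': 0.1}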
@@ -341,9 +347,9 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
                    developer_prompt: Optional[str] = None) -> Dict[str, Any]:
     """
     Call the remote LLM Space's /chat endpoint with defensive input handling and a single retry.
-    -
-    -
-    -
+    - Coerces types (int for tokens), clamps ranges where remote spaces often expect them.
+    - Retries once with safe defaults if the Space rejects the inputs (e.g. temperature too low).
+    - Logs and returns regex-extracted JSON as before.
     """
     if not GRADIO_AVAILABLE:
         raise RuntimeError("gradio_client not installed. Add gradio_client to requirements.txt")
@@ -359,17 +365,13 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
     system_prompt = system_prompt or LLM_SYSTEM_PROMPT
     developer_prompt = developer_prompt or LLM_DEVELOPER_PROMPT
 
-    #
+    # Prepare the combined prompt: use raw string as-is, otherwise json.dumps the dict
     if isinstance(vlm_features_or_raw, str):
-
-        logger.info("LLM input will be RAW VLM STRING (len=%d)", len(vlm_raw_str or ""))
-        vlm_json_str_to_send = vlm_raw_str if vlm_raw_str and vlm_raw_str.strip() else "{}"
+        vlm_json_str = vlm_features_or_raw
     else:
-
-        logger.info("LLM input will be CLEANED VLM JSON (len=%d)", len(vlm_raw_str))
-        vlm_json_str_to_send = vlm_raw_str
+        vlm_json_str = json.dumps(vlm_features_or_raw, default=str)
 
-    #
+    # Strong, explicit instruction to output only JSON
     instruction = (
         "\n\nSTRICT INSTRUCTIONS (READ CAREFULLY):\n"
         "1) OUTPUT ONLY a single valid JSON object and nothing else — no prose, no explanation, no code fences.\n"
@@ -380,9 +382,10 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
         "If you cannot estimate a value, set it to null.\n\n"
         "Now, based on the VLM output below, produce ONLY the JSON object described above.\n\n"
         "===BEGIN VLM OUTPUT===\n"
-        f"{
+        f"{vlm_json_str}\n"
         "===END VLM OUTPUT===\n\n"
     )
+    input_payload_str = instruction
 
     # Defensive coercion / clamps
     try_max_new_tokens = int(max_new_tokens) if max_new_tokens is not None else 1024
@@ -390,12 +393,13 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
         try_max_new_tokens = 1024
 
     try_temperature = float(temperature) if temperature is not None else 0.0
-    #
+    # Many demos require temperature >= 0.1; clamp to 0.1 minimum to avoid validation failures
     if try_temperature < 0.1:
         try_temperature = 0.1
 
+    # prepare kwargs for predict
     predict_kwargs = dict(
-        input_data=
+        input_data=input_payload_str,
         max_new_tokens=float(try_max_new_tokens),
         model_identity=model_identity,
         system_prompt=system_prompt,
@@ -408,39 +412,26 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
         api_name="/chat"
     )
 
+    # attempt + one retry with safer defaults if AppError occurs
     last_exc = None
     for attempt in (1, 2):
         try:
             logger.info("Calling LLM Space %s (attempt %d) with temperature=%s, max_new_tokens=%s",
                         LLM_GRADIO_SPACE, attempt, predict_kwargs.get("temperature"), predict_kwargs.get("max_new_tokens"))
             result = client.predict(**predict_kwargs)
-
             # normalize to string
             if isinstance(result, (dict, list)):
                 text_out = json.dumps(result)
             else:
                 text_out = str(result)
-
             if not text_out or len(text_out.strip()) == 0:
                 raise RuntimeError("LLM returned empty response")
-
-            logger.info("LLM raw output (len=%d):\n%s", len(text_out or ""), (text_out[:2000] + "...") if len(text_out) > 2000 else text_out)
+            logger.info("LLM raw output:\n%s", text_out)
 
             # parse with regex extractor (may raise)
-            parsed =
-
-
-            except Exception:
-                # fallback: attempt json.loads naive
-                try:
-                    parsed = json.loads(text_out)
-                    if not isinstance(parsed, dict):
-                        parsed = None
-                except Exception:
-                    parsed = None
-
-            if parsed is None:
-                raise ValueError("Failed to extract JSON from LLM output")
+            parsed = extract_json_via_regex(text_out)
+            if not isinstance(parsed, dict):
+                raise ValueError("Parsed LLM output is not a JSON object/dict")
 
             # pretty log parsed JSON
             try:
@@ -448,7 +439,7 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
             except Exception:
                 logger.info("LLM parsed JSON (raw dict): %s", str(parsed))
 
-            # defensive clamps (same as
+            # defensive clamps (same as before)
             def safe_prob(val):
                 try:
                     v = float(val)
@@ -487,24 +478,29 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
             return parsed
 
         except AppError as app_e:
+            # Specific remote validation error: log and attempt a single retry with ultra-safe defaults
             logger.exception("LLM AppError (remote validation failed) on attempt %d: %s", attempt, str(app_e))
             last_exc = app_e
             if attempt == 1:
+                # tighten inputs and retry: force temperature=0.2, max_new_tokens=512
                 predict_kwargs["temperature"] = 0.2
                 predict_kwargs["max_new_tokens"] = float(512)
                 logger.info("Retrying LLM call with temperature=0.2 and max_new_tokens=512")
                 continue
             else:
+                # no more retries
                 raise RuntimeError(f"LLM call failed (AppError): {app_e}")
         except Exception as e:
             logger.exception("LLM call failed on attempt %d: %s", attempt, str(e))
             last_exc = e
+            # try one retry only for non-AppError exceptions
            if attempt == 1:
                 predict_kwargs["temperature"] = 0.2
                 predict_kwargs["max_new_tokens"] = float(512)
                 continue
             raise RuntimeError(f"LLM call failed: {e}")
 
+    # if we reach here, raise last caught exception
     raise RuntimeError(f"LLM call ultimately failed: {last_exc}")
 
 # -----------------------
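Condensed, the retry policy implemented above amounts to one retry with safer generation settings, then giving up; an illustrative shape only, not the verbatim handler:

    for attempt in (1, 2):
        try:
            result = client.predict(**predict_kwargs)
            break
        except Exception:
            if attempt == 1:
                predict_kwargs["temperature"] = 0.2           # safer defaults for the retry
                predict_kwargs["max_new_tokens"] = float(512)
                continue
            raise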
@@ -533,8 +529,13 @@ async def health_check():
 
 @app.post("/api/v1/validate-eye-photo")
 async def validate_eye_photo(image: UploadFile = File(...)):
+    """
+    Lightweight validation endpoint. Uses available detector (facenet/mtcnn/opencv) to check face/eye detection.
+    For full pipeline, use /api/v1/upload which invokes VLM+LLM in background.
+    """
     if mtcnn is None:
         raise HTTPException(status_code=500, detail="No face detector available in this deployment.")
+
     try:
         content = await image.read()
         if not content:
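A minimal client-side sketch for exercising this endpoint with the requests library (host, port and file name are assumptions; adjust to the deployment; the detector branches appear in the following hunks):

    import requests

    with open("eye.jpg", "rb") as f:  # hypothetical test image
        resp = requests.post(
            "http://localhost:7860/api/v1/validate-eye-photo",  # hypothetical local URL
            files={"image": f},
        )
    print(resp.json())  # validity flag, bilingual messages, eye landmarks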
@@ -542,6 +543,7 @@ async def validate_eye_photo(image: UploadFile = File(...)):
         pil_img = load_image_from_bytes(content)
         img_arr = np.asarray(pil_img)  # RGB
 
+        # facenet-pytorch branch
         if not isinstance(mtcnn, dict) and _MTCNN_IMPL == "facenet_pytorch":
             try:
                 boxes, probs, landmarks = mtcnn.detect(pil_img, landmarks=True)
@@ -566,6 +568,7 @@ async def validate_eye_photo(image: UploadFile = File(...)):
                 traceback.print_exc()
                 raise HTTPException(status_code=500, detail="Face detector failed during inference.")
 
+        # classic mtcnn branch
         if not isinstance(mtcnn, dict) and _MTCNN_IMPL == "mtcnn":
             try:
                 detections = mtcnn.detect_faces(img_arr)
@@ -587,6 +590,7 @@ async def validate_eye_photo(image: UploadFile = File(...)):
                     "message_hindi": "फोटो अच्छी है! आंखें ठीक से खुली हैं।" if is_valid else "आंखें बंद या आंशिक रूप से बंद दिखाई दे रही हैं। कृपया अपनी आंखें चौड़ी खोलें और पुनः प्रयास करें।",
                     "eye_landmarks": {"left_eye": left_eye, "right_eye": right_eye}}
 
+        # OpenCV Haar cascade fallback
         if isinstance(mtcnn, dict) and mtcnn.get("impl") == "opencv":
             try:
                 gray = cv2.cvtColor(img_arr, cv2.COLOR_RGB2GRAY)
@@ -688,11 +692,7 @@ async def get_status(screening_id: str):
 async def get_results(screening_id: str):
     if screening_id not in screenings_db:
         raise HTTPException(status_code=404, detail="Screening not found")
-
-    entry = screenings_db[screening_id]
-    entry.setdefault("ai_results", {})
-    entry["ai_results"].setdefault("vlm_raw", entry.get("ai_results", {}).get("vlm_raw", ""))
-    return entry
+    return screenings_db[screening_id]
 
 @app.get("/api/v1/history/{user_id}")
 async def get_history(user_id: str):
@@ -709,7 +709,7 @@ async def get_vitals_from_upload(
 ):
     """
     Run VLM -> LLM pipeline synchronously (but off the event loop) and return:
-    {
+    { vlm_features, vlm_raw, structured_risk }
     """
     if not GRADIO_AVAILABLE:
         raise HTTPException(status_code=500, detail="VLM/LLM client not available in this deployment.")
@@ -735,26 +735,17 @@ async def get_vitals_from_upload(
         # Run VLM (off the event loop)
         vlm_features, vlm_raw = await asyncio.to_thread(run_vlm_and_get_features, face_path, eye_path)
 
-        #
-
-        logger.info("get_vitals_from_upload - VLM parsed features: %s", json.dumps(vlm_features, indent=2, ensure_ascii=False) if vlm_features else "None")
-
-        # Decide what to feed to LLM: prefer cleaned JSON if available, else raw VLM string
-        if vlm_features:
-            llm_input = json.dumps(vlm_features, ensure_ascii=False)
-            logger.info("Feeding CLEANED VLM JSON to LLM (len=%d).", len(llm_input))
-        else:
-            llm_input = vlm_raw if vlm_raw and vlm_raw.strip() else "{}"
-            logger.info("Feeding RAW VLM STRING to LLM (len=%d).", len(llm_input))
+        # Prefer sending raw vlm text to LLM (same behavior as process_screening)
+        llm_input = vlm_raw if vlm_raw else (vlm_features if vlm_features else "{}")
 
         # Run LLM (off the event loop)
         structured_risk = await asyncio.to_thread(run_llm_on_vlm, llm_input)
 
-        # Return merged result
+        # Return merged result
         return {
-            "
-            "
-            "
+            "vlm_features": vlm_features,
+            "vlm_raw": vlm_raw,
+            "structured_risk": structured_risk
         }
     except Exception as e:
         logger.exception("get_vitals_from_upload pipeline failed")
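On success the endpoint returns the merged result assembled above; the shape, with illustrative values:

    # {
    #     "vlm_features": {...} or None,
    #     "vlm_raw": "<verbatim VLM text>",
    #     "structured_risk": {"risk_score": 12.5, "anemia_probability": 0.2, ...}
    # }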
@@ -779,22 +770,13 @@ async def get_vitals_for_screening(screening_id: str):
         # Run VLM off the event loop
         vlm_features, vlm_raw = await asyncio.to_thread(run_vlm_and_get_features, face_path, eye_path)
 
-
-        logger.info("get_vitals_for_screening(%s) - VLM parsed features: %s", screening_id, json.dumps(vlm_features, indent=2, ensure_ascii=False) if vlm_features else "None")
-
-        if vlm_features:
-            llm_input = json.dumps(vlm_features, ensure_ascii=False)
-            logger.info("Feeding CLEANED VLM JSON to LLM (len=%d).", len(llm_input))
-        else:
-            llm_input = vlm_raw if vlm_raw and vlm_raw.strip() else "{}"
-            logger.info("Feeding RAW VLM STRING to LLM (len=%d).", len(llm_input))
-
+        llm_input = vlm_raw if vlm_raw else (vlm_features if vlm_features else "{}")
         structured_risk = await asyncio.to_thread(run_llm_on_vlm, llm_input)
 
         # Optionally store this run's outputs back into the DB for inspection
         entry.setdefault("ai_results", {})
         entry["ai_results"].update({
-            "
+            "vlm_features": vlm_features,
             "vlm_raw": vlm_raw,
             "structured_risk": structured_risk,
             "last_vitals_run": datetime.utcnow().isoformat() + "Z"
@@ -802,16 +784,16 @@ async def get_vitals_for_screening(screening_id: str):
 
         return {
             "screening_id": screening_id,
-            "
-            "
-            "
+            "vlm_features": vlm_features,
+            "vlm_raw": vlm_raw,
+            "structured_risk": structured_risk
         }
     except Exception as e:
         logger.exception("get_vitals_for_screening pipeline failed")
         raise HTTPException(status_code=500, detail=f"Pipeline failed: {e}")
 
 # -----------------------
-# Main
+# Main processing pipeline
 # -----------------------
 async def process_screening(screening_id: str):
     """
|
@@ -819,7 +801,7 @@ async def process_screening(screening_id: str):
|
|
| 819 |
- load images
|
| 820 |
- quick detector-based quality metrics
|
| 821 |
- run VLM -> vlm_features (dict or None) + vlm_raw (string)
|
| 822 |
-
- run LLM on
|
| 823 |
- merge results into ai_results and finish
|
| 824 |
"""
|
| 825 |
try:
|
|
@@ -909,7 +891,7 @@ async def process_screening(screening_id: str):
             vlm_features, vlm_raw = run_vlm_and_get_features(face_path, eye_path)
             screenings_db[screening_id].setdefault("ai_results", {})
             screenings_db[screening_id]["ai_results"].update({
-                "
+                "vlm_features": vlm_features,
                 "vlm_raw": vlm_raw
             })
         except Exception as e:
@@ -917,25 +899,29 @@ async def process_screening(screening_id: str):
             screenings_db[screening_id].setdefault("ai_results", {})
             screenings_db[screening_id]["ai_results"].update({"vlm_error": str(e)})
             vlm_features = None
-            vlm_raw =
-
-        # Log VLM outputs in pipeline context
-        logger.info("process_screening(%s) - VLM raw (snippet): %s", screening_id, (vlm_raw[:500] + "...") if vlm_raw else "<EMPTY>")
-        logger.info("process_screening(%s) - VLM parsed features: %s", screening_id, json.dumps(vlm_features, indent=2, ensure_ascii=False) if vlm_features else "None")
+            vlm_raw = None
 
         # --------------------------
-        # RUN LLM on
+        # RUN LLM on vlm_raw (preferred) or vlm_features -> structured risk JSON
         # --------------------------
         structured_risk = None
         try:
-            if
-
-
+            if vlm_raw:
+                structured_risk = run_llm_on_vlm(vlm_raw)
+            elif vlm_features:
+                structured_risk = run_llm_on_vlm(vlm_features)
             else:
-                #
-
-
-
+                # Fallback if VLM failed: produce conservative defaults
+                structured_risk = {
+                    "risk_score": 0.0,
+                    "jaundice_probability": 0.0,
+                    "anemia_probability": 0.0,
+                    "hydration_issue_probability": 0.0,
+                    "neurological_issue_probability": 0.0,
+                    "summary": "",
+                    "recommendation": "",
+                    "confidence": 0.0
+                }
             screenings_db[screening_id].setdefault("ai_results", {})
             screenings_db[screening_id]["ai_results"].update({"structured_risk": structured_risk})
         except Exception as e:
@@ -954,14 +940,19 @@ async def process_screening(screening_id: str):
         }
 
         # Use structured_risk for summary recommendations & simple disease inference placeholders
+        hem = screenings_db[screening_id]["ai_results"].get("medical_insights", {}).get("hemoglobin_estimate", None)
+        bil = screenings_db[screening_id]["ai_results"].get("medical_insights", {}).get("bilirubin_estimate", None)
+
+        # Keep older ai_results shape for backward compatibility (if you want)
         screenings_db[screening_id].setdefault("ai_results", {})
         screenings_db[screening_id]["ai_results"].update({
             "processing_time_ms": 1200
         })
 
+        # disease_predictions & recommendations can be built from structured_risk if needed
         disease_predictions = [
             {
-                "condition": "Anemia-like-signs",
+                "condition": "Anemia-like-signs",  # internal tag (not surfaced in LLM summary)
                 "risk_level": "Medium" if structured_risk.get("anemia_probability", 0.0) > 0.5 else "Low",
                 "probability": structured_risk.get("anemia_probability", 0.0),
                 "confidence": structured_risk.get("confidence", 0.0)
@@ -977,7 +968,7 @@ async def process_screening(screening_id: str):
         recommendations = {
             "action_needed": "consult" if structured_risk.get("risk_score", 0.0) > 30.0 else "monitor",
             "message_english": structured_risk.get("recommendation", "") or f"Please follow up with a health professional if concerns persist.",
-            "message_hindi": ""
+            "message_hindi": ""  # could be auto-translated if desired
         }
 
         screenings_db[screening_id].update({