dpv007 commited on
Commit
139d77f
Β·
verified Β·
1 Parent(s): 42128c0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +339 -68
app.py CHANGED
@@ -1,6 +1,6 @@
1
  """
2
- Elderly HealthWatch AI Backend (FastAPI) - With GCS Support
3
- Simplified: Just upload gcs-credentials.json to repository root
4
  """
5
 
6
  import io
@@ -38,7 +38,10 @@ except Exception:
38
  # ============================================================================
39
  # Configuration
40
  # ============================================================================
41
- logging.basicConfig(level=logging.INFO)
 
 
 
42
  logger = logging.getLogger("elderly_healthwatch")
43
 
44
  GRADIO_VLM_SPACE = os.getenv("GRADIO_SPACE", "developer0hye/Qwen3-VL-8B-Instruct")
@@ -50,14 +53,11 @@ GCS_BUCKET_NAME = "elderly-healthwatch-images"
50
  GCS_CREDENTIALS_FILE = "gcs-credentials.json"
51
 
52
  DEFAULT_VLM_PROMPT = (
53
- "Analyze the provided face and eye images and extract only the following screening features: "
54
- "pallor, sclera_yellowness, eye_redness, facial_mobility_metrics, and image_quality_checks. "
55
- "Return a single valid JSON object only (no text, no markdown). "
56
- "Each feature value must be a normalized confidence score between 0 and 1. "
57
- "If a feature cannot be reliably determined, assign null."
58
  )
59
 
60
-
61
  LLM_SYSTEM_PROMPT = (
62
  "System: This assistant MUST ONLY OUTPUT a single valid JSON object as its response β€” "
63
  "no prose, no explanations, no code fences, no annotations."
@@ -81,17 +81,42 @@ screenings_db: Dict[str, Dict[str, Any]] = {}
81
  # ============================================================================
82
  def setup_gcs_client():
83
  """Initialize GCS client from credentials file"""
 
 
 
 
84
  if not GCS_AVAILABLE:
85
- logger.warning("GCS libraries not installed")
86
  return None, None
87
 
88
  try:
 
 
 
 
89
  if os.path.exists(GCS_CREDENTIALS_FILE):
90
- logger.info("Found GCS credentials file: %s", GCS_CREDENTIALS_FILE)
 
 
91
  credentials = service_account.Credentials.from_service_account_file(GCS_CREDENTIALS_FILE)
 
 
92
  client = storage.Client(credentials=credentials)
 
 
93
  bucket = client.bucket(GCS_BUCKET_NAME)
94
- logger.info("βœ… GCS initialized successfully for bucket: %s", GCS_BUCKET_NAME)
 
 
 
 
 
 
 
 
 
 
 
95
  return client, bucket
96
  else:
97
  logger.warning("⚠️ GCS credentials file not found at: %s", GCS_CREDENTIALS_FILE)
@@ -99,25 +124,43 @@ def setup_gcs_client():
99
  return None, None
100
 
101
  except Exception as e:
102
- logger.exception("Failed to initialize GCS: %s", str(e))
103
  return None, None
104
 
105
  gcs_client, gcs_bucket = setup_gcs_client()
106
 
107
  def upload_to_gcs(local_path: str, blob_name: str) -> Optional[str]:
108
  """Upload file to GCS and return public URL"""
 
 
 
 
 
 
 
 
 
109
  if gcs_bucket is None:
 
110
  return None
111
 
112
  try:
113
  blob = gcs_bucket.blob(blob_name)
 
 
114
  blob.upload_from_filename(local_path, content_type='image/jpeg')
 
 
115
  blob.make_public()
 
 
116
  public_url = blob.public_url
117
- logger.info("βœ… Uploaded to GCS: %s -> %s", blob_name, public_url[:60])
 
 
118
  return public_url
119
  except Exception as e:
120
- logger.exception("Failed to upload to GCS: %s", str(e))
121
  return None
122
 
123
  # ============================================================================
@@ -160,7 +203,10 @@ face_detector, detector_type = setup_face_detector()
160
  # Utility Functions
161
  # ============================================================================
162
  def load_image_from_bytes(bytes_data: bytes) -> Image.Image:
163
- return Image.open(io.BytesIO(bytes_data)).convert("RGB")
 
 
 
164
 
165
  def normalize_probability(val: Optional[float]) -> float:
166
  """Normalize probability to 0-1 range"""
@@ -340,7 +386,7 @@ def extract_json_from_llm_output(raw_text: str) -> Dict[str, Any]:
340
  }
341
 
342
  # ============================================================================
343
- # VLM Integration - WITH GCS URL SUPPORT
344
  # ============================================================================
345
  def get_gradio_client(space: str) -> Client:
346
  """Get Gradio client with optional auth"""
@@ -350,14 +396,39 @@ def get_gradio_client(space: str) -> Client:
350
 
351
  def call_vlm_with_urls(face_url: str, eye_url: str, prompt: Optional[str] = None) -> Tuple[Optional[Dict], str]:
352
  """Call VLM using image URLs instead of file handles"""
 
 
 
 
 
 
 
 
 
 
 
 
 
353
  prompt = prompt or DEFAULT_VLM_PROMPT
 
 
 
354
 
355
- logger.info("πŸ”— VLM Input - Face URL: %s", face_url[:80])
356
- logger.info("πŸ”— VLM Input - Eye URL: %s", eye_url[:80])
 
 
357
 
358
- client = get_gradio_client(GRADIO_VLM_SPACE)
 
 
 
 
 
 
359
 
360
- # Try different message formats that VLM spaces commonly accept
 
361
  message_formats = [
362
  # Format 1: URLs in files array
363
  {"text": prompt, "files": [face_url, eye_url]},
@@ -368,80 +439,135 @@ def call_vlm_with_urls(face_url: str, eye_url: str, prompt: Optional[str] = None
368
  # Format 4: Images array
369
  {"text": prompt, "images": [face_url, eye_url]},
370
  ]
 
371
 
372
  last_error = None
373
 
 
374
  for idx, message in enumerate(message_formats, 1):
 
 
 
 
 
375
  try:
376
- logger.info("Trying VLM message format %d/%d: %s", idx, len(message_formats), list(message.keys()))
377
  result = client.predict(message=message, history=[], api_name="/chat_fn")
 
378
 
379
- logger.info("βœ… VLM call succeeded with format %d", idx)
380
- logger.info("VLM raw result type: %s", type(result))
 
 
381
 
382
- # Process the result
383
  if isinstance(result, (list, tuple)):
384
- logger.info("VLM returned list/tuple with %d elements", len(result))
385
  out = result[0] if len(result) > 0 else {}
 
386
  elif isinstance(result, dict):
387
- logger.info("VLM returned dict with keys: %s", list(result.keys()))
388
  out = result
389
  else:
390
- logger.info("VLM returned type: %s, converting to string", type(result))
391
  out = {"text": str(result)}
392
 
393
- # Extract text from various possible formats
 
394
  text_out = None
 
395
  if isinstance(out, dict):
 
396
  text_out = out.get("text") or out.get("output") or out.get("content") or out.get("response")
 
 
 
 
 
397
 
398
  if not text_out:
 
399
  if isinstance(result, str):
400
  text_out = result
 
401
  else:
402
  text_out = json.dumps(out)
 
403
 
404
- logger.info("VLM extracted text (first 300 chars): %s", text_out[:300] if text_out else "EMPTY")
 
 
405
 
 
406
  if not text_out or len(text_out.strip()) == 0:
407
- logger.warning("VLM returned empty text, trying next format...")
408
  last_error = "Empty response"
409
  continue
410
 
411
- # Try to parse JSON
 
412
  parsed = None
 
413
  try:
414
  parsed = json.loads(text_out)
415
  if not isinstance(parsed, dict):
416
- logger.warning("VLM JSON parsed but not a dict: %s", type(parsed))
417
  parsed = None
418
  else:
419
- logger.info("βœ… VLM successfully parsed JSON with keys: %s", list(parsed.keys()))
420
- except Exception:
 
 
 
 
 
421
  # Try to extract JSON from text
422
  try:
423
  first = text_out.find("{")
424
  last = text_out.rfind("}")
 
 
 
425
  if first != -1 and last != -1 and last > first:
426
  json_str = text_out[first:last+1]
 
 
 
427
  parsed = json.loads(json_str)
428
  if isinstance(parsed, dict):
429
- logger.info("βœ… Successfully extracted JSON from text with keys: %s", list(parsed.keys()))
 
430
  else:
 
431
  parsed = None
 
 
432
  except Exception as extract_err:
433
- logger.info("Could not extract JSON from VLM text: %s", str(extract_err))
434
  parsed = None
435
 
436
- # Success! Return the result
 
 
 
 
 
 
 
437
  return parsed, text_out
438
 
439
  except Exception as e:
440
- logger.warning("VLM format %d failed: %s", idx, str(e))
 
441
  last_error = str(e)
442
  continue
443
 
444
  # All formats failed
 
 
 
 
 
445
  raise RuntimeError(f"All VLM message formats failed. Last error: {last_error}")
446
 
447
  def call_vlm(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Tuple[Optional[Dict], str]:
@@ -449,10 +575,23 @@ def call_vlm(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Tup
449
  Call VLM - wrapper that handles both local files and GCS URLs
450
  Strategy: Try GCS first (if available), fallback to file handles
451
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
452
 
453
  # Strategy 1: Try GCS URLs (if GCS is set up)
454
  if gcs_bucket is not None:
455
- logger.info("🌐 GCS is available, uploading images and using URLs for VLM")
456
 
457
  try:
458
  # Generate unique blob names
@@ -460,10 +599,19 @@ def call_vlm(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Tup
460
  face_blob_name = f"vlm_temp/{unique_id}_face.jpg"
461
  eye_blob_name = f"vlm_temp/{unique_id}_eye.jpg"
462
 
 
 
 
 
463
  # Upload to GCS
 
464
  face_url = upload_to_gcs(face_path, face_blob_name)
465
  eye_url = upload_to_gcs(eye_path, eye_blob_name)
466
 
 
 
 
 
467
  if face_url and eye_url:
468
  logger.info("βœ… Successfully uploaded to GCS, calling VLM with URLs")
469
  return call_vlm_with_urls(face_url, eye_url, prompt)
@@ -471,48 +619,78 @@ def call_vlm(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Tup
471
  logger.warning("⚠️ GCS upload failed, falling back to file handles")
472
  except Exception as e:
473
  logger.warning("⚠️ GCS error, falling back to file handles: %s", str(e))
 
474
  else:
475
  logger.info("ℹ️ GCS not available, using file handles for VLM")
476
 
477
  # Strategy 2: Fallback to file handles (original method)
 
 
 
 
478
  if not os.path.exists(face_path) or not os.path.exists(eye_path):
 
 
 
479
  raise FileNotFoundError("Face or eye image path missing")
480
 
481
- logger.info("πŸ“ VLM Input - Face file: %s (size: %d bytes)", face_path, os.path.getsize(face_path))
482
- logger.info("πŸ“ VLM Input - Eye file: %s (size: %d bytes)", eye_path, os.path.getsize(eye_path))
483
-
484
  prompt = prompt or DEFAULT_VLM_PROMPT
 
 
 
485
  client = get_gradio_client(GRADIO_VLM_SPACE)
486
- message = {"text": prompt, "files": [handle_file(face_path), handle_file(eye_path)]}
 
 
 
 
 
 
 
 
 
 
487
 
488
  try:
489
- logger.info("Calling VLM with file handles")
490
  result = client.predict(message=message, history=[], api_name="/chat_fn")
491
- logger.info("VLM raw result: %s", str(result)[:500])
 
 
492
  except Exception as e:
493
- logger.exception("VLM call with file handles failed")
494
  raise RuntimeError(f"VLM call failed: {e}")
495
 
496
  # Process result (same as in call_vlm_with_urls)
 
497
  if isinstance(result, (list, tuple)):
498
  out = result[0] if len(result) > 0 else {}
 
499
  elif isinstance(result, dict):
500
  out = result
 
501
  else:
502
  out = {"text": str(result)}
 
503
 
504
  text_out = None
505
  if isinstance(out, dict):
506
  text_out = out.get("text") or out.get("output") or out.get("content")
 
507
 
508
  if not text_out:
509
  if isinstance(result, str):
510
  text_out = result
 
511
  else:
512
  text_out = json.dumps(out)
 
 
 
 
513
 
514
  if not text_out or len(text_out.strip()) == 0:
515
- logger.warning("VLM returned empty text")
516
  text_out = "{}"
517
 
518
  parsed = None
@@ -520,7 +698,10 @@ def call_vlm(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Tup
520
  parsed = json.loads(text_out)
521
  if not isinstance(parsed, dict):
522
  parsed = None
 
 
523
  except Exception:
 
524
  try:
525
  first = text_out.find("{")
526
  last = text_out.rfind("}")
@@ -528,9 +709,17 @@ def call_vlm(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Tup
528
  parsed = json.loads(text_out[first:last+1])
529
  if not isinstance(parsed, dict):
530
  parsed = None
 
 
531
  except Exception:
 
532
  pass
533
 
 
 
 
 
 
534
  return parsed, text_out
535
 
536
  # ============================================================================
@@ -604,12 +793,21 @@ def get_fallback_risk_assessment(vlm_output: Any, reason: str = "LLM unavailable
604
 
605
  def call_llm(vlm_output: Any, use_fallback_on_error: bool = True) -> Dict[str, Any]:
606
  """Call LLM with VLM output and return structured risk assessment"""
 
 
 
 
607
  if not GRADIO_AVAILABLE:
 
608
  if use_fallback_on_error:
609
  return get_fallback_risk_assessment(vlm_output, reason="gradio_not_available")
610
  raise RuntimeError("gradio_client not installed")
611
 
612
  vlm_text = vlm_output if isinstance(vlm_output, str) else json.dumps(vlm_output, default=str)
 
 
 
 
613
 
614
  if not vlm_text or vlm_text.strip() in ["{}", "[]", ""]:
615
  logger.warning("VLM output is empty, using fallback assessment")
@@ -628,9 +826,17 @@ def call_llm(vlm_output: Any, use_fallback_on_error: bool = True) -> Dict[str, A
628
  "VLM Output:\n" + vlm_text + "\n"
629
  )
630
 
 
 
631
  try:
 
632
  client = get_gradio_client(LLM_GRADIO_SPACE)
633
- logger.info("Calling LLM Space: %s", LLM_GRADIO_SPACE)
 
 
 
 
 
634
 
635
  result = client.predict(
636
  input_data=instruction,
@@ -646,12 +852,19 @@ def call_llm(vlm_output: Any, use_fallback_on_error: bool = True) -> Dict[str, A
646
  api_name="/chat"
647
  )
648
 
 
 
 
649
  text_out = json.dumps(result) if isinstance(result, (dict, list)) else str(result)
650
- logger.info("LLM raw output:\n%s", text_out[:1000])
 
651
 
 
652
  parsed = extract_json_from_llm_output(text_out)
653
- logger.info("LLM parsed JSON:\n%s", json.dumps(parsed, indent=2))
 
654
 
 
655
  all_zero = all(
656
  parsed.get(k, 0) == 0
657
  for k in ["jaundice_probability", "anemia_probability",
@@ -659,15 +872,18 @@ def call_llm(vlm_output: Any, use_fallback_on_error: bool = True) -> Dict[str, A
659
  )
660
 
661
  if all_zero and parsed.get("risk_score", 0) == 0:
662
- logger.warning("LLM returned all-zero assessment")
663
  parsed["summary"] = "Image analysis incomplete. Please ensure photos are clear and well-lit."
664
  parsed["recommendation"] = "Retake photos with face clearly visible and eyes open."
665
  parsed["confidence"] = 0.1
666
 
 
 
 
667
  return parsed
668
 
669
  except Exception as e:
670
- logger.exception("LLM call failed: %s", str(e))
671
 
672
  error_msg = str(e).lower()
673
  if "quota" in error_msg or "gpu" in error_msg:
@@ -685,21 +901,35 @@ def call_llm(vlm_output: Any, use_fallback_on_error: bool = True) -> Dict[str, A
685
  # ============================================================================
686
  async def process_screening(screening_id: str):
687
  """Main processing pipeline"""
 
 
 
 
688
  try:
689
  if screening_id not in screenings_db:
690
- logger.error("Screening %s not found", screening_id)
691
  return
692
 
693
  screenings_db[screening_id]["status"] = "processing"
694
- logger.info("Starting processing for %s", screening_id)
695
 
696
  entry = screenings_db[screening_id]
697
  face_path = entry["face_image_path"]
698
  eye_path = entry["eye_image_path"]
699
 
 
 
 
 
700
  # Load images and get quality metrics
 
701
  face_img = Image.open(face_path).convert("RGB")
 
 
 
702
  detection_result = detect_face_and_eyes(face_img)
 
 
703
 
704
  quality_metrics = {
705
  "face_detected": detection_result["face_detected"],
@@ -713,13 +943,22 @@ async def process_screening(screening_id: str):
713
  "face_blur_estimate": int(np.var(np.asarray(face_img.convert("L"))))
714
  }
715
  screenings_db[screening_id]["quality_metrics"] = quality_metrics
 
716
 
717
- # Call VLM (will use GCS URLs if available)
 
718
  vlm_features, vlm_raw = await asyncio.to_thread(call_vlm, face_path, eye_path)
 
 
 
719
 
720
- # Call LLM with fallback enabled
 
721
  llm_input = vlm_raw if vlm_raw else (vlm_features if vlm_features else "{}")
722
  structured_risk = await asyncio.to_thread(call_llm, llm_input, use_fallback_on_error=True)
 
 
 
723
 
724
  # Store results
725
  screenings_db[screening_id]["ai_results"] = {
@@ -757,10 +996,12 @@ async def process_screening(screening_id: str):
757
  "recommendations": recommendations
758
  })
759
 
760
- logger.info("Completed processing for %s", screening_id)
 
 
761
 
762
  except Exception as e:
763
- logger.exception("Processing failed for %s", screening_id)
764
  if screening_id in screenings_db:
765
  screenings_db[screening_id]["status"] = "failed"
766
  screenings_db[screening_id]["error"] = str(e)
@@ -768,7 +1009,7 @@ async def process_screening(screening_id: str):
768
  # ============================================================================
769
  # FastAPI App & Routes
770
  # ============================================================================
771
- app = FastAPI(title="Elderly HealthWatch AI Backend")
772
  app.add_middleware(
773
  CORSMiddleware,
774
  allow_origins=["*"],
@@ -780,9 +1021,9 @@ app.add_middleware(
780
  @app.get("/")
781
  async def read_root():
782
  return {
783
- "message": "Elderly HealthWatch AI Backend",
784
  "gcs_enabled": gcs_bucket is not None,
785
- "version": "1.0.0-gcs"
786
  }
787
 
788
  @app.get("/health")
@@ -817,7 +1058,8 @@ async def health_check():
817
  "llm_message": llm_message,
818
  "gcs_available": gcs_bucket is not None,
819
  "gcs_bucket": GCS_BUCKET_NAME if gcs_bucket else None,
820
- "fallback_enabled": True
 
821
  }
822
 
823
  @app.post("/api/v1/validate-eye-photo")
@@ -870,19 +1112,29 @@ async def upload_images(
870
  eye_image: UploadFile = File(...)
871
  ):
872
  """Upload images and start background processing"""
 
 
 
 
873
  try:
874
  screening_id = str(uuid.uuid4())
 
875
 
876
  face_path = os.path.join(TMP_DIR, f"{screening_id}_face.jpg")
877
  eye_path = os.path.join(TMP_DIR, f"{screening_id}_eye.jpg")
878
 
 
879
  face_bytes = await face_image.read()
880
  eye_bytes = await eye_image.read()
 
 
881
 
 
882
  with open(face_path, "wb") as f:
883
  f.write(face_bytes)
884
  with open(eye_path, "wb") as f:
885
  f.write(eye_bytes)
 
886
 
887
  screenings_db[screening_id] = {
888
  "id": screening_id,
@@ -896,12 +1148,14 @@ async def upload_images(
896
  "recommendations": {}
897
  }
898
 
 
899
  background_tasks.add_task(process_screening, screening_id)
 
900
 
901
  return {"screening_id": screening_id}
902
 
903
  except Exception as e:
904
- logger.exception("Upload failed")
905
  raise HTTPException(status_code=500, detail=str(e))
906
 
907
  @app.post("/api/v1/analyze/{screening_id}")
@@ -949,6 +1203,10 @@ async def get_vitals_from_upload(
949
  eye_image: UploadFile = File(...)
950
  ):
951
  """Synchronous VLM + LLM pipeline with GCS support"""
 
 
 
 
952
  if not GRADIO_AVAILABLE:
953
  raise HTTPException(
954
  status_code=503,
@@ -960,6 +1218,7 @@ async def get_vitals_from_upload(
960
  face_path = os.path.join(TMP_DIR, f"{uid}_face.jpg")
961
  eye_path = os.path.join(TMP_DIR, f"{uid}_eye.jpg")
962
 
 
963
  face_bytes = await face_image.read()
964
  eye_bytes = await eye_image.read()
965
 
@@ -967,14 +1226,19 @@ async def get_vitals_from_upload(
967
  f.write(face_bytes)
968
  with open(eye_path, "wb") as f:
969
  f.write(eye_bytes)
 
970
 
971
- # Call VLM (will use GCS if available)
972
  vlm_features, vlm_raw = await asyncio.to_thread(call_vlm, face_path, eye_path)
973
 
974
  # Call LLM
975
  llm_input = vlm_raw if vlm_raw else (vlm_features if vlm_features else "{}")
976
  structured_risk = await asyncio.to_thread(call_llm, llm_input, use_fallback_on_error=True)
977
 
 
 
 
 
978
  return {
979
  "vlm_features": vlm_features,
980
  "vlm_raw": vlm_raw,
@@ -984,7 +1248,7 @@ async def get_vitals_from_upload(
984
  }
985
 
986
  except Exception as e:
987
- logger.exception("Get vitals failed")
988
  error_msg = str(e).lower()
989
 
990
  if "quota" in error_msg or "gpu" in error_msg:
@@ -1001,6 +1265,8 @@ async def get_vitals_from_upload(
1001
  @app.post("/api/v1/get-vitals/{screening_id}")
1002
  async def get_vitals_for_screening(screening_id: str):
1003
  """Re-run VLM + LLM on existing screening"""
 
 
1004
  if screening_id not in screenings_db:
1005
  raise HTTPException(status_code=404, detail="Screening not found")
1006
 
@@ -1024,6 +1290,8 @@ async def get_vitals_for_screening(screening_id: str):
1024
  "using_fallback": structured_risk.get("fallback_mode", False)
1025
  })
1026
 
 
 
1027
  return {
1028
  "screening_id": screening_id,
1029
  "vlm_features": vlm_features,
@@ -1033,9 +1301,12 @@ async def get_vitals_for_screening(screening_id: str):
1033
  }
1034
 
1035
  except Exception as e:
1036
- logger.exception("Get vitals for screening failed")
1037
  raise HTTPException(status_code=500, detail=str(e))
1038
 
1039
  if __name__ == "__main__":
1040
  import uvicorn
1041
- uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=False)
 
 
 
 
1
  """
2
+ Elderly HealthWatch AI Backend (FastAPI) - With GCS Support and Detailed Logging
3
+ Enhanced with comprehensive logging at every step
4
  """
5
 
6
  import io
 
38
  # ============================================================================
39
  # Configuration
40
  # ============================================================================
41
+ logging.basicConfig(
42
+ level=logging.INFO,
43
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
44
+ )
45
  logger = logging.getLogger("elderly_healthwatch")
46
 
47
  GRADIO_VLM_SPACE = os.getenv("GRADIO_SPACE", "developer0hye/Qwen3-VL-8B-Instruct")
 
53
  GCS_CREDENTIALS_FILE = "gcs-credentials.json"
54
 
55
  DEFAULT_VLM_PROMPT = (
56
+ "From the provided face/eye images, compute the required screening features "
57
+ "(pallor, sclera yellowness, redness, mobility metrics, quality checks) "
58
+ "and output a clean JSON feature vector only."
 
 
59
  )
60
 
 
61
  LLM_SYSTEM_PROMPT = (
62
  "System: This assistant MUST ONLY OUTPUT a single valid JSON object as its response β€” "
63
  "no prose, no explanations, no code fences, no annotations."
 
81
  # ============================================================================
82
  def setup_gcs_client():
83
  """Initialize GCS client from credentials file"""
84
+ logger.info("=" * 80)
85
+ logger.info("INITIALIZING GOOGLE CLOUD STORAGE")
86
+ logger.info("=" * 80)
87
+
88
  if not GCS_AVAILABLE:
89
+ logger.warning("❌ GCS libraries not installed")
90
  return None, None
91
 
92
  try:
93
+ logger.info("Looking for credentials file: %s", GCS_CREDENTIALS_FILE)
94
+ logger.info("Current working directory: %s", os.getcwd())
95
+ logger.info("Credentials file exists: %s", os.path.exists(GCS_CREDENTIALS_FILE))
96
+
97
  if os.path.exists(GCS_CREDENTIALS_FILE):
98
+ logger.info("βœ… Found GCS credentials file")
99
+ logger.info("File size: %d bytes", os.path.getsize(GCS_CREDENTIALS_FILE))
100
+
101
  credentials = service_account.Credentials.from_service_account_file(GCS_CREDENTIALS_FILE)
102
+ logger.info("βœ… Credentials loaded successfully")
103
+
104
  client = storage.Client(credentials=credentials)
105
+ logger.info("βœ… Storage client created")
106
+
107
  bucket = client.bucket(GCS_BUCKET_NAME)
108
+ logger.info("βœ… Bucket reference obtained: %s", GCS_BUCKET_NAME)
109
+
110
+ # Test bucket access
111
+ try:
112
+ bucket.exists()
113
+ logger.info("βœ… Bucket access verified")
114
+ except Exception as e:
115
+ logger.warning("⚠️ Could not verify bucket access: %s", str(e))
116
+
117
+ logger.info("=" * 80)
118
+ logger.info("βœ… GCS INITIALIZATION SUCCESSFUL")
119
+ logger.info("=" * 80)
120
  return client, bucket
121
  else:
122
  logger.warning("⚠️ GCS credentials file not found at: %s", GCS_CREDENTIALS_FILE)
 
124
  return None, None
125
 
126
  except Exception as e:
127
+ logger.exception("❌ Failed to initialize GCS: %s", str(e))
128
  return None, None
129
 
130
  gcs_client, gcs_bucket = setup_gcs_client()
131
 
132
  def upload_to_gcs(local_path: str, blob_name: str) -> Optional[str]:
133
  """Upload file to GCS and return public URL"""
134
+ logger.info("-" * 80)
135
+ logger.info("UPLOADING TO GCS")
136
+ logger.info(" - Local path: %s", local_path)
137
+ logger.info(" - Blob name: %s", blob_name)
138
+ logger.info(" - File exists: %s", os.path.exists(local_path))
139
+
140
+ if os.path.exists(local_path):
141
+ logger.info(" - File size: %d bytes", os.path.getsize(local_path))
142
+
143
  if gcs_bucket is None:
144
+ logger.warning(" ❌ GCS bucket not available")
145
  return None
146
 
147
  try:
148
  blob = gcs_bucket.blob(blob_name)
149
+ logger.info(" - Blob object created")
150
+
151
  blob.upload_from_filename(local_path, content_type='image/jpeg')
152
+ logger.info(" βœ… File uploaded to GCS")
153
+
154
  blob.make_public()
155
+ logger.info(" βœ… Blob made public")
156
+
157
  public_url = blob.public_url
158
+ logger.info(" βœ… Public URL: %s", public_url)
159
+ logger.info(" - URL length: %d", len(public_url))
160
+
161
  return public_url
162
  except Exception as e:
163
+ logger.exception("❌ Failed to upload to GCS: %s", str(e))
164
  return None
165
 
166
  # ============================================================================
 
203
  # Utility Functions
204
  # ============================================================================
205
  def load_image_from_bytes(bytes_data: bytes) -> Image.Image:
206
+ logger.info("Loading image from bytes (size: %d)", len(bytes_data))
207
+ img = Image.open(io.BytesIO(bytes_data)).convert("RGB")
208
+ logger.info(" - Image loaded: %s, size: %s", img.mode, img.size)
209
+ return img
210
 
211
  def normalize_probability(val: Optional[float]) -> float:
212
  """Normalize probability to 0-1 range"""
 
386
  }
387
 
388
  # ============================================================================
389
+ # VLM Integration - WITH GCS URL SUPPORT AND DETAILED LOGGING
390
  # ============================================================================
391
  def get_gradio_client(space: str) -> Client:
392
  """Get Gradio client with optional auth"""
 
396
 
397
  def call_vlm_with_urls(face_url: str, eye_url: str, prompt: Optional[str] = None) -> Tuple[Optional[Dict], str]:
398
  """Call VLM using image URLs instead of file handles"""
399
+ logger.info("=" * 80)
400
+ logger.info("STARTING VLM CALL WITH URLS")
401
+ logger.info("=" * 80)
402
+
403
+ # Step 1: Validate inputs
404
+ logger.info("STEP 1: Validating inputs")
405
+ logger.info(" - Face URL provided: %s", bool(face_url))
406
+ logger.info(" - Face URL length: %d", len(face_url) if face_url else 0)
407
+ logger.info(" - Face URL (full): %s", face_url)
408
+ logger.info(" - Eye URL provided: %s", bool(eye_url))
409
+ logger.info(" - Eye URL length: %d", len(eye_url) if eye_url else 0)
410
+ logger.info(" - Eye URL (full): %s", eye_url)
411
+
412
  prompt = prompt or DEFAULT_VLM_PROMPT
413
+ logger.info(" - Prompt provided: %s", bool(prompt))
414
+ logger.info(" - Prompt length: %d chars", len(prompt))
415
+ logger.info(" - Prompt (first 200 chars): %s", prompt[:200])
416
 
417
+ # Step 2: Initialize Gradio client
418
+ logger.info("STEP 2: Initializing Gradio client")
419
+ logger.info(" - VLM Space: %s", GRADIO_VLM_SPACE)
420
+ logger.info(" - HF Token provided: %s", bool(HF_TOKEN))
421
 
422
+ try:
423
+ client = get_gradio_client(GRADIO_VLM_SPACE)
424
+ logger.info(" βœ… Gradio client initialized successfully")
425
+ except Exception as e:
426
+ logger.error(" ❌ Failed to initialize Gradio client: %s", str(e))
427
+ logger.exception("Client initialization error:")
428
+ raise
429
 
430
+ # Step 3: Prepare message formats
431
+ logger.info("STEP 3: Preparing message formats")
432
  message_formats = [
433
  # Format 1: URLs in files array
434
  {"text": prompt, "files": [face_url, eye_url]},
 
439
  # Format 4: Images array
440
  {"text": prompt, "images": [face_url, eye_url]},
441
  ]
442
+ logger.info(" - Prepared %d message format variations", len(message_formats))
443
 
444
  last_error = None
445
 
446
+ # Step 4: Try each message format
447
  for idx, message in enumerate(message_formats, 1):
448
+ logger.info("-" * 80)
449
+ logger.info("STEP 4.%d: Trying message format %d/%d", idx, idx, len(message_formats))
450
+ logger.info(" - Format keys: %s", list(message.keys()))
451
+ logger.info(" - Format structure (first 300 chars): %s", str(message)[:300])
452
+
453
  try:
454
+ logger.info(" - Calling VLM API with /chat_fn endpoint...")
455
  result = client.predict(message=message, history=[], api_name="/chat_fn")
456
+ logger.info(" βœ… API call succeeded!")
457
 
458
+ # Step 5: Process result
459
+ logger.info("STEP 5: Processing VLM result")
460
+ logger.info(" - Result type: %s", type(result))
461
+ logger.info(" - Result (first 500 chars): %s", str(result)[:500])
462
 
463
+ # Parse result structure
464
  if isinstance(result, (list, tuple)):
465
+ logger.info(" - Result is list/tuple with %d elements", len(result))
466
  out = result[0] if len(result) > 0 else {}
467
+ logger.info(" - Extracted first element, type: %s", type(out))
468
  elif isinstance(result, dict):
469
+ logger.info(" - Result is dict with keys: %s", list(result.keys()))
470
  out = result
471
  else:
472
+ logger.info(" - Result is %s, converting to dict", type(result))
473
  out = {"text": str(result)}
474
 
475
+ # Step 6: Extract text from result
476
+ logger.info("STEP 6: Extracting text from result")
477
  text_out = None
478
+
479
  if isinstance(out, dict):
480
+ logger.info(" - Checking dict keys for text content...")
481
  text_out = out.get("text") or out.get("output") or out.get("content") or out.get("response")
482
+ logger.info(" - Found text in key: %s",
483
+ "text" if out.get("text") else
484
+ "output" if out.get("output") else
485
+ "content" if out.get("content") else
486
+ "response" if out.get("response") else "none")
487
 
488
  if not text_out:
489
+ logger.info(" - No text in dict, trying alternative methods")
490
  if isinstance(result, str):
491
  text_out = result
492
+ logger.info(" - Using result as string directly")
493
  else:
494
  text_out = json.dumps(out)
495
+ logger.info(" - Converting to JSON string")
496
 
497
+ logger.info(" - Extracted text length: %d chars", len(text_out) if text_out else 0)
498
+ logger.info(" - Extracted text (first 500 chars): %s", text_out[:500] if text_out else "EMPTY")
499
+ logger.info(" - Extracted text (last 200 chars): %s", text_out[-200:] if text_out and len(text_out) > 200 else "")
500
 
501
+ # Validate extracted text
502
  if not text_out or len(text_out.strip()) == 0:
503
+ logger.warning(" ⚠️ VLM returned empty text, trying next format...")
504
  last_error = "Empty response"
505
  continue
506
 
507
+ # Step 7: Try to parse JSON
508
+ logger.info("STEP 7: Attempting to parse JSON from text")
509
  parsed = None
510
+
511
  try:
512
  parsed = json.loads(text_out)
513
  if not isinstance(parsed, dict):
514
+ logger.warning(" ⚠️ JSON parsed but result is not a dict, type: %s", type(parsed))
515
  parsed = None
516
  else:
517
+ logger.info(" βœ… Successfully parsed JSON directly")
518
+ logger.info(" - JSON keys: %s", list(parsed.keys()))
519
+ logger.info(" - JSON (formatted): %s", json.dumps(parsed, indent=2)[:500])
520
+ except Exception as parse_err:
521
+ logger.info(" - Direct JSON parsing failed: %s", str(parse_err))
522
+ logger.info(" - Attempting to extract JSON from surrounding text...")
523
+
524
  # Try to extract JSON from text
525
  try:
526
  first = text_out.find("{")
527
  last = text_out.rfind("}")
528
+ logger.info(" - First '{' at position: %d", first)
529
+ logger.info(" - Last '}' at position: %d", last)
530
+
531
  if first != -1 and last != -1 and last > first:
532
  json_str = text_out[first:last+1]
533
+ logger.info(" - Extracted JSON string length: %d", len(json_str))
534
+ logger.info(" - Extracted JSON string: %s", json_str[:300])
535
+
536
  parsed = json.loads(json_str)
537
  if isinstance(parsed, dict):
538
+ logger.info(" βœ… Successfully extracted and parsed JSON from text")
539
+ logger.info(" - JSON keys: %s", list(parsed.keys()))
540
  else:
541
+ logger.warning(" ⚠️ Extracted JSON is not a dict, type: %s", type(parsed))
542
  parsed = None
543
+ else:
544
+ logger.warning(" ⚠️ Could not find valid JSON delimiters")
545
  except Exception as extract_err:
546
+ logger.warning(" ❌ JSON extraction failed: %s", str(extract_err))
547
  parsed = None
548
 
549
+ # Step 8: Return successful result
550
+ logger.info("=" * 80)
551
+ logger.info("βœ… VLM CALL COMPLETED SUCCESSFULLY")
552
+ logger.info(" - Using message format: %d", idx)
553
+ logger.info(" - Parsed JSON: %s", "Yes" if parsed else "No (raw text only)")
554
+ logger.info(" - Response length: %d chars", len(text_out))
555
+ logger.info("=" * 80)
556
+
557
  return parsed, text_out
558
 
559
  except Exception as e:
560
+ logger.warning(" ❌ VLM format %d failed: %s", idx, str(e))
561
+ logger.exception("Detailed error for format %d:", idx)
562
  last_error = str(e)
563
  continue
564
 
565
  # All formats failed
566
+ logger.error("=" * 80)
567
+ logger.error("❌ ALL VLM MESSAGE FORMATS FAILED")
568
+ logger.error(" - Tried %d different formats", len(message_formats))
569
+ logger.error(" - Last error: %s", last_error)
570
+ logger.error("=" * 80)
571
  raise RuntimeError(f"All VLM message formats failed. Last error: {last_error}")
572
 
573
  def call_vlm(face_path: str, eye_path: str, prompt: Optional[str] = None) -> Tuple[Optional[Dict], str]:
 
575
  Call VLM - wrapper that handles both local files and GCS URLs
576
  Strategy: Try GCS first (if available), fallback to file handles
577
  """
578
+ logger.info("=" * 80)
579
+ logger.info("VLM CALL ORCHESTRATOR")
580
+ logger.info("=" * 80)
581
+ logger.info("Input files:")
582
+ logger.info(" - Face image: %s", face_path)
583
+ logger.info(" - Eye image: %s", eye_path)
584
+ logger.info(" - Face exists: %s", os.path.exists(face_path))
585
+ logger.info(" - Eye exists: %s", os.path.exists(eye_path))
586
+
587
+ if os.path.exists(face_path):
588
+ logger.info(" - Face size: %d bytes", os.path.getsize(face_path))
589
+ if os.path.exists(eye_path):
590
+ logger.info(" - Eye size: %d bytes", os.path.getsize(eye_path))
591
 
592
  # Strategy 1: Try GCS URLs (if GCS is set up)
593
  if gcs_bucket is not None:
594
+ logger.info("🌐 GCS is available, attempting URL-based VLM call")
595
 
596
  try:
597
  # Generate unique blob names
 
599
  face_blob_name = f"vlm_temp/{unique_id}_face.jpg"
600
  eye_blob_name = f"vlm_temp/{unique_id}_eye.jpg"
601
 
602
+ logger.info(" - Generated unique ID: %s", unique_id)
603
+ logger.info(" - Face blob name: %s", face_blob_name)
604
+ logger.info(" - Eye blob name: %s", eye_blob_name)
605
+
606
  # Upload to GCS
607
+ logger.info("Uploading images to GCS...")
608
  face_url = upload_to_gcs(face_path, face_blob_name)
609
  eye_url = upload_to_gcs(eye_path, eye_blob_name)
610
 
611
+ logger.info("Upload results:")
612
+ logger.info(" - Face URL: %s", face_url if face_url else "FAILED")
613
+ logger.info(" - Eye URL: %s", eye_url if eye_url else "FAILED")
614
+
615
  if face_url and eye_url:
616
  logger.info("βœ… Successfully uploaded to GCS, calling VLM with URLs")
617
  return call_vlm_with_urls(face_url, eye_url, prompt)
 
619
  logger.warning("⚠️ GCS upload failed, falling back to file handles")
620
  except Exception as e:
621
  logger.warning("⚠️ GCS error, falling back to file handles: %s", str(e))
622
+ logger.exception("GCS error details:")
623
  else:
624
  logger.info("ℹ️ GCS not available, using file handles for VLM")
625
 
626
  # Strategy 2: Fallback to file handles (original method)
627
+ logger.info("-" * 80)
628
+ logger.info("USING FILE HANDLE STRATEGY")
629
+ logger.info("-" * 80)
630
+
631
  if not os.path.exists(face_path) or not os.path.exists(eye_path):
632
+ logger.error("❌ File paths missing!")
633
+ logger.error(" - Face exists: %s", os.path.exists(face_path))
634
+ logger.error(" - Eye exists: %s", os.path.exists(eye_path))
635
  raise FileNotFoundError("Face or eye image path missing")
636
 
 
 
 
637
  prompt = prompt or DEFAULT_VLM_PROMPT
638
+ logger.info(" - Using prompt: %s", prompt[:100])
639
+
640
+ logger.info(" - Creating Gradio client...")
641
  client = get_gradio_client(GRADIO_VLM_SPACE)
642
+ logger.info(" βœ… Client created")
643
+
644
+ logger.info(" - Creating file handles...")
645
+ face_handle = handle_file(face_path)
646
+ eye_handle = handle_file(eye_path)
647
+ logger.info(" βœ… File handles created")
648
+ logger.info(" - Face handle type: %s", type(face_handle))
649
+ logger.info(" - Eye handle type: %s", type(eye_handle))
650
+
651
+ message = {"text": prompt, "files": [face_handle, eye_handle]}
652
+ logger.info(" - Message structure: %s", list(message.keys()))
653
 
654
  try:
655
+ logger.info(" - Calling VLM API...")
656
  result = client.predict(message=message, history=[], api_name="/chat_fn")
657
+ logger.info(" βœ… VLM API call succeeded")
658
+ logger.info(" - Raw result type: %s", type(result))
659
+ logger.info(" - Raw result (first 500 chars): %s", str(result)[:500])
660
  except Exception as e:
661
+ logger.exception("❌ VLM call with file handles failed")
662
  raise RuntimeError(f"VLM call failed: {e}")
663
 
664
  # Process result (same as in call_vlm_with_urls)
665
+ logger.info("Processing result...")
666
  if isinstance(result, (list, tuple)):
667
  out = result[0] if len(result) > 0 else {}
668
+ logger.info(" - Extracted from list, type: %s", type(out))
669
  elif isinstance(result, dict):
670
  out = result
671
+ logger.info(" - Using dict directly")
672
  else:
673
  out = {"text": str(result)}
674
+ logger.info(" - Wrapped in dict")
675
 
676
  text_out = None
677
  if isinstance(out, dict):
678
  text_out = out.get("text") or out.get("output") or out.get("content")
679
+ logger.info(" - Extracted text from dict key")
680
 
681
  if not text_out:
682
  if isinstance(result, str):
683
  text_out = result
684
+ logger.info(" - Using result string directly")
685
  else:
686
  text_out = json.dumps(out)
687
+ logger.info(" - Converted to JSON string")
688
+
689
+ logger.info(" - Final text length: %d", len(text_out) if text_out else 0)
690
+ logger.info(" - Final text (first 500 chars): %s", text_out[:500] if text_out else "EMPTY")
691
 
692
  if not text_out or len(text_out.strip()) == 0:
693
+ logger.warning(" ⚠️ VLM returned empty text")
694
  text_out = "{}"
695
 
696
  parsed = None
 
698
  parsed = json.loads(text_out)
699
  if not isinstance(parsed, dict):
700
  parsed = None
701
+ else:
702
+ logger.info(" βœ… Parsed JSON successfully")
703
  except Exception:
704
+ logger.info(" - Direct JSON parse failed, trying extraction...")
705
  try:
706
  first = text_out.find("{")
707
  last = text_out.rfind("}")
 
709
  parsed = json.loads(text_out[first:last+1])
710
  if not isinstance(parsed, dict):
711
  parsed = None
712
+ else:
713
+ logger.info(" βœ… Extracted and parsed JSON")
714
  except Exception:
715
+ logger.warning(" ⚠️ JSON extraction failed")
716
  pass
717
 
718
+ logger.info("=" * 80)
719
+ logger.info("VLM CALL COMPLETED")
720
+ logger.info(" - Parsed JSON: %s", "Yes" if parsed else "No")
721
+ logger.info("=" * 80)
722
+
723
  return parsed, text_out
724
 
725
  # ============================================================================
 
793
 
794
  def call_llm(vlm_output: Any, use_fallback_on_error: bool = True) -> Dict[str, Any]:
795
  """Call LLM with VLM output and return structured risk assessment"""
796
+ logger.info("=" * 80)
797
+ logger.info("STARTING LLM CALL")
798
+ logger.info("=" * 80)
799
+
800
  if not GRADIO_AVAILABLE:
801
+ logger.error("❌ Gradio not available")
802
  if use_fallback_on_error:
803
  return get_fallback_risk_assessment(vlm_output, reason="gradio_not_available")
804
  raise RuntimeError("gradio_client not installed")
805
 
806
  vlm_text = vlm_output if isinstance(vlm_output, str) else json.dumps(vlm_output, default=str)
807
+ logger.info("VLM input to LLM:")
808
+ logger.info(" - Type: %s", type(vlm_output))
809
+ logger.info(" - Length: %d chars", len(vlm_text))
810
+ logger.info(" - Content (first 500 chars): %s", vlm_text[:500])
811
 
812
  if not vlm_text or vlm_text.strip() in ["{}", "[]", ""]:
813
  logger.warning("VLM output is empty, using fallback assessment")
 
826
  "VLM Output:\n" + vlm_text + "\n"
827
  )
828
 
829
+ logger.info("LLM instruction length: %d chars", len(instruction))
830
+
831
  try:
832
+ logger.info("Creating LLM client for space: %s", LLM_GRADIO_SPACE)
833
  client = get_gradio_client(LLM_GRADIO_SPACE)
834
+ logger.info("βœ… LLM client created")
835
+
836
+ logger.info("Calling LLM with parameters:")
837
+ logger.info(" - max_new_tokens: 1024")
838
+ logger.info(" - temperature: 0.2")
839
+ logger.info(" - top_p: 0.9")
840
 
841
  result = client.predict(
842
  input_data=instruction,
 
852
  api_name="/chat"
853
  )
854
 
855
+ logger.info("βœ… LLM API call succeeded")
856
+ logger.info("LLM result type: %s", type(result))
857
+
858
  text_out = json.dumps(result) if isinstance(result, (dict, list)) else str(result)
859
+ logger.info("LLM raw output length: %d chars", len(text_out))
860
+ logger.info("LLM raw output (first 1000 chars):\n%s", text_out[:1000])
861
 
862
+ logger.info("Attempting to extract JSON from LLM output...")
863
  parsed = extract_json_from_llm_output(text_out)
864
+ logger.info("βœ… JSON extraction successful")
865
+ logger.info("Parsed LLM JSON:\n%s", json.dumps(parsed, indent=2))
866
 
867
+ # Validation
868
  all_zero = all(
869
  parsed.get(k, 0) == 0
870
  for k in ["jaundice_probability", "anemia_probability",
 
872
  )
873
 
874
  if all_zero and parsed.get("risk_score", 0) == 0:
875
+ logger.warning("⚠️ LLM returned all-zero assessment")
876
  parsed["summary"] = "Image analysis incomplete. Please ensure photos are clear and well-lit."
877
  parsed["recommendation"] = "Retake photos with face clearly visible and eyes open."
878
  parsed["confidence"] = 0.1
879
 
880
+ logger.info("=" * 80)
881
+ logger.info("βœ… LLM CALL COMPLETED SUCCESSFULLY")
882
+ logger.info("=" * 80)
883
  return parsed
884
 
885
  except Exception as e:
886
+ logger.exception("❌ LLM call failed: %s", str(e))
887
 
888
  error_msg = str(e).lower()
889
  if "quota" in error_msg or "gpu" in error_msg:
 
901
  # ============================================================================
902
  async def process_screening(screening_id: str):
903
  """Main processing pipeline"""
904
+ logger.info("=" * 80)
905
+ logger.info("BACKGROUND PROCESSING STARTED FOR: %s", screening_id)
906
+ logger.info("=" * 80)
907
+
908
  try:
909
  if screening_id not in screenings_db:
910
+ logger.error("Screening %s not found in database", screening_id)
911
  return
912
 
913
  screenings_db[screening_id]["status"] = "processing"
914
+ logger.info("Status updated to: processing")
915
 
916
  entry = screenings_db[screening_id]
917
  face_path = entry["face_image_path"]
918
  eye_path = entry["eye_image_path"]
919
 
920
+ logger.info("Processing images:")
921
+ logger.info(" - Face: %s", face_path)
922
+ logger.info(" - Eye: %s", eye_path)
923
+
924
  # Load images and get quality metrics
925
+ logger.info("Loading face image for quality check...")
926
  face_img = Image.open(face_path).convert("RGB")
927
+ logger.info(" βœ… Face image loaded: %s", face_img.size)
928
+
929
+ logger.info("Running face detection...")
930
  detection_result = detect_face_and_eyes(face_img)
931
+ logger.info(" - Face detected: %s", detection_result["face_detected"])
932
+ logger.info(" - Face confidence: %.3f", detection_result["face_confidence"])
933
 
934
  quality_metrics = {
935
  "face_detected": detection_result["face_detected"],
 
943
  "face_blur_estimate": int(np.var(np.asarray(face_img.convert("L"))))
944
  }
945
  screenings_db[screening_id]["quality_metrics"] = quality_metrics
946
+ logger.info("βœ… Quality metrics computed and stored")
947
 
948
+ # Call VLM
949
+ logger.info("Calling VLM...")
950
  vlm_features, vlm_raw = await asyncio.to_thread(call_vlm, face_path, eye_path)
951
+ logger.info("βœ… VLM call completed")
952
+ logger.info(" - Features returned: %s", bool(vlm_features))
953
+ logger.info(" - Raw output length: %d", len(vlm_raw) if vlm_raw else 0)
954
 
955
+ # Call LLM
956
+ logger.info("Calling LLM...")
957
  llm_input = vlm_raw if vlm_raw else (vlm_features if vlm_features else "{}")
958
  structured_risk = await asyncio.to_thread(call_llm, llm_input, use_fallback_on_error=True)
959
+ logger.info("βœ… LLM call completed")
960
+ logger.info(" - Risk score: %.2f", structured_risk.get("risk_score", 0))
961
+ logger.info(" - Using fallback: %s", structured_risk.get("fallback_mode", False))
962
 
963
  # Store results
964
  screenings_db[screening_id]["ai_results"] = {
 
996
  "recommendations": recommendations
997
  })
998
 
999
+ logger.info("=" * 80)
1000
+ logger.info("βœ… PROCESSING COMPLETED SUCCESSFULLY FOR: %s", screening_id)
1001
+ logger.info("=" * 80)
1002
 
1003
  except Exception as e:
1004
+ logger.exception("❌ Processing failed for %s", screening_id)
1005
  if screening_id in screenings_db:
1006
  screenings_db[screening_id]["status"] = "failed"
1007
  screenings_db[screening_id]["error"] = str(e)
 
1009
  # ============================================================================
1010
  # FastAPI App & Routes
1011
  # ============================================================================
1012
+ app = FastAPI(title="Elderly HealthWatch AI Backend - Enhanced Logging")
1013
  app.add_middleware(
1014
  CORSMiddleware,
1015
  allow_origins=["*"],
 
1021
@app.get("/")
async def read_root():
    """Root endpoint: service banner with identity, GCS availability, version."""
    banner = {"message": "Elderly HealthWatch AI Backend - Enhanced Logging Version"}
    banner["gcs_enabled"] = gcs_bucket is not None
    banner["version"] = "1.0.0-gcs-logging"
    return banner
1028
 
1029
  @app.get("/health")
 
1058
  "llm_message": llm_message,
1059
  "gcs_available": gcs_bucket is not None,
1060
  "gcs_bucket": GCS_BUCKET_NAME if gcs_bucket else None,
1061
+ "fallback_enabled": True,
1062
+ "enhanced_logging": True
1063
  }
1064
 
1065
  @app.post("/api/v1/validate-eye-photo")
 
1112
  eye_image: UploadFile = File(...)
1113
  ):
1114
  """Upload images and start background processing"""
1115
+ logger.info("=" * 80)
1116
+ logger.info("NEW UPLOAD REQUEST")
1117
+ logger.info("=" * 80)
1118
+
1119
  try:
1120
  screening_id = str(uuid.uuid4())
1121
+ logger.info("Generated screening ID: %s", screening_id)
1122
 
1123
  face_path = os.path.join(TMP_DIR, f"{screening_id}_face.jpg")
1124
  eye_path = os.path.join(TMP_DIR, f"{screening_id}_eye.jpg")
1125
 
1126
+ logger.info("Reading uploaded files...")
1127
  face_bytes = await face_image.read()
1128
  eye_bytes = await eye_image.read()
1129
+ logger.info(" - Face image: %d bytes", len(face_bytes))
1130
+ logger.info(" - Eye image: %d bytes", len(eye_bytes))
1131
 
1132
+ logger.info("Saving images to disk...")
1133
  with open(face_path, "wb") as f:
1134
  f.write(face_bytes)
1135
  with open(eye_path, "wb") as f:
1136
  f.write(eye_bytes)
1137
+ logger.info(" βœ… Images saved")
1138
 
1139
  screenings_db[screening_id] = {
1140
  "id": screening_id,
 
1148
  "recommendations": {}
1149
  }
1150
 
1151
+ logger.info("Adding background task...")
1152
  background_tasks.add_task(process_screening, screening_id)
1153
+ logger.info("βœ… Upload successful, processing queued")
1154
 
1155
  return {"screening_id": screening_id}
1156
 
1157
  except Exception as e:
1158
+ logger.exception("❌ Upload failed")
1159
  raise HTTPException(status_code=500, detail=str(e))
1160
 
1161
  @app.post("/api/v1/analyze/{screening_id}")
 
1203
  eye_image: UploadFile = File(...)
1204
  ):
1205
  """Synchronous VLM + LLM pipeline with GCS support"""
1206
+ logger.info("=" * 80)
1207
+ logger.info("GET VITALS REQUEST (SYNCHRONOUS)")
1208
+ logger.info("=" * 80)
1209
+
1210
  if not GRADIO_AVAILABLE:
1211
  raise HTTPException(
1212
  status_code=503,
 
1218
  face_path = os.path.join(TMP_DIR, f"{uid}_face.jpg")
1219
  eye_path = os.path.join(TMP_DIR, f"{uid}_eye.jpg")
1220
 
1221
+ logger.info("Reading and saving images...")
1222
  face_bytes = await face_image.read()
1223
  eye_bytes = await eye_image.read()
1224
 
 
1226
  f.write(face_bytes)
1227
  with open(eye_path, "wb") as f:
1228
  f.write(eye_bytes)
1229
+ logger.info("βœ… Images saved: %d + %d bytes", len(face_bytes), len(eye_bytes))
1230
 
1231
+ # Call VLM
1232
  vlm_features, vlm_raw = await asyncio.to_thread(call_vlm, face_path, eye_path)
1233
 
1234
  # Call LLM
1235
  llm_input = vlm_raw if vlm_raw else (vlm_features if vlm_features else "{}")
1236
  structured_risk = await asyncio.to_thread(call_llm, llm_input, use_fallback_on_error=True)
1237
 
1238
+ logger.info("=" * 80)
1239
+ logger.info("βœ… GET VITALS COMPLETED")
1240
+ logger.info("=" * 80)
1241
+
1242
  return {
1243
  "vlm_features": vlm_features,
1244
  "vlm_raw": vlm_raw,
 
1248
  }
1249
 
1250
  except Exception as e:
1251
+ logger.exception("❌ Get vitals failed")
1252
  error_msg = str(e).lower()
1253
 
1254
  if "quota" in error_msg or "gpu" in error_msg:
 
1265
  @app.post("/api/v1/get-vitals/{screening_id}")
1266
  async def get_vitals_for_screening(screening_id: str):
1267
  """Re-run VLM + LLM on existing screening"""
1268
+ logger.info("GET VITALS FOR EXISTING SCREENING: %s", screening_id)
1269
+
1270
  if screening_id not in screenings_db:
1271
  raise HTTPException(status_code=404, detail="Screening not found")
1272
 
 
1290
  "using_fallback": structured_risk.get("fallback_mode", False)
1291
  })
1292
 
1293
+ logger.info("βœ… Get vitals completed for screening: %s", screening_id)
1294
+
1295
  return {
1296
  "screening_id": screening_id,
1297
  "vlm_features": vlm_features,
 
1301
  }
1302
 
1303
  except Exception as e:
1304
+ logger.exception("❌ Get vitals for screening failed")
1305
  raise HTTPException(status_code=500, detail=str(e))
1306
 
1307
  if __name__ == "__main__":
1308
  import uvicorn
1309
+ logger.info("=" * 80)
1310
+ logger.info("STARTING FASTAPI SERVER")
1311
+ logger.info("=" * 80)
1312
+ uvicorn.run("app_with_detailed_logs:app", host="0.0.0.0", port=7860, reload=False)