dpv007 committed on
Commit
6201211
·
verified ·
1 Parent(s): eea760c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -11
app.py CHANGED
@@ -13,6 +13,7 @@ Notes:
13
  - This version includes a robust regex-based extractor that finds the outermost {...} block
14
  in the LLM output, extracts numeric values for the required keys, and always returns
15
  numeric defaults (no NaN) so frontends will not receive null/None for numeric fields.
 
16
  """
17
 
18
  import io
@@ -41,6 +42,7 @@ except Exception:
41
 
42
  # Configure logging
43
  logging.basicConfig(level=logging.INFO)
 
44
 
45
  # Configuration for remote VLM and LLM spaces (change to your target Space names)
46
  GRADIO_VLM_SPACE = os.getenv("GRADIO_SPACE", "developer0hye/Qwen3-VL-8B-Instruct")
@@ -286,10 +288,10 @@ def run_vlm_and_get_features(face_path: str, eye_path: str, prompt: Optional[str
286
  message = {"text": prompt, "files": [handle_file(face_path), handle_file(eye_path)]}
287
 
288
  try:
289
- logging.info("Calling VLM Space %s", GRADIO_VLM_SPACE)
290
  result = client.predict(message=message, history=[], api_name="/chat_fn")
291
  except Exception as e:
292
- logging.exception("VLM call failed")
293
  raise RuntimeError(f"VLM call failed: {e}")
294
 
295
  if not result:
@@ -385,7 +387,7 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
385
  input_payload_str = instruction # we feed only the instruction (which contains the VLM output)
386
 
387
  try:
388
- logging.info("Calling LLM Space %s with strict JSON-only prompt", LLM_GRADIO_SPACE)
389
  result = client.predict(
390
  input_data=input_payload_str,
391
  max_new_tokens=float(max_new_tokens),
@@ -400,7 +402,7 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
400
  api_name="/chat"
401
  )
402
  except Exception as e:
403
- logging.exception("LLM call failed")
404
  raise RuntimeError(f"LLM call failed: {e}")
405
 
406
  # Normalize result to string
@@ -412,20 +414,30 @@ def run_llm_on_vlm(vlm_features_or_raw: Any,
412
  if not text_out or len(text_out.strip()) == 0:
413
  raise RuntimeError("LLM returned empty response")
414
 
 
 
 
415
  # Use regex-based extraction (robust)
416
  try:
417
  parsed = extract_json_via_regex(text_out)
418
  except Exception as e:
419
- logging.exception("Regex JSON extraction failed")
420
  # As a last fallback, attempt naive JSON parsing; if that fails, raise with raw output
421
  try:
422
  parsed = json.loads(text_out)
423
  except Exception:
 
424
  raise ValueError(f"Failed to extract JSON from LLM output: {e}\nRaw Output:\n{text_out}")
425
 
426
  if not isinstance(parsed, dict):
427
  raise ValueError("Parsed LLM output is not a JSON object/dict")
428
 
 
 
 
 
 
 
429
  # Final safety clamps (already ensured by extractor, but keep defensive checks)
430
  def safe_prob(val):
431
  try:
@@ -679,10 +691,10 @@ async def process_screening(screening_id: str):
679
  """
680
  try:
681
  if screening_id not in screenings_db:
682
- logging.error("[process_screening] screening %s not found", screening_id)
683
  return
684
  screenings_db[screening_id]["status"] = "processing"
685
- logging.info("[process_screening] Starting %s", screening_id)
686
 
687
  entry = screenings_db[screening_id]
688
  face_path = entry.get("face_image_path")
@@ -768,7 +780,7 @@ async def process_screening(screening_id: str):
768
  "vlm_raw": vlm_raw
769
  })
770
  except Exception as e:
771
- logging.exception("VLM feature extraction failed")
772
  screenings_db[screening_id].setdefault("ai_results", {})
773
  screenings_db[screening_id]["ai_results"].update({"vlm_error": str(e)})
774
  vlm_features = None
@@ -798,7 +810,7 @@ async def process_screening(screening_id: str):
798
  screenings_db[screening_id].setdefault("ai_results", {})
799
  screenings_db[screening_id]["ai_results"].update({"structured_risk": structured_risk})
800
  except Exception as e:
801
- logging.exception("LLM processing failed")
802
  screenings_db[screening_id].setdefault("ai_results", {})
803
  screenings_db[screening_id]["ai_results"].update({"llm_error": str(e)})
804
  structured_risk = {
@@ -850,14 +862,14 @@ async def process_screening(screening_id: str):
850
  "recommendations": recommendations
851
  })
852
 
853
- logging.info("[process_screening] Completed %s", screening_id)
854
  except Exception as e:
855
  traceback.print_exc()
856
  if screening_id in screenings_db:
857
  screenings_db[screening_id]["status"] = "failed"
858
  screenings_db[screening_id]["error"] = str(e)
859
  else:
860
- logging.error("[process_screening] Failed for unknown screening %s: %s", screening_id, str(e))
861
 
862
  # -----------------------
863
  # Run server (for local debugging)
 
13
  - This version includes a robust regex-based extractor that finds the outermost {...} block
14
  in the LLM output, extracts numeric values for the required keys, and always returns
15
  numeric defaults (no NaN) so frontends will not receive null/None for numeric fields.
16
+ - This variant logs raw LLM output and the parsed JSON using Python logging.
17
  """
18
 
19
  import io
 
42
 
43
  # Configure logging
44
  logging.basicConfig(level=logging.INFO)
45
+ logger = logging.getLogger("elderly_healthwatch")
46
 
47
  # Configuration for remote VLM and LLM spaces (change to your target Space names)
48
  GRADIO_VLM_SPACE = os.getenv("GRADIO_SPACE", "developer0hye/Qwen3-VL-8B-Instruct")
 
288
  message = {"text": prompt, "files": [handle_file(face_path), handle_file(eye_path)]}
289
 
290
  try:
291
+ logger.info("Calling VLM Space %s", GRADIO_VLM_SPACE)
292
  result = client.predict(message=message, history=[], api_name="/chat_fn")
293
  except Exception as e:
294
+ logger.exception("VLM call failed")
295
  raise RuntimeError(f"VLM call failed: {e}")
296
 
297
  if not result:
 
387
  input_payload_str = instruction # we feed only the instruction (which contains the VLM output)
388
 
389
  try:
390
+ logger.info("Calling LLM Space %s with strict JSON-only prompt", LLM_GRADIO_SPACE)
391
  result = client.predict(
392
  input_data=input_payload_str,
393
  max_new_tokens=float(max_new_tokens),
 
402
  api_name="/chat"
403
  )
404
  except Exception as e:
405
+ logger.exception("LLM call failed")
406
  raise RuntimeError(f"LLM call failed: {e}")
407
 
408
  # Normalize result to string
 
414
  if not text_out or len(text_out.strip()) == 0:
415
  raise RuntimeError("LLM returned empty response")
416
 
417
+ # LOG raw output for debugging / auditing
418
+ logger.info("LLM raw output:\n%s", text_out)
419
+
420
  # Use regex-based extraction (robust)
421
  try:
422
  parsed = extract_json_via_regex(text_out)
423
  except Exception as e:
424
+ logger.exception("Regex JSON extraction failed")
425
  # As a last fallback, attempt naive JSON parsing; if that fails, raise with raw output
426
  try:
427
  parsed = json.loads(text_out)
428
  except Exception:
429
+ # include raw output in the exception text so logs contain it
430
  raise ValueError(f"Failed to extract JSON from LLM output: {e}\nRaw Output:\n{text_out}")
431
 
432
  if not isinstance(parsed, dict):
433
  raise ValueError("Parsed LLM output is not a JSON object/dict")
434
 
435
+ # LOG parsed JSON (pretty-printed)
436
+ try:
437
+ logger.info("LLM parsed JSON:\n%s", json.dumps(parsed, indent=2, ensure_ascii=False))
438
+ except Exception:
439
+ logger.info("LLM parsed JSON (raw dict): %s", str(parsed))
440
+
441
  # Final safety clamps (already ensured by extractor, but keep defensive checks)
442
  def safe_prob(val):
443
  try:
 
691
  """
692
  try:
693
  if screening_id not in screenings_db:
694
+ logger.error("[process_screening] screening %s not found", screening_id)
695
  return
696
  screenings_db[screening_id]["status"] = "processing"
697
+ logger.info("[process_screening] Starting %s", screening_id)
698
 
699
  entry = screenings_db[screening_id]
700
  face_path = entry.get("face_image_path")
 
780
  "vlm_raw": vlm_raw
781
  })
782
  except Exception as e:
783
+ logger.exception("VLM feature extraction failed")
784
  screenings_db[screening_id].setdefault("ai_results", {})
785
  screenings_db[screening_id]["ai_results"].update({"vlm_error": str(e)})
786
  vlm_features = None
 
810
  screenings_db[screening_id].setdefault("ai_results", {})
811
  screenings_db[screening_id]["ai_results"].update({"structured_risk": structured_risk})
812
  except Exception as e:
813
+ logger.exception("LLM processing failed")
814
  screenings_db[screening_id].setdefault("ai_results", {})
815
  screenings_db[screening_id]["ai_results"].update({"llm_error": str(e)})
816
  structured_risk = {
 
862
  "recommendations": recommendations
863
  })
864
 
865
+ logger.info("[process_screening] Completed %s", screening_id)
866
  except Exception as e:
867
  traceback.print_exc()
868
  if screening_id in screenings_db:
869
  screenings_db[screening_id]["status"] = "failed"
870
  screenings_db[screening_id]["error"] = str(e)
871
  else:
872
+ logger.error("[process_screening] Failed for unknown screening %s: %s", screening_id, str(e))
873
 
874
  # -----------------------
875
  # Run server (for local debugging)