Update src/ai_processor.py

src/ai_processor.py  CHANGED  (+86 -33)
@@ -28,15 +28,12 @@ logging.basicConfig(
 def _log_kv(prefix: str, kv: Dict):
     logging.debug(prefix + " | " + " | ".join(f"{k}={v}" for k, v in kv.items()))
 
-# ---
-
-
-
-
-
-        logging.info("Registered @spaces.GPU stub (enable_queue=False).")
-    except Exception:
-        pass
+# --- Spaces GPU decorator (REQUIRED) ---
+from spaces import GPU as _SPACES_GPU
+
+@_SPACES_GPU(enable_queue=False)
+def smartheal_gpu_stub(ping: int = 0) -> str:
+    return "ready"
 
 UPLOADS_DIR = "uploads"
 os.makedirs(UPLOADS_DIR, exist_ok=True)
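The added stub exists so the Space registers a @spaces.GPU entry point at import time while inference itself stays on CPU. A minimal sketch of keeping that import optional for local runs (the ImportError fallback below is an assumption, not part of this commit; it mirrors the try/except guard the removed code used around stub registration):

try:
    from spaces import GPU as _SPACES_GPU  # available on Hugging Face Spaces
except ImportError:
    # Hypothetical no-op fallback so the module still imports without `spaces`.
    def _SPACES_GPU(**_kwargs):
        def _wrap(fn):
            return fn
        return _wrap

@_SPACES_GPU(enable_queue=False)
def smartheal_gpu_stub(ping: int = 0) -> str:
    return "ready"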
@@ -91,49 +88,105 @@ def _import_hf_hub():
     from huggingface_hub import HfApi, HfFolder
     return HfApi, HfFolder
 
-# ----------
-
+# ---------- SmartHeal prompts (system + user prefix) ----------
+SMARTHEAL_SYSTEM_PROMPT = """\
+You are SmartHeal Clinical Assistant, a wound-care decision-support system.
+You analyze wound photographs and brief patient context to produce careful,
+specific, guideline-informed recommendations WITHOUT diagnosing. You always:
+- Use the measurements calculated by the vision pipeline as ground truth.
+- Prefer concise, actionable steps tailored to exudate level, infection risk, and pain.
+- Flag uncertainties and red flags that need escalation to a clinician.
+- Avoid contraindicated advice; do not infer unseen comorbidities.
+- Keep under 300 words and use the requested headings exactly.
+- Tone: professional, clear, and conservative; no definitive medical claims.
+- Safety: remind the user to seek clinician review for changes or red flags.
+"""
+
+SMARTHEAL_USER_PREFIX = """\
+Patient: {patient_info}
+Visual findings: type={wound_type}, size={length_cm}x{breadth_cm} cm, area={area_cm2} cm^2,
+detection_conf={det_conf:.2f}, calibration={px_per_cm} px/cm.
+
+Guideline context (snippets you can draw principles from; do not quote at length):
+{guideline_context}
+
+Write a structured answer with these headings exactly:
+1. Clinical Summary (max 4 bullet points)
+2. Likely Stage/Type (if uncertain, say 'uncertain')
+3. Treatment Plan (specific dressing choices and frequency based on exudate/infection risk)
+4. Red Flags (what to escalate and when)
+5. Follow-up Cadence (days)
+6. Notes (assumptions/uncertainties)
+
+Keep to 220–300 words. Do NOT provide diagnosis. Avoid contraindicated advice.
+"""
+
+# ---------- VLM (MedGemma replaced with Qwen2-VL) ----------
+def generate_medgemma_report(  # kept name so callers don't change
     patient_info: str,
     visual_results: Dict,
     guideline_context: str,
     image_pil: Image.Image,
     max_new_tokens: Optional[int] = None,
 ) -> str:
-
+    """
+    MedGemma replacement using Qwen/Qwen2-VL-2B-Instruct via image-text-to-text.
+    """
+    if os.getenv("SMARTHEAL_ENABLE_VLM", "1") != "1":
         return "⚠️ VLM disabled"
+
+    model_id = os.getenv("SMARTHEAL_VLM_MODEL", "Qwen/Qwen2-VL-2B-Instruct")
+    max_new_tokens = max_new_tokens or int(os.getenv("SMARTHEAL_VLM_MAX_TOKENS", "600"))
+
     try:
         from transformers import pipeline
         pipe = pipeline(
             task="image-text-to-text",
-            model=
-            device_map=None,
+            model=model_id,
+            device_map=None,  # keep CPU by default for Spaces stability
             token=HF_TOKEN,
             trust_remote_code=True,
             model_kwargs={"low_cpu_mem_usage": True},
         )
-
-
-
-
-
-
-
-        )
-
-
-
-
-
+    except Exception as e:
+        logging.error(f"❌ Could not load VLM ({model_id}): {e}")
+        return "⚠️ VLM error"
+
+    uprompt = SMARTHEAL_USER_PREFIX.format(
+        patient_info=patient_info,
+        wound_type=visual_results.get("wound_type", "Unknown"),
+        length_cm=visual_results.get("length_cm", 0),
+        breadth_cm=visual_results.get("breadth_cm", 0),
+        area_cm2=visual_results.get("surface_area_cm2", 0),
+        det_conf=float(visual_results.get("detection_confidence", 0.0)),
+        px_per_cm=visual_results.get("px_per_cm", "?"),
+        guideline_context=(guideline_context or "")[:900],
+    )
+
+    try:
+        messages = [
+            {"role": "system", "content": [{"type": "text", "text": SMARTHEAL_SYSTEM_PROMPT}]},
+            {"role": "user", "content": [
+                {"type": "image", "image": image_pil},
+                {"type": "text", "text": uprompt},
+            ]},
+        ]
+        out = pipe(text=messages, max_new_tokens=max_new_tokens, do_sample=False, temperature=0.2)
         if out and len(out) > 0:
             try:
-
+                text = out[0]["generated_text"][-1].get("content", "")
             except Exception:
-
+                text = out[0].get("generated_text", "")
+            text = (text or "").strip()
+            return text if text else "⚠️ Empty response"
         return "⚠️ No output generated"
     except Exception as e:
-        logging.error(f"❌
+        logging.error(f"❌ VLM generation error: {e}")
        return "⚠️ VLM error"
 
+UPLOADS_DIR = "uploads"
+os.makedirs(UPLOADS_DIR, exist_ok=True)
+
 # ---------- Initialize CPU models ----------
 def load_yolo_model():
     YOLO = _import_ultralytics()
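Since generate_medgemma_report keeps its old name and signature, existing callers need no changes. A smoke-test sketch of the new path (the file name and every visual_results value here are hypothetical; the keys mirror the .get() lookups above):

from PIL import Image

demo_results = {
    "wound_type": "Pressure ulcer",
    "length_cm": 3.2,
    "breadth_cm": 1.8,
    "surface_area_cm2": 4.5,
    "detection_confidence": 0.91,
    "px_per_cm": 38.0,
}
report = generate_medgemma_report(
    patient_info="67-year-old, diabetic, sacral wound, moderate exudate",
    visual_results=demo_results,
    guideline_context="",  # optional retrieved guideline snippets
    image_pil=Image.open("demo_wound.jpg"),  # hypothetical sample image
)
print(report)  # structured report, or a "⚠️ ..." sentinel on failure

The chat-style pipeline normally returns the conversation with the assistant turn appended, which is why the parser first tries out[0]["generated_text"][-1]["content"] and only then falls back to a plain generated_text string.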
@@ -343,7 +396,7 @@ def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.ndarray:
     seed_dil = cv2.dilate(seed01, k, iterations=1)
     gc[seed01.astype(bool)] = cv2.GC_PR_FGD
     gc[seed_dil.astype(bool)] = cv2.GC_FGD
-    gc[0, :], gc[-1, :], gc[:, 0], gc[:,
+    gc[0, :], gc[-1, :], gc[:, 0], gc[:, -1] = cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD
     bgdModel = np.zeros((1, 65), np.float64)
     fgdModel = np.zeros((1, 65), np.float64)
     cv2.grabCut(bgr, gc, None, bgdModel, fgdModel, iters, cv2.GC_INIT_WITH_MASK)
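The new line pins the first and last row and column of the mask to cv2.GC_BGD (definite background) before grabCut runs, so refinement cannot grow the wound mask out to the frame edge. A standalone sketch of the mask-label scheme (the array size and seed rectangle are arbitrary):

import cv2
import numpy as np

h, w = 128, 128
gc = np.full((h, w), cv2.GC_PR_BGD, np.uint8)                 # probable background everywhere
gc[h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = cv2.GC_PR_FGD  # probable-foreground seed region
gc[0, :], gc[-1, :], gc[:, 0], gc[:, -1] = (cv2.GC_BGD,) * 4  # hard background border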
@@ -811,7 +864,7 @@ Automated analysis provides quantitative measurements; verify via clinical exami
             )
             if report and report.strip() and not report.startswith(("⚠️", "❌")):
                 return report
-            logging.warning("
+            logging.warning("VLM unavailable/invalid; using fallback.")
             return self._generate_fallback_report(patient_info, visual_results, guideline_context)
         except Exception as e:
             logging.error(f"Report generation failed: {e}")
@@ -917,4 +970,4 @@ Automated analysis provides quantitative measurements; verify via clinical exami
                 "report": f"Analysis initialization failed: {str(e)}",
                 "saved_image_path": None,
                 "guideline_context": "",
-            }
+            }