SmartHeal committed (verified)
Commit 085a6f7 · Parent(s): 5b2e7ae

Update src/ai_processor.py

Files changed (1)
  1. src/ai_processor.py +670 -253
src/ai_processor.py CHANGED
@@ -1,42 +1,87 @@
  # smartheal_ai_processor.py
- # Full, functional module with conditional Spaces GPU support and CPU fallbacks.

  import os
- import time
  import logging
  from datetime import datetime
- from typing import Optional, Dict, List

  import cv2
  import numpy as np
  from PIL import Image

- # =============== LOGGING SETUP ===============
- logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

- # =============== CONFIGURATION ===============
  UPLOADS_DIR = "uploads"
  os.makedirs(UPLOADS_DIR, exist_ok=True)

  HF_TOKEN = os.getenv("HF_TOKEN", None)
  YOLO_MODEL_PATH = "src/best.pt"
- SEG_MODEL_PATH = "src/segmentation_model.h5"  # optional
  GUIDELINE_PDFS = ["src/eHealth in Wound Care.pdf", "src/IWGDF Guideline.pdf", "src/evaluation.pdf"]
- DATASET_ID = "SmartHeal/wound-image-uploads"  # optional (set HF_TOKEN too)
- PIXELS_PER_CM = 38  # heuristic

- # =============== GLOBAL CACHES ===============
  models_cache: Dict[str, object] = {}
  knowledge_base_cache: Dict[str, object] = {}

- # ---------- Optional imports guarded ----------
  def _import_ultralytics():
-     from ultralytics import YOLO
      return YOLO

  def _import_tf_loader():
      import tensorflow as tf
-     tf.config.set_visible_devices([], "GPU")  # force CPU
      from tensorflow.keras.models import load_model
      return load_model

@@ -60,143 +105,136 @@ def _import_hf_hub():
      from huggingface_hub import HfApi, HfFolder
      return HfApi, HfFolder

- # =============== SPACES GPU CONDITIONAL ===============
- def _spaces_gpu_available() -> bool:
-     try:
-         import torch
-         return bool(torch.cuda.is_available())
-     except Exception:
-         return False

- def _spaces_lib_available() -> bool:
      try:
-         import spaces  # noqa
-         return True
      except Exception:
-         return False
-
- HAVE_SPACES_GPU = _spaces_gpu_available() and _spaces_lib_available()
-
- if HAVE_SPACES_GPU:
-     import spaces  # define only if available & GPU present
-
-     @spaces.GPU(enable_queue=True, duration=90)
-     def generate_medgemma_report_with_timeout(
-         patient_info: str,
-         visual_results: Dict,
-         guideline_context: str,
-         image_pil: Image.Image,
-         max_new_tokens: Optional[int] = None,
-     ) -> str:
-         """Runs on Spaces GPU only; callers keep one signature on both paths."""
-         import torch
-         from transformers import pipeline
-         try:
-             torch.cuda.empty_cache()
-
-             prompt = f"""
- You are a medical AI assistant. Analyze this wound image and patient data.
- Patient: {patient_info}
- Wound: {visual_results.get('wound_type', 'Unknown')} - {visual_results.get('length_cm', 0)}×{visual_results.get('breadth_cm', 0)} cm
- Provide a structured report with:
- 1. Clinical Summary
- 2. Treatment Recommendations
- 3. Risk Assessment
- 4. Monitoring Plan
- """.strip()
-
-             pipe = pipeline(
-                 "image-text-to-text",
-                 model="google/medgemma-4b-it",
-                 torch_dtype=torch.bfloat16,
-                 device_map="auto",
-                 token=HF_TOKEN,
-                 model_kwargs={"low_cpu_mem_usage": True, "use_cache": True},
-             )

-             messages = [
-                 {
-                     "role": "user",
-                     "content": [
-                         {"type": "image", "image": image_pil},
-                         {"type": "text", "text": prompt},
-                     ],
-                 }
-             ]
-
-             t0 = time.time()
-             out = pipe(
-                 text=messages,
-                 max_new_tokens=max_new_tokens or 800,
-                 do_sample=False,
-                 temperature=0.7,
-                 pad_token_id=pipe.tokenizer.eos_token_id,
-             )
-             logging.info(f"✅ MedGemma completed in {time.time() - t0:.2f}s")

-             if out and len(out) > 0:
-                 # Defensive extraction
-                 try:
-                     return out[0]["generated_text"][-1].get("content", "").strip() or "⚠️ Empty response"
-                 except Exception:
-                     return (out[0].get("generated_text", "") or "").strip() or "⚠️ Empty response"
-             return "⚠️ No output generated"
-         except Exception as e:
-             logging.error(f"❌ MedGemma generation error: {e}")
-             return f"❌ Report generation failed: {str(e)}"
-         finally:
-             try:
-                 torch.cuda.empty_cache()
-             except Exception:
-                 pass
- else:
-     def generate_medgemma_report_with_timeout(
-         patient_info: str,
-         visual_results: Dict,
-         guideline_context: str,
-         image_pil: Image.Image,
-         max_new_tokens: Optional[int] = None,
-     ) -> str:
-         """CPU-only path: return a warning so caller uses fallback."""
-         return "⚠️ GPU not available"

- # =============== MODEL INITIALIZATION (CPU-SAFE) ===============
  def load_yolo_model():
      YOLO = _import_ultralytics()
-     return YOLO(YOLO_MODEL_PATH)
-
  def load_segmentation_model():
      load_model = _import_tf_loader()
-     return load_model(SEG_MODEL_PATH, compile=False)

  def load_classification_pipeline():
      pipe = _import_hf_cls()
-     return pipe(
-         "image-classification",
-         model="Hemg/Wound-classification",
-         token=HF_TOKEN,
-         device="cpu",
-     )

  def load_embedding_model():
      Emb = _import_embeddings()
      return Emb(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})

  def initialize_cpu_models() -> None:
-     """Initialize all CPU-only models once with robust fallbacks."""
-     # Hugging Face auth (optional)
      if HF_TOKEN:
          try:
              HfApi, HfFolder = _import_hf_hub()
              HfFolder.save_token(HF_TOKEN)
-             logging.info("✅ HuggingFace token set")
          except Exception as e:
              logging.warning(f"HF token save failed: {e}")

      if "det" not in models_cache:
          try:
              models_cache["det"] = load_yolo_model()
-             logging.info("✅ YOLO model loaded (CPU)")
          except Exception as e:
              logging.error(f"YOLO load failed: {e}")

@@ -204,46 +242,46 @@ def initialize_cpu_models() -> None:
          try:
              if os.path.exists(SEG_MODEL_PATH):
                  models_cache["seg"] = load_segmentation_model()
-                 logging.info("✅ Segmentation model loaded (CPU)")
              else:
                  models_cache["seg"] = None
-                 logging.warning("Segmentation model file not found; skipping seg.")
          except Exception as e:
              models_cache["seg"] = None
-             logging.warning(f"Segmentation model not available: {e}")

      if "cls" not in models_cache:
          try:
              models_cache["cls"] = load_classification_pipeline()
-             logging.info("✅ Classification pipeline loaded (CPU)")
          except Exception as e:
              models_cache["cls"] = None
-             logging.warning(f"Classification pipeline not available: {e}")

      if "embedding_model" not in models_cache:
          try:
              models_cache["embedding_model"] = load_embedding_model()
-             logging.info("✅ Embedding model loaded (CPU)")
          except Exception as e:
              models_cache["embedding_model"] = None
-             logging.warning(f"Embedding model not available: {e}")

  def setup_knowledge_base() -> None:
-     """Load PDFs and create FAISS vector store (optional)."""
      if "vector_store" in knowledge_base_cache:
          return
-
-     docs = []
      try:
          PyPDFLoader = _import_langchain_pdf()
          for pdf in GUIDELINE_PDFS:
              if os.path.exists(pdf):
                  try:
-                     loader = PyPDFLoader(pdf)
-                     docs.extend(loader.load())
                      logging.info(f"Loaded PDF: {pdf}")
                  except Exception as e:
-                     logging.warning(f"Failed to load PDF {pdf}: {e}")
      except Exception as e:
          logging.warning(f"LangChain PDF loader unavailable: {e}")

@@ -251,146 +289,536 @@ def setup_knowledge_base() -> None:
      try:
          from langchain.text_splitter import RecursiveCharacterTextSplitter
          FAISS = _import_langchain_faiss()
-         splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
-         chunks = splitter.split_documents(docs)
          knowledge_base_cache["vector_store"] = FAISS.from_documents(chunks, models_cache["embedding_model"])
-         logging.info(f"✅ Knowledge base ready with {len(chunks)} chunks")
      except Exception as e:
          knowledge_base_cache["vector_store"] = None
-         logging.warning(f"Knowledge base unavailable: {e}")
      else:
          knowledge_base_cache["vector_store"] = None
-         logging.warning("Knowledge base disabled (no docs or embeddings).")

- # Initialize on import
  initialize_cpu_models()
  setup_knowledge_base()

- # =============== AI PROCESSOR ===============
  class AIProcessor:
      def __init__(self):
          self.models_cache = models_cache
          self.knowledge_base_cache = knowledge_base_cache
-         self.px_per_cm = PIXELS_PER_CM
          self.uploads_dir = UPLOADS_DIR
          self.dataset_id = DATASET_ID
          self.hf_token = HF_TOKEN

-     # ---------- Image utilities ----------
      def _ensure_analysis_dir(self) -> str:
          out_dir = os.path.join(self.uploads_dir, "analysis")
          os.makedirs(out_dir, exist_ok=True)
          return out_dir

      def perform_visual_analysis(self, image_pil: Image.Image) -> Dict:
-         """YOLO detect → (optional) Keras seg → (optional) HF classifier → save visuals."""
          try:
              image_cv = cv2.cvtColor(np.array(image_pil.convert("RGB")), cv2.COLOR_RGB2BGR)

-             det = self.models_cache.get("det")
-             if det is None:
                  raise RuntimeError("YOLO model not loaded")
-
-             # YOLO on CPU
-             results = det.predict(image_cv, verbose=False, device="cpu")
-             if not results or not getattr(results[0], "boxes", None) or len(results[0].boxes) == 0:
-                 raise ValueError("No wound could be detected.")

              box = results[0].boxes[0].xyxy[0].cpu().numpy().astype(int)
              x1, y1, x2, y2 = [int(v) for v in box]
              x1, y1 = max(0, x1), max(0, y1)
              x2, y2 = min(image_cv.shape[1], x2), min(image_cv.shape[0], y2)
-             detected_region_cv = image_cv[y1:y2, x1:x2]
-
-             # Optional segmentation
-             seg_model = self.models_cache.get("seg")
-             length = breadth = area = 0.0
-             seg_path = None
-             if seg_model is not None and detected_region_cv.size > 0:
                  try:
-                     input_size = seg_model.input_shape[1:3]
-                     resized = cv2.resize(detected_region_cv, (input_size[1], input_size[0]))
-                     mask_pred = seg_model.predict(np.expand_dims(resized / 255.0, 0), verbose=0)[0]
-                     mask_np = (mask_pred[:, :, 0] > 0.5).astype(np.uint8)
-
-                     contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-                     if contours:
-                         cnt = max(contours, key=cv2.contourArea)
-                         x, y, w, h = cv2.boundingRect(cnt)
-                         length = round(h / self.px_per_cm, 2)
-                         breadth = round(w / self.px_per_cm, 2)
-                         area = round(cv2.contourArea(cnt) / (self.px_per_cm ** 2), 2)
-
-                     # overlay visualization
-                     mask_resized = cv2.resize(
-                         mask_np * 255,
-                         (detected_region_cv.shape[1], detected_region_cv.shape[0]),
-                         interpolation=cv2.INTER_NEAREST,
-                     )
-                     overlay = detected_region_cv.copy()
-                     overlay[mask_resized > 127] = [0, 0, 255]
-                     seg_vis = cv2.addWeighted(detected_region_cv, 0.7, overlay, 0.3, 0)
-
-                     ts = datetime.now().strftime("%Y%m%d_%H%M%S")
-                     out_dir = self._ensure_analysis_dir()
-                     seg_path = os.path.join(out_dir, f"segmentation_{ts}.png")
-                     cv2.imwrite(seg_path, seg_vis)
-                 except Exception as e:
-                     logging.warning(f"Segmentation step skipped: {e}")

-             # Optional classification
              wound_type = "Unknown"
              cls_pipe = self.models_cache.get("cls")
              if cls_pipe is not None:
                  try:
-                     detected_image_pil = Image.fromarray(cv2.cvtColor(detected_region_cv, cv2.COLOR_BGR2RGB))
-                     preds = cls_pipe(detected_image_pil)
                      if preds:
                          wound_type = max(preds, key=lambda x: x.get("score", 0)).get("label", "Unknown")
                  except Exception as e:
-                     logging.warning(f"Classification step failed: {e}")
-
-             # Save detection & original
-             out_dir = self._ensure_analysis_dir()
-             ts = datetime.now().strftime("%Y%m%d_%H%M%S")
-             det_vis = image_cv.copy()
-             cv2.rectangle(det_vis, (x1, y1), (x2, y2), (0, 255, 0), 2)
-             det_path = os.path.join(out_dir, f"detection_{ts}.png")
-             cv2.imwrite(det_path, det_vis)
-
-             original_path = os.path.join(out_dir, f"original_{ts}.png")
-             cv2.imwrite(original_path, image_cv)

              return {
                  "wound_type": wound_type,
-                 "length_cm": length,
-                 "breadth_cm": breadth,
-                 "surface_area_cm2": area,
                  "detection_confidence": float(results[0].boxes.conf[0].cpu().item())
-                 if getattr(results[0].boxes, "conf", None) is not None
-                 else 0.0,
-                 "detection_image_path": det_path,
-                 "segmentation_image_path": seg_path,
                  "original_image_path": original_path,
              }
          except Exception as e:
-             logging.error(f"Visual analysis failed: {e}")
              raise

      def query_guidelines(self, query: str) -> str:
-         """Query the knowledge base (optional)."""
          try:
              vs = self.knowledge_base_cache.get("vector_store")
              if not vs:
                  return "Knowledge base is not available."
-             # support both old and new retriever APIs
-             try:
-                 retriever = vs.as_retriever(search_kwargs={"k": 5})
-                 docs = retriever.get_relevant_documents(query)  # LC >= 0.2
-             except Exception:
-                 retriever = vs.as_retriever(search_kwargs={"k": 5})
-                 # older invoke API
-                 docs = retriever.invoke(query)
              lines: List[str] = []
              for d in docs:
                  src = (d.metadata or {}).get("source", "N/A")
@@ -401,9 +829,7 @@ class AIProcessor:
              logging.warning(f"Guidelines query failed: {e}")
              return f"Guidelines query failed: {str(e)}"

-     # ---------- Report builders ----------
      def _generate_fallback_report(self, patient_info: str, visual_results: Dict, guideline_context: str) -> str:
-         """Plaintext/markdown fallback when MedGemma is unavailable."""
          return f"""# 🩺 SmartHeal AI - Comprehensive Wound Analysis Report
  ## 📋 Patient Information
  {patient_info}
@@ -412,10 +838,12 @@ class AIProcessor:
  - **Dimensions**: {visual_results.get('length_cm', 0)} cm × {visual_results.get('breadth_cm', 0)} cm
  - **Surface Area**: {visual_results.get('surface_area_cm2', 0)} cm²
  - **Detection Confidence**: {visual_results.get('detection_confidence', 0):.1%}
  ## 📊 Analysis Images
  - **Original**: {visual_results.get('original_image_path', 'N/A')}
  - **Detection**: {visual_results.get('detection_image_path', 'N/A')}
  - **Segmentation**: {visual_results.get('segmentation_image_path', 'N/A')}
  ## 🎯 Clinical Summary
  Automated analysis provides quantitative measurements; verify via clinical examination.
  ## 💊 Recommendations
@@ -423,10 +851,10 @@ Automated analysis provides quantitative measurements; verify via clinical exami
  - Debride necrotic tissue if indicated (clinical decision)
  - Document with serial photos and measurements
  ## 📅 Monitoring
- - Daily in week 1, then every 2-3 days (or as indicated)
  - Weekly progress review
  ## 📚 Guideline Context
- {(guideline_context or '')[:800]}{'...' if guideline_context and len(guideline_context) > 800 else ''}
  **Disclaimer:** Automated, for decision support only. Verify clinically.
  """
@@ -438,22 +866,19 @@ Automated analysis provides quantitative measurements; verify via clinical exami
          image_pil: Image.Image,
          max_new_tokens: Optional[int] = None,
      ) -> str:
-         """Try MedGemma (GPU) → fallback report."""
          try:
-             report = generate_medgemma_report_with_timeout(
                  patient_info, visual_results, guideline_context, image_pil, max_new_tokens
              )
              if report and report.strip() and not report.startswith(("⚠️", "❌")):
                  return report
-             logging.warning("MedGemma unavailable/invalid; using fallback.")
              return self._generate_fallback_report(patient_info, visual_results, guideline_context)
          except Exception as e:
              logging.error(f"Report generation failed: {e}")
              return self._generate_fallback_report(patient_info, visual_results, guideline_context)

-     # ---------- HF dataset commit ----------
      def save_and_commit_image(self, image_pil: Image.Image) -> str:
-         """Save image locally and optionally upload to HF dataset."""
          try:
              os.makedirs(self.uploads_dir, exist_ok=True)
              ts = datetime.now().strftime("%Y%m%d_%H%M%S")
@@ -462,17 +887,17 @@ Automated analysis provides quantitative measurements; verify via clinical exami
              image_pil.convert("RGB").save(path)
              logging.info(f"✅ Image saved locally: {path}")

-             if self.hf_token and self.dataset_id:
                  try:
                      HfApi, HfFolder = _import_hf_hub()
-                     HfFolder.save_token(self.hf_token)
                      api = HfApi()
                      api.upload_file(
                          path_or_fileobj=path,
                          path_in_repo=f"images/{filename}",
-                         repo_id=self.dataset_id,
                          repo_type="dataset",
-                         token=self.hf_token,
                          commit_message=f"Upload wound image: {filename}",
                      )
                      logging.info("✅ Image committed to HF dataset")
@@ -484,28 +909,23 @@ Automated analysis provides quantitative measurements; verify via clinical exami
              logging.error(f"Failed to save/commit image: {e}")
              return ""

-     # ---------- Orchestrator ----------
      def full_analysis_pipeline(self, image_pil: Image.Image, questionnaire_data: Dict) -> Dict:
-         """End-to-end analysis with robust fallbacks."""
          try:
              saved_path = self.save_and_commit_image(image_pil)
-
              visual_results = self.perform_visual_analysis(image_pil)

-             # Patient info summary text
              pi = questionnaire_data or {}
              patient_info = (
-                 f"Age: {pi.get('age', 'N/A')}, "
-                 f"Diabetic: {pi.get('diabetic', 'N/A')}, "
-                 f"Allergies: {pi.get('allergies', 'N/A')}, "
-                 f"Date of Wound: {pi.get('date_of_injury', 'N/A')}, "
-                 f"Professional Care: {pi.get('professional_care', 'N/A')}, "
-                 f"Oozing/Bleeding: {pi.get('oozing_bleeding', 'N/A')}, "
-                 f"Infection: {pi.get('infection', 'N/A')}, "
-                 f"Moisture: {pi.get('moisture', 'N/A')}"
              )

-             # Query guidelines
              query = (
                  f"best practices for managing a {visual_results.get('wound_type','Unknown')} "
                  f"with moisture '{pi.get('moisture','unknown')}' and infection '{pi.get('infection','unknown')}' "
@@ -513,18 +933,16 @@ Automated analysis provides quantitative measurements; verify via clinical exami
              )
              guideline_context = self.query_guidelines(query)

-             # Generate final report
-             report = self.generate_final_report(patient_info=patient_info,
-                                                 visual_results=visual_results,
-                                                 guideline_context=guideline_context,
-                                                 image_pil=image_pil)

              return {
                  "success": True,
                  "visual_analysis": visual_results,
                  "report": report,
                  "saved_image_path": saved_path,
-                 "guideline_context": (guideline_context or "")[:500] + ("..." if guideline_context and len(guideline_context) > 500 else ""),
              }
          except Exception as e:
              logging.error(f"Pipeline error: {e}")
@@ -538,7 +956,6 @@ Automated analysis provides quantitative measurements; verify via clinical exami
              }

      def analyze_wound(self, image, questionnaire_data: Dict) -> Dict:
-         """Public entrypoint used by your UI."""
          try:
              if isinstance(image, str):
                  if not os.path.exists(image):
@@ -561,4 +978,4 @@ Automated analysis provides quantitative measurements; verify via clinical exami
              "report": f"Analysis initialization failed: {str(e)}",
              "saved_image_path": None,
              "guideline_context": "",
-         }
 
  # smartheal_ai_processor.py
+ # Verbose, instrumented version — preserves public class/function names
+ # Turn on deep logging: export LOGLEVEL=DEBUG SMARTHEAL_DEBUG=1

  import os
  import logging
  from datetime import datetime
+ from typing import Optional, Dict, List, Tuple
+
+ # ---- Environment defaults (do NOT globally hint CUDA here) ----
+ os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
+ LOGLEVEL = os.getenv("LOGLEVEL", "INFO").upper()
+ SMARTHEAL_DEBUG = os.getenv("SMARTHEAL_DEBUG", "0") == "1"

  import cv2
  import numpy as np
  from PIL import Image
+ from PIL.ExifTags import TAGS
+
+ # --- Logging config ---
+ logging.basicConfig(
+     level=getattr(logging, LOGLEVEL, logging.INFO),
+     format="%(asctime)s - %(levelname)s - %(message)s",
+ )
+
+ def _log_kv(prefix: str, kv: Dict):
+     logging.debug(prefix + " | " + " | ".join(f"{k}={v}" for k, v in kv.items()))

+ # --- Spaces GPU decorator (REQUIRED) ---
+ from spaces import GPU as _SPACES_GPU

+ @_SPACES_GPU(enable_queue=True)
+ def smartheal_gpu_stub(ping: int = 0) -> str:
+     return "ready"
+
+ # ---- Paths / constants ----
  UPLOADS_DIR = "uploads"
  os.makedirs(UPLOADS_DIR, exist_ok=True)

  HF_TOKEN = os.getenv("HF_TOKEN", None)
  YOLO_MODEL_PATH = "src/best.pt"
+ SEG_MODEL_PATH = "src/segmentation_model.h5"  # optional
  GUIDELINE_PDFS = ["src/eHealth in Wound Care.pdf", "src/IWGDF Guideline.pdf", "src/evaluation.pdf"]
+ DATASET_ID = "SmartHeal/wound-image-uploads"
+ DEFAULT_PX_PER_CM = 38.0
+ PX_PER_CM_MIN, PX_PER_CM_MAX = 5.0, 1200.0
+
+ # Segmentation preprocessing knobs
+ SEG_EXPECTS_RGB = os.getenv("SEG_EXPECTS_RGB", "1") == "1"  # most TF models trained on RGB
+ SEG_NORM = os.getenv("SEG_NORM", "0to1")  # "0to1" | "imagenet"
+ SEG_THRESH = float(os.getenv("SEG_THRESH", "0.5"))

  models_cache: Dict[str, object] = {}
  knowledge_base_cache: Dict[str, object] = {}

+ # ---------- Utilities to prevent CUDA in main process ----------
+ from contextlib import contextmanager
+
+ @contextmanager
+ def _no_cuda_env():
+     """
+     Mask GPUs so any library imported/constructed in the main process
+     cannot see CUDA (required for Spaces Stateless GPU).
+     """
+     prev = os.environ.get("CUDA_VISIBLE_DEVICES")
+     os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+     try:
+         yield
+     finally:
+         if prev is None:
+             os.environ.pop("CUDA_VISIBLE_DEVICES", None)
+         else:
+             os.environ["CUDA_VISIBLE_DEVICES"] = prev
+
+ # ---------- Lazy imports (wrapped where needed) ----------
  def _import_ultralytics():
+     # Prevent Ultralytics from probing CUDA on import
+     with _no_cuda_env():
+         from ultralytics import YOLO
      return YOLO

  def _import_tf_loader():
      import tensorflow as tf
+     tf.config.set_visible_devices([], "GPU")
      from tensorflow.keras.models import load_model
      return load_model

      from huggingface_hub import HfApi, HfFolder
      return HfApi, HfFolder

+ # ---------- SmartHeal prompts (system + user prefix) ----------
+ SMARTHEAL_SYSTEM_PROMPT = """\
+ You are SmartHeal Clinical Assistant, a wound-care decision-support system.
+ You analyze wound photographs and brief patient context to produce careful,
+ specific, guideline-informed recommendations WITHOUT diagnosing. You always:
+ - Use the measurements calculated by the vision pipeline as ground truth.
+ - Prefer concise, actionable steps tailored to exudate level, infection risk, and pain.
+ - Flag uncertainties and red flags that need escalation to a clinician.
+ - Avoid contraindicated advice; do not infer unseen comorbidities.
+ - Keep under 300 words and use the requested headings exactly.
+ - Tone: professional, clear, and conservative; no definitive medical claims.
+ - Safety: remind the user to seek clinician review for changes or red flags.
+ """
+
+ SMARTHEAL_USER_PREFIX = """\
+ Patient: {patient_info}
+ Visual findings: type={wound_type}, size={length_cm}x{breadth_cm} cm, area={area_cm2} cm^2,
+ detection_conf={det_conf:.2f}, calibration={px_per_cm} px/cm.
+ Guideline context (snippets you can draw principles from; do not quote at length):
+ {guideline_context}
+ Write a structured answer with these headings exactly:
+ 1. Clinical Summary (max 4 bullet points)
+ 2. Likely Stage/Type (if uncertain, say 'uncertain')
+ 3. Treatment Plan (specific dressing choices and frequency based on exudate/infection risk)
+ 4. Red Flags (what to escalate and when)
+ 5. Follow-up Cadence (days)
+ 6. Notes (assumptions/uncertainties)
+ Keep to 220–300 words. Do NOT provide diagnosis. Avoid contraindicated advice.
+ """

+ # ---------- VLM (MedGemma replaced with Qwen2-VL) ----------
+ @_SPACES_GPU(enable_queue=True)
+ def _vlm_infer_gpu(messages, model_id: str, max_new_tokens: int, token: Optional[str]):
+     """
+     Runs entirely inside a Spaces GPU worker. It's the ONLY place we allow CUDA init.
+     """
+     from transformers import pipeline
+     import torch  # Ensure torch is imported here
+     pipe = pipeline(
+         task="image-text-to-text",
+         model=model_id,
+         torch_dtype=torch.bfloat16,  # Use torch_dtype from the working example
+         device_map="auto",  # CUDA init happens here, safely in GPU worker
+         token=token,
+         trust_remote_code=True,
+         model_kwargs={"low_cpu_mem_usage": True},
+     )
+     out = pipe(text=messages, max_new_tokens=max_new_tokens, do_sample=False, temperature=0.2)
      try:
+         txt = out[0]["generated_text"][-1].get("content", "")
      except Exception:
+         txt = out[0].get("generated_text", "")
+     return (txt or "").strip() or "⚠️ Empty response"
+
+ def generate_medgemma_report(  # kept name so callers don't change
+     patient_info: str,
+     visual_results: Dict,
+     guideline_context: str,
+     image_pil: Image.Image,
+     max_new_tokens: Optional[int] = None,
+ ) -> str:
+     """
+     MedGemma replacement using Qwen/Qwen2-VL-2B-Instruct via image-text-to-text.
+     Loads & runs ONLY inside a GPU worker to satisfy Stateless GPU constraints.
+     """
+     if os.getenv("SMARTHEAL_ENABLE_VLM", "1") != "1":
+         return "⚠️ VLM disabled"
+
+     model_id = os.getenv("SMARTHEAL_VLM_MODEL", "Qwen/Qwen2-VL-2B-Instruct")
+     max_new_tokens = max_new_tokens or int(os.getenv("SMARTHEAL_VLM_MAX_TOKENS", "600"))
+
+     uprompt = SMARTHEAL_USER_PREFIX.format(
+         patient_info=patient_info,
+         wound_type=visual_results.get("wound_type", "Unknown"),
+         length_cm=visual_results.get("length_cm", 0),
+         breadth_cm=visual_results.get("breadth_cm", 0),
+         area_cm2=visual_results.get("surface_area_cm2", 0),
+         det_conf=float(visual_results.get("detection_confidence", 0.0)),
+         px_per_cm=visual_results.get("px_per_cm", "?"),
+         guideline_context=(guideline_context or "")[:900],
+     )

+     messages = [
+         {"role": "system", "content": [{"type": "text", "text": SMARTHEAL_SYSTEM_PROMPT}]},
+         {"role": "user", "content": [
+             {"type": "image", "image": image_pil},
+             {"type": "text", "text": uprompt},
+         ]},
+     ]

+     try:
+         # IMPORTANT: do not import transformers or touch CUDA here. Only call the GPU worker.
+         return _vlm_infer_gpu(messages, model_id, max_new_tokens, HF_TOKEN)
+     except Exception as e:
+         logging.error(f"VLM call failed: {e}")
+         return "⚠️ VLM error"

+ # ---------- Initialize CPU models ----------
  def load_yolo_model():
      YOLO = _import_ultralytics()
+     # Construct model with CUDA masked to avoid auto-selecting cuda:0
+     with _no_cuda_env():
+         model = YOLO(YOLO_MODEL_PATH)
+     return model

  def load_segmentation_model():
+     import tensorflow as tf
      load_model = _import_tf_loader()
+     return load_model(SEG_MODEL_PATH, compile=False, custom_objects={'InputLayer': tf.keras.layers.InputLayer})

  def load_classification_pipeline():
      pipe = _import_hf_cls()
+     return pipe("image-classification", model="Hemg/Wound-classification", token=HF_TOKEN, device="cpu")

  def load_embedding_model():
      Emb = _import_embeddings()
      return Emb(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={"device": "cpu"})

  def initialize_cpu_models() -> None:
      if HF_TOKEN:
          try:
              HfApi, HfFolder = _import_hf_hub()
              HfFolder.save_token(HF_TOKEN)
+             logging.info("✅ HF token set")
          except Exception as e:
              logging.warning(f"HF token save failed: {e}")

      if "det" not in models_cache:
          try:
              models_cache["det"] = load_yolo_model()
+             logging.info("✅ YOLO loaded (CPU; CUDA masked in main)")
          except Exception as e:
              logging.error(f"YOLO load failed: {e}")

          try:
              if os.path.exists(SEG_MODEL_PATH):
                  models_cache["seg"] = load_segmentation_model()
+                 m = models_cache["seg"]
+                 ishape = getattr(m, "input_shape", None)
+                 oshape = getattr(m, "output_shape", None)
+                 logging.info(f"✅ Segmentation model loaded (CPU) | input_shape={ishape} output_shape={oshape}")
              else:
                  models_cache["seg"] = None
+                 logging.warning("Segmentation model file missing; skipping.")
          except Exception as e:
              models_cache["seg"] = None
+             logging.warning(f"Segmentation unavailable: {e}")

      if "cls" not in models_cache:
          try:
              models_cache["cls"] = load_classification_pipeline()
+             logging.info("✅ Classifier loaded (CPU)")
          except Exception as e:
              models_cache["cls"] = None
+             logging.warning(f"Classifier unavailable: {e}")

      if "embedding_model" not in models_cache:
          try:
              models_cache["embedding_model"] = load_embedding_model()
+             logging.info("✅ Embeddings loaded (CPU)")
          except Exception as e:
              models_cache["embedding_model"] = None
+             logging.warning(f"Embeddings unavailable: {e}")

  def setup_knowledge_base() -> None:
      if "vector_store" in knowledge_base_cache:
          return
+     docs: List = []
      try:
          PyPDFLoader = _import_langchain_pdf()
          for pdf in GUIDELINE_PDFS:
              if os.path.exists(pdf):
                  try:
+                     docs.extend(PyPDFLoader(pdf).load())
                      logging.info(f"Loaded PDF: {pdf}")
                  except Exception as e:
+                     logging.warning(f"PDF load failed ({pdf}): {e}")
      except Exception as e:
          logging.warning(f"LangChain PDF loader unavailable: {e}")

      try:
          from langchain.text_splitter import RecursiveCharacterTextSplitter
          FAISS = _import_langchain_faiss()
+         chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
          knowledge_base_cache["vector_store"] = FAISS.from_documents(chunks, models_cache["embedding_model"])
+         logging.info(f"✅ Knowledge base ready ({len(chunks)} chunks)")
      except Exception as e:
          knowledge_base_cache["vector_store"] = None
+         logging.warning(f"KB build failed: {e}")
      else:
          knowledge_base_cache["vector_store"] = None
+         logging.warning("KB disabled (no docs or embeddings).")

  initialize_cpu_models()
  setup_knowledge_base()

+ # ---------- Calibration helpers ----------
+ def _exif_to_dict(pil_img: Image.Image) -> Dict[str, object]:
+     out = {}
+     try:
+         exif = pil_img.getexif()
+         if not exif:
+             return out
+         for k, v in exif.items():
+             tag = TAGS.get(k, k)
+             out[tag] = v
+     except Exception:
+         pass
+     return out
+
+ def _to_float(val) -> Optional[float]:
+     try:
+         if val is None:
+             return None
+         if isinstance(val, tuple) and len(val) == 2:
+             num, den = float(val[0]), float(val[1]) if float(val[1]) != 0 else 1.0
+             return num / den
+         return float(val)
+     except Exception:
+         return None
+
+ def _estimate_sensor_width_mm(f_mm: Optional[float], f35: Optional[float]) -> Optional[float]:
+     if f_mm and f35 and f35 > 0:
+         return 36.0 * f_mm / f35
+     return None
+
+ def estimate_px_per_cm_from_exif(pil_img: Image.Image, default_px_per_cm: float = DEFAULT_PX_PER_CM) -> Tuple[float, Dict]:
+     meta = {"used": "default", "f_mm": None, "f35": None, "sensor_w_mm": None, "distance_m": None}
+     try:
+         exif = _exif_to_dict(pil_img)
+         f_mm = _to_float(exif.get("FocalLength"))
+         f35 = _to_float(exif.get("FocalLengthIn35mmFilm") or exif.get("FocalLengthIn35mm"))
+         subj_dist_m = _to_float(exif.get("SubjectDistance"))
+         sensor_w_mm = _estimate_sensor_width_mm(f_mm, f35)
+         meta.update({"f_mm": f_mm, "f35": f35, "sensor_w_mm": sensor_w_mm, "distance_m": subj_dist_m})
+
+         if f_mm and sensor_w_mm and subj_dist_m and subj_dist_m > 0:
+             w_px = pil_img.width
+             field_w_mm = sensor_w_mm * (subj_dist_m * 1000.0) / f_mm
+             field_w_cm = field_w_mm / 10.0
+             px_per_cm = w_px / max(field_w_cm, 1e-6)
+             px_per_cm = float(np.clip(px_per_cm, PX_PER_CM_MIN, PX_PER_CM_MAX))
+             meta["used"] = "exif"
+             return px_per_cm, meta
+         return float(default_px_per_cm), meta
+     except Exception:
+         return float(default_px_per_cm), meta
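
Editor's note: a quick sanity check of the pinhole arithmetic above, under assumed EXIF values (f = 4.25 mm, 35 mm-equivalent 26 mm, subject distance 0.30 m, frame width 4032 px); the numbers are illustrative, not from the commit:

    f_mm, f35, dist_m, w_px = 4.25, 26.0, 0.30, 4032
    sensor_w_mm = 36.0 * f_mm / f35                       # ~5.88 mm via the crop-factor estimate
    field_w_mm = sensor_w_mm * (dist_m * 1000.0) / f_mm   # ~415 mm visible across the frame
    px_per_cm = w_px / (field_w_mm / 10.0)                # ~97 px/cm, well inside the clip range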
+
+ # ---------- Segmentation helpers ----------
+ def _imagenet_norm(arr: np.ndarray) -> np.ndarray:
+     mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
+     std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
+     return (arr.astype(np.float32) - mean) / std
+
+ def _preprocess_for_seg(bgr_roi: np.ndarray, target_hw: Tuple[int, int]) -> np.ndarray:
+     H, W = target_hw
+     resized = cv2.resize(bgr_roi, (W, H), interpolation=cv2.INTER_LINEAR)
+     if SEG_EXPECTS_RGB:
+         resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
+     if SEG_NORM.lower() == "imagenet":
+         x = _imagenet_norm(resized)
+     else:
+         x = resized.astype(np.float32) / 255.0
+     x = np.expand_dims(x, axis=0)  # (1,H,W,3)
+     return x
+
+ def _to_prob(pred: np.ndarray) -> np.ndarray:
+     p = np.squeeze(pred)
+     pmin, pmax = float(p.min()), float(p.max())
+     if pmax > 1.0 or pmin < 0.0:
+         p = 1.0 / (1.0 + np.exp(-p))
+     return p.astype(np.float32)
+
+ # ---- Adaptive threshold + GrabCut grow ----
+ def _adaptive_prob_threshold(p: np.ndarray) -> float:
+     """
+     Choose a threshold that avoids tiny blobs while not swallowing skin.
+     Try Otsu and the 90th percentile, clamp to [0.25, 0.65], pick by area heuristic.
+     """
+     p01 = np.clip(p.astype(np.float32), 0, 1)
+     p255 = (p01 * 255).astype(np.uint8)
+
+     ret_otsu, _ = cv2.threshold(p255, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+     thr_otsu = float(np.clip(ret_otsu / 255.0, 0.25, 0.65))
+     thr_pctl = float(np.clip(np.percentile(p01, 90), 0.25, 0.65))
+
+     def area_frac(thr: float) -> float:
+         return float((p01 >= thr).sum()) / float(p01.size)
+
+     af_otsu = area_frac(thr_otsu)
+     af_pctl = area_frac(thr_pctl)
+
+     def score(af: float) -> float:
+         target_low, target_high = 0.03, 0.10
+         if af < target_low:
+             return abs(af - target_low) * 3.0
+         if af > target_high:
+             return abs(af - target_high) * 1.5
+         return 0.0
+
+     return thr_otsu if score(af_otsu) <= score(af_pctl) else thr_pctl
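
Editor's illustration of how the picker behaves (synthetic input, not part of the commit): a confident blob covering ~2% of the map sits below the 3–10% target band, so both candidates are scored on their distance to the band and the Otsu value wins ties:

    p = np.zeros((100, 100), np.float32)
    p[40:55, 40:55] = 0.9                      # ~2.25% of pixels look like wound
    thr = _adaptive_prob_threshold(p)
    frac = float((p >= thr).sum()) / p.size    # area fraction actually selected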
+
+ def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.ndarray:
+     """Grow from a confident core into low-contrast margins."""
+     h, w = bgr.shape[:2]
+     gc = np.full((h, w), cv2.GC_PR_BGD, np.uint8)
+     k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
+     seed_dil = cv2.dilate(seed01, k, iterations=1)
+     # Dilated ring = probable foreground; confident core = definite foreground
+     # (write the ring first so the core is not overwritten).
+     gc[seed_dil.astype(bool)] = cv2.GC_PR_FGD
+     gc[seed01.astype(bool)] = cv2.GC_FGD
+     # Pin the image border to definite background (last column, not column 1)
+     gc[0, :], gc[-1, :], gc[:, 0], gc[:, -1] = cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD, cv2.GC_BGD
+     bgdModel = np.zeros((1, 65), np.float64)
+     fgdModel = np.zeros((1, 65), np.float64)
+     cv2.grabCut(bgr, gc, None, bgdModel, fgdModel, iters, cv2.GC_INIT_WITH_MASK)
+     return np.where((gc == cv2.GC_FGD) | (gc == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)
+
+ def _fill_holes(mask01: np.ndarray) -> np.ndarray:
+     h, w = mask01.shape[:2]
+     ff = np.zeros((h + 2, w + 2), np.uint8)
+     m = (mask01 * 255).astype(np.uint8).copy()
+     cv2.floodFill(m, ff, (0, 0), 255)
+     m_inv = cv2.bitwise_not(m)
+     out = ((mask01 * 255) | m_inv) // 255
+     return out.astype(np.uint8)
+
+ def _clean_mask(mask01: np.ndarray) -> np.ndarray:
+     """Open → Close → Fill holes → Largest component (no dilation)."""
+     mask01 = (mask01 > 0).astype(np.uint8)
+     k3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+     k5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
+     mask01 = cv2.morphologyEx(mask01, cv2.MORPH_OPEN, k3, iterations=1)
+     mask01 = cv2.morphologyEx(mask01, cv2.MORPH_CLOSE, k5, iterations=1)
+     mask01 = _fill_holes(mask01)
+     # Keep largest component only
+     num, labels, stats, _ = cv2.connectedComponentsWithStats(mask01, 8)
+     if num > 1:
+         areas = stats[1:, cv2.CC_STAT_AREA]
+         if areas.size:
+             largest_idx = 1 + int(np.argmax(areas))
+             mask01 = (labels == largest_idx).astype(np.uint8)
+     return (mask01 > 0).astype(np.uint8)
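
A minimal editor's sketch of what this cleanup guarantees (toy mask, assumed shapes):

    noisy = np.zeros((64, 64), np.uint8)
    noisy[10:30, 10:30] = 1        # main blob
    noisy[18:20, 18:20] = 0        # small interior hole -> filled
    noisy[50, 50] = 1              # lone speckle -> removed by opening
    clean = _clean_mask(noisy)     # a single filled component survives
    assert clean[19, 19] == 1 and clean[50, 50] == 0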
+
+ # Global last debug dict (per-process)
+ _last_seg_debug: Dict[str, object] = {}
+
+ def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndarray, Dict[str, object]]:
+     """
+     TF model → adaptive threshold on prob → GrabCut grow → cleanup.
+     Fallback: KMeans-Lab.
+     Returns (mask_uint8_0_255, debug_dict)
+     """
+     debug = {"used": None, "reason": None, "positive_fraction": 0.0,
+              "thr": None, "heatmap_path": None, "roi_seen_by_model": None}
+
+     seg_model = models_cache.get("seg", None)
+
+     # --- Model path ---
+     if seg_model is not None:
+         try:
+             ishape = getattr(seg_model, "input_shape", None)
+             if not ishape or len(ishape) < 4:
+                 raise ValueError(f"Bad seg input_shape: {ishape}")
+             th, tw = int(ishape[1]), int(ishape[2])
+
+             x = _preprocess_for_seg(image_bgr, (th, tw))
+             roi_seen_path = None
+             if SMARTHEAL_DEBUG:
+                 roi_seen_path = os.path.join(out_dir, f"roi_for_seg_{ts}.png")
+                 cv2.imwrite(roi_seen_path, image_bgr)
+
+             pred = seg_model.predict(x, verbose=0)
+             if isinstance(pred, (list, tuple)):
+                 pred = pred[0]
+             p = _to_prob(pred)
+             p = cv2.resize(p, (image_bgr.shape[1], image_bgr.shape[0]), interpolation=cv2.INTER_LINEAR)
+
+             heatmap_path = None
+             if SMARTHEAL_DEBUG:
+                 hm = (np.clip(p, 0, 1) * 255).astype(np.uint8)
+                 heat = cv2.applyColorMap(hm, cv2.COLORMAP_JET)
+                 heatmap_path = os.path.join(out_dir, f"seg_pred_heatmap_{ts}.png")
+                 cv2.imwrite(heatmap_path, heat)
+
+             thr = _adaptive_prob_threshold(p)
+             core01 = (p >= thr).astype(np.uint8)
+             core_frac = float(core01.sum()) / float(core01.size)
+
+             if core_frac < 0.005:
+                 thr2 = max(thr - 0.10, 0.15)
+                 core01 = (p >= thr2).astype(np.uint8)
+                 thr = thr2
+                 core_frac = float(core01.sum()) / float(core01.size)
+
+             if core01.any():
+                 gc01 = _grabcut_refine(image_bgr, core01, iters=3)
+                 mask01 = _clean_mask(gc01)
+             else:
+                 mask01 = np.zeros(core01.shape, np.uint8)
+
+             pos_frac = float(mask01.sum()) / float(mask01.size)
+             logging.info(f"SegModel USED | thr={float(thr):.2f} core_frac={core_frac:.4f} final_frac={pos_frac:.4f}")
+
+             debug.update({
+                 "used": "tf_model",
+                 "reason": "ok",
+                 "positive_fraction": pos_frac,
+                 "thr": float(thr),
+                 "heatmap_path": heatmap_path,
+                 "roi_seen_by_model": roi_seen_path
+             })
+             return (mask01 * 255).astype(np.uint8), debug
+
+         except Exception as e:
+             logging.warning(f"⚠️ Segmentation model failed → fallback. Reason: {e}")
+             debug.update({"used": "fallback_kmeans", "reason": f"model_failed: {e}"})
+
+     # --- Fallback: KMeans in Lab (reddest cluster as wound) ---
+     Z = image_bgr.reshape((-1, 3)).astype(np.float32)
+     criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
+     _, labels, centers = cv2.kmeans(Z, 2, None, criteria, 5, cv2.KMEANS_PP_CENTERS)
+     centers_u8 = centers.astype(np.uint8).reshape(1, 2, 3)
+     centers_lab = cv2.cvtColor(centers_u8, cv2.COLOR_BGR2LAB)[0]
+     wound_idx = int(np.argmax(centers_lab[:, 1]))  # maximize a* (red)
+     mask01 = (labels.reshape(image_bgr.shape[:2]) == wound_idx).astype(np.uint8)
+     mask01 = _clean_mask(mask01)
+
+     pos_frac = float(mask01.sum()) / float(mask01.size)
+     logging.info(f"KMeans USED | final_frac={pos_frac:.4f}")
+
+     debug.update({
+         "used": "fallback_kmeans",
+         "reason": debug.get("reason") or "no_model",
+         "positive_fraction": pos_frac,
+         "thr": None
+     })
+     return (mask01 * 255).astype(np.uint8), debug
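
Typical call site, as used later in perform_visual_analysis (editor's sketch; roi_bgr is a hypothetical BGR crop):

    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    mask255, dbg = segment_wound(roi_bgr, ts, "uploads/analysis")
    # dbg["used"] is "tf_model" or "fallback_kmeans"; dbg["positive_fraction"] is mask coverage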
+
+ # ---------- Measurement + overlay helpers ----------
+ def largest_component_mask(binary01: np.ndarray, min_area_px: int = 50) -> np.ndarray:
+     num, labels, stats, _ = cv2.connectedComponentsWithStats(binary01.astype(np.uint8), connectivity=8)
+     if num <= 1:
+         return binary01.astype(np.uint8)
+     areas = stats[1:, cv2.CC_STAT_AREA]
+     if areas.size == 0 or areas.max() < min_area_px:
+         return binary01.astype(np.uint8)
+     largest_idx = 1 + int(np.argmax(areas))
+     return (labels == largest_idx).astype(np.uint8)
+
+ def measure_min_area_rect(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, float, Tuple]:
+     contours, _ = cv2.findContours(mask01.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     if not contours:
+         return 0.0, 0.0, (None, None)
+     cnt = max(contours, key=cv2.contourArea)
+     rect = cv2.minAreaRect(cnt)
+     (w_px, h_px) = rect[1]
+     length_px, breadth_px = (max(w_px, h_px), min(w_px, h_px))
+     length_cm = round(length_px / max(px_per_cm, 1e-6), 2)
+     breadth_cm = round(breadth_px / max(px_per_cm, 1e-6), 2)
+     box = cv2.boxPoints(rect).astype(int)
+     return length_cm, breadth_cm, (box, rect[0])
+
+ def area_cm2_from_contour(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, Optional[np.ndarray]]:
+     """Area from largest polygon (sub-pixel); returns (area_cm2, contour)."""
+     m = (mask01 > 0).astype(np.uint8)
+     contours, _ = cv2.findContours(m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     if not contours:
+         return 0.0, None
+     cnt = max(contours, key=cv2.contourArea)
+     poly_area_px2 = float(cv2.contourArea(cnt))
+     area_cm2 = round(poly_area_px2 / (max(px_per_cm, 1e-6) ** 2), 2)
+     return area_cm2, cnt
+
+ def clamp_area_with_minrect(cnt: np.ndarray, px_per_cm: float, area_cm2_poly: float) -> float:
+     rect = cv2.minAreaRect(cnt)
+     (w_px, h_px) = rect[1]
+     rect_area_px2 = float(max(w_px, 0.0) * max(h_px, 0.0))
+     rect_area_cm2 = rect_area_px2 / (max(px_per_cm, 1e-6) ** 2)
+     return round(min(area_cm2_poly, rect_area_cm2 * 1.05), 2)
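
A worked example of the measurement chain at the default calibration (editor's sketch: a synthetic 190×76 px blob at 38 px/cm comes out at roughly 5.0 × 2.0 cm; values are approximate because contours are integer-aligned):

    mask = np.zeros((300, 300), np.uint8)
    mask[55:245, 112:188] = 1      # ~190 px tall, ~76 px wide
    length_cm, breadth_cm, (box, _) = measure_min_area_rect(mask, px_per_cm=38.0)
    area_cm2, cnt = area_cm2_from_contour(mask, px_per_cm=38.0)
    if cnt is not None:
        area_cm2 = clamp_area_with_minrect(cnt, 38.0, area_cm2)  # cap polygon area at rect area + 5%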
+
+ def draw_measurement_overlay(
+     base_bgr: np.ndarray,
+     mask01: np.ndarray,
+     rect_box: np.ndarray,
+     length_cm: float,
+     breadth_cm: float,
+     thickness: int = 2
+ ) -> np.ndarray:
+     """
+     1) Strong red mask overlay + white contour
+     2) Min-area rectangle
+     3) Double-headed arrows labeled Length/Width
+     """
+     overlay = base_bgr.copy()
+
+     # Mask tint
+     mask255 = (mask01 * 255).astype(np.uint8)
+     mask3 = cv2.merge([mask255, mask255, mask255])
+     red = np.zeros_like(overlay)
+     red[:] = (0, 0, 255)
+     alpha = 0.55
+     tinted = cv2.addWeighted(overlay, 1 - alpha, red, alpha, 0)
+     overlay = np.where(mask3 > 0, tinted, overlay)
+
+     # Contour
+     cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     if cnts:
+         cv2.drawContours(overlay, cnts, -1, (255, 255, 255), 2)
+
+     if rect_box is not None:
+         cv2.polylines(overlay, [rect_box], True, (255, 255, 255), thickness)
+         pts = rect_box.reshape(-1, 2)
+
+         def midpoint(a, b):
+             return (int((a[0] + b[0]) / 2), int((a[1] + b[1]) / 2))
+
+         e = [np.linalg.norm(pts[i] - pts[(i + 1) % 4]) for i in range(4)]
+         long_edge_idx = int(np.argmax(e))
+         mids = [midpoint(pts[i], pts[(i + 1) % 4]) for i in range(4)]
+         long_pair = (long_edge_idx, (long_edge_idx + 2) % 4)
+         short_pair = ((long_edge_idx + 1) % 4, (long_edge_idx + 3) % 4)
+
+         def draw_double_arrow(img, p1, p2):
+             cv2.arrowedLine(img, p1, p2, (0, 0, 0), thickness + 2, tipLength=0.05)
+             cv2.arrowedLine(img, p2, p1, (0, 0, 0), thickness + 2, tipLength=0.05)
+             cv2.arrowedLine(img, p1, p2, (255, 255, 255), thickness, tipLength=0.05)
+             cv2.arrowedLine(img, p2, p1, (255, 255, 255), thickness, tipLength=0.05)
+
+         def put_label(text, anchor):
+             org = (anchor[0] + 6, anchor[1] - 6)
+             cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 4, cv2.LINE_AA)
+             cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
+
+         # The segment joining midpoints of the two long edges spans the SHORT
+         # dimension (and vice versa), so pair the Length label with the
+         # short-edge midpoints and the Width label with the long-edge midpoints.
+         draw_double_arrow(overlay, mids[short_pair[0]], mids[short_pair[1]])
+         draw_double_arrow(overlay, mids[long_pair[0]], mids[long_pair[1]])
+         put_label(f"Length: {length_cm:.2f} cm", mids[short_pair[0]])
+         put_label(f"Width: {breadth_cm:.2f} cm", mids[long_pair[0]])
+
+     return overlay
+
+ # ---------- AI PROCESSOR ----------
  class AIProcessor:
      def __init__(self):
          self.models_cache = models_cache
          self.knowledge_base_cache = knowledge_base_cache
          self.uploads_dir = UPLOADS_DIR
          self.dataset_id = DATASET_ID
          self.hf_token = HF_TOKEN

      def _ensure_analysis_dir(self) -> str:
          out_dir = os.path.join(self.uploads_dir, "analysis")
          os.makedirs(out_dir, exist_ok=True)
          return out_dir

      def perform_visual_analysis(self, image_pil: Image.Image) -> Dict:
+         """
+         YOLO detect → crop ROI → segment_wound(ROI) → clean mask →
+         minAreaRect measurement (cm) using EXIF px/cm → save outputs.
+         """
          try:
+             px_per_cm, exif_meta = estimate_px_per_cm_from_exif(image_pil, DEFAULT_PX_PER_CM)
+             # Guardrails for calibration to avoid huge area blow-ups
+             px_per_cm = float(np.clip(px_per_cm, 20.0, 350.0))
+             if (exif_meta or {}).get("used") != "exif":
+                 logging.warning(f"Calibration fallback used: px_per_cm={px_per_cm:.2f} (default). Prefer ruler/Aruco for accuracy.")
+
              image_cv = cv2.cvtColor(np.array(image_pil.convert("RGB")), cv2.COLOR_RGB2BGR)

+             # --- Detection ---
+             det_model = self.models_cache.get("det")
+             if det_model is None:
                  raise RuntimeError("YOLO model not loaded")
+             # Force CPU inference and avoid CUDA touch
+             results = det_model.predict(image_cv, verbose=False, device="cpu")
+             if (not results) or (not getattr(results[0], "boxes", None)) or (len(results[0].boxes) == 0):
+                 try:
+                     import gradio as gr
+                 except ImportError:
+                     # gradio unavailable (e.g., offline tests): plain exception instead
+                     raise RuntimeError("No wound could be detected.")
+                 raise gr.Error("No wound could be detected.")

              box = results[0].boxes[0].xyxy[0].cpu().numpy().astype(int)
              x1, y1, x2, y2 = [int(v) for v in box]
              x1, y1 = max(0, x1), max(0, y1)
              x2, y2 = min(image_cv.shape[1], x2), min(image_cv.shape[0], y2)
+             roi = image_cv[y1:y2, x1:x2].copy()
+             if roi.size == 0:
+                 try:
+                     import gradio as gr
+                 except ImportError:
+                     raise RuntimeError("Detected ROI is empty.")
+                 raise gr.Error("Detected ROI is empty.")
+
+             out_dir = self._ensure_analysis_dir()
+             ts = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+             # --- Segmentation (model-first + KMeans fallback) ---
+             mask_u8_255, seg_debug = segment_wound(roi, ts, out_dir)
+             mask01 = (mask_u8_255 > 127).astype(np.uint8)
+
+             if mask01.any():
+                 mask01 = _clean_mask(mask01)
+                 logging.debug(f"Mask postproc: px_after={int(mask01.sum())}")
+
+             # --- Measurement (accurate & conservative) ---
+             if mask01.any():
+                 length_cm, breadth_cm, (box_pts, _) = measure_min_area_rect(mask01, px_per_cm)
+                 area_poly_cm2, largest_cnt = area_cm2_from_contour(mask01, px_per_cm)
+                 if largest_cnt is not None:
+                     surface_area_cm2 = clamp_area_with_minrect(largest_cnt, px_per_cm, area_poly_cm2)
+                 else:
+                     surface_area_cm2 = area_poly_cm2
+
+                 anno_roi = draw_measurement_overlay(roi, mask01, box_pts, length_cm, breadth_cm)
+                 segmentation_empty = False
+             else:
+                 # Fallback if seg failed: use ROI dimensions
+                 h_px = max(0, y2 - y1)
+                 w_px = max(0, x2 - x1)
+                 length_cm = round(max(h_px, w_px) / px_per_cm, 2)
+                 breadth_cm = round(min(h_px, w_px) / px_per_cm, 2)
+                 surface_area_cm2 = round((h_px * w_px) / (px_per_cm ** 2), 2)
+                 anno_roi = roi.copy()
+                 cv2.rectangle(anno_roi, (2, 2), (anno_roi.shape[1] - 3, anno_roi.shape[0] - 3), (0, 0, 255), 3)
+                 cv2.line(anno_roi, (0, 0), (anno_roi.shape[1] - 1, anno_roi.shape[0] - 1), (0, 0, 255), 2)
+                 cv2.line(anno_roi, (anno_roi.shape[1] - 1, 0), (0, anno_roi.shape[0] - 1), (0, 0, 255), 2)
+                 box_pts = None
+                 segmentation_empty = True
+
+             # --- Save visualizations ---
+             original_path = os.path.join(out_dir, f"original_{ts}.png")
+             cv2.imwrite(original_path, image_cv)
+
+             det_vis = image_cv.copy()
+             cv2.rectangle(det_vis, (x1, y1), (x2, y2), (0, 255, 0), 2)
+             detection_path = os.path.join(out_dir, f"detection_{ts}.png")
+             cv2.imwrite(detection_path, det_vis)
+
+             roi_mask_path = os.path.join(out_dir, f"roi_mask_{ts}.png")
+             cv2.imwrite(roi_mask_path, (mask01 * 255).astype(np.uint8))
+
+             # ROI overlay (mask tint + contour, without arrows)
+             mask255 = (mask01 * 255).astype(np.uint8)
+             mask3 = cv2.merge([mask255, mask255, mask255])
+             red = np.zeros_like(roi)
+             red[:] = (0, 0, 255)
+             alpha = 0.55
+             tinted = cv2.addWeighted(roi, 1 - alpha, red, alpha, 0)
+             if mask255.any():
+                 roi_overlay = np.where(mask3 > 0, tinted, roi)
+                 cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+                 cv2.drawContours(roi_overlay, cnts, -1, (255, 255, 255), 2)
+             else:
+                 roi_overlay = anno_roi
+
+             seg_full = image_cv.copy()
+             seg_full[y1:y2, x1:x2] = roi_overlay
+             segmentation_path = os.path.join(out_dir, f"segmentation_{ts}.png")
+             cv2.imwrite(segmentation_path, seg_full)
+
+             segmentation_roi_path = os.path.join(out_dir, f"segmentation_roi_{ts}.png")
+             cv2.imwrite(segmentation_roi_path, roi_overlay)
+
+             # Annotated (mask + arrows + labels) in full-frame
+             anno_full = image_cv.copy()
+             anno_full[y1:y2, x1:x2] = anno_roi
+             annotated_seg_path = os.path.join(out_dir, f"segmentation_annotated_{ts}.png")
+             cv2.imwrite(annotated_seg_path, anno_full)

+             # --- Optional classification ---
              wound_type = "Unknown"
              cls_pipe = self.models_cache.get("cls")
              if cls_pipe is not None:
                  try:
+                     preds = cls_pipe(Image.fromarray(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)))
                      if preds:
                          wound_type = max(preds, key=lambda x: x.get("score", 0)).get("label", "Unknown")
                  except Exception as e:
+                     logging.warning(f"Classification failed: {e}")
+
+             # Log end-of-seg summary
+             seg_summary = {
+                 "seg_used": seg_debug.get("used"),
+                 "seg_reason": seg_debug.get("reason"),
+                 "positive_fraction": round(float(seg_debug.get("positive_fraction", 0.0)), 6),
+                 "threshold": seg_debug.get("thr"),
+                 "segmentation_empty": segmentation_empty,
+                 "exif_px_per_cm": round(px_per_cm, 3),
+             }
+             _log_kv("SEG_SUMMARY", seg_summary)

              return {
                  "wound_type": wound_type,
+                 "length_cm": length_cm,
+                 "breadth_cm": breadth_cm,
+                 "surface_area_cm2": surface_area_cm2,
+                 "px_per_cm": round(px_per_cm, 2),
+                 "calibration_meta": exif_meta,
                  "detection_confidence": float(results[0].boxes.conf[0].cpu().item())
+                 if getattr(results[0].boxes, "conf", None) is not None else 0.0,
+                 "detection_image_path": detection_path,
+                 "segmentation_image_path": annotated_seg_path,
+                 "segmentation_annotated_path": annotated_seg_path,
+                 "segmentation_roi_path": segmentation_roi_path,
+                 "roi_mask_path": roi_mask_path,
+                 "segmentation_empty": segmentation_empty,
+                 "segmentation_debug": seg_debug,
                  "original_image_path": original_path,
              }
          except Exception as e:
+             logging.error(f"Visual analysis failed: {e}", exc_info=True)
              raise

+     # ---------- Knowledge base + reporting ----------
      def query_guidelines(self, query: str) -> str:
          try:
              vs = self.knowledge_base_cache.get("vector_store")
              if not vs:
                  return "Knowledge base is not available."
+             retriever = vs.as_retriever(search_kwargs={"k": 5})
+             # Modern API (avoid get_relevant_documents deprecation)
+             docs = retriever.invoke(query)
              lines: List[str] = []
              for d in docs:
                  src = (d.metadata or {}).get("source", "N/A")

              logging.warning(f"Guidelines query failed: {e}")
              return f"Guidelines query failed: {str(e)}"

      def _generate_fallback_report(self, patient_info: str, visual_results: Dict, guideline_context: str) -> str:
          return f"""# 🩺 SmartHeal AI - Comprehensive Wound Analysis Report
  ## 📋 Patient Information
  {patient_info}

  - **Dimensions**: {visual_results.get('length_cm', 0)} cm × {visual_results.get('breadth_cm', 0)} cm
  - **Surface Area**: {visual_results.get('surface_area_cm2', 0)} cm²
  - **Detection Confidence**: {visual_results.get('detection_confidence', 0):.1%}
+ - **Calibration**: {visual_results.get('px_per_cm','?')} px/cm ({(visual_results.get('calibration_meta') or {}).get('used','default')})
  ## 📊 Analysis Images
  - **Original**: {visual_results.get('original_image_path', 'N/A')}
  - **Detection**: {visual_results.get('detection_image_path', 'N/A')}
  - **Segmentation**: {visual_results.get('segmentation_image_path', 'N/A')}
+ - **Annotated**: {visual_results.get('segmentation_annotated_path', 'N/A')}
  ## 🎯 Clinical Summary
  Automated analysis provides quantitative measurements; verify via clinical examination.
  ## 💊 Recommendations

  - Debride necrotic tissue if indicated (clinical decision)
  - Document with serial photos and measurements
  ## 📅 Monitoring
+ - Daily in week 1, then every 2–3 days (or as indicated)
  - Weekly progress review
  ## 📚 Guideline Context
+ {(guideline_context or '')[:800]}{"..." if guideline_context and len(guideline_context) > 800 else ''}
  **Disclaimer:** Automated, for decision support only. Verify clinically.
  """

          image_pil: Image.Image,
          max_new_tokens: Optional[int] = None,
      ) -> str:
          try:
+             report = generate_medgemma_report(
                  patient_info, visual_results, guideline_context, image_pil, max_new_tokens
              )
              if report and report.strip() and not report.startswith(("⚠️", "❌")):
                  return report
+             logging.warning("VLM unavailable/invalid; using fallback.")
              return self._generate_fallback_report(patient_info, visual_results, guideline_context)
          except Exception as e:
              logging.error(f"Report generation failed: {e}")
              return self._generate_fallback_report(patient_info, visual_results, guideline_context)

      def save_and_commit_image(self, image_pil: Image.Image) -> str:
          try:
              os.makedirs(self.uploads_dir, exist_ok=True)
              ts = datetime.now().strftime("%Y%m%d_%H%M%S")

              image_pil.convert("RGB").save(path)
              logging.info(f"✅ Image saved locally: {path}")

+             if HF_TOKEN and DATASET_ID:
                  try:
                      HfApi, HfFolder = _import_hf_hub()
+                     HfFolder.save_token(HF_TOKEN)
                      api = HfApi()
                      api.upload_file(
                          path_or_fileobj=path,
                          path_in_repo=f"images/{filename}",
+                         repo_id=DATASET_ID,
                          repo_type="dataset",
+                         token=HF_TOKEN,
                          commit_message=f"Upload wound image: {filename}",
                      )
                      logging.info("✅ Image committed to HF dataset")

              logging.error(f"Failed to save/commit image: {e}")
              return ""

      def full_analysis_pipeline(self, image_pil: Image.Image, questionnaire_data: Dict) -> Dict:
          try:
              saved_path = self.save_and_commit_image(image_pil)
              visual_results = self.perform_visual_analysis(image_pil)

              pi = questionnaire_data or {}
              patient_info = (
+                 f"Age: {pi.get('age','N/A')}, "
+                 f"Diabetic: {pi.get('diabetic','N/A')}, "
+                 f"Allergies: {pi.get('allergies','N/A')}, "
+                 f"Date of Wound: {pi.get('date_of_injury','N/A')}, "
+                 f"Professional Care: {pi.get('professional_care','N/A')}, "
+                 f"Oozing/Bleeding: {pi.get('oozing_bleeding','N/A')}, "
+                 f"Infection: {pi.get('infection','N/A')}, "
+                 f"Moisture: {pi.get('moisture','N/A')}"
              )

              query = (
                  f"best practices for managing a {visual_results.get('wound_type','Unknown')} "
                  f"with moisture '{pi.get('moisture','unknown')}' and infection '{pi.get('infection','unknown')}' "
              )
              guideline_context = self.query_guidelines(query)

+             report = self.generate_final_report(patient_info, visual_results, guideline_context, image_pil)

              return {
                  "success": True,
                  "visual_analysis": visual_results,
                  "report": report,
                  "saved_image_path": saved_path,
+                 "guideline_context": (guideline_context or "")[:500] + (
+                     "..." if guideline_context and len(guideline_context) > 500 else ""
+                 ),
              }
          except Exception as e:
              logging.error(f"Pipeline error: {e}")
              }

      def analyze_wound(self, image, questionnaire_data: Dict) -> Dict:
          try:
              if isinstance(image, str):
                  if not os.path.exists(image):

              "report": f"Analysis initialization failed: {str(e)}",
              "saved_image_path": None,
              "guideline_context": "",
+         }