SmartHeal committed on
Commit
d323f56
·
verified ·
1 Parent(s): b99f47a

Update src/ai_processor.py

Files changed (1)
  1. src/ai_processor.py +621 -185
src/ai_processor.py CHANGED
@@ -1,53 +1,37 @@
- """
- SmartHeal AI Processor - Zero GPU Compatible Version
- Designed specifically for Hugging Face Spaces with Zero GPU architecture
- """

  import os
  import logging
  import cv2
  import numpy as np
  from PIL import Image
- import json
- from datetime import datetime
- from typing import Optional, Dict, List, Tuple, Any
- from contextlib import contextmanager

- # Configure logging
  logging.basicConfig(
-     level=logging.INFO,
-     format="%(asctime)s - %(levelname)s - %(message)s"
  )

- # Environment setup for Zero GPU compatibility
- os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
- os.environ.setdefault("CUDA_VISIBLE_DEVICES", "-1")  # Hide GPU from main process
-
- # Import spaces decorator
- try:
-     import spaces
-     _SPACES_GPU = spaces.GPU
- except ImportError:
-     logging.warning("spaces package not available - running in CPU mode")
-     # Create dummy decorator for local testing
-     def _SPACES_GPU_dummy(*args, **kwargs):
-         def decorator(func):
-             return func
-         return decorator
-     _SPACES_GPU = _SPACES_GPU_dummy

- @contextmanager
- def _no_cuda_env():
-     """Context manager to prevent CUDA initialization in main process"""
-     prev_cuda = os.environ.get("CUDA_VISIBLE_DEVICES")
-     os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-     try:
-         yield
-     finally:
-         if prev_cuda is None:
-             os.environ.pop("CUDA_VISIBLE_DEVICES", None)
-         else:
-             os.environ["CUDA_VISIBLE_DEVICES"] = prev_cuda

  # ---- Paths / constants ----
  UPLOADS_DIR = "uploads"
@@ -61,9 +45,33 @@ DATASET_ID = "SmartHeal/wound-image-uploads"
  DEFAULT_PX_PER_CM = 38.0
  PX_PER_CM_MIN, PX_PER_CM_MAX = 5.0, 1200.0

  models_cache: Dict[str, object] = {}
  knowledge_base_cache: Dict[str, object] = {}

  # ---------- Lazy imports (wrapped where needed) ----------
  def _import_ultralytics():
      # Prevent Ultralytics from probing CUDA on import
@@ -72,40 +80,33 @@ def _import_ultralytics():
      return YOLO

  def _import_tf_loader():
-     # Ensure TensorFlow does not try to use GPU in main process
-     with _no_cuda_env():
-         import tensorflow as tf
-         tf.config.set_visible_devices([], "GPU")
-         from tensorflow.keras.models import load_model
      return load_model

  def _import_hf_cls():
-     with _no_cuda_env():
-         from transformers import pipeline
      return pipeline

  def _import_embeddings():
-     with _no_cuda_env():
-         from langchain_community.embeddings import HuggingFaceEmbeddings
      return HuggingFaceEmbeddings

  def _import_langchain_pdf():
-     with _no_cuda_env():
-         from langchain_community.document_loaders import PyPDFLoader
      return PyPDFLoader

  def _import_langchain_faiss():
-     with _no_cuda_env():
-         from langchain_community.vectorstores import FAISS
      return FAISS

  def _import_hf_hub():
-     with _no_cuda_env():
-         from huggingface_hub import HfApi, HfFolder
      return HfApi, HfFolder

  # ---------- SmartHeal prompts (system + user prefix) ----------
- SMARTHEAL_SYSTEM_PROMPT = """
  You are SmartHeal Clinical Assistant, a wound-care decision-support system.
  You analyze wound photographs and brief patient context to produce careful,
  specific, guideline-informed recommendations WITHOUT diagnosing. You always:
@@ -118,7 +119,7 @@ specific, guideline-informed recommendations WITHOUT diagnosing. You always:
  - Safety: remind the user to seek clinician review for changes or red flags.
  """

- SMARTHEAL_USER_PREFIX = """
  Patient: {patient_info}
  Visual findings: type={wound_type}, size={length_cm}x{breadth_cm} cm, area={area_cm2} cm^2,
  detection_conf={det_conf:.2f}, calibration={px_per_cm} px/cm.
@@ -212,10 +213,8 @@ def load_yolo_model():
      model = YOLO(YOLO_MODEL_PATH)
      return model
  def load_segmentation_model():
      load_model = _import_tf_loader()
-     # Need to import tf.keras.layers within the no_cuda_env for custom_objects
-     with _no_cuda_env():
-         import tensorflow as tf
      return load_model(SEG_MODEL_PATH, compile=False, custom_objects={'InputLayer': tf.keras.layers.InputLayer})

  def load_classification_pipeline():
@@ -367,7 +366,12 @@ def _imagenet_norm(arr: np.ndarray) -> np.ndarray:
  def _preprocess_for_seg(bgr_roi: np.ndarray, target_hw: Tuple[int, int]) -> np.ndarray:
      H, W = target_hw
      resized = cv2.resize(bgr_roi, (W, H), interpolation=cv2.INTER_LINEAR)
-     x = resized.astype(np.float32) / 255.0
      x = np.expand_dims(x, axis=0)  # (1,H,W,3)
      return x
@@ -406,9 +410,7 @@ def _adaptive_prob_threshold(p: np.ndarray) -> float:
      return thr_otsu if score(af_otsu) <= score(af_pctl) else thr_pctl

  def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.ndarray:
-     """
-     Grow from a confident core into low-contrast margins.
-     """
      h, w = bgr.shape[:2]
      gc = np.full((h, w), cv2.GC_PR_BGD, np.uint8)
      k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
@@ -421,137 +423,571 @@ def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.n
      cv2.grabCut(bgr, gc, None, bgdModel, fgdModel, iters, cv2.GC_INIT_WITH_MASK)
      return np.where((gc == cv2.GC_FGD) | (gc == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)

- # ---------- Main AIProcessor Class ----------
  class AIProcessor:
      def __init__(self):
-         self.config = type("Config", (object,), {
-             "HF_TOKEN": HF_TOKEN,
-             "YOLO_MODEL_PATH": YOLO_MODEL_PATH,
-             "SEG_MODEL_PATH": SEG_MODEL_PATH,
-             "DATASET_ID": DATASET_ID,
-             "UPLOADS_DIR": UPLOADS_DIR,
-             "GUIDELINE_PDFS": GUIDELINE_PDFS
-         })()
          self.models_cache = models_cache
          self.knowledge_base_cache = knowledge_base_cache
-         self.px_per_cm = DEFAULT_PX_PER_CM  # Use default from constants
-
-         # Ensure CPU models and KB are initialized
-         initialize_cpu_models()
-         setup_knowledge_base()
-
-     def perform_visual_analysis(self, image_pil: Image.Image) -> Dict[str, Any]:
-         image_cv = cv2.cvtColor(np.array(image_pil), cv2.COLOR_RGB2BGR)
-
-         if "det" not in self.models_cache or not self.models_cache["det"]:
-             raise ValueError("YOLO model not initialized.")
-
-         results = self.models_cache["det"].predict(image_cv, verbose=False, device="cpu")
-         if not results or not results[0].boxes:
-             raise ValueError("No wound detected.")
-
-         box = results[0].boxes[0].xyxy[0].cpu().numpy().astype(int)
-         region_cv = image_cv[box[1]:box[3], box[0]:box[2]]
-         detection_confidence = float(results[0].boxes[0].conf[0].cpu().numpy())
-
-         length = breadth = area = 0
-         if "seg" in self.models_cache and self.models_cache["seg"]:
-             try:
-                 seg_model = self.models_cache["seg"]
-                 input_size = seg_model.input_shape[1:3]
-                 preprocessed_roi = _preprocess_for_seg(region_cv, input_size)
-                 mask_pred = seg_model.predict(preprocessed_roi, verbose=0)[0]
-                 prob_mask = _to_prob(mask_pred)
-
-                 # Adaptive thresholding and GrabCut refinement
-                 initial_mask = (prob_mask >= _adaptive_prob_threshold(prob_mask)).astype(np.uint8)
-                 refined_mask = _grabcut_refine(region_cv, initial_mask)
-
-                 contours, _ = cv2.findContours(refined_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-                 if contours:
-                     cnt = max(contours, key=cv2.contourArea)
-                     x, y, w, h = cv2.boundingRect(cnt)
-                     length = round(h / self.px_per_cm, 2)
-                     breadth = round(w / self.px_per_cm, 2)
-                     area = round(cv2.contourArea(cnt) / (self.px_per_cm ** 2), 2)
-             except Exception as e:
-                 logging.warning(f"Segmentation process failed: {e}")
-
-         wound_type = "Unknown"
-         if "cls" in self.models_cache and self.models_cache["cls"]:
-             try:
-                 wound_region_pil = Image.fromarray(cv2.cvtColor(region_cv, cv2.COLOR_BGR2RGB))
-                 classification_results = self.models_cache["cls"](wound_region_pil)
-                 wound_type = max(classification_results, key=lambda x: x["score"])["label"]
-             except Exception as e:
-                 logging.warning(f"Classification process failed: {e}")
-
-         return {
-             "wound_type": wound_type,
-             "length_cm": length,
-             "breadth_cm": breadth,
-             "surface_area_cm2": area,
-             "detection_confidence": detection_confidence,
-             "px_per_cm": self.px_per_cm
-         }

      def query_guidelines(self, query: str) -> str:
-         vector_store = self.knowledge_base_cache.get("vector_store")
-         if not vector_store:
-             return "Knowledge base unavailable."
-
-         retriever = vector_store.as_retriever(search_kwargs={"k": 10})
-         docs = retriever.invoke(query)
-         return "\n\n".join([
-             f"Source: {doc.metadata.get('source', 'N/A')}, Page: {doc.metadata.get('page', 'N/A')}\nContent: {doc.page_content}"
-             for doc in docs
-         ])
-
-     def generate_final_report(self, patient_info, visual_results, guideline_context, image_pil, max_new_tokens=2048):
-         return generate_medgemma_report(patient_info, visual_results, guideline_context, image_pil, max_new_tokens)
-
-     def save_and_commit_image(self, image_pil):
-         filename = f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.png"
-         local_path = os.path.join(self.config.UPLOADS_DIR, filename)
-         image_pil.convert("RGB").save(local_path)
-         logging.info(f"Image saved locally: {local_path}")
-
-         if self.config.HF_TOKEN and self.config.DATASET_ID:
-             try:
-                 api = _import_hf_hub()[0]()  # HfApi
-                 api.upload_file(
-                     path_or_fileobj=local_path,
-                     path_in_repo=f"images/{filename}",
-                     repo_id=self.config.DATASET_ID,
-                     repo_type="dataset",
-                     commit_message=f"Upload wound image: {filename}",
-                     token=self.config.HF_TOKEN
-                 )
-                 logging.info("✅ Image uploaded to HF dataset.")
-             except Exception as e:
-                 logging.warning(f"Upload failed: {e}")
-
-     @_SPACES_GPU(enable_queue=True, duration=120)
-     def full_analysis_pipeline(self, image, questionnaire_data):
          try:
-             self.save_and_commit_image(image)
-             visual = self.perform_visual_analysis(image)
-             patient_info = ", ".join([f"{k}: {v}" for k, v in questionnaire_data.items()])
-             query = f"best practices for managing a {visual['wound_type']} with moisture level '{questionnaire_data.get('moisture')}' and signs of infection '{questionnaire_data.get('infection')}' in a patient who is diabetic '{questionnaire_data.get('diabetic')}'"
-             guideline_context = self.query_guidelines(query)

-             return self.generate_final_report(patient_info, visual, guideline_context, image)

          except Exception as e:
-             logging.error(f"Pipeline error: {e}", exc_info=True)
-             return f"❌ Error: {e}"

- # Convenience function for external use
- @_SPACES_GPU(enable_queue=True, duration=180)
- def analyze_wound(image: Image.Image, questionnaire_data: Dict[str, Any]) -> str:
-     """
-     Main entry point for wound analysis
-     """
-     proc = get_processor()
-     return proc.full_analysis_pipeline(image, questionnaire_data)
+ # smartheal_ai_processor.py
+ # Verbose, instrumented version — preserves public class/function names
+ # Turn on deep logging: export LOGLEVEL=DEBUG SMARTHEAL_DEBUG=1

  import os
  import logging
+ from datetime import datetime
+ from typing import Optional, Dict, List, Tuple
+
+ # ---- Environment defaults (do NOT globally hint CUDA here) ----
+ os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
+ LOGLEVEL = os.getenv("LOGLEVEL", "INFO").upper()
+ SMARTHEAL_DEBUG = os.getenv("SMARTHEAL_DEBUG", "0") == "1"
+
  import cv2
  import numpy as np
  from PIL import Image
+ from PIL.ExifTags import TAGS

+ # --- Logging config ---
  logging.basicConfig(
+     level=getattr(logging, LOGLEVEL, logging.INFO),
+     format="%(asctime)s - %(levelname)s - %(message)s",
  )

+ def _log_kv(prefix: str, kv: Dict):
+     logging.debug(prefix + " | " + " | ".join(f"{k}={v}" for k, v in kv.items()))

+ # --- Spaces GPU decorator (REQUIRED) ---
+ from spaces import GPU as _SPACES_GPU
+
+ @_SPACES_GPU(enable_queue=True)
+ def smartheal_gpu_stub(ping: int = 0) -> str:
+     return "ready"

  # ---- Paths / constants ----
  UPLOADS_DIR = "uploads"
 
  DEFAULT_PX_PER_CM = 38.0
  PX_PER_CM_MIN, PX_PER_CM_MAX = 5.0, 1200.0

+ # Segmentation preprocessing knobs
+ SEG_EXPECTS_RGB = os.getenv("SEG_EXPECTS_RGB", "1") == "1"  # most TF models trained on RGB
+ SEG_NORM = os.getenv("SEG_NORM", "0to1")  # "0to1" | "imagenet"
+ SEG_THRESH = float(os.getenv("SEG_THRESH", "0.5"))
+
  models_cache: Dict[str, object] = {}
  knowledge_base_cache: Dict[str, object] = {}
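For example, to feed the model BGR input with ImageNet normalization instead of the defaults, the knobs can be set before this module is imported (a sketch; the right values depend on how the segmentation model was trained):

    import os
    # Knobs are read at import time, so set them before importing the processor.
    os.environ["SEG_EXPECTS_RGB"] = "0"   # skip the BGR->RGB conversion
    os.environ["SEG_NORM"] = "imagenet"   # ImageNet mean/std instead of /255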

+ # ---------- Utilities to prevent CUDA in main process ----------
+ from contextlib import contextmanager
+
+ @contextmanager
+ def _no_cuda_env():
+     """
+     Mask GPUs so any library imported/constructed in the main process
+     cannot see CUDA (required for Spaces Stateless GPU).
+     """
+     prev = os.environ.get("CUDA_VISIBLE_DEVICES")
+     os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+     try:
+         yield
+     finally:
+         if prev is None:
+             os.environ.pop("CUDA_VISIBLE_DEVICES", None)
+         else:
+             os.environ["CUDA_VISIBLE_DEVICES"] = prev
+
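A minimal usage sketch (hypothetical torch import, which this module does not itself use; any CUDA-probing library behaves the same):

    # Imports made inside the block cannot see the GPU.
    with _no_cuda_env():
        import torch
        assert not torch.cuda.is_available()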
  # ---------- Lazy imports (wrapped where needed) ----------
  def _import_ultralytics():
      # Prevent Ultralytics from probing CUDA on import
 
      return YOLO

  def _import_tf_loader():
+     import tensorflow as tf
+     tf.config.set_visible_devices([], "GPU")
+     from tensorflow.keras.models import load_model
      return load_model

  def _import_hf_cls():
+     from transformers import pipeline
      return pipeline

  def _import_embeddings():
+     from langchain_community.embeddings import HuggingFaceEmbeddings
      return HuggingFaceEmbeddings

  def _import_langchain_pdf():
+     from langchain_community.document_loaders import PyPDFLoader
      return PyPDFLoader

  def _import_langchain_faiss():
+     from langchain_community.vectorstores import FAISS
      return FAISS

  def _import_hf_hub():
+     from huggingface_hub import HfApi, HfFolder
      return HfApi, HfFolder

  # ---------- SmartHeal prompts (system + user prefix) ----------
+ SMARTHEAL_SYSTEM_PROMPT = """\
  You are SmartHeal Clinical Assistant, a wound-care decision-support system.
  You analyze wound photographs and brief patient context to produce careful,
  specific, guideline-informed recommendations WITHOUT diagnosing. You always:
 
  - Safety: remind the user to seek clinician review for changes or red flags.
  """

+ SMARTHEAL_USER_PREFIX = """\
  Patient: {patient_info}
  Visual findings: type={wound_type}, size={length_cm}x{breadth_cm} cm, area={area_cm2} cm^2,
  detection_conf={det_conf:.2f}, calibration={px_per_cm} px/cm.
 
      model = YOLO(YOLO_MODEL_PATH)
      return model
  def load_segmentation_model():
+     import tensorflow as tf
      load_model = _import_tf_loader()
      return load_model(SEG_MODEL_PATH, compile=False, custom_objects={'InputLayer': tf.keras.layers.InputLayer})

  def load_classification_pipeline():
 
  def _preprocess_for_seg(bgr_roi: np.ndarray, target_hw: Tuple[int, int]) -> np.ndarray:
      H, W = target_hw
      resized = cv2.resize(bgr_roi, (W, H), interpolation=cv2.INTER_LINEAR)
+     if SEG_EXPECTS_RGB:
+         resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
+     if SEG_NORM.lower() == "imagenet":
+         x = _imagenet_norm(resized)
+     else:
+         x = resized.astype(np.float32) / 255.0
      x = np.expand_dims(x, axis=0)  # (1,H,W,3)
      return x
 
      return thr_otsu if score(af_otsu) <= score(af_pctl) else thr_pctl

  def _grabcut_refine(bgr: np.ndarray, seed01: np.ndarray, iters: int = 3) -> np.ndarray:
+     """Grow from a confident core into low-contrast margins."""
      h, w = bgr.shape[:2]
      gc = np.full((h, w), cv2.GC_PR_BGD, np.uint8)
      k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
 
      cv2.grabCut(bgr, gc, None, bgdModel, fgdModel, iters, cv2.GC_INIT_WITH_MASK)
      return np.where((gc == cv2.GC_FGD) | (gc == cv2.GC_PR_FGD), 1, 0).astype(np.uint8)
 
+ def _fill_holes(mask01: np.ndarray) -> np.ndarray:
+     h, w = mask01.shape[:2]
+     ff = np.zeros((h + 2, w + 2), np.uint8)
+     m = (mask01 * 255).astype(np.uint8).copy()
+     cv2.floodFill(m, ff, (0, 0), 255)   # flood the background from the corner
+     m_inv = cv2.bitwise_not(m)          # pixels never reached = interior holes
+     out = ((mask01 * 255) | m_inv) // 255
+     return out.astype(np.uint8)
+
+ def _clean_mask(mask01: np.ndarray) -> np.ndarray:
+     """Open → Close → Fill holes → Largest component (no dilation)."""
+     mask01 = (mask01 > 0).astype(np.uint8)
+     k3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+     k5 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
+     mask01 = cv2.morphologyEx(mask01, cv2.MORPH_OPEN, k3, iterations=1)
+     mask01 = cv2.morphologyEx(mask01, cv2.MORPH_CLOSE, k5, iterations=1)
+     mask01 = _fill_holes(mask01)
+     # Keep largest component only
+     num, labels, stats, _ = cv2.connectedComponentsWithStats(mask01, 8)
+     if num > 1:
+         areas = stats[1:, cv2.CC_STAT_AREA]
+         if areas.size:
+             largest_idx = 1 + int(np.argmax(areas))
+             mask01 = (labels == largest_idx).astype(np.uint8)
+     return (mask01 > 0).astype(np.uint8)
+
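A small sanity check of the cleanup chain: an isolated speckle is opened away and an interior hole is filled, leaving one solid component:

    m = np.zeros((64, 64), np.uint8)
    cv2.circle(m, (32, 32), 20, 1, -1)   # solid disc
    cv2.circle(m, (32, 32), 6, 0, -1)    # punch an interior hole
    m[2, 2] = 1                          # isolated speckle
    out = _clean_mask(m)
    assert out[32, 32] == 1 and out[2, 2] == 0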
+ # Global last debug dict (per-process)
+ _last_seg_debug: Dict[str, object] = {}
+
+ def segment_wound(image_bgr: np.ndarray, ts: str, out_dir: str) -> Tuple[np.ndarray, Dict[str, object]]:
+     """
+     TF model → adaptive threshold on prob → GrabCut grow → cleanup.
+     Fallback: KMeans-Lab.
+     Returns (mask_uint8_0_255, debug_dict)
+     """
+     debug = {"used": None, "reason": None, "positive_fraction": 0.0,
+              "thr": None, "heatmap_path": None, "roi_seen_by_model": None}
+
+     seg_model = models_cache.get("seg", None)
+
+     # --- Model path ---
+     if seg_model is not None:
+         try:
+             ishape = getattr(seg_model, "input_shape", None)
+             if not ishape or len(ishape) < 4:
+                 raise ValueError(f"Bad seg input_shape: {ishape}")
+             th, tw = int(ishape[1]), int(ishape[2])
+
+             x = _preprocess_for_seg(image_bgr, (th, tw))
+             roi_seen_path = None
+             if SMARTHEAL_DEBUG:
+                 roi_seen_path = os.path.join(out_dir, f"roi_for_seg_{ts}.png")
+                 cv2.imwrite(roi_seen_path, image_bgr)
+
+             pred = seg_model.predict(x, verbose=0)
+             if isinstance(pred, (list, tuple)):
+                 pred = pred[0]
+             p = _to_prob(pred)
+             p = cv2.resize(p, (image_bgr.shape[1], image_bgr.shape[0]), interpolation=cv2.INTER_LINEAR)
+
+             heatmap_path = None
+             if SMARTHEAL_DEBUG:
+                 hm = (np.clip(p, 0, 1) * 255).astype(np.uint8)
+                 heat = cv2.applyColorMap(hm, cv2.COLORMAP_JET)
+                 heatmap_path = os.path.join(out_dir, f"seg_pred_heatmap_{ts}.png")
+                 cv2.imwrite(heatmap_path, heat)
+
+             thr = _adaptive_prob_threshold(p)
+             core01 = (p >= thr).astype(np.uint8)
+             core_frac = float(core01.sum()) / float(core01.size)
+
+             if core_frac < 0.005:
+                 thr2 = max(thr - 0.10, 0.15)
+                 core01 = (p >= thr2).astype(np.uint8)
+                 thr = thr2
+                 core_frac = float(core01.sum()) / float(core01.size)
+
+             if core01.any():
+                 gc01 = _grabcut_refine(image_bgr, core01, iters=3)
+                 mask01 = _clean_mask(gc01)
+             else:
+                 mask01 = np.zeros(core01.shape, np.uint8)
+
+             pos_frac = float(mask01.sum()) / float(mask01.size)
+             logging.info(f"SegModel USED | thr={float(thr):.2f} core_frac={core_frac:.4f} final_frac={pos_frac:.4f}")
+
+             debug.update({
+                 "used": "tf_model",
+                 "reason": "ok",
+                 "positive_fraction": pos_frac,
+                 "thr": float(thr),
+                 "heatmap_path": heatmap_path,
+                 "roi_seen_by_model": roi_seen_path
+             })
+             return (mask01 * 255).astype(np.uint8), debug
+
+         except Exception as e:
+             logging.warning(f"⚠️ Segmentation model failed → fallback. Reason: {e}")
+             debug.update({"used": "fallback_kmeans", "reason": f"model_failed: {e}"})
+
+     # --- Fallback: KMeans in Lab (reddest cluster as wound) ---
+     Z = image_bgr.reshape((-1, 3)).astype(np.float32)
+     criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
+     _, labels, centers = cv2.kmeans(Z, 2, None, criteria, 5, cv2.KMEANS_PP_CENTERS)
+     centers_u8 = centers.astype(np.uint8).reshape(1, 2, 3)
+     centers_lab = cv2.cvtColor(centers_u8, cv2.COLOR_BGR2LAB)[0]
+     wound_idx = int(np.argmax(centers_lab[:, 1]))  # maximize a* (red)
+     mask01 = (labels.reshape(image_bgr.shape[:2]) == wound_idx).astype(np.uint8)
+     mask01 = _clean_mask(mask01)
+
+     pos_frac = float(mask01.sum()) / float(mask01.size)
+     logging.info(f"KMeans USED | final_frac={pos_frac:.4f}")
+
+     debug.update({
+         "used": "fallback_kmeans",
+         "reason": debug.get("reason") or "no_model",
+         "positive_fraction": pos_frac,
+         "thr": None
+     })
+     return (mask01 * 255).astype(np.uint8), debug
+
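Callers get the mask together with a provenance record, which keeps the fallback path auditable (a sketch, assuming a BGR ROI and a writable output directory):

    mask255, dbg = segment_wound(roi_bgr, ts="20250101_000000", out_dir="uploads/analysis")
    logging.info(f"seg via {dbg['used']} ({dbg['reason']}), frac={dbg['positive_fraction']:.4f}")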
+ # ---------- Measurement + overlay helpers ----------
+ def largest_component_mask(binary01: np.ndarray, min_area_px: int = 50) -> np.ndarray:
+     num, labels, stats, _ = cv2.connectedComponentsWithStats(binary01.astype(np.uint8), connectivity=8)
+     if num <= 1:
+         return binary01.astype(np.uint8)
+     areas = stats[1:, cv2.CC_STAT_AREA]
+     if areas.size == 0 or areas.max() < min_area_px:
+         return binary01.astype(np.uint8)
+     largest_idx = 1 + int(np.argmax(areas))
+     return (labels == largest_idx).astype(np.uint8)
+
+ def measure_min_area_rect(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, float, Tuple]:
+     contours, _ = cv2.findContours(mask01.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     if not contours:
+         return 0.0, 0.0, (None, None)
+     cnt = max(contours, key=cv2.contourArea)
+     rect = cv2.minAreaRect(cnt)
+     (w_px, h_px) = rect[1]
+     length_px, breadth_px = (max(w_px, h_px), min(w_px, h_px))
+     length_cm = round(length_px / max(px_per_cm, 1e-6), 2)
+     breadth_cm = round(breadth_px / max(px_per_cm, 1e-6), 2)
+     box = cv2.boxPoints(rect).astype(int)
+     return length_cm, breadth_cm, (box, rect[0])
+
+ def area_cm2_from_contour(mask01: np.ndarray, px_per_cm: float) -> Tuple[float, Optional[np.ndarray]]:
+     """Area from largest polygon (sub-pixel); returns (area_cm2, contour)."""
+     m = (mask01 > 0).astype(np.uint8)
+     contours, _ = cv2.findContours(m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     if not contours:
+         return 0.0, None
+     cnt = max(contours, key=cv2.contourArea)
+     poly_area_px2 = float(cv2.contourArea(cnt))
+     area_cm2 = round(poly_area_px2 / (max(px_per_cm, 1e-6) ** 2), 2)
+     return area_cm2, cnt
+
+ def clamp_area_with_minrect(cnt: np.ndarray, px_per_cm: float, area_cm2_poly: float) -> float:
+     rect = cv2.minAreaRect(cnt)
+     (w_px, h_px) = rect[1]
+     rect_area_px2 = float(max(w_px, 0.0) * max(h_px, 0.0))
+     rect_area_cm2 = rect_area_px2 / (max(px_per_cm, 1e-6) ** 2)
+     return round(min(area_cm2_poly, rect_area_cm2 * 1.05), 2)
+
+ def draw_measurement_overlay(
589
+ base_bgr: np.ndarray,
590
+ mask01: np.ndarray,
591
+ rect_box: np.ndarray,
592
+ length_cm: float,
593
+ breadth_cm: float,
594
+ thickness: int = 2
595
+ ) -> np.ndarray:
596
+ """
597
+ 1) Strong red mask overlay + white contour
598
+ 2) Min-area rectangle
599
+ 3) Double-headed arrows labeled Length/Width
600
+ """
601
+ overlay = base_bgr.copy()
602
+
603
+ # Mask tint
604
+ mask255 = (mask01 * 255).astype(np.uint8)
605
+ mask3 = cv2.merge([mask255, mask255, mask255])
606
+ red = np.zeros_like(overlay); red[:] = (0, 0, 255)
607
+ alpha = 0.55
608
+ tinted = cv2.addWeighted(overlay, 1 - alpha, red, alpha, 0)
609
+ overlay = np.where(mask3 > 0, tinted, overlay)
610
+
611
+ # Contour
612
+ cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
613
+ if cnts:
614
+ cv2.drawContours(overlay, cnts, -1, (255, 255, 255), 2)
615
+
616
+ if rect_box is not None:
617
+ cv2.polylines(overlay, [rect_box], True, (255, 255, 255), thickness)
618
+ pts = rect_box.reshape(-1, 2)
619
+
620
+ def midpoint(a, b): return (int((a[0] + b[0]) / 2), int((a[1] + b[1]) / 2))
621
+ e = [np.linalg.norm(pts[i] - pts[(i + 1) % 4]) for i in range(4)]
622
+ long_edge_idx = int(np.argmax(e))
623
+ mids = [midpoint(pts[i], pts[(i + 1) % 4]) for i in range(4)]
624
+ long_pair = (long_edge_idx, (long_edge_idx + 2) % 4)
625
+ short_pair = ((long_edge_idx + 1) % 4, (long_edge_idx + 3) % 4)
626
+
627
+ def draw_double_arrow(img, p1, p2):
628
+ cv2.arrowedLine(img, p1, p2, (0, 0, 0), thickness + 2, tipLength=0.05)
629
+ cv2.arrowedLine(img, p2, p1, (0, 0, 0), thickness + 2, tipLength=0.05)
630
+ cv2.arrowedLine(img, p1, p2, (255, 255, 255), thickness, tipLength=0.05)
631
+ cv2.arrowedLine(img, p2, p1, (255, 255, 255), thickness, tipLength=0.05)
632
+
633
+ def put_label(text, anchor):
634
+ org = (anchor[0] + 6, anchor[1] - 6)
635
+ cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 4, cv2.LINE_AA)
636
+ cv2.putText(overlay, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
637
+
638
+ draw_double_arrow(overlay, mids[long_pair[0]], mids[long_pair[1]])
639
+ draw_double_arrow(overlay, mids[short_pair[0]], mids[short_pair[1]])
640
+ put_label(f"Length: {length_cm:.2f} cm", mids[long_pair[0]])
641
+ put_label(f"Width: {breadth_cm:.2f} cm", mids[short_pair[0]])
642
+
643
+ return overlay
644
+
645
+ # ---------- AI PROCESSOR ----------
  class AIProcessor:
      def __init__(self):
          self.models_cache = models_cache
          self.knowledge_base_cache = knowledge_base_cache
+         self.uploads_dir = UPLOADS_DIR
+         self.dataset_id = DATASET_ID
+         self.hf_token = HF_TOKEN
+
+     def _ensure_analysis_dir(self) -> str:
+         out_dir = os.path.join(self.uploads_dir, "analysis")
+         os.makedirs(out_dir, exist_ok=True)
+         return out_dir
+
+     def perform_visual_analysis(self, image_pil: Image.Image) -> Dict:
+         """
+         YOLO detect → crop ROI → segment_wound(ROI) → clean mask →
+         minAreaRect measurement (cm) using EXIF px/cm → save outputs.
+         """
+         try:
+             px_per_cm, exif_meta = estimate_px_per_cm_from_exif(image_pil, DEFAULT_PX_PER_CM)
+             # Guardrails for calibration to avoid huge area blow-ups
+             px_per_cm = float(np.clip(px_per_cm, 20.0, 350.0))
+             if (exif_meta or {}).get("used") != "exif":
+                 logging.warning(f"Calibration fallback used: px_per_cm={px_per_cm:.2f} (default). Prefer ruler/Aruco for accuracy.")
+
+             image_cv = cv2.cvtColor(np.array(image_pil.convert("RGB")), cv2.COLOR_RGB2BGR)
+
+             # --- Detection ---
+             det_model = self.models_cache.get("det")
+             if det_model is None:
+                 raise RuntimeError("YOLO model not loaded")
+             # Force CPU inference and avoid CUDA touch
+             results = det_model.predict(image_cv, verbose=False, device="cpu")
+             if (not results) or (not getattr(results[0], "boxes", None)) or (len(results[0].boxes) == 0):
+                 # Guard only the import, so gr.Error is not swallowed by its own except
+                 try:
+                     import gradio as gr
+                 except ImportError:
+                     raise RuntimeError("No wound could be detected.")
+                 raise gr.Error("No wound could be detected.")
+
+             box = results[0].boxes[0].xyxy[0].cpu().numpy().astype(int)
+             x1, y1, x2, y2 = [int(v) for v in box]
+             x1, y1 = max(0, x1), max(0, y1)
+             x2, y2 = min(image_cv.shape[1], x2), min(image_cv.shape[0], y2)
+             roi = image_cv[y1:y2, x1:x2].copy()
+             if roi.size == 0:
+                 try:
+                     import gradio as gr
+                 except ImportError:
+                     raise RuntimeError("Detected ROI is empty.")
+                 raise gr.Error("Detected ROI is empty.")
+
+             out_dir = self._ensure_analysis_dir()
+             ts = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+             # --- Segmentation (model-first + KMeans fallback) ---
+             mask_u8_255, seg_debug = segment_wound(roi, ts, out_dir)
+             mask01 = (mask_u8_255 > 127).astype(np.uint8)
+
+             if mask01.any():
+                 mask01 = _clean_mask(mask01)
+                 logging.debug(f"Mask postproc: px_after={int(mask01.sum())}")
+
+             # --- Measurement (accurate & conservative) ---
+             if mask01.any():
+                 length_cm, breadth_cm, (box_pts, _) = measure_min_area_rect(mask01, px_per_cm)
+                 area_poly_cm2, largest_cnt = area_cm2_from_contour(mask01, px_per_cm)
+                 if largest_cnt is not None:
+                     surface_area_cm2 = clamp_area_with_minrect(largest_cnt, px_per_cm, area_poly_cm2)
+                 else:
+                     surface_area_cm2 = area_poly_cm2
+
+                 anno_roi = draw_measurement_overlay(roi, mask01, box_pts, length_cm, breadth_cm)
+                 segmentation_empty = False
+             else:
+                 # Fallback if seg failed: use ROI dimensions
+                 h_px = max(0, y2 - y1); w_px = max(0, x2 - x1)
+                 length_cm = round(max(h_px, w_px) / px_per_cm, 2)
+                 breadth_cm = round(min(h_px, w_px) / px_per_cm, 2)
+                 surface_area_cm2 = round((h_px * w_px) / (px_per_cm ** 2), 2)
+                 anno_roi = roi.copy()
+                 cv2.rectangle(anno_roi, (2, 2), (anno_roi.shape[1]-3, anno_roi.shape[0]-3), (0, 0, 255), 3)
+                 cv2.line(anno_roi, (0, 0), (anno_roi.shape[1]-1, anno_roi.shape[0]-1), (0, 0, 255), 2)
+                 cv2.line(anno_roi, (anno_roi.shape[1]-1, 0), (0, anno_roi.shape[0]-1), (0, 0, 255), 2)
+                 box_pts = None
+                 segmentation_empty = True
+
+             # --- Save visualizations ---
+             original_path = os.path.join(out_dir, f"original_{ts}.png")
+             cv2.imwrite(original_path, image_cv)
+
+             det_vis = image_cv.copy()
+             cv2.rectangle(det_vis, (x1, y1), (x2, y2), (0, 255, 0), 2)
+             detection_path = os.path.join(out_dir, f"detection_{ts}.png")
+             cv2.imwrite(detection_path, det_vis)
+
+             roi_mask_path = os.path.join(out_dir, f"roi_mask_{ts}.png")
+             cv2.imwrite(roi_mask_path, (mask01 * 255).astype(np.uint8))
+
+             # ROI overlay (mask tint + contour, without arrows)
+             mask255 = (mask01 * 255).astype(np.uint8)
+             mask3 = cv2.merge([mask255, mask255, mask255])
+             red = np.zeros_like(roi); red[:] = (0, 0, 255)
+             alpha = 0.55
+             tinted = cv2.addWeighted(roi, 1 - alpha, red, alpha, 0)
+             if mask255.any():
+                 roi_overlay = np.where(mask3 > 0, tinted, roi)
+                 cnts, _ = cv2.findContours(mask255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+                 cv2.drawContours(roi_overlay, cnts, -1, (255, 255, 255), 2)
+             else:
+                 roi_overlay = anno_roi
+
+             seg_full = image_cv.copy()
+             seg_full[y1:y2, x1:x2] = roi_overlay
+             segmentation_path = os.path.join(out_dir, f"segmentation_{ts}.png")
+             cv2.imwrite(segmentation_path, seg_full)
+
+             segmentation_roi_path = os.path.join(out_dir, f"segmentation_roi_{ts}.png")
+             cv2.imwrite(segmentation_roi_path, roi_overlay)
+
+             # Annotated (mask + arrows + labels) in full-frame
+             anno_full = image_cv.copy()
+             anno_full[y1:y2, x1:x2] = anno_roi
+             annotated_seg_path = os.path.join(out_dir, f"segmentation_annotated_{ts}.png")
+             cv2.imwrite(annotated_seg_path, anno_full)
+
+             # --- Optional classification ---
+             wound_type = "Unknown"
+             cls_pipe = self.models_cache.get("cls")
+             if cls_pipe is not None:
+                 try:
+                     preds = cls_pipe(Image.fromarray(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)))
+                     if preds:
+                         wound_type = max(preds, key=lambda x: x.get("score", 0)).get("label", "Unknown")
+                 except Exception as e:
+                     logging.warning(f"Classification failed: {e}")
+
+             # Log end-of-seg summary
+             seg_summary = {
+                 "seg_used": seg_debug.get("used"),
+                 "seg_reason": seg_debug.get("reason"),
+                 "positive_fraction": round(float(seg_debug.get("positive_fraction", 0.0)), 6),
+                 "threshold": seg_debug.get("thr"),
+                 "segmentation_empty": segmentation_empty,
+                 "exif_px_per_cm": round(px_per_cm, 3),
+             }
+             _log_kv("SEG_SUMMARY", seg_summary)
+
+             return {
+                 "wound_type": wound_type,
+                 "length_cm": length_cm,
+                 "breadth_cm": breadth_cm,
+                 "surface_area_cm2": surface_area_cm2,
+                 "px_per_cm": round(px_per_cm, 2),
+                 "calibration_meta": exif_meta,
+                 "detection_confidence": float(results[0].boxes.conf[0].cpu().item())
+                 if getattr(results[0].boxes, "conf", None) is not None else 0.0,
+                 "detection_image_path": detection_path,
+                 "segmentation_image_path": annotated_seg_path,
+                 "segmentation_annotated_path": annotated_seg_path,
+                 "segmentation_roi_path": segmentation_roi_path,
+                 "roi_mask_path": roi_mask_path,
+                 "segmentation_empty": segmentation_empty,
+                 "segmentation_debug": seg_debug,
+                 "original_image_path": original_path,
+             }
+         except Exception as e:
+             logging.error(f"Visual analysis failed: {e}", exc_info=True)
+             raise
 
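Downstream code can consume the returned dict directly, including the saved artifact paths (a sketch; `proc` is an AIProcessor instance and `img` a PIL image):

    res = proc.perform_visual_analysis(img)
    print(res["wound_type"], res["length_cm"], res["breadth_cm"], res["surface_area_cm2"])
    print("annotated overlay:", res["segmentation_annotated_path"])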
+     # ---------- Knowledge base + reporting ----------
      def query_guidelines(self, query: str) -> str:
          try:
+             vs = self.knowledge_base_cache.get("vector_store")
+             if not vs:
+                 return "Knowledge base is not available."
+             retriever = vs.as_retriever(search_kwargs={"k": 5})
+             # Modern API (avoid get_relevant_documents deprecation)
+             docs = retriever.invoke(query)
+             lines: List[str] = []
+             for d in docs:
+                 src = (d.metadata or {}).get("source", "N/A")
+                 txt = (d.page_content or "")[:300]
+                 lines.append(f"Source: {src}\nContent: {txt}...")
+             return "\n\n".join(lines) if lines else "No relevant guideline snippets found."
+         except Exception as e:
+             logging.warning(f"Guidelines query failed: {e}")
+             return f"Guidelines query failed: {str(e)}"
+
+     def _generate_fallback_report(self, patient_info: str, visual_results: Dict, guideline_context: str) -> str:
+         return f"""# 🩺 SmartHeal AI - Comprehensive Wound Analysis Report
+
+ ## 📋 Patient Information
+ {patient_info}
+
+ ## 🔍 Visual Analysis Results
+ - **Wound Type**: {visual_results.get('wound_type', 'Unknown')}
+ - **Dimensions**: {visual_results.get('length_cm', 0)} cm × {visual_results.get('breadth_cm', 0)} cm
+ - **Surface Area**: {visual_results.get('surface_area_cm2', 0)} cm²
+ - **Detection Confidence**: {visual_results.get('detection_confidence', 0):.1%}
+ - **Calibration**: {visual_results.get('px_per_cm','?')} px/cm ({(visual_results.get('calibration_meta') or {}).get('used','default')})
+
+ ## 📊 Analysis Images
+ - **Original**: {visual_results.get('original_image_path', 'N/A')}
+ - **Detection**: {visual_results.get('detection_image_path', 'N/A')}
+ - **Segmentation**: {visual_results.get('segmentation_image_path', 'N/A')}
+ - **Annotated**: {visual_results.get('segmentation_annotated_path', 'N/A')}
+
+ ## 🎯 Clinical Summary
+ Automated analysis provides quantitative measurements; verify via clinical examination.
+
+ ## 💊 Recommendations
+ - Cleanse wound gently; select dressing per exudate/infection risk
+ - Debride necrotic tissue if indicated (clinical decision)
+ - Document with serial photos and measurements
+
+ ## 📅 Monitoring
+ - Daily in week 1, then every 2–3 days (or as indicated)
+ - Weekly progress review
+
+ ## 📚 Guideline Context
+ {(guideline_context or '')[:800]}{"..." if guideline_context and len(guideline_context) > 800 else ''}
+
+ **Disclaimer:** Automated, for decision support only. Verify clinically.
+ """
+
+     def generate_final_report(
+         self,
+         patient_info: str,
+         visual_results: Dict,
+         guideline_context: str,
+         image_pil: Image.Image,
+         max_new_tokens: Optional[int] = None,
+     ) -> str:
+         try:
+             report = generate_medgemma_report(
+                 patient_info, visual_results, guideline_context, image_pil, max_new_tokens
+             )
+             if report and report.strip() and not report.startswith(("⚠️", "❌")):
+                 return report
+             logging.warning("VLM unavailable/invalid; using fallback.")
+             return self._generate_fallback_report(patient_info, visual_results, guideline_context)
          except Exception as e:
+             logging.error(f"Report generation failed: {e}")
+             return self._generate_fallback_report(patient_info, visual_results, guideline_context)
 
+     def save_and_commit_image(self, image_pil: Image.Image) -> str:
+         try:
+             os.makedirs(self.uploads_dir, exist_ok=True)
+             ts = datetime.now().strftime("%Y%m%d_%H%M%S")
+             filename = f"{ts}.png"
+             path = os.path.join(self.uploads_dir, filename)
+             image_pil.convert("RGB").save(path)
+             logging.info(f"✅ Image saved locally: {path}")
+
+             if HF_TOKEN and DATASET_ID:
+                 try:
+                     HfApi, HfFolder = _import_hf_hub()
+                     HfFolder.save_token(HF_TOKEN)
+                     api = HfApi()
+                     api.upload_file(
+                         path_or_fileobj=path,
+                         path_in_repo=f"images/{filename}",
+                         repo_id=DATASET_ID,
+                         repo_type="dataset",
+                         token=HF_TOKEN,
+                         commit_message=f"Upload wound image: {filename}",
+                     )
+                     logging.info("✅ Image committed to HF dataset")
+                 except Exception as e:
+                     logging.warning(f"HF upload failed: {e}")
+
+             return path
+         except Exception as e:
+             logging.error(f"Failed to save/commit image: {e}")
+             return ""
+
+     @_SPACES_GPU(enable_queue=True)
+     def full_analysis_pipeline(self, image_pil: Image.Image, questionnaire_data: Dict) -> Dict:
+         try:
+             saved_path = self.save_and_commit_image(image_pil)
+             visual_results = self.perform_visual_analysis(image_pil)
+
+             pi = questionnaire_data or {}
+             patient_info = (
+                 f"Age: {pi.get('age','N/A')}, "
+                 f"Diabetic: {pi.get('diabetic','N/A')}, "
+                 f"Allergies: {pi.get('allergies','N/A')}, "
+                 f"Date of Wound: {pi.get('date_of_injury','N/A')}, "
+                 f"Professional Care: {pi.get('professional_care','N/A')}, "
+                 f"Oozing/Bleeding: {pi.get('oozing_bleeding','N/A')}, "
+                 f"Infection: {pi.get('infection','N/A')}, "
+                 f"Moisture: {pi.get('moisture','N/A')}"
+             )
+
+             query = (
+                 f"best practices for managing a {visual_results.get('wound_type','Unknown')} "
+                 f"with moisture '{pi.get('moisture','unknown')}' and infection '{pi.get('infection','unknown')}' "
+                 f"in a diabetic status '{pi.get('diabetic','unknown')}'"
+             )
+             guideline_context = self.query_guidelines(query)
+
+             report = self.generate_final_report(patient_info, visual_results, guideline_context, image_pil)
+
+             return {
+                 "success": True,
+                 "visual_analysis": visual_results,
+                 "report": report,
+                 "saved_image_path": saved_path,
+                 "guideline_context": (guideline_context or "")[:500] + (
+                     "..." if guideline_context and len(guideline_context) > 500 else ""
+                 ),
+             }
+         except Exception as e:
+             logging.error(f"Pipeline error: {e}")
+             return {
+                 "success": False,
+                 "error": str(e),
+                 "visual_analysis": {},
+                 "report": f"Analysis failed: {str(e)}",
+                 "saved_image_path": None,
+                 "guideline_context": "",
+             }
+
+     @_SPACES_GPU(enable_queue=True)
+     def analyze_wound(self, image, questionnaire_data: Dict) -> Dict:
+         try:
+             if isinstance(image, str):
+                 if not os.path.exists(image):
+                     raise ValueError(f"Image file not found: {image}")
+                 image_pil = Image.open(image)
+             elif isinstance(image, Image.Image):
+                 image_pil = image
+             elif isinstance(image, np.ndarray):
+                 image_pil = Image.fromarray(image)
+             else:
+                 raise ValueError(f"Unsupported image type: {type(image)}")
+
+             return self.full_analysis_pipeline(image_pil, questionnaire_data or {})
+         except Exception as e:
+             logging.error(f"Wound analysis error: {e}")
+             return {
+                 "success": False,
+                 "error": str(e),
+                 "visual_analysis": {},
+                 "report": f"Analysis initialization failed: {str(e)}",
+                 "saved_image_path": None,
+                 "guideline_context": "",
+             }
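A minimal end-to-end sketch (assumes the detector, segmenter, and knowledge base have been loaded into the module caches elsewhere in this file):

    proc = AIProcessor()
    result = proc.analyze_wound("wound.jpg", {"diabetic": "yes", "moisture": "moderate", "infection": "no"})
    if result["success"]:
        print(result["report"])
    else:
        print("failed:", result["error"])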