colomboMk committed on
Commit
4f34f13
·
verified ·
1 Parent(s): 125c303

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +127 -144
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os
2
 
3
  # Route caches to /tmp to avoid filling the Space persistent storage
@@ -22,24 +23,20 @@ try:
22
  _ULTRA_OK = True
23
  except Exception:
24
  _ULTRA_OK = False
 
25
 
26
- # Config
27
- MAX_SIDE_PX = 70 # filtro lato massimo bbox per modello A (SAHI)
28
  SEG_DEFAULT_ALPHA = 0.45
29
-
 
 
30
  # Simple global caches to avoid reloading models each click
31
  _DET_MODEL_CACHE = {} # key: (weights_path, device) -> AutoDetectionModel
32
  _SEG_MODEL_CACHE = {} # key: weights_path -> YOLO
 
33
 
34
- def _ensure_rgb(img: np.ndarray) -> np.ndarray:
35
- if img is None:
36
- return None
37
- if img.ndim == 2:
38
- return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
39
- if img.shape[2] == 4:
40
- return cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
41
- return img
42
-
43
  def _choose_device(user_choice: str) -> str:
44
  if user_choice != "auto":
45
  return user_choice
@@ -54,14 +51,13 @@ def _get_det_model(weights_path: str, device: str, conf: float):
54
  Returns a cached SAHI AutoDetectionModel. Updates confidence on the fly.
55
  """
56
  if not os.path.exists(weights_path):
57
- raise gr.Error(f"Pesi detection non trovati: {weights_path}")
58
  key = (weights_path, device)
59
  model = _DET_MODEL_CACHE.get(key)
60
  if model is None:
61
- # SAHI uses yolov8 wrapper for Ultralytics models (works for v8/v9/v11)
62
  try:
63
  model = AutoDetectionModel.from_pretrained(
64
- model_type="yolov8",
65
  model_path=weights_path,
66
  confidence_threshold=conf,
67
  device=device,
@@ -69,7 +65,7 @@ def _get_det_model(weights_path: str, device: str, conf: float):
69
  except Exception:
70
  # CPU fallback
71
  model = AutoDetectionModel.from_pretrained(
72
- model_type="yolov8",
73
  model_path=weights_path,
74
  confidence_threshold=conf,
75
  device="cpu",
@@ -85,29 +81,96 @@ def _get_det_model(weights_path: str, device: str, conf: float):
85
 
86
  def _get_seg_model(weights_path: str):
87
  if not _ULTRA_OK:
88
- raise gr.Error("Ultralytics non installato. Installa con: pip install ultralytics")
89
  if not os.path.exists(weights_path):
90
- raise gr.Error(f"Pesi segmentation non trovati: {weights_path}")
91
  model = _SEG_MODEL_CACHE.get(weights_path)
92
  if model is None:
93
  model = YOLO(weights_path)
94
  _SEG_MODEL_CACHE[weights_path] = model
95
  return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
- def _optimize_slicing_dims(H: int, W: int, slice_h: int, slice_w: int, overlap_h: float, overlap_w: float, auto_opt: bool):
98
- if not auto_opt:
99
- return int(slice_h), int(slice_w), float(overlap_h), float(overlap_w)
100
- sh = min(int(slice_h), H)
101
- sw = min(int(slice_w), W)
102
- # If the image already fits in one slice, remove overlap to reduce work
103
- oh = 0.0 if (H <= sh and W <= sw) else float(overlap_h)
104
- ow = 0.0 if (H <= sh and W <= sw) else float(overlap_w)
105
- return sh, sw, oh, ow
 
 
 
 
 
 
 
 
 
106
 
107
  def _draw_boxes_overlay(image_rgb: np.ndarray, sahi_result, target_class: str, use_target: bool):
108
  """
109
  Returns overlay_rgb (H,W,3), alpha_mask (H,W) uint8, counts_text
110
- Only draws rectangles (no labels). Filters boxes with max side > MAX_SIDE_PX.
111
  """
112
  H, W = image_rgb.shape[:2]
113
  overlay = np.zeros((H, W, 3), dtype=np.uint8)
@@ -135,8 +198,9 @@ def _draw_boxes_overlay(image_rgb: np.ndarray, sahi_result, target_class: str, u
135
  h = max(0, y2 - y1)
136
  if w == 0 or h == 0:
137
  continue
138
- if max(w, h) > MAX_SIDE_PX:
139
- continue
 
140
 
141
  area = getattr(item.bbox, "area", w * h)
142
  try:
@@ -163,9 +227,9 @@ def _draw_boxes_overlay(image_rgb: np.ndarray, sahi_result, target_class: str, u
163
  # Convert overlay BGR -> RGB
164
  overlay_rgb = cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB)
165
  if use_target:
166
- counts = f"target='{target_class}': {target_count} | totale: {total_count}"
167
  else:
168
- counts = f"totale: {total_count}"
169
  return overlay_rgb, alpha, counts
170
 
171
  def _draw_seg_overlay(image_rgb: np.ndarray, yolo_result, target_class: str, use_target: bool, fill_alpha: float = SEG_DEFAULT_ALPHA):
@@ -184,7 +248,7 @@ def _draw_seg_overlay(image_rgb: np.ndarray, yolo_result, target_class: str, use
184
  masks = getattr(r, "masks", None)
185
 
186
  if boxes is None or len(boxes) == 0:
187
- counts = f"target='{target_class}': 0 | totale: 0" if use_target else "totale: 0"
188
  return cv2.cvtColor(overlay_bgr, cv2.COLOR_BGR2RGB), alpha, counts
189
 
190
  N = len(boxes)
@@ -255,9 +319,9 @@ def _draw_seg_overlay(image_rgb: np.ndarray, yolo_result, target_class: str, use
255
 
256
  overlay_rgb = cv2.cvtColor(overlay_bgr, cv2.COLOR_BGR2RGB)
257
  if use_target:
258
- counts = f"target='{target_class}': {target_count} | totale: {total_count}"
259
  else:
260
- counts = f"totale: {total_count}"
261
  return overlay_rgb, alpha, counts
262
 
263
  def _composite_layers(base_rgb: np.ndarray, layers: list):
@@ -285,23 +349,9 @@ def _composite_layers(base_rgb: np.ndarray, layers: list):
285
 
286
  return np.clip(result, 0, 255).astype(np.uint8)
287
 
288
- def _sahi_predict(image_rgb: np.ndarray, det_model, slice_h, slice_w, overlap_h, overlap_w):
289
- return get_sliced_prediction(
290
- image_rgb,
291
- det_model,
292
- slice_height=int(slice_h),
293
- slice_width=int(slice_w),
294
- overlap_height_ratio=float(overlap_h),
295
- overlap_width_ratio=float(overlap_w),
296
- postprocess_class_agnostic=False,
297
- verbose=0,
298
- )
299
-
300
- # Gradio callables
301
-
302
  def on_image_upload(image, state):
303
  """
304
- Resetta gli overlay quando si carica una nuova immagine.
305
  """
306
  if image is None:
307
  return None, {"base": None, "det": None, "seg": None, "det_counts": "", "seg_counts": ""}, "", ""
@@ -309,60 +359,6 @@ def on_image_upload(image, state):
309
  new_state = {"base": img_rgb, "det": None, "seg": None, "det_counts": "", "seg_counts": ""}
310
  return img_rgb, new_state, "", ""
311
 
312
- def run_det(
313
- image, state,
314
- weights_det_path, conf_det, slice_h, slice_w, overlap_h, overlap_w, device,
315
- target_class, use_target, auto_opt_slice
316
- ):
317
- """
318
- Esegue il modello A (SAHI detection) e aggiorna solo l'overlay 'det'.
319
- Recompone l'immagine finale con entrambi i layer (det + seg) nell'ordine temporale.
320
- """
321
- if state is None or state.get("base") is None:
322
- raise gr.Error("Carica prima un'immagine.")
323
- base = state["base"]
324
- H, W = base.shape[:2]
325
- det_model = _get_det_model(weights_det_path, _choose_device(device), conf_det)
326
- sh, sw, oh, ow = _optimize_slicing_dims(H, W, slice_h, slice_w, overlap_h, overlap_w, auto_opt_slice)
327
- sahi_res = _sahi_predict(base, det_model, sh, sw, oh, ow)
328
-
329
- overlay_rgb, alpha, counts = _draw_boxes_overlay(base, sahi_res, target_class, bool(use_target))
330
-
331
- state["det"] = {"overlay": overlay_rgb, "alpha": alpha, "ts": time.time()}
332
- state["det_counts"] = counts
333
-
334
- layers = [state["det"], state.get("seg")]
335
- composite = _composite_layers(base, layers)
336
- return composite, state, state["det_counts"], state.get("seg_counts", "")
337
-
338
- def run_seg(
339
- image, state,
340
- weights_seg_path, conf_seg, device,
341
- target_class, use_target, seg_alpha
342
- ):
343
- """
344
- Esegue il modello B (YOLO segmentation) e aggiorna solo l'overlay 'seg'.
345
- Recompone l'immagine finale con entrambi i layer (det + seg) nell'ordine temporale.
346
- """
347
- if state is None or state.get("base") is None:
348
- raise gr.Error("Carica prima un'immagine.")
349
- base = state["base"]
350
- seg_model = _get_seg_model(weights_seg_path)
351
- # device is handled in predict
352
- try:
353
- seg_results = seg_model.predict(source=base, conf=float(conf_seg), device=_choose_device(device), verbose=False)
354
- r0 = seg_results[0] if isinstance(seg_results, (list, tuple)) else seg_results
355
- except Exception as e:
356
- raise gr.Error(f"Errore inferenza segmentation: {e}")
357
-
358
- overlay_rgb, alpha, counts = _draw_seg_overlay(base, r0, target_class, bool(use_target), float(seg_alpha))
359
- state["seg"] = {"overlay": overlay_rgb, "alpha": alpha, "ts": time.time()}
360
- state["seg_counts"] = counts
361
-
362
- layers = [state.get("det"), state["seg"]]
363
- composite = _composite_layers(base, layers)
364
- return composite, state, state.get("det_counts", ""), state["seg_counts"]
365
-
366
  def clear_overlays(image, state):
367
  if state is None or state.get("base") is None:
368
  return None, {"base": None, "det": None, "seg": None, "det_counts": "", "seg_counts": ""}, "", ""
@@ -372,9 +368,9 @@ def clear_overlays(image, state):
372
  state["det_counts"] = ""
373
  state["seg_counts"] = ""
374
  return base, state, "", ""
 
375
 
376
- # Maintenance helpers
377
-
378
  def _dir_size(path: str) -> int:
379
  try:
380
  total = 0
@@ -437,37 +433,24 @@ def clean_caches():
437
  except Exception:
438
  pass
439
  return "Removed:\n" + ("\n".join(removed) if removed else "(none)")
 
440
 
441
  def build_app():
442
- with gr.Blocks(title="YOLOv11 SAHI Detection + YOLO Segmentation (dual overlays)") as demo:
443
  gr.Markdown(
444
- "## Doppia inferenza su stessa immagine, overlay combinati\n"
445
- "- Modello A: SAHI detection (usa pesi YOLOv11 seg come detection) — solo bbox, filtro lato > 70px.\n"
446
- "- Modello B: YOLO segmentation nativo — maschere riempite + contorno.\n"
447
- "- Esegui i modelli con pulsanti separati; gli overlay si accumulano sull'immagine base (nuovo overlay sopra).\n"
448
- "- Opzionale: disabilita l'evidenziazione della classe target se non ti serve."
449
  )
450
 
451
  state = gr.State({"base": None, "det": None, "seg": None, "det_counts": "", "seg_counts": ""})
452
 
453
  with gr.Row():
454
  with gr.Column(scale=1):
455
- img_in = gr.Image(label="Immagine", type="numpy")
456
- with gr.Accordion("Pesi modelli", open=True):
457
- weights_det = gr.Textbox(
458
- label="Pesi Modello A (Detection + SAHI, .pt)",
459
- value="weights/best.pt",
460
- )
461
- weights_seg = gr.Textbox(
462
- label="Pesi Modello B (Segmentation, .pt)",
463
- value="weights/seg.pt",
464
- )
465
-
466
- with gr.Row():
467
- target = gr.Textbox(label="Classe target", value="berry")
468
- use_target = gr.Checkbox(label="Usa classe target", value=True)
469
 
470
- with gr.Tab("Modello A — SAHI Detection"):
471
  with gr.Row():
472
  conf_det = gr.Slider(0.0, 1.0, value=0.35, step=0.01, label="Confidence (A)")
473
  device_a = gr.Dropdown(["auto", "cuda:0", "cpu"], value="auto", label="Device")
@@ -477,29 +460,28 @@ def build_app():
477
  with gr.Row():
478
  overlap_h = gr.Slider(0.0, 0.9, value=0.10, step=0.01, label="Overlap H (A)")
479
  overlap_w = gr.Slider(0.0, 0.9, value=0.10, step=0.01, label="Overlap W (A)")
480
- auto_opt_slice = gr.Checkbox(label="Ottimizza slicing su immagini piccole", value=True)
481
- btn_det = gr.Button("Esegui Modello A (SAHI)")
482
 
483
- with gr.Tab("Modello B — YOLO Segmentation"):
484
  with gr.Row():
485
  conf_seg = gr.Slider(0.0, 1.0, value=0.35, step=0.01, label="Confidence (B)")
486
- seg_alpha = gr.Slider(0.0, 1.0, value=SEG_DEFAULT_ALPHA, step=0.05, label="Alpha maschere (B)")
487
  device_b = gr.Dropdown(["auto", "cuda:0", "cpu"], value="auto", label="Device")
488
- btn_seg = gr.Button("Esegui Modello B (Seg)")
489
 
490
  with gr.Row():
491
- btn_clear = gr.Button("Pulisci overlay", variant="secondary")
492
 
493
- with gr.Accordion("Manutenzione spazio", open=False):
494
- btn_check = gr.Button("Controlla storage")
495
- btn_clean = gr.Button("Pulisci cache")
496
- maint_out = gr.Textbox(label="Log manutenzione", interactive=False)
497
 
498
  with gr.Column(scale=2):
499
- img_out = gr.Image(label="Risultato combinato", type="numpy")
500
  with gr.Row():
501
- counts_out_det = gr.Textbox(label="Conteggi (A)", interactive=False)
502
- counts_out_seg = gr.Textbox(label="Conteggi (B)", interactive=False)
503
 
504
  # Wiring
505
  img_in.change(
@@ -512,8 +494,8 @@ def build_app():
512
  run_det,
513
  inputs=[
514
  img_in, state,
515
- weights_det, conf_det, slice_h, slice_w, overlap_h, overlap_w, device_a,
516
- target, use_target, auto_opt_slice
517
  ],
518
  outputs=[img_out, state, counts_out_det, counts_out_seg],
519
  )
@@ -522,8 +504,9 @@ def build_app():
522
  run_seg,
523
  inputs=[
524
  img_in, state,
525
- weights_seg, conf_seg, device_b,
526
- target, use_target, seg_alpha
 
527
  ],
528
  outputs=[img_out, state, counts_out_det, counts_out_seg],
529
  )
 
1
+ #region Imports
2
  import os
3
 
4
  # Route caches to /tmp to avoid filling the Space persistent storage
 
23
  _ULTRA_OK = True
24
  except Exception:
25
  _ULTRA_OK = False
26
+ #endregion
27
 
28
+ #region Config and setup
29
+ MAX_SIDE_PX = -1 # 70 # possible filtering for max bbox side for Model A
30
  SEG_DEFAULT_ALPHA = 0.45
31
+ # Weights
32
+ WEIGHTS_DETECTION = "weights/berries.pt"
33
+ WEIGHTS_SEGMENTATION = "weights/bunches.pt"
34
  # Simple global caches to avoid reloading models each click
35
  _DET_MODEL_CACHE = {} # key: (weights_path, device) -> AutoDetectionModel
36
  _SEG_MODEL_CACHE = {} # key: weights_path -> YOLO
37
+ #endregion
38
 
39
+ #region Model and device handling
 
 
 
 
 
 
 
 
40
  def _choose_device(user_choice: str) -> str:
41
  if user_choice != "auto":
42
  return user_choice
 
51
  Returns a cached SAHI AutoDetectionModel. Updates confidence on the fly.
52
  """
53
  if not os.path.exists(weights_path):
54
+ raise gr.Error(f"Detection weights not found: {weights_path}")
55
  key = (weights_path, device)
56
  model = _DET_MODEL_CACHE.get(key)
57
  if model is None:
 
58
  try:
59
  model = AutoDetectionModel.from_pretrained(
60
+ model_type="yolov11",
61
  model_path=weights_path,
62
  confidence_threshold=conf,
63
  device=device,
 
65
  except Exception:
66
  # CPU fallback
67
  model = AutoDetectionModel.from_pretrained(
68
+ model_type="yolov11",
69
  model_path=weights_path,
70
  confidence_threshold=conf,
71
  device="cpu",
 
81
 
82
def _get_seg_model(weights_path: str):
    """Return a (cached) Ultralytics YOLO segmentation model for *weights_path*.

    Raises gr.Error when Ultralytics is not installed or the weights file is
    missing; otherwise loads the model once and serves it from the module-level
    cache on subsequent calls.
    """
    # Guard clauses: fail fast with a user-visible Gradio error.
    if not _ULTRA_OK:
        raise gr.Error("Ultralytics not found, please install it with: pip install ultralytics")
    if not os.path.exists(weights_path):
        raise gr.Error(f"Segmentation weights not found: {weights_path}")

    cached = _SEG_MODEL_CACHE.get(weights_path)
    if cached is not None:
        return cached

    loaded = YOLO(weights_path)
    _SEG_MODEL_CACHE[weights_path] = loaded
    return loaded
92
+ #endregion
93
+
94
+ #region Inference
95
def _sahi_predict(image_rgb: np.ndarray, det_model, slice_h, slice_w, overlap_h, overlap_w):
    """Run SAHI sliced inference on an RGB image with the given detection model.

    Slice sizes are coerced to int and overlap ratios to float before being
    forwarded to ``get_sliced_prediction``; postprocessing stays class-aware
    and verbosity is silenced.
    """
    slicing_kwargs = {
        "slice_height": int(slice_h),
        "slice_width": int(slice_w),
        "overlap_height_ratio": float(overlap_h),
        "overlap_width_ratio": float(overlap_w),
        "postprocess_class_agnostic": False,
        "verbose": 0,
    }
    return get_sliced_prediction(image_rgb, det_model, **slicing_kwargs)
106
+
107
def run_det(
    image, state,
    weights_det_path, conf_det, slice_h, slice_w, overlap_h, overlap_w, device,
    target_class="", use_target=False, auto_opt_slice=False
):
    """
    Run model A for berries and updates only 'det' overlay.
    Assemble final image with both layers (det + seg) in the proper order.

    Parameters
    ----------
    image, state : Gradio inputs; ``state["base"]`` must hold the uploaded RGB image.
    weights_det_path, conf_det : detection weights path and confidence threshold.
    slice_h, slice_w, overlap_h, overlap_w : SAHI slicing geometry.
    device : "auto", "cuda:0" or "cpu" (resolved via ``_choose_device``).
    target_class, use_target : optional target-class highlighting.
    auto_opt_slice : kept for backward compatibility; no longer used since the
        ``_optimize_slicing_dims`` step was removed.

    Returns ``(composite_image, state, det_counts, seg_counts)``.

    Fix: the event wiring now supplies only 9 positional values (the target /
    auto-opt components were removed from the UI), so the trailing parameters
    must have defaults or every click raises TypeError.
    """
    if state is None or state.get("base") is None:
        raise gr.Error("Loading an image is required before inference.")
    base = state["base"]
    det_model = _get_det_model(weights_det_path, _choose_device(device), conf_det)
    sahi_res = _sahi_predict(base, det_model, slice_h, slice_w, overlap_h, overlap_w)

    overlay_rgb, alpha, counts = _draw_boxes_overlay(base, sahi_res, target_class, bool(use_target))

    # Store the new detection layer with a timestamp so layer order is temporal.
    state["det"] = {"overlay": overlay_rgb, "alpha": alpha, "ts": time.time()}
    state["det_counts"] = counts

    layers = [state["det"], state.get("seg")]
    composite = _composite_layers(base, layers)
    return composite, state, state["det_counts"], state.get("seg_counts", "")
130
+
131
def run_seg(
    image, state,
    weights_seg_path, conf_seg, device,
    target_class="", use_target=False, seg_alpha=SEG_DEFAULT_ALPHA
):
    """
    Run model B for bunches and updates only 'seg' overlay.
    Assemble final image with both layers (det + seg) in the proper order.

    Parameters
    ----------
    image, state : Gradio inputs; ``state["base"]`` must hold the uploaded RGB image.
    weights_seg_path, conf_seg : segmentation weights path and confidence threshold.
    device : "auto", "cuda:0" or "cpu" (resolved via ``_choose_device``).
    target_class, use_target : optional target-class highlighting.
    seg_alpha : fill opacity for the mask overlay.

    Returns ``(composite_image, state, det_counts, seg_counts)``.

    Fix: the event wiring now supplies only 6 positional values, so the
    trailing parameters need defaults to avoid a TypeError on click.
    NOTE(review): the wiring passes the alpha slider positionally, where it
    binds to ``target_class`` (harmless while use_target=False, but the slider
    is then ignored) — wire it as ``seg_alpha=`` by keyword, or reorder inputs.
    """
    if state is None or state.get("base") is None:
        raise gr.Error("Loading an image is required before inference.")
    base = state["base"]
    seg_model = _get_seg_model(weights_seg_path)
    # device is handled in predict
    try:
        seg_results = seg_model.predict(source=base, conf=float(conf_seg), device=_choose_device(device), verbose=False)
        r0 = seg_results[0] if isinstance(seg_results, (list, tuple)) else seg_results
    except Exception as e:
        raise gr.Error(f"Error in segmentation inference: {e}")

    overlay_rgb, alpha, counts = _draw_seg_overlay(base, r0, target_class, bool(use_target), float(seg_alpha))
    # Store the new segmentation layer with a timestamp so layer order is temporal.
    state["seg"] = {"overlay": overlay_rgb, "alpha": alpha, "ts": time.time()}
    state["seg_counts"] = counts

    layers = [state.get("det"), state["seg"]]
    composite = _composite_layers(base, layers)
    return composite, state, state.get("det_counts", ""), state["seg_counts"]
158
+ #endregion
159
+
160
+ #region Draw
161
+ def _ensure_rgb(img: np.ndarray) -> np.ndarray:
162
+ if img is None:
163
+ return None
164
+ if img.ndim == 2:
165
+ return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
166
+ if img.shape[2] == 4:
167
+ return cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
168
+ return img
169
 
170
  def _draw_boxes_overlay(image_rgb: np.ndarray, sahi_result, target_class: str, use_target: bool):
171
  """
172
  Returns overlay_rgb (H,W,3), alpha_mask (H,W) uint8, counts_text
173
+ Only draws rectangles (no labels). Filters boxes with max side > MAX_SIDE_PX if MAX_SIDE_PX > 0 (hard coded).
174
  """
175
  H, W = image_rgb.shape[:2]
176
  overlay = np.zeros((H, W, 3), dtype=np.uint8)
 
198
  h = max(0, y2 - y1)
199
  if w == 0 or h == 0:
200
  continue
201
+ if MAX_SIDE_PX > 0:
202
+ if max(w, h) > MAX_SIDE_PX:
203
+ continue
204
 
205
  area = getattr(item.bbox, "area", w * h)
206
  try:
 
227
  # Convert overlay BGR -> RGB
228
  overlay_rgb = cv2.cvtColor(overlay, cv2.COLOR_BGR2RGB)
229
  if use_target:
230
+ counts = f"target='{target_class}': {target_count} | total: {total_count}"
231
  else:
232
+ counts = f"total: {total_count}"
233
  return overlay_rgb, alpha, counts
234
 
235
  def _draw_seg_overlay(image_rgb: np.ndarray, yolo_result, target_class: str, use_target: bool, fill_alpha: float = SEG_DEFAULT_ALPHA):
 
248
  masks = getattr(r, "masks", None)
249
 
250
  if boxes is None or len(boxes) == 0:
251
+ counts = f"target='{target_class}': 0 | total: 0" if use_target else "total: 0"
252
  return cv2.cvtColor(overlay_bgr, cv2.COLOR_BGR2RGB), alpha, counts
253
 
254
  N = len(boxes)
 
319
 
320
  overlay_rgb = cv2.cvtColor(overlay_bgr, cv2.COLOR_BGR2RGB)
321
  if use_target:
322
+ counts = f"target='{target_class}': {target_count} | total: {total_count}"
323
  else:
324
+ counts = f"total: {total_count}"
325
  return overlay_rgb, alpha, counts
326
 
327
  def _composite_layers(base_rgb: np.ndarray, layers: list):
 
349
 
350
  return np.clip(result, 0, 255).astype(np.uint8)
351
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
352
  def on_image_upload(image, state):
353
  """
354
+ Reset overlays if uploading a new image.
355
  """
356
  if image is None:
357
  return None, {"base": None, "det": None, "seg": None, "det_counts": "", "seg_counts": ""}, "", ""
 
359
  new_state = {"base": img_rgb, "det": None, "seg": None, "det_counts": "", "seg_counts": ""}
360
  return img_rgb, new_state, "", ""
361
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
362
  def clear_overlays(image, state):
363
  if state is None or state.get("base") is None:
364
  return None, {"base": None, "det": None, "seg": None, "det_counts": "", "seg_counts": ""}, "", ""
 
368
  state["det_counts"] = ""
369
  state["seg_counts"] = ""
370
  return base, state, "", ""
371
+ #endregion
372
 
373
+ #region Maintenance
 
374
  def _dir_size(path: str) -> int:
375
  try:
376
  total = 0
 
433
  except Exception:
434
  pass
435
  return "Removed:\n" + ("\n".join(removed) if removed else "(none)")
436
+ #endregion
437
 
438
  def build_app():
439
+ with gr.Blocks(title="Berries detection & bunches segmentation") as demo:
440
  gr.Markdown(
441
+ "## Double inference on the same image with combined overlays\n"
442
+ "- Model A: berries detection\n"
443
+ "- Model B: bunches segmentation\n"
444
+ "- Individual executions\n"
 
445
  )
446
 
447
  state = gr.State({"base": None, "det": None, "seg": None, "det_counts": "", "seg_counts": ""})
448
 
449
  with gr.Row():
450
  with gr.Column(scale=1):
451
+ img_in = gr.Image(label="Image", type="numpy")
 
 
 
 
 
 
 
 
 
 
 
 
 
452
 
453
+ with gr.Tab("Model A — Berries Detection"):
454
  with gr.Row():
455
  conf_det = gr.Slider(0.0, 1.0, value=0.35, step=0.01, label="Confidence (A)")
456
  device_a = gr.Dropdown(["auto", "cuda:0", "cpu"], value="auto", label="Device")
 
460
  with gr.Row():
461
  overlap_h = gr.Slider(0.0, 0.9, value=0.10, step=0.01, label="Overlap H (A)")
462
  overlap_w = gr.Slider(0.0, 0.9, value=0.10, step=0.01, label="Overlap W (A)")
463
+ btn_det = gr.Button("Run berries detection")
 
464
 
465
+ with gr.Tab("Model B — Bunches Segmentation"):
466
  with gr.Row():
467
  conf_seg = gr.Slider(0.0, 1.0, value=0.35, step=0.01, label="Confidence (B)")
468
+ seg_alpha = gr.Slider(0.0, 1.0, value=SEG_DEFAULT_ALPHA, step=0.05, label="Alpha masks (B)")
469
  device_b = gr.Dropdown(["auto", "cuda:0", "cpu"], value="auto", label="Device")
470
+ btn_seg = gr.Button("Run bunches segmentation")
471
 
472
  with gr.Row():
473
+ btn_clear = gr.Button("Clean overlay", variant="secondary")
474
 
475
+ with gr.Accordion("Disk Maintenance", open=False):
476
+ btn_check = gr.Button("Check storage")
477
+ btn_clean = gr.Button("Clean cache")
478
+ maint_out = gr.Textbox(label="Log Maintenance", interactive=False)
479
 
480
  with gr.Column(scale=2):
481
+ img_out = gr.Image(label="Combined Result", type="numpy")
482
  with gr.Row():
483
+ counts_out_det = gr.Textbox(label="Counts - Berries", interactive=False)
484
+ counts_out_seg = gr.Textbox(label="Counts - Bunches", interactive=False)
485
 
486
  # Wiring
487
  img_in.change(
 
494
  run_det,
495
  inputs=[
496
  img_in, state,
497
+ WEIGHTS_DETECTION, conf_det, slice_h, slice_w, overlap_h, overlap_w, device_a#,
498
+ #target, use_target, auto_opt_slice
499
  ],
500
  outputs=[img_out, state, counts_out_det, counts_out_seg],
501
  )
 
504
  run_seg,
505
  inputs=[
506
  img_in, state,
507
+ WEIGHTS_SEGMENTATION, conf_seg, device_b,
508
+ seg_alpha
509
+ #target, use_target, seg_alpha
510
  ],
511
  outputs=[img_out, state, counts_out_det, counts_out_seg],
512
  )