skarugu committed on
Commit
ae9565c
·
1 Parent(s): e75a647

Update Streamlit app v9 and self_train v5

Browse files
Files changed (2) hide show
  1. self_train.py +54 -0
  2. src/streamlit_app.py +184 -38
self_train.py CHANGED
@@ -13,6 +13,16 @@ Trigger conditions (any one fires a retrain):
13
  2. N unlabelled images accumulated in queue → retrain_queue/
14
  3. K consecutive low-confidence images → retrain_queue/ (reason=low_confidence)
15
  4. Nightly scheduled run → APScheduler cron 02:00 UTC
 
 
 
 
 
 
 
 
 
 
16
 
17
  After each retrain:
18
  • Fine-tunes from current HF Hub weights
@@ -20,6 +30,7 @@ After each retrain:
20
  • Only pushes to Hub if new Dice > previous best
21
  • Archives queue → runs/<run_id>/processed_queue/
22
  • Appends entry to manifest.json
 
23
 
24
  Usage:
25
  python self_train.py # check triggers once
@@ -118,6 +129,45 @@ def _load_manifest() -> list:
118
  def _save_manifest(m: list): MANIFEST_PATH.write_text(json.dumps(m, indent=2, default=str))
119
 
120
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  # ─────────────────────────────────────────────────────────────────────────────
122
  # Trigger checks
123
  # ─────────────────────────────────────────────────────────────────────────────
@@ -431,6 +481,10 @@ def run_retrain(reason: str = "scheduled"):
431
  # Archive queue
432
  archive = run_dir / "processed_queue"
433
  archive.mkdir(parents=True, exist_ok=True)
 
 
 
 
434
  for p in list(QUEUE_DIR.glob("*")) if QUEUE_DIR.exists() else []:
435
  shutil.move(str(p), str(archive / p.name))
436
  for folder in list(CORRECTIONS_DIR.glob("*")) if CORRECTIONS_DIR.exists() else []:
 
13
  2. N unlabelled images accumulated in queue β†’ retrain_queue/
14
  3. K consecutive low-confidence images β†’ retrain_queue/ (reason=low_confidence)
15
  4. Nightly scheduled run β†’ APScheduler cron 02:00 UTC
16
+ 5. User-optimized parameters submitted β†’ corrections/ (reason=user_optimized_params)
17
+ These submissions include the image, postprocessed masks from the user's
18
+ tuned parameter set, and a full snapshot of the sidebar settings. The
19
+ self-training pipeline uses these as additional supervised training pairs
20
+ and can aggregate parameter statistics to learn optimal defaults.
21
+
22
+ Privacy note:
23
+ Images processed in Private Mode (toggle in the Streamlit sidebar) are
24
+ NEVER queued for retraining. Only images explicitly submitted by the
25
+ user via "Submit for training" or "Submit corrections" are used.
26
 
27
  After each retrain:
28
  β€’ Fine-tunes from current HF Hub weights
 
30
  β€’ Only pushes to Hub if new Dice > previous best
31
  β€’ Archives queue β†’ runs/<run_id>/processed_queue/
32
  β€’ Appends entry to manifest.json
33
+ β€’ Aggregates user-submitted parameter snapshots β†’ optimal_params.json
34
 
35
  Usage:
36
  python self_train.py # check triggers once
 
129
  def _save_manifest(m: list): MANIFEST_PATH.write_text(json.dumps(m, indent=2, default=str))
130
 
131
 
132
def _aggregate_user_params(corrections_dir: Path, run_dir: Path):
    """
    Aggregate parameter snapshots from user_optimized_params submissions.

    Scans ``corrections_dir`` for ``*/meta.json`` files whose ``reason`` is
    ``"user_optimized_params"``, collects each submission's ``parameters``
    dict, and writes ``optimal_params.json`` into ``run_dir`` holding the
    per-parameter median of all numeric values — useful for tuning defaults.

    Parameters
    ----------
    corrections_dir : Path
        Directory containing per-submission subfolders, each with a
        ``meta.json`` file.
    run_dir : Path
        Retraining-run directory that receives ``optimal_params.json``.

    Returns ``None``. Does nothing when the directory is missing or no
    valid submissions are found.
    """
    if not corrections_dir.exists():
        return

    all_params = []
    for meta_p in corrections_dir.glob("*/meta.json"):
        try:
            meta = json.loads(meta_p.read_text())
            if meta.get("reason") == "user_optimized_params" and "parameters" in meta:
                all_params.append(meta["parameters"])
        except Exception:
            # Best-effort scan: skip unreadable/corrupt meta files rather
            # than aborting the whole retrain run.
            continue

    if not all_params:
        return

    # BUGFIX: iterate the union of keys across ALL submissions (previously
    # only all_params[0]'s keys were considered, so any parameter added in
    # a later app version was silently dropped from the aggregate).
    # Keys are kept in first-seen order to preserve the original JSON layout.
    ordered_keys = []
    seen = set()
    for p in all_params:
        for k in p:
            if k not in seen:
                seen.add(k)
                ordered_keys.append(k)

    # Median of each numeric parameter.
    # BUGFIX: exclude bools — bool is a subclass of int in Python, and a
    # "median" of True/False toggles is meaningless as a numeric default.
    aggregated = {}
    for key in ordered_keys:
        vals = sorted(
            p[key] for p in all_params
            if key in p
            and isinstance(p[key], (int, float))
            and not isinstance(p[key], bool)
        )
        if vals:
            mid = len(vals) // 2
            # Odd count → middle element; even count → mean of the two middles.
            aggregated[key] = vals[mid] if len(vals) % 2 else (vals[mid - 1] + vals[mid]) / 2

    result = {
        "n_submissions": len(all_params),
        "aggregated_params": aggregated,
        "all_submissions": all_params,
    }
    out = run_dir / "optimal_params.json"
    out.write_text(json.dumps(result, indent=2))
    # Encoding fix: the arrow in this log message was mojibake ("β†’").
    log.info("Aggregated %d user param submissions → %s", len(all_params), out)
169
+
170
+
171
  # ─────────────────────────────────────────────────────────────────────────────
172
  # Trigger checks
173
  # ─────────────────────────────────────────────────────────────────────────────
 
481
  # Archive queue
482
  archive = run_dir / "processed_queue"
483
  archive.mkdir(parents=True, exist_ok=True)
484
+
485
+ # Before archiving, collect user-submitted parameter snapshots
486
+ _aggregate_user_params(CORRECTIONS_DIR, run_dir)
487
+
488
  for p in list(QUEUE_DIR.glob("*")) if QUEUE_DIR.exists() else []:
489
  shutil.move(str(p), str(archive / p.name))
490
  for folder in list(CORRECTIONS_DIR.glob("*")) if CORRECTIONS_DIR.exists() else []:
src/streamlit_app.py CHANGED
@@ -7,7 +7,7 @@ Drop-in replacement for streamlit_app.py on Hugging Face Spaces.
7
  Features:
8
  ✦ Animated count-up metrics (9 counters)
9
  ✦ Instance overlay β€” nucleus IDs (1,2,3…) + myotube IDs (M1,M2…)
10
- ✦ Contour outline overlay β€” see exactly what each detection covers
11
  ✦ Watershed nuclei splitting for accurate counts
12
  ✦ Myotube surface area (total, mean, max Β΅mΒ²) + per-tube bar chart
13
  ✦ Active learning β€” upload corrected masks β†’ saved to corrections/
@@ -15,14 +15,24 @@ Features:
15
  ✦ Retraining queue status panel
16
  ✦ All original sidebar controls preserved
17
 
18
- v8 changes (validated against 57-well manual count dataset):
19
- ✦ REMOVED myotube closing β€” was merging adjacent myotubes into single blobs
20
- (caused 86% of images to undercount; r=0.245 β†’ see validation report).
21
- ✦ Unified postprocessing: opening + erode/dilate (matches training script).
22
- ✦ Added aspect-ratio shape filter to reject round debris false positives.
23
- ✦ Added contour outline tab per collaborator request.
24
- ✦ Fixed active learning correction upload bug (art["original"] β†’ art["rgb_u8"]).
25
- ✦ Unified threshold defaults across all scripts (thr_myo=0.40, thr_nuc=0.45).
 
 
 
 
 
 
 
 
 
 
26
  """
27
 
28
  import io
@@ -480,6 +490,8 @@ def compute_bio_metrics(nuc_mask, myo_mask,
480
  "mean_area_um2" : sa["mean_area_um2"],
481
  "max_area_um2" : sa["max_area_um2"],
482
  "_per_myotube_areas" : sa["per_myotube_areas"],
 
 
483
  }
484
 
485
 
@@ -596,13 +608,20 @@ def make_outline_overlay(rgb_u8: np.ndarray,
596
 
597
  def collect_label_positions(nuc_lab: np.ndarray,
598
  myo_lab: np.ndarray,
599
- img_w: int, img_h: int) -> dict:
 
600
  """
601
  Collect centroid positions for every nucleus and myotube,
602
  scaled to the original image pixel dimensions.
 
 
 
 
 
603
  Returns:
604
- { "nuclei": [ {"id": 1, "x": 123.4, "y": 56.7}, ... ],
605
- "myotubes": [ {"id": "M1","x": 200.1, "y": 300.5}, ... ] }
 
606
  """
607
  sx = img_w / nuc_lab.shape[1]
608
  sy = img_h / nuc_lab.shape[0]
@@ -615,11 +634,29 @@ def collect_label_positions(nuc_lab: np.ndarray,
615
  sx2 = img_w / myo_lab.shape[1]
616
  sy2 = img_h / myo_lab.shape[0]
617
  myotubes = []
618
- for prop in measure.regionprops(myo_lab):
619
- r, c = prop.centroid
620
- myotubes.append({"id": f"M{prop.label}", "x": round(c * sx2, 1), "y": round(r * sy2, 1)})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
621
 
622
- return {"nuclei": nuclei, "myotubes": myotubes}
623
 
624
 
625
  def make_svg_viewer(img_b64: str,
@@ -1238,6 +1275,23 @@ with st.sidebar:
1238
  st.header("Active learning")
1239
  enable_al = st.toggle("Enable correction upload", value=True)
1240
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1241
  st.header("Metric definitions")
1242
  with st.expander("Fusion Index"):
1243
  st.write("100 Γ— (nuclei in myotubes with β‰₯2 nuclei) / total nuclei")
@@ -1250,8 +1304,13 @@ with st.sidebar:
1250
  # ─────────────────────────────────────────────────────────────────────────────
1251
  # FILE UPLOADER
1252
  # ─────────────────────────────────────────────────────────────────────────────
 
 
 
 
 
1253
  uploads = st.file_uploader(
1254
- "Upload 1+ images (png / jpg / tif). Public Space β€” don't upload sensitive data.",
1255
  type=["png", "jpg", "jpeg", "tif", "tiff"],
1256
  accept_multiple_files=True,
1257
  )
@@ -1308,12 +1367,13 @@ if run:
1308
  with torch.no_grad():
1309
  probs = torch.sigmoid(model(x_t)).cpu().numpy()[0]
1310
 
1311
- # Confidence check
1312
  conf = float(np.mean([probs[0].max(), probs[1].max()]))
1313
  if conf < CONF_FLAG_THR:
1314
  low_conf_flags.append((name, conf))
1315
- add_to_queue(rgb_u8, reason="low_confidence",
1316
- metadata={"confidence": conf, "filename": up.name})
 
1317
 
1318
  nuc_raw = (probs[0] > float(thr_nuc)).astype(np.uint8)
1319
  myo_raw = (probs[1] > float(thr_myo)).astype(np.uint8)
@@ -1355,7 +1415,8 @@ if run:
1355
 
1356
  # Label positions in image-pixel coordinates (used by SVG viewer)
1357
  orig_h_img, orig_w_img = rgb_u8.shape[:2]
1358
- label_positions = collect_label_positions(nuc_lab, myo_lab, orig_w_img, orig_h_img)
 
1359
 
1360
  bio = compute_bio_metrics(
1361
  nuc_pp, myo_pp,
@@ -1368,6 +1429,8 @@ if run:
1368
  )
1369
  bio["fi_method"] = "cytoplasm-hole" if use_hole_method else "pixel-overlap"
1370
  per_areas = bio.pop("_per_myotube_areas", [])
 
 
1371
  bio["image"] = name
1372
  results.append(bio)
1373
  all_bio_metrics[name] = {**bio, "_per_myotube_areas": per_areas}
@@ -1503,7 +1566,8 @@ col_img, col_metrics = st.columns([3, 2], gap="large")
1503
  with col_img:
1504
  tabs = st.tabs([
1505
  "πŸ”΅ Combined",
1506
- "πŸ“ Outlines",
 
1507
  "🟣 Nuclei only",
1508
  "🟠 Myotubes only",
1509
  "πŸ“· Original",
@@ -1547,41 +1611,58 @@ with col_img:
1547
  st.components.v1.html(html_combined, height=680, scrolling=False)
1548
 
1549
  with tabs[1]:
1550
- # Outline overlay β€” shows contour boundaries around each detection
1551
- outline_img = make_outline_overlay(
1552
- _rgb, _nl, _ml,
1553
  nuc_color=nuc_rgb, myo_color=(0, 255, 0),
1554
  line_width=2,
1555
  )
1556
- outline_b64 = _b64png_disp(outline_img)
1557
- outline_lpos = lpos # show both labels on outline view
1558
- html_outline = make_svg_viewer(
1559
- outline_b64, iw, ih, outline_lpos,
1560
- show_nuclei=label_nuc, show_myotubes=label_myo,
1561
  )
1562
- st.components.v1.html(html_outline, height=680, scrolling=False)
1563
 
1564
  with tabs[2]:
1565
- nuc_only_lpos = {"nuclei": lpos["nuclei"], "myotubes": []}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1566
  html_nuc = make_svg_viewer(
1567
  nuc_only_b64, iw, ih, nuc_only_lpos,
1568
  show_nuclei=True, show_myotubes=False,
1569
  )
1570
  st.components.v1.html(html_nuc, height=680, scrolling=False)
1571
 
1572
- with tabs[3]:
1573
- myo_only_lpos = {"nuclei": [], "myotubes": lpos["myotubes"]}
 
1574
  html_myo = make_svg_viewer(
1575
  myo_only_b64, iw, ih, myo_only_lpos,
1576
  show_nuclei=False, show_myotubes=True,
1577
  )
1578
  st.components.v1.html(html_myo, height=680, scrolling=False)
1579
 
1580
- with tabs[4]:
1581
- st.image(art["rgb_u8"], use_container_width=True)
1582
  with tabs[5]:
1583
- st.image(art["nuc_pp"], use_container_width=True)
1584
  with tabs[6]:
 
 
1585
  st.image(art["myo_pp"], use_container_width=True)
1586
 
1587
  with col_metrics:
@@ -1633,10 +1714,70 @@ with col_metrics:
1633
 
1634
  st.divider()
1635
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1636
  # ─────────────────────────────────────────────────────────────────────────────
1637
  # ACTIVE LEARNING β€” CORRECTION UPLOAD
1638
  # ─────────────────────────────────────────────────────────────────────────────
1639
- if enable_al:
1640
  st.subheader("🧠 Submit corrected labels (Active Learning)")
1641
  st.caption(
1642
  "Upload corrected binary masks for any image. "
@@ -1668,6 +1809,11 @@ if enable_al:
1668
  f"βœ… Corrections for **{al_pick}** saved to `corrections/`. "
1669
  "The model will retrain at the next scheduled cycle."
1670
  )
 
 
 
 
 
1671
 
1672
  st.divider()
1673
 
 
7
  Features:
8
  ✦ Animated count-up metrics (9 counters)
9
  ✦ Instance overlay β€” nucleus IDs (1,2,3…) + myotube IDs (M1,M2…)
10
+ ✦ Separate nuclei outline + myotube outline tabs
11
  ✦ Watershed nuclei splitting for accurate counts
12
  ✦ Myotube surface area (total, mean, max Β΅mΒ²) + per-tube bar chart
13
  ✦ Active learning β€” upload corrected masks β†’ saved to corrections/
 
15
  ✦ Retraining queue status panel
16
  ✦ All original sidebar controls preserved
17
 
18
+ v9 changes:
19
+ ✦ FIXED: SVG viewer myotube ID count now matches live metrics count.
20
+ Root cause: viewer showed all connected components (myo_lab), but
21
+ metrics only counted those with β‰₯1 MyHC+ nucleus. Now the viewer
22
+ badge shows the biological myotube_count from compute_bio_metrics,
23
+ and non-bio myotube regions are shown as faint outlines (not labelled).
24
+ ✦ Outlines split into two separate tabs: "Nuclei outlines" and
25
+ "Myotube outlines" per collaborator request.
26
+ ✦ Privacy mode: sidebar toggle for "Private mode β€” do not use my data
27
+ for training". When enabled, images are NOT queued for retraining
28
+ (no low_confidence queue, no corrections submission).
29
+ ✦ Training contribution mode: explicit user-initiated action to submit
30
+ current image + tuned parameters as a training contribution. Only
31
+ runs when user clicks "Submit for training" after finding good params.
32
+ ✦ Parameter learning: when user submits, the current sidebar parameter
33
+ set (thresholds, postprocessing knobs) is saved alongside the image
34
+ so self_train.py can learn optimal parameters per image type.
35
+ ✦ All v8 fixes preserved (no closing, shape filter, erode+dilate, etc).
36
  """
37
 
38
  import io
 
490
  "mean_area_um2" : sa["mean_area_um2"],
491
  "max_area_um2" : sa["max_area_um2"],
492
  "_per_myotube_areas" : sa["per_myotube_areas"],
493
+ "_bio_myo_ids" : set(nm.keys()), # myotube label IDs with β‰₯1 MyHC+ nucleus
494
+ "_total_cc_count" : int(myo_lab.max()), # total connected components (for reference)
495
  }
496
 
497
 
 
608
 
609
  def collect_label_positions(nuc_lab: np.ndarray,
610
  myo_lab: np.ndarray,
611
+ img_w: int, img_h: int,
612
+ bio_myo_ids: set = None) -> dict:
613
  """
614
  Collect centroid positions for every nucleus and myotube,
615
  scaled to the original image pixel dimensions.
616
+
617
+ bio_myo_ids: set of myotube label IDs that have β‰₯1 MyHC+ nucleus.
618
+ If provided, only these are labelled as "M1", "M2", …
619
+ (renumbered sequentially). Non-bio regions get no label
620
+ and are stored separately for faint outline rendering.
621
  Returns:
622
+ { "nuclei": [ {"id": "1", "x": 123.4, "y": 56.7}, ... ],
623
+ "myotubes": [ {"id": "M1", "x": 200.1, "y": 300.5, "orig_label": 5}, ... ],
624
+ "myotubes_nonbio": [ {"id": "", "x": ..., "y": ..., "orig_label": 3}, ... ] }
625
  """
626
  sx = img_w / nuc_lab.shape[1]
627
  sy = img_h / nuc_lab.shape[0]
 
634
  sx2 = img_w / myo_lab.shape[1]
635
  sy2 = img_h / myo_lab.shape[0]
636
  myotubes = []
637
+ myotubes_nonbio = []
638
+
639
+ if bio_myo_ids is not None and len(bio_myo_ids) > 0:
640
+ # Renumber biological myotubes sequentially: M1, M2, M3…
641
+ sorted_bio = sorted(bio_myo_ids)
642
+ bio_remap = {orig: idx + 1 for idx, orig in enumerate(sorted_bio)}
643
+ for prop in measure.regionprops(myo_lab):
644
+ r, c = prop.centroid
645
+ pos = {"x": round(c * sx2, 1), "y": round(r * sy2, 1), "orig_label": prop.label}
646
+ if prop.label in bio_remap:
647
+ pos["id"] = f"M{bio_remap[prop.label]}"
648
+ myotubes.append(pos)
649
+ else:
650
+ pos["id"] = ""
651
+ myotubes_nonbio.append(pos)
652
+ else:
653
+ # Fallback: label all connected components (backward compat)
654
+ for prop in measure.regionprops(myo_lab):
655
+ r, c = prop.centroid
656
+ myotubes.append({"id": f"M{prop.label}", "x": round(c * sx2, 1),
657
+ "y": round(r * sy2, 1), "orig_label": prop.label})
658
 
659
+ return {"nuclei": nuclei, "myotubes": myotubes, "myotubes_nonbio": myotubes_nonbio}
660
 
661
 
662
  def make_svg_viewer(img_b64: str,
 
1275
  st.header("Active learning")
1276
  enable_al = st.toggle("Enable correction upload", value=True)
1277
 
1278
+ st.header("Privacy & Training")
1279
+ private_mode = st.toggle(
1280
+ "πŸ”’ Private mode",
1281
+ value=False,
1282
+ help=(
1283
+ "When enabled, your images are processed locally only. "
1284
+ "They are NOT added to the retraining queue, NOT saved to "
1285
+ "corrections/, and NOT used for model improvement in any way. "
1286
+ "Use this for unpublished data or sensitive research images."
1287
+ )
1288
+ )
1289
+ if private_mode:
1290
+ st.info(
1291
+ "πŸ”’ **Private mode ON** β€” your images will not be used for "
1292
+ "training or stored beyond this session."
1293
+ )
1294
+
1295
  st.header("Metric definitions")
1296
  with st.expander("Fusion Index"):
1297
  st.write("100 Γ— (nuclei in myotubes with β‰₯2 nuclei) / total nuclei")
 
1304
  # ─────────────────────────────────────────────────────────────────────────────
1305
  # FILE UPLOADER
1306
  # ─────────────────────────────────────────────────────────────────────────────
1307
+ _uploader_label = (
1308
+ "Upload 1+ images (png / jpg / tif). πŸ”’ Private mode is ON β€” images will not be stored."
1309
+ if private_mode else
1310
+ "Upload 1+ images (png / jpg / tif). Images may be used for model improvement."
1311
+ )
1312
  uploads = st.file_uploader(
1313
+ _uploader_label,
1314
  type=["png", "jpg", "jpeg", "tif", "tiff"],
1315
  accept_multiple_files=True,
1316
  )
 
1367
  with torch.no_grad():
1368
  probs = torch.sigmoid(model(x_t)).cpu().numpy()[0]
1369
 
1370
+ # Confidence check β€” only queue for training if NOT in private mode
1371
  conf = float(np.mean([probs[0].max(), probs[1].max()]))
1372
  if conf < CONF_FLAG_THR:
1373
  low_conf_flags.append((name, conf))
1374
+ if not private_mode:
1375
+ add_to_queue(rgb_u8, reason="low_confidence",
1376
+ metadata={"confidence": conf, "filename": up.name})
1377
 
1378
  nuc_raw = (probs[0] > float(thr_nuc)).astype(np.uint8)
1379
  myo_raw = (probs[1] > float(thr_myo)).astype(np.uint8)
 
1415
 
1416
  # Label positions in image-pixel coordinates (used by SVG viewer)
1417
  orig_h_img, orig_w_img = rgb_u8.shape[:2]
1418
+ label_positions = collect_label_positions(nuc_lab, myo_lab, orig_w_img, orig_h_img,
1419
+ bio_myo_ids=bio_myo_ids)
1420
 
1421
  bio = compute_bio_metrics(
1422
  nuc_pp, myo_pp,
 
1429
  )
1430
  bio["fi_method"] = "cytoplasm-hole" if use_hole_method else "pixel-overlap"
1431
  per_areas = bio.pop("_per_myotube_areas", [])
1432
+ bio_myo_ids = bio.pop("_bio_myo_ids", set())
1433
+ total_cc_count = bio.pop("_total_cc_count", 0)
1434
  bio["image"] = name
1435
  results.append(bio)
1436
  all_bio_metrics[name] = {**bio, "_per_myotube_areas": per_areas}
 
1566
  with col_img:
1567
  tabs = st.tabs([
1568
  "πŸ”΅ Combined",
1569
+ "πŸ“ Nuclei outlines",
1570
+ "πŸ“ Myotube outlines",
1571
  "🟣 Nuclei only",
1572
  "🟠 Myotubes only",
1573
  "πŸ“· Original",
 
1611
  st.components.v1.html(html_combined, height=680, scrolling=False)
1612
 
1613
  with tabs[1]:
1614
+ # Nuclei-only outlines
1615
+ nuc_outline_img = make_outline_overlay(
1616
+ _rgb, _nl, np.zeros_like(_ml),
1617
  nuc_color=nuc_rgb, myo_color=(0, 255, 0),
1618
  line_width=2,
1619
  )
1620
+ nuc_outline_b64 = _b64png_disp(nuc_outline_img)
1621
+ nuc_outline_lpos = {"nuclei": lpos["nuclei"], "myotubes": [], "myotubes_nonbio": []}
1622
+ html_nuc_outline = make_svg_viewer(
1623
+ nuc_outline_b64, iw, ih, nuc_outline_lpos,
1624
+ show_nuclei=True, show_myotubes=False,
1625
  )
1626
+ st.components.v1.html(html_nuc_outline, height=680, scrolling=False)
1627
 
1628
  with tabs[2]:
1629
+ # Myotube-only outlines
1630
+ myo_outline_img = make_outline_overlay(
1631
+ _rgb, np.zeros_like(_nl), _ml,
1632
+ nuc_color=nuc_rgb, myo_color=(0, 255, 0),
1633
+ line_width=2,
1634
+ )
1635
+ myo_outline_b64 = _b64png_disp(myo_outline_img)
1636
+ myo_outline_lpos = {"nuclei": [], "myotubes": lpos["myotubes"],
1637
+ "myotubes_nonbio": lpos.get("myotubes_nonbio", [])}
1638
+ html_myo_outline = make_svg_viewer(
1639
+ myo_outline_b64, iw, ih, myo_outline_lpos,
1640
+ show_nuclei=False, show_myotubes=True,
1641
+ )
1642
+ st.components.v1.html(html_myo_outline, height=680, scrolling=False)
1643
+
1644
+ with tabs[3]:
1645
+ nuc_only_lpos = {"nuclei": lpos["nuclei"], "myotubes": [], "myotubes_nonbio": []}
1646
  html_nuc = make_svg_viewer(
1647
  nuc_only_b64, iw, ih, nuc_only_lpos,
1648
  show_nuclei=True, show_myotubes=False,
1649
  )
1650
  st.components.v1.html(html_nuc, height=680, scrolling=False)
1651
 
1652
+ with tabs[4]:
1653
+ myo_only_lpos = {"nuclei": [], "myotubes": lpos["myotubes"],
1654
+ "myotubes_nonbio": lpos.get("myotubes_nonbio", [])}
1655
  html_myo = make_svg_viewer(
1656
  myo_only_b64, iw, ih, myo_only_lpos,
1657
  show_nuclei=False, show_myotubes=True,
1658
  )
1659
  st.components.v1.html(html_myo, height=680, scrolling=False)
1660
 
 
 
1661
  with tabs[5]:
1662
+ st.image(art["rgb_u8"], use_container_width=True)
1663
  with tabs[6]:
1664
+ st.image(art["nuc_pp"], use_container_width=True)
1665
+ with tabs[7]:
1666
  st.image(art["myo_pp"], use_container_width=True)
1667
 
1668
  with col_metrics:
 
1714
 
1715
  st.divider()
1716
 
1717
+ # ─────────────────────────────────────────────────────────────────────────────
1718
+ # TRAINING CONTRIBUTION β€” User-initiated parameter + image submission
1719
+ # ─────────────────────────────────────────────────────────────────────────────
1720
+ if not private_mode and names:
1721
+ st.subheader("πŸ“€ Submit image for training")
1722
+ st.caption(
1723
+ "Once you've tuned the sidebar parameters to get the best results for "
1724
+ "this image, click below to submit both the image and your optimized "
1725
+ "parameters as a training contribution. This helps MyoSight learn "
1726
+ "better settings for similar images."
1727
+ )
1728
+
1729
+ train_pick = st.selectbox("Image to submit", names, key="train_pick")
1730
+
1731
+ if st.button("πŸ“€ Submit for training", type="primary"):
1732
+ _ensure_dirs()
1733
+ ts = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
1734
+ folder = CORRECTIONS_DIR / f"params_{ts}"
1735
+ folder.mkdir(parents=True, exist_ok=True)
1736
+
1737
+ # Save the original image
1738
+ train_art = st.session_state.artifacts[train_pick]
1739
+ Image.fromarray(train_art["rgb_u8"]).save(folder / "image.png")
1740
+
1741
+ # Save the postprocessed masks (from current parameter settings)
1742
+ nuc_pp_arr = train_art.get("nuc_pp_arr")
1743
+ myo_pp_arr = train_art.get("myo_pp_arr")
1744
+ if nuc_pp_arr is not None:
1745
+ Image.fromarray((nuc_pp_arr > 0).astype(np.uint8) * 255).save(folder / "nuclei_mask.png")
1746
+ if myo_pp_arr is not None:
1747
+ Image.fromarray((myo_pp_arr > 0).astype(np.uint8) * 255).save(folder / "myotube_mask.png")
1748
+
1749
+ # Save the current parameter set β€” self_train can learn from these
1750
+ param_snapshot = {
1751
+ "reason": "user_optimized_params",
1752
+ "has_masks": nuc_pp_arr is not None and myo_pp_arr is not None,
1753
+ "timestamp": ts,
1754
+ "source_image": train_pick,
1755
+ "parameters": {
1756
+ "thr_nuc": float(thr_nuc),
1757
+ "thr_myo": float(thr_myo),
1758
+ "min_nuc_area": int(min_nuc_area),
1759
+ "min_myo_area": int(min_myo_area),
1760
+ "nuc_close_radius": int(nuc_close_radius),
1761
+ "myo_open_radius": int(myo_open_radius),
1762
+ "myo_erode_radius": int(myo_erode_radius),
1763
+ "min_myo_aspect_ratio": float(min_myo_aspect_ratio),
1764
+ "myo_max_area_px": int(myo_max_area_px),
1765
+ "myo_split_min_seeds": int(myo_split_min_seeds),
1766
+ "image_size": int(image_size),
1767
+ },
1768
+ "metrics": st.session_state.bio_metrics.get(train_pick, {}),
1769
+ }
1770
+ (folder / "meta.json").write_text(json.dumps(param_snapshot, indent=2, default=str))
1771
+
1772
+ st.success(
1773
+ f"βœ… **{train_pick}** submitted for training with your optimized parameters. "
1774
+ "The model will incorporate this at the next retraining cycle."
1775
+ )
1776
+
1777
  # ─────────────────────────────────────────────────────────────────────────────
1778
  # ACTIVE LEARNING β€” CORRECTION UPLOAD
1779
  # ─────────────────────────────────────────────────────────────────────────────
1780
+ if enable_al and not private_mode:
1781
  st.subheader("🧠 Submit corrected labels (Active Learning)")
1782
  st.caption(
1783
  "Upload corrected binary masks for any image. "
 
1809
  f"βœ… Corrections for **{al_pick}** saved to `corrections/`. "
1810
  "The model will retrain at the next scheduled cycle."
1811
  )
1812
+ elif enable_al and private_mode:
1813
+ st.info(
1814
+ "πŸ”’ Active learning and training submissions are disabled in Private mode. "
1815
+ "Toggle off Private mode in the sidebar to enable."
1816
+ )
1817
 
1818
  st.divider()
1819