| from __future__ import annotations |
| from dataclasses import dataclass |
| import json |
| import inspect |
| import math |
| import os |
| import random |
| import shutil |
| import time |
| import traceback |
| import gc |
| from contextlib import nullcontext |
| from pathlib import Path |
| from typing import Any |
|
|
| import matplotlib |
|
|
| matplotlib.use("Agg") |
| import matplotlib.pyplot as plt |
| import numpy as np |
| import optuna |
| import pandas as pd |
| import segmentation_models_pytorch as smp |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from optuna.storages import RDBStorage, RetryFailedTrialCallback |
| from PIL import Image as PILImage |
| from scipy import ndimage |
| from torch.optim import Adam, AdamW |
| from torch.optim.lr_scheduler import CosineAnnealingLR |
| from torch.utils.data import DataLoader, Dataset |
| from tqdm.auto import tqdm |
|
|
| """============================================================================= |
| EDIT ME |
| ============================================================================= |
| """ |
|
|
# --- Output locations and run identity ---------------------------------------
PROJECT_DIR = Path(__file__).resolve().parent


RUNS_ROOT = PROJECT_DIR / "runs"
HARD_CODED_PARAM_DIR = PROJECT_DIR
MODEL_NAME = "EfficientNet_Strategy3_Refinement"


# --- Dataset selection --------------------------------------------------------
DATASET_NAME = "BUSI_with_classes"
SUPPORTED_DATASET_NAMES = ("BUSI", "BUSI_with_classes")
DATA_ROOT = PROJECT_DIR / DATASET_NAME
BUSI_WITH_CLASSES_SPLIT_POLICY = "stratified"
SUPPORTED_BUSI_WITH_CLASSES_SPLIT_POLICIES = ("balanced_train", "stratified")


# --- Experiment grid and Optuna study settings --------------------------------
STRATEGIES = [3]
DATASET_PERCENTS = [0.1, ]  # training-set fractions to run (see percent_label)
SPLIT_TYPE = "80_10_10"
SUPPORTED_SPLIT_TYPES = ("80_10_10", "70_10_20")
DATASET_SPLITS_JSON = PROJECT_DIR / "dataset_splits.json"
DATASET_SPLITS_VERSION = 1
NUM_TRIALS = 25
STUDY_DIRECTION = "maximize"
STUDY_OBJECTIVE_METRIC = "val_iou"
BEST_CHECKPOINT_METRIC = "val_iou"


# --- Reproducibility and data loading -----------------------------------------
SEED = 42
IMG_SIZE = 128
BATCH_SIZE = 2
NUM_WORKERS = 0
USE_PIN_MEMORY = True
USE_PERSISTENT_WORKERS = False
PRELOAD_TO_RAM = True


# --- Backbone / decoder architecture ------------------------------------------
SMP_ENCODER_NAME = "efficientnet-b0"
SMP_ENCODER_WEIGHTS = "imagenet"
SMP_ENCODER_DEPTH = 5
SMP_ENCODER_PROJ_DIM = 128
SMP_DECODER_TYPE = "Unet"
BACKBONE_FAMILY = "smp"  # "smp" or "custom_vgg" (enforced by RuntimeModelConfig.validate)
VGG_FEATURE_SCALES = 4   # must be 3 or 4 (enforced by RuntimeModelConfig.validate)
VGG_FEATURE_DILATION = 1


# --- Normalization and action space -------------------------------------------
USE_IMAGENET_NORM = True
REPLACE_BN_WITH_GN = True
GN_NUM_GROUPS = 8
NUM_ACTIONS = 5


# --- Training schedule per strategy -------------------------------------------
STRATEGY_1_MAX_EPOCHS = 100
STRATEGY_2_MAX_EPOCHS = 100
STRATEGY_3_MAX_EPOCHS = 100
STRATEGY_4_MAX_EPOCHS = 100
STRATEGY_5_MAX_EPOCHS = 100
VALIDATE_EVERY_N_EPOCHS = 5
CHECKPOINT_EVERY_N_EPOCHS = 10
SAVE_LATEST_EVERY_EPOCH = True
EARLY_STOPPING_PATIENCE = 0  # presumably 0 disables early stopping — confirm in the trainer
VERBOSE_EPOCH_LOG = False


# --- Default hyperparameters (used when Optuna does not supply them) ----------
DEFAULT_HEAD_LR = 1e-4
DEFAULT_ENCODER_LR = 1e-5
DEFAULT_WEIGHT_DECAY = 1e-4
DEFAULT_TMAX = 5
DEFAULT_GAMMA = 0.95
DEFAULT_CRITIC_LOSS_WEIGHT = 0.5
DEFAULT_ENTROPY_ALPHA_INIT = 0.2
DEFAULT_ENTROPY_TARGET_RATIO = 0.1
DEFAULT_ENTROPY_LR = 3e-4
DEFAULT_CE_WEIGHT = 0.5
DEFAULT_DICE_WEIGHT = 0.5
DEFAULT_DROPOUT_P = 0.2
DEFAULT_GRAD_CLIP_NORM = 6.0
DEFAULT_MASK_UPDATE_STEP = 0.1
DEFAULT_REFINEMENT_DICE_REWARD_WEIGHT = 0.25
DEFAULT_REFINEMENT_PRIOR_REWARD_WEIGHT = 0.05
ATTENTION_MAX_TOKENS = 1024
ATTENTION_MIN_POOL_SIZE = 16


# --- ReduceLROnPlateau-style scheduler knobs ----------------------------------
SCHEDULER_FACTOR = 0.5
SCHEDULER_PATIENCE = 5
SCHEDULER_THRESHOLD = 1e-3
SCHEDULER_MIN_LR = 1e-5


# --- Optuna search ranges (low, high) -----------------------------------------
HEAD_LR_RANGE = (1e-5, 3e-3)
ENCODER_LR_RANGE = (1e-6, 3e-3)
WEIGHT_DECAY_RANGE = (1e-6, 1e-2)
TMAX_RANGE = (3, 10)
ENTROPY_LR_RANGE = (1e-5, 1e-3)
DROPOUT_P_RANGE = (0.0, 0.5)


# --- Optuna / study lifecycle toggles -----------------------------------------
USE_TRIAL_PRUNING = False
TRIAL_PRUNER_WARMUP_STEPS = 5
LOAD_EXISTING_STUDIES = True
SKIP_EXISTING_FINALS = False
RUN_OPTUNA = False
RESET_ALL_STUDIES_EACH_RUN = False
USE_EXISTING_BEST_PARAMS_WHEN_OPTUNA_OFF = False


# --- Execution mode, checkpoint selection, resume behaviour -------------------
EXECUTION_MODE = "train_eval"
EVAL_CHECKPOINT_MODE = "latest"
EVAL_SPECIFIC_CHECKPOINT = ""
STRATEGY2_CHECKPOINT_MODE = "specific"
# Per-dataset-fraction checkpoint paths used when mode is "specific".
STRATEGY2_SPECIFIC_CHECKPOINT = {
    0.1: "./runs/EfficientNet/pct_10/strategy_2/final/checkpoints/latest.pt",
    0.5: "./runs/EfficientNet/pct_50/strategy_2/final/checkpoints/latest.pt",
    1.0: "./runs/EfficientNet/pct_100/strategy_2/final/checkpoints/latest.pt"
}
STRATEGY3_BOOTSTRAP_FROM_STRATEGY2 = True
TRAIN_RESUME_MODE = "off"
TRAIN_RESUME_SPECIFIC_CHECKPOINT = ""
OPTUNA_HEARTBEAT_INTERVAL = 60
OPTUNA_HEARTBEAT_GRACE_PERIOD = 180
OPTUNA_FAILED_TRIAL_MAX_RETRY = 3


# --- Mixed precision / performance toggles ------------------------------------
USE_AMP = True
AMP_DTYPE = "bfloat16"  # "auto" | "float16"/"fp16"/"half" | "bfloat16"/"bf16" (see resolve_amp_dtype)
USE_CHANNELS_LAST = False
USE_TORCH_COMPILE = True
STEPWISE_BACKWARD = True
ALLOW_TF32 = True


# --- Sanity-check runs (smoke / overfit) --------------------------------------
RUN_SMOKE_TEST = True
SMOKE_TEST_SAMPLE_INDEX = 0
RUN_OVERFIT_TEST = False
OVERFIT_N_BATCHES = 2
OVERFIT_N_EPOCHS = 100
OVERFIT_HEAD_LR = 1e-3
OVERFIT_ENCODER_LR = 1e-4
OVERFIT_PRINT_EVERY = 5
CONTROLLED_MASK_THRESHOLD = 0.50  # default binarization threshold (see threshold_binary_mask)


# Keys every hyperparameter payload must provide.
REQUIRED_HPARAM_KEYS = ("head_lr", "encoder_lr", "weight_decay", "dropout_p", "tmax", "entropy_lr")
|
|
| """============================================================================= |
| IF OPTUNA IS OFF --> USE ME |
| ============================================================================= |
| """ |
|
|
| |
| |
# Hand-picked hyperparameter files used when RUN_OPTUNA is False.
# Keys look like "strategy:percent" (e.g. "3:10" — strategy 3, 10% of data);
# values are JSON filenames — presumably resolved against HARD_CODED_PARAM_DIR,
# confirm where these are loaded.
MANUAL_HPARAMS_IF_OPTUNA_OFF: dict[str, str] = {
    "3:10": "strat3_pct10.json",
}
|
|
| """============================================================================= |
| RUNTIME SETUP |
| ============================================================================= |
| """ |
|
|
# Global torch numerics policy: allow TF32 matmuls and force deterministic
# (non-benchmarked) cuDNN kernels for reproducibility.
torch.set_float32_matmul_precision("high")
if torch.cuda.is_available():
    torch.backends.cuda.matmul.allow_tf32 = ALLOW_TF32
    torch.backends.cudnn.allow_tf32 = ALLOW_TF32
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
|
|
def select_runtime_device() -> tuple[torch.device, str]:
    """Pick the best available torch device.

    Preference order is CUDA, then MPS, then CPU. MPS availability is
    verified with a tiny allocation because the backend can report
    available yet fail to initialize; on failure we fall back to CPU.
    Returns the device plus a short tag naming the chosen backend.
    """
    if torch.cuda.is_available():
        return torch.device("cuda"), "cuda"

    mps = getattr(torch.backends, "mps", None)
    if mps is not None and mps.is_available():
        try:
            probe = torch.zeros(1, device="mps")
            del probe
        except Exception as exc:
            print(f"[Device] MPS detected but failed to initialize ({exc}). Falling back to CPU.")
        else:
            return torch.device("mps"), "mps"

    return torch.device("cpu"), "cpu"
|
|
# Process-wide device choice plus a mutable per-job hyperparameter bag
# (populated by set_current_job_params, read through _job_param).
DEVICE, DEVICE_FALLBACK_SOURCE = select_runtime_device()
CURRENT_JOB_PARAMS: dict[str, Any] = {}
|
|
@dataclass(frozen=True)
class RuntimeModelConfig:
    """Immutable snapshot of the segmentation backbone/decoder configuration.

    Built either from the module-level constants (``from_globals``) or from a
    serialized payload (``from_payload``); ``validate`` enforces the legal
    value ranges and ``to_payload`` round-trips back to a plain dict.
    """

    backbone_family: str            # "smp" or "custom_vgg"
    smp_encoder_name: str
    smp_encoder_weights: str | None
    smp_encoder_depth: int
    smp_encoder_proj_dim: int
    smp_decoder_type: str
    vgg_feature_scales: int         # 3 or 4 (only used by the custom VGG path)
    vgg_feature_dilation: int


    @classmethod
    def from_globals(cls) -> RuntimeModelConfig:
        """Build a config from the module-level EDIT ME constants."""
        return cls(
            backbone_family=str(BACKBONE_FAMILY).strip().lower(),
            smp_encoder_name=str(SMP_ENCODER_NAME),
            smp_encoder_weights=SMP_ENCODER_WEIGHTS,
            smp_encoder_depth=int(SMP_ENCODER_DEPTH),
            smp_encoder_proj_dim=int(SMP_ENCODER_PROJ_DIM),
            smp_decoder_type=str(SMP_DECODER_TYPE),
            vgg_feature_scales=int(VGG_FEATURE_SCALES),
            vgg_feature_dilation=int(VGG_FEATURE_DILATION),
        )


    @classmethod
    def from_payload(cls, payload: dict[str, Any] | None) -> RuntimeModelConfig:
        """Build a config from a saved payload; missing keys fall back to the
        current module constants (None payload yields all defaults)."""
        payload = payload or {}
        return cls(
            backbone_family=str(payload.get("backbone_family", "smp")).strip().lower(),
            smp_encoder_name=str(payload.get("smp_encoder_name", SMP_ENCODER_NAME)),
            smp_encoder_weights=payload.get("smp_encoder_weights", SMP_ENCODER_WEIGHTS),
            smp_encoder_depth=int(payload.get("smp_encoder_depth", SMP_ENCODER_DEPTH)),
            smp_encoder_proj_dim=int(payload.get("smp_encoder_proj_dim", SMP_ENCODER_PROJ_DIM)),
            smp_decoder_type=str(payload.get("smp_decoder_type", SMP_DECODER_TYPE)),
            vgg_feature_scales=int(payload.get("vgg_feature_scales", VGG_FEATURE_SCALES)),
            vgg_feature_dilation=int(payload.get("vgg_feature_dilation", VGG_FEATURE_DILATION)),
        )


    def validate(self) -> RuntimeModelConfig:
        """Raise ValueError on any out-of-range field; return self for chaining."""
        if self.backbone_family not in {"smp", "custom_vgg"}:
            raise ValueError(f"BACKBONE_FAMILY must be 'smp' or 'custom_vgg', got {self.backbone_family!r}")
        if self.vgg_feature_scales not in {3, 4}:
            raise ValueError(f"VGG_FEATURE_SCALES must be 3 or 4, got {self.vgg_feature_scales}")
        if self.vgg_feature_dilation < 1:
            raise ValueError(f"VGG_FEATURE_DILATION must be >= 1, got {self.vgg_feature_dilation}")
        if self.smp_encoder_depth < 1:
            raise ValueError(f"SMP_ENCODER_DEPTH must be >= 1, got {self.smp_encoder_depth}")
        if self.smp_encoder_proj_dim < 0:
            raise ValueError(f"SMP_ENCODER_PROJ_DIM must be >= 0, got {self.smp_encoder_proj_dim}")
        return self


    def to_payload(self) -> dict[str, Any]:
        """Plain-dict form suitable for JSON serialization (from_payload inverse)."""
        return {
            "backbone_family": self.backbone_family,
            "smp_encoder_name": self.smp_encoder_name,
            "smp_encoder_weights": self.smp_encoder_weights,
            "smp_encoder_depth": self.smp_encoder_depth,
            "smp_encoder_proj_dim": self.smp_encoder_proj_dim,
            "smp_decoder_type": self.smp_decoder_type,
            "vgg_feature_scales": self.vgg_feature_scales,
            "vgg_feature_dilation": self.vgg_feature_dilation,
        }


    def backbone_tag(self) -> str:
        """Short tag for paths/keys: just the backbone family."""
        return self.backbone_family


    def backbone_display_name(self) -> str:
        """Human-readable backbone description for logs."""
        if self.backbone_family == "custom_vgg":
            return f"Custom VGG (scales={self.vgg_feature_scales}, dilation={self.vgg_feature_dilation})"
        return f"SMP {self.smp_encoder_name}"
|
|
def current_model_config() -> RuntimeModelConfig:
    """Snapshot the module-level model constants and validate them."""
    config = RuntimeModelConfig.from_globals()
    return config.validate()
|
|
| """============================================================================= |
| UTILITIES |
| ============================================================================= |
| """ |
|
|
def banner(title: str) -> None:
    """Print *title* between two full-width '=' rules (major heading)."""
    rule = "=" * 80
    print(f"\n{rule}\n{title}\n{rule}")
|
|
def section(title: str) -> None:
    """Print *title* between two full-width '-' rules (minor heading)."""
    rule = "-" * 80
    print(f"\n{rule}\n{title}\n{rule}")
|
|
| def ensure_dir(path: str | Path) -> Path: |
| path = Path(path).expanduser().resolve() |
| path.mkdir(parents=True, exist_ok=True) |
| return path |
|
|
def save_json(path: str | Path, payload: Any) -> None:
    """Serialize *payload* as pretty-printed JSON to *path*, creating parent dirs."""
    target = Path(path)
    ensure_dir(target.parent)
    with target.open("w", encoding="utf-8") as handle:
        json.dump(payload, handle, indent=2)
|
|
| def load_json(path: str | Path) -> Any: |
| with Path(path).open("r", encoding="utf-8") as f: |
| return json.load(f) |
|
|
| def _format_history_log_value(key: str, value: Any) -> str: |
| if isinstance(value, float): |
| if key == "lr" or key.endswith("_lr"): |
| return f"{value:.6e}" |
| return json.dumps(value) |
| return json.dumps(value) |
|
|
def format_history_log_row(row: dict[str, Any]) -> str:
    """Join a history row into comma-separated 'key=value' pairs."""
    parts = [f"{key}={_format_history_log_value(key, value)}" for key, value in row.items()]
    return ", ".join(parts)
|
|
| def _format_epoch_metric(value: Any, *, scientific: bool = False) -> str: |
| if value is None: |
| return "null" |
| if isinstance(value, (float, int, np.floating, np.integer)): |
| value = float(value) |
| return f"{value:.6e}" if scientific else f"{value:.4f}" |
| return str(value) |
|
|
def format_concise_epoch_log(row: dict[str, Any], *, best_val_iou: float) -> str:
    """Build the single-line per-epoch summary printed to the console.

    Learning rates are rendered in scientific notation; missing metrics
    appear as 'null'.
    """
    pieces = []
    for name, value, scientific in (
        ("train_loss", row.get("train_loss"), False),
        ("train_iou", row.get("train_iou"), False),
        ("train_entropy", row.get("train_entropy"), False),
        ("val_loss", row.get("val_loss"), False),
        ("val_iou", row.get("val_iou"), False),
        ("val_dice", row.get("val_dice"), False),
        ("head_lr", row.get("lr"), True),
        ("encoder_lr", row.get("encoder_lr"), True),
        ("best_val_iou", best_val_iou, False),
    ):
        pieces.append(f"{name}={_format_epoch_metric(value, scientific=scientific)}")
    return ", ".join(pieces)
|
|
def set_global_seed(seed: int = 42) -> None:
    """Seed python, numpy and torch RNGs and force deterministic cuDNN.

    NOTE: PYTHONHASHSEED is exported for child processes; it presumably does
    not change hash randomization of the already-running interpreter.
    """
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
|
|
def stable_int_from_text(text: str) -> int:
    """Deterministic polynomial hash of *text*, reduced mod 2**31 - 1.

    Unlike the builtin hash(), the result is stable across processes, so it
    is safe to mix into seeds.
    """
    modulus = 2 ** 31 - 1
    value = 0
    for byte in text.encode("utf-8"):
        value = (value * 131 + byte) % modulus
    return value
|
|
def seed_worker(worker_id: int) -> None:
    """DataLoader worker_init_fn: re-seed python/numpy/torch from torch's
    current initial seed (which presumably already differs per worker)."""
    del worker_id  # unused; the per-worker seed comes from torch.initial_seed()
    derived = torch.initial_seed() % (2 ** 32)
    random.seed(derived)
    np.random.seed(derived)
    torch.manual_seed(derived)
|
|
def make_seeded_generator(seed: int, tag: str) -> torch.Generator:
    """Create a torch.Generator seeded from *seed* plus a tag-derived offset,
    so distinct tags get independent but reproducible streams."""
    offset = stable_int_from_text(tag)
    generator = torch.Generator()
    generator.manual_seed(seed + offset)
    return generator
|
|
def cuda_memory_snapshot() -> str:
    """One-line summary of CUDA memory usage in GiB (all zeros off-CUDA)."""
    if DEVICE.type != "cuda" or not torch.cuda.is_available():
        return "allocated=0.00 GB, reserved=0.00 GB, peak=0.00 GB"
    gib = 1024 ** 3
    allocated = torch.cuda.memory_allocated(device=DEVICE) / gib
    reserved = torch.cuda.memory_reserved(device=DEVICE) / gib
    peak = torch.cuda.max_memory_allocated(device=DEVICE) / gib
    return f"allocated={allocated:.2f} GB, reserved={reserved:.2f} GB, peak={peak:.2f} GB"
|
|
def run_cuda_cleanup(context: str | None = None) -> None:
    """Run GC plus best-effort CUDA cache cleanup.

    Every CUDA call is individually guarded so cleanup can never raise.
    When *context* is given, the memory snapshot is printed and peak
    statistics are reset afterwards.
    """
    gc.collect()
    if DEVICE.type != "cuda" or not torch.cuda.is_available():
        return
    for release in (
        lambda: torch.cuda.synchronize(device=DEVICE),
        torch.cuda.empty_cache,
        torch.cuda.ipc_collect,
    ):
        try:
            release()
        except Exception:
            pass
    if context is None:
        return
    print(f"[CUDA Cleanup] {context}: {cuda_memory_snapshot()}")
    try:
        torch.cuda.reset_peak_memory_stats(device=DEVICE)
    except Exception:
        pass
|
|
def prune_directory_except(root: Path, keep_file_names: set[str]) -> None:
    """Delete everything under *root* except the named top-level files.

    Files whose full path is not root/<name> for a name in *keep_file_names*
    are unlinked; emptied sub-directories are then removed deepest-first.
    *root* itself is never removed; a missing *root* is a no-op.
    """
    if not root.exists():
        return
    protected = {root / name for name in keep_file_names}
    entries = sorted(root.rglob("*"), reverse=True)
    for entry in entries:
        if entry.is_file() and entry not in protected:
            entry.unlink()
    for entry in entries:
        if entry.is_dir() and entry != root:
            try:
                entry.rmdir()
            except OSError:
                pass  # directory still holds a protected file
|
|
def prune_optuna_trial_dir(trial_dir: Path) -> None:
    """Strip a finished Optuna trial directory down to its summary.json."""
    keep = {"summary.json"}
    prune_directory_except(trial_dir, keep)
|
|
def prune_optuna_study_dir(study_root: Path) -> None:
    """Strip a study directory down to best params, summary and the study DB."""
    keep = {"best_params.json", "summary.json", "study.sqlite3"}
    prune_directory_except(study_root, keep)
|
|
def to_device(batch: Any, device: torch.device) -> Any:
    """Recursively move every tensor in a nested dict/list/tuple batch to
    *device* (non-blocking); non-tensor leaves are returned unchanged."""
    if torch.is_tensor(batch):
        return batch.to(device, non_blocking=True)
    if isinstance(batch, dict):
        return {key: to_device(value, device) for key, value in batch.items()}
    if isinstance(batch, (list, tuple)):
        moved = [to_device(value, device) for value in batch]
        return moved if isinstance(batch, list) else tuple(moved)
    return batch
|
|
def percent_label(percent: float) -> str:
    """Map a fraction to a zero-padded percent tag, e.g. 0.05 -> '05', 1.0 -> '100'."""
    whole = int(round(percent * 100))
    return f"{whole:02d}"
|
|
def current_dataset_name() -> str:
    """Return the configured dataset name, rejecting unsupported values."""
    name = str(DATASET_NAME).strip()
    if name in SUPPORTED_DATASET_NAMES:
        return name
    raise ValueError(f"DATASET_NAME must be one of {SUPPORTED_DATASET_NAMES}, got {name!r}")
|
|
def current_busi_with_classes_split_policy() -> str:
    """Return the normalized BUSI_with_classes split policy, validated."""
    policy = str(BUSI_WITH_CLASSES_SPLIT_POLICY).strip().lower()
    if policy in SUPPORTED_BUSI_WITH_CLASSES_SPLIT_POLICIES:
        return policy
    raise ValueError(
        f"BUSI_WITH_CLASSES_SPLIT_POLICY must be one of {SUPPORTED_BUSI_WITH_CLASSES_SPLIT_POLICIES}, "
        f"got {policy!r}"
    )
|
|
def current_dataset_splits_json_path() -> Path:
    """Path of the split-registry JSON for the active dataset and policy."""
    name = current_dataset_name()
    if name == "BUSI":
        return DATASET_SPLITS_JSON
    policy = current_busi_with_classes_split_policy()
    return PROJECT_DIR / f"dataset_splits_{name.lower()}_{policy}.json"
|
|
def current_dataset_dirs() -> tuple[Path, Path]:
    """(images_dir, masks_dir) for the active dataset's on-disk layout."""
    if current_dataset_name() == "BUSI":
        return DATA_ROOT / "images", DATA_ROOT / "annotations"
    return DATA_ROOT / "all_images", DATA_ROOT / "all_masks"
|
|
def current_pipeline_check_path() -> Path | None:
    """pipeline_check.json path for BUSI_with_classes; None otherwise."""
    if current_dataset_name() == "BUSI_with_classes":
        return DATA_ROOT / "pipeline_check.json"
    return None
|
|
def normalization_cache_tag() -> str:
    """Cache key naming the dataset (plus split policy when it matters)."""
    name = current_dataset_name()
    if name == "BUSI":
        return "BUSI"
    return f"{name}_{current_busi_with_classes_split_policy()}"
|
|
def _cuda_bf16_supported() -> bool:
    """Best-effort probe for CUDA bfloat16 support.

    Falls back to the compute-capability check (Ampere and newer, major >= 8)
    when torch.cuda.is_bf16_supported is unavailable or raises.
    """
    try:
        return bool(torch.cuda.is_bf16_supported())
    except Exception:
        major, _minor = torch.cuda.get_device_capability()
        return major >= 8


def resolve_amp_dtype(key: str) -> torch.dtype:
    """Map an AMP_DTYPE string to a torch dtype.

    'auto' prefers bfloat16 on capable CUDA devices and float16 otherwise.
    Explicit bfloat16 requests degrade to float16 (with a console warning)
    when the active device cannot run bf16.

    Raises:
        ValueError: for any unrecognized key.
    """
    key = key.lower().strip()
    if key == "auto":
        if DEVICE.type == "cuda" and _cuda_bf16_supported():
            return torch.bfloat16
        return torch.float16
    if key in {"float16", "fp16", "half"}:
        return torch.float16
    if key in {"bfloat16", "bf16"}:
        if DEVICE.type == "cuda" and _cuda_bf16_supported():
            return torch.bfloat16
        print("[AMP] bfloat16 requested but unsupported here. Falling back to float16.")
        return torch.float16
    raise ValueError(f"Unsupported AMP_DTYPE: {key}")
|
|
def amp_autocast_enabled(device: torch.device) -> bool:
    """True when AMP is globally on and *device* supports autocast here."""
    if not USE_AMP:
        return False
    return device.type in {"cuda", "mps"}
|
|
def autocast_ctx(enabled: bool, device: torch.device, amp_dtype: torch.dtype):
    """Return a torch.autocast context for *device* when enabled, else a no-op."""
    if enabled:
        return torch.autocast(device_type=device.type, dtype=amp_dtype, enabled=True)
    return nullcontext()
|
|
def make_grad_scaler(enabled: bool, amp_dtype: torch.dtype, device: torch.device):
    """GradScaler for CUDA float16 AMP; None whenever scaling is unnecessary
    (AMP off, non-CUDA device, or bfloat16 which needs no loss scaling)."""
    needs_scaler = enabled and device.type == "cuda" and amp_dtype != torch.bfloat16
    if not needs_scaler:
        return None
    try:
        return torch.amp.GradScaler("cuda", enabled=True, init_scale=8192.0)
    except Exception:
        # Older torch releases only expose the torch.cuda.amp variant.
        return torch.cuda.amp.GradScaler(enabled=True, init_scale=8192.0)
|
|
def format_seconds(seconds: float) -> str:
    """Render a duration as zero-padded HH:MM:SS (fractional seconds truncated)."""
    total = int(seconds)
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{secs:02d}"
|
|
def tensor_bytes(t: torch.Tensor) -> int:
    """Storage footprint of *t* in bytes (element count times element size)."""
    return t.element_size() * t.numel()
|
|
def bytes_to_gb(num_bytes: int) -> float:
    """Convert a byte count to binary gigabytes (GiB)."""
    gib = 1024 ** 3
    return num_bytes / gib
|
|
def set_current_job_params(payload: dict[str, Any] | None = None) -> None:
    """Replace the process-wide job-parameter dict with a copy of *payload*
    (clears it when *payload* is None or empty)."""
    CURRENT_JOB_PARAMS.clear()
    if not payload:
        return
    CURRENT_JOB_PARAMS.update(dict(payload))
|
|
def _job_param(name: str, default: Any) -> Any:
    """Look up a per-job hyperparameter, falling back to *default*."""
    if name in CURRENT_JOB_PARAMS:
        return CURRENT_JOB_PARAMS[name]
    return default
|
|
def _refinement_deltas(*, device: torch.device, dtype: torch.dtype) -> torch.Tensor:
    """Action-indexed mask-probability deltas: [-large, -small, 0, +small, +large].

    Magnitudes come from the per-job parameters, defaulting to 0.10 / 0.25.
    """
    small = float(_job_param("refine_delta_small", 0.10))
    large = float(_job_param("refine_delta_large", 0.25))
    deltas = [-large, -small, 0.0, small, large]
    return torch.tensor(deltas, device=device, dtype=dtype)
|
|
| def threshold_binary_mask(mask: torch.Tensor, threshold: float | None = None) -> torch.Tensor: |
| threshold = float(_job_param("threshold", CONTROLLED_MASK_THRESHOLD)) if threshold is None else float(threshold) |
| return (mask > threshold).to(dtype=mask.dtype) |
|
|
| def threshold_binary_long(mask: torch.Tensor, threshold: float | None = None) -> torch.Tensor: |
| threshold = float(_job_param("threshold", CONTROLLED_MASK_THRESHOLD)) if threshold is None else float(threshold) |
| return (mask > threshold).long() |
|
|
| """============================================================================= |
| BUSI SPLIT + NORMALIZATION |
| ============================================================================= |
| """ |
|
|
# Channel-wise ImageNet statistics, applied when USE_IMAGENET_NORM is enabled.
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)
|
|
def validate_image_mask_consistency(images_dir: Path, annotations_dir: Path):
    """Pair PNG images with same-named masks.

    Returns (matched, missing_masks, missing_images): filenames present in
    both directories, images without a mask, and masks without an image —
    each sorted. Hidden files (leading '.') and non-PNG entries are ignored.
    """
    def png_names(directory: Path) -> set[str]:
        return {
            name
            for name in os.listdir(directory)
            if not name.startswith(".") and name.lower().endswith(".png")
        }

    image_files = png_names(images_dir)
    mask_files = png_names(annotations_dir)
    return (
        sorted(image_files & mask_files),
        sorted(image_files - mask_files),
        sorted(mask_files - image_files),
    )
|
|
def parse_busi_with_classes_label(filename: str) -> str:
    """Derive 'benign'/'malignant' from a *_B.png / *_M.png filename suffix
    (case-insensitive); raise ValueError for any other name."""
    suffix_to_label = {"_B.PNG": "benign", "_M.PNG": "malignant"}
    upper_name = str(filename).upper()
    for suffix, label in suffix_to_label.items():
        if upper_name.endswith(suffix):
            return label
    raise ValueError(
        f"BUSI_with_classes filename must end with '_B.png' or '_M.png', got {filename!r}"
    )
|
|
| def _candidate_report_dicts(payload: dict[str, Any]) -> list[dict[str, Any]]: |
| candidates = [payload] |
| for key in ("counts", "summary", "dataset", "report", "metadata"): |
| value = payload.get(key) |
| if isinstance(value, dict): |
| candidates.append(value) |
| return candidates |
|
|
def _extract_report_int(payload: dict[str, Any], keys: tuple[str, ...]) -> int | None:
    """First integer value found under any of *keys* across the candidate dicts.

    Booleans are skipped (bool subclasses int); whole-valued floats count.
    Returns None when nothing matches.
    """
    for candidate in _candidate_report_dicts(payload):
        for key in keys:
            value = candidate.get(key)
            if isinstance(value, bool):
                continue
            if isinstance(value, (int, np.integer)):
                return int(value)
            if isinstance(value, float) and value.is_integer():
                return int(value)
    return None
|
|
def _extract_report_filenames(payload: dict[str, Any]) -> set[str] | None:
    """Filename set recorded in the report, if any.

    Prefers an all-string 'filenames' list; otherwise collects 'filename'
    entries from a 'pairs' list of dicts. Returns None when neither form
    yields anything.
    """
    for candidate in _candidate_report_dicts(payload):
        filenames = candidate.get("filenames")
        if isinstance(filenames, list) and all(isinstance(item, str) for item in filenames):
            return set(filenames)

        pairs = candidate.get("pairs")
        if isinstance(pairs, list):
            from_pairs = {
                item["filename"]
                for item in pairs
                if isinstance(item, dict) and isinstance(item.get("filename"), str)
            }
            if from_pairs:
                return from_pairs
    return None
|
|
def validate_busi_with_classes_pipeline_report(report_path: Path, sample_records: list[dict[str, str]]) -> None:
    """Cross-check discovered samples against an optional pipeline_check report.

    A missing report is fine (no-op). Otherwise the report's pair/class counts
    and (when present) its filename list must agree with *sample_records*;
    any mismatch raises RuntimeError. A non-dict report payload also raises.
    """
    if not report_path.exists():
        return


    payload = load_json(report_path)
    if not isinstance(payload, dict):
        raise RuntimeError(f"Expected dict payload in {report_path}, found {type(payload).__name__}.")


    # Counts derived from the records we actually discovered on disk.
    benign_count = sum(1 for record in sample_records if record.get("class_label") == "benign")
    malignant_count = sum(1 for record in sample_records if record.get("class_label") == "malignant")
    expected_counts = {
        "total_pairs": len(sample_records),
        "benign": benign_count,
        "malignant": malignant_count,
    }
    # The report may use several key spellings; a None means the count is absent.
    report_counts = {
        "total_pairs": _extract_report_int(payload, ("total_pairs", "pair_count", "num_pairs", "total")),
        "benign": _extract_report_int(payload, ("benign", "benign_count", "num_benign")),
        "malignant": _extract_report_int(payload, ("malignant", "malignant_count", "num_malignant")),
    }
    for key, expected_value in expected_counts.items():
        report_value = report_counts[key]
        # Only compare counts the report actually provides.
        if report_value is not None and report_value != expected_value:
            raise RuntimeError(
                f"pipeline_check mismatch for {key}: discovered={expected_value}, report={report_value} ({report_path})"
            )


    report_filenames = _extract_report_filenames(payload)
    if report_filenames is not None:
        discovered_filenames = {record["filename"] for record in sample_records}
        if report_filenames != discovered_filenames:
            # Show at most 10 examples from each side to keep the error readable.
            missing_from_report = sorted(discovered_filenames - report_filenames)[:10]
            extra_in_report = sorted(report_filenames - discovered_filenames)[:10]
            raise RuntimeError(
                f"pipeline_check filenames mismatch for {report_path}: "
                f"missing_from_report={missing_from_report}, extra_in_report={extra_in_report}"
            )


    print(f"[Pipeline Check] Validated BUSI_with_classes metadata from {report_path}")
|
|
def check_data_leakage(splits: dict[str, list[str]]) -> dict[str, list[str]]:
    """Find filenames shared between any pair of splits.

    Returns a dict mapping 'lhs ∩ rhs' to the sorted overlap; an empty dict
    means the splits are disjoint.
    """
    leaks: dict[str, list[str]] = {}
    names = list(splits)
    for i, lhs in enumerate(names):
        for rhs in names[i + 1:]:
            shared = set(splits[lhs]) & set(splits[rhs])
            if shared:
                leaks[f"{lhs} ∩ {rhs}"] = sorted(shared)
    return leaks
|
|
def _project_relative_path(path: Path) -> str:
    """String form of *path* relative to PROJECT_DIR when possible, else absolute."""
    resolved = Path(path).resolve()
    try:
        relative = resolved.relative_to(PROJECT_DIR.resolve())
    except ValueError:
        return str(resolved)
    return str(relative)
|
|
def resolve_dataset_root_from_registry(split_registry: dict[str, Any]) -> Path:
    """Resolve the registry's dataset_root, anchoring relative paths at PROJECT_DIR."""
    dataset_root = Path(split_registry["dataset_root"])
    if not dataset_root.is_absolute():
        dataset_root = (PROJECT_DIR / dataset_root).resolve()
    return dataset_root
|
|
| def make_sample_record( |
| filename: str, |
| images_subdir: str, |
| annotations_subdir: str, |
| *, |
| class_label: str | None = None, |
| ) -> dict[str, str]: |
| record = { |
| "filename": filename, |
| "image_rel_path": str(Path(images_subdir) / filename), |
| "mask_rel_path": str(Path(annotations_subdir) / filename), |
| } |
| if class_label is not None: |
| record["class_label"] = class_label |
| return record |
|
|
def build_sample_records(
    filenames: list[str],
    *,
    images_subdir: str,
    annotations_subdir: str,
    dataset_name: str,
) -> list[dict[str, str]]:
    """Sample records for *filenames* in sorted order; class labels are parsed
    from the filename only for the BUSI_with_classes dataset."""
    needs_label = dataset_name == "BUSI_with_classes"
    return [
        make_sample_record(
            filename,
            images_subdir,
            annotations_subdir,
            class_label=parse_busi_with_classes_label(filename) if needs_label else None,
        )
        for filename in sorted(filenames)
    ]
|
|
def split_ratios_for_type(split_type: str) -> tuple[float, float]:
    """(train_ratio, val_ratio) for a split tag; the test split takes the rest."""
    ratios = {"80_10_10": (0.80, 0.10), "70_10_20": (0.70, 0.10)}
    pair = ratios.get(split_type)
    if pair is None:
        raise ValueError(f"Unsupported split_type: {split_type}")
    return pair
|
|
def deterministic_shuffle_records(records: list[dict[str, str]], *, seed: int, tag: str) -> list[dict[str, str]]:
    """Return shallow copies of *records* shuffled reproducibly by (seed, tag)."""
    shuffled = [dict(record) for record in records]
    random.Random(seed + stable_int_from_text(tag)).shuffle(shuffled)
    return shuffled
|
|
def group_records_by_class(sample_records: list[dict[str, str]]) -> dict[str, list[dict[str, str]]]:
    """Bucket copies of the records by class_label.

    Raises RuntimeError if any record is missing a class_label, since the
    class-aware split builders cannot work without one.
    """
    grouped: dict[str, list[dict[str, str]]] = {}
    for record in sample_records:
        label = record.get("class_label")
        if label is None:
            raise RuntimeError("Expected class_label in sample record for class-aware splitting.")
        grouped.setdefault(label, []).append(dict(record))
    return grouped
|
|
def allocate_counts_by_ratio(total_size: int, available_counts: dict[str, int]) -> dict[str, int]:
    """Apportion *total_size* across labels proportionally to availability.

    Each label first receives the floor of its exact proportional share
    (capped by availability). Leftover slots are handed out round-robin in
    priority order — largest fractional share first, ties broken by larger
    availability, then label name — never exceeding a label's availability.
    """
    result = {label: 0 for label in available_counts}
    if total_size <= 0 or not available_counts:
        return result

    pool = sum(available_counts.values())
    if pool <= 0:
        return result

    exact_share = {
        label: total_size * count / pool for label, count in available_counts.items()
    }
    for label, count in available_counts.items():
        result[label] = min(count, int(math.floor(exact_share[label])))

    leftover = min(total_size, pool) - sum(result.values())
    priority = sorted(
        available_counts,
        key=lambda label: (
            exact_share[label] - math.floor(exact_share[label]),
            available_counts[label],
            label,
        ),
        reverse=True,
    )
    while leftover > 0:
        assigned_any = False
        for label in priority:
            if result[label] < available_counts[label]:
                result[label] += 1
                leftover -= 1
                assigned_any = True
            if leftover == 0:
                break
        if not assigned_any:
            break
    return result
|
|
def allocate_balanced_counts(total_size: int, available_counts: dict[str, int]) -> dict[str, int]:
    """Split *total_size* across labels as evenly as possible.

    Every label starts at min(availability, total_size // 2). Remaining slots
    are then granted one at a time to the label with the most headroom left,
    preferring 'benign' on ties, then lexicographically larger labels.
    """
    result = {label: 0 for label in available_counts}
    if total_size <= 0 or not available_counts:
        return result

    labels = sorted(available_counts)
    base = total_size // 2
    for label in labels:
        result[label] = min(available_counts[label], base)

    leftover = min(total_size, sum(available_counts.values())) - sum(result.values())
    while leftover > 0:
        open_labels = [label for label in labels if result[label] < available_counts[label]]
        if not open_labels:
            break
        chosen = max(
            open_labels,
            key=lambda label: (
                available_counts[label] - result[label],
                1 if label == "benign" else 0,
                label,
            ),
        )
        result[chosen] += 1
        leftover -= 1
    return result
|
|
def build_unstratified_base_split(
    sample_records: list[dict[str, str]],
    *,
    split_type: str,
    seed: int,
) -> dict[str, list[dict[str, str]]]:
    """Deterministically shuffle all records, then cut train/val/test by ratio
    (test receives whatever the train/val cuts leave over)."""
    train_ratio, val_ratio = split_ratios_for_type(split_type)
    shuffled = deterministic_shuffle_records(sample_records, seed=seed, tag=f"base::{split_type}")

    total = len(shuffled)
    train_cut = int(total * train_ratio)
    val_cut = int(total * (train_ratio + val_ratio))
    return {
        "train": shuffled[:train_cut],
        "val": shuffled[train_cut:val_cut],
        "test": shuffled[val_cut:],
    }
|
|
def build_stratified_base_split(
    sample_records: list[dict[str, str]],
    *,
    split_type: str,
    seed: int,
) -> dict[str, list[dict[str, str]]]:
    """Apply the split ratios within each class, then reshuffle each split.

    Per-class slicing keeps every split's class balance close to the overall
    balance; the final per-split shuffle removes the class-ordered layout
    introduced by concatenation.
    """
    train_ratio, val_ratio = split_ratios_for_type(split_type)
    grouped = group_records_by_class(sample_records)
    splits: dict[str, list[dict[str, str]]] = {"train": [], "val": [], "test": []}

    for class_label in sorted(grouped):
        class_records = deterministic_shuffle_records(
            grouped[class_label],
            seed=seed,
            tag=f"base::{split_type}::{class_label}",
        )
        total = len(class_records)
        train_cut = int(total * train_ratio)
        val_cut = int(total * (train_ratio + val_ratio))
        splits["train"].extend(class_records[:train_cut])
        splits["val"].extend(class_records[train_cut:val_cut])
        splits["test"].extend(class_records[val_cut:])

    return {
        split_name: deterministic_shuffle_records(
            records,
            seed=seed,
            tag=f"base::{split_type}::{split_name}",
        )
        for split_name, records in splits.items()
    }
|
|
def build_balanced_train_base_split(
    sample_records: list[dict[str, str]],
    *,
    split_type: str,
    seed: int,
) -> dict[str, list[dict[str, str]]]:
    """Build a split whose TRAIN set is class-balanced (equal benign/malignant).

    Each class contributes the same number of train records (limited by the
    rarer class and by half the nominal train size); the leftovers of each
    class are divided between val and test proportionally to the configured
    val/test ratios. Requires exactly the benign/malignant classes, otherwise
    raises RuntimeError. All ordering is deterministic in (seed, split_type).
    """
    train_ratio, val_ratio = split_ratios_for_type(split_type)
    test_ratio = 1.0 - train_ratio - val_ratio
    grouped = group_records_by_class(sample_records)
    if sorted(grouped.keys()) != ["benign", "malignant"]:
        raise RuntimeError(
            f"balanced_train split policy expects benign/malignant classes, found {sorted(grouped.keys())}"
        )


    # Shuffle each class stream independently and reproducibly.
    shuffled = {
        class_label: deterministic_shuffle_records(
            records,
            seed=seed,
            tag=f"base::{split_type}::balanced_train::{class_label}",
        )
        for class_label, records in grouped.items()
    }


    # Per-class train quota: half the nominal train size, capped by the
    # smallest class so both classes contribute equally.
    nominal_train_size = int(len(sample_records) * train_ratio)
    per_class_train = min(
        nominal_train_size // 2,
        *(len(records) for records in shuffled.values()),
    )


    train_records: list[dict[str, str]] = []
    remaining_by_class: dict[str, list[dict[str, str]]] = {}
    for class_label in sorted(shuffled.keys()):
        records = shuffled[class_label]
        train_records.extend(records[:per_class_train])
        remaining_by_class[class_label] = records[per_class_train:]


    # Split each class's remainder between val and test according to the
    # relative val/test ratios (epsilon guards a zero denominator).
    remainder_val_fraction = val_ratio / max(val_ratio + test_ratio, 1e-8)
    val_records: list[dict[str, str]] = []
    test_records: list[dict[str, str]] = []
    for class_label in sorted(remaining_by_class.keys()):
        records = remaining_by_class[class_label]
        val_count = int(len(records) * remainder_val_fraction)
        val_records.extend(records[:val_count])
        test_records.extend(records[val_count:])


    # Final shuffles remove the class-ordered concatenation layout.
    return {
        "train": deterministic_shuffle_records(
            train_records,
            seed=seed,
            tag=f"base::{split_type}::balanced_train::train",
        ),
        "val": deterministic_shuffle_records(
            val_records,
            seed=seed,
            tag=f"base::{split_type}::balanced_train::val",
        ),
        "test": deterministic_shuffle_records(
            test_records,
            seed=seed,
            tag=f"base::{split_type}::balanced_train::test",
        ),
    }
|
|
def build_nested_train_subsets(
    train_records: list[dict[str, str]],
    train_fractions: list[float],
    *,
    split_type: str,
    seed: int,
    split_policy: str | None = None,
) -> dict[str, list[dict[str, str]]]:
    """Build nested train subsets keyed by percent label (e.g. "10", "50").

    Subsets are nested by construction: each fraction takes a prefix of one
    deterministically shuffled ordering (per class when labels exist), so a
    smaller subset is always contained in every larger one.

    Args:
        train_records: frozen base-train records to draw subsets from.
        train_fractions: fractions in (0, 1]; duplicates are collapsed.
        split_type: only used to namespace the deterministic shuffle tags.
        seed: base seed for all deterministic shuffles.
        split_policy: "balanced_train" allocates equal class counts; anything
            else (including None) allocates counts by the class ratio.

    Returns:
        Mapping of percent label -> list of copied record dicts; empty dict
        when there are no train records.

    Raises:
        ValueError: if any fraction is outside (0, 1].
    """
    if not train_records:
        return {}


    ordered_records = deterministic_shuffle_records(train_records, seed=seed, tag=f"subset::{split_type}")
    use_class_labels = any("class_label" in record for record in train_records)
    if not use_class_labels:
        # Unlabeled records: each fraction is a prefix of one shared shuffle.
        subsets: dict[str, list[dict[str, str]]] = {}
        for fraction in sorted({float(value) for value in train_fractions}):
            if fraction <= 0.0 or fraction > 1.0:
                raise ValueError(f"Invalid training fraction: {fraction}")
            subset_key = percent_label(fraction)
            # max(1, ...) keeps every subset non-empty; >= 1.0 means everything.
            subset_size = len(ordered_records) if fraction >= 1.0 else max(1, int(len(ordered_records) * fraction))
            subsets[subset_key] = [dict(record) for record in ordered_records[:subset_size]]
        return subsets


    # Labeled records: shuffle each class pool once so per-class prefixes
    # remain nested across all fractions.
    grouped = {
        class_label: deterministic_shuffle_records(
            records,
            seed=seed,
            tag=f"subset::{split_type}::{split_policy or 'stratified'}::{class_label}",
        )
        for class_label, records in group_records_by_class(train_records).items()
    }
    available_counts = {class_label: len(records) for class_label, records in grouped.items()}


    subsets: dict[str, list[dict[str, str]]] = {}
    for fraction in sorted({float(value) for value in train_fractions}):
        if fraction <= 0.0 or fraction > 1.0:
            raise ValueError(f"Invalid training fraction: {fraction}")
        subset_key = percent_label(fraction)
        subset_size = len(ordered_records) if fraction >= 1.0 else max(1, int(len(ordered_records) * fraction))
        # Decide how many records each class contributes to this subset.
        if split_policy == "balanced_train":
            class_counts = allocate_balanced_counts(subset_size, available_counts)
        else:
            class_counts = allocate_counts_by_ratio(subset_size, available_counts)


        subset_records: list[dict[str, str]] = []
        for class_label in sorted(grouped.keys()):
            subset_records.extend([dict(record) for record in grouped[class_label][: class_counts[class_label]]])
        subsets[subset_key] = deterministic_shuffle_records(
            subset_records,
            seed=seed,
            tag=f"subset::{split_type}::{split_policy or 'stratified'}::{subset_key}",
        )
    return subsets
|
|
def train_fraction_from_subset_key(subset_key: str) -> float:
    """Convert a percent-style subset key (e.g. "25") into a fraction (0.25).

    Raises:
        RuntimeError: when the key is not a plain digit string, or when the
            resulting percent falls outside 1..100.
    """
    text = str(subset_key).strip()
    if not text.isdigit():
        raise RuntimeError(f"Invalid train subset key {subset_key!r} in dataset_splits.json.")
    value = int(text)
    if not 1 <= value <= 100:
        raise RuntimeError(f"Train subset key {subset_key!r} must be between 1 and 100.")
    return value / 100.0
|
|
def validate_persisted_split_no_leakage(split_type: str, split_entry: dict[str, Any], *, source: str) -> None:
    """Fail fast if a persisted split entry shows duplication, leakage, or broken nesting.

    Checks, in order:
      1. no duplicate filenames inside any base split;
      2. no filename shared across base splits (via check_data_leakage);
      3. every train subset draws only from the base train split;
      4. subsets are nested: each smaller subset is contained in the next larger.

    Args:
        split_type: split type being validated (used in error/log messages).
        split_entry: registry entry holding "base_splits" and "train_subsets".
        source: provenance label ("loaded"/"repaired"/"created") echoed in the log.

    Raises:
        RuntimeError: on any duplicate, leakage, out-of-base, or nesting violation.
    """
    base_splits = split_entry["base_splits"]
    base_filenames: dict[str, list[str]] = {}
    for split_name, records in base_splits.items():
        filenames = [record["filename"] for record in records]
        if len(filenames) != len(set(filenames)):
            raise RuntimeError(f"Duplicate filenames detected inside {split_name} for split_type={split_type}.")
        base_filenames[split_name] = filenames


    leaks = check_data_leakage(base_filenames)
    if leaks:
        raise RuntimeError(f"Data leakage detected for split_type={split_type}: {list(leaks.keys())}")


    base_train = set(base_filenames["train"])
    previous_subset: set[str] = set()
    # Subset keys are percent strings; numeric sort walks from the smallest
    # subset upward, which the nesting check below relies on.
    for subset_key in sorted(split_entry["train_subsets"].keys(), key=int):
        subset_filenames = [record["filename"] for record in split_entry["train_subsets"][subset_key]]
        if len(subset_filenames) != len(set(subset_filenames)):
            raise RuntimeError(
                f"Duplicate filenames detected inside train subset {subset_key} for split_type={split_type}."
            )
        subset_set = set(subset_filenames)
        missing = sorted(subset_set - base_train)
        if missing:
            raise RuntimeError(
                f"Train subset {subset_key} contains files outside the base train split for split_type={split_type}."
            )
        if previous_subset and not previous_subset.issubset(subset_set):
            raise RuntimeError(
                f"Train subsets are not nested for split_type={split_type}."
            )
        previous_subset = subset_set


    print(f"[Split Check] No data leakage detected for split_type={split_type} ({source}).")
|
|
def repair_persisted_train_subsets(
    split_registry: dict[str, Any],
    requested_train_fractions: list[float],
    *,
    split_json_path: Path,
    seed: int,
) -> bool:
    """Ensure every requested train fraction exists in the persisted registry.

    Takes the union of (persisted fractions, requested fractions, fractions
    implied by existing subset keys); any split type missing a requested
    subset key has ALL of its subsets rebuilt from the frozen base train
    split so the nesting property is preserved. The JSON file is rewritten
    only when something actually changed.

    Returns:
        True when the registry was modified (and saved), False otherwise.
    """
    split_entries = split_registry.get("split_types", {})
    requested_fractions = sorted({float(value) for value in requested_train_fractions})
    combined_fractions = {float(value) for value in split_registry.get("train_fractions", [])}
    combined_fractions.update(requested_fractions)
    dataset_name = str(split_registry.get("dataset_name", "BUSI"))
    # Split policy only applies to the class-labelled dataset variant.
    split_policy = split_registry.get("split_policy") if dataset_name == "BUSI_with_classes" else None


    # Also keep every fraction already materialized as a subset key.
    for split_type in SUPPORTED_SPLIT_TYPES:
        split_entry = split_entries[split_type]
        train_subsets = split_entry.get("train_subsets", {})
        for subset_key in train_subsets.keys():
            combined_fractions.add(train_fraction_from_subset_key(subset_key))


    combined_fractions_list = sorted(combined_fractions)
    requested_keys = {percent_label(fraction) for fraction in requested_fractions}
    # Prefer the seed the registry was created with over the caller's seed.
    registry_seed = int(split_registry.get("seed", seed))
    repaired = False


    if split_registry.get("train_fractions") != combined_fractions_list:
        split_registry["train_fractions"] = combined_fractions_list
        repaired = True


    for split_type in SUPPORTED_SPLIT_TYPES:
        split_entry = split_entries[split_type]
        train_subsets = split_entry.get("train_subsets", {})
        missing_requested_keys = sorted(requested_keys - set(train_subsets.keys()), key=int)
        if not missing_requested_keys:
            continue


        # Rebuild the full nested family (not just missing keys) so subsets
        # remain prefixes of one shared deterministic ordering.
        split_entry["train_subsets"] = build_nested_train_subsets(
            split_entry["base_splits"]["train"],
            combined_fractions_list,
            split_type=split_type,
            seed=registry_seed,
            split_policy=split_policy,
        )
        print(
            f"[Splits] Rebuilt missing train subsets {missing_requested_keys} "
            f"for split_type={split_type} in {split_json_path}"
        )
        repaired = True


    if repaired:
        save_json(split_json_path, split_registry)
        print(f"[Splits] Updated persisted dataset splits at {split_json_path}")
    return repaired
|
|
def load_or_create_dataset_splits(
    images_dir: Path,
    annotations_dir: Path,
    split_json_path: Path,
    train_fractions: list[float],
    seed: int,
) -> tuple[dict[str, Any], str]:
    """Load the persisted dataset splits (repairing if needed) or create them.

    Load path: validates version, dataset name, and (for BUSI_with_classes)
    split policy; verifies all supported split types exist; backfills missing
    train subsets; re-checks leakage; returns source "loaded" or "repaired".

    Create path: validates image/mask pairing, builds base splits per policy
    plus nested train subsets, verifies leakage, persists the registry, and
    returns source "created".

    Returns:
        (split_registry, source) with source in {"loaded", "repaired", "created"}.

    Raises:
        RuntimeError: on version/dataset/policy mismatch, missing split types,
            invalid payloads, or image/mask inconsistencies.
    """
    images_dir = Path(images_dir).resolve()
    annotations_dir = Path(annotations_dir).resolve()
    split_json_path = Path(split_json_path).resolve()
    dataset_name = current_dataset_name()
    split_policy = current_busi_with_classes_split_policy() if dataset_name == "BUSI_with_classes" else None
    if split_json_path.exists():
        split_registry = load_json(split_json_path)
        if split_registry.get("version") != DATASET_SPLITS_VERSION:
            raise RuntimeError(
                f"Unsupported dataset_splits.json version in {split_json_path}. "
                f"Expected version={DATASET_SPLITS_VERSION}."
            )
        persisted_dataset_name = str(split_registry.get("dataset_name", "BUSI"))
        if persisted_dataset_name != dataset_name:
            raise RuntimeError(
                f"dataset_splits.json at {split_json_path} targets dataset_name={persisted_dataset_name!r}, "
                f"but current DATASET_NAME={dataset_name!r}."
            )
        persisted_split_policy = split_registry.get("split_policy")
        if dataset_name == "BUSI_with_classes" and persisted_split_policy != split_policy:
            raise RuntimeError(
                f"dataset_splits.json at {split_json_path} targets split_policy={persisted_split_policy!r}, "
                f"but current BUSI_WITH_CLASSES_SPLIT_POLICY={split_policy!r}."
            )
        split_entries = split_registry.get("split_types")
        if not isinstance(split_entries, dict):
            raise RuntimeError(f"Invalid split_types payload in {split_json_path}.")
        for split_type in SUPPORTED_SPLIT_TYPES:
            if split_type not in split_entries:
                raise RuntimeError(
                    f"dataset_splits.json is missing split_type={split_type}. Delete it to regenerate cleanly."
                )
        # Backfill any train fractions requested now but absent from the JSON.
        repaired = repair_persisted_train_subsets(
            split_registry,
            train_fractions,
            split_json_path=split_json_path,
            seed=seed,
        )
        source = "repaired" if repaired else "loaded"
        if dataset_name == "BUSI_with_classes":
            # Re-derive records from disk so the optional pipeline report is
            # validated against the current on-disk dataset, not the JSON.
            sample_records = build_sample_records(
                validate_image_mask_consistency(images_dir, annotations_dir)[0],
                images_subdir=split_registry["images_subdir"],
                annotations_subdir=split_registry["annotations_subdir"],
                dataset_name=dataset_name,
            )
            pipeline_check_path = current_pipeline_check_path()
            if pipeline_check_path is not None:
                validate_busi_with_classes_pipeline_report(pipeline_check_path, sample_records)
        for split_type in SUPPORTED_SPLIT_TYPES:
            validate_persisted_split_no_leakage(split_type, split_entries[split_type], source=source)
        if repaired:
            print(f"[Splits] Loaded and repaired persisted dataset splits from {split_json_path}")
        else:
            print(f"[Splits] Loaded persisted dataset splits from {split_json_path}")
        return split_registry, source


    # Create path: no persisted registry yet. Fail fast on unpaired files.
    matched, missing_masks, missing_images = validate_image_mask_consistency(images_dir, annotations_dir)
    if missing_masks or missing_images:
        raise RuntimeError(
            "BUSI image/mask mismatch detected. "
            f"missing_masks={len(missing_masks)}, missing_images={len(missing_images)}"
        )


    dataset_root = images_dir.parent.resolve()
    images_subdir = images_dir.relative_to(dataset_root).as_posix()
    annotations_subdir = annotations_dir.relative_to(dataset_root).as_posix()
    sample_records = build_sample_records(
        matched,
        images_subdir=images_subdir,
        annotations_subdir=annotations_subdir,
        dataset_name=dataset_name,
    )
    if dataset_name == "BUSI_with_classes":
        pipeline_check_path = current_pipeline_check_path()
        if pipeline_check_path is not None:
            validate_busi_with_classes_pipeline_report(pipeline_check_path, sample_records)
    split_registry = {
        "version": DATASET_SPLITS_VERSION,
        "dataset_name": dataset_name,
        "split_policy": split_policy,
        "dataset_root": _project_relative_path(dataset_root),
        "images_subdir": images_subdir,
        "annotations_subdir": annotations_subdir,
        "seed": seed,
        "train_fractions": sorted({float(value) for value in train_fractions}),
        "split_types": {},
    }


    # Build, validate, and record every supported split type.
    for split_type in SUPPORTED_SPLIT_TYPES:
        if dataset_name == "BUSI_with_classes":
            if split_policy == "balanced_train":
                base_splits = build_balanced_train_base_split(
                    sample_records,
                    split_type=split_type,
                    seed=seed,
                )
            else:
                base_splits = build_stratified_base_split(
                    sample_records,
                    split_type=split_type,
                    seed=seed,
                )
        else:
            base_splits = build_unstratified_base_split(
                sample_records,
                split_type=split_type,
                seed=seed,
            )
        train_subsets = build_nested_train_subsets(
            base_splits["train"],
            train_fractions,
            split_type=split_type,
            seed=seed,
            split_policy=split_policy,
        )
        split_entry = {
            "split_type": split_type,
            "base_splits": base_splits,
            "train_subsets": train_subsets,
        }
        validate_persisted_split_no_leakage(split_type, split_entry, source="created")
        split_registry["split_types"][split_type] = split_entry


    save_json(split_json_path, split_registry)
    print(f"[Splits] Created persisted dataset splits at {split_json_path}")
    return split_registry, "created"
|
|
def select_persisted_split(
    split_registry: dict[str, Any],
    split_type: str,
    train_fraction: float,
) -> dict[str, Any]:
    """Resolve one (split_type, train_fraction) view of the persisted registry.

    Returns a payload holding the dataset root, the chosen train subset, and
    the frozen base train/val/test record lists.

    Raises:
        ValueError: for an unsupported split_type.
        KeyError: when the split type or train fraction is not persisted.
    """
    if split_type not in SUPPORTED_SPLIT_TYPES:
        raise ValueError(f"Unsupported split_type: {split_type}")

    registered = split_registry.get("split_types", {})
    if split_type not in registered:
        raise KeyError(
            f"Requested split_type={split_type} is not available in dataset_splits.json. "
            "Delete the JSON file to regenerate it with the new configuration."
        )

    entry = registered[split_type]
    subset_key = percent_label(train_fraction)
    subsets = entry.get("train_subsets", {})
    if subset_key not in subsets:
        raise KeyError(
            f"Requested train fraction={train_fraction} (key={subset_key}) is not available in dataset_splits.json."
        )

    base = entry["base_splits"]
    return {
        "dataset_root": resolve_dataset_root_from_registry(split_registry),
        "split_type": split_type,
        "train_fraction": float(train_fraction),
        "train_subset_key": subset_key,
        "base_train_records": base["train"],
        "train_records": subsets[subset_key],
        "val_records": base["val"],
        "test_records": base["test"],
    }
|
|
def compute_busi_statistics(
    dataset_root: Path,
    sample_records: list[dict[str, str]],
    cache_path: Path,
) -> tuple[float, float, str]:
    """Compute (or load cached) global pixel mean/std over the given records.

    The cache is only reused when its stored filename list matches the
    current records exactly, so the stats always correspond to the active
    train subset.

    Returns:
        (global_mean, global_std, source) where source is
        "loaded_from_cache" or "computed_fresh".
    """
    filenames = [record["filename"] for record in sample_records]
    if cache_path.exists():
        stats = load_json(cache_path)
        if stats.get("filenames") == filenames:
            print(f"[Normalization] Loaded cached normalization stats from {cache_path}")
            return float(stats["global_mean"]), float(stats["global_std"]), "loaded_from_cache"


    # Single streaming pass: accumulate sum and sum-of-squares, then apply
    # Var[x] = E[x^2] - E[x]^2. float64 keeps the accumulators precise.
    total_sum = np.float64(0.0)
    total_sq_sum = np.float64(0.0)
    total_pixels = 0


    for record in tqdm(sample_records, desc="Computing BUSI train mean/std", leave=False):
        image_path = dataset_root / record["image_rel_path"]
        img = np.array(PILImage.open(image_path)).astype(np.float64)
        total_sum += img.sum()
        total_sq_sum += (img ** 2).sum()
        total_pixels += img.size


    global_mean = float(total_sum / total_pixels)
    global_std = float(np.sqrt(total_sq_sum / total_pixels - global_mean ** 2))
    # Guard against a degenerate (near-constant) image set so later
    # (x - mean) / std normalization never divides by ~0.
    if global_std < 1e-6:
        global_std = 1.0


    save_json(
        cache_path,
        {
            "global_mean": global_mean,
            "global_std": global_std,
            "total_pixels": int(total_pixels),
            "num_images": len(sample_records),
            "filenames": filenames,
        },
    )
    print(f"[Normalization] Computed and saved normalization stats to {cache_path}")
    return global_mean, global_std, "computed_fresh"
|
|
def compute_class_distribution(sample_records: list[dict[str, str]]) -> dict[str, int] | None:
    """Count benign/malignant records; None when no record carries a label."""
    if not sample_records:
        return None
    if all("class_label" not in record for record in sample_records):
        return None
    counts = {"benign": 0, "malignant": 0}
    for record in sample_records:
        label = record.get("class_label")
        if label in counts:
            counts[label] += 1
    return counts
|
|
def format_class_distribution(class_distribution: dict[str, int] | None) -> str:
    """Render a class-count dict as 'benign=…, malignant=…, total=…'."""
    if class_distribution is None:
        return "unavailable"
    benign, malignant = (
        int(class_distribution.get(key, 0)) for key in ("benign", "malignant")
    )
    return f"benign={benign}, malignant={malignant}, total={benign + malignant}"
|
|
def print_loaded_class_distribution(
    *,
    split_type: str,
    train_subset_key: str,
    base_train_records: list[dict[str, str]],
    train_records: list[dict[str, str]],
    val_records: list[dict[str, str]],
    test_records: list[dict[str, str]],
) -> None:
    """Print benign/malignant counts per split; silently no-op for unlabeled data."""
    if not any("class_label" in record for record in train_records):
        return
    section(f"Loaded Class Distribution | {split_type} | {train_subset_key}%")
    print(f"Base train classes : {format_class_distribution(compute_class_distribution(base_train_records))}")
    print(f"Train subset classes : {format_class_distribution(compute_class_distribution(train_records))}")
    print(f"Validation classes : {format_class_distribution(compute_class_distribution(val_records))}")
    print(f"Test classes : {format_class_distribution(compute_class_distribution(test_records))}")
|
|
def print_split_summary(payload: dict[str, Any]) -> None:
    """Print provenance, per-split counts, and (when present) class counts."""
    section(f"Split Summary | {payload['split_type']} | {payload['train_subset_key']}%")
    print(f"Dataset name : {payload['dataset_name']}")
    if payload.get("dataset_split_policy") is not None:
        print(f"Dataset split policy : {payload['dataset_split_policy']}")
    print(f"Dataset splits JSON : {payload['dataset_splits_path']}")
    print(f"Split source : {payload['split_source']}")
    print(f"Split type used : {payload['split_type']}")
    print(f"Train fraction : {payload['train_subset_key']}% of frozen base train")
    print(f"Base train samples : {payload['base_train_count']}")
    print(f"Train subset samples : {payload['train_count']}")
    print(f"Validation samples : {payload['val_count']}")
    print(f"Test samples : {payload['test_count']}")
    # Class distributions only exist for the class-labelled dataset variant.
    if payload.get("base_train_class_distribution") is not None:
        print(f"Base train classes : {format_class_distribution(payload['base_train_class_distribution'])}")
        print(f"Train subset classes : {format_class_distribution(payload['train_class_distribution'])}")
        print(f"Validation classes : {format_class_distribution(payload['val_class_distribution'])}")
        print(f"Test classes : {format_class_distribution(payload['test_class_distribution'])}")
    print(f"Validation/Test frozen : {payload['val_test_frozen']}")
    print(f"Leakage check : {payload['leakage_check']}")
|
|
def print_normalization_summary(payload: dict[str, Any]) -> None:
    """Print which normalization stats are in effect and where they came from."""
    mode = "ImageNet mean/std" if USE_IMAGENET_NORM else "Dataset train mean/std"
    print(f"Dataset name : {payload['dataset_name']}")
    print(f"Normalization mode : {mode}")
    print(f"Stats cache path : {payload['normalization_cache_path']}")
    print(f"Stats source : {payload['normalization_source']}")
    print(f"Split type used : {payload['split_type']}")
    print(f"Stats computed from : {payload['train_count']} train samples ({payload['train_subset_key']}%)")
| |
| |
| |
|
|
| def _to_three_channels(image: np.ndarray) -> np.ndarray: |
| if image.ndim == 2: |
| image = image[..., None] |
| if image.shape[2] == 1: |
| image = np.repeat(image, 3, axis=2) |
| elif image.shape[2] > 3: |
| image = image[..., :3] |
| return image |
|
|
def _prepare_image(raw: np.ndarray, global_mean: float, global_std: float) -> np.ndarray:
    """Convert a raw image array into a normalized CHW float32 array.

    Pipeline: float32 cast -> 3-channel coercion -> bilinear resize to
    IMG_SIZE (only when needed) -> normalization (ImageNet constants or the
    dataset's global train stats) -> HWC-to-CHW transpose.

    NOTE(review): the resize round-trips through uint8, which clips/quantizes
    values outside 0-255 — assumes inputs are 8-bit images; confirm.
    """
    img = raw.astype(np.float32)
    img = _to_three_channels(img)
    if IMG_SIZE > 0 and (img.shape[0] != IMG_SIZE or img.shape[1] != IMG_SIZE):
        img = np.array(
            PILImage.fromarray(img.astype(np.uint8)).resize((IMG_SIZE, IMG_SIZE), PILImage.BILINEAR)
        ).astype(np.float32)
    if USE_IMAGENET_NORM:
        # Heuristic: values above 1.0 imply a 0-255 range, so rescale first.
        if img.max() > 1.0:
            img = img / 255.0
        img = (img - IMAGENET_MEAN) / IMAGENET_STD
    else:
        img = (img - global_mean) / global_std
    # HWC -> CHW; copy() yields a contiguous array safe to wrap in a tensor.
    return np.transpose(img, (2, 0, 1)).copy()
|
|
def _prepare_mask(raw: np.ndarray) -> np.ndarray:
    """Binarize a raw mask into a float32 (1, H, W) array, resized to IMG_SIZE.

    Multi-channel masks keep only their first channel. Resizing uses nearest
    neighbor so label values are never interpolated; any non-zero pixel
    becomes foreground (1.0).
    """
    plane = raw.astype(np.uint8)
    if plane.ndim == 3:
        plane = plane[..., 0]
    needs_resize = IMG_SIZE > 0 and plane.shape[:2] != (IMG_SIZE, IMG_SIZE)
    if needs_resize:
        as_pil = PILImage.fromarray(plane)
        if as_pil.mode != "L":
            as_pil = as_pil.convert("L")
        plane = np.array(as_pil.resize((IMG_SIZE, IMG_SIZE), PILImage.NEAREST))
    binary = (plane > 0).astype(np.float32)
    return binary[None, ...].copy()
|
|
def print_imagenet_normalization_status() -> bool:
    """Report which normalization scheme `_prepare_image` will use.

    Also warns when ImageNet encoder weights are paired with non-ImageNet
    input normalization — a common silent mismatch.

    Returns:
        True when ImageNet normalization is active.
    """
    active = bool(USE_IMAGENET_NORM)
    if active:
        print("✅🖼️ ImageNet normalization is ACTIVE in `_prepare_image`.")
    else:
        print("⚠️🧪 ImageNet normalization is NOT active in `_prepare_image`.")
        print("⚠️📊 Using dataset global mean/std normalization instead.")
    weights_mismatch = SMP_ENCODER_WEIGHTS == "imagenet" and not active
    if weights_mismatch:
        print("⚠️🚨 Encoder weights are set to ImageNet, but ImageNet normalization is disabled.")
    return active
|
|
| def _apply_minimal_train_aug(image: torch.Tensor, mask: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: |
| if torch.rand(1).item() < 0.5: |
| image = torch.flip(image, dims=(2,)) |
| mask = torch.flip(mask, dims=(2,)) |
| if torch.rand(1).item() < 0.5: |
| image = torch.flip(image, dims=(1,)) |
| mask = torch.flip(mask, dims=(1,)) |
| if torch.rand(1).item() < 0.5: |
| k = 1 if torch.rand(1).item() < 0.5 else 3 |
| image = torch.rot90(image, k=k, dims=(1, 2)) |
| mask = torch.rot90(mask, k=k, dims=(1, 2)) |
| return image.contiguous(), mask.contiguous() |
|
|
class BUSIDataset(Dataset):
    """In-RAM BUSI segmentation dataset yielding normalized image/mask pairs.

    All samples are eagerly decoded, normalized, and cached as tensors at
    construction time (preloading is mandatory in this runner). Optional
    flip/rotation augmentation is applied per __getitem__ on clones, so the
    cached tensors are never mutated.
    """

    def __init__(
        self,
        sample_records: list[dict[str, str]],
        dataset_root: Path,
        global_mean: float,
        global_std: float,
        *,
        preload: bool,
        augment: bool,
        split_name: str,
    ) -> None:
        super().__init__()
        # Copy each record so later caller-side mutations cannot leak in.
        self.sample_records = [dict(record) for record in sample_records]
        self.dataset_root = Path(dataset_root)
        self.global_mean = float(global_mean)
        self.global_std = float(global_std)
        self.preload = preload
        self.augment = augment
        self.split_name = split_name
        self._images: list[torch.Tensor] = []
        self._masks: list[torch.Tensor] = []
        # Running byte count of the cached tensors (for RAM reporting).
        self._raw_cache_bytes = 0


        if not self.preload:
            raise ValueError("PRELOAD_TO_RAM is mandatory in this RunPod runner.")
        self._preload_to_ram()


    def _preload_to_ram(self) -> None:
        """Decode, validate, normalize, and cache every sample as tensors."""
        desc = f"Preloading {self.split_name} ({len(self.sample_records)} samples) to RAM"
        for record in tqdm(self.sample_records, desc=desc, leave=False):
            raw_img = np.array(PILImage.open(self.dataset_root / record["image_rel_path"]))
            raw_mask = np.array(PILImage.open(self.dataset_root / record["mask_rel_path"]))
            # Fail fast on corrupt pairs rather than resizing a mismatch away.
            if raw_img.shape[:2] != raw_mask.shape[:2]:
                raise RuntimeError(
                    f"Image/mask spatial size mismatch for {record['filename']}: "
                    f"image={raw_img.shape[:2]}, mask={raw_mask.shape[:2]}"
                )
            image = torch.from_numpy(_prepare_image(raw_img, self.global_mean, self.global_std))
            mask = torch.from_numpy(_prepare_mask(raw_mask))
            self._raw_cache_bytes += tensor_bytes(image) + tensor_bytes(mask)
            self._images.append(image)
            self._masks.append(mask)


    def __len__(self) -> int:
        return len(self.sample_records)


    def __getitem__(self, index: int) -> dict[str, Any]:
        # Clone so augmentation never mutates the shared RAM cache.
        image = self._images[index].clone()
        mask = self._masks[index].clone()
        if self.augment:
            image, mask = _apply_minimal_train_aug(image, mask)
        return {
            "image": image,
            "mask": mask,
            "sample_id": Path(self.sample_records[index]["filename"]).stem,
            "dataset": current_dataset_name(),
        }


    @property
    def cache_bytes(self) -> int:
        # Estimated RAM used by this split's preloaded tensors.
        return self._raw_cache_bytes
|
|
class CUDAPrefetcher:
    """Iterator wrapper that stages the next batch on the device ahead of time.

    On CUDA, the host-to-device copy for batch k+1 is issued on a side stream
    while batch k is being consumed; __next__ makes the current stream wait
    on that side stream before handing a batch out. On CPU it degrades to a
    plain eager-copy iterator.

    NOTE(review): real copy/compute overlap also needs pinned host memory and
    non_blocking copies inside to_device — confirm against the loader config.
    """

    def __init__(self, loader: DataLoader, device: torch.device) -> None:
        self.loader = loader
        self.device = device
        self._use_cuda = device.type == "cuda"
        self._iter = None  # live iterator over self.loader, or None when idle/exhausted
        self._stream = None  # side CUDA stream used for the staged copies
        self._next_batch = None  # batch already staged for the next __next__ call


    def __len__(self) -> int:
        return len(self.loader)


    def __iter__(self):
        """Start a fresh pass: new loader iterator, new side stream, prime one batch."""
        self._iter = iter(self.loader)
        self._stream = torch.cuda.Stream(device=self.device) if self._use_cuda else None
        self._next_batch = None
        self._preload()
        return self


    def close(self) -> None:
        """Drop all references so the loader iterator and stream can be reclaimed."""
        self._next_batch = None
        self._iter = None
        self._stream = None


    def _preload(self) -> None:
        """Fetch the next batch and start moving it to the device (async on CUDA)."""
        if self._iter is None:
            self._next_batch = None
            return
        try:
            self._next_batch = next(self._iter)
        except StopIteration:
            # Source exhausted; __next__ will observe None and stop.
            self._next_batch = None
            return
        if self._use_cuda:
            assert self._stream is not None
            # Issue the copy on the side stream so it can overlap with compute
            # still running on the current stream.
            with torch.cuda.stream(self._stream):
                self._next_batch = to_device(self._next_batch, self.device)
        else:
            self._next_batch = to_device(self._next_batch, self.device)


    def __next__(self):
        if self._next_batch is None:
            self.close()
            raise StopIteration
        if self._use_cuda:
            assert self._stream is not None
            # Block the consumer's stream until the staged copy has finished.
            torch.cuda.current_stream(self.device).wait_stream(self._stream)
        batch = self._next_batch
        self._preload()  # stage the following batch before returning this one
        if self._next_batch is None:
            # Eagerly release iterator/stream once the source is exhausted.
            self._iter = None
            self._stream = None
        return batch
|
|
class DataBundle:
    """Container for one (split_type, percent) run's datasets and loaders.

    Stores exactly what it is given and exposes a few convenience accessors
    over the split payload and the preloaded dataset caches.
    """

    def __init__(
        self,
        *,
        percent: float,
        split_payload: dict[str, Any],
        train_ds: BUSIDataset,
        val_ds: BUSIDataset,
        test_ds: BUSIDataset,
        train_loader: DataLoader,
        val_loader: DataLoader,
        test_loader: DataLoader,
    ) -> None:
        self.percent = percent
        self.split_payload = split_payload
        self.train_ds, self.val_ds, self.test_ds = train_ds, val_ds, test_ds
        self.train_loader, self.val_loader, self.test_loader = (
            train_loader,
            val_loader,
            test_loader,
        )

    @property
    def global_mean(self) -> float:
        """Normalization mean recorded in the split payload."""
        return float(self.split_payload["global_mean"])

    @property
    def global_std(self) -> float:
        """Normalization std recorded in the split payload."""
        return float(self.split_payload["global_std"])

    @property
    def total_cache_bytes(self) -> int:
        """Sum of the RAM caches across all three preloaded datasets."""
        return sum(
            ds.cache_bytes for ds in (self.train_ds, self.val_ds, self.test_ds)
        )
|
|
def make_loader(dataset: Dataset, shuffle: bool, *, loader_tag: str) -> DataLoader:
    """Build a deterministic DataLoader from the module-level loader settings.

    The generator is seeded from (SEED, loader_tag) so each split gets its
    own reproducible shuffling stream. Pinned memory and persistent workers
    are enabled only when they can actually help (CUDA device / workers > 0).
    """
    workers = NUM_WORKERS
    return DataLoader(
        dataset,
        batch_size=BATCH_SIZE,
        shuffle=shuffle,
        num_workers=workers,
        pin_memory=USE_PIN_MEMORY and DEVICE.type == "cuda",
        drop_last=False,
        persistent_workers=USE_PERSISTENT_WORKERS and workers > 0,
        worker_init_fn=seed_worker,
        generator=make_seeded_generator(SEED, loader_tag),
    )
|
|
def build_data_bundle(percent: float, split_registry: dict[str, Any], split_source: str) -> DataBundle:
    """Assemble datasets and loaders for one train-percent configuration.

    Selects the persisted split, computes/loads normalization stats from the
    train subset only, preloads all three splits into RAM, and wires up
    deterministic DataLoaders. Prints class-distribution, split, and
    normalization summaries along the way.
    """
    pct_label = percent_label(percent)
    pct_root = ensure_dir(RUNS_ROOT / MODEL_NAME / f"pct_{pct_label}")
    # Cache is keyed by normalization mode, split type, and percent.
    stats_cache_path = pct_root / f"norm_stats_{normalization_cache_tag()}_{SPLIT_TYPE}_{pct_label}pct.json"


    selected_split = select_persisted_split(split_registry, SPLIT_TYPE, percent)
    base_train_class_distribution = compute_class_distribution(selected_split["base_train_records"])
    train_class_distribution = compute_class_distribution(selected_split["train_records"])
    val_class_distribution = compute_class_distribution(selected_split["val_records"])
    test_class_distribution = compute_class_distribution(selected_split["test_records"])
    print_loaded_class_distribution(
        split_type=selected_split["split_type"],
        train_subset_key=selected_split["train_subset_key"],
        base_train_records=selected_split["base_train_records"],
        train_records=selected_split["train_records"],
        val_records=selected_split["val_records"],
        test_records=selected_split["test_records"],
    )
    dataset_root = Path(selected_split["dataset_root"]).resolve()
    # Stats come from the train subset only, so normalization never sees
    # validation or test pixels.
    global_mean, global_std, normalization_source = compute_busi_statistics(
        dataset_root=dataset_root,
        sample_records=selected_split["train_records"],
        cache_path=stats_cache_path,
    )


    split_payload = {
        "dataset_name": current_dataset_name(),
        "dataset_split_policy": split_registry.get("split_policy"),
        "dataset_splits_path": str(current_dataset_splits_json_path().resolve()),
        "dataset_root": str(dataset_root),
        "split_source": split_source,
        "split_type": SPLIT_TYPE,
        "dataset_percent": percent,
        "train_subset_key": selected_split["train_subset_key"],
        "base_train_count": len(selected_split["base_train_records"]),
        "train_count": len(selected_split["train_records"]),
        "val_count": len(selected_split["val_records"]),
        "test_count": len(selected_split["test_records"]),
        "base_train_class_distribution": base_train_class_distribution,
        "train_class_distribution": train_class_distribution,
        "val_class_distribution": val_class_distribution,
        "test_class_distribution": test_class_distribution,
        "val_test_frozen": True,
        "leakage_check": "passed",
        "global_mean": global_mean,
        "global_std": global_std,
        "normalization_cache_path": str(stats_cache_path.resolve()),
        "normalization_source": normalization_source,
    }


    print_split_summary(split_payload)
    print_normalization_summary(split_payload)


    # Only the train split is augmented; val/test stay deterministic.
    train_ds = BUSIDataset(
        selected_split["train_records"],
        dataset_root,
        global_mean,
        global_std,
        preload=PRELOAD_TO_RAM,
        augment=True,
        split_name=f"train {SPLIT_TYPE} {pct_label}%",
    )
    val_ds = BUSIDataset(
        selected_split["val_records"],
        dataset_root,
        global_mean,
        global_std,
        preload=PRELOAD_TO_RAM,
        augment=False,
        split_name=f"val {SPLIT_TYPE}",
    )
    test_ds = BUSIDataset(
        selected_split["test_records"],
        dataset_root,
        global_mean,
        global_std,
        preload=PRELOAD_TO_RAM,
        augment=False,
        split_name=f"test {SPLIT_TYPE}",
    )


    bundle = DataBundle(
        percent=percent,
        split_payload=split_payload,
        train_ds=train_ds,
        val_ds=val_ds,
        test_ds=test_ds,
        train_loader=make_loader(train_ds, shuffle=True, loader_tag=f"{SPLIT_TYPE}:{pct_label}:train"),
        val_loader=make_loader(val_ds, shuffle=False, loader_tag=f"{SPLIT_TYPE}:{pct_label}:val"),
        test_loader=make_loader(test_ds, shuffle=False, loader_tag=f"{SPLIT_TYPE}:{pct_label}:test"),
    )
    print_preload_summary(bundle)
    return bundle
|
|
def print_preload_summary(bundle: DataBundle) -> None:
    """Print a human-readable summary of a preloaded DataBundle.

    Reports sample/batch counts, the normalization stats in effect, the
    shapes/dtypes of one probe training sample, and the estimated RAM cost
    of the preloaded tensor caches.
    """
    section(
        # round(), not int(): int() truncates float noise (e.g. 0.29 * 100
        # -> 28.999... -> 28), which would mislabel the percent in the header.
        f"RAM Preload Summary | {bundle.split_payload['split_type']} | {round(bundle.percent * 100)}%"
    )
    print(f"Train samples : {len(bundle.train_ds)}")
    print(f"Val samples : {len(bundle.val_ds)}")
    print(f"Test samples : {len(bundle.test_ds)}")
    print(f"Train batches : {len(bundle.train_loader)}")
    print(f"Val batches : {len(bundle.val_loader)}")
    print(f"Test batches : {len(bundle.test_loader)}")
    print(f"Global mean : {bundle.global_mean:.6f}")
    print(f"Global std : {bundle.global_std:.6f}")
    # Probe one sample so shape/dtype problems surface before training starts.
    first = bundle.train_ds[0]
    print(f"Sample image shape : {tuple(first['image'].shape)}")
    print(f"Sample mask shape : {tuple(first['mask'].shape)}")
    print(f"Sample image dtype : {first['image'].dtype}")
    print(f"Sample mask dtype : {first['mask'].dtype}")
    print(f"Estimated RAM preload : {bytes_to_gb(bundle.total_cache_bytes):.3f} GB")
|
|
| """============================================================================= |
| MODEL DEFINITIONS |
| ============================================================================= |
| """ |
|
|
def strategy_name(strategy: int, model_config: RuntimeModelConfig | None = None) -> str:
    """Return the human-readable display name for a training strategy (1-5).

    Falls back to the current validated model config when none is given; the
    wording depends on whether the backbone is the custom VGG or an SMP model.

    Raises:
        ValueError: for any strategy outside 1..5.
    """
    model_config = (model_config or current_model_config()).validate()

    if model_config.backbone_family == "custom_vgg":
        vgg_names = {
            1: "Strategy 1: Custom VGG + RL",
            2: "Strategy 2: Custom VGG + Segmentation Head (Supervised)",
            3: "Strategy 3: Custom VGG + Segmentation Head + RL",
            4: "Strategy 4: Frozen Custom VGG + Segmentation Head + RL",
            5: "Strategy 5: Frozen Custom VGG + RL",
        }
        if strategy in vgg_names:
            return vgg_names[strategy]
        raise ValueError(f"Unsupported strategy: {strategy}")

    encoder = model_config.smp_encoder_name
    decoder = model_config.smp_decoder_type
    smp_names = {
        1: f"Strategy 1: SMP Encoder ({encoder}) + RL",
        2: f"Strategy 2: SMP {decoder} ({encoder}) supervised",
        3: f"Strategy 3: SMP {decoder} ({encoder}) + RL",
        4: f"Strategy 4: Frozen SMP {decoder} ({encoder}) + RL",
        5: f"Strategy 5: Frozen SMP Encoder ({encoder}) + RL",
    }
    if strategy in smp_names:
        return smp_names[strategy]
    raise ValueError(f"Unsupported strategy: {strategy}")
|
|
| def _conv3x3(in_ch: int, out_ch: int, dilation: int = 1) -> nn.Conv2d: |
| return nn.Conv2d( |
| in_ch, |
| out_ch, |
| kernel_size=3, |
| stride=1, |
| padding=dilation, |
| dilation=dilation, |
| bias=True, |
| ) |
|
|
class _ConvBlock(nn.Module):
    """Conv3x3 -> optional GroupNorm -> ReLU -> optional Dropout2d."""

    def __init__(
        self,
        in_ch: int,
        out_ch: int,
        dilation: int = 1,
        *,
        num_groups: int = 0,
        dropout: float = 0.0,
    ) -> None:
        super().__init__()
        self.conv = _conv3x3(in_ch, out_ch, dilation=dilation)
        # Identity placeholders keep the forward path uniform when norm/dropout
        # are disabled.
        if num_groups > 0:
            self.norm = _group_norm(out_ch, num_groups=num_groups)
        else:
            self.norm = nn.Identity()
        self.act = nn.ReLU(inplace=True)
        self.drop = nn.Dropout2d(p=dropout) if dropout > 0 else nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv, norm, activation, then dropout."""
        y = self.conv(x)
        y = self.norm(y)
        y = self.act(y)
        return self.drop(y)
|
|
def _group_norm(num_channels: int, *, num_groups: int = GN_NUM_GROUPS) -> nn.GroupNorm:
    """GroupNorm with the largest group count <= ``num_groups`` dividing ``num_channels``.

    Falls back to a single group (layer-norm-like) when nothing larger divides
    the channel count evenly.
    """
    groups = min(num_groups, num_channels)
    if groups > 1:
        # Largest divisor of num_channels not exceeding the requested cap;
        # g == 1 always divides, so the generator never exhausts.
        groups = next(g for g in range(groups, 0, -1) if num_channels % g == 0)
    return nn.GroupNorm(groups, num_channels)
|
|
class SelfAttentionModule(nn.Module):
    """Single-head spatial self-attention with a learnably-gated residual output.

    To bound memory, attention is computed on an average-pooled copy of the
    feature map whenever h*w exceeds ATTENTION_MAX_TOKENS, and the result is
    bilinearly upsampled back to the input resolution.
    """

    def __init__(self, channels: int) -> None:
        super().__init__()
        # Query/key are projected down to channels // 8 (at least 1 channel).
        mid = max(channels // 8, 1)
        self.query = nn.Conv2d(channels, mid, 1)
        self.key = nn.Conv2d(channels, mid, 1)
        self.value = nn.Conv2d(channels, channels, 1)
        # gamma starts at 0, so the module is an identity mapping at init.
        self.gamma = nn.Parameter(torch.zeros(1))


    def forward(self, f: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (features + gamma * attended output, attention matrix)."""
        b, c, h, w = f.shape
        pooled = f
        if h * w > ATTENTION_MAX_TOKENS:
            # Pool so each side is roughly ATTENTION_MIN_POOL_SIZE, capping the
            # token count used in the quadratic attention matrix.
            stride_h = max(1, math.ceil(h / ATTENTION_MIN_POOL_SIZE))
            stride_w = max(1, math.ceil(w / ATTENTION_MIN_POOL_SIZE))
            pooled = F.avg_pool2d(f, kernel_size=(stride_h, stride_w), stride=(stride_h, stride_w))


        ph, pw = pooled.shape[-2:]
        # Flatten spatial positions into tokens: q/v -> (b, tokens, ch), k -> (b, ch, tokens).
        q = self.query(pooled).view(b, -1, ph * pw).permute(0, 2, 1)
        k = self.key(pooled).view(b, -1, ph * pw)
        v = self.value(pooled).view(b, -1, ph * pw).permute(0, 2, 1)
        # Scaled dot-product attention; q.shape[-1] is the projected q/k channel dim.
        attn = torch.softmax(q @ k / (q.shape[-1] ** 0.5), dim=-1)
        out = (attn @ v).permute(0, 2, 1).view(b, c, ph, pw)
        if ph != h or pw != w:
            # Undo the pooling so the residual add matches the input resolution.
            out = F.interpolate(out, size=(h, w), mode="bilinear", align_corners=False)
        return f + self.gamma * out, attn
|
|
class DilatedPolicyHead(nn.Module):
    """Per-pixel action-logit head: four dilated conv blocks + 1x1 classifier.

    The classifier starts with zero weights and a bias of -2 everywhere except
    +2 on the "keep" action, so the initial policy strongly prefers keeping.
    """

    def __init__(self, in_channels: int) -> None:
        super().__init__()
        # Increasing dilation widens the receptive field without downsampling.
        self.body = nn.Sequential(
            _ConvBlock(in_channels, 512, dilation=1),
            _ConvBlock(512, 256, dilation=2),
            _ConvBlock(256, 128, dilation=3),
            _ConvBlock(128, 64, dilation=4),
        )
        self.classifier = nn.Conv2d(64, NUM_ACTIONS, kernel_size=1)
        nn.init.zeros_(self.classifier.weight)
        bias = torch.full((NUM_ACTIONS,), -2.0, dtype=torch.float32)
        # NOTE(review): the keep action is taken to be NUM_ACTIONS // 2 here,
        # while _set_model_policy_action_count hard-codes index 1 — these agree
        # only when NUM_ACTIONS == 3; confirm which convention is canonical.
        keep_index = NUM_ACTIONS // 2 if NUM_ACTIONS >= 3 else NUM_ACTIONS - 1
        bias[keep_index] = 2.0
        with torch.no_grad():
            self.classifier.bias.copy_(bias)


    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return (B, NUM_ACTIONS, H, W) action logits."""
        return self.classifier(self.body(x))
|
|
class DilatedValueHead(nn.Module):
    """Per-pixel scalar value head mirroring DilatedPolicyHead's dilated body."""

    def __init__(self, in_channels: int) -> None:
        super().__init__()
        # Widths shrink while the dilation rate grows (1..4), then 1x1 to a scalar.
        widths = (512, 256, 128, 64)
        layers: list[nn.Module] = []
        prev = in_channels
        for rate, width in enumerate(widths, start=1):
            layers.append(_ConvBlock(prev, width, dilation=rate))
            prev = width
        layers.append(nn.Conv2d(prev, 1, kernel_size=1))
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return a (B, 1, H, W) value map."""
        return self.net(x)
|
|
def replace_bn_with_gn(model: nn.Module, num_groups: int = 8) -> nn.Module:
    """Recursively replace every BatchNorm layer in ``model`` with GroupNorm.

    The group count is the largest divisor of the layer's channel count that is
    <= ``num_groups`` (falling back to 1). Improvements over the previous
    version: 1d/2d/3d and SyncBatchNorm are all covered (not just 1d/2d), and
    the learned affine weight/bias of an affine BatchNorm are copied into the
    new GroupNorm so pretrained scale/shift are preserved. Running statistics
    have no GroupNorm equivalent and are dropped.

    Args:
        model: Module tree to convert in place.
        num_groups: Upper bound for each GroupNorm's group count.

    Returns:
        The same ``model`` instance, for chaining.
    """
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
    for name, module in model.named_children():
        if isinstance(module, bn_types):
            num_channels = module.num_features
            groups = min(num_groups, num_channels)
            while groups > 1 and num_channels % groups != 0:
                groups -= 1
            gn = nn.GroupNorm(groups, num_channels, eps=module.eps, affine=module.affine)
            if module.affine:
                # Keep the pretrained per-channel scale/shift instead of
                # re-initializing to weight=1, bias=0.
                with torch.no_grad():
                    gn.weight.copy_(module.weight)
                    gn.bias.copy_(module.bias)
            setattr(model, name, gn)
        else:
            replace_bn_with_gn(module, num_groups=num_groups)
    return model
|
|
class HalfVGG16DilatedExtractor(nn.Module):
    """VGG16-style extractor at reduced width (32/64/128/256) with optional dilation.

    Produces a 3- or 4-scale feature pyramid (``num_scales``); ``forward``
    additionally upsamples each scale to the input resolution and concatenates
    them along the channel dimension.
    """

    def __init__(self, *, dilation: int = 1, num_scales: int = 3) -> None:
        super().__init__()
        self.num_scales = num_scales
        # Light dropout only on the deeper stage-3/4 blocks.
        deep_dropout = 0.1


        # Stage 1: two 32-channel blocks (full resolution).
        self.conv1_1 = _ConvBlock(3, 32, dilation=dilation, num_groups=GN_NUM_GROUPS)
        self.conv1_2 = _ConvBlock(32, 32, dilation=dilation, num_groups=GN_NUM_GROUPS)


        # Stage 2: two 64-channel blocks (1/2 resolution).
        self.conv2_1 = _ConvBlock(32, 64, dilation=dilation, num_groups=GN_NUM_GROUPS)
        self.conv2_2 = _ConvBlock(64, 64, dilation=dilation, num_groups=GN_NUM_GROUPS)


        # Stage 3: three 128-channel blocks with dropout (1/4 resolution).
        self.conv3_1 = _ConvBlock(64, 128, dilation=dilation, num_groups=GN_NUM_GROUPS, dropout=deep_dropout)
        self.conv3_2 = _ConvBlock(128, 128, dilation=dilation, num_groups=GN_NUM_GROUPS, dropout=deep_dropout)
        self.conv3_3 = _ConvBlock(128, 128, dilation=dilation, num_groups=GN_NUM_GROUPS, dropout=deep_dropout)


        # Stage 4: three 256-channel blocks; used only when num_scales == 4.
        self.conv4_1 = _ConvBlock(128, 256, dilation=dilation, num_groups=GN_NUM_GROUPS, dropout=deep_dropout)
        self.conv4_2 = _ConvBlock(256, 256, dilation=dilation, num_groups=GN_NUM_GROUPS, dropout=deep_dropout)
        self.conv4_3 = _ConvBlock(256, 256, dilation=dilation, num_groups=GN_NUM_GROUPS, dropout=deep_dropout)


        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)


    @property
    def out_channels(self) -> int:
        # Channel count of the concatenated output produced by ``forward``.
        return (32 + 64 + 128) if self.num_scales == 3 else (32 + 64 + 128 + 256)


    @property
    def pyramid_channels(self) -> list[int]:
        # Per-scale channel counts, in ``forward_pyramid`` output order.
        return [32, 64, 128] if self.num_scales == 3 else [32, 64, 128, 256]


    def forward_pyramid(self, x: torch.Tensor) -> list[torch.Tensor]:
        """Return per-stage features, each taken before the 2x pooling step."""
        x = self.conv1_1(x)
        src1 = self.conv1_2(x)
        x = self.pool(src1)


        x = self.conv2_1(x)
        src2 = self.conv2_2(x)
        x = self.pool(src2)


        x = self.conv3_1(x)
        x = self.conv3_2(x)
        src3 = self.conv3_3(x)


        if self.num_scales == 3:
            return [src1, src2, src3]


        x = self.pool(src3)
        x = self.conv4_1(x)
        x = self.conv4_2(x)
        src4 = self.conv4_3(x)
        return [src1, src2, src3, src4]


    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Upsample all pyramid levels to the input size and concat on channels."""
        h, w = x.shape[-2:]
        pyramid = self.forward_pyramid(x)
        upsampled = []
        for feat in pyramid:
            if feat.shape[-2] != h or feat.shape[-1] != w:
                feat = F.interpolate(feat, size=(h, w), mode="bilinear", align_corners=False)
            upsampled.append(feat)
        return torch.cat(upsampled, dim=1)
|
|
class CustomVGGEncoderWrapper(nn.Module):
    """Adapter giving HalfVGG16DilatedExtractor the same interface as SMPEncoderWrapper."""

    def __init__(self, *, num_scales: int, dilation: int) -> None:
        super().__init__()
        self.encoder = HalfVGG16DilatedExtractor(dilation=dilation, num_scales=num_scales)
        # No projection for the custom VGG path; attribute kept for interface
        # parity with SMPEncoderWrapper (callers may test `projection is not None`).
        self.projection = None


    @property
    def out_channels(self) -> int:
        # Channel count of the concatenated multi-scale output.
        return self.encoder.out_channels


    @property
    def pyramid_channels(self) -> list[int]:
        # Per-scale channel counts of the encoder pyramid.
        return self.encoder.pyramid_channels


    def forward_pyramid(self, x: torch.Tensor) -> list[torch.Tensor]:
        """Return the raw multi-scale feature pyramid."""
        return self.encoder.forward_pyramid(x)


    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return the upsampled-and-concatenated pyramid features."""
        return self.encoder(x)
|
|
class SMPEncoderWrapper(nn.Module):
    """Wraps an SMP encoder to emit one full-resolution feature map.

    All encoder stages are upsampled to the input resolution, concatenated on
    channels, and optionally projected down to ``proj_dim`` channels by a 1x1
    conv + norm + ReLU.
    """

    def __init__(
        self,
        *,
        encoder_name: str,
        encoder_weights: str | None,
        depth: int,
        in_channels: int,
        proj_dim: int,
    ) -> None:
        super().__init__()
        self.encoder = smp.encoders.get_encoder(
            encoder_name,
            in_channels=in_channels,
            depth=depth,
            weights=encoder_weights,
        )
        # Optionally swap BatchNorm for GroupNorm (batch-size independent norm).
        if REPLACE_BN_WITH_GN:
            replace_bn_with_gn(self.encoder, num_groups=GN_NUM_GROUPS)


        raw_channels = sum(self.encoder.out_channels)
        if proj_dim > 0:
            # Largest group count <= GN_NUM_GROUPS that divides proj_dim evenly.
            groups = min(GN_NUM_GROUPS, proj_dim)
            while groups > 1 and proj_dim % groups != 0:
                groups -= 1
            self.projection = nn.Sequential(
                nn.Conv2d(raw_channels, proj_dim, kernel_size=1, bias=False),
                nn.GroupNorm(groups, proj_dim) if REPLACE_BN_WITH_GN else nn.BatchNorm2d(proj_dim),
                nn.ReLU(inplace=True),
            )
            self._out_channels = proj_dim
        else:
            self.projection = None
            self._out_channels = raw_channels


    @property
    def out_channels(self) -> int:
        # Channel count after the optional projection.
        return self._out_channels


    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return concatenated (and optionally projected) full-resolution features."""
        h, w = x.shape[-2:]
        features = self.encoder(x)
        upsampled = []
        for feat in features:
            if feat.shape[-2] != h or feat.shape[-1] != w:
                feat = F.interpolate(feat, size=(h, w), mode="bilinear", align_corners=False)
            upsampled.append(feat)
        out = torch.cat(upsampled, dim=1)
        if self.projection is not None:
            out = self.projection(out)
        return out
|
|
class VGGDecoderBlock(nn.Module):
    """U-Net style decoder stage: upsample to the skip, concatenate, two conv blocks."""

    def __init__(self, *, in_channels: int, skip_channels: int, out_channels: int) -> None:
        super().__init__()
        merged_channels = in_channels + skip_channels
        self.block = nn.Sequential(
            _ConvBlock(merged_channels, out_channels, num_groups=GN_NUM_GROUPS),
            _ConvBlock(out_channels, out_channels, num_groups=GN_NUM_GROUPS),
        )

    def forward(self, x: torch.Tensor, skip: torch.Tensor) -> torch.Tensor:
        """Resize ``x`` to the skip's spatial size, fuse on channels, convolve."""
        resized = F.interpolate(x, size=skip.shape[-2:], mode="bilinear", align_corners=False)
        fused = torch.cat([resized, skip], dim=1)
        return self.block(fused)
|
|
class VGGSegmentationHead(nn.Module):
    """U-Net style decoder over a 3- or 4-level custom-VGG feature pyramid.

    The deepest level passes through dropout and a bridge conv, then is
    progressively upsampled and fused with shallower skips down to a
    1-channel logit map.
    """

    def __init__(self, *, pyramid_channels: list[int], dropout_p: float) -> None:
        super().__init__()
        if len(pyramid_channels) not in {3, 4}:
            raise ValueError(f"Expected 3 or 4 VGG pyramid channels, got {pyramid_channels}")


        self.dropout = nn.Dropout2d(p=dropout_p)
        self.num_scales = len(pyramid_channels)


        deepest = pyramid_channels[-1]
        self.bridge = _ConvBlock(deepest, deepest, num_groups=GN_NUM_GROUPS)
        # up3 exists only in the 4-scale layout; up2/up1 are shared by both.
        if self.num_scales == 4:
            self.up3 = VGGDecoderBlock(in_channels=deepest, skip_channels=pyramid_channels[2], out_channels=128)
            self.up2 = VGGDecoderBlock(in_channels=128, skip_channels=pyramid_channels[1], out_channels=64)
            self.up1 = VGGDecoderBlock(in_channels=64, skip_channels=pyramid_channels[0], out_channels=32)
        else:
            self.up2 = VGGDecoderBlock(in_channels=deepest, skip_channels=pyramid_channels[1], out_channels=64)
            self.up1 = VGGDecoderBlock(in_channels=64, skip_channels=pyramid_channels[0], out_channels=32)
        self.out_conv = nn.Conv2d(32, 1, kernel_size=1)


    def forward(self, pyramid: list[torch.Tensor] | tuple[torch.Tensor, ...]) -> torch.Tensor:
        """Decode a shallow-to-deep feature pyramid into 1-channel logits."""
        features = list(pyramid)
        # Regularize only the deepest level before decoding.
        x = self.bridge(self.dropout(features[-1]))
        if self.num_scales == 4:
            x = self.up3(x, features[2])
            x = self.up2(x, features[1])
            x = self.up1(x, features[0])
        else:
            x = self.up2(x, features[1])
            x = self.up1(x, features[0])
        return self.out_conv(x)
|
|
class PixelDRLMG_SMP(nn.Module):
    """Pixel-level RL agent: SMP encoder features -> self-attention -> policy/value heads."""

    def __init__(
        self,
        *,
        encoder_name: str,
        encoder_weights: str | None,
        encoder_depth: int,
        proj_dim: int,
        dropout_p: float,
    ) -> None:
        super().__init__()
        self.extractor = SMPEncoderWrapper(
            encoder_name=encoder_name,
            encoder_weights=encoder_weights,
            depth=encoder_depth,
            in_channels=3,
            proj_dim=proj_dim,
        )
        feature_ch = self.extractor.out_channels
        self.sam = SelfAttentionModule(channels=feature_ch)
        self.policy_head = DilatedPolicyHead(in_channels=feature_ch)
        self.value_head = DilatedValueHead(in_channels=feature_ch)
        self.head_dropout = nn.Dropout2d(p=dropout_p)
        # Trainable 3x3 neighborhood aggregator, initialized as a uniform mean.
        self.omega_conv = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
        nn.init.constant_(self.omega_conv.weight, 1.0 / 9.0)

    def forward_state(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Encode the image and return (attended state, attention matrix)."""
        return self.sam(self.extractor(x))

    def forward_from_state(self, state: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (policy logits, value map) from a precomputed state."""
        dropped = self.head_dropout(state)
        return self.policy_head(dropped), self.value_head(dropped)

    def value_from_state(self, state: torch.Tensor) -> torch.Tensor:
        """Return only the value map from a precomputed state."""
        return self.value_head(self.head_dropout(state))

    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (policy logits, value map, attention matrix) for an image batch."""
        state, attention = self.forward_state(x)
        policy_logits, value = self.forward_from_state(state)
        return policy_logits, value, attention

    def forward_policy_only(self, x: torch.Tensor) -> torch.Tensor:
        """Return policy logits only (skips the value head)."""
        state, _ = self.forward_state(x)
        return self.policy_head(self.head_dropout(state))

    def neighborhood_value(self, value_next: torch.Tensor) -> torch.Tensor:
        """Aggregate a value map over each pixel's 3x3 neighborhood."""
        return self.omega_conv(value_next)
|
|
class PixelDRLMG_VGG(nn.Module):
    """Pixel-level RL agent over the custom VGG extractor (no decoder branch)."""

    def __init__(
        self,
        *,
        num_scales: int,
        dilation: int,
        dropout_p: float,
    ) -> None:
        super().__init__()
        self.extractor = CustomVGGEncoderWrapper(num_scales=num_scales, dilation=dilation)
        feature_ch = self.extractor.out_channels
        self.sam = SelfAttentionModule(channels=feature_ch)
        self.policy_head = DilatedPolicyHead(in_channels=feature_ch)
        self.value_head = DilatedValueHead(in_channels=feature_ch)
        self.head_dropout = nn.Dropout2d(p=dropout_p)
        # Trainable 3x3 neighborhood aggregator, initialized as a uniform mean.
        self.omega_conv = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
        nn.init.constant_(self.omega_conv.weight, 1.0 / 9.0)

    def forward_state(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Extract features and return (attended state, attention matrix)."""
        return self.sam(self.extractor(x))

    def forward_from_state(self, state: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (policy logits, value map) from a precomputed state."""
        dropped = self.head_dropout(state)
        return self.policy_head(dropped), self.value_head(dropped)

    def value_from_state(self, state: torch.Tensor) -> torch.Tensor:
        """Return only the value map from a precomputed state."""
        return self.value_head(self.head_dropout(state))

    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (policy logits, value map, attention matrix)."""
        state, attention = self.forward_state(x)
        policy_logits, value = self.forward_from_state(state)
        return policy_logits, value, attention

    def forward_policy_only(self, x: torch.Tensor) -> torch.Tensor:
        """Return policy logits only (skips the value head)."""
        state, _ = self.forward_state(x)
        return self.policy_head(self.head_dropout(state))

    def neighborhood_value(self, value_next: torch.Tensor) -> torch.Tensor:
        """Aggregate a value map over each pixel's 3x3 neighborhood."""
        return self.omega_conv(value_next)
|
|
class SupervisedSMPModel(nn.Module):
    """Plain supervised SMP segmentation model with dropout on the decoder output."""

    def __init__(
        self,
        *,
        arch: str,
        encoder_name: str,
        encoder_weights: str | None,
        dropout_p: float,
    ) -> None:
        super().__init__()
        self.smp_model = smp.create_model(
            arch=arch,
            encoder_name=encoder_name,
            encoder_weights=encoder_weights,
            in_channels=3,
            classes=1,
        )
        self.dropout = nn.Dropout2d(p=dropout_p)


    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return 1-channel segmentation logits."""
        encoder_features = self.smp_model.encoder(x)
        # run_smp_decoder dispatches between list-style and *args-style decoders.
        decoder_output = run_smp_decoder(self.smp_model.decoder, encoder_features)
        decoder_output = self.dropout(decoder_output)
        logits = self.smp_model.segmentation_head(decoder_output)
        # NOTE(review): the classification head's output is computed and then
        # discarded — presumably only to exercise the aux head; confirm intent.
        if getattr(self.smp_model, "classification_head", None) is not None:
            _ = self.smp_model.classification_head(encoder_features[-1])
        return logits
|
|
class SupervisedVGGModel(nn.Module):
    """Supervised segmentation: custom VGG pyramid + U-Net style head."""

    def __init__(
        self,
        *,
        num_scales: int,
        dilation: int,
        dropout_p: float,
    ) -> None:
        super().__init__()
        self.encoder = CustomVGGEncoderWrapper(num_scales=num_scales, dilation=dilation)
        self.segmentation_head = VGGSegmentationHead(
            pyramid_channels=self.encoder.pyramid_channels,
            dropout_p=dropout_p,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return 1-channel segmentation logits for an image batch."""
        pyramid = self.encoder.forward_pyramid(x)
        return self.segmentation_head(pyramid)
|
|
class RefinementPolicyHead(nn.Module):
    """Policy head for refinement RL: dilated conv body + 1x1 action classifier.

    Mirrors DilatedPolicyHead's architecture, but the classifier bias starts at
    zero everywhere except a configurable positive value (job param
    ``keep_bias``) on the keep action at index NUM_ACTIONS // 2.
    """

    def __init__(self, in_channels: int) -> None:
        super().__init__()
        # Increasing dilation widens the receptive field without downsampling.
        self.body = nn.Sequential(
            _ConvBlock(in_channels, 512, dilation=1),
            _ConvBlock(512, 256, dilation=2),
            _ConvBlock(256, 128, dilation=3),
            _ConvBlock(128, 64, dilation=4),
        )
        self.classifier = nn.Conv2d(64, NUM_ACTIONS, kernel_size=1)
        nn.init.zeros_(self.classifier.weight)
        keep_bias = float(_job_param("keep_bias", 0.25))
        keep_index = NUM_ACTIONS // 2
        with torch.no_grad():
            self.classifier.bias.zero_()
            self.classifier.bias[keep_index] = keep_bias

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return (B, NUM_ACTIONS, H, W) action logits.

        Bug fix: the original class defined no ``forward`` at all, so calling
        the head (e.g. ``self.policy_head(state)`` in PixelDRLMG_WithDecoder)
        failed at runtime with nn.Module's unimplemented-forward error.
        """
        return self.classifier(self.body(x))
|
|
class PixelDRLMG_WithDecoder(nn.Module):
    """Strategy-3/4 agent: complete SMP segmentation model + RL refinement branch.

    The wrapped SMP model produces an initial mask; the RL branch fuses
    full-resolution encoder features with six mask-derived conditioning maps
    (``refinement_adapter``), applies self-attention, and emits per-pixel policy
    logits and values. With ``use_refinement`` disabled it falls back to the
    legacy encoder-features-only pathway.
    """

    def __init__(
        self,
        *,
        arch: str,
        encoder_name: str,
        encoder_weights: str | None,
        encoder_depth: int,
        proj_dim: int,
        dropout_p: float,
    ) -> None:
        super().__init__()
        self.smp_model = smp.create_model(
            arch=arch,
            encoder_name=encoder_name,
            encoder_weights=encoder_weights,
            in_channels=3,
            classes=1,
        )
        self.smp_encoder = self.smp_model.encoder
        if REPLACE_BN_WITH_GN:
            replace_bn_with_gn(self.smp_encoder, num_groups=GN_NUM_GROUPS)


        raw_channels = sum(self.smp_encoder.out_channels)
        if proj_dim > 0:
            # Largest group count <= GN_NUM_GROUPS that divides proj_dim evenly.
            groups = min(GN_NUM_GROUPS, proj_dim)
            while groups > 1 and proj_dim % groups != 0:
                groups -= 1
            self.projection = nn.Sequential(
                nn.Conv2d(raw_channels, proj_dim, kernel_size=1, bias=False),
                nn.GroupNorm(groups, proj_dim) if REPLACE_BN_WITH_GN else nn.BatchNorm2d(proj_dim),
                nn.ReLU(inplace=True),
            )
            ch = proj_dim
        else:
            self.projection = None
            ch = raw_channels


        # Fuses encoder features with the 6 conditioning channels built in
        # forward_refinement_state back down to ``ch`` channels.
        self.refinement_adapter = nn.Sequential(
            nn.Conv2d(ch + 6, ch, kernel_size=3, stride=1, padding=1, bias=False),
            _group_norm(ch),
            nn.ReLU(inplace=True),
            _ConvBlock(ch, ch, num_groups=GN_NUM_GROUPS),
        )
        self.sam = SelfAttentionModule(channels=ch)
        self.policy_head = RefinementPolicyHead(in_channels=ch)
        self.value_head = DilatedValueHead(in_channels=ch)
        self.head_dropout = nn.Dropout2d(p=dropout_p)
        # Trainable 3x3 neighborhood aggregator, initialized as a uniform mean.
        self.omega_conv = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
        nn.init.constant_(self.omega_conv.weight, 1.0 / 9.0)
        self.use_refinement = True


    def set_refinement_mode(self, enabled: bool) -> None:
        """Enable/disable refinement and (un)freeze the adapter accordingly."""
        self.use_refinement = bool(enabled)
        self.refinement_adapter.requires_grad_(self.use_refinement)


    def _concat_from_features(
        self,
        features: list[torch.Tensor] | tuple[torch.Tensor, ...],
        *,
        output_size: tuple[int, int],
    ) -> torch.Tensor:
        """Upsample all feature maps to ``output_size``, concat, optionally project."""
        h, w = output_size
        upsampled = []
        for feat in features:
            if feat.shape[-2] != h or feat.shape[-1] != w:
                feat = F.interpolate(feat, size=(h, w), mode="bilinear", align_corners=False)
            upsampled.append(feat)
        out = torch.cat(upsampled, dim=1)
        if self.projection is not None:
            out = self.projection(out)
        return out


    def _encoder_concat(self, x: torch.Tensor) -> torch.Tensor:
        """Run the encoder and return full-resolution concatenated features."""
        return self._concat_from_features(self.smp_encoder(x), output_size=x.shape[-2:])


    def forward_decoder_from_features(
        self,
        encoder_features: list[torch.Tensor] | tuple[torch.Tensor, ...],
    ) -> torch.Tensor:
        """Run decoder + segmentation head on precomputed encoder features."""
        decoder_output = run_smp_decoder(self.smp_model.decoder, encoder_features)
        logits = self.smp_model.segmentation_head(decoder_output)
        # NOTE(review): classification-head output is computed and discarded
        # (same pattern as SupervisedSMPModel.forward) — confirm it is intended.
        if getattr(self.smp_model, "classification_head", None) is not None:
            _ = self.smp_model.classification_head(encoder_features[-1])
        return logits


    def forward_decoder(self, x: torch.Tensor) -> torch.Tensor:
        """Return the base SMP model's segmentation logits for ``x``."""
        return self.smp_model(x)


    def prepare_refinement_context(self, x: torch.Tensor) -> dict[str, torch.Tensor]:
        """Run encoder + decoder once and cache everything refinement needs."""
        encoder_features = self.smp_encoder(x)
        decoder_logits = self.forward_decoder_from_features(encoder_features)
        return {
            "base_features": self._concat_from_features(encoder_features, output_size=x.shape[-2:]),
            "decoder_logits": decoder_logits,
            "decoder_prob": torch.sigmoid(decoder_logits),
        }


    def forward_refinement_state(
        self,
        base_features: torch.Tensor,
        current_mask: torch.Tensor,
        decoder_prob: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Build the attended refinement state from features + mask conditioning.

        Conditioning channels: decoder probability, current mask, signed and
        absolute residual, decoder uncertainty 4*p*(1-p) scaled to [0, 1], and
        a boundary map (deviation of the mask from its 3x3 local mean).
        """
        residual = decoder_prob - current_mask
        uncertainty = (decoder_prob * (1.0 - decoder_prob)).clamp_(0.0, 0.25) * 4.0
        boundary = (current_mask - F.avg_pool2d(current_mask, kernel_size=3, stride=1, padding=1)).abs()
        conditioning = torch.cat(
            [decoder_prob, current_mask, residual, residual.abs(), uncertainty, boundary],
            dim=1,
        )
        fused = self.refinement_adapter(torch.cat([base_features, conditioning], dim=1))
        return self.sam(fused)


    def forward_state(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Legacy (non-refinement) state: attention over encoder features only."""
        concat_feat = self._encoder_concat(x)
        return self.sam(concat_feat)


    def forward_from_state(self, state: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (policy logits, value map) from a precomputed state."""
        state = self.head_dropout(state)
        return self.policy_head(state), self.value_head(state)


    def value_from_state(self, state: torch.Tensor) -> torch.Tensor:
        """Return only the value map from a precomputed state."""
        state = self.head_dropout(state)
        return self.value_head(state)


    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (policy logits, value map, attention) for an image batch."""
        if not self.use_refinement:
            # NOTE(review): this legacy branch skips head_dropout, unlike
            # forward_from_state — confirm that is intentional.
            state, attention = self.forward_state(x)
            return self.policy_head(state), self.value_head(state), attention
        context = self.prepare_refinement_context(x)
        # The detached decoder probability doubles as the initial current mask,
        # so the residual channels start at (numerically) zero.
        state, attention = self.forward_refinement_state(
            context["base_features"],
            context["decoder_prob"].detach(),
            context["decoder_prob"],
        )
        policy_logits, value = self.forward_from_state(state)
        return policy_logits, value, attention


    def forward_policy_only(self, x: torch.Tensor) -> torch.Tensor:
        """Return policy logits only (value head skipped)."""
        if not self.use_refinement:
            state, _ = self.forward_state(x)
            state = self.head_dropout(state)
            return self.policy_head(state)
        context = self.prepare_refinement_context(x)
        state, _ = self.forward_refinement_state(
            context["base_features"],
            context["decoder_prob"].detach(),
            context["decoder_prob"],
        )
        state = self.head_dropout(state)
        return self.policy_head(state)


    def neighborhood_value(self, value_next: torch.Tensor) -> torch.Tensor:
        """Aggregate a value map over each pixel's 3x3 neighborhood."""
        return self.omega_conv(value_next)
|
|
class PixelDRLMG_VGGWithDecoder(nn.Module):
    """Strategy-3/4 agent on the custom VGG backbone: seg head + RL refinement.

    Counterpart to PixelDRLMG_WithDecoder, but with the custom VGG encoder, a
    DilatedPolicyHead, and a lighter 3-channel conditioning (probability, mask,
    residual) instead of 6.
    """

    def __init__(
        self,
        *,
        num_scales: int,
        dilation: int,
        dropout_p: float,
    ) -> None:
        super().__init__()
        self.encoder = CustomVGGEncoderWrapper(num_scales=num_scales, dilation=dilation)
        self.segmentation_head = VGGSegmentationHead(
            pyramid_channels=self.encoder.pyramid_channels,
            dropout_p=dropout_p,
        )
        ch = self.encoder.out_channels
        # Fuses concatenated pyramid features with 3 conditioning channels.
        self.refinement_adapter = nn.Sequential(
            nn.Conv2d(ch + 3, ch, kernel_size=3, stride=1, padding=1, bias=False),
            _group_norm(ch),
            nn.ReLU(inplace=True),
            _ConvBlock(ch, ch, num_groups=GN_NUM_GROUPS),
        )
        self.sam = SelfAttentionModule(channels=ch)
        self.policy_head = DilatedPolicyHead(in_channels=ch)
        self.value_head = DilatedValueHead(in_channels=ch)
        self.head_dropout = nn.Dropout2d(p=dropout_p)
        # Trainable 3x3 neighborhood aggregator, initialized as a uniform mean.
        self.omega_conv = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
        nn.init.constant_(self.omega_conv.weight, 1.0 / 9.0)
        self.use_refinement = True


    def set_refinement_mode(self, enabled: bool) -> None:
        """Enable/disable refinement and (un)freeze the adapter accordingly."""
        self.use_refinement = bool(enabled)
        self.refinement_adapter.requires_grad_(self.use_refinement)


    def _concat_pyramid(
        self,
        pyramid: list[torch.Tensor] | tuple[torch.Tensor, ...],
        *,
        output_size: tuple[int, int],
    ) -> torch.Tensor:
        """Upsample pyramid levels to ``output_size`` and concat on channels."""
        h, w = output_size
        upsampled = []
        for feat in pyramid:
            if feat.shape[-2] != h or feat.shape[-1] != w:
                feat = F.interpolate(feat, size=(h, w), mode="bilinear", align_corners=False)
            upsampled.append(feat)
        return torch.cat(upsampled, dim=1)


    def forward_decoder(self, x: torch.Tensor) -> torch.Tensor:
        """Return segmentation logits from the supervised head."""
        return self.segmentation_head(self.encoder.forward_pyramid(x))


    def prepare_refinement_context(self, x: torch.Tensor) -> dict[str, torch.Tensor]:
        """Run the encoder pyramid + seg head once and cache refinement inputs."""
        pyramid = self.encoder.forward_pyramid(x)
        decoder_logits = self.segmentation_head(pyramid)
        return {
            "base_features": self._concat_pyramid(pyramid, output_size=x.shape[-2:]),
            "decoder_logits": decoder_logits,
            "decoder_prob": torch.sigmoid(decoder_logits),
        }


    def forward_refinement_state(
        self,
        base_features: torch.Tensor,
        current_mask: torch.Tensor,
        decoder_prob: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Attended state from features + (prob, mask, residual) conditioning."""
        conditioning = torch.cat([decoder_prob, current_mask, decoder_prob - current_mask], dim=1)
        fused = self.refinement_adapter(torch.cat([base_features, conditioning], dim=1))
        return self.sam(fused)


    def forward_state(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Legacy (non-refinement) state: attention over encoder features only."""
        return self.sam(self.encoder(x))


    def forward_from_state(self, state: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (policy logits, value map) from a precomputed state."""
        state = self.head_dropout(state)
        return self.policy_head(state), self.value_head(state)


    def value_from_state(self, state: torch.Tensor) -> torch.Tensor:
        """Return only the value map from a precomputed state."""
        state = self.head_dropout(state)
        return self.value_head(state)


    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Return (policy logits, value map, attention) for an image batch."""
        if not self.use_refinement:
            # NOTE(review): this legacy branch skips head_dropout, unlike
            # forward_from_state — confirm that is intentional.
            state, attention = self.forward_state(x)
            return self.policy_head(state), self.value_head(state), attention
        context = self.prepare_refinement_context(x)
        # The detached decoder probability doubles as the initial current mask.
        state, attention = self.forward_refinement_state(
            context["base_features"],
            context["decoder_prob"].detach(),
            context["decoder_prob"],
        )
        policy_logits, value = self.forward_from_state(state)
        return policy_logits, value, attention


    def forward_policy_only(self, x: torch.Tensor) -> torch.Tensor:
        """Return policy logits only (value head skipped)."""
        if not self.use_refinement:
            state, _ = self.forward_state(x)
            state = self.head_dropout(state)
            return self.policy_head(state)
        context = self.prepare_refinement_context(x)
        state, _ = self.forward_refinement_state(
            context["base_features"],
            context["decoder_prob"].detach(),
            context["decoder_prob"],
        )
        state = self.head_dropout(state)
        return self.policy_head(state)


    def neighborhood_value(self, value_next: torch.Tensor) -> torch.Tensor:
        """Aggregate a value map over each pixel's 3x3 neighborhood."""
        return self.omega_conv(value_next)
|
|
def run_smp_decoder(decoder: nn.Module, encoder_features: list[torch.Tensor] | tuple[torch.Tensor, ...]) -> torch.Tensor:
    """Invoke a decoder whose forward takes either ``*features`` or one sequence.

    Some decoder implementations accept the feature maps as variadic positional
    arguments while others accept a single sequence; dispatch by inspecting the
    signature of ``decoder.forward``.
    """
    parameters = inspect.signature(decoder.forward).parameters.values()
    takes_var_positional = any(
        param.kind is inspect.Parameter.VAR_POSITIONAL for param in parameters
    )
    if not takes_var_positional and len(parameters) == 1:
        # Exactly one named parameter: the decoder expects the whole sequence.
        return decoder(encoder_features)
    return decoder(*encoder_features)
|
|
def checkpoint_run_config_payload(payload: dict[str, Any]) -> dict[str, Any]:
    """Extract the run configuration from a checkpoint payload.

    Prefers the "run_config" key, falls back to the legacy "config" key, and
    returns an empty dict when neither holds a truthy value.
    """
    for key in ("run_config", "config"):
        config = payload.get(key)
        if config:
            return config
    return {}
|
|
def _raw_decoder_rl_model(
    model: nn.Module,
) -> PixelDRLMG_WithDecoder | PixelDRLMG_VGGWithDecoder | None:
    """Return the underlying decoder+RL model, or None for any other model type."""
    candidate = _unwrap_compiled(model)
    if not isinstance(candidate, (PixelDRLMG_WithDecoder, PixelDRLMG_VGGWithDecoder)):
        return None
    return candidate
|
|
def _uses_refinement_runtime(model: nn.Module, *, strategy: int | None = None) -> bool:
    """True when ``model`` is a decoder+RL model with refinement currently enabled.

    When ``strategy`` is given, only strategies 3 and 4 may report refinement.
    """
    raw = _raw_decoder_rl_model(model)
    if raw is None:
        return False
    strategy_allows = strategy is None or strategy in (3, 4)
    return strategy_allows and bool(getattr(raw, "use_refinement", False))
|
|
| def _policy_action_count_from_state_dict(state_dict: dict[str, Any]) -> int | None: |
| for key in ( |
| "policy_head.classifier.weight", |
| "policy_head.classifier.bias", |
| "policy_head.net.4.weight", |
| "policy_head.net.4.bias", |
| ): |
| tensor = state_dict.get(key) |
| if torch.is_tensor(tensor): |
| return int(tensor.shape[0]) |
| return None |
|
|
def _model_policy_action_count(model: nn.Module) -> int | None:
    """Number of action channels in the model's policy classifier, or None."""
    raw = _unwrap_compiled(model)
    policy_head = getattr(raw, "policy_head", None)
    classifier = getattr(policy_head, "classifier", None)
    if not isinstance(classifier, nn.Conv2d):
        return None
    return int(classifier.out_channels)
|
|
def _set_model_policy_action_count(model: nn.Module, action_count: int) -> bool:
    """Rebuild the model's 1x1 policy classifier for ``action_count`` actions.

    Returns True when the classifier was replaced, False when the model has no
    Conv2d classifier or the action count already matches.
    """
    raw = _unwrap_compiled(model)
    policy_head = getattr(raw, "policy_head", None)
    classifier = getattr(policy_head, "classifier", None)
    if not isinstance(classifier, nn.Conv2d):
        return False
    if int(classifier.out_channels) == int(action_count):
        return False


    # Recreate the classifier with the requested output channels, preserving
    # every other conv hyperparameter plus device/dtype.
    new_classifier = nn.Conv2d(
        classifier.in_channels,
        int(action_count),
        kernel_size=classifier.kernel_size,
        stride=classifier.stride,
        padding=classifier.padding,
        dilation=classifier.dilation,
        groups=classifier.groups,
        bias=classifier.bias is not None,
        padding_mode=classifier.padding_mode,
    ).to(device=classifier.weight.device, dtype=classifier.weight.dtype)
    # Same init scheme as DilatedPolicyHead: zero weights, -2 bias everywhere
    # except +2 on the keep action.
    nn.init.zeros_(new_classifier.weight)
    if new_classifier.bias is not None:
        bias = torch.full((int(action_count),), -2.0, dtype=new_classifier.bias.dtype, device=new_classifier.bias.device)
        # NOTE(review): the keep index is hard-coded to 1 here, while
        # DilatedPolicyHead uses NUM_ACTIONS // 2 — these agree only when the
        # action count is 3; confirm which convention is canonical.
        keep_index = 1 if int(action_count) >= 3 else int(action_count) - 1
        bias[keep_index] = 2.0
        with torch.no_grad():
            new_classifier.bias.copy_(bias)
    policy_head.classifier = new_classifier
    return True
|
|
def _configure_policy_head_compatibility(
    model: nn.Module,
    state_dict: dict[str, Any],
    *,
    source: str,
) -> int | None:
    """Resize the model's policy classifier to match a checkpoint, if needed.

    Returns the checkpoint's action count, or None when it cannot be inferred.
    """
    action_count = _policy_action_count_from_state_dict(state_dict)
    if action_count is None:
        return None
    resized = _set_model_policy_action_count(model, action_count)
    if resized:
        print(f"[Policy Compatibility] source={source} num_actions={action_count}")
    return action_count
|
|
def _strategy34_checkpoint_layout_info(
    state_dict: dict[str, Any],
    run_config: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Classify a checkpoint's policy-head/refinement layout for strategies 3/4."""
    run_config = run_config or {}
    strategy = run_config.get("strategy")

    def _has_prefix(prefix: str) -> bool:
        # True when any state-dict key belongs to the given submodule.
        return any(key.startswith(prefix) for key in state_dict)

    has_legacy_policy_head = _has_prefix("policy_head.net.")
    has_new_policy_body = _has_prefix("policy_head.body.")
    has_new_policy_classifier = _has_prefix("policy_head.classifier.")
    has_refinement_adapter = _has_prefix("refinement_adapter.")
    has_new_policy_head = has_new_policy_body or has_new_policy_classifier

    is_strategy34_decoder_checkpoint = bool(
        strategy in (3, 4)
        or has_legacy_policy_head
        or has_new_policy_head
        or has_refinement_adapter
    )
    # Refinement layout: an adapter is present, or a new-style policy head
    # appears without any legacy keys.
    use_refinement = bool(
        has_refinement_adapter or (has_new_policy_head and not has_legacy_policy_head)
    )
    return {
        "strategy": strategy,
        "is_strategy34_decoder_checkpoint": is_strategy34_decoder_checkpoint,
        "has_legacy_policy_head": has_legacy_policy_head,
        "has_new_policy_head": has_new_policy_head,
        "has_refinement_adapter": has_refinement_adapter,
        "requires_policy_remap": has_legacy_policy_head,
        "policy_action_count": _policy_action_count_from_state_dict(state_dict),
        "use_refinement": use_refinement,
        "compatibility_mode": "refinement" if use_refinement else "legacy",
    }
|
|
def inspect_strategy34_checkpoint_compatibility(path: str | Path) -> dict[str, Any]:
    """Load a checkpoint from ``path`` and report its strategy-3/4 layout info."""
    checkpoint_path = Path(path).expanduser().resolve()
    # weights_only=False is needed to read the full payload dict, but performs
    # full unpickling — only load trusted checkpoints.
    payload = torch.load(checkpoint_path, map_location="cpu", weights_only=False)
    layout = _strategy34_checkpoint_layout_info(payload.get("model_state_dict", {}), checkpoint_run_config_payload(payload))
    layout["path"] = str(checkpoint_path)
    return layout
|
|
def _configure_strategy34_model_compatibility(
    model: nn.Module,
    layout: dict[str, Any],
    *,
    source: str,
) -> None:
    """Switch the model's refinement mode to match a checkpoint layout.

    No-op when the model is not a decoder+RL model or the layout does not
    describe a strategy-3/4 decoder checkpoint.
    """
    raw = _raw_decoder_rl_model(model)
    if raw is None or not layout.get("is_strategy34_decoder_checkpoint"):
        return
    use_refinement = bool(layout["use_refinement"])
    raw.set_refinement_mode(use_refinement)
    if not use_refinement:
        # Legacy checkpoints never train the adapter; keep it in eval mode.
        raw.refinement_adapter.eval()
    message = (
        "[Strategy34 Compatibility] "
        f"source={source} mode={layout['compatibility_mode']} "
        f"legacy_policy_head={layout['has_legacy_policy_head']} "
        f"refinement_adapter={layout['has_refinement_adapter']}"
    )
    print(message)
|
|
def _configure_model_from_checkpoint_path(
    model: nn.Module,
    checkpoint_path: str | Path,
) -> dict[str, Any]:
    """Adapt `model` (policy head + Strategy 3/4 mode) to a checkpoint on disk.

    Returns the checkpoint's layout info dict with its resolved path attached.
    """
    resolved = Path(checkpoint_path).expanduser().resolve()
    source = str(resolved)
    payload = torch.load(resolved, map_location="cpu", weights_only=False)
    state_dict = payload.get("model_state_dict", {})
    _configure_policy_head_compatibility(model, state_dict, source=source)
    layout = _strategy34_checkpoint_layout_info(state_dict, checkpoint_run_config_payload(payload))
    _configure_strategy34_model_compatibility(model, layout, source=source)
    layout["path"] = source
    return layout
|
|
| def _remap_legacy_policy_head_state_dict(state_dict: dict[str, Any]) -> dict[str, Any]: |
| remapped: dict[str, Any] = {} |
| for key, value in state_dict.items(): |
| if key.startswith("policy_head.net."): |
| suffix = key[len("policy_head.net."):] |
| layer_idx, dot, rest = suffix.partition(".") |
| if dot: |
| if layer_idx in {"0", "1", "2", "3"}: |
| remapped[f"policy_head.body.{layer_idx}.{rest}"] = value |
| continue |
| if layer_idx == "4": |
| remapped[f"policy_head.classifier.{rest}"] = value |
| continue |
| remapped[key] = value |
| return remapped |
|
|
def _load_strategy2_checkpoint_payload(
    path: str | Path,
    *,
    model_config: RuntimeModelConfig,
) -> dict[str, Any]:
    """Load a Strategy 2 checkpoint payload, guarding against backbone mismatches.

    Args:
        path: Checkpoint file; ``~`` is expanded and the path resolved.
        model_config: Runtime model configuration the checkpoint must match.

    Returns:
        The full payload dict as saved by ``torch.save``.

    Raises:
        ValueError: If the checkpoint stores a run config whose backbone
            family differs from ``model_config.backbone_family``.
    """
    checkpoint_path = Path(path).expanduser().resolve()
    # weights_only=False: the payload also carries plain-dict run metadata.
    ckpt = torch.load(checkpoint_path, map_location=DEVICE, weights_only=False)
    saved_config = checkpoint_run_config_payload(ckpt)
    if saved_config:
        # Only validate when the checkpoint actually recorded its run config.
        saved_model_config = RuntimeModelConfig.from_payload(saved_config).validate()
        if saved_model_config.backbone_family != model_config.backbone_family:
            raise ValueError(
                f"Strategy 2 checkpoint backbone family mismatch: requested {model_config.backbone_family!r}, "
                f"checkpoint has {saved_model_config.backbone_family!r} at {checkpoint_path}."
            )
    return ckpt
|
|
def _use_channels_last_for_run(model_config: RuntimeModelConfig | None = None) -> bool:
    """Decide whether the run should use channels_last memory format.

    True only when the global toggle is on and the device is CUDA, with a
    carve-out for SMP EfficientNet encoders under half-precision AMP.
    """
    model_config = (model_config or current_model_config()).validate()
    if not USE_CHANNELS_LAST:
        return False
    if DEVICE.type != "cuda":
        # channels_last only pays off with CUDA/cuDNN kernels.
        return False
    amp_dtype = resolve_amp_dtype(AMP_DTYPE)
    if (
        model_config.backbone_family == "smp"
        and "efficientnet" in model_config.smp_encoder_name.lower()
        and USE_AMP
        and amp_dtype in {torch.float16, torch.bfloat16}
    ):
        # NOTE(review): EfficientNet + AMP + channels_last appears to have been
        # observed as numerically unstable; disabled with a visible notice.
        print("[MemoryFormat] Disabling channels_last for EfficientNet + AMP stability.")
        return False
    return True
|
|
def build_model(
    strategy: int,
    dropout_p: float,
    *,
    model_config: RuntimeModelConfig,
    strategy2_checkpoint_path: str | Path | None = None,
) -> tuple[nn.Module, str, bool]:
    """Construct, warm-start, and place the model for a training strategy.

    Strategies (both backbone families):
        1: RL model trained from scratch.
        2: plain supervised segmentation model.
        3: decoder-RL model, optionally warm-started from a Strategy 2 checkpoint.
        4: like 3, but the checkpoint is mandatory and the decoder backbone is frozen.
        5: RL model whose encoder is initialized from Strategy 2 and frozen.

    Args:
        strategy: Training strategy id (1-5).
        dropout_p: Dropout probability forwarded to the model constructors.
        model_config: Runtime model configuration (validated here).
        strategy2_checkpoint_path: Strategy 2 checkpoint used to warm-start
            (optional for strategy 3, required for strategies 4 and 5).

    Returns:
        Tuple of (model moved to DEVICE, human-readable strategy name,
        whether torch.compile succeeded).

    Raises:
        ValueError: On an unsupported strategy, a missing required checkpoint,
            or a checkpoint backbone-family mismatch.
    """
    model_config = model_config.validate()

    def _strategy2_state() -> dict[str, Any]:
        # Load + validate the Strategy 2 checkpoint and return its weights.
        ckpt = _load_strategy2_checkpoint_payload(strategy2_checkpoint_path, model_config=model_config)
        return ckpt["model_state_dict"]

    def _filter_prefixes(state: dict[str, Any], prefixes: tuple[str, ...]) -> dict[str, Any]:
        # Keep only entries whose key starts with one of the given prefixes.
        return {key: value for key, value in state.items() if key.startswith(prefixes)}

    def _remap_encoder_state(state: dict[str, Any], old_prefix: str) -> dict[str, Any]:
        # Move encoder weights under the RL feature-extractor namespace.
        # Only the leading prefix is rewritten; the previous str.replace-based
        # remap would also have rewritten later occurrences inside a key.
        return {
            "extractor.encoder." + key[len(old_prefix):]: value
            for key, value in state.items()
            if key.startswith(old_prefix)
        }

    if model_config.backbone_family == "custom_vgg":
        if strategy == 1:
            model = PixelDRLMG_VGG(
                num_scales=model_config.vgg_feature_scales,
                dilation=model_config.vgg_feature_dilation,
                dropout_p=dropout_p,
            )
        elif strategy == 2:
            model = SupervisedVGGModel(
                num_scales=model_config.vgg_feature_scales,
                dilation=model_config.vgg_feature_dilation,
                dropout_p=dropout_p,
            )
        elif strategy in (3, 4):
            if strategy == 4 and strategy2_checkpoint_path is None:
                raise ValueError("Strategy 4 requires a Strategy 2 checkpoint path.")
            model = PixelDRLMG_VGGWithDecoder(
                num_scales=model_config.vgg_feature_scales,
                dilation=model_config.vgg_feature_dilation,
                dropout_p=dropout_p,
            )
            if strategy2_checkpoint_path is not None:
                # Warm-start the supervised backbone from Strategy 2.
                decoder_state = _filter_prefixes(_strategy2_state(), ("encoder.", "segmentation_head."))
                model.load_state_dict(decoder_state, strict=False)
            if strategy == 4:
                # Strategy 4 trains only the RL components on a frozen decoder.
                model.encoder.requires_grad_(False)
                model.segmentation_head.requires_grad_(False)
        elif strategy == 5:
            if strategy2_checkpoint_path is None:
                raise ValueError("Strategy 5 requires a Strategy 2 checkpoint path.")
            model = PixelDRLMG_VGG(
                num_scales=model_config.vgg_feature_scales,
                dilation=model_config.vgg_feature_dilation,
                dropout_p=dropout_p,
            )
            model.load_state_dict(_remap_encoder_state(_strategy2_state(), "encoder."), strict=False)
            model.extractor.encoder.requires_grad_(False)
        else:
            raise ValueError(f"Unsupported strategy: {strategy}")
    else:
        if strategy == 1:
            model = PixelDRLMG_SMP(
                encoder_name=model_config.smp_encoder_name,
                encoder_weights=model_config.smp_encoder_weights,
                encoder_depth=model_config.smp_encoder_depth,
                proj_dim=model_config.smp_encoder_proj_dim,
                dropout_p=dropout_p,
            )
        elif strategy == 2:
            model = SupervisedSMPModel(
                arch=model_config.smp_decoder_type,
                encoder_name=model_config.smp_encoder_name,
                encoder_weights=model_config.smp_encoder_weights,
                dropout_p=dropout_p,
            )
        elif strategy in (3, 4):
            if strategy == 4 and strategy2_checkpoint_path is None:
                raise ValueError("Strategy 4 requires a Strategy 2 checkpoint path.")
            model = PixelDRLMG_WithDecoder(
                arch=model_config.smp_decoder_type,
                encoder_name=model_config.smp_encoder_name,
                # Strategy 4 always restores weights from the checkpoint below,
                # so skip fetching pretrained encoder weights for it.
                encoder_weights=model_config.smp_encoder_weights if strategy == 3 else None,
                encoder_depth=model_config.smp_encoder_depth,
                proj_dim=model_config.smp_encoder_proj_dim,
                dropout_p=dropout_p,
            )
            if strategy2_checkpoint_path is not None:
                smp_state = _filter_prefixes(_strategy2_state(), ("smp_model.",))
                model.load_state_dict(smp_state, strict=False)
            if strategy == 4:
                # Freeze the full supervised backbone; only RL heads stay trainable.
                model.smp_model.encoder.requires_grad_(False)
                model.smp_model.decoder.requires_grad_(False)
                if hasattr(model.smp_model, "segmentation_head"):
                    model.smp_model.segmentation_head.requires_grad_(False)
        elif strategy == 5:
            if strategy2_checkpoint_path is None:
                raise ValueError("Strategy 5 requires a Strategy 2 checkpoint path.")
            model = PixelDRLMG_SMP(
                encoder_name=model_config.smp_encoder_name,
                encoder_weights=None,
                encoder_depth=model_config.smp_encoder_depth,
                proj_dim=model_config.smp_encoder_proj_dim,
                dropout_p=dropout_p,
            )
            model.load_state_dict(_remap_encoder_state(_strategy2_state(), "smp_model.encoder."), strict=False)
            model.extractor.encoder.requires_grad_(False)
        else:
            raise ValueError(f"Unsupported strategy: {strategy}")

    use_channels_last_now = _use_channels_last_for_run(model_config)
    model = model.to(DEVICE)
    if use_channels_last_now:
        model = model.to(memory_format=torch.channels_last)

    compiled = False
    if USE_TORCH_COMPILE and hasattr(torch, "compile"):
        try:
            model = torch.compile(model)
            compiled = True
        except Exception as exc:
            # torch.compile availability varies by backend/version; fall back.
            print(f"[Compile] torch.compile skipped: {exc}")
    return model, strategy_name(strategy, model_config), compiled
|
|
| def _unwrap_compiled(model: nn.Module) -> nn.Module: |
| return getattr(model, "_orig_mod", model) |
|
|
| def count_parameters(module: nn.Module | None, *, only_trainable: bool = False) -> int: |
| if module is None: |
| return 0 |
| if only_trainable: |
| return sum(p.numel() for p in module.parameters() if p.requires_grad) |
| return sum(p.numel() for p in module.parameters()) |
|
|
def print_model_parameter_summary(
    *,
    model: nn.Module,
    description: str,
    strategy: int,
    model_config: RuntimeModelConfig,
    dropout_p: float,
    amp_dtype: torch.dtype,
    compiled: bool,
) -> None:
    """Print a human-readable parameter breakdown for the run's model.

    Reports totals (trainable/frozen), normalization-layer counts, runtime
    flags, and a per-component parameter count tailored to the strategy.
    The previous version repeated the RL-head and encoder/decoder counting
    blocks verbatim across strategies; they are now shared helpers.
    """
    raw = _unwrap_compiled(model)
    total_params = count_parameters(raw)
    trainable_params = count_parameters(raw, only_trainable=True)
    frozen_params = total_params - trainable_params
    bn_count = sum(1 for m in raw.modules() if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)))
    gn_count = sum(1 for m in raw.modules() if isinstance(m, nn.GroupNorm))

    section(f"Model Parameter Summary | {description}")
    print(f"Strategy : {strategy}")
    print(f"Model : {description}")
    print(f"Dropout p : {dropout_p:.4f}")
    print(f"Total params : {total_params:,}")
    print(f"Trainable params : {trainable_params:,}")
    print(f"Frozen params : {frozen_params:,}")
    print(f"BN layers : {bn_count}")
    print(f"GN layers : {gn_count}")
    print(f"channels_last : {_use_channels_last_for_run(model_config)}")
    print(f"AMP dtype : {amp_dtype}")
    print(f"torch.compile : {compiled}")
    print(f"Backbone family : {model_config.backbone_family}")

    def _rl_head_counts() -> dict[str, int]:
        # RL components shared by strategies 1, 3, 4 and 5.
        return {
            "sam": count_parameters(getattr(raw, "sam", None)),
            "policy_head": count_parameters(getattr(raw, "policy_head", None)),
            "value_head": count_parameters(getattr(raw, "value_head", None)),
            "omega": count_parameters(getattr(raw, "omega_conv", None)),
        }

    def _extractor_counts() -> dict[str, int]:
        # Strategies 1 and 5 wrap the encoder in a feature extractor.
        return {
            "encoder": count_parameters(getattr(raw.extractor, "encoder", None)),
            "projection": count_parameters(getattr(raw.extractor, "projection", None)),
        }

    def _decoder_backbone_counts(*, include_dropout: bool) -> dict[str, int]:
        # Strategies 2-4 either wrap an SMP model or expose encoder/head directly.
        if hasattr(raw, "smp_model"):
            smp_model = raw.smp_model
            counts = {
                "encoder": count_parameters(getattr(smp_model, "encoder", None)),
                "decoder": count_parameters(getattr(smp_model, "decoder", None)),
                "segmentation_head": count_parameters(getattr(smp_model, "segmentation_head", None)),
            }
            if include_dropout:
                counts["dropout"] = count_parameters(getattr(raw, "dropout", None))
            return counts
        return {
            "encoder": count_parameters(getattr(raw, "encoder", None)),
            "segmentation_head": count_parameters(getattr(raw, "segmentation_head", None)),
        }

    block_counts: dict[str, int] = {}
    if strategy in (1, 5):
        block_counts = {**_extractor_counts(), **_rl_head_counts()}
    elif strategy == 2:
        block_counts = _decoder_backbone_counts(include_dropout=True)
    elif strategy in (3, 4):
        block_counts = {**_decoder_backbone_counts(include_dropout=False), **_rl_head_counts()}

    for name, value in block_counts.items():
        print(f"{name:22s}: {value:,}")
|
|
| """============================================================================= |
| METRICS + CHECKPOINTS |
| ============================================================================= |
| """ |
|
|
# Smoothing constant added to metric numerators/denominators so that two
# empty masks score perfectly instead of dividing by zero.
_EPS = 1e-4
|
|
| def _as_bool(mask: np.ndarray) -> np.ndarray: |
| return (mask[0] if mask.ndim == 3 else mask).astype(bool) |
|
|
def _tp_fp_fn(pred: np.ndarray, target: np.ndarray):
    """Return (TP, FP, FN, |target|, |pred|) as floats for two binary masks."""
    pred_b = _as_bool(pred)
    target_b = _as_bool(target)
    tp = float(np.logical_and(pred_b, target_b).sum())
    fp = float(np.logical_and(pred_b, ~target_b).sum())
    fn = float(np.logical_and(~pred_b, target_b).sum())
    return tp, fp, fn, float(target_b.sum()), float(pred_b.sum())
|
|
def dice_score(pred: np.ndarray, target: np.ndarray) -> float:
    """Dice coefficient 2|P∩T| / (|P| + |T|), smoothed by ``_EPS``."""
    tp, _, _, target_area, pred_area = _tp_fp_fn(pred, target)
    return (2 * tp + _EPS) / (target_area + pred_area + _EPS)
|
|
def ppv_score(pred: np.ndarray, target: np.ndarray) -> float:
    """Positive predictive value (precision): TP / (TP + FP), smoothed by ``_EPS``."""
    tp, fp = _tp_fp_fn(pred, target)[:2]
    return (tp + _EPS) / (tp + fp + _EPS)
|
|
def sensitivity_score(pred: np.ndarray, target: np.ndarray) -> float:
    """Sensitivity (recall): TP / (TP + FN), smoothed by ``_EPS``."""
    tp, _, fn = _tp_fp_fn(pred, target)[:3]
    return (tp + _EPS) / (tp + fn + _EPS)
|
|
def iou_score(pred: np.ndarray, target: np.ndarray) -> float:
    """Intersection over union: TP / (|P| + |T| - TP), smoothed by ``_EPS``."""
    tp, _, _, target_area, pred_area = _tp_fp_fn(pred, target)
    return (tp + _EPS) / (target_area + pred_area - tp + _EPS)
|
|
def _boundary(mask: np.ndarray) -> np.ndarray:
    """One-pixel boundary of a binary mask (mask XOR its binary erosion)."""
    binary = _as_bool(mask)
    if not binary.any():
        # An empty mask has an empty boundary; skip the erosion.
        return binary
    eroded = ndimage.binary_erosion(binary, iterations=1, border_value=0)
    return binary ^ eroded
|
|
def boundary_iou_score(pred: np.ndarray, target: np.ndarray) -> float:
    """IoU computed on the one-pixel boundaries of both masks."""
    pred_edge = _boundary(pred)
    target_edge = _boundary(target)
    overlap = float(np.logical_and(pred_edge, target_edge).sum())
    union = float(np.logical_or(pred_edge, target_edge).sum())
    return (overlap + _EPS) / (union + _EPS)
|
|
def _surf_dist(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Distances from each boundary pixel of `a` to the nearest boundary pixel of `b`.

    Returns [0.0] when both masks are empty (perfect agreement) and [inf]
    when exactly one is empty (distance undefined).
    """
    a_bool = _as_bool(a)
    b_bool = _as_bool(b)
    a_empty = not a_bool.any()
    b_empty = not b_bool.any()
    if a_empty and b_empty:
        return np.array([0.0], dtype=np.float32)
    if a_empty or b_empty:
        return np.array([np.inf], dtype=np.float32)
    edge_a = _boundary(a_bool)
    edge_b = _boundary(b_bool)
    # EDT of the complement gives, at every pixel, the distance to edge_b.
    return ndimage.distance_transform_edt(~edge_b)[edge_a].astype(np.float32)
|
|
def hd95_score(pred: np.ndarray, target: np.ndarray) -> float:
    """Symmetric 95th-percentile Hausdorff distance (inf when one mask is empty)."""
    forward = _surf_dist(pred, target)
    backward = _surf_dist(target, pred)
    distances = np.concatenate([forward, backward])
    if np.isinf(distances).any():
        return float("inf")
    return float(np.percentile(distances, 95))
|
|
def compute_all_metrics(pred: np.ndarray, target: np.ndarray) -> dict[str, float]:
    """Evaluate every segmentation metric for one prediction/target pair."""
    metric_fns = {
        "dice": dice_score,
        "ppv": ppv_score,
        "sen": sensitivity_score,
        "iou": iou_score,
        "biou": boundary_iou_score,
        "hd95": hd95_score,
    }
    return {name: fn(pred, target) for name, fn in metric_fns.items()}
|
|
def checkpoint_manifest_path(path: Path) -> Path:
    """Sidecar JSON manifest path for a checkpoint file (``<name>.meta.json``)."""
    checkpoint = Path(path)
    return checkpoint.parent / f"{checkpoint.name}.meta.json"
|
|
def checkpoint_history_path(run_dir: Path, run_type: str) -> Path:
    """History JSON location for a run; overfit runs get a dedicated file."""
    filename = "overfit_history.json" if run_type == "overfit" else "history.json"
    return Path(run_dir) / filename
|
|
def checkpoint_state_presence(payload: dict[str, Any]) -> dict[str, bool]:
    """Map each tracked checkpoint key to whether `payload` contains it."""
    tracked = (
        "model_state_dict",
        "optimizer_state_dict",
        "scheduler_state_dict",
        "scaler_state_dict",
        "log_alpha",
        "alpha_optimizer_state_dict",
        "best_metric_name",
        "best_metric_value",
        "patience_counter",
        "elapsed_seconds",
        "run_config",
        "epoch_metrics",
        "resume_source",
    )
    return {key: key in payload for key in tracked}
|
|
def write_checkpoint_manifest(
    path: Path,
    payload: dict[str, Any],
    *,
    extra: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Write a sidecar ``.meta.json`` manifest describing a checkpoint payload.

    Args:
        path: Checkpoint file the manifest describes.
        payload: Checkpoint payload dict (as passed to ``torch.save``).
        extra: Optional entries merged into the manifest last (may override).

    Returns:
        The manifest dict that was written.
    """
    run_config = checkpoint_run_config_payload(payload)
    manifest = {
        "checkpoint_path": str(Path(path).resolve()),
        "run_type": payload.get("run_type", "unknown"),
        "epoch": int(payload.get("epoch", 0)),
        "strategy": run_config.get("strategy"),
        "dataset_percent": run_config.get("dataset_percent"),
        # "smp" is the historical default backbone for configs that predate the field.
        "backbone_family": run_config.get("backbone_family", "smp"),
        "saved_keys": sorted(payload.keys()),
        "state_presence": checkpoint_state_presence(payload),
    }
    if "resume_source" in payload:
        manifest["resume_source"] = payload["resume_source"]
    if extra:
        manifest.update(extra)
    save_json(checkpoint_manifest_path(path), manifest)
    return manifest
|
|
| def checkpoint_required_keys( |
| *, |
| optimizer: torch.optim.Optimizer | None, |
| scheduler: CosineAnnealingLR | None, |
| scaler: Any | None, |
| log_alpha: torch.Tensor | None, |
| alpha_optimizer: torch.optim.Optimizer | None, |
| require_run_metadata: bool, |
| ) -> list[str]: |
| keys = ["epoch", "model_state_dict"] |
| if require_run_metadata: |
| keys.extend( |
| [ |
| "run_type", |
| "best_metric_name", |
| "best_metric_value", |
| "patience_counter", |
| "elapsed_seconds", |
| "run_config", |
| "epoch_metrics", |
| ] |
| ) |
| if optimizer is not None: |
| keys.append("optimizer_state_dict") |
| if scheduler is not None: |
| keys.append("scheduler_state_dict") |
| if scaler is not None: |
| keys.append("scaler_state_dict") |
| if log_alpha is not None: |
| keys.append("log_alpha") |
| if alpha_optimizer is not None: |
| keys.append("alpha_optimizer_state_dict") |
| return keys |
|
|
| def validate_checkpoint_payload( |
| path: Path, |
| payload: dict[str, Any], |
| *, |
| required_keys: list[str], |
| expected_run_type: str | None = None, |
| ) -> None: |
| missing = [name for name in required_keys if name not in payload] |
| if missing: |
| raise KeyError(f"Checkpoint {path} is missing required keys: {missing}") |
| if expected_run_type is not None and payload.get("run_type") != expected_run_type: |
| raise ValueError( |
| f"Checkpoint {path} run_type mismatch: expected {expected_run_type!r}, " |
| f"got {payload.get('run_type')!r}." |
| ) |
|
|
def save_checkpoint(
    path: Path,
    *,
    run_type: str,
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    scheduler: CosineAnnealingLR | None,
    scaler: Any | None,
    epoch: int,
    best_metric_value: float,
    best_metric_name: str,
    run_config: dict[str, Any],
    epoch_metrics: dict[str, Any],
    patience_counter: int,
    elapsed_seconds: float,
    log_alpha: torch.Tensor | None = None,
    alpha_optimizer: torch.optim.Optimizer | None = None,
    resume_source: dict[str, Any] | None = None,
) -> None:
    """Serialize a resumable training checkpoint and its sidecar manifest.

    The payload is validated against `checkpoint_required_keys` *before*
    writing, so an incomplete payload can never produce an unresumable file.

    Note: the `scheduler` annotation previously referenced `ReduceLROnPlateau`,
    a name this module never imports; `CosineAnnealingLR` matches the imported
    class and the sibling `checkpoint_required_keys` signature. Only
    ``state_dict()`` is called, so any scheduler duck-types at runtime.

    Args:
        path: Destination file for ``torch.save``.
        run_type: Tag identifying the kind of run (e.g. "overfit").
        model: Model to snapshot; a torch.compile wrapper is unwrapped first.
        optimizer / scheduler / scaler: Live objects whose state is stored.
        epoch: Last completed epoch number.
        best_metric_value / best_metric_name: Best-so-far tracking state.
        run_config: Run configuration payload stored for later validation.
        epoch_metrics: Per-epoch metric history.
        patience_counter: Early-stopping counter.
        elapsed_seconds: Wall-clock training time so far.
        log_alpha / alpha_optimizer: Optional entropy-temperature state.
        resume_source: Optional provenance of the checkpoint this run resumed from.
    """
    payload = {
        "run_type": run_type,
        "epoch": epoch,
        "model_state_dict": _unwrap_compiled(model).state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "best_metric_name": best_metric_name,
        "best_metric_value": best_metric_value,
        "patience_counter": int(patience_counter),
        "elapsed_seconds": float(elapsed_seconds),
        "run_config": run_config,
        # "config" duplicates "run_config" for older readers; keep both.
        "config": run_config,
        "epoch_metrics": epoch_metrics,
    }
    if scheduler is not None:
        payload["scheduler_state_dict"] = scheduler.state_dict()
    if scaler is not None:
        payload["scaler_state_dict"] = scaler.state_dict()
    if log_alpha is not None:
        payload["log_alpha"] = float(log_alpha.detach().item())
    if alpha_optimizer is not None:
        payload["alpha_optimizer_state_dict"] = alpha_optimizer.state_dict()
    if resume_source is not None:
        payload["resume_source"] = resume_source
    # Self-check the payload before touching the filesystem.
    validate_checkpoint_payload(
        path,
        payload,
        required_keys=checkpoint_required_keys(
            optimizer=optimizer,
            scheduler=scheduler,
            scaler=scaler,
            log_alpha=log_alpha,
            alpha_optimizer=alpha_optimizer,
            require_run_metadata=True,
        ),
        expected_run_type=run_type,
    )
    torch.save(payload, path)
    write_checkpoint_manifest(path, payload)
|
|
def load_checkpoint(
    path: Path,
    *,
    model: nn.Module,
    optimizer: torch.optim.Optimizer | None = None,
    # NOTE(review): annotation changed from ReduceLROnPlateau (never imported
    # in this module) to the imported CosineAnnealingLR; only state_dict
    # round-tripping is used, so any scheduler object works at runtime.
    scheduler: CosineAnnealingLR | None = None,
    scaler: Any | None = None,
    device: torch.device,
    log_alpha: torch.Tensor | None = None,
    alpha_optimizer: torch.optim.Optimizer | None = None,
    expected_run_type: str | None = None,
    require_run_metadata: bool = False,
) -> dict[str, Any]:
    """Restore training state from a checkpoint, handling legacy Strategy 3/4 layouts.

    Validates the payload, remaps legacy ``policy_head.net.*`` weights,
    switches the model between legacy/refinement modes when needed, then
    restores optimizer/scheduler/scaler/alpha state for whichever of those
    objects were passed in. Returns the raw checkpoint payload dict.

    Raises:
        KeyError / ValueError: Via ``validate_checkpoint_payload`` when the
            payload is incomplete or has an unexpected run type.
    """
    ckpt = torch.load(path, map_location=device, weights_only=False)
    validate_checkpoint_payload(
        path,
        ckpt,
        required_keys=checkpoint_required_keys(
            optimizer=optimizer,
            scheduler=scheduler,
            scaler=scaler,
            log_alpha=log_alpha,
            alpha_optimizer=alpha_optimizer,
            require_run_metadata=require_run_metadata,
        ),
        expected_run_type=expected_run_type,
    )


    raw_model = _unwrap_compiled(model)
    state_dict = ckpt["model_state_dict"]
    load_strict = True
    compat_layout: dict[str, Any] | None = None
    _configure_policy_head_compatibility(model, state_dict, source=str(path))
    if any(key.startswith("policy_head.net.") for key in state_dict):
        # Legacy checkpoints stored the policy head as one flat Sequential.
        state_dict = _remap_legacy_policy_head_state_dict(state_dict)
    if isinstance(raw_model, (PixelDRLMG_WithDecoder, PixelDRLMG_VGGWithDecoder)):
        # Layout detection deliberately reads the ORIGINAL (un-remapped) keys.
        compat_layout = _strategy34_checkpoint_layout_info(ckpt["model_state_dict"], checkpoint_run_config_payload(ckpt))
        if compat_layout["is_strategy34_decoder_checkpoint"]:
            _configure_strategy34_model_compatibility(model, compat_layout, source=str(path))
            # Legacy checkpoints lack refinement_adapter weights; relax strictness.
            load_strict = bool(compat_layout["use_refinement"])


    incompatible = raw_model.load_state_dict(state_dict, strict=load_strict)
    if not load_strict:
        # Missing refinement_adapter.* keys are expected for legacy checkpoints;
        # report anything else so silent weight drops are visible in the logs.
        missing_keys = [key for key in incompatible.missing_keys if not key.startswith("refinement_adapter.")]
        unexpected_keys = list(incompatible.unexpected_keys)
        if missing_keys or unexpected_keys:
            print(
                "[Checkpoint Restore] Non-strict legacy Strategy 3/4 load "
                f"missing={missing_keys} unexpected={unexpected_keys}"
            )


    if optimizer is not None and "optimizer_state_dict" in ckpt:
        try:
            optimizer.load_state_dict(ckpt["optimizer_state_dict"])
        except ValueError:
            # Param-group layouts differ between legacy and refinement-capable
            # models; only tolerate the mismatch for known legacy checkpoints.
            if compat_layout is None or compat_layout.get("compatibility_mode") != "legacy":
                raise
            print(
                f"[Checkpoint Restore] Skipping optimizer state for legacy Strategy 3/4 checkpoint at {path} "
                "because the parameter layout differs from the refinement-capable model."
            )
    if scheduler is not None and "scheduler_state_dict" in ckpt:
        scheduler.load_state_dict(ckpt["scheduler_state_dict"])
    if scaler is not None and "scaler_state_dict" in ckpt:
        scaler.load_state_dict(ckpt["scaler_state_dict"])
    if log_alpha is not None and "log_alpha" in ckpt:
        with torch.no_grad():
            log_alpha.fill_(float(ckpt["log_alpha"]))
    if alpha_optimizer is not None and "alpha_optimizer_state_dict" in ckpt:
        alpha_optimizer.load_state_dict(ckpt["alpha_optimizer_state_dict"])
    restored = checkpoint_state_presence(ckpt)
    restore_info = {
        "restored_keys": restored,
        "restored_at_epoch": int(ckpt.get("epoch", 0)),
        "expected_run_type": expected_run_type,
    }
    # Record the restore event in the sidecar manifest for debuggability.
    write_checkpoint_manifest(path, ckpt, extra={"last_restore": restore_info})
    print(
        f"[Checkpoint Restore] path={path} epoch={ckpt.get('epoch')} "
        f"run_type={ckpt.get('run_type', 'unknown')} "
        f"backbone={checkpoint_run_config_payload(ckpt).get('backbone_family', 'unknown')}"
    )
    return ckpt
|
|
| """============================================================================= |
| TRAINING + VALIDATION |
| ============================================================================= |
| """ |
|
|
def sample_actions(policy_logits: torch.Tensor, stochastic: bool) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Choose a per-pixel action from policy logits (B, A, H, W).

    Returns (actions, per-pixel log-prob of the chosen action, mean entropy).
    Stochastic mode samples via the Gumbel-max trick; otherwise greedy argmax.
    """
    logits = policy_logits.float()
    log_probs = F.log_softmax(logits, dim=1)
    probs = log_probs.exp()
    entropy = -(probs * log_probs).sum(dim=1).mean()
    if stochastic:
        # Gumbel-max: argmax(logits + Gumbel noise) samples from the softmax.
        uniform_noise = torch.rand_like(logits)
        gumbel_noise = -torch.log(-torch.log(uniform_noise + 1e-8) + 1e-8)
        actions = torch.argmax(logits + gumbel_noise, dim=1)
    else:
        actions = torch.argmax(logits, dim=1)
    log_prob = torch.gather(log_probs, 1, actions.unsqueeze(1))
    return actions, log_prob, entropy
|
|
| def apply_actions( |
| seg: torch.Tensor, |
| actions: torch.Tensor, |
| *, |
| soft_update_step: float | None = None, |
| num_actions: int | None = None, |
| ) -> torch.Tensor: |
| num_actions = int(num_actions if num_actions is not None else _job_param("num_actions", NUM_ACTIONS)) |
| if num_actions == 5: |
| action_map = actions.long() |
| if soft_update_step is not None: |
| deltas = _refinement_deltas(device=seg.device, dtype=seg.dtype) |
| delta = deltas[action_map].unsqueeze(1) |
| return (seg + delta).clamp_(0.0, 1.0) |
| return torch.where( |
| action_map.unsqueeze(1) <= 1, |
| torch.zeros_like(seg), |
| torch.where(action_map.unsqueeze(1) >= 3, torch.ones_like(seg), seg), |
| ) |
| action_map = actions.unsqueeze(1) |
| if num_actions >= 3: |
| if soft_update_step is not None: |
| delta = torch.where( |
| action_map == 0, |
| torch.full_like(seg, -soft_update_step), |
| torch.where(action_map == 2, torch.full_like(seg, soft_update_step), torch.zeros_like(seg)), |
| ) |
| return (seg + delta).clamp_(0.0, 1.0) |
| return torch.where( |
| action_map == 0, |
| torch.zeros_like(seg), |
| torch.where(action_map == 2, torch.ones_like(seg), seg), |
| ) |
| return seg * (action_map == 1).to(dtype=seg.dtype) |
|
|
| def _soft_dice_tensor(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor: |
| inter = (pred * target).sum(dim=(1, 2, 3)) |
| denom = pred.sum(dim=(1, 2, 3)) + target.sum(dim=(1, 2, 3)) |
| return (2.0 * inter + 1e-6) / (denom + 1e-6) |
|
|
| def _soft_boundary(mask: torch.Tensor) -> torch.Tensor: |
| return (mask - F.avg_pool2d(mask, kernel_size=3, stride=1, padding=1)).abs() |
|
|
def compute_refinement_reward(
    seg: torch.Tensor,
    seg_next: torch.Tensor,
    gt_mask: torch.Tensor,
    *,
    decoder_prior: torch.Tensor | None = None,
) -> torch.Tensor:
    """Per-pixel reward for one refinement step (`seg` -> `seg_next`).

    The base term is the decrease in squared error against the ground truth;
    optional terms (weighted via job parameters) add soft-Dice improvement,
    boundary-map improvement, and agreement with a confident decoder prior.
    """
    seg_f = seg.float()
    seg_next_f = seg_next.float()
    gt_f = gt_mask.float()


    # Positive wherever the step moved a pixel closer to the ground truth.
    base_reward = (seg_f - gt_f).pow(2) - (seg_next_f - gt_f).pow(2)
    if decoder_prior is not None:
        prior_f = decoder_prior.float()
        # 4p(1-p) peaks at p=0.5: emphasize pixels the decoder is unsure about.
        uncertainty_focus = (4.0 * prior_f * (1.0 - prior_f)).clamp(0.0, 1.0)
        base_reward = base_reward * (0.35 + 0.65 * uncertainty_focus)
    reward = base_reward


    # Image-level soft-Dice improvement, broadcast to every pixel.
    dice_weight = float(_job_param("dice_reward_weight", 0.35))
    if dice_weight > 0:
        prev_dice = _soft_dice_tensor(seg_f, gt_f)
        next_dice = _soft_dice_tensor(seg_next_f, gt_f)
        reward = reward + dice_weight * (next_dice - prev_dice).view(-1, 1, 1, 1)


    # Reward sharper agreement with the ground-truth boundary map.
    boundary_weight = float(_job_param("boundary_reward_weight", 0.15))
    if boundary_weight > 0:
        gt_boundary = _soft_boundary(gt_f)
        prev_boundary = _soft_boundary(seg_f)
        next_boundary = _soft_boundary(seg_next_f)
        boundary_reward = (prev_boundary - gt_boundary).pow(2) - (next_boundary - gt_boundary).pow(2)
        reward = reward + boundary_weight * boundary_reward


    # Small pull toward the decoder prior where the decoder is confident.
    prior_weight = float(_job_param("prior_reward_weight", 0.01))
    if decoder_prior is not None and prior_weight > 0:
        prior_f = decoder_prior.float()
        # |p - 0.5| * 2 maps confidence to [0, 1].
        confidence = ((prior_f - 0.5).abs() * 2.0).clamp(0.0, 1.0)
        reward = reward + prior_weight * confidence * (
            (seg_f - prior_f).pow(2) - (seg_next_f - prior_f).pow(2)
        )
    return reward
|
|
def compute_strategy1_aux_segmentation_loss(
    policy_logits: torch.Tensor,
    gt_mask: torch.Tensor,
    *,
    ce_weight: float,
    dice_weight: float,
    seg_mask: torch.Tensor | None = None,
) -> tuple[torch.Tensor, float, float]:
    """Auxiliary supervised segmentation loss computed from policy logits.

    Two regimes, selected by the action-channel count:
      * 5 actions (graded refinement): the expected delta under the policy is
        applied to a base segmentation and the predicted next map is supervised
        with BCE and soft-Dice against the ground truth.
      * 2/3 actions: a per-pixel action target is derived from the ground
        truth (and current segmentation for 3 actions) and supervised with
        cross-entropy; Dice is computed on the foreground-action probability.

    Returns:
        (aux_loss tensor, CE loss value as float, Dice loss value as float).
    """
    if int(policy_logits.shape[1]) == 5:
        logits_f = policy_logits.float()
        gt_mask_f = gt_mask.float()
        # Without a current segmentation, start from maximal uncertainty (0.5).
        base_seg = seg_mask.float() if seg_mask is not None else torch.full_like(gt_mask_f, 0.5)
        probs = F.softmax(logits_f, dim=1)
        deltas = _refinement_deltas(device=logits_f.device, dtype=logits_f.dtype).view(1, -1, 1, 1)
        # Expected refinement step under the current policy distribution.
        expected_delta = (probs * deltas).sum(dim=1, keepdim=True)
        # Clamp away from {0, 1} so binary_cross_entropy stays finite.
        predicted_next = (base_seg + expected_delta).clamp(1e-4, 1.0 - 1e-4)


        aux_loss = torch.zeros((), device=policy_logits.device, dtype=torch.float32)
        ce_loss_value = 0.0
        dice_loss_value = 0.0


        if ce_weight > 0:
            bce = F.binary_cross_entropy(predicted_next, gt_mask_f)
            aux_loss = aux_loss + ce_weight * bce
            ce_loss_value = float(bce.detach().item())


        if dice_weight > 0:
            inter = (predicted_next * gt_mask_f).sum()
            dice_loss = 1.0 - (2.0 * inter + 1e-6) / (predicted_next.sum() + gt_mask_f.sum() + 1e-6)
            aux_loss = aux_loss + dice_weight * dice_loss
            dice_loss_value = float(dice_loss.detach().item())


        return aux_loss, ce_loss_value, dice_loss_value


    logits_f = policy_logits.float()
    gt_mask_f = gt_mask.float()
    num_actions = int(logits_f.shape[1])
    aux_loss = torch.zeros((), device=policy_logits.device, dtype=torch.float32)
    ce_loss_value = 0.0
    dice_loss_value = 0.0


    if ce_weight > 0:
        if num_actions >= 3:
            if seg_mask is None:
                # Treat every pixel as currently foreground when no map is given.
                seg_mask = torch.ones_like(gt_mask)
            seg_bin = threshold_binary_long(seg_mask).squeeze(1)
            gt_bin = gt_mask[:, 0].long()
            # Target action: 0 = clear (GT background), 1 = keep (already
            # foreground), 2 = raise (GT foreground not yet predicted).
            aux_target = torch.where(
                gt_bin == 0,
                torch.zeros_like(gt_bin),
                torch.where(seg_bin == 1, torch.ones_like(gt_bin), torch.full_like(gt_bin, 2)),
            )
        else:
            # Two-action space: map GT foreground to the last action index.
            aux_target = gt_mask[:, 0].long() * (num_actions - 1)
        ce_loss = F.cross_entropy(logits_f, aux_target)
        aux_loss = aux_loss + ce_weight * ce_loss
        ce_loss_value = float(ce_loss.detach().item())


    if dice_weight > 0:
        # The last action channel acts as the foreground probability.
        probs_fg = F.softmax(logits_f, dim=1)[:, num_actions - 1 : num_actions]
        inter = (probs_fg * gt_mask_f).sum()
        dice_loss = 1.0 - (2.0 * inter + 1e-6) / (probs_fg.sum() + gt_mask_f.sum() + 1e-6)
        aux_loss = aux_loss + dice_weight * dice_loss
        dice_loss_value = float(dice_loss.detach().item())


    return aux_loss, ce_loss_value, dice_loss_value
|
|
def make_optimizer(
    model: nn.Module,
    strategy: int,
    head_lr: float,
    encoder_lr: float,
    weight_decay: float,
    rl_lr: float | None = None,
):
    """Build an AdamW optimizer with separate encoder / decoder / RL param groups.

    Parameters are routed by name: encoder prefixes get `encoder_lr`, decoder
    and segmentation-head parameters get the job's decoder LR (default
    `head_lr`), everything else is treated as RL parameters. Frozen parameters
    are skipped. Empty groups are omitted.
    """
    raw = _unwrap_compiled(model)
    encoder_prefixes = (
        "extractor.encoder.",
        "encoder.",
        "smp_model.encoder.",
        "smp_encoder.",
    )
    grouped: dict[str, list] = {"encoder": [], "decoder": [], "rl": []}
    for name, param in raw.named_parameters():
        if not param.requires_grad:
            continue
        if name.startswith(encoder_prefixes):
            grouped["encoder"].append(param)
        elif "decoder" in name or "segmentation_head" in name:
            grouped["decoder"].append(param)
        else:
            grouped["rl"].append(param)

    lrs = {
        "encoder": encoder_lr,
        "decoder": float(_job_param("decoder_lr", head_lr)),
        "rl": float(_job_param("rl_lr", rl_lr if rl_lr is not None else head_lr)),
    }
    param_groups: list[dict[str, Any]] = [
        {"params": grouped[group], "lr": lrs[group]}
        for group in ("encoder", "decoder", "rl")
        if grouped[group]
    ]

    try:
        # Fused AdamW is only available on CUDA builds; fall back otherwise.
        return AdamW(param_groups, weight_decay=weight_decay, fused=DEVICE.type == "cuda")
    except Exception:
        return AdamW(param_groups, weight_decay=weight_decay)
|
|
def infer_segmentation_mask(
    model: nn.Module,
    image: torch.Tensor,
    tmax: int,
    *,
    strategy: int,
    use_amp: bool,
    amp_dtype: torch.dtype,
    use_channels_last: bool,
) -> torch.Tensor:
    """Run inference and return a segmentation mask tensor for ``image``.

    Dispatch by ``strategy``:
      * strategy 2 — single forward pass, sigmoid + threshold;
      * strategy 3/4 with the refinement runtime — start from the decoder
        probability map and apply up to ``tmax`` greedy policy refinement
        steps, stopping early once the policy mostly chooses "keep";
      * strategy 3/4 without the refinement runtime — threshold the decoder
        output, then run the image-masking policy loop;
      * otherwise — start from an all-ones mask and run the policy loop.

    Actions are taken greedily (``stochastic=False``).  All branches except
    the final generic policy loop binarize the mask before returning; the
    last branch returns the raw action-updated mask as float.
    """
    model.eval()
    # channels_last only helps 4-D CUDA tensors; skip otherwise.
    if use_channels_last and image.ndim == 4 and image.device.type == "cuda":
        image = image.contiguous(memory_format=torch.channels_last)

    if strategy == 2:
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            logits = model(image)
        return threshold_binary_mask(torch.sigmoid(logits)).float()

    refinement_runtime = _uses_refinement_runtime(model, strategy=strategy)
    if strategy in (3, 4) and refinement_runtime:
        # Middle action index is the "keep pixel unchanged" action.
        keep_index = NUM_ACTIONS // 2
        min_refinement_steps = int(_job_param("min_refinement_steps", 2))
        early_stop_keep_ratio = float(_job_param("early_stop_keep_ratio", 0.985))
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            refinement_context = model.prepare_refinement_context(image)
            seg = refinement_context["decoder_prob"].float()
        for step_idx in range(tmax):
            with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                state_t, _ = model.forward_refinement_state(
                    refinement_context["base_features"],
                    seg,
                    refinement_context["decoder_prob"],
                )
                policy_logits, _ = model.forward_from_state(state_t)
                actions, _, _ = sample_actions(policy_logits, stochastic=False)
                seg = apply_actions(
                    seg,
                    actions,
                    soft_update_step=float(_job_param("refine_delta_small", 0.10)),
                    num_actions=policy_logits.shape[1],
                ).to(dtype=seg.dtype)
            # Early exit once the policy is (almost) entirely "keep":
            # further steps would barely change the mask.
            if step_idx + 1 >= min_refinement_steps:
                keep_ratio = float((actions == keep_index).float().mean().item())
                if keep_ratio >= early_stop_keep_ratio:
                    break
        return threshold_binary_mask(seg.float()).float()

    if strategy in (3, 4):
        # No refinement runtime: seed the policy loop from the thresholded
        # decoder prediction …
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            seg = threshold_binary_mask(torch.sigmoid(model.forward_decoder(image))).float()
    else:
        # … otherwise start from an all-ones mask.
        seg = torch.ones(image.shape[0], 1, image.shape[2], image.shape[3], device=image.device, dtype=image.dtype)

    # Generic policy loop: mask the image with the current segmentation and
    # let the policy pick per-pixel actions greedily.
    for _ in range(tmax):
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            x_t = image * seg
            policy_logits = model.forward_policy_only(x_t)
            actions, _, _ = sample_actions(policy_logits, stochastic=False)
            seg = apply_actions(seg, actions, num_actions=policy_logits.shape[1]).to(dtype=seg.dtype)
    return seg.float()
|
|
def train_step(
    model: nn.Module,
    batch: dict[str, Any],
    optimizer: torch.optim.Optimizer,
    *,
    gamma: float,
    tmax: int,
    critic_loss_weight: float,
    log_alpha: torch.Tensor,
    alpha_optimizer: torch.optim.Optimizer,
    target_entropy: float,
    ce_weight: float,
    dice_weight: float,
    grad_clip_norm: float,
    scaler: Any | None,
    use_amp: bool,
    amp_dtype: torch.dtype,
    stepwise_backward: bool,
    use_channels_last: bool,
    initial_mask: torch.Tensor | None = None,
    decoder_loss_extra: torch.Tensor | None = None,
) -> dict[str, Any]:
    """One actor-critic update over ``tmax`` mask-editing steps.

    The policy iteratively edits a segmentation mask ``seg`` (seeded from
    ``initial_mask`` when given, else all-ones); the per-step reward is the
    decrease in squared error against ``gt_mask``.  Actor/critic/aux losses
    are backpropagated per step when ``stepwise_backward`` or accumulated
    into a single graph, followed by one optimizer step, then the entropy
    temperature (``log_alpha``) is updated toward ``target_entropy``.

    ``decoder_loss_extra``, when provided by a caller (strategy-3 legacy
    path), is added to the auxiliary backward pass.  Returns a metrics dict
    including the detached ``final_mask``.
    """
    model.train()
    image = batch["image"]
    gt_mask = batch["mask"]

    if use_channels_last and image.ndim == 4 and image.device.type == "cuda":
        image = image.contiguous(memory_format=torch.channels_last)
    if use_amp and image.device.type == "cuda":
        image = image.to(dtype=amp_dtype)
        gt_mask = gt_mask.to(dtype=amp_dtype)
    else:
        image = image.float()
        gt_mask = gt_mask.float()

    if initial_mask is not None:
        seg = initial_mask.to(device=image.device, dtype=image.dtype)
    else:
        seg = torch.ones(image.shape[0], 1, image.shape[2], image.shape[3], device=image.device, dtype=image.dtype)
    alpha = log_alpha.exp()

    total_actor = 0.0
    total_critic = 0.0
    total_loss = 0.0
    total_reward = 0.0
    total_entropy = 0.0
    total_ce_loss = 0.0
    total_dice_loss = 0.0
    accum_tensor = None
    alpha_loss_accum = torch.tensor(0.0, device=image.device, dtype=torch.float32)
    aux_fused = False  # True once the aux seg loss has been folded into a step loss

    optimizer.zero_grad(set_to_none=True)

    for _ in range(tmax):
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            x_t = image * seg
            state_t, _ = model.forward_state(x_t)
            policy_logits, value_t = model.forward_from_state(state_t)
            actions, log_prob, entropy = sample_actions(policy_logits, stochastic=True)
            log_prob = log_prob.clamp(min=-10.0)
            seg_next = apply_actions(seg, actions, num_actions=policy_logits.shape[1])
            # Reward: reduction of per-pixel squared error w.r.t. the GT mask.
            reward = (seg - gt_mask).pow(2) - (seg_next - gt_mask).pow(2)

        # Bootstrap target value for the next state, outside the graph.
        with torch.no_grad():
            with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                x_next = image * seg_next
                state_next, _ = model.forward_state(x_next)
                value_next = model.value_from_state(state_next).detach()

        neighborhood_next = model.neighborhood_value(value_next)
        target = reward + gamma * neighborhood_next
        advantage = target - value_t
        critic_loss = F.smooth_l1_loss(value_t, target)
        actor_loss = -(log_prob * advantage.detach()).mean()
        actor_loss = actor_loss - alpha.detach() * entropy
        step_loss = (actor_loss + critic_loss_weight * critic_loss) / float(tmax)
        alpha_loss_accum = alpha_loss_accum + (log_alpha * (entropy.detach() - target_entropy)) / float(tmax)

        # Fold the auxiliary segmentation loss into the first step only,
        # and only when the rollout started from an all-ones mask.
        if not aux_fused and initial_mask is None and (ce_weight > 0 or dice_weight > 0):
            aux_loss, ce_loss_value, dice_loss_value = compute_strategy1_aux_segmentation_loss(
                policy_logits,
                gt_mask,
                ce_weight=ce_weight,
                dice_weight=dice_weight,
                seg_mask=seg,
            )
            if ce_weight > 0:
                total_ce_loss = ce_loss_value
            if dice_weight > 0:
                total_dice_loss = dice_loss_value
            step_loss = step_loss + aux_loss
            aux_fused = True

        total_actor += float(actor_loss.detach().item())
        total_critic += float(critic_loss.detach().item())
        total_loss += float(step_loss.detach().item())
        total_reward += float(reward.detach().mean().item())
        total_entropy += float(entropy.detach().item())

        if stepwise_backward:
            # Free each step's graph immediately to cap peak memory.
            if scaler is not None:
                scaler.scale(step_loss).backward()
            else:
                step_loss.backward()
        else:
            accum_tensor = step_loss if accum_tensor is None else accum_tensor + step_loss

        seg = seg_next.detach()

    if not stepwise_backward and accum_tensor is not None:
        if scaler is not None:
            scaler.scale(accum_tensor).backward()
        else:
            accum_tensor.backward()

    # Aux segmentation loss when it could not be fused into a step above
    # (e.g. the rollout was seeded by a decoder mask).
    if not aux_fused and (ce_weight > 0 or dice_weight > 0):
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            policy_aux, _, _ = model(image)
        logits_f = policy_aux.float()
        # Fix: bind num_actions unconditionally — the dice branch needs it
        # even when ce_weight == 0 (previously raised NameError in that case).
        num_actions = int(logits_f.shape[1])
        aux_loss = torch.zeros(1, device=image.device, dtype=torch.float32)
        if ce_weight > 0:
            if num_actions >= 3:
                # 3-class target: background / already-foreground / to-grow.
                init_seg = initial_mask if initial_mask is not None else torch.ones_like(gt_mask)
                seg_bin = threshold_binary_long(init_seg).squeeze(1)
                gt_bin = gt_mask[:, 0].long()
                aux_target = torch.where(
                    gt_bin == 0,
                    torch.zeros_like(gt_bin),
                    torch.where(seg_bin == 1, torch.ones_like(gt_bin), torch.full_like(gt_bin, 2)),
                )
            else:
                aux_target = gt_mask[:, 0].long() * (num_actions - 1)
            ce_loss = F.cross_entropy(logits_f, aux_target)
            aux_loss = aux_loss + ce_weight * ce_loss
            total_ce_loss = float(ce_loss.detach().item())
        if dice_weight > 0:
            # Soft Dice on the "most foreground" action's probability.
            probs_fg = F.softmax(logits_f, dim=1)[:, num_actions - 1 : num_actions]
            gt_f = gt_mask.float()
            inter = (probs_fg * gt_f).sum()
            dice_loss = 1.0 - (2.0 * inter + 1e-6) / (probs_fg.sum() + gt_f.sum() + 1e-6)
            aux_loss = aux_loss + dice_weight * dice_loss
            total_dice_loss = float(dice_loss.detach().item())
        if decoder_loss_extra is not None:
            aux_loss = aux_loss + decoder_loss_extra
        if scaler is not None:
            scaler.scale(aux_loss).backward()
        else:
            aux_loss.backward()
        total_loss += float(aux_loss.detach().item())
    elif decoder_loss_extra is not None:
        if scaler is not None:
            scaler.scale(decoder_loss_extra).backward()
        else:
            decoder_loss_extra.backward()
        total_loss += float(decoder_loss_extra.detach().item())

    # Gradients must be unscaled before clipping when AMP scaling is active.
    if scaler is not None:
        scaler.unscale_(optimizer)
    total_grad_norm = float(torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_norm).item()) if grad_clip_norm > 0 else 0.0

    if scaler is not None:
        scaler.step(optimizer)
        scaler.update()
    else:
        optimizer.step()

    # Entropy-temperature update, then clamp log_alpha to a safe range.
    alpha_optimizer.zero_grad(set_to_none=True)
    alpha_loss_accum.backward()
    alpha_optimizer.step()
    with torch.no_grad():
        log_alpha.clamp_(min=-5.0, max=5.0)

    return {
        "loss": total_loss,
        "actor_loss": total_actor / tmax,
        "critic_loss": total_critic / tmax,
        "mean_reward": total_reward / tmax,
        "entropy": total_entropy / tmax,
        "ce_loss": total_ce_loss,
        "dice_loss": total_dice_loss,
        "grad_norm": total_grad_norm,
        "grad_clip_used": grad_clip_norm,
        "final_mask": seg.detach(),
    }
|
|
def train_step_strategy3(
    model: nn.Module,
    batch: dict[str, Any],
    optimizer: torch.optim.Optimizer,
    *,
    gamma: float,
    tmax: int,
    critic_loss_weight: float,
    log_alpha: torch.Tensor,
    alpha_optimizer: torch.optim.Optimizer,
    target_entropy: float,
    ce_weight: float,
    dice_weight: float,
    grad_clip_norm: float,
    scaler: Any | None,
    use_amp: bool,
    amp_dtype: torch.dtype,
    stepwise_backward: bool,
    use_channels_last: bool,
) -> dict[str, Any]:
    """Strategy-3 training step: decoder warm start + RL mask refinement.

    Two execution paths:
      * Legacy path (model lacks the refinement runtime): run the decoder
        once to get an initial thresholded mask and a supervised decoder
        loss, then delegate the RL rollout to ``train_step``.
      * Refinement path: collect up to ``tmax`` stochastic policy
        transitions that edit the decoder probability map, compute GAE
        advantages, and apply one combined backward pass over decoder +
        actor + critic + auxiliary losses, followed by the entropy
        temperature (alpha) update.
    """
    if not _uses_refinement_runtime(model, strategy=3):
        image = batch["image"]
        gt_mask = batch["mask"]
        if use_channels_last and image.ndim == 4 and image.device.type == "cuda":
            image = image.contiguous(memory_format=torch.channels_last)
        if use_amp and image.device.type == "cuda":
            image = image.to(dtype=amp_dtype)
            gt_mask = gt_mask.to(dtype=amp_dtype)
        else:
            image = image.float()
            gt_mask = gt_mask.float()

        # Decoder forward: its thresholded output seeds the RL rollout.
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            decoder_logits = model.forward_decoder(image)
            init_mask = threshold_binary_mask(torch.sigmoid(decoder_logits)).float()

        # Supervised decoder loss (BCE + soft Dice); passed through so it is
        # backpropagated together with the RL losses inside train_step.
        decoder_loss = torch.zeros(1, device=image.device, dtype=torch.float32)
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            dl_f = decoder_logits.float()
            gt_f = gt_mask.float()
            if ce_weight > 0:
                decoder_loss = decoder_loss + ce_weight * F.binary_cross_entropy_with_logits(dl_f, gt_f)
            if dice_weight > 0:
                dm_prob = torch.sigmoid(dl_f)
                inter = (dm_prob * gt_f).sum()
                decoder_loss = decoder_loss + dice_weight * (
                    1.0 - (2.0 * inter + 1e-6) / (dm_prob.sum() + gt_f.sum() + 1e-6)
                )

        return train_step(
            model,
            batch,
            optimizer,
            gamma=gamma,
            tmax=tmax,
            critic_loss_weight=critic_loss_weight,
            log_alpha=log_alpha,
            alpha_optimizer=alpha_optimizer,
            target_entropy=target_entropy,
            ce_weight=ce_weight,
            dice_weight=dice_weight,
            grad_clip_norm=grad_clip_norm,
            scaler=scaler,
            use_amp=use_amp,
            amp_dtype=amp_dtype,
            stepwise_backward=stepwise_backward,
            use_channels_last=use_channels_last,
            initial_mask=init_mask,
            decoder_loss_extra=decoder_loss,
        )

    model.train()
    image = batch["image"]
    gt_mask = batch["mask"]
    if use_channels_last and image.ndim == 4 and image.device.type == "cuda":
        image = image.contiguous(memory_format=torch.channels_last)
    if use_amp and image.device.type == "cuda":
        image = image.to(dtype=amp_dtype)
        gt_mask = gt_mask.to(dtype=amp_dtype)
    else:
        image = image.float()
        gt_mask = gt_mask.float()

    # The refinement path always accumulates a single combined graph.
    del stepwise_backward

    with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
        refinement_context = model.prepare_refinement_context(image)
        decoder_logits = refinement_context["decoder_logits"]
        decoder_prob = refinement_context["decoder_prob"]
        base_features = refinement_context["base_features"]
    seg = decoder_prob.detach().to(device=image.device, dtype=image.dtype)

    # Supervised decoder loss components (also logged separately).
    decoder_loss = torch.zeros((), device=image.device, dtype=torch.float32)
    decoder_ce_loss_value = 0.0
    decoder_dice_loss_value = 0.0
    with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
        dl_f = decoder_logits.float()
        gt_f = gt_mask.float()
        if ce_weight > 0:
            decoder_ce = F.binary_cross_entropy_with_logits(dl_f, gt_f)
            decoder_loss = decoder_loss + ce_weight * decoder_ce
            decoder_ce_loss_value = float(decoder_ce.detach().item())
        if dice_weight > 0:
            dm_prob = decoder_prob.float()
            inter = (dm_prob * gt_f).sum()
            decoder_dice = 1.0 - (2.0 * inter + 1e-6) / (dm_prob.sum() + gt_f.sum() + 1e-6)
            decoder_loss = decoder_loss + dice_weight * decoder_dice
            decoder_dice_loss_value = float(decoder_dice.detach().item())

    optimizer.zero_grad(set_to_none=True)

    alpha = log_alpha.exp()
    gae_lambda = float(_job_param("gae_lambda", 0.90))
    min_refinement_steps = int(_job_param("min_refinement_steps", 2))
    early_stop_keep_ratio = float(_job_param("early_stop_keep_ratio", 0.985))
    keep_index = NUM_ACTIONS // 2  # middle action = "keep pixel unchanged"

    transitions: list[dict[str, Any]] = []
    alpha_loss_accum = torch.zeros((), device=image.device, dtype=torch.float32)
    decoder_prior = decoder_prob.detach()
    detached_base_features = base_features.detach()
    detached_decoder_prob = decoder_prob.detach()

    # --- Rollout: collect stochastic refinement transitions. ---
    for step_idx in range(tmax):
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            state_t, _ = model.forward_refinement_state(
                base_features,
                seg,
                decoder_prob,
            )
            policy_logits, value_t = model.forward_from_state(state_t)
            actions, log_prob, entropy = sample_actions(policy_logits, stochastic=True)
            log_prob = log_prob.clamp(min=-10.0)
            seg_next = apply_actions(
                seg,
                actions,
                soft_update_step=float(_job_param("refine_delta_small", 0.10)),
                num_actions=policy_logits.shape[1],
            )
            reward = compute_refinement_reward(seg, seg_next, gt_mask.float(), decoder_prior=decoder_prior)
            aux_loss, ce_loss_value, dice_loss_value = compute_strategy1_aux_segmentation_loss(
                policy_logits,
                gt_mask,
                ce_weight=ce_weight,
                dice_weight=dice_weight,
                seg_mask=seg,
            )

        # Bootstrap value of the next state; detached inputs keep the TD
        # target from backpropagating into the shared features.
        with torch.no_grad():
            state_next, _ = model.forward_refinement_state(
                detached_base_features,
                seg_next,
                detached_decoder_prob,
            )
            value_next = model.value_from_state(state_next).detach()

        transitions.append(
            {
                "value": value_t,
                "next_value": value_next,
                "reward": reward,
                "log_prob": log_prob,
                "entropy": entropy,
                "aux_loss": aux_loss,
                "ce_loss": ce_loss_value,
                "dice_loss": dice_loss_value,
            }
        )
        alpha_loss_accum = alpha_loss_accum + (log_alpha * (entropy.detach() - target_entropy))
        seg = seg_next.detach()

        # Early exit once the policy is (almost) all "keep".
        if step_idx + 1 >= min_refinement_steps:
            keep_ratio = float((actions == keep_index).float().mean().item())
            if keep_ratio >= early_stop_keep_ratio:
                break

    # --- GAE advantages, computed backwards over the realized rollout. ---
    effective_steps = max(len(transitions), 1)
    advantages_rev: list[torch.Tensor] = []
    gae = torch.zeros_like(transitions[-1]["value"])
    for transition in reversed(transitions):
        td_error = transition["reward"] + gamma * model.neighborhood_value(transition["next_value"]) - transition["value"]
        gae = td_error + gamma * gae_lambda * gae
        advantages_rev.append(gae)
    advantages = list(reversed(advantages_rev))

    # --- Assemble combined loss: decoder + per-step actor/critic/aux. ---
    total_loss_tensor = decoder_loss
    total_actor = 0.0
    total_critic = 0.0
    total_reward = 0.0
    total_entropy = 0.0
    total_ce_loss = decoder_ce_loss_value
    total_dice_loss = decoder_dice_loss_value

    for transition, advantage in zip(transitions, advantages):
        detached_adv = advantage.detach()
        # Normalized advantage drives the actor; raw advantage builds the
        # critic's regression target.
        norm_adv = (detached_adv - detached_adv.mean()) / (detached_adv.std(unbiased=False) + 1e-6)
        critic_target = (transition["value"] + detached_adv).detach()
        critic_loss = F.smooth_l1_loss(transition["value"], critic_target)
        actor_loss = -(transition["log_prob"] * norm_adv).mean()
        actor_loss = actor_loss - alpha.detach() * transition["entropy"]
        step_loss = (actor_loss + critic_loss_weight * critic_loss + transition["aux_loss"]) / float(effective_steps)
        total_loss_tensor = total_loss_tensor + step_loss

        total_actor += float(actor_loss.detach().item())
        total_critic += float(critic_loss.detach().item())
        total_reward += float(transition["reward"].detach().mean().item())
        total_entropy += float(transition["entropy"].detach().item())
        total_ce_loss += float(transition["ce_loss"]) / float(effective_steps)
        total_dice_loss += float(transition["dice_loss"]) / float(effective_steps)

    # --- Single backward + (optionally AMP-scaled) optimizer step. ---
    if scaler is not None:
        scaler.scale(total_loss_tensor).backward()
        scaler.unscale_(optimizer)  # unscale before clipping
    else:
        total_loss_tensor.backward()

    total_grad_norm = (
        float(torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_norm).item()) if grad_clip_norm > 0 else 0.0
    )

    if scaler is not None:
        scaler.step(optimizer)
        scaler.update()
    else:
        optimizer.step()

    # Entropy-temperature update, averaged over the realized rollout length.
    alpha_optimizer.zero_grad(set_to_none=True)
    (alpha_loss_accum / float(effective_steps)).backward()
    alpha_optimizer.step()
    with torch.no_grad():
        log_alpha.clamp_(min=-5.0, max=5.0)

    return {
        "loss": float(total_loss_tensor.detach().item()),
        "actor_loss": total_actor / float(effective_steps),
        "critic_loss": total_critic / float(effective_steps),
        "mean_reward": total_reward / float(effective_steps),
        "entropy": total_entropy / float(effective_steps),
        "ce_loss": total_ce_loss,
        "dice_loss": total_dice_loss,
        "grad_norm": total_grad_norm,
        "grad_clip_used": grad_clip_norm,
        "final_mask": threshold_binary_mask(seg.detach().float()).float(),
    }
|
|
def train_step_supervised(
    model: nn.Module,
    batch: dict[str, Any],
    optimizer: torch.optim.Optimizer,
    *,
    scaler: Any | None,
    use_amp: bool,
    amp_dtype: torch.dtype,
    use_channels_last: bool,
    grad_clip_norm: float,
    ce_weight: float = 0.5,
    dice_weight: float = 0.5,
) -> dict[str, Any]:
    """Plain supervised step: weighted BCE + soft-Dice on the model logits.

    Returns the same metrics dict shape as the RL train steps, with the
    actor/critic/reward/entropy slots fixed at 0.0 so downstream logging
    stays uniform.
    """
    model.train()
    img = batch["image"]
    target = batch["mask"]

    on_cuda = img.device.type == "cuda"
    if use_channels_last and img.ndim == 4 and on_cuda:
        img = img.contiguous(memory_format=torch.channels_last)
    if use_amp and on_cuda:
        img = img.to(dtype=amp_dtype)
        target = target.to(dtype=amp_dtype)
    else:
        img = img.float()
        target = target.float()

    optimizer.zero_grad(set_to_none=True)
    with autocast_ctx(enabled=use_amp, device=img.device, amp_dtype=amp_dtype):
        logits = model(img)
        logits_f = logits.float()
        gt_f = target.float()
        loss = torch.zeros(1, device=img.device, dtype=torch.float32)
        ce_component = 0.0
        dice_component = 0.0
        if ce_weight > 0:
            bce_term = F.binary_cross_entropy_with_logits(logits_f, gt_f, reduction="mean")
            loss = loss + ce_weight * bce_term
            ce_component = float(bce_term.detach().item())
        if dice_weight > 0:
            probs = torch.sigmoid(logits_f)
            overlap = (probs * gt_f).sum()
            dice_term = 1.0 - (2.0 * overlap + 1e-6) / (probs.sum() + gt_f.sum() + 1e-6)
            loss = loss + dice_weight * dice_term
            dice_component = float(dice_term.detach().item())

    if scaler is None:
        loss.backward()
    else:
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)  # unscale before clipping

    grad_norm = 0.0
    if grad_clip_norm > 0:
        grad_norm = float(torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_norm).item())

    if scaler is None:
        optimizer.step()
    else:
        scaler.step(optimizer)
        scaler.update()

    pred_mask = threshold_binary_mask(torch.sigmoid(logits_f)).float().detach()
    return {
        "loss": float(loss.detach().item()),
        "actor_loss": 0.0,
        "critic_loss": 0.0,
        "mean_reward": 0.0,
        "entropy": 0.0,
        "ce_loss": ce_component,
        "dice_loss": dice_component,
        "grad_norm": grad_norm,
        "grad_clip_used": grad_clip_norm,
        "final_mask": pred_mask,
    }
|
|
@torch.inference_mode()
def validate(
    model: nn.Module,
    loader: DataLoader,
    *,
    strategy: int,
    tmax: int,
    use_amp: bool,
    amp_dtype: torch.dtype,
    use_channels_last: bool,
    gamma: float,
    critic_loss_weight: float,
    ce_weight: float,
    dice_weight: float,
) -> dict[str, float]:
    """Evaluate ``model`` on ``loader`` and return averaged metrics.

    Mirrors the training losses per strategy, without gradients:
      * strategy 2 — plain BCE + soft-Dice on the model logits;
      * strategies 3/4 with the refinement runtime — supervised decoder loss
        plus a deterministic ``tmax``-step refinement rollout with
        actor/critic diagnostics;
      * otherwise — the image-masking policy rollout plus a final auxiliary
        segmentation loss, with the prediction taken from
        ``infer_segmentation_mask``.

    Dice/IoU are computed per sample on the binarized predictions and
    averaged; empty metric lists yield 0.0.
    """
    model.eval()
    losses: list[float] = []
    dice_scores: list[float] = []
    iou_scores: list[float] = []
    entropies: list[float] = []
    rewards: list[float] = []
    actor_losses: list[float] = []
    critic_losses: list[float] = []
    ce_losses: list[float] = []
    dice_losses: list[float] = []
    prefetcher = CUDAPrefetcher(loader, DEVICE)
    try:
        for batch in tqdm(prefetcher, total=len(loader), desc="Validating", leave=False):
            image = batch["image"]
            gt_mask = batch["mask"].float()
            if use_channels_last and image.ndim == 4 and image.device.type == "cuda":
                image = image.contiguous(memory_format=torch.channels_last)
            if use_amp and DEVICE.type == "cuda":
                image = image.to(dtype=amp_dtype)
                gt_mask = gt_mask.to(dtype=amp_dtype)

            if strategy == 2:
                # Direct supervised evaluation: one forward pass.
                with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                    logits = model(image)
                pred_prob = torch.sigmoid(logits).float()
                pred = threshold_binary_mask(pred_prob).float()
                bce = F.binary_cross_entropy_with_logits(logits.float(), gt_mask.float(), reduction="mean") if ce_weight > 0 else torch.tensor(0.0, device=image.device)
                inter_prob = (pred_prob * gt_mask.float()).sum()
                dice_loss = 1.0 - (2.0 * inter_prob + 1e-6) / (pred_prob.sum() + gt_mask.sum() + 1e-6) if dice_weight > 0 else torch.tensor(0.0, device=image.device)
                losses.append(float((ce_weight * bce + dice_weight * dice_loss).item()))
                ce_losses.append(float(bce.item()) if ce_weight > 0 else 0.0)
                dice_losses.append(float(dice_loss.item()) if dice_weight > 0 else 0.0)
            else:
                refinement_runtime = _uses_refinement_runtime(model, strategy=strategy)
                if strategy in (3, 4) and refinement_runtime:
                    # Decoder forward: its probability map seeds the rollout.
                    with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                        refinement_context = model.prepare_refinement_context(image)
                        decoder_logits = refinement_context["decoder_logits"]
                        decoder_prob = refinement_context["decoder_prob"].float()
                    seg = decoder_prob.float()
                    decoder_loss = 0.0
                    batch_ce = 0.0
                    batch_dice = 0.0
                    with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                        dl_f = decoder_logits.float()
                        gt_f = gt_mask.float()
                        if ce_weight > 0:
                            batch_ce = float(F.binary_cross_entropy_with_logits(dl_f, gt_f).item())
                            decoder_loss += ce_weight * batch_ce
                        if dice_weight > 0:
                            dm_prob = decoder_prob.float()
                            inter = (dm_prob * gt_f).sum()
                            batch_dice = float(
                                (
                                    1.0 - (2.0 * inter + 1e-6) / (dm_prob.sum() + gt_f.sum() + 1e-6)
                                ).item()
                            )
                            decoder_loss += dice_weight * batch_dice
                else:
                    # NOTE(review): pos_weight/_weight_map are computed here
                    # but never used below — looks like leftover code; verify
                    # before removing.
                    fg_count = gt_mask.sum().clamp(min=1.0)
                    bg_count = (gt_mask == 0).sum().clamp(min=1.0)
                    pos_weight = bg_count / fg_count
                    _weight_map = torch.where(gt_mask == 1, pos_weight, torch.ones_like(gt_mask))
                    if strategy in (3, 4):
                        # Legacy strategy-3/4 path: thresholded decoder output
                        # seeds the image-masking rollout.
                        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                            decoder_logits = model.forward_decoder(image)
                            seg = threshold_binary_mask(torch.sigmoid(decoder_logits)).float()
                        decoder_loss = 0.0
                        batch_ce = 0.0
                        batch_dice = 0.0
                        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                            dl_f = decoder_logits.float()
                            gt_f = gt_mask.float()
                            if ce_weight > 0:
                                batch_ce = float(F.binary_cross_entropy_with_logits(dl_f, gt_f).item())
                                decoder_loss += ce_weight * batch_ce
                            if dice_weight > 0:
                                dm_prob = torch.sigmoid(dl_f)
                                inter = (dm_prob * gt_f).sum()
                                batch_dice = float(
                                    (
                                        1.0 - (2.0 * inter + 1e-6) / (dm_prob.sum() + gt_f.sum() + 1e-6)
                                    ).item()
                                )
                                decoder_loss += dice_weight * batch_dice
                    else:
                        # No decoder: rollout starts from an all-ones mask.
                        seg = torch.ones(
                            image.shape[0],
                            1,
                            image.shape[2],
                            image.shape[3],
                            device=image.device,
                            dtype=image.dtype,
                        )
                        decoder_loss = 0.0
                        batch_ce = 0.0
                        batch_dice = 0.0

                batch_actor = 0.0
                batch_critic = 0.0
                batch_reward = 0.0
                batch_entropy = 0.0
                batch_loss = decoder_loss

                if strategy in (3, 4) and refinement_runtime:
                    decoder_prior = decoder_prob.detach()
                    detached_base_features = refinement_context["base_features"].detach()
                    detached_decoder_prob = decoder_prob.detach()

                    # Deterministic refinement rollout mirroring the training
                    # losses (greedy actions, no parameter updates).
                    for _ in range(tmax):
                        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                            state_t, _ = model.forward_refinement_state(
                                refinement_context["base_features"],
                                seg,
                                decoder_prob,
                            )
                            policy_logits, value_t = model.forward_from_state(state_t)
                            actions, log_prob, entropy = sample_actions(policy_logits, stochastic=False)
                            log_prob = log_prob.clamp(min=-10.0)
                            seg_next = apply_actions(seg, actions, soft_update_step=DEFAULT_MASK_UPDATE_STEP, num_actions=policy_logits.shape[1])
                            reward = compute_refinement_reward(
                                seg,
                                seg_next,
                                gt_mask.float(),
                                decoder_prior=decoder_prior,
                            )
                            state_next, _ = model.forward_refinement_state(
                                detached_base_features,
                                seg_next,
                                detached_decoder_prob,
                            )
                            value_next = model.value_from_state(state_next).detach()
                            target = reward + gamma * model.neighborhood_value(value_next)
                            advantage = target - value_t
                            norm_advantage = advantage.detach()
                            norm_advantage = (norm_advantage - norm_advantage.mean()) / (
                                norm_advantage.std(unbiased=False) + 1e-6
                            )
                            actor_loss = -(log_prob * norm_advantage).mean()
                            critic_loss = F.smooth_l1_loss(value_t, target)
                            aux_loss, aux_ce, aux_dice = compute_strategy1_aux_segmentation_loss(
                                policy_logits,
                                gt_mask,
                                ce_weight=ce_weight,
                                dice_weight=dice_weight,
                                seg_mask=seg,
                            )

                        batch_actor += float(actor_loss.item())
                        batch_critic += float(critic_loss.item())
                        batch_reward += float(reward.mean().item())
                        batch_entropy += float(entropy.item())
                        batch_ce += aux_ce / float(tmax)
                        batch_dice += aux_dice / float(tmax)
                        batch_loss += float(
                            ((actor_loss + critic_loss_weight * critic_loss + aux_loss) / float(tmax)).item()
                        )
                        seg = seg_next
                    pred = threshold_binary_mask(seg.float()).float()
                else:
                    # Image-masking policy rollout (greedy actions).
                    for _ in range(tmax):
                        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                            x_t = image * seg
                            state_t, _ = model.forward_state(x_t)
                            policy_logits, value_t = model.forward_from_state(state_t)
                            actions, log_prob, entropy = sample_actions(policy_logits, stochastic=False)
                            log_prob = log_prob.clamp(min=-10.0)
                            seg_next = apply_actions(seg, actions, num_actions=policy_logits.shape[1])
                            reward = ((seg - gt_mask).pow(2) - (seg_next - gt_mask).pow(2))
                            x_next = image * seg_next
                            state_next, _ = model.forward_state(x_next)
                            value_next = model.value_from_state(state_next).detach()
                            target = reward + gamma * model.neighborhood_value(value_next)
                            advantage = target - value_t
                            actor_loss = -(log_prob * advantage.detach()).mean()
                            critic_loss = F.smooth_l1_loss(value_t, target)

                        batch_actor += float(actor_loss.item())
                        batch_critic += float(critic_loss.item())
                        batch_reward += float(reward.mean().item())
                        batch_entropy += float(entropy.item())
                        batch_loss += float(((actor_loss + critic_loss_weight * critic_loss) / float(tmax)).item())
                        seg = seg_next

                    # Final auxiliary segmentation loss + the actual prediction
                    # used for Dice/IoU.
                    with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                        policy_aux, _, _ = model(image)
                    aux_loss, aux_ce, aux_dice = compute_strategy1_aux_segmentation_loss(
                        policy_aux,
                        gt_mask,
                        ce_weight=ce_weight,
                        dice_weight=dice_weight,
                        seg_mask=seg,
                    )
                    batch_loss += float(aux_loss.item())
                    batch_ce = aux_ce
                    batch_dice = aux_dice
                    pred = infer_segmentation_mask(
                        model,
                        image,
                        tmax,
                        strategy=strategy,
                        use_amp=use_amp,
                        amp_dtype=amp_dtype,
                        use_channels_last=use_channels_last,
                    ).float()

                ce_losses.append(batch_ce)
                dice_losses.append(batch_dice)
                actor_losses.append(batch_actor / tmax)
                critic_losses.append(batch_critic / tmax)
                rewards.append(batch_reward / tmax)
                entropies.append(batch_entropy / tmax)
                losses.append(batch_loss)

            # Per-sample Dice / IoU on the binarized prediction.
            inter = (pred * gt_mask.float()).sum(dim=(1, 2, 3))
            pred_sum = pred.sum(dim=(1, 2, 3))
            gt_sum = gt_mask.float().sum(dim=(1, 2, 3))
            dice = (2.0 * inter + _EPS) / (pred_sum + gt_sum + _EPS)
            iou = (inter + _EPS) / (pred_sum + gt_sum - inter + _EPS)
            dice_scores.extend(dice.cpu().tolist())
            iou_scores.extend(iou.cpu().tolist())
    finally:
        prefetcher.close()
        del prefetcher

    return {
        "val_loss": float(np.mean(losses)) if losses else 0.0,
        "val_dice": float(np.mean(dice_scores)) if dice_scores else 0.0,
        "val_iou": float(np.mean(iou_scores)) if iou_scores else 0.0,
        "val_actor_loss": float(np.mean(actor_losses)) if actor_losses else 0.0,
        "val_critic_loss": float(np.mean(critic_losses)) if critic_losses else 0.0,
        "val_ce_loss": float(np.mean(ce_losses)) if ce_losses else 0.0,
        "val_dice_loss": float(np.mean(dice_losses)) if dice_losses else 0.0,
        "val_reward": float(np.mean(rewards)) if rewards else 0.0,
        "val_entropy": float(np.mean(entropies)) if entropies else 0.0,
    }
|
|
def _save_training_plots(history: list[dict[str, Any]], plots_dir: Path) -> None:
    """Render per-metric training curves from ``history`` into ``plots_dir``.

    Each spec is ``(file name, title, [(history key, legend label), ...])``.
    Rows missing a key are skipped, so curves may cover different epoch
    ranges; a figure is only written when at least one curve has data.
    (Fix: the previously computed-but-unused ``epochs`` list is removed.)
    """
    if not history:
        return
    ensure_dir(plots_dir)
    plot_specs = [
        ("loss.png", "Loss", [("train_loss", "Train"), ("val_loss", "Val")]),
        ("dice.png", "Dice", [("train_dice", "Train"), ("val_dice", "Val")]),
        ("iou.png", "IoU", [("train_iou", "Train"), ("val_iou", "Val")]),
        ("reward.png", "Reward", [("train_mean_reward", "Train"), ("val_reward", "Val")]),
        ("entropy.png", "Entropy", [("train_entropy", "Train"), ("val_entropy", "Val")]),
        ("ce_loss.png", "CE Loss", [("train_ce_loss", "Train")]),
        ("dice_loss.png", "Dice Loss", [("train_dice_loss", "Train")]),
        ("alpha.png", "Alpha", [("alpha", "Alpha")]),
        ("lr.png", "Learning Rate", [("lr", "Head LR"), ("encoder_lr", "Encoder LR")]),
    ]
    for file_name, title, curves in plot_specs:
        fig, ax = plt.subplots(figsize=(8, 4))
        has_data = False
        for key, label in curves:
            values = [(row["epoch"], row[key]) for row in history if key in row]
            if not values:
                continue
            xs, ys = zip(*values)
            ax.plot(xs, ys, label=label, linewidth=1.2)
            has_data = True
        if has_data:
            ax.set_title(title)
            ax.set_xlabel("Epoch")
            ax.set_ylabel(title)
            ax.grid(True, alpha=0.3)
            ax.legend()
            fig.tight_layout()
            fig.savefig(plots_dir / file_name, dpi=110)
        # Always close to avoid accumulating open figures across epochs.
        plt.close(fig)
|
|
# Run-config keys that must match between the current job and a saved
# checkpoint before resuming is allowed: dataset identity/split, backbone and
# decoder architecture, and the optimizer/schedule hyperparameters that are
# baked into the saved optimizer/scheduler state. Compared by
# validate_resume_checkpoint_identity().
RESUME_IDENTITY_KEYS = (
    "strategy",
    "dataset_percent",
    "dataset_name",
    "dataset_split_policy",
    "split_type",
    "train_subset_key",
    "backbone_family",
    "smp_encoder_name",
    "smp_encoder_weights",
    "smp_encoder_depth",
    "smp_encoder_proj_dim",
    "smp_decoder_type",
    "vgg_feature_scales",
    "vgg_feature_dilation",
    "head_lr",
    "encoder_lr",
    "weight_decay",
    "dropout_p",
    "tmax",
    "entropy_lr",
)
|
|
| def _resume_value_matches(current: Any, saved: Any) -> bool: |
| if isinstance(current, (int, float)) and isinstance(saved, (int, float)) and not isinstance(current, bool): |
| return math.isclose(float(current), float(saved), rel_tol=1e-9, abs_tol=1e-12) |
| return current == saved |
|
|
def validate_resume_checkpoint_identity(
    current_run_config: dict[str, Any],
    saved_run_config: dict[str, Any],
    *,
    checkpoint_path: Path,
) -> None:
    """Fail fast when a resume checkpoint belongs to a different run identity.

    Every key in RESUME_IDENTITY_KEYS must be present in both configs and
    match under _resume_value_matches; the optional strategy-2 dependency path
    is compared as a string (None and "" treated as equally unset). Raises
    ValueError listing every mismatch at once.
    """
    mismatches: list[str] = []
    for key in RESUME_IDENTITY_KEYS:
        in_both = key in current_run_config and key in saved_run_config
        if not in_both:
            mismatches.append(f"{key}: current={current_run_config.get(key)!r}, checkpoint={saved_run_config.get(key)!r}")
        elif not _resume_value_matches(current_run_config[key], saved_run_config[key]):
            mismatches.append(f"{key}: current={current_run_config[key]!r}, checkpoint={saved_run_config[key]!r}")

    current_s2 = current_run_config.get("strategy2_checkpoint_path")
    saved_s2 = saved_run_config.get("strategy2_checkpoint_path")
    if (current_s2 or saved_s2) and str(current_s2 or "") != str(saved_s2 or ""):
        mismatches.append(f"strategy2_checkpoint_path: current={current_s2!r}, checkpoint={saved_s2!r}")

    if mismatches:
        raise ValueError(
            f"Resume checkpoint identity mismatch for {checkpoint_path}:\n" + "\n".join(f" - {line}" for line in mismatches)
        )
|
|
def load_history_for_resume(history_path: Path, checkpoint_payload: dict[str, Any]) -> list[dict[str, Any]]:
    """Rebuild the per-epoch history list consistent with a resume checkpoint.

    Rows recorded after the checkpoint's epoch are dropped. If the on-disk
    history is missing, empty, or ends before the checkpoint epoch, the
    checkpoint's own ``epoch_metrics`` row fills the gap.
    """
    resume_epoch = int(checkpoint_payload.get("epoch", 0))
    checkpoint_row = checkpoint_payload.get("epoch_metrics")
    rows: list[dict[str, Any]] = []
    if history_path.exists():
        payload = load_json(history_path)
        if not isinstance(payload, list):
            raise RuntimeError(f"Expected list history at {history_path}, found {type(payload).__name__}.")
        rows = [dict(entry) for entry in payload if isinstance(entry, dict)]
        # Anything past the checkpoint epoch belongs to a divergent timeline.
        rows = [entry for entry in rows if int(entry.get("epoch", 0)) <= resume_epoch]
    if isinstance(checkpoint_row, dict):
        if not rows:
            rows = [dict(checkpoint_row)]
        elif int(rows[-1].get("epoch", 0)) < resume_epoch:
            rows.append(dict(checkpoint_row))
    return rows
|
|
def train_model(
    *,
    run_type: str,
    model_config: RuntimeModelConfig,
    run_config: dict[str, Any],
    model: nn.Module,
    description: str,
    strategy: int,
    run_dir: Path,
    bundle: DataBundle,
    max_epochs: int,
    head_lr: float,
    encoder_lr: float,
    weight_decay: float,
    tmax: int,
    entropy_lr: float,
    entropy_alpha_init: float,
    entropy_target_ratio: float,
    critic_loss_weight: float,
    ce_weight: float,
    dice_weight: float,
    dropout_p: float,
    resume_checkpoint_path: Path | None = None,
    trial: optuna.trial.Trial | None = None,
) -> tuple[dict[str, Any], list[dict[str, Any]]]:
    """Run the full training loop for one (strategy, model, run) job.

    Per epoch: trains over the whole train loader (dispatching on ``strategy``
    to the supervised, strategy-3 RL, or generic RL step function), optionally
    validates every VALIDATE_EVERY_N_EPOCHS epochs, steps the cosine LR
    schedule once, appends a metrics row to the history, writes plots and
    checkpoints (best / latest / every-N) for "final"/"trial" runs, reports to
    Optuna for pruning, and applies early stopping on validation patience.

    Args:
        run_type: Run category; only "final" and "trial" persist artifacts.
        model_config: Architecture config used for channels-last decisions.
        run_config: Full config stored in checkpoints for resume-identity checks.
        model: Model to train (may be a torch.compile wrapper).
        description: Human-readable label for the parameter summary printout.
        strategy: 2 = supervised; 3/4 = strategy-3 RL step; otherwise generic RL.
        run_dir: Output directory for checkpoints / plots / history / summary.
        bundle: Data loaders (train/val).
        max_epochs: Upper bound on epochs (1-based, inclusive).
        head_lr / encoder_lr / weight_decay: Optimizer hyperparameters.
        tmax: RL rollout length passed to the RL step and validation.
        entropy_lr / entropy_alpha_init / entropy_target_ratio: SAC-style
            entropy-temperature tuning hyperparameters.
        critic_loss_weight / ce_weight / dice_weight: Loss mixing weights.
        dropout_p: Reported in the parameter summary only.
        resume_checkpoint_path: Optional checkpoint to continue from.
        trial: Optional Optuna trial for intermediate reporting/pruning.

    Returns:
        (summary dict, list of per-epoch metric rows).
    """
    amp_dtype = resolve_amp_dtype(AMP_DTYPE)
    use_amp = amp_autocast_enabled(DEVICE)
    use_channels_last = _use_channels_last_for_run(model_config)
    # Strategies 3/4 re-apply architecture settings recorded in the checkpoint
    # before the state dict is loaded further below.
    if resume_checkpoint_path is not None and strategy in (3, 4):
        _configure_model_from_checkpoint_path(model, resume_checkpoint_path)
    print_model_parameter_summary(
        model=model,
        description=description,
        strategy=strategy,
        model_config=model_config,
        dropout_p=dropout_p,
        # torch.compile wrappers expose the original module as _orig_mod.
        compiled=hasattr(model, "_orig_mod"),
        amp_dtype=amp_dtype,
    )

    # NOTE(review): the head group is built with head_lr * 0.1 while rl_lr
    # receives the full head_lr — presumably intentional for the refinement
    # setup, but confirm against make_optimizer's group layout.
    optimizer = make_optimizer(model, strategy, head_lr=head_lr * 0.1, encoder_lr=encoder_lr, weight_decay=weight_decay, rl_lr=head_lr)
    scheduler = CosineAnnealingLR(
        optimizer,
        # NOTE(review): cosine horizon is hard-coded to 200 epochs and not
        # tied to max_epochs (or tmax) — confirm this is deliberate.
        T_max=200,
        eta_min=1e-6,
    )
    scaler = make_grad_scaler(enabled=use_amp, amp_dtype=amp_dtype, device=DEVICE)

    # SAC-style temperature: learn log(alpha) toward a target policy entropy
    # proportional to log(#actions); clamped args guard degenerate configs.
    target_entropy = entropy_target_ratio * math.log(max(_model_policy_action_count(model) or NUM_ACTIONS, 2))
    log_alpha = torch.tensor(math.log(max(entropy_alpha_init, 1e-8)), dtype=torch.float32, device=DEVICE, requires_grad=True)
    alpha_optimizer = Adam([log_alpha], lr=entropy_lr)

    # Only trial/final runs write checkpoints, plots, and history to disk.
    save_artifacts = run_type in {"final", "trial"}
    ckpt_dir = ensure_dir(run_dir / "checkpoints") if save_artifacts else None
    plots_dir = ensure_dir(run_dir / "plots") if save_artifacts else None
    history_path = checkpoint_history_path(run_dir, run_type)
    history: list[dict[str, Any]] = []
    best_val_iou = -1.0
    patience_counter = 0
    elapsed_before_resume = 0.0
    start_epoch = 1
    resume_source: dict[str, Any] | None = None
    if resume_checkpoint_path is not None:
        # Restore model/optimizer/scheduler/scaler (plus alpha state for RL
        # strategies), then verify the checkpoint matches this run's identity.
        checkpoint_payload = load_checkpoint(
            resume_checkpoint_path,
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
            scaler=scaler,
            device=DEVICE,
            log_alpha=log_alpha if strategy != 2 else None,
            alpha_optimizer=alpha_optimizer if strategy != 2 else None,
            expected_run_type=run_type,
            require_run_metadata=True,
        )
        validate_resume_checkpoint_identity(
            run_config,
            checkpoint_run_config_payload(checkpoint_payload),
            checkpoint_path=resume_checkpoint_path,
        )
        start_epoch = int(checkpoint_payload["epoch"]) + 1
        best_val_iou = float(checkpoint_payload["best_metric_value"])
        patience_counter = int(checkpoint_payload.get("patience_counter", 0))
        elapsed_before_resume = float(checkpoint_payload.get("elapsed_seconds", 0.0))
        history = load_history_for_resume(history_path, checkpoint_payload)
        resume_source = {
            "checkpoint_path": str(Path(resume_checkpoint_path).resolve()),
            "checkpoint_epoch": int(checkpoint_payload["epoch"]),
            "checkpoint_run_type": checkpoint_payload.get("run_type", run_type),
        }
        print(
            f"[Resume] {run_type} run continuing from {resume_checkpoint_path} "
            f"at epoch {start_epoch}/{max_epochs}."
        )
    start_time = time.time()
    validate_interval = max(int(VALIDATE_EVERY_N_EPOCHS), 1)
    run_label = f"Trial {trial.number:03d}" if trial is not None else "Run"

    for epoch in range(start_epoch, max_epochs + 1):
        # Per-batch accumulators, averaged into this epoch's history row.
        epoch_losses: list[float] = []
        epoch_actor: list[float] = []
        epoch_critic: list[float] = []
        epoch_reward: list[float] = []
        epoch_entropy: list[float] = []
        epoch_ce: list[float] = []
        epoch_dice_loss: list[float] = []
        epoch_grad: list[float] = []
        epoch_dices: list[float] = []
        epoch_ious: list[float] = []

        prefetcher = CUDAPrefetcher(bundle.train_loader, DEVICE)
        progress = tqdm(prefetcher, total=len(bundle.train_loader), desc=f"Epoch {epoch}/{max_epochs}", leave=False)
        for batch in progress:
            # Dispatch on strategy: 2 = pure supervised; 3/4 = strategy-3 RL
            # refinement; anything else = generic actor-critic step.
            if strategy == 2:
                metrics = train_step_supervised(
                    model,
                    batch,
                    optimizer,
                    scaler=scaler,
                    use_amp=use_amp,
                    amp_dtype=amp_dtype,
                    use_channels_last=use_channels_last,
                    grad_clip_norm=DEFAULT_GRAD_CLIP_NORM,
                    ce_weight=ce_weight,
                    dice_weight=dice_weight,
                )
            elif strategy in (3, 4):
                metrics = train_step_strategy3(
                    model,
                    batch,
                    optimizer,
                    gamma=DEFAULT_GAMMA,
                    tmax=tmax,
                    critic_loss_weight=critic_loss_weight,
                    log_alpha=log_alpha,
                    alpha_optimizer=alpha_optimizer,
                    target_entropy=target_entropy,
                    ce_weight=ce_weight,
                    dice_weight=dice_weight,
                    grad_clip_norm=DEFAULT_GRAD_CLIP_NORM,
                    scaler=scaler,
                    use_amp=use_amp,
                    amp_dtype=amp_dtype,
                    stepwise_backward=STEPWISE_BACKWARD,
                    use_channels_last=use_channels_last,
                )
            else:
                metrics = train_step(
                    model,
                    batch,
                    optimizer,
                    gamma=DEFAULT_GAMMA,
                    tmax=tmax,
                    critic_loss_weight=critic_loss_weight,
                    log_alpha=log_alpha,
                    alpha_optimizer=alpha_optimizer,
                    target_entropy=target_entropy,
                    ce_weight=ce_weight,
                    dice_weight=dice_weight,
                    grad_clip_norm=DEFAULT_GRAD_CLIP_NORM,
                    scaler=scaler,
                    use_amp=use_amp,
                    amp_dtype=amp_dtype,
                    stepwise_backward=STEPWISE_BACKWARD,
                    use_channels_last=use_channels_last,
                )

            epoch_losses.append(metrics["loss"])
            epoch_actor.append(metrics["actor_loss"])
            epoch_critic.append(metrics["critic_loss"])
            epoch_reward.append(metrics["mean_reward"])
            epoch_entropy.append(metrics["entropy"])
            epoch_ce.append(metrics["ce_loss"])
            epoch_dice_loss.append(metrics["dice_loss"])
            epoch_grad.append(metrics["grad_norm"])

            # Training dice/IoU computed directly from the step's final mask
            # against the ground truth, with _EPS smoothing on both ratios.
            pred_mask = metrics["final_mask"]
            gt_mask = batch["mask"].float()
            inter = (pred_mask * gt_mask).sum(dim=(1, 2, 3))
            pred_sum = pred_mask.sum(dim=(1, 2, 3))
            gt_sum = gt_mask.sum(dim=(1, 2, 3))
            dice = (2.0 * inter + _EPS) / (pred_sum + gt_sum + _EPS)
            iou = (inter + _EPS) / (pred_sum + gt_sum - inter + _EPS)
            epoch_dices.extend(dice.detach().cpu().tolist())
            epoch_ious.extend(iou.detach().cpu().tolist())

            # Convention from make_optimizer: last param group = head/RL,
            # group 0 = encoder (when separate groups exist).
            head_lr_now = float(optimizer.param_groups[-1]["lr"])
            enc_lr_now = float(optimizer.param_groups[0]["lr"]) if len(optimizer.param_groups) > 1 else head_lr_now
            progress.set_postfix(loss=f"{metrics['loss']:.4f}", iou=f"{np.mean(epoch_ious):.4f}", lr=f"{head_lr_now:.2e}")

        # Validation runs on a fixed cadence and always on the last epoch;
        # non-validated epochs keep None placeholders in the history row.
        should_validate = epoch % validate_interval == 0 or epoch == max_epochs
        val_metrics: dict[str, float | None] = {
            "val_loss": None,
            "val_dice": None,
            "val_iou": None,
            "val_reward": None,
            "val_entropy": None,
        }
        if should_validate:
            tqdm.write(
                f"[{run_label}] Epoch {epoch}/{max_epochs}: running validation on {len(bundle.val_loader)} batches..."
            )
            validated_metrics = validate(
                model,
                bundle.val_loader,
                strategy=strategy,
                tmax=tmax,
                use_amp=use_amp,
                amp_dtype=amp_dtype,
                use_channels_last=use_channels_last,
                gamma=DEFAULT_GAMMA,
                critic_loss_weight=critic_loss_weight,
                ce_weight=ce_weight,
                dice_weight=dice_weight,
            )
            val_metrics.update(validated_metrics)
        # LR schedule advances once per epoch regardless of validation.
        scheduler.step()

        row = {
            "epoch": epoch,
            "train_loss": float(np.mean(epoch_losses)) if epoch_losses else 0.0,
            "train_actor_loss": float(np.mean(epoch_actor)) if epoch_actor else 0.0,
            "train_critic_loss": float(np.mean(epoch_critic)) if epoch_critic else 0.0,
            "train_mean_reward": float(np.mean(epoch_reward)) if epoch_reward else 0.0,
            "train_entropy": float(np.mean(epoch_entropy)) if epoch_entropy else 0.0,
            "train_ce_loss": float(np.mean(epoch_ce)) if epoch_ce else 0.0,
            "train_dice_loss": float(np.mean(epoch_dice_loss)) if epoch_dice_loss else 0.0,
            "train_dice": float(np.mean(epoch_dices)) if epoch_dices else 0.0,
            "train_iou": float(np.mean(epoch_ious)) if epoch_ious else 0.0,
            "grad_norm": float(np.mean(epoch_grad)) if epoch_grad else 0.0,
            "lr": float(optimizer.param_groups[-1]["lr"]),
            "encoder_lr": float(optimizer.param_groups[0]["lr"]) if len(optimizer.param_groups) > 1 else float(optimizer.param_groups[-1]["lr"]),
            # Supervised runs have no entropy temperature; report 0.
            "alpha": float(log_alpha.exp().detach().item()) if strategy != 2 else 0.0,
            "validated_this_epoch": should_validate,
            **val_metrics,
        }
        history.append(row)
        if save_artifacts and plots_dir is not None:
            _save_training_plots(history, plots_dir)
            save_json(history_path, history)

        # Best-checkpoint bookkeeping: only validated epochs can improve or
        # consume patience.
        improved = False
        if should_validate:
            val_iou = float(val_metrics["val_iou"])
            improved = val_iou > best_val_iou
            if improved:
                best_val_iou = val_iou
                patience_counter = 0
                if save_artifacts and ckpt_dir is not None:
                    save_checkpoint(
                        ckpt_dir / "best.pt",
                        run_type=run_type,
                        model=model,
                        optimizer=optimizer,
                        scheduler=scheduler,
                        scaler=scaler,
                        epoch=epoch,
                        best_metric_value=best_val_iou,
                        best_metric_name=BEST_CHECKPOINT_METRIC,
                        run_config=run_config,
                        epoch_metrics=row,
                        patience_counter=patience_counter,
                        elapsed_seconds=elapsed_before_resume + (time.time() - start_time),
                        log_alpha=log_alpha if strategy != 2 else None,
                        alpha_optimizer=alpha_optimizer if strategy != 2 else None,
                        resume_source=resume_source,
                    )
            else:
                patience_counter += 1

        # "latest" checkpoint is overwritten every epoch when enabled.
        if save_artifacts and ckpt_dir is not None and SAVE_LATEST_EVERY_EPOCH:
            save_checkpoint(
                ckpt_dir / "latest.pt",
                run_type=run_type,
                model=model,
                optimizer=optimizer,
                scheduler=scheduler,
                scaler=scaler,
                epoch=epoch,
                best_metric_value=best_val_iou,
                best_metric_name=BEST_CHECKPOINT_METRIC,
                run_config=run_config,
                epoch_metrics=row,
                patience_counter=patience_counter,
                elapsed_seconds=elapsed_before_resume + (time.time() - start_time),
                log_alpha=log_alpha if strategy != 2 else None,
                alpha_optimizer=alpha_optimizer if strategy != 2 else None,
                resume_source=resume_source,
            )
        # Periodic numbered snapshots in addition to best/latest.
        if save_artifacts and ckpt_dir is not None and CHECKPOINT_EVERY_N_EPOCHS > 0 and epoch % CHECKPOINT_EVERY_N_EPOCHS == 0:
            save_checkpoint(
                ckpt_dir / f"epoch_{epoch:04d}.pt",
                run_type=run_type,
                model=model,
                optimizer=optimizer,
                scheduler=scheduler,
                scaler=scaler,
                epoch=epoch,
                best_metric_value=best_val_iou,
                best_metric_name=BEST_CHECKPOINT_METRIC,
                run_config=run_config,
                epoch_metrics=row,
                patience_counter=patience_counter,
                elapsed_seconds=elapsed_before_resume + (time.time() - start_time),
                log_alpha=log_alpha if strategy != 2 else None,
                alpha_optimizer=alpha_optimizer if strategy != 2 else None,
                resume_source=resume_source,
            )

        # Optuna pruning only considers validated epochs past the warmup.
        if trial is not None and should_validate:
            val_iou = float(val_metrics["val_iou"])
            trial.report(val_iou, step=epoch)
            if USE_TRIAL_PRUNING and epoch >= TRIAL_PRUNER_WARMUP_STEPS and trial.should_prune():
                raise optuna.TrialPruned(f"Trial pruned at epoch {epoch} with val_iou={val_iou:.4f}")

        if VERBOSE_EPOCH_LOG:
            tqdm.write(f"[{run_label}] Epoch {epoch}/{max_epochs}")
            tqdm.write(json.dumps(row, indent=2))
        else:
            tqdm.write(
                f"[{run_label}] Epoch {epoch}/{max_epochs}: "
                f"{format_concise_epoch_log(row, best_val_iou=best_val_iou)}"
            )

        if should_validate and EARLY_STOPPING_PATIENCE > 0 and patience_counter >= EARLY_STOPPING_PATIENCE:
            print(f"Early stopping triggered at epoch {epoch}.")
            break

    elapsed = elapsed_before_resume + (time.time() - start_time)
    summary = {
        "best_val_iou": best_val_iou,
        "best_val_dice": max((float(r["val_dice"]) for r in history if r.get("val_dice") is not None), default=0.0),
        "final_epoch": int(history[-1]["epoch"]) if history else int(start_epoch - 1),
        "elapsed_seconds": elapsed,
        "seconds_per_epoch": elapsed / max(len(history), 1),
        "device_used": str(DEVICE),
        "strategy": strategy,
        "run_type": run_type,
        "resumed": resume_source is not None,
    }
    if resume_source is not None:
        summary["resume_source"] = resume_source
    if save_artifacts:
        save_json(run_dir / "summary.json", summary)
    return summary, history
|
|
| """============================================================================= |
| EVALUATION + SMOKE TEST |
| ============================================================================= |
| """ |
|
|
def _save_rgb_panel(image_chw: np.ndarray, pred_hw: np.ndarray, gt_hw: np.ndarray, output_path: Path, title: str) -> None:
    """Write a three-panel comparison figure: input, prediction, ground truth."""
    rgb = image_chw.transpose(1, 2, 0)
    # Min-max normalize for display; the epsilon guards a constant image.
    rgb = (rgb - rgb.min()) / (rgb.max() - rgb.min() + 1e-8)
    fig, (panel_in, panel_pred, panel_gt) = plt.subplots(1, 3, figsize=(12, 4))
    panel_in.imshow(rgb)
    panel_in.set_title("Input")
    panel_pred.imshow(pred_hw, cmap="gray", vmin=0, vmax=1)
    panel_pred.set_title("Prediction")
    panel_gt.imshow(gt_hw, cmap="gray", vmin=0, vmax=1)
    panel_gt.set_title("Ground Truth")
    for panel in (panel_in, panel_pred, panel_gt):
        panel.axis("off")
    fig.suptitle(title)
    fig.tight_layout()
    fig.savefig(output_path, dpi=120)
    plt.close(fig)
|
|
def evaluate_model(
    *,
    model: nn.Module,
    model_config: RuntimeModelConfig,
    bundle: DataBundle,
    run_dir: Path,
    strategy: int,
    tmax: int,
) -> tuple[dict[str, dict[str, float]], list[dict[str, Any]]]:
    """Evaluate on the test split and persist masks, JSON, CSV, and Excel.

    For every test sample: infer a binary mask, compute the metric set
    (dice/ppv/sen/iou/biou/hd95 via compute_all_metrics), and save the mask
    both as raw {0,1} and as a viewable 0/255 PNG. Aggregates mean/std per
    metric, then writes evaluation.json, evaluation_results.xlsx (per-sample,
    summary, low-IoU, and training-history sheets), evaluation_summary.json,
    and low_iou_samples.csv under run_dir.

    Returns:
        (aggregate mean/std per metric, per-sample metric rows).
    """
    amp_dtype = resolve_amp_dtype(AMP_DTYPE)
    use_amp = amp_autocast_enabled(DEVICE)
    use_channels_last = _use_channels_last_for_run(model_config)
    pred_dir = ensure_dir(run_dir / "predictions")  # raw {0,1} masks
    pred_255_dir = ensure_dir(run_dir / "predictions_255")  # 0/255 masks for viewing

    model.eval()
    per_metric = {k: [] for k in ("dice", "ppv", "sen", "iou", "biou", "hd95")}
    per_sample: list[dict[str, Any]] = []

    with torch.inference_mode():
        prefetcher = CUDAPrefetcher(bundle.test_loader, DEVICE)
        for batch in tqdm(prefetcher, total=len(bundle.test_loader), desc="Evaluating", leave=False):
            image = batch["image"]
            gt = batch["mask"]
            # Match the autocast dtype up front so inference sees one dtype.
            if use_amp and DEVICE.type == "cuda":
                image = image.to(dtype=amp_dtype)
            pred = infer_segmentation_mask(
                model,
                image,
                tmax,
                strategy=strategy,
                use_amp=use_amp,
                amp_dtype=amp_dtype,
                use_channels_last=use_channels_last,
            ).float()
            pred_np = pred.cpu().numpy().astype(np.uint8)
            gt_np = gt.cpu().numpy().astype(np.uint8)
            # Assumes batch["sample_id"] is index-aligned with the batch dim.
            sample_ids = batch["sample_id"]
            for idx in range(pred_np.shape[0]):
                metrics = compute_all_metrics(pred_np[idx], gt_np[idx])
                for key, value in metrics.items():
                    per_metric[key].append(value)
                per_sample.append({"sample_id": sample_ids[idx], **metrics})
                mask_2d = pred_np[idx].squeeze()
                PILImage.fromarray(mask_2d).save(pred_dir / f"{sample_ids[idx]}.png")
                PILImage.fromarray((mask_2d * 255).astype(np.uint8)).save(pred_255_dir / f"{sample_ids[idx]}.png")

    # NOTE(review): an empty test loader would leave the value lists empty and
    # make .mean()/.std() return nan with a runtime warning — confirm the test
    # split is always non-empty upstream.
    aggregate: dict[str, dict[str, float]] = {}
    for key, values in per_metric.items():
        values_np = np.array(values, dtype=np.float32)
        aggregate[key] = {"mean": float(values_np.mean()), "std": float(values_np.std())}

    save_json(
        run_dir / "evaluation.json",
        {
            "strategy": strategy,
            "best_metric_name": BEST_CHECKPOINT_METRIC,
            "metrics": aggregate,
            "per_sample": per_sample,
        },
    )

    # Excel export: per-sample rows plus a trailing AVERAGE row, the summary
    # table, near-zero-IoU failures, and the training history when present.
    df_all = pd.DataFrame(per_sample)
    avg_row = {}
    for column in df_all.columns:
        avg_row[column] = df_all[column].mean() if pd.api.types.is_numeric_dtype(df_all[column]) else "AVERAGE"
    df_samples = pd.concat([df_all, pd.DataFrame([avg_row])], ignore_index=True)
    df_summary = pd.DataFrame(aggregate).T
    df_summary.index.name = "metric"
    # Samples with essentially no overlap are surfaced separately.
    df_low_iou = df_all[df_all["iou"] < 0.01]
    history_path = run_dir / "history.json"
    df_history = pd.DataFrame(load_json(history_path)) if history_path.exists() else None

    # Requires the optional openpyxl engine to be installed.
    xlsx_path = run_dir / "evaluation_results.xlsx"
    with pd.ExcelWriter(xlsx_path, engine="openpyxl") as writer:
        df_samples.to_excel(writer, sheet_name="Per Sample", index=False)
        df_summary.to_excel(writer, sheet_name="Summary")
        if not df_low_iou.empty:
            df_low_iou.to_excel(writer, sheet_name="Low IoU Samples", index=False)
        if df_history is not None:
            df_history.to_excel(writer, sheet_name="Training History", index=False)

    csv_rows = [{"sample_id": row["sample_id"]} for row in df_low_iou.to_dict(orient="records")]
    save_json(run_dir / "evaluation_summary.json", {"mean_iou": aggregate["iou"]["mean"], "mean_dice": aggregate["dice"]["mean"]})
    pd.DataFrame(csv_rows).to_csv(run_dir / "low_iou_samples.csv", index=False)
    return aggregate, per_sample
|
|
def percent_root(percent: float) -> Path:
    """Run directory for one dataset-percent setting (created on demand)."""
    pct_dir = RUNS_ROOT / MODEL_NAME / f"pct_{percent_label(percent)}"
    return ensure_dir(pct_dir)
|
|
def strategy_dir_name(strategy: int, model_config: RuntimeModelConfig | None = None) -> str:
    """Folder name for a strategy; suffixed when the custom-VGG backbone is used."""
    cfg = (model_config or current_model_config()).validate()
    suffix = "_custom_vgg" if cfg.backbone_family == "custom_vgg" else ""
    return f"strategy_{strategy}{suffix}"
|
|
def strategy_root_for_percent(
    strategy: int,
    percent: float,
    model_config: RuntimeModelConfig | None = None,
) -> Path:
    """Per-strategy directory inside the percent root (created on demand)."""
    strategy_dir = percent_root(percent) / strategy_dir_name(strategy, model_config)
    return ensure_dir(strategy_dir)
|
|
def final_root_for_strategy(
    strategy: int,
    percent: float,
    model_config: RuntimeModelConfig | None = None,
) -> Path:
    """'final' run directory for a strategy/percent pair (created on demand)."""
    strategy_root = strategy_root_for_percent(strategy, percent, model_config)
    return ensure_dir(strategy_root / "final")
|
|
def ensure_specific_checkpoint_scope(selector_name: str, selector_mode: str) -> None:
    """Guard: a 'specific' checkpoint selector only makes sense for one job.

    No-op for any other selector mode. Strategy-2 selectors configured as a
    per-percent dict scope themselves and are exempt. Otherwise exactly one
    strategy and one dataset percent must be configured, or ValueError is
    raised.
    """
    if selector_mode != "specific":
        return

    if "strategy2" in selector_name.lower() and isinstance(STRATEGY2_SPECIFIC_CHECKPOINT, dict):
        return
    single_job = len(STRATEGIES) == 1 and len(DATASET_PERCENTS) == 1
    if not single_job:
        raise ValueError(
            f"{selector_name}=specific is only supported when exactly one strategy and one dataset percent are selected. "
            f"Got STRATEGIES={STRATEGIES} and DATASET_PERCENTS={DATASET_PERCENTS}."
        )
|
|
def resolve_checkpoint_path(
    *,
    run_dir: Path,
    selector_mode: str,
    specific_checkpoint: str | Path | dict,
    purpose: str,
) -> Path:
    """Map a selector mode onto a concrete checkpoint file and verify it exists.

    "latest" and "best" pick the matching file under run_dir/checkpoints;
    "specific" resolves the user-supplied path after a scope check.

    Raises:
        ValueError: unknown selector mode, or empty path for "specific".
        FileNotFoundError: the resolved checkpoint file does not exist.
    """
    run_dir = Path(run_dir)
    if selector_mode in ("latest", "best"):
        candidate = run_dir / "checkpoints" / f"{selector_mode}.pt"
    elif selector_mode == "specific":
        ensure_specific_checkpoint_scope(purpose, selector_mode)
        if not specific_checkpoint:
            raise ValueError(f"{purpose}=specific requires a non-empty specific checkpoint path.")
        candidate = Path(specific_checkpoint).expanduser().resolve()
    else:
        raise ValueError(f"Unsupported checkpoint selector mode '{selector_mode}' for {purpose}.")

    if not candidate.exists():
        raise FileNotFoundError(f"Checkpoint for {purpose} not found: {candidate}")
    return candidate
|
|
def resolve_train_resume_checkpoint_path(run_dir: Path) -> Path | None:
    """Checkpoint to resume training from, or None when resuming is disabled."""
    mode = TRAIN_RESUME_MODE
    if mode == "off":
        return None
    return resolve_checkpoint_path(
        run_dir=run_dir,
        selector_mode=mode,
        specific_checkpoint=TRAIN_RESUME_SPECIFIC_CHECKPOINT,
        purpose="train_resume_checkpoint",
    )
|
|
def resolve_strategy2_checkpoint_path(
    strategy: int,
    percent: float,
    model_config: RuntimeModelConfig | None = None,
) -> Path:
    """Locate the strategy-2 checkpoint that strategies 3/4/5 initialize from.

    When STRATEGY2_SPECIFIC_CHECKPOINT is a dict, it maps dataset percent to
    an explicit checkpoint path. Raises ValueError for strategies that have
    no strategy-2 dependency.
    """
    if strategy not in (3, 4, 5):
        raise ValueError(f"Strategy 2 dependency checkpoint requested for unsupported strategy {strategy}.")

    specific_checkpoint = STRATEGY2_SPECIFIC_CHECKPOINT
    if isinstance(specific_checkpoint, dict):
        # Per-percent mapping; an absent entry falls back to "" (unset).
        specific_checkpoint = specific_checkpoint.get(percent, "")

    resolved = resolve_checkpoint_path(
        run_dir=final_root_for_strategy(2, percent, model_config),
        selector_mode=STRATEGY2_CHECKPOINT_MODE,
        specific_checkpoint=specific_checkpoint,
        purpose="strategy2_checkpoint",
    )

    print(f"[Strategy 2 Checkpoint] Strategy {strategy} | {percent_label(percent)}% | Loading: {resolved}")

    return resolved
|
|
def load_required_hparams(payload: dict[str, Any], *, source: str, strategy: int, percent: float) -> dict[str, Any]:
    """Validate that payload carries every required hyperparameter key.

    Returns a shallow copy of the payload; raises KeyError naming the missing
    keys and the offending source otherwise.
    """
    missing_keys = [key for key in REQUIRED_HPARAM_KEYS if key not in payload]
    if missing_keys:
        raise KeyError(
            f"Incomplete hyperparameters for strategy={strategy}, percent={percent_label(percent)}% from {source}. "
            f"Missing keys: {missing_keys}. Required keys: {REQUIRED_HPARAM_KEYS}."
        )
    return dict(payload)
|
|
def load_saved_best_params_if_optuna_off(
    strategy: int,
    percent: float,
    *,
    model_config: RuntimeModelConfig,
) -> dict[str, Any]:
    """Reuse a previous Optuna study's best_params.json for this job.

    Used when RUN_OPTUNA is False but saved study results should drive
    training. Raises FileNotFoundError when no saved best params exist.
    """
    _, study_root, _ = study_paths_for(strategy, percent, model_config)
    saved_path = study_root / "best_params.json"
    if not saved_path.exists():
        raise FileNotFoundError(
            f"RUN_OPTUNA=False and USE_EXISTING_BEST_PARAMS_WHEN_OPTUNA_OFF=True, but no saved best params were found "
            f"for strategy={strategy}, percent={percent_label(percent)}% at {saved_path}."
        )
    params = load_required_hparams(
        load_json(saved_path),
        source=str(saved_path),
        strategy=strategy,
        percent=percent,
    )
    print(
        f"[Optuna Off] Using saved best parameters for strategy={strategy}, "
        f"percent={percent_label(percent)}% from {saved_path}."
    )
    for name in REQUIRED_HPARAM_KEYS:
        print(f" {name:20s}: {_format_hparam_value(name, params[name])}")
    return params
|
|
def load_manual_hparams_if_optuna_off(strategy: int, percent: float) -> dict[str, Any]:
    """Load hand-written hyperparameters configured in MANUAL_HPARAMS_IF_OPTUNA_OFF.

    Raises KeyError when no filename is configured for this strategy/percent
    pair, and FileNotFoundError when the configured JSON file is absent.
    """
    key = manual_hparams_key(strategy, percent)
    if key not in MANUAL_HPARAMS_IF_OPTUNA_OFF:
        raise KeyError(
            f"RUN_OPTUNA=False and USE_EXISTING_BEST_PARAMS_WHEN_OPTUNA_OFF=False, but no manual hyperparameter "
            f"JSON filename was found for strategy={strategy}, percent={percent_label(percent)}% under key '{key}'. "
            f"Required keys: {REQUIRED_HPARAM_KEYS}."
        )
    manual_filename = MANUAL_HPARAMS_IF_OPTUNA_OFF[key]
    manual_path = (HARD_CODED_PARAM_DIR / manual_filename).resolve()
    if not manual_path.exists():
        raise FileNotFoundError(
            f"RUN_OPTUNA=False and USE_EXISTING_BEST_PARAMS_WHEN_OPTUNA_OFF=False, but the manual hyperparameter "
            f"JSON file for strategy={strategy}, percent={percent_label(percent)}% was not found at {manual_path}. "
            f"Configured key='{key}', filename='{manual_filename}'."
        )
    params = load_required_hparams(
        load_json(manual_path),
        source=str(manual_path),
        strategy=strategy,
        percent=percent,
    )
    print(
        f"[Optuna Off] Using manual hyperparameters for strategy={strategy}, "
        f"percent={percent_label(percent)}% from {manual_path}."
    )
    for name in REQUIRED_HPARAM_KEYS:
        print(f" {name:20s}: {_format_hparam_value(name, params[name])}")
    return params
|
|
def resolve_job_params(
    strategy: int,
    percent: float,
    bundle: DataBundle,
    *,
    model_config: RuntimeModelConfig,
    strategy2_checkpoint_path: str | Path | None = None,
) -> dict[str, Any]:
    """Produce the hyperparameters for one (strategy, percent) training job.

    Priority: run a fresh Optuna study; otherwise reuse a previous study's
    saved best params; otherwise fall back to manually supplied JSON.
    """
    if not RUN_OPTUNA:
        if USE_EXISTING_BEST_PARAMS_WHEN_OPTUNA_OFF:
            return load_saved_best_params_if_optuna_off(strategy, percent, model_config=model_config)
        return load_manual_hparams_if_optuna_off(strategy, percent)

    banner(f"OPTUNA STUDY | Strategy {strategy} | {percent_label(percent)}%")
    return run_study(
        strategy,
        bundle,
        model_config=model_config,
        strategy2_checkpoint_path=strategy2_checkpoint_path,
    )
|
|
def read_run_config_for_eval(run_dir: Path, checkpoint_path: Path) -> dict[str, Any]:
    """Recover the run configuration needed to rebuild a model for evaluation.

    Prefers the run directory's run_config.json; otherwise falls back to the
    metadata embedded in the checkpoint file itself.
    """
    config_file = Path(run_dir) / "run_config.json"
    if config_file.exists():
        return load_json(config_file)
    # NOTE(review): weights_only=False unpickles arbitrary objects from the
    # checkpoint file — only load checkpoints from trusted sources.
    payload = torch.load(checkpoint_path, map_location="cpu", weights_only=False)
    return checkpoint_run_config_payload(payload)
|
|
def run_evaluation_for_run(
    *,
    strategy: int,
    percent: float,
    bundle: DataBundle,
    run_dir: Path,
    strategy2_checkpoint_path: str | Path | None = None,
) -> tuple[dict[str, dict[str, float]], list[dict[str, Any]]]:
    """Rebuild a trained model from a checkpoint and evaluate it on the test set.

    Resolves the checkpoint via the EVAL_* selector globals, recovers the run
    config (from run_config.json or the checkpoint payload), reconstructs the
    model, runs evaluate_model, annotates evaluation.json with the checkpoint
    used, and finally frees the model and CUDA memory.

    Returns:
        (aggregate mean/std per metric, per-sample metric rows) from
        evaluate_model.
    """
    checkpoint_path = resolve_checkpoint_path(
        run_dir=run_dir,
        selector_mode=EVAL_CHECKPOINT_MODE,
        specific_checkpoint=EVAL_SPECIFIC_CHECKPOINT,
        purpose="evaluation_checkpoint",
    )
    # When a specific checkpoint lives outside run_dir, evaluate into the
    # checkpoint's own run directory (parent of its "checkpoints" folder).
    effective_run_dir = Path(run_dir)
    if not (effective_run_dir / "run_config.json").exists() and checkpoint_path.parent.name == "checkpoints":
        effective_run_dir = checkpoint_path.parent.parent
    print(f"[Evaluation] Strategy {strategy} | {percent_label(percent)}% | checkpoint={checkpoint_path}")
    runtime_config = read_run_config_for_eval(effective_run_dir, checkpoint_path)
    set_current_job_params(runtime_config)
    model_config = RuntimeModelConfig.from_payload(runtime_config).validate()
    # Strategies 4/5 require a strategy-2 dependency checkpoint; prefer the
    # path recorded in the run config, otherwise resolve it from disk.
    if strategy in (4, 5):
        if runtime_config.get("strategy2_checkpoint_path"):
            strategy2_checkpoint_path = runtime_config["strategy2_checkpoint_path"]
        else:
            strategy2_checkpoint_path = resolve_strategy2_checkpoint_path(strategy, percent, model_config)
    elif strategy == 3 and runtime_config.get("strategy2_checkpoint_path"):
        strategy2_checkpoint_path = runtime_config["strategy2_checkpoint_path"]
    dropout_p = float(runtime_config.get("dropout_p", DEFAULT_DROPOUT_P))
    tmax = int(runtime_config.get("tmax", DEFAULT_TMAX))
    eval_model, _description, _compiled = build_model(
        strategy,
        dropout_p,
        model_config=model_config,
        strategy2_checkpoint_path=strategy2_checkpoint_path,
    )
    # Weights only; no optimizer/scheduler state is needed for evaluation.
    load_checkpoint(
        checkpoint_path,
        model=eval_model,
        device=DEVICE,
    )
    aggregate, per_sample = evaluate_model(
        model=eval_model,
        model_config=model_config,
        bundle=bundle,
        run_dir=effective_run_dir,
        strategy=strategy,
        tmax=tmax,
    )
    # Record which checkpoint produced these numbers for traceability.
    evaluation_json_path = effective_run_dir / "evaluation.json"
    evaluation_payload = load_json(evaluation_json_path)
    evaluation_payload["checkpoint_mode"] = EVAL_CHECKPOINT_MODE
    evaluation_payload["checkpoint_path"] = str(checkpoint_path)
    save_json(evaluation_json_path, evaluation_payload)
    del eval_model
    run_cuda_cleanup(context=f"evaluation strategy={strategy} percent={percent_label(percent)}%")
    return aggregate, per_sample
|
|
def run_smoke_test(
    *,
    strategy: int,
    model_config: RuntimeModelConfig,
    bundle: DataBundle,
    smoke_root: Path,
    strategy2_checkpoint_path: str | Path | None = None,
) -> None:
    """Build a fresh model and run one inference as a pre-training sanity check.

    Picks a single test sample, infers a mask with default dropout/tmax, and
    saves a three-panel comparison image plus the raw predicted mask under
    ``smoke_root``. The model is discarded (and CUDA memory released) before
    returning; no metrics are computed.
    """
    banner(f"PRE-TRAINING SMOKE TEST | Strategy {strategy} | {percent_label(bundle.percent)}%")
    smoke_root = ensure_dir(smoke_root)
    sample = bundle.test_ds[SMOKE_TEST_SAMPLE_INDEX]
    image = sample["image"].unsqueeze(0).to(DEVICE)  # add batch dim
    raw_image = sample["image"].numpy()
    raw_gt = sample["mask"].squeeze(0).numpy()
    amp_dtype = resolve_amp_dtype(AMP_DTYPE)
    use_amp = amp_autocast_enabled(DEVICE)
    # Match the autocast dtype up front, as evaluate_model does.
    if use_amp and DEVICE.type == "cuda":
        image = image.to(dtype=amp_dtype)

    model, description, compiled = build_model(
        strategy,
        DEFAULT_DROPOUT_P,
        model_config=model_config,
        strategy2_checkpoint_path=strategy2_checkpoint_path,
    )
    print_model_parameter_summary(
        model=model,
        description=f"{description} | Smoke Test",
        strategy=strategy,
        model_config=model_config,
        dropout_p=DEFAULT_DROPOUT_P,
        amp_dtype=amp_dtype,
        compiled=compiled,
    )
    pred = infer_segmentation_mask(
        model,
        image,
        DEFAULT_TMAX,
        strategy=strategy,
        use_amp=use_amp,
        amp_dtype=amp_dtype,
        use_channels_last=_use_channels_last_for_run(model_config),
    ).float()
    pred_np = pred[0, 0].detach().cpu().numpy()  # first sample, first channel
    panel_path = smoke_root / "smoke_panel.png"
    raw_mask_path = smoke_root / "smoke_prediction.png"
    _save_rgb_panel(raw_image, pred_np, raw_gt, panel_path, f"Smoke Test | Strategy {strategy}")
    PILImage.fromarray((pred_np * 255).astype(np.uint8)).save(raw_mask_path)
    del model
    run_cuda_cleanup(context=f"smoke strategy={strategy} percent={percent_label(bundle.percent)}%")
    print(
        f"[Smoke Test] Strategy {strategy} passed. "
        f"Saved panel to {panel_path.name} and mask to {raw_mask_path.name}."
    )
|
|
| """============================================================================= |
| OVERFIT TEST |
| ============================================================================= |
| """ |
|
|
# Metric series tracked per epoch during the single-batch overfit sanity test;
# defines the canonical key set (and ordering) of the overfit history dict
# built by empty_overfit_history() / load_overfit_history_for_resume().
OVERFIT_HISTORY_KEYS = (
    "dice",
    "iou",
    "loss",
    "reward",
    "actor_loss",
    "critic_loss",
    "ce_loss",
    "dice_loss",
    "entropy",
    "grad_norm",
    "action_dist",
    "reward_pos_pct",
    "pred_fg_pct",
    "gt_fg_pct",
)
|
|
def empty_overfit_history() -> dict[str, list[Any]]:
    """Build a fresh overfit-history dict with one empty list per metric key."""
    history: dict[str, list[Any]] = {}
    for key in OVERFIT_HISTORY_KEYS:
        history[key] = []
    return history
|
|
def load_overfit_history_for_resume(history_path: Path, checkpoint_payload: dict[str, Any]) -> dict[str, list[Any]]:
    """Rebuild the overfit metric history when resuming from a checkpoint.

    Prefers the on-disk history JSON, truncated to the checkpoint's epoch so
    the series and the resumed epoch counter stay aligned. If no history file
    exists, falls back to seeding each series with the single epoch_metrics
    snapshot stored inside the checkpoint payload.
    """
    history = empty_overfit_history()
    resume_epoch = int(checkpoint_payload.get("epoch", 0))

    if history_path.exists():
        stored = load_json(history_path)
        if not isinstance(stored, dict):
            raise RuntimeError(f"Expected dict history at {history_path}, found {type(stored).__name__}.")
        for key in OVERFIT_HISTORY_KEYS:
            series = stored.get(key, [])
            if isinstance(series, list):
                # Drop any entries written after the checkpointed epoch.
                history[key] = list(series[:resume_epoch])
        return history

    # Fallback: seed each series from the checkpoint's last-epoch metrics.
    last_metrics = checkpoint_payload.get("epoch_metrics", {})
    if isinstance(last_metrics, dict):
        for key in OVERFIT_HISTORY_KEYS:
            if key in last_metrics:
                history[key].append(last_metrics[key])
    return history
|
|
def _grad_diagnostics(model: nn.Module) -> dict[str, Any]:
    """Summarize gradient norms of trainable parameters, grouped by top-level module name.

    Returns a dict with the global L2 gradient norm, per-group min/max/mean/count
    statistics, and counters for NaN, Inf, and missing (None) gradients.
    """
    base = _unwrap_compiled(model)
    per_group: dict[str, list[float]] = {}
    sum_sq = 0.0
    nan_count = 0
    inf_count = 0
    missing_count = 0
    trainable_count = 0

    for param_name, param in base.named_parameters():
        if not param.requires_grad:
            continue
        trainable_count += 1
        if param.grad is None:
            # Counted as "zero grad": the parameter never received a gradient.
            missing_count += 1
            continue
        norm_value = float(param.grad.data.norm(2).item())
        if math.isnan(norm_value):
            nan_count += 1
            continue
        if math.isinf(norm_value):
            inf_count += 1
            continue
        sum_sq += norm_value ** 2
        # Group by the first dotted component, e.g. "encoder" in "encoder.layer1.weight".
        per_group.setdefault(param_name.split(".", 1)[0], []).append(norm_value)

    group_stats: dict[str, dict[str, float | int]] = {
        group: {
            "min": min(norms),
            "max": max(norms),
            "mean": sum(norms) / len(norms),
            "count": len(norms),
        }
        for group, norms in per_group.items()
    }
    return {
        "global_norm": sum_sq ** 0.5,
        "groups": group_stats,
        "n_nan": nan_count,
        "n_inf": inf_count,
        "n_zero_grad": missing_count,
        "n_total": trainable_count,
    }
|
|
def _param_diagnostics(model: nn.Module, prev_params: dict[str, torch.Tensor] | None = None) -> dict[str, dict[str, float]]:
    """Per-group parameter-norm statistics, plus update ratios when a snapshot is given.

    When ``prev_params`` (from ``_snapshot_params``) is provided, each group also
    reports ``ur_*`` stats: ||delta|| / ||param|| per parameter, a proxy for the
    effective step size relative to the weight magnitude.
    """
    base = _unwrap_compiled(model)
    collected: dict[str, dict[str, list[float]]] = {}
    for param_name, param in base.named_parameters():
        if not param.requires_grad:
            continue
        norm_value = float(param.data.norm(2).item())
        group = param_name.split(".", 1)[0]
        bucket = collected.setdefault(group, {"norms": [], "update_ratios": []})
        bucket["norms"].append(norm_value)
        if prev_params is not None and param_name in prev_params:
            step_size = float((param.data - prev_params[param_name]).norm(2).item())
            # Guard against division by ~zero-norm parameters.
            bucket["update_ratios"].append(step_size / max(norm_value, 1e-12))

    summary: dict[str, dict[str, float]] = {}
    for group, bucket in collected.items():
        norms = bucket["norms"]
        stats: dict[str, float] = {
            "p_min": min(norms),
            "p_max": max(norms),
            "p_mean": sum(norms) / len(norms),
        }
        ratios = bucket["update_ratios"]
        if ratios:
            stats["ur_min"] = min(ratios)
            stats["ur_max"] = max(ratios)
            stats["ur_mean"] = sum(ratios) / len(ratios)
        summary[group] = stats
    return summary
|
|
def _snapshot_params(model: nn.Module) -> dict[str, torch.Tensor]:
    """Return detached clones of all trainable parameters, keyed by name."""
    snapshot: dict[str, torch.Tensor] = {}
    for name, param in _unwrap_compiled(model).named_parameters():
        if param.requires_grad:
            snapshot[name] = param.data.detach().clone()
    return snapshot
|
|
def _action_distribution(
    model: nn.Module,
    image: torch.Tensor,
    seg: torch.Tensor,
    tmax: int,
    use_amp: bool,
    amp_dtype: torch.dtype,
    *,
    strategy: int | None = None,
) -> tuple[list[dict[int, float]], torch.Tensor]:
    """Roll out ``tmax`` deterministic (greedy) refinement steps and record action usage.

    Returns one dict per step mapping action index -> percentage of pixels that
    chose that action, plus the final segmentation mask. For strategies 3/4 with
    the refinement runtime the rollout operates on a soft mask and the returned
    mask is thresholded to binary; otherwise ``seg`` is updated in the masked-image
    policy regime and returned as-is.
    """
    distributions: list[dict[int, float]] = []
    refinement_context: dict[str, torch.Tensor] | None = None
    if strategy in (3, 4) and _uses_refinement_runtime(model, strategy=strategy):
        # Precompute encoder features / decoder prior once; reused for every step.
        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
            refinement_context = model.prepare_refinement_context(image)
        seg = seg.float()
    for _step in range(tmax):
        if refinement_context is not None:
            with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                state, _ = model.forward_refinement_state(
                    refinement_context["base_features"],
                    seg,
                    refinement_context["decoder_prob"],
                )
                policy_logits, _ = model.forward_from_state(state)
                # stochastic=False -> greedy argmax actions for a deterministic rollout.
                actions, _, _ = sample_actions(policy_logits, stochastic=False)
                seg = apply_actions(seg, actions, soft_update_step=DEFAULT_MASK_UPDATE_STEP).to(dtype=seg.dtype)
        else:
            with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                # Policy input is the image gated by the current mask.
                masked = image * seg
                policy_logits = model.forward_policy_only(masked)
                actions, _, _ = sample_actions(policy_logits, stochastic=False)
                seg = apply_actions(seg, actions, num_actions=policy_logits.shape[1]).to(dtype=seg.dtype)
        # Percentage of pixels per action for this step (guard empty tensors).
        total_pixels = max(actions.numel(), 1)
        step_dist: dict[int, float] = {}
        action_count = int(policy_logits.shape[1])
        for action_idx in range(action_count):
            step_dist[action_idx] = float((actions == action_idx).sum().item()) / total_pixels * 100.0
        distributions.append(step_dist)
    if refinement_context is not None:
        # Soft refinement masks are binarized before being returned for metrics.
        return distributions, threshold_binary_mask(seg.float()).float()
    return distributions, seg
|
|
| def _numerical_health_check(outputs_dict: dict[str, Any], prefix: str = "") -> list[str]: |
| alerts: list[str] = [] |
| for name, value in outputs_dict.items(): |
| if value is None: |
| continue |
| if isinstance(value, (int, float)): |
| if math.isnan(value): |
| alerts.append(f"{prefix}{name} = NaN") |
| elif math.isinf(value): |
| alerts.append(f"{prefix}{name} = Inf") |
| continue |
| if torch.is_tensor(value): |
| if torch.isnan(value).any(): |
| alerts.append(f"{prefix}{name} contains NaN") |
| if torch.isinf(value).any(): |
| alerts.append(f"{prefix}{name} contains Inf") |
| return alerts |
|
|
def _batch_binary_metrics(pred: torch.Tensor, gt: torch.Tensor) -> tuple[list[float], list[float]]:
    """Compute per-sample dice and IoU for a batch of binary masks.

    Tensors are moved to CPU and cast to uint8 before metric computation;
    returns parallel lists (dices, ious), one entry per batch element.
    """
    pred_arr = pred.detach().cpu().numpy().astype(np.uint8)
    gt_arr = gt.detach().cpu().numpy().astype(np.uint8)
    dice_scores: list[float] = []
    iou_scores: list[float] = []
    for sample_pred, sample_gt in zip(pred_arr, gt_arr):
        per_sample = compute_all_metrics(sample_pred, sample_gt)
        dice_scores.append(float(per_sample["dice"]))
        iou_scores.append(float(per_sample["iou"]))
    return dice_scores, iou_scores
|
|
def run_overfit_test(
    *,
    strategy: int,
    model_config: RuntimeModelConfig,
    bundle: DataBundle,
    overfit_root: Path,
    strategy2_checkpoint_path: str | Path | None = None,
) -> dict[str, Any]:
    """Train on a small fixed set of batches to verify the model can overfit.

    Pre-training sanity check: grabs up to OVERFIT_N_BATCHES batches once,
    trains on exactly those for OVERFIT_N_EPOCHS epochs, and logs per-epoch
    dice/IoU/loss plus RL diagnostics (rewards, greedy action distributions,
    gradient/parameter statistics, NaN/Inf alerts). Supports resuming from a
    checkpoint and writes best/latest/periodic checkpoints plus history and
    summary JSON under ``overfit_root``. Returns the summary dict merged with
    the full per-epoch ``history``.
    """
    # --- Setup: AMP flags, output dirs, and a persisted run_config (also used
    # --- later to validate resume-checkpoint identity).
    amp_dtype = resolve_amp_dtype(AMP_DTYPE)
    use_amp = amp_autocast_enabled(DEVICE)
    use_channels_last = _use_channels_last_for_run(model_config)
    overfit_root = ensure_dir(overfit_root)
    ckpt_dir = ensure_dir(overfit_root / "checkpoints")
    history_path = checkpoint_history_path(overfit_root, "overfit")
    run_config = {
        "project_dir": str(PROJECT_DIR),
        "data_root": str(DATA_ROOT),
        "run_type": "overfit",
        "strategy": strategy,
        "dataset_percent": bundle.percent,
        "dataset_name": bundle.split_payload["dataset_name"],
        "dataset_split_policy": bundle.split_payload["dataset_split_policy"],
        "dataset_splits_path": bundle.split_payload["dataset_splits_path"],
        "split_type": bundle.split_payload["split_type"],
        "train_subset_key": bundle.split_payload["train_subset_key"],
        "max_epochs": OVERFIT_N_EPOCHS,
        "head_lr": OVERFIT_HEAD_LR,
        "encoder_lr": OVERFIT_ENCODER_LR,
        "weight_decay": DEFAULT_WEIGHT_DECAY,
        "dropout_p": DEFAULT_DROPOUT_P,
        "tmax": DEFAULT_TMAX,
        "entropy_lr": DEFAULT_ENTROPY_LR,
        "gamma": DEFAULT_GAMMA,
        "grad_clip_norm": DEFAULT_GRAD_CLIP_NORM,
        "train_resume_mode": TRAIN_RESUME_MODE,
        "train_resume_specific_checkpoint": TRAIN_RESUME_SPECIFIC_CHECKPOINT,
    }
    run_config.update(model_config.to_payload())
    if strategy2_checkpoint_path is not None:
        run_config["strategy2_checkpoint_path"] = str(Path(strategy2_checkpoint_path))
    save_json(overfit_root / "run_config.json", run_config)
    set_current_job_params(run_config)

    banner(f"OVERFIT TEST | Strategy {strategy} | {percent_label(bundle.percent)}%")
    print(
        f"[Overfit] Fixed batches={OVERFIT_N_BATCHES}, epochs={OVERFIT_N_EPOCHS}, "
        f"head_lr={OVERFIT_HEAD_LR:.2e}, encoder_lr={OVERFIT_ENCODER_LR:.2e}"
    )

    # --- Capture the fixed batches once; every epoch reuses exactly these.
    fixed_batches: list[dict[str, Any]] = []
    for batch_index, batch in enumerate(bundle.train_loader):
        fixed_batches.append(to_device(batch, DEVICE))
        if batch_index + 1 >= OVERFIT_N_BATCHES:
            break
    if not fixed_batches:
        raise RuntimeError("Overfit test could not collect any training batches.")
    if len(fixed_batches) < OVERFIT_N_BATCHES:
        print(f"[Overfit] Warning: only {len(fixed_batches)} train batch(es) available.")

    # --- Build the model; when resuming a strategy-3/4 run, align the model's
    # --- runtime configuration with the checkpoint before loading weights.
    model, description, compiled = build_model(
        strategy,
        DEFAULT_DROPOUT_P,
        model_config=model_config,
        strategy2_checkpoint_path=strategy2_checkpoint_path,
    )
    resume_checkpoint_path = resolve_train_resume_checkpoint_path(overfit_root)
    if resume_checkpoint_path is not None and strategy in (3, 4):
        _configure_model_from_checkpoint_path(model, resume_checkpoint_path)
    print_model_parameter_summary(
        model=model,
        description=f"{description} | Overfit Test",
        strategy=strategy,
        model_config=model_config,
        dropout_p=DEFAULT_DROPOUT_P,
        amp_dtype=amp_dtype,
        compiled=compiled,
    )

    # --- Optimizer, AMP scaler, and (for RL strategies) the learnable entropy
    # --- temperature alpha with its own optimizer.
    optimizer = make_optimizer(
        model,
        strategy,
        head_lr=OVERFIT_HEAD_LR,
        encoder_lr=OVERFIT_ENCODER_LR,
        weight_decay=DEFAULT_WEIGHT_DECAY,
    )
    scaler = make_grad_scaler(enabled=use_amp, amp_dtype=amp_dtype, device=DEVICE)
    log_alpha: torch.Tensor | None = None
    alpha_optimizer: Adam | None = None
    # Target entropy scales with log of the action count (floor of 2 actions).
    target_entropy = DEFAULT_ENTROPY_TARGET_RATIO * math.log(max(_model_policy_action_count(model) or NUM_ACTIONS, 2))
    if strategy != 2:
        log_alpha = torch.tensor(
            math.log(max(DEFAULT_ENTROPY_ALPHA_INIT, 1e-8)),
            dtype=torch.float32,
            device=DEVICE,
            requires_grad=True,
        )
        alpha_optimizer = Adam([log_alpha], lr=DEFAULT_ENTROPY_LR)

    # --- Resume handling: restore optimizer/scaler/alpha state, truncated
    # --- history, best metric, and elapsed-time accounting.
    history = empty_overfit_history()
    prev_loss: float | None = None
    best_dice = -1.0
    elapsed_before_resume = 0.0
    start_epoch = 1
    resume_source: dict[str, Any] | None = None
    if resume_checkpoint_path is not None:
        checkpoint_payload = load_checkpoint(
            resume_checkpoint_path,
            model=model,
            optimizer=optimizer,
            scheduler=None,
            scaler=scaler,
            device=DEVICE,
            log_alpha=log_alpha,
            alpha_optimizer=alpha_optimizer,
            expected_run_type="overfit",
            require_run_metadata=True,
        )
        # Refuse to resume if the checkpoint was produced under a different config.
        validate_resume_checkpoint_identity(
            run_config,
            checkpoint_run_config_payload(checkpoint_payload),
            checkpoint_path=resume_checkpoint_path,
        )
        start_epoch = int(checkpoint_payload["epoch"]) + 1
        best_dice = float(checkpoint_payload["best_metric_value"])
        elapsed_before_resume = float(checkpoint_payload.get("elapsed_seconds", 0.0))
        history = load_overfit_history_for_resume(history_path, checkpoint_payload)
        resume_source = {
            "checkpoint_path": str(Path(resume_checkpoint_path).resolve()),
            "checkpoint_epoch": int(checkpoint_payload["epoch"]),
            "checkpoint_run_type": checkpoint_payload.get("run_type", "overfit"),
        }
        print(
            f"[Resume] overfit run continuing from {resume_checkpoint_path} "
            f"at epoch {start_epoch}/{OVERFIT_N_EPOCHS}."
        )
        if history["loss"]:
            prev_loss = float(history["loss"][-1])
    prev_params = _snapshot_params(model)
    start_time = time.time()

    for epoch in range(start_epoch, OVERFIT_N_EPOCHS + 1):
        # Verbose diagnostics early on, at the print interval, and at the end.
        full_dump = epoch <= 5 or epoch % max(OVERFIT_PRINT_EVERY, 1) == 0 or epoch == OVERFIT_N_EPOCHS
        epoch_losses: list[float] = []
        epoch_dices: list[float] = []
        epoch_ious: list[float] = []
        epoch_rewards: list[float] = []
        epoch_actor: list[float] = []
        epoch_critic: list[float] = []
        epoch_ce: list[float] = []
        epoch_dice_losses: list[float] = []
        epoch_entropy: list[float] = []
        epoch_grad_norms: list[float] = []
        epoch_action_dist: list[list[dict[int, float]]] = []
        epoch_reward_pos_pct: list[float] = []
        epoch_pred_fg_pct: list[float] = []
        epoch_gt_fg_pct: list[float] = []
        epoch_alerts: list[str] = []

        for batch in fixed_batches:
            # --- One optimizer step, dispatched by strategy: 2 = supervised,
            # --- 3/4 = refinement RL, otherwise the base RL train step.
            if strategy == 2:
                metrics = train_step_supervised(
                    model,
                    batch,
                    optimizer,
                    scaler=scaler,
                    use_amp=use_amp,
                    amp_dtype=amp_dtype,
                    use_channels_last=use_channels_last,
                    grad_clip_norm=DEFAULT_GRAD_CLIP_NORM,
                    ce_weight=DEFAULT_CE_WEIGHT,
                    dice_weight=DEFAULT_DICE_WEIGHT,
                )
            elif strategy in (3, 4):
                assert log_alpha is not None and alpha_optimizer is not None
                metrics = train_step_strategy3(
                    model,
                    batch,
                    optimizer,
                    gamma=DEFAULT_GAMMA,
                    tmax=DEFAULT_TMAX,
                    critic_loss_weight=DEFAULT_CRITIC_LOSS_WEIGHT,
                    log_alpha=log_alpha,
                    alpha_optimizer=alpha_optimizer,
                    target_entropy=target_entropy,
                    ce_weight=DEFAULT_CE_WEIGHT,
                    dice_weight=DEFAULT_DICE_WEIGHT,
                    grad_clip_norm=DEFAULT_GRAD_CLIP_NORM,
                    scaler=scaler,
                    use_amp=use_amp,
                    amp_dtype=amp_dtype,
                    stepwise_backward=STEPWISE_BACKWARD,
                    use_channels_last=use_channels_last,
                )
            else:
                assert log_alpha is not None and alpha_optimizer is not None
                metrics = train_step(
                    model,
                    batch,
                    optimizer,
                    gamma=DEFAULT_GAMMA,
                    tmax=DEFAULT_TMAX,
                    critic_loss_weight=DEFAULT_CRITIC_LOSS_WEIGHT,
                    log_alpha=log_alpha,
                    alpha_optimizer=alpha_optimizer,
                    target_entropy=target_entropy,
                    ce_weight=DEFAULT_CE_WEIGHT,
                    dice_weight=DEFAULT_DICE_WEIGHT,
                    grad_clip_norm=DEFAULT_GRAD_CLIP_NORM,
                    scaler=scaler,
                    use_amp=use_amp,
                    amp_dtype=amp_dtype,
                    stepwise_backward=STEPWISE_BACKWARD,
                    use_channels_last=use_channels_last,
                )

            # Accumulate training metrics and check them for NaN/Inf.
            epoch_losses.append(float(metrics["loss"]))
            epoch_rewards.append(float(metrics["mean_reward"]))
            epoch_actor.append(float(metrics["actor_loss"]))
            epoch_critic.append(float(metrics["critic_loss"]))
            epoch_ce.append(float(metrics["ce_loss"]))
            epoch_dice_losses.append(float(metrics["dice_loss"]))
            epoch_entropy.append(float(metrics["entropy"]))
            epoch_grad_norms.append(float(metrics["grad_norm"]))
            epoch_alerts.extend(
                _numerical_health_check(
                    {
                        "loss": metrics["loss"],
                        "actor_loss": metrics["actor_loss"],
                        "critic_loss": metrics["critic_loss"],
                        "reward": metrics["mean_reward"],
                        "entropy": metrics["entropy"],
                        "grad_norm": metrics["grad_norm"],
                        "ce_loss": metrics["ce_loss"],
                        "dice_loss": metrics["dice_loss"],
                    },
                    prefix="train:",
                )
            )

            # --- Immediate greedy evaluation on the same batch just trained on.
            model.eval()
            with torch.inference_mode():
                image = batch["image"]
                gt_mask = batch["mask"].float()
                if use_channels_last and image.ndim == 4 and image.device.type == "cuda":
                    image = image.contiguous(memory_format=torch.channels_last)
                if use_amp and DEVICE.type == "cuda":
                    image = image.to(dtype=amp_dtype)

                if strategy == 2:
                    # Supervised path: single forward pass, thresholded sigmoid.
                    with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                        logits = model(image)
                    pred = threshold_binary_mask(torch.sigmoid(logits)).float()
                else:
                    refinement_runtime = _uses_refinement_runtime(model, strategy=strategy)
                    if strategy in (3, 4) and refinement_runtime:
                        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                            init_mask = model.prepare_refinement_context(image)["decoder_prob"].float()
                    else:
                        init_mask = torch.ones(
                            image.shape[0],
                            1,
                            image.shape[2],
                            image.shape[3],
                            device=image.device,
                            dtype=image.dtype,
                        )
                    # NOTE(review): for strategies 3/4 this unconditionally replaces
                    # init_mask with the thresholded decoder output, discarding the
                    # soft decoder_prob computed just above in the refinement branch.
                    # Looks intentional for hard-mask rollouts, but confirm.
                    if strategy in (3, 4):
                        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                            init_mask = threshold_binary_mask(torch.sigmoid(model.forward_decoder(image))).float()
                    action_dist, pred = _action_distribution(
                        model,
                        image,
                        init_mask,
                        DEFAULT_TMAX,
                        use_amp,
                        amp_dtype,
                        strategy=strategy,
                    )
                    epoch_action_dist.append(action_dist)

                    # --- One-step reward diagnostics: fraction of pixels with a
                    # --- positive reward after the first greedy action.
                    if strategy in (3, 4) and refinement_runtime:
                        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                            refinement_context = model.prepare_refinement_context(image)
                            soft_init_mask = refinement_context["decoder_prob"].float()
                            state_t, _ = model.forward_refinement_state(
                                refinement_context["base_features"],
                                soft_init_mask,
                                refinement_context["decoder_prob"],
                            )
                            policy_logits, _ = model.forward_from_state(state_t)
                            first_actions, _, _ = sample_actions(policy_logits, stochastic=False)
                            first_seg = apply_actions(soft_init_mask, first_actions, soft_update_step=DEFAULT_MASK_UPDATE_STEP, num_actions=policy_logits.shape[1])
                            reward_map = compute_refinement_reward(
                                soft_init_mask,
                                first_seg,
                                gt_mask.float(),
                                decoder_prior=refinement_context["decoder_prob"].detach(),
                            )
                    else:
                        with autocast_ctx(enabled=use_amp, device=image.device, amp_dtype=amp_dtype):
                            masked = image * init_mask
                            policy_logits = model.forward_policy_only(masked)
                            first_actions, _, _ = sample_actions(policy_logits, stochastic=False)
                            first_seg = apply_actions(init_mask, first_actions, num_actions=policy_logits.shape[1])
                            # Reward = reduction in per-pixel squared error vs ground truth.
                            reward_map = (init_mask - gt_mask).pow(2) - (first_seg - gt_mask).pow(2)
                    epoch_reward_pos_pct.append(float((reward_map > 0).float().mean().item() * 100.0))

                # Foreground coverage of prediction vs ground truth, plus dice/IoU.
                pred_fg_pct = float(pred.sum().item()) / max(pred.numel(), 1) * 100.0
                gt_fg_pct = float(gt_mask.sum().item()) / max(gt_mask.numel(), 1) * 100.0
                epoch_pred_fg_pct.append(pred_fg_pct)
                epoch_gt_fg_pct.append(gt_fg_pct)
                dice_values, iou_values = _batch_binary_metrics(pred.float(), gt_mask.float())
                epoch_dices.extend(dice_values)
                epoch_ious.extend(iou_values)
                epoch_alerts.extend(
                    _numerical_health_check(
                        {"pred": pred, "gt_mask": gt_mask},
                        prefix="eval:",
                    )
                )
            model.train()

        # --- End-of-epoch diagnostics: gradient stats from the last step and
        # --- parameter-update ratios vs the previous epoch's snapshot.
        grad_stats = _grad_diagnostics(model)
        param_stats = _param_diagnostics(model, prev_params)
        prev_params = _snapshot_params(model)

        avg_loss = float(np.mean(epoch_losses)) if epoch_losses else 0.0
        avg_dice = float(np.mean(epoch_dices)) if epoch_dices else 0.0
        avg_iou = float(np.mean(epoch_ious)) if epoch_ious else 0.0
        avg_reward = float(np.mean(epoch_rewards)) if epoch_rewards else 0.0
        avg_actor = float(np.mean(epoch_actor)) if epoch_actor else 0.0
        avg_critic = float(np.mean(epoch_critic)) if epoch_critic else 0.0
        avg_ce = float(np.mean(epoch_ce)) if epoch_ce else 0.0
        avg_dice_loss = float(np.mean(epoch_dice_losses)) if epoch_dice_losses else 0.0
        avg_entropy = float(np.mean(epoch_entropy)) if epoch_entropy else 0.0
        avg_grad_norm = float(np.mean(epoch_grad_norms)) if epoch_grad_norms else 0.0
        avg_reward_pos = float(np.mean(epoch_reward_pos_pct)) if epoch_reward_pos_pct else 0.0
        avg_pred_fg = float(np.mean(epoch_pred_fg_pct)) if epoch_pred_fg_pct else 0.0
        avg_gt_fg = float(np.mean(epoch_gt_fg_pct)) if epoch_gt_fg_pct else 0.0

        # Average the per-step action distributions across all batches this epoch.
        avg_action_dist: list[dict[int, float]] = []
        if epoch_action_dist:
            for step_idx in range(DEFAULT_TMAX):
                step_dist: dict[int, float] = {}
                action_indices = sorted({action_idx for run_dist in epoch_action_dist if step_idx < len(run_dist) for action_idx in run_dist[step_idx]})
                for action_idx in action_indices:
                    values = [
                        run_dist[step_idx][action_idx]
                        for run_dist in epoch_action_dist
                        if step_idx < len(run_dist)
                    ]
                    step_dist[action_idx] = float(np.mean(values)) if values else 0.0
                avg_action_dist.append(step_dist)

        # Append this epoch to the history and persist it immediately so a crash
        # mid-run keeps the series aligned with the last saved checkpoint.
        history["dice"].append(avg_dice)
        history["iou"].append(avg_iou)
        history["loss"].append(avg_loss)
        history["reward"].append(avg_reward)
        history["actor_loss"].append(avg_actor)
        history["critic_loss"].append(avg_critic)
        history["ce_loss"].append(avg_ce)
        history["dice_loss"].append(avg_dice_loss)
        history["entropy"].append(avg_entropy)
        history["grad_norm"].append(avg_grad_norm)
        history["action_dist"].append(avg_action_dist)
        history["reward_pos_pct"].append(avg_reward_pos)
        history["pred_fg_pct"].append(avg_pred_fg)
        history["gt_fg_pct"].append(avg_gt_fg)
        save_json(history_path, history)

        loss_delta = avg_loss - prev_loss if prev_loss is not None else 0.0
        prev_loss = avg_loss
        if epoch_alerts:
            print(f"[Overfit][Epoch {epoch}] Numerical alerts: {' | '.join(epoch_alerts)}")

        # --- Console reporting: verbose dump on selected epochs, terse otherwise.
        if full_dump:
            current_alpha = float(log_alpha.exp().detach().item()) if log_alpha is not None else 0.0
            print(
                f"[Overfit][Epoch {epoch:03d}] loss={avg_loss:.6f} (delta={loss_delta:+.6f}) "
                f"dice={avg_dice:.4f} iou={avg_iou:.4f} reward={avg_reward:+.6f} "
                f"entropy={avg_entropy:.6f} alpha={current_alpha:.4f}"
            )
            print(
                f"[Overfit][Epoch {epoch:03d}] ce={avg_ce:.6f} dice_l={avg_dice_loss:.6f} "
                f"grad_norm={avg_grad_norm:.6f} global_grad={grad_stats['global_norm']:.6f}"
            )
            if avg_action_dist:
                first = avg_action_dist[0]
                last = avg_action_dist[-1]
                print(
                    f"[Overfit][Epoch {epoch:03d}] action step0={first} step_last={last} "
                    f"reward_pos={avg_reward_pos:.2f}%"
                )
            print(
                f"[Overfit][Epoch {epoch:03d}] pred_fg={avg_pred_fg:.2f}% gt_fg={avg_gt_fg:.2f}% "
                f"param_groups={list(param_stats.keys())}"
            )
        else:
            print(
                f"[Overfit][Epoch {epoch:03d}] loss={avg_loss:.6f} dice={avg_dice:.4f} "
                f"iou={avg_iou:.4f} reward={avg_reward:+.6f}"
            )

        # --- Checkpointing: best-by-dice, optional per-epoch latest, periodic.
        row = {
            "epoch": epoch,
            "dice": avg_dice,
            "iou": avg_iou,
            "loss": avg_loss,
            "reward": avg_reward,
            "actor_loss": avg_actor,
            "critic_loss": avg_critic,
            "ce_loss": avg_ce,
            "dice_loss": avg_dice_loss,
            "entropy": avg_entropy,
            "grad_norm": avg_grad_norm,
            "action_dist": avg_action_dist,
            "reward_pos_pct": avg_reward_pos,
            "pred_fg_pct": avg_pred_fg,
            "gt_fg_pct": avg_gt_fg,
        }
        if avg_dice > best_dice:
            best_dice = avg_dice
            save_checkpoint(
                ckpt_dir / "best.pt",
                run_type="overfit",
                model=model,
                optimizer=optimizer,
                scheduler=None,
                scaler=scaler,
                epoch=epoch,
                best_metric_value=best_dice,
                best_metric_name="overfit_dice",
                run_config=run_config,
                epoch_metrics=row,
                patience_counter=0,
                elapsed_seconds=elapsed_before_resume + (time.time() - start_time),
                log_alpha=log_alpha,
                alpha_optimizer=alpha_optimizer,
                resume_source=resume_source,
            )

        if SAVE_LATEST_EVERY_EPOCH:
            save_checkpoint(
                ckpt_dir / "latest.pt",
                run_type="overfit",
                model=model,
                optimizer=optimizer,
                scheduler=None,
                scaler=scaler,
                epoch=epoch,
                best_metric_value=best_dice,
                best_metric_name="overfit_dice",
                run_config=run_config,
                epoch_metrics=row,
                patience_counter=0,
                elapsed_seconds=elapsed_before_resume + (time.time() - start_time),
                log_alpha=log_alpha,
                alpha_optimizer=alpha_optimizer,
                resume_source=resume_source,
            )
        if CHECKPOINT_EVERY_N_EPOCHS > 0 and epoch % CHECKPOINT_EVERY_N_EPOCHS == 0:
            save_checkpoint(
                ckpt_dir / f"epoch_{epoch:04d}.pt",
                run_type="overfit",
                model=model,
                optimizer=optimizer,
                scheduler=None,
                scaler=scaler,
                epoch=epoch,
                best_metric_value=best_dice,
                best_metric_name="overfit_dice",
                run_config=run_config,
                epoch_metrics=row,
                patience_counter=0,
                elapsed_seconds=elapsed_before_resume + (time.time() - start_time),
                log_alpha=log_alpha,
                alpha_optimizer=alpha_optimizer,
                resume_source=resume_source,
            )

    # --- Wrap-up: summary JSON, console report, and GPU cleanup.
    peak_dice = max(history["dice"]) if history["dice"] else 0.0
    final_dice = history["dice"][-1] if history["dice"] else 0.0
    summary = {
        "run_type": "overfit",
        "strategy": strategy,
        "peak_dice": peak_dice,
        "final_dice": final_dice,
        "description": description,
        "resumed": resume_source is not None,
        "elapsed_seconds": elapsed_before_resume + (time.time() - start_time),
        "final_epoch": max(len(history["dice"]), start_epoch - 1),
    }
    if resume_source is not None:
        summary["resume_source"] = resume_source
    save_json(overfit_root / "summary.json", summary)
    print(
        f"[Overfit] Strategy {strategy} | {percent_label(bundle.percent)}% | "
        f"peak_dice={peak_dice:.4f}, final_dice={final_dice:.4f}"
    )

    del model
    run_cuda_cleanup(context=f"overfit strategy={strategy} percent={percent_label(bundle.percent)}%")
    return {**summary, "history": history}
|
|
def run_configured_overfit_tests(
    bundles: dict[float, DataBundle],
    *,
    model_config: RuntimeModelConfig,
) -> None:
    """Run the overfit sanity test for every configured percent/strategy pair."""
    banner("OVERFIT TEST MODE")
    for percent in DATASET_PERCENTS:
        data_bundle = bundles[percent]
        for strategy in STRATEGIES:
            # Strategies 4/5 (and strategy 3 when bootstrapping) warm-start
            # from a strategy-2 checkpoint.
            needs_s2_weights = strategy in (4, 5) or (
                strategy == 3 and STRATEGY3_BOOTSTRAP_FROM_STRATEGY2
            )
            s2_ckpt = (
                resolve_strategy2_checkpoint_path(strategy, percent, model_config)
                if needs_s2_weights
                else None
            )
            run_overfit_test(
                strategy=strategy,
                model_config=model_config,
                bundle=data_bundle,
                overfit_root=strategy_root_for_percent(strategy, percent, model_config) / "overfit_test",
                strategy2_checkpoint_path=s2_ckpt,
            )
|
|
| """============================================================================= |
| OPTUNA + ORCHESTRATION |
| ============================================================================= |
| """ |
|
|
def strategy_epochs(strategy: int) -> int:
    """Look up the configured maximum epoch budget for a training strategy."""
    epoch_budgets = {
        1: STRATEGY_1_MAX_EPOCHS,
        2: STRATEGY_2_MAX_EPOCHS,
        3: STRATEGY_3_MAX_EPOCHS,
        4: STRATEGY_4_MAX_EPOCHS,
        5: STRATEGY_5_MAX_EPOCHS,
    }
    if strategy not in epoch_budgets:
        raise ValueError(f"Unsupported strategy for epoch selection: {strategy}")
    return epoch_budgets[strategy]
|
|
def suggest_hyperparameters(trial: optuna.trial.Trial, strategy: int) -> dict[str, Any]:
    """Sample one hyperparameter configuration from the Optuna trial.

    The suggest order is fixed (head_lr, encoder_lr, weight_decay, dropout_p,
    then the RL extras) so trial parameter names stay consistent across runs.
    Strategy 2 (supervised) keeps the default tmax/entropy_lr values.
    """
    params: dict[str, Any] = {}
    params["head_lr"] = trial.suggest_float("head_lr", HEAD_LR_RANGE[0], HEAD_LR_RANGE[1], log=True)
    # Encoder LR is capped at the sampled head LR so the backbone never trains faster.
    params["encoder_lr"] = trial.suggest_float(
        "encoder_lr", ENCODER_LR_RANGE[0], min(ENCODER_LR_RANGE[1], params["head_lr"]), log=True
    )
    params["weight_decay"] = trial.suggest_float("weight_decay", WEIGHT_DECAY_RANGE[0], WEIGHT_DECAY_RANGE[1], log=True)
    params["dropout_p"] = trial.suggest_float("dropout_p", DROPOUT_P_RANGE[0], DROPOUT_P_RANGE[1])
    params["tmax"] = DEFAULT_TMAX
    params["entropy_lr"] = DEFAULT_ENTROPY_LR
    if strategy != 2:
        params["tmax"] = trial.suggest_int("tmax", TMAX_RANGE[0], TMAX_RANGE[1])
        params["entropy_lr"] = trial.suggest_float("entropy_lr", ENTROPY_LR_RANGE[0], ENTROPY_LR_RANGE[1], log=True)
    return params
|
|
| def _format_hparam_value(key: str, value: Any) -> str: |
| if isinstance(value, float): |
| if key in {"head_lr", "encoder_lr", "entropy_lr"}: |
| return f"{value:.3e}" |
| return f"{value:.6g}" |
| return str(value) |
|
|
def log_optuna_trial_start(
    *,
    study_name: str,
    strategy: int,
    bundle: DataBundle,
    trial: optuna.trial.Trial,
    trial_dir: Path,
    params: dict[str, Any],
    max_epochs: int,
) -> None:
    """Print a banner describing a newly started Optuna trial (via tqdm.write)."""
    separator = "-" * 80
    header = [
        "",
        separator,
        f"OPTUNA TRIAL START | Strategy {strategy} | {percent_label(bundle.percent)}% | Trial {trial.number:03d}",
        separator,
        f"Study name : {study_name}",
        f"Run dir : {trial_dir}",
        f"Max epochs : {max_epochs}",
        f"Objective metric : {STUDY_OBJECTIVE_METRIC}",
    ]
    # Only log hyperparameters the sampler actually produced for this strategy.
    for key in ("head_lr", "encoder_lr", "weight_decay", "dropout_p", "tmax", "entropy_lr"):
        if key in params:
            header.append(f"{key:22s}: {_format_hparam_value(key, params[key])}")
    tqdm.write("\n".join(header))
|
|
def log_optuna_trial_result(
    *,
    strategy: int,
    bundle: DataBundle,
    trial: optuna.trial.Trial,
    metric_value: float,
    aggregate: dict[str, dict[str, float]],
) -> None:
    """Report a finished trial's best validation and mean test IoU (via tqdm.write)."""
    message = (
        f"[Trial {trial.number:03d} | Strategy {strategy} | {percent_label(bundle.percent)}%] "
        f"completed: best_val_iou={metric_value:.4f}, best_test_iou={aggregate['iou']['mean']:.4f}"
    )
    tqdm.write(message)
|
|
def study_paths_for(
    strategy: int,
    percent: float,
    model_config: RuntimeModelConfig | None = None,
) -> tuple[Path, Path, Path]:
    """Return (strategy_root, study_root, trials_root) for a strategy/percent pair."""
    strategy_root = (
        RUNS_ROOT
        / MODEL_NAME
        / f"pct_{percent_label(percent)}"
        / strategy_dir_name(strategy, model_config)
    )
    return strategy_root, strategy_root / "study", strategy_root / "trials"
|
|
def manual_hparams_key(strategy: int, percent: float) -> str:
    """Build the lookup key for a (strategy, percent) pair in manual-hparam maps."""
    label = percent_label(percent)
    return f"{strategy}:{label}"
|
|
def reset_study_artifacts(strategy: int, percent: float, *, model_config: RuntimeModelConfig) -> None:
    """Delete cached Optuna study/trial directories for one strategy/percent pair."""
    strategy_root, study_root, trials_root = study_paths_for(strategy, percent, model_config)
    existing = [path for path in (study_root, trials_root) if path.exists()]
    for path in existing:
        shutil.rmtree(path)
    if existing:
        print(
            f"[Optuna Reset] Removed cached study artifacts for strategy={strategy}, "
            f"percent={percent_label(percent)}% under {strategy_root}."
        )
    else:
        print(
            f"[Optuna Reset] No existing study artifacts found for strategy={strategy}, "
            f"percent={percent_label(percent)}%."
        )
|
|
def pruner_for_run() -> optuna.pruners.BasePruner:
    """Median pruner when trial pruning is enabled, otherwise a no-op pruner."""
    if not USE_TRIAL_PRUNING:
        return optuna.pruners.NopPruner()
    return optuna.pruners.MedianPruner(n_warmup_steps=TRIAL_PRUNER_WARMUP_STEPS)
|
|
def run_single_job(
    *,
    strategy: int,
    model_config: RuntimeModelConfig,
    bundle: DataBundle,
    run_dir: Path,
    params: dict[str, Any],
    max_epochs: int,
    trial: optuna.trial.Trial | None,
    strategy2_checkpoint_path: str | Path | None = None,
    resume_checkpoint_path: Path | None = None,
    retrying_from_trial_number: int | None = None,
) -> tuple[dict[str, Any], list[dict[str, Any]], dict[str, dict[str, float]]]:
    """Run one training job (an Optuna trial or a final retrain) inside *run_dir*.

    Writes ``run_config.json``, builds the model, trains it, and — for final
    runs only — frees the model and then runs evaluation on the same run dir.

    Args:
        strategy: Training strategy id forwarded to model building/training.
        model_config: Runtime backbone/decoder configuration.
        bundle: Data bundle (splits/loaders) for the current dataset percent.
        run_dir: Output directory for this job's artifacts.
        params: Hyperparameters for this job (``head_lr``, ``encoder_lr``, ...).
        max_epochs: Epoch budget for training.
        trial: The Optuna trial when running inside a study, else ``None``.
        strategy2_checkpoint_path: Optional strategy-2 checkpoint to bootstrap from.
        resume_checkpoint_path: Optional checkpoint to resume training from.
        retrying_from_trial_number: Trial number this job is retrying, if any.

    Returns:
        ``(summary, history, aggregate)`` where ``aggregate`` is ``{}`` for
        trial runs (evaluation is skipped during hyperparameter search).
    """
    # Publish *params* so _job_param() lookups below (and in downstream
    # helpers) resolve against this job's parameters.
    set_current_job_params(params)
    DEFAULT_ENTROPY_TARGET_RATIO = float(_job_param("entropy_target_ratio", 0.35))
    DEFAULT_ENTROPY_ALPHA_INIT = float(_job_param("entropy_alpha_init", 0.12))
    ensure_dir(run_dir)
    run_type = "trial" if trial is not None else "final"
    # Snapshot of everything needed to reproduce this job; persisted below as
    # run_config.json.
    config = {
        "project_dir": str(PROJECT_DIR),
        "data_root": str(DATA_ROOT),
        "run_type": run_type,
        "strategy": strategy,
        "dataset_percent": bundle.percent,
        "dataset_name": bundle.split_payload["dataset_name"],
        "dataset_split_policy": bundle.split_payload["dataset_split_policy"],
        "dataset_splits_path": bundle.split_payload["dataset_splits_path"],
        "split_type": bundle.split_payload["split_type"],
        "train_subset_key": bundle.split_payload["train_subset_key"],
        "normalization_cache_path": bundle.split_payload["normalization_cache_path"],
        "head_lr": params["head_lr"],
        "encoder_lr": params["encoder_lr"],
        "weight_decay": params["weight_decay"],
        "dropout_p": params["dropout_p"],
        "tmax": params["tmax"],
        "entropy_lr": params["entropy_lr"],
        "max_epochs": max_epochs,
        "gamma": DEFAULT_GAMMA,
        "grad_clip_norm": DEFAULT_GRAD_CLIP_NORM,
        "scheduler_factor": SCHEDULER_FACTOR,
        "scheduler_patience": SCHEDULER_PATIENCE,
        "scheduler_threshold": SCHEDULER_THRESHOLD,
        "scheduler_min_lr": SCHEDULER_MIN_LR,
        "execution_mode": EXECUTION_MODE,
        "evaluation_checkpoint_mode": EVAL_CHECKPOINT_MODE,
        "strategy2_checkpoint_mode": STRATEGY2_CHECKPOINT_MODE,
        "train_resume_mode": TRAIN_RESUME_MODE,
        "train_resume_specific_checkpoint": TRAIN_RESUME_SPECIFIC_CHECKPOINT,
    }
    # Fold in any extra hyperparameters without overwriting the keys above.
    config.update({key: value for key, value in params.items() if key not in config})
    config.update(model_config.to_payload())
    if strategy2_checkpoint_path is not None:
        config["strategy2_checkpoint_path"] = str(Path(strategy2_checkpoint_path))
    if resume_checkpoint_path is not None:
        config["resume_checkpoint_path"] = str(Path(resume_checkpoint_path))
    if retrying_from_trial_number is not None:
        config["retrying_from_trial_number"] = int(retrying_from_trial_number)
    save_json(run_dir / "run_config.json", config)


    model: nn.Module | None = None
    try:
        model, description, _compiled = build_model(
            strategy,
            params["dropout_p"],
            model_config=model_config,
            strategy2_checkpoint_path=strategy2_checkpoint_path,
        )
        summary, history = train_model(
            run_type=run_type,
            model_config=model_config,
            run_config=config,
            model=model,
            description=description,
            strategy=strategy,
            run_dir=run_dir,
            bundle=bundle,
            max_epochs=max_epochs,
            head_lr=params["head_lr"],
            encoder_lr=params["encoder_lr"],
            weight_decay=params["weight_decay"],
            tmax=params["tmax"],
            entropy_lr=params["entropy_lr"],
            entropy_alpha_init=DEFAULT_ENTROPY_ALPHA_INIT,
            entropy_target_ratio=DEFAULT_ENTROPY_TARGET_RATIO,
            critic_loss_weight=DEFAULT_CRITIC_LOSS_WEIGHT,
            ce_weight=DEFAULT_CE_WEIGHT,
            dice_weight=DEFAULT_DICE_WEIGHT,
            dropout_p=params["dropout_p"],
            resume_checkpoint_path=resume_checkpoint_path,
            trial=trial,
        )


        # Trial runs stop after training: record a small summary and skip
        # evaluation entirely (the finally block still cleans up the model).
        if trial is not None:
            save_json(
                run_dir / "summary.json",
                {
                    "params": params,
                    "best_iou": float(summary["best_val_iou"]),
                    "resumed": bool(resume_checkpoint_path is not None),
                    "retrying_from_trial_number": retrying_from_trial_number,
                },
            )
            return summary, history, {}


        # Final runs: release the trained model (and CUDA memory) before
        # evaluation, which loads its own checkpoint.
        del model
        model = None
        run_cuda_cleanup()


        aggregate, _per_sample = run_evaluation_for_run(
            strategy=strategy,
            percent=bundle.percent,
            bundle=bundle,
            run_dir=run_dir,
            strategy2_checkpoint_path=strategy2_checkpoint_path,
        )
        return summary, history, aggregate
    finally:
        # Guarantee GPU memory is reclaimed on every exit path, including
        # exceptions raised during build/train/eval.
        if model is not None:
            del model
            model = None
        run_cuda_cleanup()
|
|
def run_study(
    strategy: int,
    bundle: DataBundle,
    *,
    model_config: RuntimeModelConfig,
    strategy2_checkpoint_path: str | Path | None = None,
) -> dict[str, Any]:
    """Run (or resume) the Optuna study for one (strategy, dataset-percent) pair.

    Uses a SQLite-backed :class:`~optuna.storages.RDBStorage` with heartbeat
    monitoring so crashed trials can be retried, optimizes only the trials
    still missing to reach ``NUM_TRIALS``, and persists the best parameters.

    Args:
        strategy: Training strategy id for this study.
        bundle: Data bundle for the current dataset percent.
        model_config: Runtime backbone/decoder configuration.
        strategy2_checkpoint_path: Optional strategy-2 checkpoint forwarded to
            every trial.

    Returns:
        The best trial's hyperparameters (with strategy-2 defaults filled in).

    Raises:
        RuntimeError: If the study finishes with no COMPLETE trials.
    """
    strategy_root, study_root, trials_root = study_paths_for(strategy, bundle.percent, model_config)
    ensure_dir(strategy_root.parent)
    strategy_root = ensure_dir(strategy_root)
    study_root = ensure_dir(strategy_root / "study")
    trials_root = ensure_dir(strategy_root / "trials")
    storage_path = study_root / "study.sqlite3"
    # Heartbeats let Optuna mark trials from dead processes as failed, and the
    # retry callback re-enqueues them (up to OPTUNA_FAILED_TRIAL_MAX_RETRY).
    storage = RDBStorage(
        url=f"sqlite:///{storage_path.resolve()}",
        heartbeat_interval=OPTUNA_HEARTBEAT_INTERVAL,
        grace_period=OPTUNA_HEARTBEAT_GRACE_PERIOD,
        failed_trial_callback=RetryFailedTrialCallback(max_retry=OPTUNA_FAILED_TRIAL_MAX_RETRY),
    )
    sampler = optuna.samplers.TPESampler(seed=SEED)
    study_name = (
        f"{MODEL_NAME}_{model_config.backbone_tag()}_pct_{percent_label(bundle.percent)}_strategy_{strategy}"
    )
    study = optuna.create_study(
        study_name=study_name,
        direction=STUDY_DIRECTION,
        sampler=sampler,
        pruner=pruner_for_run(),
        storage=storage,
        load_if_exists=LOAD_EXISTING_STUDIES,
    )
    # "finished" includes pruned/failed; "completed" is COMPLETE only. The
    # remaining budget counts against finished trials, so retries do not
    # inflate the target.
    finished_trials = [trial for trial in study.trials if trial.state.is_finished()]
    completed_trials = [trial for trial in study.trials if trial.state == optuna.trial.TrialState.COMPLETE]
    remaining_trials = max(int(NUM_TRIALS) - len(finished_trials), 0)


    if finished_trials:
        print(
            f"[Optuna Study] Loaded existing study '{study_name}' with "
            f"{len(finished_trials)} finished trial(s), {len(completed_trials)} completed trial(s), "
            f"target={NUM_TRIALS}, remaining={remaining_trials}."
        )
    else:
        print(f"[Optuna Study] Starting new study '{study_name}' with target={NUM_TRIALS} trial(s).")


    def objective(trial: optuna.trial.Trial) -> float:
        """Train one trial and return its best validation IoU."""
        trial_dir = ensure_dir(trials_root / f"trial_{trial.number:03d}")
        # If this trial is a retry of a failed one, find the prior trial's
        # number so we can resume from its latest checkpoint. getattr guards
        # against Optuna versions without retried_trial_number; the fallback
        # reads the system attribute the retry callback sets.
        retried_trial_fn = getattr(RetryFailedTrialCallback, "retried_trial_number", None)
        retrying_from_trial_number = (
            retried_trial_fn(trial) if callable(retried_trial_fn) else trial.system_attrs.get("failed_trial")
        )
        resume_checkpoint_path: Path | None = None
        if retrying_from_trial_number is not None:
            prior_trial_dir = trials_root / f"trial_{int(retrying_from_trial_number):03d}"
            candidate = prior_trial_dir / "checkpoints" / "latest.pt"
            if candidate.exists():
                resume_checkpoint_path = candidate.resolve()
        params = suggest_hyperparameters(trial, strategy)
        log_optuna_trial_start(
            study_name=study_name,
            strategy=strategy,
            bundle=bundle,
            trial=trial,
            trial_dir=trial_dir,
            params=params,
            max_epochs=strategy_epochs(strategy),
        )
        summary: dict[str, Any] | None = None
        _history: list[dict[str, Any]] | None = None
        aggregate: dict[str, dict[str, float]] | None = None
        completed_successfully = False
        run_cuda_cleanup()
        try:
            summary, _history, aggregate = run_single_job(
                strategy=strategy,
                model_config=model_config,
                bundle=bundle,
                run_dir=trial_dir,
                params=params,
                max_epochs=strategy_epochs(strategy),
                trial=trial,
                strategy2_checkpoint_path=strategy2_checkpoint_path,
                resume_checkpoint_path=resume_checkpoint_path,
                retrying_from_trial_number=int(retrying_from_trial_number) if retrying_from_trial_number is not None else None,
            )
            metric_value = float(summary["best_val_iou"])
            completed_successfully = True
            tqdm.write(
                f"[Trial {trial.number:03d} | Strategy {strategy} | {percent_label(bundle.percent)}%] "
                f"completed: best_val_iou={metric_value:.4f}"
            )
            return metric_value
        finally:
            # Drop references to the large result objects before cleanup so
            # their memory can be reclaimed between trials.
            if aggregate is not None:
                del aggregate
                aggregate = None
            if _history is not None:
                del _history
                _history = None
            if summary is not None:
                del summary
                summary = None
            # Only prune the trial dir on success — a failed trial keeps its
            # checkpoints so a retry can resume from latest.pt.
            if completed_successfully:
                prune_optuna_trial_dir(trial_dir)
            run_cuda_cleanup(context=f"trial {trial.number:03d} boundary")


    if remaining_trials > 0:
        study.optimize(objective, n_trials=remaining_trials, show_progress_bar=True)
    else:
        print(
            f"[Optuna Study] Skipping optimization for '{study_name}' because the study already reached "
            f"the configured cap of {NUM_TRIALS} finished trial(s)."
        )


    # Re-query after optimization: best params require at least one COMPLETE trial.
    completed_trials = [trial for trial in study.trials if trial.state == optuna.trial.TrialState.COMPLETE]
    if not completed_trials:
        raise RuntimeError(
            f"Study '{study_name}' has no completed trials, so best params cannot be resolved. "
            f"Finished trials={len([trial for trial in study.trials if trial.state.is_finished()])}, "
            f"configured cap={NUM_TRIALS}."
        )


    best_params = dict(study.best_trial.params)
    # Strategy 2 does not tune these, so backfill defaults for downstream use.
    if strategy == 2:
        best_params.setdefault("tmax", DEFAULT_TMAX)
        best_params.setdefault("entropy_lr", DEFAULT_ENTROPY_LR)
    save_json(study_root / "best_params.json", best_params)
    save_json(
        study_root / "summary.json",
        {
            "best_params": best_params,
            "best_iou": float(study.best_value),
            "finished_trials": len([trial for trial in study.trials if trial.state.is_finished()]),
            "completed_trials": len(completed_trials),
            "target_trials": int(NUM_TRIALS),
            "remaining_trials": max(int(NUM_TRIALS) - len([trial for trial in study.trials if trial.state.is_finished()]), 0),
        },
    )
    prune_optuna_study_dir(study_root)
    return best_params
|
|
def run_final_training(
    strategy: int,
    bundle: DataBundle,
    params: dict[str, Any],
    *,
    model_config: RuntimeModelConfig,
    strategy2_checkpoint_path: str | Path | None = None,
) -> None:
    """Run the final (non-trial) training job for *strategy* at this percent.

    Skips entirely when ``SKIP_EXISTING_FINALS`` is set and a ``summary.json``
    from a previous final run already exists.

    Args:
        strategy: Training strategy id.
        bundle: Data bundle for the current dataset percent.
        params: Resolved hyperparameters (e.g. from the Optuna study).
        model_config: Runtime backbone/decoder configuration.
        strategy2_checkpoint_path: Optional strategy-2 checkpoint to bootstrap from.
    """
    final_root = final_root_for_strategy(strategy, bundle.percent, model_config)
    if SKIP_EXISTING_FINALS and (final_root / "summary.json").exists():
        print(f"Skipping existing final run: {final_root}")
        return
    # Create the output directory before the first write: run_single_job only
    # calls ensure_dir(run_dir) later, so best_params.json would otherwise be
    # written into a directory that may not exist yet.
    ensure_dir(final_root)
    save_json(final_root / "best_params.json", params)
    resume_checkpoint_path = resolve_train_resume_checkpoint_path(final_root)
    run_single_job(
        strategy=strategy,
        model_config=model_config,
        bundle=bundle,
        run_dir=final_root,
        params=params,
        max_epochs=strategy_epochs(strategy),
        trial=None,  # final runs are not Optuna trials: train then evaluate
        strategy2_checkpoint_path=strategy2_checkpoint_path,
        resume_checkpoint_path=resume_checkpoint_path,
    )
|
|
def print_environment_summary(model_config: RuntimeModelConfig) -> None:
    """Emit a one-screen summary of the runtime, dataset, and model setup."""
    banner("RUNTIME SUMMARY")
    images_dir, annotations_dir = current_dataset_dirs()

    # Accumulate report lines, then flush; conditional sections append as needed.
    report: list[str] = [
        f"Project dir : {PROJECT_DIR}",
        f"Data root : {DATA_ROOT}",
        f"Runs root : {RUNS_ROOT}",
        f"Dataset name : {current_dataset_name()}",
    ]
    if current_dataset_name() == "BUSI_with_classes":
        report.append(f"Dataset split policy : {current_busi_with_classes_split_policy()}")
    report += [
        f"Images dir : {images_dir}",
        f"Masks dir : {annotations_dir}",
        f"Dataset splits json : {current_dataset_splits_json_path()}",
        f"Split type : {SPLIT_TYPE}",
        f"Device : {DEVICE}",
        f"Device source : {DEVICE_FALLBACK_SOURCE}",
        f"Model name : {MODEL_NAME}",
        f"Seed : {SEED}",
        f"PyTorch version : {torch.__version__}",
        f"Batch size : {BATCH_SIZE}",
        f"Use AMP : {USE_AMP}",
        f"Num workers : {NUM_WORKERS}",
        f"Pin memory : {USE_PIN_MEMORY}",
        f"CuDNN deterministic : {torch.backends.cudnn.deterministic}",
        f"CuDNN benchmark : {torch.backends.cudnn.benchmark}",
    ]

    # GPU details only apply on CUDA devices.
    if DEVICE.type == "cuda":
        props = torch.cuda.get_device_properties(0)
        report += [
            f"GPU : {torch.cuda.get_device_name(0)}",
            f"GPU VRAM : {props.total_memory / (1024 ** 3):.2f} GB",
            f"AMP dtype : {resolve_amp_dtype(AMP_DTYPE)}",
        ]
    report.append(f"Trial pruning : {USE_TRIAL_PRUNING}")
    report.append(f"Backbone family : {model_config.backbone_family}")
    if model_config.backbone_family == "custom_vgg":
        report += [
            f"VGG feature scales : {model_config.vgg_feature_scales}",
            f"VGG feature dilation : {model_config.vgg_feature_dilation}",
        ]
    else:
        report += [
            f"SMP encoder : {model_config.smp_encoder_name}",
            f"SMP encoder depth : {model_config.smp_encoder_depth}",
            f"SMP encoder proj dim : {model_config.smp_encoder_proj_dim}",
            f"SMP decoder : {model_config.smp_decoder_type}",
        ]
    report.append(f"Strategies : {STRATEGIES}")
    report.append(f"Dataset percents : {DATASET_PERCENTS}")
    for entry in report:
        print(entry)

    # This helper prints its own status lines, so flush before calling it.
    print_imagenet_normalization_status()

    tail: list[str] = [
        f"Trials per study : {NUM_TRIALS}",
        f"Execution mode : {EXECUTION_MODE}",
        f"Run Optuna : {RUN_OPTUNA}",
        f"Use saved best params : {USE_EXISTING_BEST_PARAMS_WHEN_OPTUNA_OFF}",
        f"Reset studies/run : {RESET_ALL_STUDIES_EACH_RUN}",
        f"Load existing studies : {LOAD_EXISTING_STUDIES}",
        f"Eval ckpt selector : {EVAL_CHECKPOINT_MODE}",
        f"S2 ckpt selector : {STRATEGY2_CHECKPOINT_MODE}",
        f"S3 bootstrap from S2 : {STRATEGY3_BOOTSTRAP_FROM_STRATEGY2}",
        f"Train resume mode : {TRAIN_RESUME_MODE}",
        f"Verbose epoch log : {VERBOSE_EPOCH_LOG}",
        f"Validate every epochs : {VALIDATE_EVERY_N_EPOCHS}",
        f"Smoke test enabled : {RUN_SMOKE_TEST}",
        f"Overfit test enabled : {RUN_OVERFIT_TEST}",
        f"Overfit batches : {OVERFIT_N_BATCHES}",
        f"Overfit epochs : {OVERFIT_N_EPOCHS}",
    ]
    for entry in tail:
        print(entry)
|
|
def maybe_run_strategy_smoke_test(
    *,
    strategy: int,
    model_config: RuntimeModelConfig,
    bundle: DataBundle,
    strategy2_checkpoint_path: str | Path | None = None,
) -> None:
    """Run the strategy smoke test, unless disabled or in eval-only mode."""
    # Short-circuit mirrors the original guard order: the smoke-test switch
    # first, then the execution mode.
    should_run = RUN_SMOKE_TEST and EXECUTION_MODE != "eval_only"
    if not should_run:
        return
    smoke_root = strategy_root_for_percent(strategy, bundle.percent, model_config) / "smoke_test"
    run_smoke_test(
        strategy=strategy,
        model_config=model_config,
        bundle=bundle,
        smoke_root=smoke_root,
        strategy2_checkpoint_path=strategy2_checkpoint_path,
    )
|
|
def main() -> int:
    """Program entry point.

    Validates the configuration, builds data bundles for every dataset
    percent, then runs the configured pipeline (train+eval or eval-only) for
    each (percent, strategy) pair.

    Returns:
        Process exit status: ``0`` on success, ``1`` if the run loop raises.
    """
    banner("MLR ALL STRATEGIES BAYES RUNNER")
    set_global_seed(SEED)
    model_config = current_model_config()
    dataset_name = current_dataset_name()
    images_dir, annotations_dir = current_dataset_dirs()
    # Fail fast on misconfiguration before any expensive work starts.
    if EXECUTION_MODE not in {"train_eval", "eval_only"}:
        raise ValueError(f"EXECUTION_MODE must be 'train_eval' or 'eval_only', got {EXECUTION_MODE!r}")
    if SPLIT_TYPE not in SUPPORTED_SPLIT_TYPES:
        raise ValueError(f"SPLIT_TYPE must be one of {SUPPORTED_SPLIT_TYPES}, got {SPLIT_TYPE!r}")
    if not images_dir.exists() or not annotations_dir.exists():
        raise FileNotFoundError(
            f"Expected dataset directories for {dataset_name} under {DATA_ROOT}: "
            f"images={images_dir}, masks={annotations_dir}"
        )
    ensure_specific_checkpoint_scope("EVAL_CHECKPOINT_MODE", EVAL_CHECKPOINT_MODE)
    ensure_specific_checkpoint_scope("STRATEGY2_CHECKPOINT_MODE", STRATEGY2_CHECKPOINT_MODE)
    ensure_specific_checkpoint_scope("TRAIN_RESUME_MODE", TRAIN_RESUME_MODE)


    print_environment_summary(model_config)
    split_registry, split_source = load_or_create_dataset_splits(
        images_dir=images_dir,
        annotations_dir=annotations_dir,
        split_json_path=current_dataset_splits_json_path(),
        train_fractions=DATASET_PERCENTS,
        seed=SEED,
    )


    # One data bundle per dataset percent, shared across all strategies.
    bundles: dict[float, DataBundle] = {}
    for percent in DATASET_PERCENTS:
        bundles[percent] = build_data_bundle(percent, split_registry, split_source)


    # Overfit-test mode replaces the normal pipeline entirely.
    if RUN_OVERFIT_TEST:
        run_configured_overfit_tests(bundles, model_config=model_config)
        banner("OVERFIT TESTS COMPLETE")
        return 0


    if RESET_ALL_STUDIES_EACH_RUN:
        if RUN_OPTUNA:
            banner("RESETTING OPTUNA STUDIES")
            for strategy in STRATEGIES:
                for percent in DATASET_PERCENTS:
                    reset_study_artifacts(strategy, percent, model_config=model_config)
        else:
            print("[Optuna Reset] Skipped because RUN_OPTUNA=False.")


    try:
        for percent in DATASET_PERCENTS:
            banner(f"PERCENT STAGE | {percent_label(percent)}%")
            bundle = bundles[percent]
            for strategy in STRATEGIES:
                # Strategies 4/5 always need the strategy-2 checkpoint;
                # strategy 3 only when bootstrapping is enabled in train mode.
                strategy2_checkpoint_path = None
                if strategy in (4, 5) or (
                    EXECUTION_MODE == "train_eval" and strategy == 3 and STRATEGY3_BOOTSTRAP_FROM_STRATEGY2
                ):
                    strategy2_checkpoint_path = resolve_strategy2_checkpoint_path(strategy, percent, model_config)


                if EXECUTION_MODE == "train_eval":
                    maybe_run_strategy_smoke_test(
                        strategy=strategy,
                        model_config=model_config,
                        bundle=bundle,
                        strategy2_checkpoint_path=strategy2_checkpoint_path,
                    )
                    params = resolve_job_params(
                        strategy,
                        percent,
                        bundle,
                        model_config=model_config,
                        strategy2_checkpoint_path=strategy2_checkpoint_path,
                    )
                    banner(f"FINAL RETRAIN | Strategy {strategy} | {percent_label(percent)}%")
                    run_final_training(
                        strategy,
                        bundle,
                        params,
                        model_config=model_config,
                        strategy2_checkpoint_path=strategy2_checkpoint_path,
                    )
                else:
                    banner(f"EVAL ONLY | Strategy {strategy} | {percent_label(percent)}%")
                    run_evaluation_for_run(
                        strategy=strategy,
                        percent=percent,
                        bundle=bundle,
                        run_dir=final_root_for_strategy(strategy, percent, model_config),
                        strategy2_checkpoint_path=strategy2_checkpoint_path,
                    )
    # Intentionally broad: this is the top-level program boundary — log the
    # traceback and convert any failure into a nonzero exit status.
    except Exception:
        banner("RUN FAILED")
        traceback.print_exc()
        return 1


    banner("ALL DONE")
    return 0
|
|
if __name__ == "__main__":
    # Propagate main()'s status (0 = success, 1 = failure) to the shell.
    # The previous bare call discarded the return value, so failed runs
    # still exited with status 0.
    raise SystemExit(main())
|
|