"""
Inference helpers for the ArcDetectorCNN door orientation model.
Supports single-image and batch inference on already-cropped images.
Classes (alphabetical ImageFolder order):
0 = double
1 = hinge_left
2 = hinge_right
Usage — single image:
from inference import load_model, predict
model = load_model("runs/blocks3_drop0.2/best_model.pt")
label, confidence = predict(img_bgr, model)
print(label, confidence) # e.g. "hinge_left", 0.94
Usage — batch:
labels, confidences = predict_batch([img1, img2, img3], model)
Usage — CLI:
python inference.py --model runs/blocks3_drop0.2/best_model.pt image1.png image2.png
"""
from __future__ import annotations
import argparse
import sys
from pathlib import Path
import cv2
from modal import Image  # NOTE(review): appears unused here — _to_tensor imports PIL.Image locally; confirm this modal import is intentional
import numpy as np
import torch
# Make this directory and the repo root importable so the sibling training
# modules (cnn_dataset_builder, cnn_door_orientation_detection) resolve
# regardless of the current working directory.
_HERE = Path(__file__).parent
for _p in (_HERE, _HERE.parent.parent):
    if str(_p) not in sys.path:
        sys.path.insert(0, str(_p))
from cnn_dataset_builder import TRANSFORM, preprocess_real_crop # noqa: E402
from cnn_door_orientation_detection import ArcDetectorCNN, CLASS_NAMES # noqa: E402
# Select the inference device once at import time: CUDA first, then
# Apple-silicon MPS, falling back to CPU.
if torch.cuda.is_available():
    _DEVICE = torch.device("cuda")
elif torch.backends.mps.is_available():
    _DEVICE = torch.device("mps")
else:
    _DEVICE = torch.device("cpu")
# ── model loading ──────────────────────────────────────────────────────────────
def load_model(model_path: str | Path) -> ArcDetectorCNN:
    """
    Restore a trained ArcDetectorCNN from a .pt checkpoint.

    The sibling config.json (if present) supplies the n_blocks / dropout
    hyper-parameters the model was trained with; otherwise the defaults
    (3 blocks, 0.2 dropout) are used.

    Args:
        model_path: Path to a best_model.pt state-dict file.

    Returns:
        The model in eval mode, moved to the module-level inference device.
    """
    import json

    checkpoint = Path(model_path)
    cfg_file = checkpoint.parent / "config.json"
    cfg = json.loads(cfg_file.read_text()) if cfg_file.exists() else {}

    net = ArcDetectorCNN(
        n_blocks=cfg.get("n_blocks", 3),
        dropout=cfg.get("dropout", 0.2),
    )
    # weights_only=True restricts unpickling to tensors/containers (safe load).
    state = torch.load(checkpoint, map_location="cpu", weights_only=True)
    net.load_state_dict(state)
    net.eval()
    return net.to(_DEVICE)
# ── preprocessing ──────────────────────────────────────────────────────────────
def _to_tensor(img: np.ndarray) -> torch.Tensor:
    """
    Turn one crop (BGR or grayscale uint8) into a normalised (1, 1, H, W) tensor.

    Runs the same CLAHE preprocessing used when the training set was built,
    so inference inputs match the training distribution.
    """
    from PIL import Image

    if img.ndim == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    processed = preprocess_real_crop(gray)
    tensor = TRANSFORM(Image.fromarray(processed))
    return tensor.unsqueeze(0)  # (1, 1, 128, 128)
def _to_batch_tensor(imgs: list[np.ndarray]) -> torch.Tensor:
    """Preprocess every crop and concatenate into one (N, 1, H, W) batch."""
    per_image = [_to_tensor(im) for im in imgs]
    return torch.cat(per_image, dim=0)
# ── inference ──────────────────────────────────────────────────────────────────
def predict(
    img: np.ndarray,
    model: ArcDetectorCNN,
    confidence_threshold: float = 0.0,
) -> tuple[str, float]:
    """
    Predict the door orientation class for a single crop.

    Args:
        img: H×W×3 BGR or H×W grayscale uint8 crop.
        model: Loaded ArcDetectorCNN (use load_model()).
        confidence_threshold: If the softmax confidence is below this, return "unknown".

    Returns:
        (class_name, confidence) where class_name is one of
        "double" / "hinge_left" / "hinge_right" / "unknown".
    """
    # Delegate to the batch path with a one-element batch.
    (label,), (confidence,) = predict_batch([img], model, confidence_threshold)
    return label, confidence
def predict_batch(
    imgs: list[np.ndarray],
    model: ArcDetectorCNN,
    confidence_threshold: float = 0.5,
) -> tuple[list[str], list[float]]:
    """
    Predict door orientation classes for a batch of crops.

    Args:
        imgs: List of H×W×3 BGR or H×W grayscale uint8 crops.
        model: Loaded ArcDetectorCNN (use load_model()).
        confidence_threshold: Images whose top softmax score is below this get "unknown".

    Returns:
        (labels, confidences) — parallel lists, one entry per input image.
        Confidences are rounded to 4 decimal places.
    """
    model.eval()
    inputs = _to_batch_tensor(imgs).to(_DEVICE)  # (N, 1, 128, 128)

    with torch.no_grad():
        probs = model(inputs.float()).softmax(dim=1)  # (N, 3)
        top_conf, top_idx = probs.max(dim=1)          # (N,), (N,)

    labels: list[str] = []
    confidences: list[float] = []
    for conf, idx in zip(top_conf.cpu().tolist(), top_idx.cpu().tolist()):
        # Low-confidence predictions are reported as "unknown" but the raw
        # confidence is still returned so callers can inspect it.
        labels.append("unknown" if conf < confidence_threshold else CLASS_NAMES[idx])
        confidences.append(round(conf, 4))
    return labels, confidences
# ── CLI ────────────────────────────────────────────────────────────────────────
_DEFAULT_RUNS = _HERE / "runs"  # directory scanned for trained runs when --model is omitted
def _auto_detect_model(runs_dir: Path) -> Path:
import json
configs = list(runs_dir.glob("*/config.json"))
if not configs:
raise FileNotFoundError(f"No trained runs found in {runs_dir}. Run --train first.")
best = max(configs, key=lambda p: json.loads(p.read_text()).get("best_val_acc", 0))
return best.parent / "best_model.pt"
if __name__ == "__main__":
    # CLI entry point: classify one or more image files and print a table.
    parser = argparse.ArgumentParser(description="ArcDetectorCNN inference")
    parser.add_argument("images", nargs="+", type=Path, help="Image file(s) to classify")
    parser.add_argument(
        "--model",
        type=Path,
        default=None,
        help="Path to best_model.pt (auto-detected from --runs-dir if omitted)",
    )
    parser.add_argument(
        "--runs-dir",
        type=Path,
        default=_DEFAULT_RUNS,
        help=f"Runs directory for auto-detection (default: {_DEFAULT_RUNS})",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        default=0.0,
        help="Confidence threshold below which output is 'unknown' (default: 0.0)",
    )
    args = parser.parse_args()

    # Fall back to the highest-accuracy run when --model is not given.
    model_path = args.model or _auto_detect_model(args.runs_dir)
    print(f"Model: {model_path}")
    model = load_model(model_path)

    # Read every image up front; unreadable paths are skipped with a warning
    # so one bad file does not abort the whole batch.
    imgs = []
    paths = []
    for p in args.images:
        img = cv2.imread(str(p))
        if img is None:
            print(f" Warning: could not read {p} — skipping")
            continue
        imgs.append(img)
        paths.append(p)
    if not imgs:
        print("No valid images to process.")
        sys.exit(1)

    labels, confidences = predict_batch(imgs, model, confidence_threshold=args.threshold)

    # Aligned results table, one row per successfully-read image.
    print(f"\n{'Image':<40} {'Label':<12} Confidence")
    print("-" * 62)
    for p, label, conf in zip(paths, labels, confidences):
        print(f"{str(p):<40} {label:<12} {conf:.4f}")