# road-damage / src / models / ensemble.py
# Author: Mai Phuoc Minh Tai
# Commit 9e93b8c — fix: Add HuggingFace Docker model paths and debug logging
# for YOLO loading
# backend/src/models/ensemble.py
"""
Real YOLO Ensemble Detector for Road Damage Detection
Uses the trained yolo11base.pt model for actual inference.
Architecture supports future expansion to 3 models (YOLO11, YOLO12, SAHI)
deployed on HuggingFace Spaces.
Current Configuration:
- yolo11: 1.0 (active - local model)
- yolo12: 0.0 (pending - training)
- sahi: 0.0 (pending - training)
"""
import os
import numpy as np
from typing import List, Dict, Any, Optional
import logging

# Module-level logger for this file.
logger = logging.getLogger(__name__)
# Configure logging to show in console (e.g. in HuggingFace Docker logs).
logging.basicConfig(level=logging.INFO)

# Try to import YOLO from ultralytics. If the dependency is missing, record
# that fact so EnsembleDetector can degrade to its "mock detector" behavior
# (model never loads, predict() returns empty results) instead of crashing
# at import time.
try:
    from ultralytics import YOLO
    YOLO_AVAILABLE = True
    print("[ENSEMBLE] ultralytics YOLO imported successfully")
except ImportError:
    YOLO_AVAILABLE = False
    print("[ENSEMBLE] WARNING: ultralytics not installed, using mock detector")
    logger.warning("[WARN] ultralytics not installed, using mock detector")
class EnsembleDetector:
    """
    Real YOLO-based ensemble detector for road damage.

    Uses the local YOLO11 model for inference. When additional models
    are trained and deployed to HuggingFace Spaces, they can be
    added to the ensemble with appropriate weights.
    """

    def __init__(self, model_path: Optional[str] = None):
        """
        Initialize the ensemble detector.

        Args:
            model_path: Path to YOLO model weights. When None, a list of
                known locations is searched for models/yolo11base.pt
                (HuggingFace Docker container paths first, then local
                development paths).
        """
        self.model = None
        self.model_loaded = False
        # Ensemble weight per model. Only yolo11 contributes until the
        # other models finish training.
        self.weights = {
            "yolo11": 1.0,  # Active - local model
            "yolo12": 0.0,  # Pending
            "sahi": 0.0     # Pending
        }
        # Class id -> damage code, from the RDD2022 dataset. May be
        # replaced by the class map embedded in the loaded checkpoint.
        self.class_names = {
            0: "D00",  # Longitudinal Crack
            1: "D10",  # Transverse Crack
            2: "D20",  # Alligator Crack
            3: "D40",  # Pothole
        }
        # Damage code -> human-readable label used in detection results.
        self.class_display_names = {
            "D00": "Longitudinal Crack",
            "D10": "Transverse Crack",
            "D20": "Alligator Crack",
            "D40": "Pothole",
        }

        if model_path is None:
            model_path = self._find_model_path()
        self._load_model(model_path)

    @staticmethod
    def _find_model_path() -> Optional[str]:
        """Return the first existing default model location, or None."""
        possible_paths = [
            # HuggingFace Docker container paths (WORKDIR=/app)
            "/app/models/yolo11base.pt",
            "models/yolo11base.pt",
            # Local development paths
            "../models/yolo11base.pt",
            os.path.join(os.path.dirname(__file__), "../../models/yolo11base.pt"),
            os.path.join(os.path.dirname(__file__), "../../../models/yolo11base.pt"),
            "D:/Gitrepo/road-damage/models/yolo11base.pt",
        ]
        print(f"[MODEL] Searching for model in paths: {possible_paths}")
        for path in possible_paths:
            if os.path.exists(path):
                print(f"[MODEL] Found model at: {path}")
                return path
        print("[MODEL] WARNING: Model not found in any path!")
        return None

    def _load_model(self, model_path: Optional[str]) -> None:
        """Load YOLO weights from model_path, updating self.model_loaded."""
        if YOLO_AVAILABLE and model_path and os.path.exists(model_path):
            try:
                logger.info("[INFO] Loading YOLO model from: %s", model_path)
                self.model = YOLO(model_path)
                self.model_loaded = True
                logger.info("[OK] YOLO model loaded successfully!")
                # Prefer the class map stored inside the checkpoint, if any.
                if hasattr(self.model, 'names') and self.model.names:
                    self.class_names = self.model.names
                    logger.info("[INFO] Model classes: %s", self.class_names)
            except Exception as e:
                logger.error("[ERROR] Failed to load YOLO model: %s", e)
                self.model_loaded = False
        elif not YOLO_AVAILABLE:
            logger.warning("[WARN] ultralytics not available")
        elif not model_path:
            logger.warning("[WARN] No model path specified")
        else:
            logger.warning("[WARN] Model file not found: %s", model_path)

    def predict(self, image: np.ndarray, conf_threshold: float = 0.25) -> List[Dict]:
        """
        Run inference on an image.

        Args:
            image: numpy array (HWC format, RGB or BGR)
            conf_threshold: Minimum confidence threshold

        Returns:
            List of detection dictionaries with keys:
                - box: [x1, y1, x2, y2]
                - class_name: str (display name, e.g. "Pothole")
                - class_code: str (dataset code, e.g. "D40")
                - class_id: int
                - confidence: float
                - votes: int (number of models that detected this)
                - model: str
        """
        if not self.model_loaded:
            logger.warning("[WARN] Model not loaded, returning empty results")
            return []
        try:
            results = self.model(image, conf=conf_threshold, verbose=False)
            detections = []
            for result in results:
                boxes = result.boxes
                if boxes is None or len(boxes) == 0:
                    continue
                # Iterate the per-detection tensors in lockstep.
                for box_t, conf_t, cls_t in zip(boxes.xyxy, boxes.conf, boxes.cls):
                    x1, y1, x2, y2 = box_t.cpu().numpy()
                    conf = float(conf_t.cpu().numpy())
                    cls_id = int(cls_t.cpu().numpy())
                    # Map class id -> damage code; tolerate ids outside the
                    # known class map.
                    if cls_id in self.class_names:
                        cls_name = self.class_names[cls_id]
                    else:
                        cls_name = f"class_{cls_id}"
                    detections.append({
                        "box": [float(x1), float(y1), float(x2), float(y2)],
                        "class_name": self.class_display_names.get(cls_name, cls_name),
                        "class_code": cls_name,
                        "class_id": cls_id,
                        "confidence": conf,
                        "votes": 1,  # Single model for now
                        "model": "yolo11"
                    })
            logger.info("[DETECT] YOLO11 detected %d objects", len(detections))
            return detections
        except Exception as e:
            logger.error("[ERROR] Inference failed: %s", e)
            import traceback
            traceback.print_exc()
            return []

    def predict_with_ensemble(
        self,
        image: np.ndarray,
        conf_threshold: float = 0.25
    ) -> List[Dict]:
        """
        Run ensemble inference (future: multiple models).

        Currently only uses YOLO11 since other models are pending.
        When YOLO12 and SAHI are ready, this method will:
        1. Call all 3 models in parallel
        2. Merge overlapping detections (NMS)
        3. Apply weighted voting
        4. Return only detections agreed by 2+ models

        Args:
            image: numpy array
            conf_threshold: Minimum confidence

        Returns:
            List of ensemble-merged detections
        """
        # For now, just use YOLO11
        yolo11_results = self.predict(image, conf_threshold)
        # Future: Add YOLO12 and SAHI results
        # yolo12_results = self._call_hf_space("yolo12", image)
        # sahi_results = self._call_hf_space("sahi", image)
        # Future: Merge with NMS and voting
        # merged = self._merge_detections([yolo11_results, yolo12_results, sahi_results])
        return yolo11_results

    def get_model_info(self) -> Dict[str, Any]:
        """Get information about loaded models (status, weight, source)."""
        return {
            "yolo11": {
                "loaded": self.model_loaded,
                "weight": self.weights["yolo11"],
                "type": "local"
            },
            "yolo12": {
                "loaded": False,
                "weight": self.weights["yolo12"],
                "type": "hf_space",
                "status": "pending_training"
            },
            "sahi": {
                "loaded": False,
                "weight": self.weights["sahi"],
                "type": "hf_space",
                "status": "pending_training"
            }
        }
class SeverityClassifier:
    """Classify damage severity from damage area and detection confidence."""

    def __init__(self):
        # Area-ratio cut points; anything above the "medium" bound is heavy.
        self.thresholds = {
            "light": 0.05,   # < 5% of image
            "medium": 0.15,  # 5-15% of image
            # > 15% = heavy
        }

    def classify(self, area_ratio: float, confidence: float) -> str:
        """
        Classify severity based on damage area relative to image.

        Args:
            area_ratio: Damage area / image area
            confidence: Detection confidence (0-1)

        Returns:
            Severity string: "light", "medium", or "heavy"
        """
        # Scale the area by a confidence factor (capped at 1.0 once
        # confidence reaches 0.5) so that low-confidence detections are
        # graded more conservatively.
        scaled = area_ratio * min(1.0, confidence / 0.5)
        if scaled > self.thresholds["medium"]:
            return "heavy"
        if scaled > self.thresholds["light"]:
            return "medium"
        return "light"
class TTAProcessor:
    """Test-Time Augmentation processor (optional enhancement)."""

    def __init__(self):
        # Off by default: TTA trades inference speed for robustness.
        self.enabled = False

    def predict(self, image: np.ndarray) -> List[Dict]:
        """
        Run TTA inference (currently disabled).

        When enabled, this will apply augmentations (horizontal flip,
        multi-scale) and merge/average the results for more robust
        detections at the cost of speed.
        """
        if self.enabled:
            # Future: implement flip + multi-scale passes and merge results.
            return []
        return []