VietCat committed on
Commit
aed8966
·
1 Parent(s): 8dc1ce9

Add raw predictions debug info and scale factor warnings

Browse files
Files changed (1) hide show
  1. model.py +29 -2
model.py CHANGED
@@ -175,6 +175,11 @@ class TrafficSignDetector:
175
  print(f" - Scale factor: {scale:.3f}")
176
  print(f" - Padding X: {pad_x}, Y: {pad_y}")
177
 
 
 
 
 
 
178
  # Normalize pixel values for inference
179
  print(f"\n[STEP 3] IMAGE NORMALIZATION")
180
  image = self._preprocess(image)
@@ -188,8 +193,17 @@ class TrafficSignDetector:
188
  print(f" - Input shape to model: {image.shape}")
189
  print(f" - Confidence threshold: {self.conf_threshold}")
190
  print(f" - IOU threshold: 0.45")
 
 
 
 
 
 
 
 
 
191
  results = self.model(image, conf=self.conf_threshold, imgsz=640, iou=0.45)
192
- print(f" - Number of results: {len(results)}")
193
 
194
  # Get original dimensions for coordinate transformation
195
  orig_h, orig_w = original_image.shape[:2]
@@ -239,5 +253,18 @@ class TrafficSignDetector:
239
 
240
  print(f"\n{'='*80}")
241
  print(f"DETECTION PIPELINE COMPLETE")
242
- print(f"{'='*80}\n")
 
 
 
 
 
 
 
 
 
 
 
 
 
243
  return original_image, preprocessed_display
 
175
  print(f" - Scale factor: {scale:.3f}")
176
  print(f" - Padding X: {pad_x}, Y: {pad_y}")
177
 
178
+ # Warning if scale is too small (objects might be too small to detect)
179
+ if scale < 0.5:
180
+ print(f" ⚠️ WARNING: Scale factor < 0.5 - objects may be too small!")
181
+ print(f" Original size: {original_image.shape[:2]} → Resized: {int(original_image.shape[1]*scale)}x{int(original_image.shape[0]*scale)}")
182
+
183
  # Normalize pixel values for inference
184
  print(f"\n[STEP 3] IMAGE NORMALIZATION")
185
  image = self._preprocess(image)
 
193
  print(f" - Input shape to model: {image.shape}")
194
  print(f" - Confidence threshold: {self.conf_threshold}")
195
  print(f" - IOU threshold: 0.45")
196
+
197
+ # Run with conf=0.0 to get raw predictions (before filtering)
198
+ results_raw = self.model(image, conf=0.0, imgsz=640, iou=0.45)
199
+ print(f" - Raw detections (conf=0.0): {len(results_raw[0].boxes) if results_raw else 0}")
200
+ if results_raw and len(results_raw[0].boxes) > 0:
201
+ raw_confs = [float(box.conf[0]) for box in results_raw[0].boxes[:5]] # First 5
202
+ print(f" - Top 5 raw confidences: {[f'{c:.4f}' for c in raw_confs]}")
203
+
204
+ # Now run with actual threshold
205
  results = self.model(image, conf=self.conf_threshold, imgsz=640, iou=0.45)
206
+ print(f" - Filtered detections (conf={self.conf_threshold}): {len(results)}")
207
 
208
  # Get original dimensions for coordinate transformation
209
  orig_h, orig_w = original_image.shape[:2]
 
253
 
254
  print(f"\n{'='*80}")
255
  print(f"DETECTION PIPELINE COMPLETE")
256
+ print(f"{'='*80}")
257
+
258
+ # Analysis and recommendations
259
+ if scale < 0.5:
260
+ print(f"\n📋 ANALYSIS & RECOMMENDATIONS:")
261
+ print(f" 1. Objects are too small after resizing (scale={scale:.2f})")
262
+ print(f" 2. Possible solutions:")
263
+ print(f" a) Use larger target size (imgsz=1024 or 1280)")
264
+ print(f" b) Use multi-scale detection (detect multiple regions)")
265
+ print(f" c) Reduce input image size to avoid extreme scaling")
266
+ print(f" d) Check if model was trained with your image dimensions")
267
+ print(f" 3. Current: {original_image.shape} → {image.shape} (scale {scale:.2f})")
268
+
269
+ print()
270
  return original_image, preprocessed_display