🔀 [Merge] branch 'main' into TEST
tests/test_utils/test_bounding_box_utils.py CHANGED

@@ -138,9 +138,9 @@ def test_anc2box_autoanchor(inference_v7_cfg: Config):
     anc2box.update((320, 640))
     anchor_grids_shape = [anchor_grid.shape for anchor_grid in anc2box.anchor_grids]
     assert anchor_grids_shape == [
-        torch.Size([1, 1, 80,
-        torch.Size([1, 1, 40,
-        torch.Size([1, 1, 20,
+        torch.Size([1, 1, 80, 40, 2]),
+        torch.Size([1, 1, 40, 20, 2]),
+        torch.Size([1, 1, 20, 10, 2]),
     ]
     assert anc2box.anchor_scale.shape == torch.Size([3, 1, 3, 1, 1, 2])
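The corrected expectations follow directly from the (W, H) = (320, 640) input: each head's grid is H/stride x W/stride, so a rectangular input gives rectangular grids. A quick sanity check, assuming the standard three-head strides of 8, 16, and 32 (the values implied by the 80/40/20 rows):

import torch

W, H = 320, 640
strides = [8, 16, 32]  # assumption: three-head strides implied by the 80/40/20 grid heights

# Each anchor grid holds one (x, y) pair per cell: [1, 1, H/stride, W/stride, 2]
shapes = [torch.Size([1, 1, H // s, W // s, 2]) for s in strides]
assert shapes == [
    torch.Size([1, 1, 80, 40, 2]),
    torch.Size([1, 1, 40, 20, 2]),
    torch.Size([1, 1, 20, 10, 2]),
]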
yolo/tools/solver.py CHANGED

@@ -45,7 +45,8 @@ class ValidateModel(BaseModel):

     def validation_step(self, batch, batch_idx):
         batch_size, images, targets, rev_tensor, img_paths = batch
-
+        H, W = images.shape[2:]
+        predicts = self.post_process(self(images), image_size=[W, H])
         batch_metrics = self.metric(
             [to_metrics_format(predict) for predict in predicts], [to_metrics_format(target) for target in targets]
         )
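The added lines derive the post-processing size from the batch itself, so `predicts` is defined before the metric call. A minimal sketch of the shape convention, assuming NCHW batches; the `post_process` call is taken from the diff above, everything else is illustrative:

import torch

images = torch.zeros(4, 3, 640, 320)  # hypothetical NCHW validation batch
H, W = images.shape[2:]               # layout is [batch, channel, height, width]
assert (H, W) == (640, 320)

# post_process takes image_size=[W, H] (width first), the same (W, H)
# ordering that generate_anchors and create_auto_anchor unpack below.
image_size = [W, H]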
yolo/utils/bounding_box_utils.py CHANGED

@@ -122,7 +122,7 @@ def generate_anchors(image_size: List[int], strides: List[int]):
         all_anchors [HW x 2]:
         all_scalers [HW]: The index of the best targets for each anchors
     """
-
+    W, H = image_size
    anchors = []
    scaler = []
    for stride in strides:
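With `W, H = image_size` unpacked, each stride now yields an H/stride x W/stride grid rather than assuming a square image. A self-contained sketch of the rectangular grid construction, assuming cell-center anchors; `make_anchor_centers` is a hypothetical stand-in for the real `generate_anchors`, which also returns the per-cell scaler:

import torch

def make_anchor_centers(image_size, strides):
    # image_size is (W, H); each head sees the image downsampled by its stride.
    W, H = image_size
    anchors = []
    for stride in strides:
        # Cell centers: offset by half a stride so anchors sit mid-cell.
        shift_x = torch.arange(0, W, stride) + stride / 2
        shift_y = torch.arange(0, H, stride) + stride / 2
        cy, cx = torch.meshgrid(shift_y, shift_x, indexing="ij")
        anchors.append(torch.stack([cx, cy], dim=-1).reshape(-1, 2))
    return torch.cat(anchors)  # [HW x 2], matching the docstring above

centers = make_anchor_centers((320, 640), [8, 16, 32])
assert centers.shape == torch.Size([80 * 40 + 40 * 20 + 20 * 10, 2])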
@@ -312,17 +312,18 @@ class Vec2Box:
         self.anchor_grid, self.scaler = anchor_grid.to(device), scaler.to(device)

     def create_auto_anchor(self, model: YOLO, image_size):
-
+        W, H = image_size
+        dummy_input = torch.zeros(1, 3, H, W).to(self.device)
         dummy_output = model(dummy_input)
         strides = []
         for predict_head in dummy_output["Main"]:
             _, _, *anchor_num = predict_head[2].shape
-            strides.append(
+            strides.append(W // anchor_num[1])
         return strides

     def update(self, image_size):
         """
-        image_size:
+        image_size: W, H
         """
         if self.image_size == image_size:
             return
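Both converters infer strides by pushing a zero tensor through the model and dividing the input width by each head's grid width (`anchor_num[1]`), which stays correct for non-square inputs. A minimal sketch of that logic, with a fake head list standing in for the `model(...)` output (hypothetical; Anc2Box below applies the same idea to `predict_head.shape` directly):

import torch

def infer_strides(heads, input_w):
    # Each head output is [batch, ..., grid_h, grid_w]; the stride is how
    # far the input width was downsampled to reach grid_w.
    strides = []
    for predict_head in heads:
        _, _, *anchor_num = predict_head.shape  # anchor_num == [grid_h, grid_w]
        strides.append(input_w // anchor_num[1])
    return strides

W, H = 320, 640
fake_heads = [torch.zeros(1, 85, H // s, W // s) for s in (8, 16, 32)]
assert infer_strides(fake_heads, W) == [8, 16, 32]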
@@ -365,12 +366,13 @@ class Anc2Box:
         self.class_num = model.num_classes

     def create_auto_anchor(self, model: YOLO, image_size):
-
+        W, H = image_size
+        dummy_input = torch.zeros(1, 3, H, W).to(self.device)
         dummy_output = model(dummy_input)
         strides = []
         for predict_head in dummy_output["Main"]:
             _, _, *anchor_num = predict_head.shape
-            strides.append(
+            strides.append(W // anchor_num[1])
         return strides

     def generate_anchors(self, image_size: List[int]):
@@ -383,7 +385,7 @@ class Anc2Box:
         return anchor_grids

     def update(self, image_size):
-        self.
+        self.anchor_grids = self.generate_anchors(image_size)

     def __call__(self, predicts: List[Tensor]):
         preds_box, preds_cls, preds_cnf = [], [], []
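The old line was cut off mid-statement (`self.`), so `update` never actually rebuilt the grids; the fix regenerates them on every call. If that rebuild ever shows up in profiles, the same size check `Vec2Box.update` performs above would apply here too; a hypothetical extension, not part of this commit:

def update(self, image_size):
    # Mirror Vec2Box.update: skip the meshgrid rebuild when the size is
    # unchanged, since validation batches usually share one resolution.
    if getattr(self, "image_size", None) == image_size:
        return
    self.image_size = image_size
    self.anchor_grids = self.generate_anchors(image_size)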