repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
ERD
|
ERD-main/tests/test_models/test_layers/test_plugins.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import pytest
import torch
from mmengine.config import ConfigDict
from mmdet.models.layers import DropBlock
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules
register_all_modules()
def test_dropblock():
    """Check DropBlock forward shapes and constructor validation."""
    inputs = torch.rand(1, 1, 11, 11)

    # With drop_prob=1 and a block covering the entire 11x11 map, every
    # activation must be zeroed while the shape is preserved.
    layer = DropBlock(1.0, block_size=11, warmup_iters=0)
    dropped = layer(inputs)
    assert dropped.shape == inputs.shape
    assert (dropped == 0).all()

    # A partial drop keeps the spatial shape intact.
    layer = DropBlock(0.5, block_size=5, warmup_iters=0)
    assert layer(inputs).shape == inputs.shape

    # drop_prob must lie in (0, 1]
    with pytest.raises(AssertionError):
        DropBlock(1.5, 3)
    # block_size must be odd
    with pytest.raises(AssertionError):
        DropBlock(0.5, 2)
    # warmup_iters must be non-negative
    with pytest.raises(AssertionError):
        DropBlock(0.5, 3, -1)
class TestPixelDecoder(unittest.TestCase):
    """Forward-pass smoke test for the plain ``PixelDecoder``."""

    def test_forward(self):
        channels = 64
        decoder_cfg = ConfigDict(
            dict(
                type='PixelDecoder',
                in_channels=[channels * 2**level for level in range(4)],
                feat_channels=channels,
                out_channels=channels,
                norm_cfg=dict(type='GN', num_groups=32),
                act_cfg=dict(type='ReLU')))
        decoder = MODELS.build(decoder_cfg)
        decoder.init_weights()

        img_metas = [{}, {}]
        # Four pyramid levels: channel count doubles while the spatial
        # size halves at each level (finest level first).
        feats = [
            torch.rand((2, channels * 2**level, 4 * 2**(3 - level),
                        5 * 2**(3 - level))) for level in range(4)
        ]
        mask_feature, memory = decoder(feats, img_metas)

        # The memory is the coarsest input level passed through untouched;
        # the mask feature keeps the shape of the finest level.
        assert (memory == feats[-1]).all()
        assert mask_feature.shape == feats[0].shape
class TestTransformerEncoderPixelDecoder(unittest.TestCase):
    """Forward smoke test for ``TransformerEncoderPixelDecoder``."""

    def test_forward(self):
        base_channels = 64
        pixel_decoder_cfg = ConfigDict(
            dict(
                type='TransformerEncoderPixelDecoder',
                in_channels=[base_channels * 2**i for i in range(4)],
                feat_channels=base_channels,
                out_channels=base_channels,
                norm_cfg=dict(type='GN', num_groups=32),
                act_cfg=dict(type='ReLU'),
                encoder=dict(  # DetrTransformerEncoder
                    num_layers=6,
                    layer_cfg=dict(  # DetrTransformerEncoderLayer
                        self_attn_cfg=dict(  # MultiheadAttention
                            embed_dims=base_channels,
                            num_heads=8,
                            attn_drop=0.1,
                            proj_drop=0.1,
                            dropout_layer=None,
                            batch_first=True),
                        ffn_cfg=dict(
                            embed_dims=base_channels,
                            feedforward_channels=base_channels * 8,
                            num_fcs=2,
                            act_cfg=dict(type='ReLU', inplace=True),
                            ffn_drop=0.1,
                            dropout_layer=None,
                            add_identity=True),
                        norm_cfg=dict(type='LN'),
                        init_cfg=None),
                    init_cfg=None),
                positional_encoding=dict(
                    num_feats=base_channels // 2, normalize=True)))
        # NOTE: rebinding ``self`` to the module under test is an mmdet
        # test convention; the TestCase instance is not used below.
        self = MODELS.build(pixel_decoder_cfg)
        self.init_weights()
        # Valid image regions are smaller than the padded batch shape so
        # the decoder's padding mask path is exercised.
        img_metas = [{
            'batch_input_shape': (128, 160),
            'img_shape': (120, 160),
        }, {
            'batch_input_shape': (128, 160),
            'img_shape': (125, 160),
        }]
        # Four pyramid levels, finest resolution first.
        feats = [
            torch.rand(
                (2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
            for i in range(4)
        ]
        mask_feature, memory = self(feats, img_metas)
        # The encoder memory keeps the spatial size of the coarsest level;
        # the mask feature matches the finest level.
        assert memory.shape[-2:] == feats[-1].shape[-2:]
        assert mask_feature.shape == feats[0].shape
class TestMSDeformAttnPixelDecoder(unittest.TestCase):
    """Forward smoke test for ``MSDeformAttnPixelDecoder``."""

    def test_forward(self):
        base_channels = 64
        pixel_decoder_cfg = ConfigDict(
            dict(
                type='MSDeformAttnPixelDecoder',
                in_channels=[base_channels * 2**i for i in range(4)],
                strides=[4, 8, 16, 32],
                feat_channels=base_channels,
                out_channels=base_channels,
                num_outs=3,
                norm_cfg=dict(type='GN', num_groups=32),
                act_cfg=dict(type='ReLU'),
                encoder=dict(  # DeformableDetrTransformerEncoder
                    num_layers=6,
                    layer_cfg=dict(  # DeformableDetrTransformerEncoderLayer
                        self_attn_cfg=dict(  # MultiScaleDeformableAttention
                            embed_dims=base_channels,
                            num_heads=8,
                            num_levels=3,
                            num_points=4,
                            im2col_step=64,
                            dropout=0.0,
                            batch_first=True,
                            norm_cfg=None,
                            init_cfg=None),
                        ffn_cfg=dict(
                            embed_dims=base_channels,
                            feedforward_channels=base_channels * 4,
                            num_fcs=2,
                            ffn_drop=0.0,
                            act_cfg=dict(type='ReLU', inplace=True))),
                    init_cfg=None),
                positional_encoding=dict(
                    num_feats=base_channels // 2, normalize=True),
                init_cfg=None))
        # mmdet test convention: ``self`` is rebound to the built module.
        self = MODELS.build(pixel_decoder_cfg)
        self.init_weights()
        # Four input pyramid levels; the decoder emits num_outs=3 scales.
        feats = [
            torch.rand(
                (2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
            for i in range(4)
        ]
        mask_feature, multi_scale_features = self(feats)
        assert mask_feature.shape == feats[0].shape
        assert len(multi_scale_features) == 3
        # Outputs come coarsest-first; reverse to compare against the
        # finest-first input levels 1..3.
        multi_scale_features = multi_scale_features[::-1]
        for i in range(3):
            assert multi_scale_features[i].shape[-2:] == feats[i +
                                                              1].shape[-2:]
| 6,408
| 36.261628
| 76
|
py
|
ERD
|
ERD-main/tests/test_models/test_layers/test_inverted_residual.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn import is_norm
from torch.nn.modules import GroupNorm
from mmdet.models.layers import InvertedResidual, SELayer
def test_inverted_residual():
    """Exercise InvertedResidual constructor checks and forward shapes."""
    # --- constructor validation ---
    with pytest.raises(AssertionError):
        # stride is restricted to 1 or 2
        InvertedResidual(16, 16, 32, stride=3)
    with pytest.raises(AssertionError):
        # se_cfg must be None or a dict
        InvertedResidual(16, 16, 32, se_cfg=list())
    with pytest.raises(AssertionError):
        # without the expand conv, in_channels must equal mid_channels
        InvertedResidual(16, 16, 32, with_expand_conv=False)

    # --- stride=1: residual shortcut active, resolution preserved ---
    module = InvertedResidual(16, 16, 32, stride=1)
    inputs = torch.randn(1, 16, 56, 56)
    outputs = module(inputs)
    assert getattr(module, 'se', None) is None
    assert module.with_res_shortcut
    assert outputs.shape == torch.Size((1, 16, 56, 56))

    # --- stride=2: no shortcut, resolution halved ---
    module = InvertedResidual(16, 16, 32, stride=2)
    inputs = torch.randn(1, 16, 56, 56)
    outputs = module(inputs)
    assert not module.with_res_shortcut
    assert outputs.shape == torch.Size((1, 16, 28, 28))

    # --- squeeze-and-excitation branch is built from se_cfg ---
    module = InvertedResidual(16, 16, 32, stride=1, se_cfg=dict(channels=32))
    inputs = torch.randn(1, 16, 56, 56)
    outputs = module(inputs)
    assert isinstance(module.se, SELayer)
    assert outputs.shape == torch.Size((1, 16, 56, 56))

    # --- with_expand_conv=False drops the expand conv entirely ---
    module = InvertedResidual(32, 16, 32, with_expand_conv=False)
    inputs = torch.randn(1, 32, 56, 56)
    outputs = module(inputs)
    assert getattr(module, 'expand_conv', None) is None
    assert outputs.shape == torch.Size((1, 16, 56, 56))

    # --- a GN norm_cfg must be honored by every norm layer ---
    module = InvertedResidual(
        16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
    inputs = torch.randn(1, 16, 56, 56)
    outputs = module(inputs)
    for submodule in module.modules():
        if is_norm(submodule):
            assert isinstance(submodule, GroupNorm)
    assert outputs.shape == torch.Size((1, 16, 56, 56))

    # --- custom activation (HSigmoid) ---
    module = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
    inputs = torch.randn(1, 16, 56, 56)
    outputs = module(inputs)
    assert outputs.shape == torch.Size((1, 16, 56, 56))

    # --- gradient checkpointing path ---
    module = InvertedResidual(16, 16, 32, with_cp=True)
    inputs = torch.randn(1, 16, 56, 56)
    outputs = module(inputs)
    assert module.with_cp
    assert outputs.shape == torch.Size((1, 16, 56, 56))
| 2,636
| 33.246753
| 71
|
py
|
ERD
|
ERD-main/tests/test_models/test_layers/test_conv_upsample.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.layers import ConvUpsample
@pytest.mark.parametrize('num_layers', [0, 1, 2])
def test_conv_upsample(num_layers):
    """ConvUpsample must scale the spatial size by 2**num_upsample."""
    # Requesting zero layers still builds a single conv but performs no
    # upsampling at all.
    num_upsample = num_layers if num_layers > 0 else 0
    num_layers = max(num_layers, 1)
    module = ConvUpsample(
        10,
        5,
        num_layers=num_layers,
        num_upsample=num_upsample,
        conv_cfg=None,
        norm_cfg=None)

    in_size = 5
    inputs = torch.randn((1, 10, in_size, in_size))
    expected = in_size * 2**num_upsample
    outputs = module(inputs)
    assert outputs.shape[-2:] == (expected, expected)
| 629
| 24.2
| 54
|
py
|
ERD
|
ERD-main/tests/test_models/test_layers/__init__.py
| 0
| 0
| 0
|
py
|
|
ERD
|
ERD-main/tests/test_models/test_layers/test_transformer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine.config import ConfigDict
from mmdet.models.layers.transformer import (AdaptivePadding,
DetrTransformerDecoder,
DetrTransformerEncoder,
PatchEmbed, PatchMerging)
def test_adaptive_padding():
    """Check AdaptivePadding output sizes for 'same' and 'corner' modes."""
    for padding in ('same', 'corner'):
        kernel_size = 16
        stride = 16
        dilation = 1
        input = torch.rand(1, 1, 15, 17)
        pool = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        out = pool(input)
        # padding to divisible by 16
        assert (out.shape[2], out.shape[3]) == (16, 32)
        input = torch.rand(1, 1, 16, 17)
        out = pool(input)
        # padding to divisible by 16
        assert (out.shape[2], out.shape[3]) == (16, 32)

        kernel_size = (2, 2)
        stride = (2, 2)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        # padding to divisible by 2
        assert (out.shape[2], out.shape[3]) == (12, 14)

        kernel_size = (2, 2)
        stride = (10, 10)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 10, 13)
        out = adap_pad(input)
        # no padding: one window already covers the input
        assert (out.shape[2], out.shape[3]) == (10, 13)

        kernel_size = (11, 11)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        # all padding: kernel larger than what a single stride step covers
        assert (out.shape[2], out.shape[3]) == (21, 21)

        # test padding when the effective (dilated) kernel is (7, 9)
        input = torch.rand(1, 1, 11, 13)
        stride = (3, 4)
        kernel_size = (4, 5)
        dilation = (2, 2)
        # effective kernel is actually (7, 9)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        dilation_out = adap_pad(input)
        assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21)

        # an undilated (7, 9) kernel must pad identically
        kernel_size = (7, 9)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        kernel79_out = adap_pad(input)
        assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21)
        assert kernel79_out.shape == dilation_out.shape

    # only the string modes 'same' and 'corner' are supported
    with pytest.raises(AssertionError):
        AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=1)
def test_patch_embed():
    """Check PatchEmbed output shapes across kernel/stride/padding setups."""
    B = 2
    H = 3
    W = 4
    C = 3
    embed_dims = 10
    kernel_size = 3
    stride = 1
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_1 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=1,
        norm_cfg=None)
    x1, shape = patch_merge_1(dummy_input)
    # test out shape
    assert x1.shape == (2, 2, 10)
    # test outsize is correct
    assert shape == (1, 2)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x1.shape[1]

    B = 2
    H = 10
    W = 10
    C = 3
    embed_dims = 10
    kernel_size = 5
    stride = 2
    dummy_input = torch.rand(B, C, H, W)
    # test dilation
    patch_merge_2 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=None,
    )
    x2, shape = patch_merge_2(dummy_input)
    # test out shape
    assert x2.shape == (2, 1, 10)
    # test outsize is correct
    assert shape == (1, 1)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x2.shape[1]

    stride = 2
    input_size = (10, 10)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    x3, shape = patch_merge_3(dummy_input)
    # test out shape
    assert x3.shape == (2, 1, 10)
    # test outsize is correct
    assert shape == (1, 1)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x3.shape[1]

    # test the init_out_size with nn.Unfold
    # 2 * 4 is the dilation-expanded kernel margin:
    # effective kernel = 5 + (5 - 1) * (2 - 1) = 9
    assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
                                              1) // 2 + 1
    assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
                                              1) // 2 + 1

    H = 11
    W = 12
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    _, shape = patch_merge_3(dummy_input)
    # when input_size equal to real input
    # the out_size should be equal to `init_out_size`
    assert shape == patch_merge_3.init_out_size

    # NOTE(review): this block repeats the previous check verbatim;
    # it could be removed without losing coverage.
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    _, shape = patch_merge_3(dummy_input)
    # when input_size equal to real input
    # the out_size should be equal to `init_out_size`
    assert shape == patch_merge_3.init_out_size

    # test adaptive padding modes
    for padding in ('same', 'corner'):
        in_c = 2
        embed_dims = 3
        B = 2
        # test stride is 1
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 25, 3)
        assert out_size == (5, 5)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test kernel_size == stride
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 1, 3)
        assert out_size == (1, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test kernel_size == stride with a non-divisible input height
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 2, 3)
        assert out_size == (2, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test different kernel_size with different stride
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 3, 3)
        assert out_size == (1, 3)
        assert x_out.size(1) == out_size[0] * out_size[1]
def test_patch_merging():
    """Check PatchMerging output shapes with int and adaptive padding."""
    # Test the model with int padding
    in_c = 3
    out_c = 4
    kernel_size = 3
    stride = 3
    padding = 1
    dilation = 1
    bias = False
    # test the case `pad_to_stride` is False
    patch_merge = PatchMerging(
        in_channels=in_c,
        out_channels=out_c,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias)
    # PatchMerging consumes a flattened (B, L, C) token sequence plus the
    # 2D size it was flattened from.
    B, L, C = 1, 100, 3
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    x_out, out_size = patch_merge(x, input_size)
    assert x_out.size() == (1, 16, 4)
    assert out_size == (4, 4)
    # assert out size is consistent with real output
    assert x_out.size(1) == out_size[0] * out_size[1]

    in_c = 4
    out_c = 5
    kernel_size = 6
    stride = 3
    padding = 2
    dilation = 2
    bias = False
    patch_merge = PatchMerging(
        in_channels=in_c,
        out_channels=out_c,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias)
    B, L, C = 1, 100, 4
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    x_out, out_size = patch_merge(x, input_size)
    assert x_out.size() == (1, 4, 5)
    assert out_size == (2, 2)
    # assert out size is consistent with real output
    assert x_out.size(1) == out_size[0] * out_size[1]

    # Test with adaptive padding
    for padding in ('same', 'corner'):
        in_c = 2
        out_c = 3
        B = 2
        # test stride is 1
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 25, 3)
        assert out_size == (5, 5)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test kernel_size == stride
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 1, 3)
        assert out_size == (1, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test kernel_size == stride with a non-divisible input height
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 2, 3)
        assert out_size == (2, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]

        # test different kernel_size with different stride
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 3, 3)
        assert out_size == (1, 3)
        assert x_out.size(1) == out_size[0] * out_size[1]
def test_detr_transformer_encoder_decoder():
    """DetrTransformerEncoder/Decoder must stack ``num_layers`` layers.

    Fix: the original constructed each transformer module twice — once for
    the ``len(...)`` check and again for the truthiness check — doubling
    the (non-trivial) module construction cost for no benefit. Each module
    is now built exactly once.
    """
    decoder_cfg = ConfigDict(
        num_layers=6,
        layer_cfg=dict(  # DetrTransformerDecoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1),
            cross_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1),
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,
                num_fcs=2,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True))))
    decoder = DetrTransformerDecoder(**decoder_cfg)
    assert decoder
    assert len(decoder.layers) == 6

    encoder_cfg = ConfigDict(
        dict(
            num_layers=6,
            layer_cfg=dict(  # DetrTransformerEncoderLayer
                self_attn_cfg=dict(  # MultiheadAttention
                    embed_dims=256,
                    num_heads=8,
                    dropout=0.1),
                ffn_cfg=dict(
                    embed_dims=256,
                    feedforward_channels=2048,
                    num_fcs=2,
                    ffn_drop=0.1,
                    act_cfg=dict(type='ReLU', inplace=True)))))
    encoder = DetrTransformerEncoder(**encoder_cfg)
    assert encoder
    assert len(encoder.layers) == 6
| 14,590
| 27.893069
| 73
|
py
|
ERD
|
ERD-main/tests/test_models/test_layers/test_brick_wrappers.py
|
from unittest.mock import patch
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.layers import AdaptiveAvgPool2d, adaptive_avg_pool2d
# The @patch decorators below pin torch.__version__ for the wrapper tests;
# pretend to be torch 1.7 unless actually running on the parrots fork.
torch_version = 'parrots' if torch.__version__ == 'parrots' else '1.7'
@patch('torch.__version__', torch_version)
def test_adaptive_avg_pool2d():
    """Functional wrapper must handle empty batches and match F.* output."""
    # Empty batch dimension, both output-size spellings.
    empty = torch.randn(0, 3, 4, 5)
    assert adaptive_avg_pool2d(empty, (2, 2)).shape == (0, 3, 2, 2)
    assert adaptive_avg_pool2d(empty, 2).shape == (0, 3, 2, 2)

    # Normal batch: wrapper output must be bit-identical to the stock op.
    normal = torch.randn(3, 3, 4, 5)
    for output_size in ((2, 2), 2):
        wrapped = adaptive_avg_pool2d(normal, output_size)
        reference = F.adaptive_avg_pool2d(normal, output_size)
        assert wrapped.shape == (3, 3, 2, 2)
        assert torch.equal(wrapped, reference)
@patch('torch.__version__', torch_version)
def test_AdaptiveAvgPool2d():
    """Module wrapper must handle empty batches and match nn.* output."""
    x_empty = torch.randn(0, 3, 4, 5)
    x_normal = torch.randn(3, 3, 4, 5)

    # (output_size, expected shape for empty batch, for normal batch);
    # covers tuple, int, and the partially-specified (None, int) forms.
    cases = [
        ((2, 2), (0, 3, 2, 2), (3, 3, 2, 2)),
        (2, (0, 3, 2, 2), (3, 3, 2, 2)),
        ((None, 2), (0, 3, 4, 2), (3, 3, 4, 2)),
        ((2, None), (0, 3, 2, 5), (3, 3, 2, 5)),
    ]
    for output_size, empty_shape, normal_shape in cases:
        wrapper = AdaptiveAvgPool2d(output_size)
        # Must survive an empty batch dimension ...
        assert wrapper(x_empty).shape == empty_shape
        # ... and agree exactly with the stock nn implementation.
        reference = nn.AdaptiveAvgPool2d(output_size)
        wrapper_out = wrapper(x_normal)
        ref_out = reference(x_normal)
        assert wrapper_out.shape == normal_shape
        assert torch.equal(wrapper_out, ref_out)
| 2,932
| 30.202128
| 70
|
py
|
ERD
|
ERD-main/tests/test_models/test_seg_heads/test_panoptic_fpn_head.py
|
import unittest
import torch
from mmengine.structures import PixelData
from mmengine.testing import assert_allclose
from mmdet.models.seg_heads import PanopticFPNHead
from mmdet.structures import DetDataSample
class TestPanopticFPNHead(unittest.TestCase):
    """Tests for ``PanopticFPNHead`` weight init, loss and predict paths."""

    def test_init_weights(self):
        head = PanopticFPNHead(
            num_things_classes=2,
            num_stuff_classes=2,
            in_channels=32,
            inner_channels=32)
        head.init_weights()
        # init_weights must zero out the logits conv bias.
        assert_allclose(head.conv_logits.bias.data,
                        torch.zeros_like(head.conv_logits.bias.data))

    def test_loss(self):
        head = PanopticFPNHead(
            num_things_classes=2,
            num_stuff_classes=2,
            in_channels=32,
            inner_channels=32,
            start_level=0,
            end_level=1)
        # Two FPN levels for a batch of 2.
        x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
        data_sample1 = DetDataSample()
        # Labels drawn from [0, 4): num_things + num_stuff classes.
        data_sample1.gt_sem_seg = PixelData(
            sem_seg=torch.randint(0, 4, (1, 7, 8)))
        data_sample2 = DetDataSample()
        data_sample2.gt_sem_seg = PixelData(
            sem_seg=torch.randint(0, 4, (1, 7, 8)))
        batch_data_samples = [data_sample1, data_sample2]
        results = head.loss(x, batch_data_samples)
        self.assertIsInstance(results, dict)

    def test_predict(self):
        head = PanopticFPNHead(
            num_things_classes=2,
            num_stuff_classes=2,
            in_channels=32,
            inner_channels=32,
            start_level=0,
            end_level=1)
        x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
        # Sample 1 was padded and resized; sample 2 matches exactly, so
        # rescale=True only changes the first prediction's size.
        img_meta1 = {
            'batch_input_shape': (16, 16),
            'img_shape': (14, 14),
            'ori_shape': (12, 12),
        }
        img_meta2 = {
            'batch_input_shape': (16, 16),
            'img_shape': (16, 16),
            'ori_shape': (16, 16),
        }
        batch_img_metas = [img_meta1, img_meta2]
        head.eval()
        with torch.no_grad():
            # Without rescale, predictions stay at batch input size.
            seg_preds = head.predict(x, batch_img_metas, rescale=False)
            self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
            self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
            # With rescale, predictions are mapped back to ori_shape.
            seg_preds = head.predict(x, batch_img_metas, rescale=True)
            self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
            self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
| 2,446
| 33.464789
| 71
|
py
|
ERD
|
ERD-main/tests/test_models/test_seg_heads/test_heuristic_fusion_head.py
|
import unittest
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead
class TestHeuristicFusionHead(unittest.TestCase):
    """Tests for ``HeuristicFusionHead`` panoptic fusion."""

    def test_loss(self):
        head = HeuristicFusionHead(num_things_classes=2, num_stuff_classes=2)
        # The heuristic fusion head is training-free: loss() is an
        # empty dict and with_loss is False.
        result = head.loss()
        self.assertTrue(not head.with_loss)
        self.assertDictEqual(result, dict())

    def test_predict(self):
        test_cfg = Config(dict(mask_overlap=0.5, stuff_area_limit=1))
        head = HeuristicFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        # Two non-overlapping instance masks on a 2x2 grid.
        mask_results = InstanceData()
        mask_results.bboxes = torch.tensor([[0, 0, 1, 1], [1, 1, 2, 2]])
        mask_results.labels = torch.tensor([0, 1])
        mask_results.scores = torch.tensor([0.8, 0.7])
        mask_results.masks = torch.tensor([[[1, 0], [0, 0]], [[0, 0],
                                                              [0, 1]]]).bool()
        # Per-pixel semantic scores (3 channels) over the same 2x2 grid.
        seg_preds_list = [
            torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
                          [[0.6, 0.1], [0.1, 0.8]]])
        ]
        # Expected panoptic ids combine class label and a 1-based
        # instance index scaled by INSTANCE_OFFSET for thing pixels.
        target_list = [
            torch.tensor([[0 + 1 * INSTANCE_OFFSET, 2],
                          [3, 1 + 2 * INSTANCE_OFFSET]])
        ]
        results_list = head.predict([mask_results], seg_preds_list)
        for target, result in zip(target_list, results_list):
            assert_allclose(result.sem_seg[0], target)

        # test with no thing: empty instance predictions
        head = HeuristicFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        mask_results = InstanceData()
        mask_results.bboxes = torch.zeros((0, 4))
        mask_results.labels = torch.zeros((0, )).long()
        mask_results.scores = torch.zeros((0, ))
        mask_results.masks = torch.zeros((0, 2, 2), dtype=torch.bool)
        seg_preds_list = [
            torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
                          [[0.6, 0.1], [0.1, 0.8]]])
        ]
        # presumably 4 (= num_things + num_stuff) marks pixels assigned to
        # no valid segment — verify against the head implementation
        target_list = [torch.tensor([[4, 2], [3, 4]])]
        results_list = head.predict([mask_results], seg_preds_list)
        for target, result in zip(target_list, results_list):
            assert_allclose(result.sem_seg[0], target)
| 2,440
| 39.683333
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_seg_heads/test_maskformer_fusion_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.config import Config
from mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead
from mmdet.structures import DetDataSample
class TestMaskFormerFusionHead(unittest.TestCase):
    """Tests for ``MaskFormerFusionHead`` post-processing modes."""

    def test_loss(self):
        head = MaskFormerFusionHead(num_things_classes=2, num_stuff_classes=2)
        # The fusion head is training-free: loss() is an empty dict.
        result = head.loss()
        self.assertTrue(not head.with_loss)
        self.assertDictEqual(result, dict())

    def test_predict(self):
        # 2 images, 10 queries, 5 class scores (4 classes + background),
        # 32x32 mask logits.
        mask_cls_results = torch.rand((2, 10, 5))
        mask_pred_results = torch.rand((2, 10, 32, 32))
        batch_data_samples = [
            DetDataSample(
                metainfo={
                    'batch_input_shape': (32, 32),
                    'img_shape': (32, 30),
                    'ori_shape': (30, 30)
                }),
            DetDataSample(
                metainfo={
                    'batch_input_shape': (32, 32),
                    'img_shape': (32, 30),
                    'ori_shape': (29, 30)
                })
        ]

        # get panoptic and instance segmentation results
        test_cfg = Config(
            dict(
                panoptic_on=True,
                semantic_on=False,
                instance_on=True,
                max_per_image=10,
                object_mask_thr=0.3,
                iou_thr=0.3,
                filter_low_score=False))
        head = MaskFormerFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        # Without rescale the outputs stay at img_shape ...
        results = head.predict(
            mask_cls_results,
            mask_pred_results,
            batch_data_samples,
            rescale=False)
        for i in range(len(results)):
            self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],
                             batch_data_samples[i].img_shape)
            self.assertEqual(results[i]['ins_results'].masks.shape[-2:],
                             batch_data_samples[i].img_shape)
        # ... with rescale they are mapped back to ori_shape.
        results = head.predict(
            mask_cls_results,
            mask_pred_results,
            batch_data_samples,
            rescale=True)
        for i in range(len(results)):
            self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],
                             batch_data_samples[i].ori_shape)
            self.assertEqual(results[i]['ins_results'].masks.shape[-2:],
                             batch_data_samples[i].ori_shape)

        # all modes disabled: per-image results must be empty dicts
        test_cfg = Config(
            dict(
                panoptic_on=False,
                semantic_on=False,
                instance_on=False,
                max_per_image=10,
                object_mask_thr=0.3,
                iou_thr=0.3,
                filter_low_score=False))
        head = MaskFormerFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        results = head.predict(
            mask_cls_results,
            mask_pred_results,
            batch_data_samples,
            rescale=True)
        for i in range(len(results)):
            self.assertEqual(results[i], dict())

        # semantic segmentation is not supported and must be rejected
        test_cfg = Config(
            dict(
                panoptic_on=False,
                semantic_on=True,
                instance_on=False,
                max_per_image=10,
                object_mask_thr=0.3,
                iou_thr=0.3,
                filter_low_score=False))
        head = MaskFormerFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        with self.assertRaises(AssertionError):
            results = head.predict(
                mask_cls_results,
                mask_pred_results,
                batch_data_samples,
                rescale=True)
| 3,827
| 34.119266
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_losses/test_loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmengine.utils import digit_version
from mmdet.models.losses import (BalancedL1Loss, CrossEntropyLoss, DiceLoss,
DistributionFocalLoss, FocalLoss,
GaussianFocalLoss,
KnowledgeDistillationKLDivLoss, L1Loss,
MSELoss, QualityFocalLoss, SeesawLoss,
SmoothL1Loss, VarifocalLoss)
from mmdet.models.losses.ghm_loss import GHMC, GHMR
from mmdet.models.losses.iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss,
EIoULoss, GIoULoss, IoULoss)
@pytest.mark.parametrize(
    'loss_class',
    [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, EIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
    """An all-zero element weight must drive every IoU-style loss to 0."""
    preds = torch.rand((10, 4))
    targets = torch.rand((10, 4))
    zero_weight = torch.zeros(10)
    assert loss_class()(preds, targets, zero_weight) == 0.
@pytest.mark.parametrize('loss_class', [
    BalancedL1Loss, BoundedIoULoss, CIoULoss, CrossEntropyLoss, DIoULoss,
    EIoULoss, FocalLoss, DistributionFocalLoss, MSELoss, SeesawLoss,
    GaussianFocalLoss, GIoULoss, QualityFocalLoss, IoULoss, L1Loss,
    VarifocalLoss, GHMR, GHMC, SmoothL1Loss, KnowledgeDistillationKLDivLoss,
    DiceLoss
])
def test_loss_with_reduction_override(loss_class):
    """An invalid ``reduction_override`` must raise an AssertionError.

    Fix: the original had a stray trailing comma after
    ``torch.rand((10, 4))``, which wrapped the target tensor in a 1-tuple.
    """
    pred = torch.rand((10, 4))
    target = torch.rand((10, 4))
    weight = None
    with pytest.raises(AssertionError):
        # only reduction_override from [None, 'none', 'mean', 'sum']
        # is allowed
        reduction_override = True
        loss_class()(
            pred, target, weight, reduction_override=reduction_override)
@pytest.mark.parametrize('loss_class', [QualityFocalLoss])
@pytest.mark.parametrize('activated', [False, True])
def test_QualityFocalLoss_Loss(loss_class, activated):
    """Tuple targets and dense one-hot targets must yield the same loss."""
    num_samples, num_classes = 4, 5
    pred = torch.rand((num_samples, num_classes))
    label = torch.Tensor([0, 1, 2, 0]).long()
    quality_label = torch.rand(num_samples)

    # Sparse form: (class index, quality score) tuple target.
    original_loss = loss_class(activated=activated)(pred,
                                                    (label, quality_label))
    assert isinstance(original_loss, torch.Tensor)

    # Dense form: one-hot labels scaled row-wise by the quality scores.
    target = torch.nn.functional.one_hot(label, num_classes)
    target = target * quality_label.reshape(num_samples, 1)
    new_loss = loss_class(activated=activated)(pred, target)
    assert isinstance(new_loss, torch.Tensor)
    assert new_loss == original_loss
@pytest.mark.parametrize('loss_class', [
    IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, EIoULoss, MSELoss,
    L1Loss, SmoothL1Loss, BalancedL1Loss
])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_regression_losses(loss_class, input_shape):
    """Regression losses accept weight, reduction_override and avg_factor.

    Fix: the final loop previously asserted a stale ``loss`` variable from
    an earlier call, so the values produced inside the loop were never
    actually validated.
    """
    pred = torch.rand(input_shape)
    target = torch.rand(input_shape)
    weight = torch.rand(input_shape)
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with weight
    loss = loss_class()(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # avg_factor is only usable when reduction is
        # None, 'none' or 'mean'
        loss_class()(pred, target, avg_factor=10, reduction_override='sum')
    # Test loss forward with avg_factor and each compatible reduction
    for reduction_override in [None, 'none', 'mean']:
        loss = loss_class()(
            pred, target, avg_factor=10,
            reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [CrossEntropyLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)])
def test_classification_losses(loss_class, input_shape):
    """Classification losses accept reduction_override and avg_factor.

    Fix: the final loop previously asserted a stale ``loss`` variable from
    an earlier call, so the values produced inside the loop were never
    actually validated.
    """
    # Old PyTorch cannot evaluate CE on an empty batch.
    if input_shape[0] == 0 and digit_version(
            torch.__version__) < digit_version('1.5.0'):
        pytest.skip(
            f'CELoss in PyTorch {torch.__version__} does not support empty'
            f'tensor.')
    pred = torch.rand(input_shape)
    target = torch.randint(0, 5, (input_shape[0], ))
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # avg_factor is only usable when reduction is
        # None, 'none' or 'mean'
        loss_class()(pred, target, avg_factor=10, reduction_override='sum')
    # Test loss forward with avg_factor and each compatible reduction
    for reduction_override in [None, 'none', 'mean']:
        loss = loss_class()(
            pred, target, avg_factor=10,
            reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [FocalLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (3, 5, 40, 40)])
def test_FocalLoss_loss(loss_class, input_shape):
    """Smoke-test FocalLoss with 2D logits and 4D dense one-hot targets.

    Covers the plain forward pass, ``reduction_override``, ``avg_factor``
    handling, and the ``ValueError`` raised when ``avg_factor`` is combined
    with ``reduction_override='sum'``.
    """
    pred = torch.rand(input_shape)
    target = torch.randint(0, 5, (input_shape[0], ))
    if len(input_shape) == 4:
        # Dense targets: random one-hot labels laid out as (B, C, H, W).
        B, N, W, H = input_shape
        target = F.one_hot(torch.randint(0, 5, (B * W * H, )),
                           5).reshape(B, W, H, N).permute(0, 3, 1, 2)
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        loss_class()(pred, target, avg_factor=10, reduction_override='sum')
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        # Fix: capture each iteration's result; previously the return value
        # was discarded and the assert re-checked a stale ``loss``.
        loss = loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [GHMR])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_GHMR_loss(loss_class, input_shape):
    """Smoke-test GHMR: a weighted forward pass returns a tensor."""
    pred, target, weight = (torch.rand(input_shape) for _ in range(3))
    criterion = loss_class()
    # A weighted forward pass must yield a tensor, even for empty input.
    result = criterion(pred, target, weight)
    assert isinstance(result, torch.Tensor)
@pytest.mark.parametrize('use_sigmoid', [True, False])
@pytest.mark.parametrize('reduction', ['sum', 'mean', None])
@pytest.mark.parametrize('avg_non_ignore', [True, False])
def test_loss_with_ignore_index(use_sigmoid, reduction, avg_non_ignore):
    """Check ``CrossEntropyLoss`` handling of the ignore index (255).

    Covers the configured ignore index, a per-call ``ignore_index``
    override, the ``avg_non_ignore`` averaging mode, and the corner case
    where every target element is ignored.
    """
    # Test cross_entropy loss
    loss_class = CrossEntropyLoss(
        use_sigmoid=use_sigmoid,
        use_mask=False,
        ignore_index=255,
        avg_non_ignore=avg_non_ignore)
    pred = torch.rand((10, 5))
    target = torch.randint(0, 5, (10, ))
    # Mark two random positions with the ignore label 255.
    ignored_indices = torch.randint(0, 10, (2, ), dtype=torch.long)
    target[ignored_indices] = 255
    # Test loss forward with default ignore
    loss_with_ignore = loss_class(pred, target, reduction_override=reduction)
    assert isinstance(loss_with_ignore, torch.Tensor)
    # Test loss forward with forward ignore
    # NOTE(review): this re-assignment is redundant — the same indices were
    # already set to 255 above.
    target[ignored_indices] = 255
    loss_with_forward_ignore = loss_class(
        pred, target, ignore_index=255, reduction_override=reduction)
    assert isinstance(loss_with_forward_ignore, torch.Tensor)
    # Verify correctness: dropping the ignored elements by hand must give
    # the same value as letting the loss ignore them internally.
    if avg_non_ignore:
        # manually remove the ignored elements
        not_ignored_indices = (target != 255)
        pred = pred[not_ignored_indices]
        target = target[not_ignored_indices]
    loss = loss_class(pred, target, reduction_override=reduction)
    assert torch.allclose(loss, loss_with_ignore)
    assert torch.allclose(loss, loss_with_forward_ignore)
    # test ignore all target: with everything ignored the loss must be zero.
    pred = torch.rand((10, 5))
    target = torch.ones((10, ), dtype=torch.long) * 255
    loss = loss_class(pred, target, reduction_override=reduction)
    assert loss == 0
@pytest.mark.parametrize('naive_dice', [True, False])
def test_dice_loss(naive_dice):
    """Smoke-test DiceLoss in both naive and standard formulations.

    Covers plain/weighted forwards, ``reduction_override``, ``avg_factor``
    handling, the unsupported ``use_sigmoid=False`` + ``activate=True``
    combination, and weight-shape validation.
    """
    loss_class = DiceLoss
    pred = torch.rand((10, 4, 4))
    target = torch.rand((10, 4, 4))
    weight = torch.rand((10))
    # Test loss forward
    loss = loss_class(naive_dice=naive_dice)(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with weight
    loss = loss_class(naive_dice=naive_dice)(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class(naive_dice=naive_dice)(
        pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class(naive_dice=naive_dice)(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        loss_class(naive_dice=naive_dice)(
            pred, target, avg_factor=10, reduction_override='sum')
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        # Fix: capture each iteration's result; previously the return value
        # was discarded and the assert re-checked a stale ``loss``.
        loss = loss_class(naive_dice=naive_dice)(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
    # Test loss forward with has_acted=False and use_sigmoid=False
    with pytest.raises(NotImplementedError):
        loss_class(
            use_sigmoid=False, activate=True, naive_dice=naive_dice)(pred,
                                                                     target)
    # Test loss forward with weight.ndim != loss.ndim
    with pytest.raises(AssertionError):
        weight = torch.rand((2, 8))
        loss_class(naive_dice=naive_dice)(pred, target, weight)
    # Test loss forward with len(weight) != len(pred)
    with pytest.raises(AssertionError):
        weight = torch.rand((8))
        loss_class(naive_dice=naive_dice)(pred, target, weight)
| 10,928
| 36.428082
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_losses/test_gaussian_focal_loss.py
|
import unittest
import torch
from mmdet.models.losses import GaussianFocalLoss
class TestGaussianFocalLoss(unittest.TestCase):
    """Unit tests for ``GaussianFocalLoss``."""

    def test_forward(self):
        """Exercise default reduction, 'none', overrides and pos-index mode."""
        predictions = torch.rand((10, 4))
        targets = torch.rand((10, 4))

        criterion = GaussianFocalLoss()
        self.assertIsInstance(criterion(predictions, targets), torch.Tensor)
        self.assertIsInstance(
            criterion(predictions, targets, avg_factor=0.5), torch.Tensor)

        # With reduction='none' the element-wise loss keeps the input shape.
        criterion = GaussianFocalLoss(reduction='none')
        self.assertTrue(criterion(predictions, targets).shape == (10, 4))

        # reduction_override='mean' collapses the loss to a scalar.
        overridden = criterion(predictions, targets, reduction_override='mean')
        self.assertTrue(overridden.ndim == 0)

        # Only None, 'none', 'mean' and 'sum' are accepted as overrides.
        with self.assertRaises(AssertionError):
            criterion(predictions, targets, reduction_override='max')

        # Sparse positive-index mode: loss at given indices/labels only.
        pos_inds = (torch.rand(5) * 8).long()
        pos_labels = (torch.rand(5) * 2).long()
        criterion = GaussianFocalLoss()
        self.assertIsInstance(
            criterion(predictions, targets, pos_inds, pos_labels),
            torch.Tensor)
| 1,303
| 32.435897
| 75
|
py
|
ERD
|
ERD-main/tests/test_models/test_data_preprocessors/test_data_preprocessor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.logging import MessageHub
from mmdet.models.data_preprocessors import (BatchFixedSizePad,
BatchSyncRandomResize,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs
class TestDetDataPreprocessor(TestCase):
    """Unit tests for ``DetDataPreprocessor`` and its batch augments."""

    def test_init(self):
        """Constructor argument validation and normalization setup."""
        # test mean is None
        processor = DetDataPreprocessor()
        self.assertTrue(not hasattr(processor, 'mean'))
        self.assertTrue(processor._enable_normalize is False)
        # test mean is not None
        processor = DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])
        self.assertTrue(hasattr(processor, 'mean'))
        self.assertTrue(hasattr(processor, 'std'))
        self.assertTrue(processor._enable_normalize)
        # please specify both mean and std
        with self.assertRaises(AssertionError):
            DetDataPreprocessor(mean=[0, 0, 0])
        # bgr2rgb and rgb2bgr cannot be set to True at the same time
        with self.assertRaises(AssertionError):
            DetDataPreprocessor(bgr_to_rgb=True, rgb_to_bgr=True)

    def test_forward(self):
        """Stacking, channel conversion, batch padding, and mask/seg padding."""
        processor = DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])
        data = {
            'inputs': [torch.randint(0, 256, (3, 11, 10))],
            'data_samples': [DetDataSample()]
        }
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))
        self.assertEqual(len(batch_data_samples), 1)
        # test channel_conversion
        processor = DetDataPreprocessor(
            mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))
        self.assertEqual(len(batch_data_samples), 1)
        # test padding: images are padded to the per-batch max height/width.
        data = {
            'inputs': [
                torch.randint(0, 256, (3, 10, 11)),
                torch.randint(0, 256, (3, 9, 14))
            ]
        }
        processor = DetDataPreprocessor(
            mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (2, 3, 10, 14))
        self.assertIsNone(batch_data_samples)
        # test pad_size_divisor: batch shape rounds up to a multiple of 5.
        data = {
            'inputs': [
                torch.randint(0, 256, (3, 10, 11)),
                torch.randint(0, 256, (3, 9, 24))
            ],
            'data_samples': [DetDataSample()] * 2
        }
        processor = DetDataPreprocessor(
            mean=[0., 0., 0.], std=[1., 1., 1.], pad_size_divisor=5)
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (2, 3, 10, 25))
        self.assertEqual(len(batch_data_samples), 2)
        for data_samples, expected_shape in zip(batch_data_samples,
                                                [(10, 15), (10, 25)]):
            self.assertEqual(data_samples.pad_shape, expected_shape)
        # test pad_mask=True and pad_seg=True
        processor = DetDataPreprocessor(
            pad_mask=True, mask_pad_value=0, pad_seg=True, seg_pad_value=0)
        packed_inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]],
            with_mask=True,
            with_semantic=True,
            use_box_type=True)
        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 10, 11))
        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 9, 24))
        # Record sums before padding; zero-padding must leave them unchanged.
        mask_pad_sums = [
            x.gt_instances.masks.masks.sum()
            for x in packed_inputs['data_samples']
        ]
        seg_pad_sums = [
            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']
        ]
        batch_data_samples = processor(
            packed_inputs, training=True)['data_samples']
        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(
                batch_data_samples, [(10, 24), (10, 24)], mask_pad_sums,
                seg_pad_sums):
            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),
                             mask_pad_sum)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),
                             seg_pad_sum)

    def test_batch_sync_random_resize(self):
        """``BatchSyncRandomResize`` resizes only at the configured interval
        and only in training mode."""
        processor = DetDataPreprocessor(batch_augments=[
            dict(
                type='BatchSyncRandomResize',
                random_size_range=(320, 320),
                size_divisor=32,
                interval=1)
        ])
        self.assertTrue(
            isinstance(processor.batch_augments[0], BatchSyncRandomResize))
        message_hub = MessageHub.get_instance('test_batch_sync_random_resize')
        message_hub.update_info('iter', 0)
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)
        # Iteration 0: size unchanged.
        batch_inputs = processor(packed_inputs, training=True)['inputs']
        self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))
        # resize after one iter
        message_hub.update_info('iter', 1)
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)
        batch_inputs = processor(packed_inputs, training=True)['inputs']
        self.assertEqual(batch_inputs.shape, (2, 3, 320, 320))
        # Inference mode: the augment is skipped and size is unchanged.
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)
        batch_inputs = processor(packed_inputs, training=False)['inputs']
        self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))

    def test_batch_fixed_size_pad(self):
        """``BatchFixedSizePad`` with and without preprocessor-level padding,
        plus the case where the fixed size is smaller than the batch."""
        # test pad_mask=False and pad_seg=False
        processor = DetDataPreprocessor(
            pad_mask=False,
            pad_seg=False,
            batch_augments=[
                dict(
                    type='BatchFixedSizePad',
                    size=(32, 32),
                    img_pad_value=0,
                    pad_mask=True,
                    mask_pad_value=0,
                    pad_seg=True,
                    seg_pad_value=0)
            ])
        self.assertTrue(
            isinstance(processor.batch_augments[0], BatchFixedSizePad))
        packed_inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]],
            with_mask=True,
            with_semantic=True,
            use_box_type=True)
        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 10, 11))
        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 9, 24))
        # Record sums before padding; zero-padding must not change them.
        mask_pad_sums = [
            x.gt_instances.masks.masks.sum()
            for x in packed_inputs['data_samples']
        ]
        seg_pad_sums = [
            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']
        ]
        data = processor(packed_inputs, training=True)
        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']
        self.assertEqual(batch_inputs.shape[-2:], (32, 32))
        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(
                batch_data_samples, [(32, 32), (32, 32)], mask_pad_sums,
                seg_pad_sums):
            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),
                             mask_pad_sum)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),
                             seg_pad_sum)
        # test pad_mask=True and pad_seg=True
        processor = DetDataPreprocessor(
            pad_mask=True,
            pad_seg=True,
            seg_pad_value=0,
            mask_pad_value=0,
            batch_augments=[
                dict(
                    type='BatchFixedSizePad',
                    size=(32, 32),
                    img_pad_value=0,
                    pad_mask=True,
                    mask_pad_value=0,
                    pad_seg=True,
                    seg_pad_value=0)
            ])
        self.assertTrue(
            isinstance(processor.batch_augments[0], BatchFixedSizePad))
        packed_inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]],
            with_mask=True,
            with_semantic=True,
            use_box_type=True)
        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 10, 11))
        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 9, 24))
        mask_pad_sums = [
            x.gt_instances.masks.masks.sum()
            for x in packed_inputs['data_samples']
        ]
        seg_pad_sums = [
            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']
        ]
        data = processor(packed_inputs, training=True)
        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']
        self.assertEqual(batch_inputs.shape[-2:], (32, 32))
        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(
                batch_data_samples, [(32, 32), (32, 32)], mask_pad_sums,
                seg_pad_sums):
            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),
                             mask_pad_sum)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),
                             seg_pad_sum)
        # test negative pad/no pad: fixed size (5, 5) is smaller than the
        # batch, so shapes stay at the batch maximum (10, 24).
        processor = DetDataPreprocessor(
            pad_mask=True,
            pad_seg=True,
            seg_pad_value=0,
            mask_pad_value=0,
            batch_augments=[
                dict(
                    type='BatchFixedSizePad',
                    size=(5, 5),
                    img_pad_value=0,
                    pad_mask=True,
                    mask_pad_value=1,
                    pad_seg=True,
                    seg_pad_value=1)
            ])
        self.assertTrue(
            isinstance(processor.batch_augments[0], BatchFixedSizePad))
        packed_inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]],
            with_mask=True,
            with_semantic=True,
            use_box_type=True)
        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 10, 11))
        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 9, 24))
        mask_pad_sums = [
            x.gt_instances.masks.masks.sum()
            for x in packed_inputs['data_samples']
        ]
        seg_pad_sums = [
            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']
        ]
        data = processor(packed_inputs, training=True)
        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']
        self.assertEqual(batch_inputs.shape[-2:], (10, 24))
        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(
                batch_data_samples, [(10, 24), (10, 24)], mask_pad_sums,
                seg_pad_sums):
            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),
                             mask_pad_sum)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),
                             seg_pad_sum)
class TestMultiBranchDataPreprocessor(TestCase):
    """Tests for ``MultiBranchDataPreprocessor`` (multi-branch inputs as used
    by semi-supervised detectors)."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.data_preprocessor = dict(
            type='DetDataPreprocessor',
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            bgr_to_rgb=True,
            pad_size_divisor=32)
        # Branch-keyed inputs: one supervised sample, two unsupervised
        # samples each for the teacher and student branches.
        self.multi_data = {
            'inputs': {
                'sup': [torch.randint(0, 256, (3, 224, 224))],
                'unsup_teacher': [
                    torch.randint(0, 256, (3, 400, 600)),
                    torch.randint(0, 256, (3, 600, 400))
                ],
                'unsup_student': [
                    torch.randint(0, 256, (3, 700, 500)),
                    torch.randint(0, 256, (3, 500, 700))
                ]
            },
            'data_samples': {
                'sup': [DetDataSample()],
                'unsup_teacher': [DetDataSample(),
                                  DetDataSample()],
                'unsup_student': [DetDataSample(),
                                  DetDataSample()],
            }
        }
        # Plain (single-branch) data used for the inference path.
        self.data = {
            'inputs': [torch.randint(0, 256, (3, 224, 224))],
            'data_samples': [DetDataSample()]
        }

    def test_multi_data_preprocessor(self):
        """Each branch is preprocessed independently during training; plain
        data passes straight through during testing."""
        processor = MultiBranchDataPreprocessor(self.data_preprocessor)
        # test processing multi_data when training
        multi_data = processor(self.multi_data, training=True)
        self.assertEqual(multi_data['inputs']['sup'].shape, (1, 3, 224, 224))
        # 600 and 700 are padded up to the next multiple of 32 (608, 704).
        self.assertEqual(multi_data['inputs']['unsup_teacher'].shape,
                         (2, 3, 608, 608))
        self.assertEqual(multi_data['inputs']['unsup_student'].shape,
                         (2, 3, 704, 704))
        self.assertEqual(len(multi_data['data_samples']['sup']), 1)
        self.assertEqual(len(multi_data['data_samples']['unsup_teacher']), 2)
        self.assertEqual(len(multi_data['data_samples']['unsup_student']), 2)
        # test processing data when testing
        data = processor(self.data)
        self.assertEqual(data['inputs'].shape, (1, 3, 224, 224))
        self.assertEqual(len(data['data_samples']), 1)
| 15,083
| 41.134078
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_data_preprocessors/test_batch_resize.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from mmdet.models.data_preprocessors import BatchResize, DetDataPreprocessor
from mmdet.testing import demo_mm_inputs
class TestDetDataPreprocessor(TestCase):
    """Tests for ``DetDataPreprocessor`` with a ``BatchResize`` augment."""

    def test_batch_resize(self):
        """Batch resize should rescale every image to one common shape."""
        preprocessor = DetDataPreprocessor(
            mean=[103.53, 116.28, 123.675],
            std=[57.375, 57.12, 58.395],
            bgr_to_rgb=False,
            batch_augments=[
                dict(type='BatchResize', scale=(32, 32), pad_size_divisor=32)
            ])
        # The augment config must have been built into a BatchResize.
        self.assertIsInstance(preprocessor.batch_augments[0], BatchResize)

        inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]], use_box_type=True)
        out = preprocessor(inputs, training=True)
        images, samples = out['inputs'], out['data_samples']

        # Every image reaches the common (32, 32) target size ...
        self.assertEqual(images.shape[-2:], (32, 32))
        # ... and both samples record the same scale factor.
        self.assertEqual(samples[0].scale_factor, samples[1].scale_factor)
| 1,050
| 36.535714
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_data_preprocessors/test_boxinst_preprocessor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.models.data_preprocessors import BoxInstDataPreprocessor
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs
class TestBoxInstDataPreprocessor(TestCase):
    """Tests for ``BoxInstDataPreprocessor``."""

    def test_forward(self):
        """Evaluation mode, and training mode with zero / non-zero gt boxes."""
        processor = BoxInstDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])
        data = {
            'inputs': [torch.randint(0, 256, (3, 256, 256))],
            'data_samples': [DetDataSample()]
        }
        # Test evaluation mode
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (1, 3, 256, 256))
        self.assertEqual(len(batch_data_samples), 1)
        # Test training mode without gt bboxes: masks and pairwise masks
        # are generated but stay empty.
        packed_inputs = demo_mm_inputs(
            2, [[3, 256, 256], [3, 128, 128]], num_items=[0, 0])
        out_data = processor(packed_inputs, training=True)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (2, 3, 256, 256))
        self.assertEqual(len(batch_data_samples), 2)
        self.assertEqual(len(batch_data_samples[0].gt_instances.masks), 0)
        self.assertEqual(
            len(batch_data_samples[0].gt_instances.pairwise_masks), 0)
        self.assertEqual(len(batch_data_samples[1].gt_instances.masks), 0)
        self.assertEqual(
            len(batch_data_samples[1].gt_instances.pairwise_masks), 0)
        # Test training mode with gt bboxes: one mask and one pairwise mask
        # per ground-truth box.
        packed_inputs = demo_mm_inputs(
            2, [[3, 256, 256], [3, 128, 128]], num_items=[2, 1])
        out_data = processor(packed_inputs, training=True)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (2, 3, 256, 256))
        self.assertEqual(len(batch_data_samples), 2)
        self.assertEqual(len(batch_data_samples[0].gt_instances.masks), 2)
        self.assertEqual(
            len(batch_data_samples[0].gt_instances.pairwise_masks), 2)
        self.assertEqual(len(batch_data_samples[1].gt_instances.masks), 1)
        self.assertEqual(
            len(batch_data_samples[1].gt_instances.pairwise_masks), 1)
| 2,360
| 38.35
| 74
|
py
|
ERD
|
ERD-main/tests/test_models/test_utils/test_model_misc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.autograd import gradcheck
from mmdet.models.utils import interpolate_as, sigmoid_geometric_mean
def test_interpolate_as():
    """``interpolate_as`` resizes ``source`` to ``target``'s spatial size."""
    source = torch.rand((1, 5, 4, 4))
    target = torch.rand((1, 1, 16, 16))

    # 4D source and 4D target.
    assert interpolate_as(source, target).shape == torch.Size((1, 5, 16, 16))
    # A 3D target still drives the spatial size of a 4D source.
    assert interpolate_as(source, target.squeeze(0)).shape == torch.Size(
        (1, 5, 16, 16))
    # A 3D source produces a 3D result.
    assert interpolate_as(source.squeeze(0), target).shape == torch.Size(
        (5, 16, 16))
    # The target may also be a plain numpy array.
    np_target = np.random.rand(16, 16)
    assert interpolate_as(source.squeeze(0),
                          np_target).shape == torch.Size((5, 16, 16))
def test_sigmoid_geometric_mean():
    """Numerically verify the gradients of ``sigmoid_geometric_mean``."""
    lhs = torch.randn(20, 20, dtype=torch.double, requires_grad=True)
    rhs = torch.randn(20, 20, dtype=torch.double, requires_grad=True)
    # gradcheck compares analytic and finite-difference gradients.
    assert gradcheck(sigmoid_geometric_mean, (lhs, rhs), eps=1e-6, atol=1e-4)
| 1,149
| 30.081081
| 73
|
py
|
ERD
|
ERD-main/tests/test_models/test_utils/test_misc.py
|
import copy
import pytest
import torch
from mmengine.structures import InstanceData
from mmdet.models.utils import (empty_instances, filter_gt_instances,
rename_loss_dict, reweight_loss_dict,
unpack_gt_instances)
from mmdet.testing import demo_mm_inputs
def test_parse_gt_instance_info():
    """``unpack_gt_instances`` yields one entry per input data sample."""
    data_samples = demo_mm_inputs()['data_samples']
    gt_instances, gt_instances_ignore, img_metas = unpack_gt_instances(
        data_samples)
    expected = len(data_samples)
    assert len(gt_instances) == expected
    assert len(gt_instances_ignore) == expected
    assert len(img_metas) == expected
def test_process_empty_roi():
    """``empty_instances`` builds empty bbox/mask results per image."""
    num_imgs = 2
    img_metas = [{'ori_shape': (10, 12)}] * num_imgs
    device = torch.device('cpu')

    # Empty bbox results: zero rows of 4-coordinate boxes.
    bbox_results = empty_instances(img_metas, device, task_type='bbox')
    assert len(bbox_results) == num_imgs
    for res in bbox_results:
        assert isinstance(res, InstanceData)
        assert len(res) == 0
        assert torch.allclose(res.bboxes, torch.zeros(0, 4, device=device))

    # Empty mask results reuse the bbox results and add (0, H, W) masks.
    mask_results = empty_instances(
        img_metas,
        device,
        task_type='mask',
        instance_results=bbox_results,
        mask_thr_binary=0.5)
    assert len(mask_results) == num_imgs
    for res in mask_results:
        assert isinstance(res, InstanceData)
        assert len(res) == 0
        assert res.masks.shape == (0, 10, 12)

    # instance_results must match batch_img_metas in length.
    with pytest.raises(AssertionError):
        empty_instances(
            img_metas,
            device,
            task_type='mask',
            instance_results=[mask_results[0]] * 3)
def test_filter_gt_instances():
    """``filter_gt_instances`` drops gt instances by score and by box size."""
    packed_inputs = demo_mm_inputs()['data_samples']
    score_thr = 0.7
    # Score filtering requires a ``scores`` field on the gt instances.
    with pytest.raises(AssertionError):
        filter_gt_instances(packed_inputs, score_thr=score_thr)
    # filter no instances by score: all scores (1.0) exceed the threshold.
    for inputs in packed_inputs:
        inputs.gt_instances.scores = torch.ones_like(
            inputs.gt_instances.labels).float()
    filtered_packed_inputs = filter_gt_instances(
        copy.deepcopy(packed_inputs), score_thr=score_thr)
    for filtered_inputs, inputs in zip(filtered_packed_inputs, packed_inputs):
        assert len(filtered_inputs.gt_instances) == len(inputs.gt_instances)
    # filter all instances: all scores (0.0) fall below the threshold.
    for inputs in packed_inputs:
        inputs.gt_instances.scores = torch.zeros_like(
            inputs.gt_instances.labels).float()
    filtered_packed_inputs = filter_gt_instances(
        copy.deepcopy(packed_inputs), score_thr=score_thr)
    for filtered_inputs in filtered_packed_inputs:
        assert len(filtered_inputs.gt_instances) == 0
    packed_inputs = demo_mm_inputs()['data_samples']
    # filter no instances by size: a (0, 0) threshold keeps every box.
    wh_thr = (0, 0)
    filtered_packed_inputs = filter_gt_instances(
        copy.deepcopy(packed_inputs), wh_thr=wh_thr)
    for filtered_inputs, inputs in zip(filtered_packed_inputs, packed_inputs):
        assert len(filtered_inputs.gt_instances) == len(inputs.gt_instances)
    # filter all instances by size: raise the threshold to at least the
    # full image size so every box is smaller than it.
    for inputs in packed_inputs:
        img_shape = inputs.img_shape
        wh_thr = (max(wh_thr[0], img_shape[0]), max(wh_thr[1], img_shape[1]))
    filtered_packed_inputs = filter_gt_instances(
        copy.deepcopy(packed_inputs), wh_thr=wh_thr)
    for filtered_inputs in filtered_packed_inputs:
        assert len(filtered_inputs.gt_instances) == 0
def test_rename_loss_dict():
    """Renaming prefixes every key while keeping each value unchanged."""
    prefix = 'sup_'
    losses = {'cls_loss': torch.tensor(2.), 'reg_loss': torch.tensor(1.)}
    renamed = rename_loss_dict(prefix, losses)
    for key, value in losses.items():
        assert renamed[prefix + key] == value
def test_reweight_loss_dict():
    """Reweighting multiplies every loss value by the given weight."""
    weight = 4
    losses = {'cls_loss': torch.tensor(2.), 'reg_loss': torch.tensor(1.)}
    reweighted = reweight_loss_dict(copy.deepcopy(losses), weight)
    for key, value in losses.items():
        assert reweighted[key] == value * weight
| 4,139
| 36.297297
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_tta/test_det_tta.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import ConfigDict
from mmdet.models import DetTTAModel
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDetTTAModel(TestCase):
    """Smoke tests for ``DetTTAModel`` test-time-augmentation merging."""

    def setUp(self):
        # Register mmdet modules so config-based building works.
        register_all_modules()

    def test_det_tta_model(self):
        """Build a RetinaNet-based TTA model and run one test step."""
        detector_cfg = get_detector_cfg(
            'retinanet/retinanet_r18_fpn_1x_coco.py')
        cfg = ConfigDict(
            type='DetTTAModel',
            module=detector_cfg,
            tta_cfg=dict(
                nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
        model: DetTTAModel = MODELS.build(cfg)
        imgs = []
        data_samples = []
        directions = ['horizontal', 'vertical']
        # 12 augmented views: growing scales with alternating flip state
        # and flip direction.
        for i in range(12):
            flip_direction = directions[0] if i % 3 == 0 else directions[1]
            imgs.append(torch.randn(1, 3, 100 + 10 * i, 100 + 10 * i))
            data_samples.append([
                DetDataSample(
                    metainfo=dict(
                        ori_shape=(100, 100),
                        img_shape=(100 + 10 * i, 100 + 10 * i),
                        scale_factor=((100 + 10 * i) / 100,
                                      (100 + 10 * i) / 100),
                        flip=(i % 2 == 0),
                        flip_direction=flip_direction), )
            ])
        # Should run end to end without raising.
        model.test_step(dict(inputs=imgs, data_samples=data_samples))
| 1,572
| 31.102041
| 75
|
py
|
ERD
|
ERD-main/tests/test_models/test_necks/test_ct_resnet_neck.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmdet.models.necks import CTResNetNeck
class TestCTResNetNeck(unittest.TestCase):
    """Unit tests for the CenterNet ``CTResNetNeck``."""

    def test_init(self):
        """Constructor validation and weight initialization."""
        # num_filters/num_kernels must be same length
        with self.assertRaises(AssertionError):
            CTResNetNeck(
                in_channels=10,
                num_deconv_filters=(10, 10),
                num_deconv_kernels=(4, ))
        ct_resnet_neck = CTResNetNeck(
            in_channels=16,
            num_deconv_filters=(8, 8),
            num_deconv_kernels=(4, 4),
            use_dcn=False)
        ct_resnet_neck.init_weights()

    def test_forward(self):
        """Forward pass upsamples a 4x4 input to 16x16 (two deconv stages)."""
        in_channels = 16
        num_filters = (8, 8)
        num_kernels = (4, 4)
        feat = torch.rand(1, 16, 4, 4)
        ct_resnet_neck = CTResNetNeck(
            in_channels=in_channels,
            num_deconv_filters=num_filters,
            num_deconv_kernels=num_kernels,
            use_dcn=False)
        # feat must be list or tuple
        with self.assertRaises(AssertionError):
            ct_resnet_neck(feat)
        out_feat = ct_resnet_neck([feat])[0]
        self.assertEqual(out_feat.shape, (1, num_filters[-1], 16, 16))
        if torch.cuda.is_available():
            # test dcn (the default use_dcn=True path needs CUDA)
            ct_resnet_neck = CTResNetNeck(
                in_channels=in_channels,
                num_deconv_filters=num_filters,
                num_deconv_kernels=num_kernels)
            ct_resnet_neck = ct_resnet_neck.cuda()
            feat = feat.cuda()
            out_feat = ct_resnet_neck([feat])[0]
            self.assertEqual(out_feat.shape, (1, num_filters[-1], 16, 16))
| 1,678
| 30.092593
| 74
|
py
|
ERD
|
ERD-main/tests/test_models/test_necks/test_necks.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.necks import (FPG, FPN, FPN_CARAFE, NASFCOS_FPN, NASFPN, SSH,
YOLOXPAFPN, ChannelMapper, DilatedEncoder,
DyHead, SSDNeck, YOLOV3Neck)
def test_fpn():
    """Exercise FPN construction errors and every extra-conv configuration."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8
    # end_level=-1 is equal to end_level=3
    FPN(in_channels=in_channels,
        out_channels=out_channels,
        start_level=0,
        end_level=-1,
        num_outs=5)
    FPN(in_channels=in_channels,
        out_channels=out_channels,
        start_level=0,
        end_level=3,
        num_outs=5)
    # `num_outs` is not equal to end_level - start_level + 1
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=2,
            num_outs=3)
    # `num_outs` is not equal to len(in_channels) - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            num_outs=2)
    # `end_level` is larger than len(in_channels) - 1
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=4,
            num_outs=2)
    # `num_outs` is not equal to end_level - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=3,
            num_outs=1)
    # Invalid `add_extra_convs` option
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            add_extra_convs='on_xxx',
            num_outs=5)
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        num_outs=5)
    # FPN expects a multiple levels of features per image
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    outs = fpn_model(feats)
    # add_extra_convs=True is normalized to the 'on_input' source.
    assert fpn_model.add_extra_convs == 'on_input'
    assert len(outs) == fpn_model.num_outs
    # NOTE(review): the bare comparisons in the loops below discard their
    # results, so they assert nothing. Before prefixing them with `assert`,
    # confirm the expected spatial sizes — with start_level=1 the first
    # output plausibly has size s // 2, not s; verify against FPN.
    for i in range(fpn_model.num_outs):
        outs[i].shape[1] == out_channels
        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
    # Tests for fpn with no extra convs (pooling is used instead)
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=False,
        num_outs=5)
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    assert not fpn_model.add_extra_convs
    for i in range(fpn_model.num_outs):
        outs[i].shape[1] == out_channels
        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
    # Tests for fpn with lateral bns
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        no_norm_on_lateral=False,
        norm_cfg=dict(type='BN', requires_grad=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    assert fpn_model.add_extra_convs == 'on_input'
    for i in range(fpn_model.num_outs):
        outs[i].shape[1] == out_channels
        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
    # At least one BatchNorm layer must exist when norm_cfg is given.
    bn_exist = False
    for m in fpn_model.modules():
        if isinstance(m, _BatchNorm):
            bn_exist = True
    assert bn_exist
    # Bilinear upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(mode='bilinear', align_corners=True),
        num_outs=5)
    # NOTE(review): this first call is redundant — the result is discarded
    # and the model is called again on the next line.
    fpn_model(feats)
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    assert fpn_model.add_extra_convs == 'on_input'
    for i in range(fpn_model.num_outs):
        outs[i].shape[1] == out_channels
        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
    # Scale factor instead of fixed upsample size upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(scale_factor=2),
        num_outs=5)
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        outs[i].shape[1] == out_channels
        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
    # Extra convs source is 'inputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_input',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_input'
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        outs[i].shape[1] == out_channels
        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
    # Extra convs source is 'laterals'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_lateral',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_lateral'
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        outs[i].shape[1] == out_channels
        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
    # Extra convs source is 'outputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_output',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_output'
    outs = fpn_model(feats)
    assert len(outs) == fpn_model.num_outs
    for i in range(fpn_model.num_outs):
        outs[i].shape[1] == out_channels
        outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_channel_mapper():
    """Tests ChannelMapper config validation and forward output shapes."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8
    kernel_size = 3
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]

    # in_channels must be a list
    with pytest.raises(AssertionError):
        channel_mapper = ChannelMapper(
            in_channels=10, out_channels=out_channels, kernel_size=kernel_size)

    # the length of channel_mapper's inputs must be equal to the length of
    # in_channels
    with pytest.raises(AssertionError):
        channel_mapper = ChannelMapper(
            in_channels=in_channels[:-1],
            out_channels=out_channels,
            kernel_size=kernel_size)
        channel_mapper(feats)

    channel_mapper = ChannelMapper(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size)

    outs = channel_mapper(feats)
    assert len(outs) == len(feats)
    for i in range(len(feats)):
        # BUG FIX: these comparisons were bare expressions (no-ops); wrap
        # them in `assert` so the shape checks actually run.
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_dilated_encoder():
    """DilatedEncoder should keep spatial size and map to out_channels."""
    in_channels, out_channels, out_shape = 16, 32, 34
    encoder = DilatedEncoder(in_channels, out_channels, 16, 2, [2, 4, 6, 8])
    inputs = [torch.rand(1, in_channels, out_shape, out_shape)]
    result = encoder(inputs)[0]
    assert result.shape == (1, out_channels, out_shape, out_shape)
def test_yolov3_neck():
    """Tests YOLOV3Neck with invalid and valid configurations."""
    # num_scales, in_channels, out_channels must all have the same length
    with pytest.raises(AssertionError):
        YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4])

    # len(feats) must equal num_scales
    with pytest.raises(AssertionError):
        neck = YOLOV3Neck(
            num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2])
        feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16))
        neck(feats)

    s = 32
    # First a "normal" decreasing-channel config, then a more flexible one
    # with arbitrary channel counts.
    for in_channels, out_channels in [([16, 8, 4], [8, 4, 2]),
                                      ([32, 8, 16], [19, 21, 5])]:
        feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]
        feats = [
            torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
            for i in range(len(in_channels) - 1, -1, -1)
        ]
        neck = YOLOV3Neck(
            num_scales=3, in_channels=in_channels, out_channels=out_channels)
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i, out in enumerate(outs):
            assert out.shape == (1, out_channels[i], feat_sizes[i],
                                 feat_sizes[i])
def test_ssd_neck():
    """Tests SSDNeck config validation and forward output shapes."""
    # level_strides and level_paddings must have the same length
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[8, 16, 32],
            level_strides=[2],
            level_paddings=[2, 1])

    # out_channels must be longer than in_channels
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[8],
            level_strides=[2],
            level_paddings=[2])

    # len(out_channels) - len(in_channels) must equal len(level_strides)
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[4, 16, 64],
            level_strides=[2, 2],
            level_paddings=[2, 2])

    # in_channels must match out_channels[:len(in_channels)]
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[4, 16, 64],
            level_strides=[2],
            level_paddings=[2])

    neck = SSDNeck(
        in_channels=[4],
        out_channels=[4, 8, 16],
        level_strides=[2, 1],
        level_paddings=[1, 0])
    outputs = neck((torch.rand(1, 4, 16, 16), ))
    for out, shape in zip(outputs, [(1, 4, 16, 16), (1, 8, 8, 8),
                                    (1, 16, 6, 6)]):
        assert out.shape == shape

    # SSD-Lite style neck: depthwise convs and no L2 norm layer
    lite_neck = SSDNeck(
        in_channels=[4, 8],
        out_channels=[4, 8, 16],
        level_strides=[1],
        level_paddings=[1],
        l2_norm_scale=None,
        use_depthwise=True,
        norm_cfg=dict(type='BN'),
        act_cfg=dict(type='ReLU6'))
    assert not hasattr(lite_neck, 'l2_norm')
    from mmcv.cnn.bricks import DepthwiseSeparableConvModule
    assert isinstance(lite_neck.extra_layers[0][-1],
                      DepthwiseSeparableConvModule)
    outputs = lite_neck((torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8)))
    for out, shape in zip(outputs, [(1, 4, 8, 8), (1, 8, 8, 8),
                                    (1, 16, 8, 8)]):
        assert out.shape == shape
def test_yolox_pafpn():
    """Tests YOLOXPAFPN output shapes, including the depthwise variant."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 24
    feats = [
        torch.rand(1, channels, size, size)
        for channels, size in zip(in_channels, feat_sizes)
    ]

    def check_outputs(neck):
        # Every level must map to out_channels at the original resolution.
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i, out in enumerate(outs):
            assert out.shape[1] == out_channels
            assert out.shape[2] == out.shape[3] == s // (2**i)

    check_outputs(
        YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels))

    # depthwise separable variant
    neck = YOLOXPAFPN(
        in_channels=in_channels,
        out_channels=out_channels,
        use_depthwise=True)
    from mmcv.cnn.bricks import DepthwiseSeparableConvModule
    assert isinstance(neck.downsamples[0], DepthwiseSeparableConvModule)
    check_outputs(neck)
def test_dyhead():
    """Tests DyHead forward shapes and input type checking."""
    s = 64
    in_channels, out_channels = 8, 16
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    feats = [torch.rand(1, in_channels, size, size) for size in feat_sizes]
    neck = DyHead(
        in_channels=in_channels, out_channels=out_channels, num_blocks=3)
    outs = neck(feats)
    assert len(outs) == len(feats)
    for i, out in enumerate(outs):
        assert out.shape[1] == out_channels
        assert out.shape[2] == out.shape[3] == s // (2**i)

    # a bare tensor (not a tuple or list) must be rejected
    with pytest.raises(AssertionError):
        neck(torch.rand(1, 8, 4, 4))
def test_fpg():
    """Tests FPG construction; ``end_level=-1`` is equivalent to ``3``."""
    norm_cfg = dict(type='BN', requires_grad=True)

    def make_cfg(end_level):
        # Build a fresh kwargs dict per call so constructions never share
        # mutable config objects.
        return dict(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            inter_channels=8,
            num_outs=5,
            add_extra_convs=True,
            start_level=1,
            end_level=end_level,
            stack_times=9,
            paths=['bu'] * 9,
            same_down_trans=None,
            same_up_trans=dict(
                type='conv',
                kernel_size=3,
                stride=2,
                padding=1,
                norm_cfg=norm_cfg,
                inplace=False,
                order=('act', 'conv', 'norm')),
            across_lateral_trans=dict(
                type='conv',
                kernel_size=1,
                norm_cfg=norm_cfg,
                inplace=False,
                order=('act', 'conv', 'norm')),
            across_down_trans=dict(
                type='interpolation_conv',
                mode='nearest',
                kernel_size=3,
                norm_cfg=norm_cfg,
                order=('act', 'conv', 'norm'),
                inplace=False),
            across_up_trans=None,
            across_skip_trans=dict(
                type='conv',
                kernel_size=1,
                norm_cfg=norm_cfg,
                inplace=False,
                order=('act', 'conv', 'norm')),
            output_trans=dict(
                type='last_conv',
                kernel_size=3,
                order=('act', 'conv', 'norm'),
                inplace=False),
            norm_cfg=norm_cfg,
            skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])

    # end_level=-1 is equal to end_level=3
    FPG(**make_cfg(-1))
    FPG(**make_cfg(3))

    # `end_level` larger than len(in_channels) - 1 is rejected
    with pytest.raises(AssertionError):
        FPG(in_channels=[8, 16, 32, 64],
            out_channels=8,
            stack_times=9,
            paths=['bu'] * 9,
            start_level=1,
            end_level=4,
            num_outs=2,
            skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])

    # `num_outs` must equal end_level - start_level + 1
    with pytest.raises(AssertionError):
        FPG(in_channels=[8, 16, 32, 64],
            out_channels=8,
            stack_times=9,
            paths=['bu'] * 9,
            start_level=1,
            end_level=2,
            num_outs=3,
            skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
def test_fpn_carafe():
    """Tests FPN_CARAFE; ``end_level=-1`` is equivalent to ``end_level=3``."""
    for end_level in (3, -1):
        FPN_CARAFE(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            start_level=0,
            end_level=end_level,
            num_outs=4)

    # `end_level` larger than len(in_channels) - 1 is rejected
    with pytest.raises(AssertionError):
        FPN_CARAFE(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            start_level=1,
            end_level=4,
            num_outs=2)

    # `num_outs` must equal end_level - start_level + 1
    with pytest.raises(AssertionError):
        FPN_CARAFE(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            start_level=1,
            end_level=2,
            num_outs=3)
def test_nas_fpn():
    """Tests NASFPN; ``end_level=-1`` is equivalent to ``end_level=3``."""
    for end_level in (3, -1):
        NASFPN(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            stack_times=9,
            start_level=0,
            end_level=end_level,
            num_outs=4)

    # `end_level` larger than len(in_channels) - 1 is rejected
    with pytest.raises(AssertionError):
        NASFPN(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            stack_times=9,
            start_level=1,
            end_level=4,
            num_outs=2)

    # `num_outs` must equal end_level - start_level + 1
    with pytest.raises(AssertionError):
        NASFPN(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            stack_times=9,
            start_level=1,
            end_level=2,
            num_outs=3)
def test_nasfcos_fpn():
    """Tests NASFCOS_FPN; ``end_level=-1`` is equivalent to ``end_level=3``."""
    for end_level in (3, -1):
        NASFCOS_FPN(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            start_level=0,
            end_level=end_level,
            num_outs=4)

    # `end_level` larger than len(in_channels) - 1 is rejected
    with pytest.raises(AssertionError):
        NASFCOS_FPN(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            start_level=1,
            end_level=4,
            num_outs=2)

    # `num_outs` must equal end_level - start_level + 1
    with pytest.raises(AssertionError):
        NASFCOS_FPN(
            in_channels=[8, 16, 32, 64],
            out_channels=8,
            start_level=1,
            end_level=2,
            num_outs=3)
def test_ssh_neck():
    """Tests SSH neck output shapes."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = [16, 32, 64, 128]
    neck = SSH(
        num_scales=4, in_channels=in_channels, out_channels=out_channels)
    feats = [
        torch.rand(1, channels, size, size)
        for channels, size in zip(in_channels, feat_sizes)
    ]
    outs = neck(feats)
    assert len(outs) == len(feats)
    for i, out in enumerate(outs):
        assert out.shape == (1, out_channels[i], feat_sizes[i], feat_sizes[i])
| 20,260
| 30.075153
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_reppoints_head.py
|
import unittest
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized
from mmdet.models.dense_heads import RepPointsHead
from mmdet.structures import DetDataSample
class TestRepPointsHead(unittest.TestCase):
    """Tests for RepPointsHead loss computation and predict path."""

    @parameterized.expand(['moment', 'minmax', 'partial_minmax'])
    def test_head_loss(self, transform_method='moment'):
        """Tests head loss when truth is empty and non-empty.

        Parameterized over the three point-to-bbox transform methods.
        """
        cfg = ConfigDict(
            dict(
                num_classes=2,
                in_channels=32,
                point_feat_channels=10,
                num_points=9,
                gradient_mul=0.1,
                point_strides=[8, 16, 32, 64, 128],
                point_base_scale=4,
                loss_cls=dict(
                    type='FocalLoss',
                    use_sigmoid=True,
                    gamma=2.0,
                    alpha=0.25,
                    loss_weight=1.0),
                loss_bbox_init=dict(
                    type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                loss_bbox_refine=dict(
                    type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
                use_grid_points=False,
                center_init=True,
                transform_method=transform_method,
                moment_mul=0.01,
                init_cfg=dict(
                    type='Normal',
                    layer='Conv2d',
                    std=0.01,
                    override=dict(
                        type='Normal',
                        name='reppoints_cls_out',
                        std=0.01,
                        bias_prob=0.01)),
                train_cfg=dict(
                    init=dict(
                        assigner=dict(
                            type='PointAssigner', scale=4, pos_num=1),
                        allowed_border=-1,
                        pos_weight=-1,
                        debug=False),
                    refine=dict(
                        assigner=dict(
                            type='MaxIoUAssigner',
                            pos_iou_thr=0.5,
                            neg_iou_thr=0.4,
                            min_pos_iou=0,
                            ignore_iof_thr=-1),
                        allowed_border=-1,
                        pos_weight=-1,
                        debug=False)),
                test_cfg=dict(
                    nms_pre=1000,
                    min_bbox_size=0,
                    score_thr=0.05,
                    nms=dict(type='nms', iou_threshold=0.5),
                    max_per_img=100)))
        reppoints_head = RepPointsHead(**cfg)
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'scale_factor': (1, 1),
            'pad_shape': (s, s),
            'batch_input_shape': (s, s)
        }]
        # one feature map per point stride
        x = [
            torch.rand(1, 32, s // 2**(i + 2), s // 2**(i + 2))
            for i in range(5)
        ]

        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        reppoints_head.train()
        forward_outputs = reppoints_head.forward(x)
        empty_gt_losses = reppoints_head.loss_by_feat(*forward_outputs,
                                                      [gt_instances],
                                                      img_metas,
                                                      gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no pts loss.
        for key, losses in empty_gt_losses.items():
            for loss in losses:
                if 'cls' in key:
                    self.assertGreater(loss.item(), 0,
                                       'cls loss should be non-zero')
                elif 'pts' in key:
                    self.assertEqual(
                        loss.item(), 0,
                        'there should be no reg loss when no ground true boxes'
                    )

        # When truth is non-empty then both cls and pts loss should be
        # nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = reppoints_head.loss_by_feat(*forward_outputs,
                                                    [gt_instances], img_metas,
                                                    gt_bboxes_ignore)
        # loss_cls should all be non-zero
        self.assertTrue(
            all([loss.item() > 0 for loss in one_gt_losses['loss_cls']]))
        # only one level loss_pts_init is non-zero
        cnt_non_zero = 0
        for loss in one_gt_losses['loss_pts_init']:
            if loss.item() != 0:
                cnt_non_zero += 1
        self.assertEqual(cnt_non_zero, 1)
        # only one level loss_pts_refine is non-zero
        # BUG FIX: this loop previously re-iterated 'loss_pts_init',
        # leaving 'loss_pts_refine' completely untested.
        cnt_non_zero = 0
        for loss in one_gt_losses['loss_pts_refine']:
            if loss.item() != 0:
                cnt_non_zero += 1
        self.assertEqual(cnt_non_zero, 1)

        # test loss entry point with a full DetDataSample
        samples = DetDataSample()
        samples.set_metainfo(img_metas[0])
        samples.gt_instances = gt_instances
        reppoints_head.loss(x, [samples])

        # test only predict
        reppoints_head.eval()
        reppoints_head.predict(x, [samples], rescale=True)
| 5,581
| 37.763889
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_rpn_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import RPNHead
class TestRPNHead(TestCase):
    """Tests for RPNHead construction, loss, and bbox post-processing."""

    def test_init(self):
        """RPNHead should build its conv/cls/reg layers for 1 and 2 convs."""
        head = RPNHead(num_classes=1, in_channels=1)
        self.assertTrue(head.rpn_conv)
        self.assertTrue(head.rpn_cls)
        self.assertTrue(head.rpn_reg)

        # more than one stacked conv
        head = RPNHead(num_classes=1, in_channels=1, num_convs=2)
        self.assertTrue(head.rpn_conv)
        self.assertTrue(head.rpn_cls)
        self.assertTrue(head.rpn_reg)

    def test_rpn_head_loss(self):
        """Tests rpn head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.3,
                    min_pos_iou=0.3,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=0,
                pos_weight=-1,
                debug=False))
        head = RPNHead(num_classes=1, in_channels=1, train_cfg=cfg)
        # one feature map per anchor-generator stride
        feature_maps = (
            torch.rand(1, 1, s // (2**(lvl + 2)), s // (2**(lvl + 2)))
            for lvl in range(len(head.prior_generator.strides)))
        cls_scores, bbox_preds = head.forward(feature_maps)

        # Empty GT: cls loss non-zero, box loss exactly zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        losses = head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],
                                   img_metas)
        self.assertGreater(
            sum(losses['loss_rpn_cls']).item(), 0,
            'rpn cls loss should be non-zero')
        self.assertEqual(
            sum(losses['loss_rpn_bbox']).item(), 0,
            'there should be no box loss when there are no true boxes')

        # Non-empty GT: both losses non-zero for random inputs.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([0])
        losses = head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],
                                   img_metas)
        self.assertGreater(
            sum(losses['loss_rpn_cls']).item(), 0,
            'rpn cls loss should be non-zero')
        self.assertGreater(
            sum(losses['loss_rpn_bbox']).item(), 0,
            'rpn box loss should be non-zero')

        # When there is no valid anchor, the loss will be None,
        # and this will raise a ValueError.
        img_metas = [{
            'img_shape': (8, 8, 3),
            'pad_shape': (8, 8, 3),
            'scale_factor': 1,
        }]
        with pytest.raises(ValueError):
            head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],
                              img_metas)

    def test_bbox_post_process(self):
        """Post-processing an empty proposal set should yield empty results."""
        from mmengine.config import ConfigDict
        cfg = ConfigDict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)
        head = RPNHead(num_classes=1, in_channels=1)
        proposals = InstanceData(metainfo=dict())
        proposals.bboxes = torch.zeros((0, 4))
        proposals.scores = torch.zeros(0)
        results = head._bbox_post_process(proposals, cfg, img_meta=dict())
        self.assertEqual(len(results), 0)
        self.assertEqual(results.bboxes.size(), (0, 4))
        self.assertEqual(results.scores.size(), (0, ))
        self.assertEqual(results.labels.size(), (0, ))
| 4,805
| 37.448
| 76
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_lad_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import LADHead, lad_head
from mmdet.models.dense_heads.lad_head import levels_to_images
class TestLADHead(TestCase):
def test_lad_head_loss(self):
"""Tests lad head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
components = np.zeros_like(loss, dtype=np.long)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
lad_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
train_cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
# since Focal Loss is not supported on CPU
lad = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
teacher_model = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
lad.init_weights()
teacher_model.init_weights()
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
batch_gt_instances_ignore = None
outs_teacher = teacher_model(feat)
label_assignment_results = teacher_model.get_label_assignment(
*outs_teacher, [gt_instances], img_metas,
batch_gt_instances_ignore)
outs = teacher_model(feat)
empty_gt_losses = lad.loss_by_feat(*outs, [gt_instances], img_metas,
batch_gt_instances_ignore,
label_assignment_results)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_iou_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
batch_gt_instances_ignore = None
label_assignment_results = teacher_model.get_label_assignment(
*outs_teacher, [gt_instances], img_metas,
batch_gt_instances_ignore)
one_gt_losses = lad.loss_by_feat(*outs, [gt_instances], img_metas,
batch_gt_instances_ignore,
label_assignment_results)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_iou_loss.item(), 0,
'box loss should be non-zero')
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
self.assertEqual(len(results), n)
self.assertEqual(results[0].size(), (h * w * 5, c))
self.assertTrue(lad.with_score_voting)
lad = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
lad.predict_by_feat(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
| 6,520
| 37.585799
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_guided_anchor_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import GuidedAnchorHead
# Shared construction arguments for GuidedAnchorHead used by the tests below.
guided_anchor_head_config = ConfigDict(
    dict(
        num_classes=4,
        in_channels=4,
        feat_channels=4,
        # dense approx anchors vs. one square anchor per location
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        # loss terms: location, shape, classification, regression
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0),
        train_cfg=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.4,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            debug=False),
        test_cfg=dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
class TestGuidedAnchorHead(TestCase):
    """Tests for GuidedAnchorHead loss and predict_by_feat."""

    def test_guided_anchor_head_loss(self):
        """Tests guided anchor loss for empty and non-empty ground truth."""
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': (1, 1)
        }]
        head = GuidedAnchorHead(**guided_anchor_head_config)
        # one random feature map per square-anchor stride
        feats = (
            torch.rand(1, 4, s // stride[1], s // stride[0])
            for stride in head.square_anchor_generator.strides)
        outs = head(feats)

        # Empty GT: cls and location losses non-zero, box and shape zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_losses = head.loss_by_feat(*outs, [gt_instances], img_metas)
        self.assertGreater(
            sum(empty_losses['loss_cls']).item(), 0,
            'cls loss should be non-zero')
        self.assertGreater(
            sum(empty_losses['loss_loc']).item(), 0,
            'location loss should be non-zero')
        self.assertEqual(
            sum(empty_losses['loss_bbox']).item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            sum(empty_losses['loss_shape']).item(), 0,
            'there should be no shape loss when there are no true boxes')

        # Non-empty GT: every loss term should be non-zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = head.loss_by_feat(*outs, [gt_instances], img_metas)
        for key, msg in (('loss_cls', 'cls loss should be non-zero'),
                         ('loss_bbox', 'box loss should be non-zero'),
                         ('loss_shape', 'shape loss should be non-zero'),
                         ('loss_loc', 'location loss should be non-zero')):
            self.assertGreater(sum(one_gt_losses[key]).item(), 0, msg)

    def test_guided_anchor_head_predict_by_feat(self):
        """predict_by_feat should run end to end on random features."""
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': (1, 1)
        }]
        head = GuidedAnchorHead(**guided_anchor_head_config)
        feats = (
            torch.rand(1, 4, s // stride[1], s // stride[0])
            for stride in head.square_anchor_generator.strides)
        outs = head(feats)
        head.predict_by_feat(*outs, batch_img_metas=img_metas, rescale=True)
| 5,905
| 36.379747
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_ssd_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
    """Tests for SSDHead loss computation."""

    def test_ssd_head_loss(self):
        """Tests ssd head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.,
                    ignore_iof_thr=-1,
                    gt_max_assign_all=False),
                sampler=dict(type='PseudoSampler'),
                smoothl1_beta=1.,
                allowed_border=-1,
                pos_weight=-1,
                neg_pos_ratio=3,
                debug=False))
        head = SSDHead(
            num_classes=4,
            in_channels=(1, 1, 1, 1, 1, 1),
            stacked_convs=1,
            feat_channels=1,
            use_depthwise=True,
            anchor_generator=dict(
                type='SSDAnchorGenerator',
                scale_major=False,
                input_size=s,
                basesize_ratio_range=(0.15, 0.9),
                strides=[8, 16, 32, 64, 100, 300],
                ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
            train_cfg=cfg)
        # SSD head expects one feature map per prior-generator stride
        feats = (
            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
            for stride in head.prior_generator.strides)
        cls_scores, bbox_preds = head.forward(feats)

        # Empty GT: both cls and box losses should be exactly zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        losses = head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],
                                   img_metas)
        self.assertEqual(
            sum(losses['loss_cls']).item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            sum(losses['loss_bbox']).item(), 0,
            'there should be no box loss when there are no true boxes')

        # Non-empty GT: both losses non-zero for random inputs.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        losses = head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],
                                   img_metas)
        self.assertGreater(
            sum(losses['loss_cls']).item(), 0, 'cls loss should be non-zero')
        self.assertGreater(
            sum(losses['loss_bbox']).item(), 0, 'box loss should be non-zero')
| 3,494
| 36.98913
| 75
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_boxinst_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine import MessageHub
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import BoxInstBboxHead, BoxInstMaskHead
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
    """Generate reproducible random binary masks confined to given boxes."""
    rng = np.random.RandomState(0)  # fixed seed for determinism
    canvas = np.zeros((num_items, img_h, img_w), dtype=np.float32)
    for idx, box in enumerate(bboxes):
        x1, y1, x2, y2 = box.astype(np.int32)
        patch = (rng.rand(1, y2 - y1, x2 - x1) > 0.3).astype(np.int64)
        canvas[idx:idx + 1, y1:y2, x1:x2] = patch
    return BitmapMasks(canvas, height=img_h, width=img_w)
def _fake_mask_feature_head():
    """Build a minimal mask-feature-head config for BoxInstMaskHead."""
    return ConfigDict(
        in_channels=1,
        feat_channels=1,
        start_level=0,
        end_level=2,
        out_channels=8,
        mask_stride=8,
        num_stacked_convs=4,
        norm_cfg=dict(type='BN', requires_grad=True))
class TestBoxInstHead(TestCase):
    """Loss tests for the BoxInst bbox head and mask head."""
    def test_boxinst_maskhead_loss(self):
        """Tests boxinst maskhead loss when truth is empty and non-empty."""
        # Single 256x256 image; heads use 1-channel features to stay fast
        # on CPU.
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        boxinst_bboxhead = BoxInstBboxHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        mask_feature_head = _fake_mask_feature_head()
        boxinst_maskhead = BoxInstMaskHead(
            mask_feature_head=mask_feature_head,
            loss_mask=dict(
                type='DiceLoss',
                use_sigmoid=True,
                activate=True,
                eps=5e-6,
                loss_weight=1.0))
        # Fcos head expects a multiple levels of features per image
        feats = []
        for i in range(len(boxinst_bboxhead.strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))
        feats = tuple(feats)
        cls_scores, bbox_preds, centernesses, param_preds =\
            boxinst_bboxhead.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
        # pairwise_masks becomes a (num_gts, 8, s//4, s//4) float tensor;
        # NOTE(review): the repeat(1, 8, 1, 1) presumably matches 8 pairwise
        # neighbour directions of the BoxInst pairwise loss -- confirm.
        gt_instances.pairwise_masks = _rand_masks(
            0, gt_instances.bboxes.numpy(), s // 4, s // 4).to_tensor(
                dtype=torch.float32,
                device='cpu').unsqueeze(1).repeat(1, 8, 1, 1)
        # NOTE(review): 'iter' must be registered on the runtime MessageHub
        # before loss_by_feat -- the pairwise-loss warmup appears to read it.
        message_hub = MessageHub.get_instance('runtime_info')
        message_hub.update_info('iter', 1)
        # The bbox-head loss call also populates the positive assignments
        # returned by get_positive_infos() below, so call order matters.
        _ = boxinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                          param_preds, [gt_instances],
                                          img_metas)
        # When truth is empty then all mask loss
        # should be zero for random inputs
        positive_infos = boxinst_bboxhead.get_positive_infos()
        mask_outs = boxinst_maskhead.forward(feats, positive_infos)
        empty_gt_mask_losses = boxinst_maskhead.loss_by_feat(
            *mask_outs, [gt_instances], img_metas, positive_infos)
        loss_mask_project = empty_gt_mask_losses['loss_mask_project']
        loss_mask_pairwise = empty_gt_mask_losses['loss_mask_pairwise']
        self.assertEqual(loss_mask_project, 0,
                         'mask project loss should be zero')
        self.assertEqual(loss_mask_pairwise, 0,
                         'mask pairwise loss should be zero')
        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor([[0.111, 0.222, 25.6667, 29.8757]])
        gt_instances.labels = torch.LongTensor([2])
        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
        gt_instances.pairwise_masks = _rand_masks(
            1, gt_instances.bboxes.numpy(), s // 4, s // 4).to_tensor(
                dtype=torch.float32,
                device='cpu').unsqueeze(1).repeat(1, 8, 1, 1)
        # Re-run the bbox-head loss so the cached positive infos correspond
        # to the non-empty ground truth.
        _ = boxinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                          param_preds, [gt_instances],
                                          img_metas)
        positive_infos = boxinst_bboxhead.get_positive_infos()
        mask_outs = boxinst_maskhead.forward(feats, positive_infos)
        one_gt_mask_losses = boxinst_maskhead.loss_by_feat(
            *mask_outs, [gt_instances], img_metas, positive_infos)
        loss_mask_project = one_gt_mask_losses['loss_mask_project']
        loss_mask_pairwise = one_gt_mask_losses['loss_mask_pairwise']
        self.assertGreater(loss_mask_project, 0,
                           'mask project loss should be nonzero')
        self.assertGreater(loss_mask_pairwise, 0,
                           'mask pairwise loss should be nonzero')
| 5,300
| 41.071429
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_anchor_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import AnchorHead
class TestAnchorHead(TestCase):

    def test_anchor_head_loss(self):
        """Tests anchor head loss when truth is empty and non-empty.

        An image without gt boxes must still yield a non-zero cls loss
        (pushing predictions towards background) and exactly zero box loss;
        a single gt box must make both terms non-zero.
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.3,
                    min_pos_iou=0.3,
                    match_low_quality=True,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=0,
                pos_weight=-1,
                debug=False))
        anchor_head = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
        # Anchor head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
            for i in range(len(anchor_head.prior_generator.strides)))
        cls_scores, bbox_preds = anchor_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
                                                   [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        # Use self.assert* (not bare assert): bare asserts are stripped
        # under ``python -O`` and give uninformative failures, and sibling
        # tests in this suite already use the unittest assertion methods.
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
| 3,035
| 37.923077
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_cascade_rpn_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CascadeRPNHead
from mmdet.structures import DetDataSample
# Shared loss weight applied to both RPN stages (cls and bbox terms).
rpn_weight = 0.7
# Minimal two-stage Cascade RPN configuration used by every test below.
# Stage 1: anchor-based, regression-only (with_cls=False), region assigner.
# Stage 2: offset-adapted refinement with a cls branch, max-IoU assigner and
# random sampler. Channels are kept tiny (1) so the tests run fast on CPU.
cascade_rpn_config = ConfigDict(
    dict(
        num_stages=2,
        num_classes=1,
        stages=[
            dict(
                type='StageCascadeRPNHead',
                in_channels=1,
                feat_channels=1,
                anchor_generator=dict(
                    type='AnchorGenerator',
                    scales=[8],
                    ratios=[1.0],
                    strides=[4, 8, 16, 32, 64]),
                adapt_cfg=dict(type='dilation', dilation=3),
                bridged_feature=True,
                with_cls=False,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.1, 0.1, 0.5, 0.5)),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight)),
            dict(
                type='StageCascadeRPNHead',
                in_channels=1,
                feat_channels=1,
                adapt_cfg=dict(type='offset'),
                bridged_feature=False,
                with_cls=True,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.05, 0.05, 0.1, 0.1)),
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=True,
                    loss_weight=1.0 * rpn_weight),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight))
        ],
        # One train_cfg entry per stage, in stage order.
        train_cfg=[
            dict(
                assigner=dict(
                    type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
                allowed_border=-1,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.3,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=-1,
                pos_weight=-1,
                debug=False)
        ],
        test_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.8))))
class TestStageCascadeRPNHead(TestCase):
    """Tests for ``CascadeRPNHead`` loss and predict entry points."""

    def _build_head_and_sample(self):
        """Build a CascadeRPNHead plus matching inputs (shared test setup).

        Returns:
            tuple: ``(head, feats, sample)`` where ``feats`` is one random
            feature map per stride of the first stage's prior generator and
            ``sample`` carries a 256x256 image meta with no gt attached yet.
        """
        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)
        s = 256
        feats = [
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in cascade_rpn_head.stages[0].prior_generator.strides
        ]
        img_metas = {
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': 1,
        }
        sample = DetDataSample()
        sample.set_metainfo(img_metas)
        return cascade_rpn_head, feats, sample

    @staticmethod
    def _empty_gt_instances():
        """Ground truth with zero boxes (background-only image)."""
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        return gt_instances

    def test_cascade_rpn_head_loss(self):
        """Tests cascade rpn head loss when truth is empty and non-empty."""
        cascade_rpn_head, feats, sample = self._build_head_and_sample()
        # Test that empty ground truth encourages the network to
        # predict background
        sample.gt_instances = self._empty_gt_instances()
        empty_gt_losses = cascade_rpn_head.loss(feats, [sample])
        for key, loss in empty_gt_losses.items():
            loss = sum(loss)
            if 'cls' in key:
                self.assertGreater(loss.item(), 0,
                                   'cls loss should be non-zero')
            elif 'reg' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no reg loss when no ground true boxes')
        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([0])
        sample.gt_instances = gt_instances
        one_gt_losses = cascade_rpn_head.loss(feats, [sample])
        for loss in one_gt_losses.values():
            loss = sum(loss)
            self.assertGreater(
                loss.item(), 0,
                'cls loss, or box loss, or iou loss should be non-zero')

    def test_cascade_rpn_head_loss_and_predict(self):
        """Tests cascade rpn head loss and predict function."""
        cascade_rpn_head, feats, sample = self._build_head_and_sample()
        sample.gt_instances = self._empty_gt_instances()
        proposal_cfg = ConfigDict(
            dict(max_per_img=300, nms=dict(iou_threshold=0.8)))
        # Smoke test: must run without raising.
        cascade_rpn_head.loss_and_predict(feats, [sample], proposal_cfg)

    def test_cascade_rpn_head_predict(self):
        """Tests cascade rpn head predict function."""
        cascade_rpn_head, feats, sample = self._build_head_and_sample()
        # Smoke test: inference-only path, no ground truth required.
        cascade_rpn_head.predict(feats, [sample])
| 6,383
| 34.466667
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_centernet_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CenterNetHead
class TestCenterNetHead(TestCase):
    """Tests for ``CenterNetHead`` loss, target assignment and decoding."""

    def _assert_bboxes_decoded(self, pred_instances, gt_bboxes, gt_labels):
        """Assert each of the top-3 decoded boxes equals some gt box."""
        out_bboxes = pred_instances.bboxes[:3]
        out_clses = pred_instances.labels[:3]
        for bbox, cls in zip(out_bboxes, out_clses):
            matched = any(
                bool((bbox[:4] == gt_bbox[:4]).all())
                for gt_bbox in gt_bboxes[0])
            self.assertTrue(matched, 'get_bboxes is wrong')

    def test_center_head_loss(self):
        """Tests center head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{'batch_input_shape': (s, s, 3)}]
        # Key is ``topk`` (lowercase) for consistency with the cfgs used by
        # the decoding tests below; the loss path does not read test_cfg.
        test_cfg = dict(topk=100, max_per_img=100)
        centernet_head = CenterNetHead(
            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)
        feat = [torch.rand(1, 1, s, s)]
        center_out, wh_out, offset_out = centernet_head.forward(feat)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = centernet_head.loss_by_feat(center_out, wh_out,
                                                      offset_out,
                                                      [gt_instances],
                                                      img_metas)
        loss_center = empty_gt_losses['loss_center_heatmap']
        loss_wh = empty_gt_losses['loss_wh']
        loss_offset = empty_gt_losses['loss_offset']
        self.assertGreater(loss_center.item(), 0,
                           'loss_center should be non-zero')
        self.assertEqual(
            loss_wh.item(), 0,
            'there should be no loss_wh when there are no true boxes')
        self.assertEqual(
            loss_offset.item(), 0,
            'there should be no loss_offset when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = centernet_head.loss_by_feat(center_out, wh_out,
                                                    offset_out,
                                                    [gt_instances], img_metas)
        loss_center = one_gt_losses['loss_center_heatmap']
        loss_wh = one_gt_losses['loss_wh']
        loss_offset = one_gt_losses['loss_offset']
        self.assertGreater(loss_center.item(), 0,
                           'loss_center should be non-zero')
        self.assertGreater(loss_wh.item(), 0, 'loss_wh should be non-zero')
        self.assertGreater(loss_offset.item(), 0,
                           'loss_offset should be non-zero')

    def test_centernet_head_get_targets(self):
        """Tests center head generating and decoding the heatmap."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
        }]
        test_cfg = ConfigDict(
            dict(topk=100, local_maximum_kernel=3, max_per_img=100))
        gt_bboxes = [
            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                          [10, 20, 100, 240]])
        ]
        gt_labels = [torch.LongTensor([1, 1, 2])]
        centernet_head = CenterNetHead(
            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)
        self.feat_shape = (1, 1, s // 4, s // 4)
        targets, _ = centernet_head.get_targets(gt_bboxes, gt_labels,
                                                self.feat_shape,
                                                img_metas[0]['img_shape'])
        center_target = targets['center_heatmap_target']
        wh_target = targets['wh_target']
        offset_target = targets['offset_target']
        # make sure assign target right
        for i in range(len(gt_bboxes[0])):
            # Boxes are mapped onto the stride-4 feature map.
            bbox, label = gt_bboxes[0][i] / 4, gt_labels[0][i]
            ctx, cty = sum(bbox[0::2]) / 2, sum(bbox[1::2]) / 2
            int_ctx, int_cty = int(sum(bbox[0::2]) / 2), int(
                sum(bbox[1::2]) / 2)
            w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
            x_off = ctx - int(ctx)
            y_off = cty - int(cty)
            # Heatmap peak of 1 at the integer center; wh/offset targets at
            # the same location carry box size and sub-pixel center offset.
            self.assertTrue(center_target[0, label, int_cty, int_ctx] == 1)
            self.assertTrue(wh_target[0, 0, int_cty, int_ctx] == w)
            self.assertTrue(wh_target[0, 1, int_cty, int_ctx] == h)
            self.assertTrue(offset_target[0, 0, int_cty, int_ctx] == x_off)
            self.assertTrue(offset_target[0, 1, int_cty, int_ctx] == y_off)

    def test_centernet_head_get_results(self):
        """Tests center head generating and decoding the heatmap."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'border': (0, 0, 0, 0),
        }]
        test_cfg = ConfigDict(
            dict(
                topk=100,
                local_maximum_kernel=3,
                max_per_img=100,
                nms=dict(type='nms', iou_threshold=0.5)))
        gt_bboxes = [
            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                          [10, 20, 100, 240]])
        ]
        gt_labels = [torch.LongTensor([1, 1, 2])]
        centernet_head = CenterNetHead(
            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)
        self.feat_shape = (1, 1, s // 4, s // 4)
        targets, _ = centernet_head.get_targets(gt_bboxes, gt_labels,
                                                self.feat_shape,
                                                img_metas[0]['img_shape'])
        center_target = targets['center_heatmap_target']
        wh_target = targets['wh_target']
        offset_target = targets['offset_target']
        # Decoding the perfect targets must reproduce the gt boxes, both
        # with and without NMS.
        for with_nms in (False, True):
            detections = centernet_head.predict_by_feat([center_target],
                                                        [wh_target],
                                                        [offset_target],
                                                        img_metas,
                                                        rescale=True,
                                                        with_nms=with_nms)
            self._assert_bboxes_decoded(detections[0], gt_bboxes, gt_labels)
| 7,238
| 44.528302
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_nasfcos_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import NASFCOSHead
class TestNASFCOSHead(TestCase):

    def test_nasfcos_head_loss(self):
        """NAS-FCOS head loss for empty and non-empty ground truth.

        A background-only image may only produce a non-zero cls loss; a
        single gt box must make cls, box and centerness losses non-zero.
        """
        size = 256
        img_metas = [{
            'img_shape': (size, size, 3),
            'pad_shape': (size, size, 3),
            'scale_factor': 1,
        }]
        head = NASFCOSHead(
            num_classes=4,
            in_channels=2,  # the same as `deform_groups` in dconv3x3_config
            feat_channels=2,
            norm_cfg=None)
        # One random feature map per FPN level, sized by the level stride.
        feats = tuple(
            torch.rand(1, 2, size // stride[1], size // stride[0]).float()
            for stride in head.prior_generator.strides)
        cls_scores, bbox_preds, centernesses = head.forward(feats)

        # Background-only image: only the cls loss may be non-zero.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        empty_losses = head.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                         [empty_gt], img_metas)
        self.assertGreater(empty_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_losses['loss_bbox'].item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_losses['loss_centerness'].item(), 0,
            'there should be no centerness loss when there are no true boxes')

        # A single gt box: every loss term must be non-zero.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([2])
        one_losses = head.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                       [one_gt], img_metas)
        self.assertGreater(one_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(one_losses['loss_bbox'].item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(one_losses['loss_centerness'].item(), 0,
                           'centerness loss should be non-zero')
| 3,065
| 42.183099
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_centripetal_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CentripetalHead
class TestCentripetalHead(TestCase):
    """Tests for ``CentripetalHead.loss_by_feat``."""

    def test_centripetal_head_loss(self):
        """Tests corner head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }]
        centripetal_head = CentripetalHead(
            num_classes=4, in_channels=1, corner_emb_channels=0)
        # Corner head expects a multiple levels of features per image
        feat = [
            torch.rand(1, 1, s // 4, s // 4)
            for _ in range(centripetal_head.num_feat_levels)
        ]
        forward_outputs = centripetal_head.forward(feat)
        # Test that empty ground truth encourages the network
        # to predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = centripetal_head.loss_by_feat(
            *forward_outputs, [gt_instances], img_metas, gt_bboxes_ignore)
        empty_det_loss = sum(empty_gt_losses['det_loss'])
        empty_guiding_loss = sum(empty_gt_losses['guiding_loss'])
        empty_centripetal_loss = sum(empty_gt_losses['centripetal_loss'])
        empty_off_loss = sum(empty_gt_losses['off_loss'])
        # assertGreater/assertEqual (not assertTrue/bare assert) give
        # informative failure messages and survive ``python -O``.
        self.assertGreater(empty_det_loss.item(), 0,
                           'det loss should be non-zero')
        self.assertEqual(
            empty_guiding_loss.item(), 0,
            'there should be no guiding loss when there are no true boxes')
        self.assertEqual(
            empty_centripetal_loss.item(), 0,
            'there should be no centripetal loss when there are no true boxes')
        self.assertEqual(
            empty_off_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # Two gt boxes: every loss term must be non-zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874],
             [123.6667, 123.8757, 138.6326, 251.8874]])
        gt_instances.labels = torch.LongTensor([2, 3])
        two_gt_losses = centripetal_head.loss_by_feat(*forward_outputs,
                                                      [gt_instances],
                                                      img_metas,
                                                      gt_bboxes_ignore)
        twogt_det_loss = sum(two_gt_losses['det_loss'])
        twogt_guiding_loss = sum(two_gt_losses['guiding_loss'])
        twogt_centripetal_loss = sum(two_gt_losses['centripetal_loss'])
        twogt_off_loss = sum(two_gt_losses['off_loss'])
        self.assertGreater(twogt_det_loss.item(), 0,
                           'det loss should be non-zero')
        self.assertGreater(twogt_guiding_loss.item(), 0,
                           'push loss should be non-zero')
        self.assertGreater(twogt_centripetal_loss.item(), 0,
                           'pull loss should be non-zero')
        self.assertGreater(twogt_off_loss.item(), 0,
                           'off loss should be non-zero')
| 3,158
| 41.12
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_solov2_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SOLOV2Head
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
    """Create ``num_items`` deterministic random masks inside the boxes."""
    rand = np.random.RandomState(0)
    out = np.zeros((num_items, img_h, img_w))
    for n, (x1, y1, x2, y2) in enumerate(bboxes.astype(np.int32)):
        # Roughly 70% of the pixels inside the box become foreground.
        fill = (rand.rand(1, y2 - y1, x2 - x1) > 0.3).astype(np.int64)
        out[n:n + 1, y1:y2, x1:x2] = fill
    return BitmapMasks(out, height=img_h, width=img_w)
def _fake_mask_feature_head():
    """Return a light-weight SOLOv2 mask-feature-head config for CPU tests."""
    return ConfigDict(
        feat_channels=128,
        start_level=0,
        end_level=3,
        out_channels=256,
        mask_stride=4,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
class TestSOLOv2Head(TestCase):

    def test_solov2_head_loss(self):
        """SOLOv2 head loss for empty and non-empty ground truth.

        Empty gt must yield a non-zero cls loss but exactly zero mask loss;
        a single instance must make both terms non-zero.
        """
        size = 256
        img_metas = [{
            'img_shape': (size, size, 3),
            'ori_shape': (size, size, 3),
            'scale_factor': 1,
            'batch_input_shape': (size, size, 3)
        }]
        head = SOLOV2Head(
            num_classes=4,
            in_channels=1,
            mask_feature_head=_fake_mask_feature_head())
        # One random feature map per stride level.
        feats = tuple(
            torch.rand(1, 1, size // (2**(lvl + 2)), size // (2**(lvl + 2)))
            for lvl in range(len(head.strides)))
        mask_outs = head.forward(feats)

        # Background-only image.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty(0, 4)
        empty_gt.labels = torch.LongTensor([])
        empty_gt.masks = _rand_masks(0, empty_gt.bboxes.numpy(), size, size)
        empty_losses = head.loss_by_feat(
            *mask_outs,
            batch_gt_instances=[empty_gt],
            batch_img_metas=img_metas)
        self.assertGreater(empty_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_losses['loss_mask'].item(), 0,
            'there should be no mask loss when there are no true mask')

        # Single instance with a random mask.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([2])
        one_gt.masks = _rand_masks(1, one_gt.bboxes.numpy(), size, size)
        one_losses = head.loss_by_feat(
            *mask_outs,
            batch_gt_instances=[one_gt],
            batch_img_metas=img_metas)
        self.assertGreater(one_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(one_losses['loss_mask'].item(), 0,
                           'mask loss should be non-zero')

    def test_solov2_head_empty_result(self):
        """Decoding zero candidate kernels yields an empty InstanceData."""
        size = 256
        img_meta = {
            'img_shape': (size, size, 3),
            'ori_shape': (size, size, 3),
            'scale_factor': 1,
            'batch_input_shape': (size, size, 3)
        }
        head = SOLOV2Head(
            num_classes=4,
            in_channels=1,
            mask_feature_head=_fake_mask_feature_head())
        test_cfg = ConfigDict(
            score_thr=0.1,
            mask_thr=0.5,
        )
        results = head._predict_by_feat_single(
            kernel_preds=torch.empty(0, 128),
            cls_scores=torch.empty(0, 80),
            mask_feats=torch.empty(0, 16, 16),
            img_meta=img_meta,
            cfg=test_cfg)
        self.assertIsInstance(results, InstanceData)
        self.assertEqual(len(results), 0)
| 4,598
| 34.10687
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_yolof_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import YOLOFHead
class TestYOLOFHead(TestCase):

    def test_yolof_head_loss(self):
        """YOLOF head loss for empty and non-empty ground truth.

        Empty gt gives a non-zero cls loss and zero box loss; one gt box
        makes both terms non-zero.
        """
        size = 256
        img_metas = [{
            'img_shape': (size, size, 3),
            'scale_factor': 1,
            'pad_shape': (size, size, 3)
        }]
        train_cfg = Config(
            dict(
                assigner=dict(
                    type='UniformAssigner',
                    pos_ignore_thr=0.15,
                    neg_ignore_thr=0.7),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        head = YOLOFHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            reg_decoded_bbox=True,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                scales=[1, 2, 4, 8, 16],
                strides=[32]),
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[.0, .0, .0, .0],
                target_stds=[1., 1., 1., 1.],
                add_ctr_clamp=True,
                ctr_clamp=32),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
        # YOLOF consumes a single C5-level feature map (stride 32).
        feat = [torch.rand(1, 1, size // 32, size // 32)]
        cls_scores, bbox_preds = head.forward(feat)

        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        empty_losses = head.loss_by_feat(cls_scores, bbox_preds, [empty_gt],
                                         img_metas)
        self.assertGreater(empty_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_losses['loss_bbox'].item(), 0,
            'there should be no box loss when there are no true boxes')

        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([2])
        one_losses = head.loss_by_feat(cls_scores, bbox_preds, [one_gt],
                                       img_metas)
        self.assertGreater(one_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(one_losses['loss_bbox'].item(), 0,
                           'box loss should be non-zero')
| 3,396
| 37.168539
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_vfnet_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import VFNetHead
class TestVFNetHead(TestCase):
    """Tests for ``VFNetHead.loss_by_feat`` with and without ATSS."""

    def _run_loss_test(self, use_atss):
        """Shared body for the two loss tests below (they only differed in
        the ``use_atss`` flag).

        Builds a VFNetHead, then checks that an empty ground truth produces
        a non-zero cls loss but zero box loss, while a single gt box makes
        both losses non-zero.

        Args:
            use_atss (bool): Whether to enable the ATSS sampling branch of
                the head.
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'pad_shape': (s, s, 3)
        }]
        train_cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # since VarFocal Loss is not supported on CPU
        vfnet_head = VFNetHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            use_atss=use_atss,
            loss_cls=dict(
                type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds, bbox_preds_refine = vfnet_head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = vfnet_head.loss_by_feat(cls_scores, bbox_preds,
                                                  bbox_preds_refine,
                                                  [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        self.assertGreater(empty_gt_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_gt_losses['loss_bbox'].item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = vfnet_head.loss_by_feat(cls_scores, bbox_preds,
                                                bbox_preds_refine,
                                                [gt_instances], img_metas)
        self.assertGreater(one_gt_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(one_gt_losses['loss_bbox'].item(), 0,
                           'box loss should be non-zero')

    def test_vfnet_head_loss(self):
        """Tests vfnet head loss when truth is empty and non-empty."""
        # NOTE(review): the original test relied on the head's default
        # ``use_atss`` (True, judging by the sibling test below) -- confirm.
        self._run_loss_test(use_atss=True)

    def test_vfnet_head_loss_without_atss(self):
        """Tests vfnet head loss with the ATSS branch disabled."""
        self._run_loss_test(use_atss=False)
| 5,715
| 41.029412
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_free_anchor_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import FreeAnchorRetinaHead
class TestFreeAnchorRetinaHead(TestCase):

    def test_free_anchor_head_loss(self):
        """FreeAnchor retina head loss for empty and non-empty ground truth.

        Without gt boxes only the negative-bag loss may be non-zero; with a
        gt box both bag losses must be non-zero.
        """
        size = 256
        img_metas = [{
            'img_shape': (size, size, 3),
            'pad_shape': (size, size, 3),
            'scale_factor': 1,
        }]
        head = FreeAnchorRetinaHead(num_classes=1, in_channels=1)
        # One random feature map per pyramid level.
        feats = (
            torch.rand(1, 1, size // (2**(lvl + 2)), size // (2**(lvl + 2)))
            for lvl in range(len(head.prior_generator.strides)))
        cls_scores, bbox_preds = head.forward(feats)

        # Background-only image: no positive bags can be formed.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        empty_losses = head.loss_by_feat(cls_scores, bbox_preds, [empty_gt],
                                         img_metas)
        self.assertGreater(empty_losses['negative_bag_loss'].item(), 0,
                           'negative_bag loss should be non-zero')
        self.assertEqual(
            empty_losses['positive_bag_loss'].item(), 0,
            'there should be no positive_bag loss when there are no true boxes'
        )

        # A single gt box: both bag losses must be non-zero.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([0])
        one_losses = head.loss_by_feat(cls_scores, bbox_preds, [one_gt],
                                       img_metas)
        self.assertGreater(one_losses['positive_bag_loss'].item(), 0,
                           'positive bag loss should be non-zero')
        self.assertGreater(one_losses['negative_bag_loss'].item(), 0,
                           'negative bag loss should be non-zero')
| 2,605
| 39.71875
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_embedding_rpn_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import EmbeddingRPNHead
from mmdet.structures import DetDataSample
class TestEmbeddingRPNHead(TestCase):
    """Unit tests for EmbeddingRPNHead init, predict and loss paths."""

    def test_init(self):
        """Test init rpn head."""
        head = EmbeddingRPNHead(
            num_proposals=100, proposal_feature_channel=256)
        head.init_weights()
        # Both learnable proposal tensors must exist after init.
        self.assertTrue(head.init_proposal_bboxes)
        self.assertTrue(head.init_proposal_features)

    def test_loss_and_predict(self):
        """Exercise predict / loss_and_predict / loss entry points."""
        img_size = 256
        meta = {
            'img_shape': (img_size, img_size, 3),
            'pad_shape': (img_size, img_size, 3),
            'scale_factor': 1,
        }
        head = EmbeddingRPNHead(
            num_proposals=100, proposal_feature_channel=256)
        feature_maps = [
            torch.rand(2, 1, img_size // (2**(lvl + 2)),
                       img_size // (2**(lvl + 2))) for lvl in range(5)
        ]
        sample = DetDataSample()
        sample.set_metainfo(meta)
        # `predict` yields one InstanceData of proposals per input sample.
        predictions = head.predict(feature_maps, [sample])
        self.assertTrue(isinstance(predictions, list))
        self.assertTrue(isinstance(predictions[0], InstanceData))
        # `loss_and_predict` returns an (empty) loss dict plus predictions.
        losses_and_preds = head.loss_and_predict(feature_maps, [sample])
        self.assertTrue(isinstance(losses_and_preds, tuple))
        self.assertTrue(isinstance(losses_and_preds[0], dict))
        self.assertEqual(len(losses_and_preds[0]), 0)
        self.assertTrue(isinstance(losses_and_preds[1], list))
        self.assertTrue(isinstance(losses_and_preds[1][0], InstanceData))
        # Standalone `loss` is deliberately unimplemented for this head.
        with pytest.raises(NotImplementedError):
            head.loss(feature_maps, [sample])
| 1,802
| 31.196429
| 69
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_pisa_ssd_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import PISASSDHead
class TestPISASSDHead(TestCase):
    """Loss tests for PISASSDHead, with and without ISR/CARL re-weighting."""
    def test_pisa_ssd_head_loss(self):
        """Tests pisa ssd head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        # Standard SSD-style training config: IoU assigner + pseudo sampler.
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.,
                    ignore_iof_thr=-1,
                    gt_max_assign_all=False),
                sampler=dict(type='PseudoSampler'),
                smoothl1_beta=1.,
                allowed_border=-1,
                pos_weight=-1,
                neg_pos_ratio=3,
                debug=False))
        pisa_ssd_head = PISASSDHead(
            num_classes=4,
            in_channels=(1, 1, 1, 1, 1, 1),
            anchor_generator=dict(
                type='SSDAnchorGenerator',
                scale_major=False,
                input_size=s,
                basesize_ratio_range=(0.15, 0.9),
                strides=[8, 16, 32, 64, 100, 300],
                ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
            train_cfg=cfg)
        # PISA SSD head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
            for stride in pisa_ssd_head.prior_generator.strides)
        cls_scores, bbox_preds = pisa_ssd_head.forward(feats)
        # test without isr and carl
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = pisa_ssd_head.loss_by_feat(cls_scores, bbox_preds,
                                                     [gt_instances], img_metas)
        # When there is no truth, cls_loss and box_loss should all be zero.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = pisa_ssd_head.loss_by_feat(cls_scores, bbox_preds,
                                                   [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        # Enable the PISA-specific re-weighting branches (Importance-based
        # Sample Reweighting and Classification-Aware Regression Loss) and
        # re-run the same empty/non-empty checks.
        pisa_ssd_head.train_cfg.update(
            dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
        # test with isr and carl
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = pisa_ssd_head.loss_by_feat(cls_scores, bbox_preds,
                                                     [gt_instances], img_metas)
        # When there is no truth, cls_loss and box_loss should all be zero.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = pisa_ssd_head.loss_by_feat(cls_scores, bbox_preds,
                                                   [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
| 5,288
| 40.320313
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_ga_rpn_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import GARPNHead
# Shared GARPNHead construction kwargs used by every test in this module.
ga_rpn_config = ConfigDict(
    dict(
        num_classes=1,
        in_channels=4,
        feat_channels=4,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=8,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.14, 0.14]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.11, 0.11]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
        train_cfg=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            pos_weight=-1,
            debug=False),
        test_cfg=dict(
            nms_pre=1000,
            # Fixed: was `ms_post`, a typo — the head reads `nms_post`
            # (see the explicit cfg built in the predict_by_feat test).
            nms_post=1000,
            max_per_img=300,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
class TestGARPNHead(TestCase):
    """Loss and post-processing smoke tests for the guided-anchoring RPN."""
    def test_ga_rpn_head_loss(self):
        """Tests ga rpn head loss."""
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': (1, 1)
        }]
        ga_rpn_head = GARPNHead(**ga_rpn_config)
        # One random feature map per square-anchor level; spatial size is
        # derived from that level's (w, h) stride.
        feats = (
            torch.rand(1, 4, s // stride[1], s // stride[0])
            for stride in ga_rpn_head.square_anchor_generator.strides)
        outs = ga_rpn_head(feats)
        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([0])
        one_gt_losses = ga_rpn_head.loss_by_feat(*outs, [gt_instances],
                                                 img_metas)
        # Each entry is a per-level list of tensors; sum over levels.
        onegt_cls_loss = sum(one_gt_losses['loss_rpn_cls']).item()
        onegt_box_loss = sum(one_gt_losses['loss_rpn_bbox']).item()
        onegt_shape_loss = sum(one_gt_losses['loss_anchor_shape']).item()
        onegt_loc_loss = sum(one_gt_losses['loss_anchor_loc']).item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(onegt_shape_loss, 0,
                           'shape loss should be non-zero')
        self.assertGreater(onegt_loc_loss, 0,
                           'location loss should be non-zero')
    def test_ga_rpn_head_predict_by_feat(self):
        """Smoke-tests decoding + NMS of raw head outputs into proposals."""
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': (1, 1)
        }]
        ga_rpn_head = GARPNHead(**ga_rpn_config)
        feats = (
            torch.rand(1, 4, s // stride[1], s // stride[0])
            for stride in ga_rpn_head.square_anchor_generator.strides)
        outs = ga_rpn_head(feats)
        cfg = ConfigDict(
            dict(
                nms_pre=2000,
                nms_post=1000,
                max_per_img=300,
                nms=dict(type='nms', iou_threshold=0.7),
                min_bbox_size=0))
        # Only verifies the call completes without error (no assertions).
        ga_rpn_head.predict_by_feat(
            *outs, batch_img_metas=img_metas, cfg=cfg, rescale=True)
| 5,031
| 33.944444
| 76
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_corner_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.evaluation import bbox_overlaps
from mmdet.models.dense_heads import CornerHead
class TestCornerHead(TestCase):
    """Tests for CornerNet-style corner head loss and heatmap decoding."""
    def test_corner_head_loss(self):
        """Tests corner head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }]
        corner_head = CornerHead(num_classes=4, in_channels=1)
        # Corner head expects a multiple levels of features per image
        feat = [
            torch.rand(1, 1, s // 4, s // 4)
            for _ in range(corner_head.num_feat_levels)
        ]
        forward_outputs = corner_head.forward(feat)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = corner_head.loss_by_feat(*forward_outputs,
                                                   [gt_instances], img_metas,
                                                   gt_bboxes_ignore)
        # Losses are per-level lists; sum over levels before checking.
        empty_det_loss = sum(empty_gt_losses['det_loss'])
        empty_push_loss = sum(empty_gt_losses['push_loss'])
        empty_pull_loss = sum(empty_gt_losses['pull_loss'])
        empty_off_loss = sum(empty_gt_losses['off_loss'])
        self.assertTrue(empty_det_loss.item() > 0,
                        'det loss should be non-zero')
        self.assertTrue(
            empty_push_loss.item() == 0,
            'there should be no push loss when there are no true boxes')
        self.assertTrue(
            empty_pull_loss.item() == 0,
            'there should be no pull loss when there are no true boxes')
        self.assertTrue(
            empty_off_loss.item() == 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = corner_head.loss_by_feat(*forward_outputs,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        onegt_det_loss = sum(one_gt_losses['det_loss'])
        onegt_push_loss = sum(one_gt_losses['push_loss'])
        onegt_pull_loss = sum(one_gt_losses['pull_loss'])
        onegt_off_loss = sum(one_gt_losses['off_loss'])
        self.assertTrue(onegt_det_loss.item() > 0,
                        'det loss should be non-zero')
        # Push loss separates embeddings of *different* objects, so it is
        # zero with a single ground-truth box.
        self.assertTrue(
            onegt_push_loss.item() == 0,
            'there should be no push loss when there are only one true box')
        self.assertTrue(onegt_pull_loss.item() > 0,
                        'pull loss should be non-zero')
        self.assertTrue(onegt_off_loss.item() > 0,
                        'off loss should be non-zero')
        # Two boxes: push loss may now be active (>= 0 after F.relu).
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874],
             [123.6667, 123.8757, 138.6326, 251.8874]])
        gt_instances.labels = torch.LongTensor([2, 3])
        two_gt_losses = corner_head.loss_by_feat(*forward_outputs,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        twogt_det_loss = sum(two_gt_losses['det_loss'])
        twogt_push_loss = sum(two_gt_losses['push_loss'])
        twogt_pull_loss = sum(two_gt_losses['pull_loss'])
        twogt_off_loss = sum(two_gt_losses['off_loss'])
        self.assertTrue(twogt_det_loss.item() > 0,
                        'det loss should be non-zero')
        # F.relu limits push loss larger than or equal to 0.
        self.assertTrue(twogt_push_loss.item() >= 0,
                        'push loss should be non-zero')
        self.assertTrue(twogt_pull_loss.item() > 0,
                        'pull loss should be non-zero')
        self.assertTrue(twogt_off_loss.item() > 0,
                        'off loss should be non-zero')
    def test_corner_head_encode_and_decode_heatmap(self):
        """Tests corner head generating and decoding the heatmap."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3),
            'border': (0, 0, 0, 0)
        }]
        # Two boxes share identical coordinates but different labels, so the
        # decoder must separate them by class.
        gt_bboxes = [
            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                          [10, 20, 200, 240]])
        ]
        gt_labels = [torch.LongTensor([1, 1, 2])]
        corner_head = CornerHead(
            num_classes=4, in_channels=1, corner_emb_channels=1)
        feat = [
            torch.rand(1, 1, s // 4, s // 4)
            for _ in range(corner_head.num_feat_levels)
        ]
        # Encode ground truth into target heatmaps/offsets/embeddings.
        targets = corner_head.get_targets(
            gt_bboxes,
            gt_labels,
            feat[0].shape,
            img_metas[0]['batch_input_shape'],
            with_corner_emb=corner_head.with_corner_emb)
        gt_tl_heatmap = targets['topleft_heatmap']
        gt_br_heatmap = targets['bottomright_heatmap']
        gt_tl_offset = targets['topleft_offset']
        gt_br_offset = targets['bottomright_offset']
        embedding = targets['corner_embedding']
        # Build one-hot embedding heatmaps from the first object's corner
        # positions so the decoder can match top-left to bottom-right.
        [top, left], [bottom, right] = embedding[0][0]
        gt_tl_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
        gt_br_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
        gt_tl_embedding_heatmap[0, 0, top, left] = 1
        gt_br_embedding_heatmap[0, 0, bottom, right] = 1
        # Decode the (perfect) targets back into boxes/scores/classes.
        batch_bboxes, batch_scores, batch_clses = corner_head._decode_heatmap(
            tl_heat=gt_tl_heatmap,
            br_heat=gt_br_heatmap,
            tl_off=gt_tl_offset,
            br_off=gt_br_offset,
            tl_emb=gt_tl_embedding_heatmap,
            br_emb=gt_br_embedding_heatmap,
            img_meta=img_metas[0],
            k=100,
            kernel=3,
            distance_threshold=0.5)
        bboxes = batch_bboxes.view(-1, 4)
        scores = batch_scores.view(-1, 1)
        clses = batch_clses.view(-1, 1)
        # Sort detections by descending score and keep confident ones.
        idx = scores.argsort(dim=0, descending=True)
        bboxes = bboxes[idx].view(-1, 4)
        scores = scores[idx].view(-1)
        clses = clses[idx].view(-1)
        valid_bboxes = bboxes[torch.where(scores > 0.05)]
        valid_labels = clses[torch.where(scores > 0.05)]
        # Offset boxes by a label-dependent amount so IoU matching is only
        # possible between boxes of the same class.
        max_coordinate = valid_bboxes.max()
        offsets = valid_labels.to(valid_bboxes) * (max_coordinate + 1)
        gt_offsets = gt_labels[0].to(gt_bboxes[0]) * (max_coordinate + 1)
        offset_bboxes = valid_bboxes + offsets[:, None]
        offset_gtbboxes = gt_bboxes[0] + gt_offsets[:, None]
        iou_matrix = bbox_overlaps(offset_bboxes.numpy(),
                                   offset_gtbboxes.numpy())
        # All three GT boxes must be recovered exactly (IoU == 1).
        self.assertEqual((iou_matrix == 1).sum(), 3)
| 7,299
| 39.782123
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_yolo_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import YOLOV3Head
class TestYOLOV3Head(TestCase):
    """Loss tests for the YOLOv3 detection head."""
    def test_yolo_head_loss(self):
        """Tests YOLO head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        head = YOLOV3Head(
            num_classes=4,
            in_channels=[1, 1, 1],
            out_channels=[1, 1, 1],
            train_cfg=Config(
                dict(
                    assigner=dict(
                        type='GridAssigner',
                        pos_iou_thr=0.5,
                        neg_iou_thr=0.5,
                        min_pos_iou=0))))
        head.init_weights()
        # YOLO head expects a multiple levels of features per image
        feats = [
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in head.prior_generator.strides
        ]
        # forward() returns a 1-tuple of per-level prediction maps.
        predmaps, = head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = head.loss_by_feat(predmaps, [gt_instances],
                                            img_metas)
        # When there is no truth, the conf loss should be nonzero but
        # cls loss and xy&wh loss should be zero
        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
        empty_conf_loss = sum(empty_gt_losses['loss_conf']).item()
        empty_xy_loss = sum(empty_gt_losses['loss_xy']).item()
        empty_wh_loss = sum(empty_gt_losses['loss_wh']).item()
        self.assertGreater(empty_conf_loss, 0, 'conf loss should be non-zero')
        self.assertEqual(
            empty_cls_loss, 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_xy_loss, 0,
            'there should be no xy loss when there are no true boxes')
        self.assertEqual(
            empty_wh_loss, 0,
            'there should be no wh loss when there are no true boxes')
        # When truth is non-empty then all conf, cls loss and xywh loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = head.loss_by_feat(predmaps, [gt_instances], img_metas)
        one_gt_cls_loss = sum(one_gt_losses['loss_cls']).item()
        one_gt_conf_loss = sum(one_gt_losses['loss_conf']).item()
        one_gt_xy_loss = sum(one_gt_losses['loss_xy']).item()
        one_gt_wh_loss = sum(one_gt_losses['loss_wh']).item()
        self.assertGreater(one_gt_conf_loss, 0, 'conf loss should be non-zero')
        self.assertGreater(one_gt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(one_gt_xy_loss, 0, 'xy loss should be non-zero')
        self.assertGreater(one_gt_wh_loss, 0, 'wh loss should be non-zero')
| 3,290
| 39.62963
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_fsaf_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import FSAFHead
class TestFSAFHead(TestCase):
    """Loss tests for the FSAF (anchor-free branch) head."""
    def test_fsaf_head_loss(self):
        """Tests fsaf head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': 1,
        }]
        # FSAF uses a center-region assigner rather than IoU matching.
        cfg = Config(
            dict(
                assigner=dict(
                    type='CenterRegionAssigner',
                    pos_scale=0.2,
                    neg_scale=0.2,
                    min_pos_iof=0.01),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        fsaf_head = FSAFHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=1,
            reg_decoded_bbox=True,
            anchor_generator=dict(
                type='AnchorGenerator',
                octave_base_scale=1,
                scales_per_octave=1,
                ratios=[1.0],
                strides=[8, 16, 32, 64, 128]),
            bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0,
                reduction='none'),
            loss_bbox=dict(
                type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'),
            train_cfg=cfg)
        # FSAF head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
            for stride in fsaf_head.prior_generator.strides)
        cls_scores, bbox_preds = fsaf_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # box loss should be zero
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
| 3,536
| 36.62766
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_tood_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config, MessageHub
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import TOODHead
def _tood_head(anchor_type):
    """Build a minimal TOODHead for testing.

    Args:
        anchor_type (str): Forwarded to ``TOODHead``; selects between the
            ``'anchor_free'`` and ``'anchor_based'`` prior interpretations.

    Returns:
        TOODHead: A small head (1 input channel, 1 stacked conv) with
        train/test configs attached.
    """
    # Training uses ATSS for the first `initial_epoch` epochs, then switches
    # to the task-aligned assigner.
    train_cfg = Config(
        dict(
            initial_epoch=4,
            initial_assigner=dict(type='ATSSAssigner', topk=9),
            assigner=dict(type='TaskAlignedAssigner', topk=13),
            alpha=1,
            beta=6,
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    test_cfg = Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    tood_head = TOODHead(
        num_classes=80,
        in_channels=1,
        stacked_convs=1,
        feat_channels=8,  # the same as `la_down_rate` in TaskDecomposition
        norm_cfg=None,
        anchor_type=anchor_type,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        initial_loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    return tood_head
class TestTOODHead(TestCase):
    """Loss tests for TOODHead in anchor-free and anchor-based modes."""
    def test_tood_head_anchor_free_loss(self):
        """Tests tood head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        tood_head = _tood_head('anchor_free')
        tood_head.init_weights()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [8, 16, 32, 64, 128]
        ]
        cls_scores, bbox_preds = tood_head(feat)
        # Epoch 0 keeps the head on its initial (ATSS) assigner, since
        # `initial_epoch` in the train cfg is 4.
        message_hub = MessageHub.get_instance('runtime_info')
        message_hub.update_info('epoch', 0)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        self.assertGreater(
            sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertEqual(
            sum(empty_box_loss).item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_bboxes_ignore = None
        one_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas,
                                               gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        self.assertGreater(
            sum(onegt_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertGreater(
            sum(onegt_box_loss).item(), 0, 'box loss should be non-zero')
        # NOTE(review): the empty-gt / one-gt checks below repeat the block
        # above verbatim. Upstream variants of this test switch assigners
        # (e.g. by advancing 'epoch' in the MessageHub past `initial_epoch`)
        # before re-running — confirm whether an epoch update was dropped.
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        self.assertGreater(
            sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertEqual(
            sum(empty_box_loss).item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_bboxes_ignore = None
        one_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas,
                                               gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        self.assertGreater(
            sum(onegt_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertGreater(
            sum(onegt_box_loss).item(), 0, 'box loss should be non-zero')
    def test_tood_head_anchor_based_loss(self):
        """Tests tood head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        tood_head = _tood_head('anchor_based')
        tood_head.init_weights()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [8, 16, 32, 64, 128]
        ]
        cls_scores, bbox_preds = tood_head(feat)
        message_hub = MessageHub.get_instance('runtime_info')
        message_hub.update_info('epoch', 0)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        self.assertGreater(
            sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertEqual(
            sum(empty_box_loss).item(), 0,
            'there should be no box loss when there are no true boxes')
| 7,843
| 39.43299
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_solo_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.models.dense_heads import (DecoupledSOLOHead,
DecoupledSOLOLightHead, SOLOHead)
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
    """Create ``num_items`` random binary masks, one filled per box.

    Uses a fixed-seed RandomState so results are reproducible across runs.
    Returns a ``BitmapMasks`` of shape ``(num_items, img_h, img_w)``.
    """
    rng = np.random.RandomState(0)
    canvas = np.zeros((num_items, img_h, img_w))
    for idx, box in enumerate(bboxes):
        x1, y1, x2, y2 = box.astype(np.int32)
        # Random foreground pattern inside the box region only.
        fill = (rng.rand(1, y2 - y1, x2 - x1) > 0.3).astype(np.int64)
        canvas[idx:idx + 1, y1:y2, x1:x2] = fill
    return BitmapMasks(canvas, height=img_h, width=img_w)
class TestSOLOHead(TestCase):
    """Loss and empty-result tests shared across the SOLO head variants."""
    # Run the same loss test against all three SOLO head implementations.
    @parameterized.expand([(SOLOHead, ), (DecoupledSOLOHead, ),
                           (DecoupledSOLOLightHead, )])
    def test_mask_head_loss(self, MaskHead):
        """Tests mask head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }]
        mask_head = MaskHead(num_classes=4, in_channels=1)
        # SOLO head expects a multiple levels of features per image
        feats = []
        for i in range(len(mask_head.strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
        feats = tuple(feats)
        mask_outs = mask_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty(0, 4)
        gt_instances.labels = torch.LongTensor([])
        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
        empty_gt_losses = mask_head.loss_by_feat(
            *mask_outs,
            batch_gt_instances=[gt_instances],
            batch_img_metas=img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_mask_loss = empty_gt_losses['loss_mask']
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_mask_loss.item(), 0,
            'there should be no mask loss when there are no true mask')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
        one_gt_losses = mask_head.loss_by_feat(
            *mask_outs,
            batch_gt_instances=[gt_instances],
            batch_img_metas=img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_mask_loss = one_gt_losses['loss_mask']
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_mask_loss.item(), 0,
                           'mask loss should be non-zero')
    def test_solo_head_empty_result(self):
        """Post-processing of zero detections must return an empty result."""
        s = 256
        img_metas = {
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }
        mask_head = SOLOHead(num_classes=4, in_channels=1)
        cls_scores = torch.empty(0, 80)
        mask_preds = torch.empty(0, 16, 16)
        test_cfg = ConfigDict(
            score_thr=0.1,
            mask_thr=0.5,
        )
        results = mask_head._predict_by_feat_single(
            cls_scores=cls_scores,
            mask_preds=mask_preds,
            img_meta=img_metas,
            cfg=test_cfg)
        self.assertIsInstance(results, InstanceData)
        self.assertEqual(len(results), 0)
    def test_decoupled_solo_head_empty_result(self):
        """Same empty-input check for the decoupled SOLO variant, which
        predicts separate x/y mask branches."""
        s = 256
        img_metas = {
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }
        mask_head = DecoupledSOLOHead(num_classes=4, in_channels=1)
        cls_scores = torch.empty(0, 80)
        mask_preds_x = torch.empty(0, 16, 16)
        mask_preds_y = torch.empty(0, 16, 16)
        test_cfg = ConfigDict(
            score_thr=0.1,
            mask_thr=0.5,
        )
        results = mask_head._predict_by_feat_single(
            cls_scores=cls_scores,
            mask_preds_x=mask_preds_x,
            mask_preds_y=mask_preds_y,
            img_meta=img_metas,
            cfg=test_cfg)
        self.assertIsInstance(results, InstanceData)
        self.assertEqual(len(results), 0)
| 5,125
| 34.351724
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_yolox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.config import Config
from mmengine.model import bias_init_with_prob
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.dense_heads import YOLOXHead
class TestYOLOXHead(TestCase):
    """Tests for ``YOLOXHead``: weight init, inference, and loss paths."""

    def test_init_weights(self):
        """Cls/obj conv biases should be set from the focal-loss prior."""
        head = YOLOXHead(
            num_classes=4, in_channels=1, stacked_convs=1, use_depthwise=False)
        head.init_weights()
        # Prior probability 0.01 is the standard focal-loss bias init.
        bias_init = bias_init_with_prob(0.01)
        for conv_cls, conv_obj in zip(head.multi_level_conv_cls,
                                      head.multi_level_conv_obj):
            assert_allclose(conv_cls.bias.data,
                            torch.ones_like(conv_cls.bias.data) * bias_init)
            assert_allclose(conv_obj.bias.data,
                            torch.ones_like(conv_obj.bias.data) * bias_init)

    def test_predict_by_feat(self):
        """Smoke-test prediction with and without rescale/NMS."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = Config(
            dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
        head = YOLOXHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            use_depthwise=False,
            test_cfg=test_cfg)
        # One random feature map per stride level.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)

    def test_loss_by_feat(self):
        """Loss should behave sensibly for empty, valid, and out-of-bound GT."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        train_cfg = Config(
            dict(
                assigner=dict(
                    type='SimOTAAssigner',
                    center_radius=2.5,
                    candidate_topk=10,
                    iou_weight=3.0,
                    cls_weight=1.0)))
        head = YOLOXHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            use_depthwise=False,
            train_cfg=train_cfg)
        assert not head.use_l1
        assert isinstance(head.multi_level_cls_convs[0][0], ConvModule)
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData(
            bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            objectnesses, [gt_instances],
                                            img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        head = YOLOXHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            use_depthwise=True,
            train_cfg=train_cfg)
        assert isinstance(head.multi_level_cls_convs[0][0],
                          DepthwiseSeparableConvModule)
        # Enable the auxiliary L1 branch so loss_l1 is also produced.
        head.use_l1 = True
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([2]))
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        onegt_l1_loss = one_gt_losses['loss_l1'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')
        self.assertGreater(onegt_l1_loss.item(), 0,
                           'l1 loss should be non-zero')
        # Test ground truth out of bound
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[s * 4, s * 4, s * 4 + 10, s * 4 + 10]]),
            labels=torch.LongTensor([2]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            objectnesses, [gt_instances],
                                            img_metas)
        # When gt_bboxes out of bound, the assign results should be empty,
        # so the cls and bbox loss should be zero.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when gt_bboxes out of bound')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when gt_bboxes out of bound')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')
| 6,502
| 38.412121
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_autoassign_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import AutoAssignHead
class TestAutoAssignHead(TestCase):
    """Tests for ``AutoAssignHead`` loss computation."""

    def test_autoassign_head_loss(self):
        """Tests autoassign head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        autoassign_head = AutoAssignHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=1,
            strides=[8, 16, 32, 64, 128],
            loss_bbox=dict(type='GIoULoss', loss_weight=5.0),
            norm_cfg=None)
        # The head (FCOS-style) expects one feature map per pyramid level.
        feats = (
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in autoassign_head.prior_generator.strides)
        cls_scores, bbox_preds, centernesses = autoassign_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = autoassign_head.loss_by_feat(cls_scores, bbox_preds,
                                                       centernesses,
                                                       [gt_instances],
                                                       img_metas)
        # When there is no truth, the neg loss should be nonzero but
        # pos loss and center loss should be zero
        empty_pos_loss = empty_gt_losses['loss_pos'].item()
        empty_neg_loss = empty_gt_losses['loss_neg'].item()
        empty_ctr_loss = empty_gt_losses['loss_center'].item()
        self.assertGreater(empty_neg_loss, 0, 'neg loss should be non-zero')
        self.assertEqual(
            empty_pos_loss, 0,
            'there should be no pos loss when there are no true boxes')
        self.assertEqual(
            empty_ctr_loss, 0,
            'there should be no centerness loss when there are no true boxes')
        # When truth is non-empty then all pos, neg loss and center loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = autoassign_head.loss_by_feat(cls_scores, bbox_preds,
                                                     centernesses,
                                                     [gt_instances], img_metas)
        onegt_pos_loss = one_gt_losses['loss_pos'].item()
        onegt_neg_loss = one_gt_losses['loss_neg'].item()
        onegt_ctr_loss = one_gt_losses['loss_center'].item()
        self.assertGreater(onegt_pos_loss, 0, 'pos loss should be non-zero')
        self.assertGreater(onegt_neg_loss, 0, 'neg loss should be non-zero')
        self.assertGreater(onegt_ctr_loss, 0, 'center loss should be non-zero')
| 3,186
| 41.493333
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_fovea_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import FoveaHead
class TestFOVEAHead(TestCase):
    """Tests for ``FoveaHead`` loss computation."""

    def test_fovea_head_loss(self):
        """Empty GT yields no box loss; a real GT makes both losses positive."""
        size = 256
        batch_img_metas = [{
            'img_shape': (size, size, 3),
            'pad_shape': (size, size, 3),
            'scale_factor': 1,
        }]
        head = FoveaHead(num_classes=4, in_channels=1)
        # One random feature map per pyramid level (stride 4, 8, 16, ...).
        num_levels = len(head.prior_generator.strides)
        feats = [
            torch.rand(1, 1, size // (2**(lvl + 2)), size // (2**(lvl + 2)))
            for lvl in range(num_levels)
        ]
        cls_scores, bbox_preds = head.forward(feats)
        # Without any ground truth the classifier is still penalised for
        # foreground activations, but the regression branch has no targets.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        empty_losses = head.loss_by_feat(cls_scores, bbox_preds, [empty_gt],
                                         batch_img_metas)
        self.assertGreater(empty_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_losses['loss_bbox'].item(), 0,
            'there should be no box loss when there are no true boxes')
        # A single ground-truth box should make both losses positive.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([2])
        one_losses = head.loss_by_feat(cls_scores, bbox_preds, [one_gt],
                                       batch_img_metas)
        self.assertGreater(one_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(one_losses['loss_bbox'].item(), 0,
                           'box loss should be non-zero')
| 2,443
| 38.419355
| 76
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_pisa_retinanet_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import PISARetinaHead
class TestPISARetinaHead(TestCase):
    """Tests for ``PISARetinaHead`` loss computation (ISR + CARL)."""

    def test_pisa_reitnanet_head_loss(self):
        """Tests PISA RetinaNet head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.4,
                    min_pos_iou=0,
                    ignore_iof_thr=-1),
                isr=dict(k=2., bias=0.),
                carl=dict(k=1., bias=0.2),
                sampler=dict(type='PseudoSampler'),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        pisa_retinanet_head = PISARetinaHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=256,
            anchor_generator=dict(
                type='AnchorGenerator',
                octave_base_scale=4,
                scales_per_octave=3,
                ratios=[0.5, 1.0, 2.0],
                strides=[8, 16, 32, 64, 128]),
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[.0, .0, .0, .0],
                target_stds=[1.0, 1.0, 1.0, 1.0]),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
            train_cfg=cfg)
        # pisa retina head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
            for stride in pisa_retinanet_head.prior_generator.strides)
        cls_scores, bbox_preds = pisa_retinanet_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = pisa_retinanet_head.loss_by_feat(
            cls_scores, bbox_preds, [gt_instances], img_metas)
        # When there is no truth, box and CARL losses should be zero; the
        # cls loss is still non-zero (background classification).
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        empty_carl_loss = empty_gt_losses['loss_carl']
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_carl_loss.item(), 0,
            'there should be no carl loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = pisa_retinanet_head.loss_by_feat(
            cls_scores, bbox_preds, [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        onegt_carl_loss = one_gt_losses['loss_carl']
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_carl_loss.item(), 0,
                           'carl loss should be non-zero')
| 4,122
| 37.896226
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_ld_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import GFLHead, LDHead
class TestLDHead(TestCase):
    """Tests for ``LDHead`` (localization distillation) loss computation."""

    def test_ld_head_loss(self):
        """Tests ld head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        train_cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        ld_head = LDHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            loss_ld=dict(
                type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
            loss_cls=dict(
                type='QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]))
        # A GFL head acts as the teacher that supplies soft bbox targets.
        teacher_model = GFLHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            loss_cls=dict(
                type='QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]))
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds = ld_head.forward(feat)
        # Teacher bbox predictions serve as the distillation targets.
        rand_soft_target = teacher_model.forward(feat)[1]
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        batch_gt_instances_ignore = None
        empty_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas,
                                               rand_soft_target,
                                               batch_gt_instances_ignore)
        # When there is no truth, the cls loss should be nonzero, ld loss
        # should be non-negative but there should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_ld_loss = sum(empty_gt_losses['loss_ld'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreaterEqual(empty_ld_loss.item(), 0,
                                'ld loss should be non-negative')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        batch_gt_instances_ignore = None
        one_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                             [gt_instances], img_metas,
                                             rand_soft_target,
                                             batch_gt_instances_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        # Ignore the (only) ground-truth box itself.
        batch_gt_instances_ignore = gt_instances
        # When truth is non-empty but ignored then the cls loss should be
        # nonzero, but there should be no box loss.
        ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                                [gt_instances], img_metas,
                                                rand_soft_target,
                                                batch_gt_instances_ignore)
        ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
        ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
        self.assertGreater(ignore_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(ignore_box_loss.item(), 0,
                         'gt bbox ignored loss should be zero')
        # With a random (likely non-overlapping) ignore region, the GT box is
        # expected to remain assigned, so box loss should be non-negative.
        batch_gt_instances_ignore = InstanceData()
        batch_gt_instances_ignore.bboxes = torch.randn(1, 4)
        not_ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                                    [gt_instances], img_metas,
                                                    rand_soft_target,
                                                    batch_gt_instances_ignore)
        not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
        not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
        self.assertGreater(not_ignore_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreaterEqual(not_ignore_box_loss.item(), 0,
                                'gt bbox not ignored loss should be non-zero')
| 6,184
| 40.233333
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_paa_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.utils import levels_to_images
class TestPAAHead(TestCase):
    """Tests for ``PAAHead`` loss computation and prediction."""

    def test_paa_head_loss(self):
        """Tests paa head loss when truth is empty and non-empty."""

        class mock_skm:
            """Stand-in for ``sklearn.mixture`` so PAA's GMM-based anchor
            reassignment runs deterministically without sklearn fitting."""

            def GaussianMixture(self, *args, **kwargs):
                return self

            def fit(self, loss):
                pass

            def predict(self, loss):
                # Assign every sample to component 0.
                # ``np.long`` was removed in NumPy 1.24; ``np.int64`` is the
                # equivalent concrete dtype.
                components = np.zeros_like(loss, dtype=np.int64)
                return components.reshape(-1)

            def score_samples(self, loss):
                scores = np.random.random(len(loss))
                return scores

        paa_head.skm = mock_skm()
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        train_cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.1,
                    neg_iou_thr=0.1,
                    min_pos_iou=0,
                    ignore_iof_thr=-1),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # CrossEntropyLoss is used since Focal Loss is not supported on CPU
        paa = PAAHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
            loss_centerness=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
        paa.init_weights()
        cls_scores, bbox_preds, iou_preds = paa(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = paa.loss_by_feat(cls_scores, bbox_preds, iou_preds,
                                           [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box or iou loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        empty_iou_loss = empty_gt_losses['loss_iou']
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_iou_loss.item(), 0,
            'there should be no iou loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = paa.loss_by_feat(cls_scores, bbox_preds, iou_preds,
                                         [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        onegt_iou_loss = one_gt_losses['loss_iou']
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_iou_loss.item(), 0,
                           'iou loss should be non-zero')
        # levels_to_images should flatten level-major tensors to image-major.
        n, c, h, w = 10, 4, 20, 20
        mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
        results = levels_to_images(mlvl_tensor)
        self.assertEqual(len(results), n)
        self.assertEqual(results[0].size(), (h * w * 5, c))
        self.assertTrue(paa.with_score_voting)
        # Single-level head to smoke-test predict_by_feat.
        paa = PAAHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
            loss_centerness=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        cls_scores = [torch.ones(2, 4, 5, 5)]
        bbox_preds = [torch.ones(2, 4, 5, 5)]
        iou_preds = [torch.ones(2, 1, 5, 5)]
        cfg = Config(
            dict(
                nms_pre=1000,
                min_bbox_size=0,
                score_thr=0.05,
                nms=dict(type='nms', iou_threshold=0.6),
                max_per_img=100))
        rescale = False
        paa.predict_by_feat(
            cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
| 5,675
| 37.09396
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_retina_sepBN_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import RetinaSepBNHead
class TestRetinaSepBNHead(TestCase):
    """Tests for ``RetinaSepBNHead`` initialization and loss computation."""

    def test_init(self):
        """Test init RetinaSepBN head."""
        anchor_head = RetinaSepBNHead(num_classes=1, num_ins=1, in_channels=1)
        anchor_head.init_weights()
        self.assertTrue(anchor_head.cls_convs)
        self.assertTrue(anchor_head.reg_convs)
        self.assertTrue(anchor_head.retina_cls)
        self.assertTrue(anchor_head.retina_reg)

    def test_retina_sepbn_head_loss(self):
        """Tests RetinaSepBN head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.4,
                    min_pos_iou=0,
                    ignore_iof_thr=-1),
                sampler=dict(type='PseudoSampler'
                             ),  # Focal loss should use PseudoSampler
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        anchor_head = RetinaSepBNHead(
            num_classes=4, num_ins=5, in_channels=1, train_cfg=cfg)
        # Anchor head expects a multiple levels of features per image
        feats = []
        for i in range(len(anchor_head.prior_generator.strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
        cls_scores, bbox_preds = anchor_head.forward(tuple(feats))
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
                                                   [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
| 3,483
| 38.146067
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_ga_retina_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.models.dense_heads import GARetinaHead
# Shared constructor kwargs for building a small GARetinaHead in the tests
# below (anchor generators, coders, losses, train/test cfg).
ga_retina_head_config = ConfigDict(
    dict(
        num_classes=4,
        in_channels=4,
        feat_channels=4,
        stacked_convs=1,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0),
        train_cfg=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.4,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            debug=False),
        test_cfg=dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
class TestGARetinaHead(TestCase):
    """Tests for ``GARetinaHead`` construction and forward pass."""

    def test_ga_retina_head_init_and_forward(self):
        """The GARetinaHead inherits loss and prediction functions from
        GuidedAnchorHead.

        Here, we only test GARetinaHead initialization and forward.
        """
        # Test initialization
        ga_retina_head = GARetinaHead(**ga_retina_head_config)
        # Test forward
        s = 256
        feats = (
            torch.rand(1, 4, s // stride[1], s // stride[0])
            for stride in ga_retina_head.square_anchor_generator.strides)
        ga_retina_head(feats)
| 3,067
| 30.306122
| 74
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_condinst_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CondInstBboxHead, CondInstMaskHead
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
    """Build ``num_items`` deterministic random binary masks, one per bbox."""
    rng = np.random.RandomState(0)
    canvas = np.zeros((num_items, img_h, img_w), dtype=np.float32)
    for idx, box in enumerate(bboxes):
        x1, y1, x2, y2 = box.astype(np.int32)
        # Fill the bbox region with a random 0/1 pattern (~70% foreground).
        patch = (rng.rand(1, y2 - y1, x2 - x1) > 0.3).astype(np.int64)
        canvas[idx:idx + 1, y1:y2, x1:x2] = patch
    return BitmapMasks(canvas, height=img_h, width=img_w)
def _fake_mask_feature_head():
    """Return a minimal mask-feature-head config for CondInst tests."""
    return ConfigDict(
        in_channels=1,
        feat_channels=1,
        start_level=0,
        end_level=2,
        out_channels=8,
        mask_stride=8,
        num_stacked_convs=4,
        norm_cfg=dict(type='BN', requires_grad=True))
class TestCondInstHead(TestCase):
    """Tests for CondInst bbox-head and mask-head loss computation."""

    def test_condinst_bboxhead_loss(self):
        """Tests condinst bboxhead loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        condinst_bboxhead = CondInstBboxHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        # The head (FCOS-style) expects one feature map per pyramid level.
        feats = (
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in condinst_bboxhead.prior_generator.strides)
        cls_scores, bbox_preds, centernesses, param_preds =\
            condinst_bboxhead.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
        empty_gt_losses = condinst_bboxhead.loss_by_feat(
            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],
            img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # box loss and centerness loss should be zero
        empty_cls_loss = empty_gt_losses['loss_cls'].item()
        empty_box_loss = empty_gt_losses['loss_bbox'].item()
        empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss, 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_ctr_loss, 0,
            'there should be no centerness loss when there are no true boxes')
        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
        one_gt_losses = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,
                                                       centernesses,
                                                       param_preds,
                                                       [gt_instances],
                                                       img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].item()
        onegt_box_loss = one_gt_losses['loss_bbox'].item()
        onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(onegt_ctr_loss, 0,
                           'centerness loss should be non-zero')
        # Test the `center_sampling` works fine.
        condinst_bboxhead.center_sampling = True
        ctrsamp_losses = condinst_bboxhead.loss_by_feat(
            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],
            img_metas)
        ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item()
        ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item()
        ctrsamp_ctr_loss = ctrsamp_losses['loss_centerness'].item()
        self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(ctrsamp_ctr_loss, 0,
                           'centerness loss should be non-zero')
        # Test the `norm_on_bbox` works fine.
        condinst_bboxhead.norm_on_bbox = True
        normbox_losses = condinst_bboxhead.loss_by_feat(
            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],
            img_metas)
        normbox_cls_loss = normbox_losses['loss_cls'].item()
        normbox_box_loss = normbox_losses['loss_bbox'].item()
        normbox_ctr_loss = normbox_losses['loss_centerness'].item()
        self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(normbox_ctr_loss, 0,
                           'centerness loss should be non-zero')

    def test_condinst_maskhead_loss(self):
        """Tests condinst maskhead loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        condinst_bboxhead = CondInstBboxHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        mask_feature_head = _fake_mask_feature_head()
        condinst_maskhead = CondInstMaskHead(
            mask_feature_head=mask_feature_head,
            loss_mask=dict(
                type='DiceLoss',
                use_sigmoid=True,
                activate=True,
                eps=5e-6,
                loss_weight=1.0))
        # The bbox head (FCOS-style) expects one feature map per level.
        feats = []
        for i in range(len(condinst_bboxhead.strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))
        feats = tuple(feats)
        cls_scores, bbox_preds, centernesses, param_preds =\
            condinst_bboxhead.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
        # Run the bbox-head loss first so it records positive samples.
        _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,
                                           centernesses, param_preds,
                                           [gt_instances], img_metas)
        # When truth is empty then all mask loss
        # should be zero for random inputs
        positive_infos = condinst_bboxhead.get_positive_infos()
        mask_outs = condinst_maskhead.forward(feats, positive_infos)
        empty_gt_mask_losses = condinst_maskhead.loss_by_feat(
            *mask_outs, [gt_instances], img_metas, positive_infos)
        loss_mask = empty_gt_mask_losses['loss_mask']
        self.assertEqual(loss_mask, 0, 'mask loss should be zero')
        # When truth is non-empty then the mask loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
        _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,
                                           centernesses, param_preds,
                                           [gt_instances], img_metas)
        positive_infos = condinst_bboxhead.get_positive_infos()
        mask_outs = condinst_maskhead.forward(feats, positive_infos)
        one_gt_mask_losses = condinst_maskhead.loss_by_feat(
            *mask_outs, [gt_instances], img_metas, positive_infos)
        loss_mask = one_gt_mask_losses['loss_mask']
        self.assertGreater(loss_mask, 0, 'mask loss should be nonzero')
| 8,815
| 42.860697
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_fcos_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import FCOSHead
class TestFCOSHead(TestCase):
    """Unit tests for the loss computation of ``FCOSHead``."""

    def test_fcos_head_loss(self):
        """Tests fcos head loss when truth is empty and non-empty."""
        # Square input image of side ``s``; metas mimic pipeline output.
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        # Minimal head (single channel, single conv, no norm) keeps the
        # forward pass cheap while still exercising the full loss path.
        fcos_head = FCOSHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        # FCOS head expects multiple levels of features per image, one
        # per prior-generator stride.
        feats = (
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in fcos_head.prior_generator.strides)
        cls_scores, bbox_preds, centernesses = fcos_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
                                                 centernesses, [gt_instances],
                                                 img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # box loss and centerness loss should be zero
        empty_cls_loss = empty_gt_losses['loss_cls'].item()
        empty_box_loss = empty_gt_losses['loss_bbox'].item()
        empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss, 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_ctr_loss, 0,
            'there should be no centerness loss when there are no true boxes')
        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
                                               centernesses, [gt_instances],
                                               img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].item()
        onegt_box_loss = one_gt_losses['loss_bbox'].item()
        onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(onegt_ctr_loss, 0,
                           'centerness loss should be non-zero')
        # Test the `center_sampling` works fine: losses stay non-zero when
        # positives are restricted to the box center region.
        fcos_head.center_sampling = True
        ctrsamp_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
                                                centernesses, [gt_instances],
                                                img_metas)
        ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item()
        ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item()
        ctrsamp_ctr_loss = ctrsamp_losses['loss_centerness'].item()
        self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(ctrsamp_ctr_loss, 0,
                           'centerness loss should be non-zero')
        # Test the `norm_on_bbox` works fine (stride-normalized targets).
        fcos_head.norm_on_bbox = True
        normbox_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
                                                centernesses, [gt_instances],
                                                img_metas)
        normbox_cls_loss = normbox_losses['loss_cls'].item()
        normbox_box_loss = normbox_losses['loss_bbox'].item()
        normbox_ctr_loss = normbox_losses['loss_centerness'].item()
        self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(normbox_ctr_loss, 0,
                           'centerness loss should be non-zero')
| 4,509
| 45.020408
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_centernet_update_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CenterNetUpdateHead
class TestCenterNetUpdateHead(TestCase):
    """Unit tests for the loss computation of ``CenterNetUpdateHead``."""

    def test_centernet_update_head_loss(self):
        """Tests CenterNetUpdateHead loss when truth is empty/non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        # Minimal head (single channel, single conv, no norm) keeps the
        # forward pass cheap while still exercising the full loss path.
        centernet_head = CenterNetUpdateHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        # The head expects multiple levels of features per image, one
        # per prior-generator stride.
        feats = (
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in centernet_head.prior_generator.strides)
        cls_scores, bbox_preds = centernet_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = centernet_head.loss_by_feat(cls_scores, bbox_preds,
                                                      [gt_instances],
                                                      img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # the box loss should be zero
        empty_cls_loss = empty_gt_losses['loss_cls'].item()
        empty_box_loss = empty_gt_losses['loss_bbox'].item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss, 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = centernet_head.loss_by_feat(cls_scores, bbox_preds,
                                                    [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].item()
        onegt_box_loss = one_gt_losses['loss_bbox'].item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
| 2,608
| 39.765625
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_sabl_retina_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import SABLRetinaHead
class TestSABLRetinaHead(TestCase):
    """Unit tests for the loss computation of ``SABLRetinaHead``.

    Also runs ``predict_by_feat`` once as an inference smoke test.
    """

    def test_sabl_retina_head(self):
        """Tests sabl retina head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': [1, 1],
        }]
        # Train-time assigner config required by the head's loss path.
        train_cfg = ConfigDict(
            dict(
                assigner=dict(
                    type='ApproxMaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.4,
                    min_pos_iou=0.0,
                    ignore_iof_thr=-1),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # Minimal head (single channel, single conv) keeps the forward
        # pass cheap while still exercising the full loss path.
        sabl_retina_head = SABLRetinaHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            approx_anchor_generator=dict(
                type='AnchorGenerator',
                octave_base_scale=4,
                scales_per_octave=3,
                ratios=[0.5, 1.0, 2.0],
                strides=[8, 16, 32, 64, 128]),
            square_anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                scales=[4],
                strides=[8, 16, 32, 64, 128]),
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
            loss_bbox_reg=dict(
                type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5),
            train_cfg=train_cfg)
        # The head expects one feature map per square-anchor stride.
        feats = (
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in sabl_retina_head.square_anchor_generator.strides)
        outs = sabl_retina_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = sabl_retina_head.loss_by_feat(
            *outs, [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but both
        # bbox losses (bucket cls and offset reg) should be zero.
        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
        empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls']).item()
        empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg']).item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        # BUG FIX: the assertion messages below were copy-pasted from the
        # FCOS test and referred to 'box loss'/'centerness loss' although
        # the tested quantities are the bucket-cls and offset-reg losses.
        self.assertEqual(
            empty_box_cls_loss, 0,
            'there should be no bbox cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_reg_loss, 0,
            'there should be no bbox reg loss when there are no true boxes')
        # When truth is non-empty then the cls loss and both bbox losses
        # should be nonzero for random inputs.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = sabl_retina_head.loss_by_feat(*outs, [gt_instances],
                                                      img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls']).item()
        onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls']).item()
        onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg']).item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_cls_loss, 0,
                           'bbox cls loss should be non-zero')
        self.assertGreater(onegt_box_reg_loss, 0,
                           'bbox reg loss should be non-zero')
        test_cfg = ConfigDict(
            dict(
                nms_pre=1000,
                min_bbox_size=0,
                score_thr=0.05,
                nms=dict(type='nms', iou_threshold=0.5),
                max_per_img=100))
        # test predict_by_feat
        sabl_retina_head.predict_by_feat(
            *outs, batch_img_metas=img_metas, cfg=test_cfg, rescale=True)
| 4,672
| 38.940171
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_ddod_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import DDODHead
class TestDDODHead(TestCase):
    """Checks ``DDODHead`` losses for empty and non-empty ground truth."""

    def test_ddod_head_loss(self):
        """Tests ddod head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        train_cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
                reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # Minimal single-channel head; norm and DCN disabled for speed.
        ddod_head = DDODHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=1,
            use_dcn=False,
            norm_cfg=None,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
            loss_iou=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
        # One random single-channel feature map per pyramid level.
        feats = []
        for down_ratio in (8, 16, 32, 64, 128):
            feats.append(torch.rand(1, 1, s // down_ratio, s // down_ratio))
        cls_scores, bbox_preds, iou_preds = ddod_head.forward(feats)
        # An empty ground truth should yield classification loss only.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        empty_gt_losses = ddod_head.loss_by_feat(cls_scores, bbox_preds,
                                                 iou_preds, [empty_gt],
                                                 img_metas)
        self.assertGreater(
            sum(empty_gt_losses['loss_cls']).item(), 0,
            'cls loss should be non-zero')
        self.assertEqual(
            sum(empty_gt_losses['loss_bbox']).item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            sum(empty_gt_losses['loss_iou']).item(), 0,
            'there should be no centerness loss when there are no true boxes')
        # With one ground-truth box every loss term should be non-zero.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([2])
        one_gt_losses = ddod_head.loss_by_feat(cls_scores, bbox_preds,
                                               iou_preds, [one_gt], img_metas)
        self.assertGreater(
            sum(one_gt_losses['loss_cls']).item(), 0,
            'cls loss should be non-zero')
        self.assertGreater(
            sum(one_gt_losses['loss_bbox']).item(), 0,
            'box loss should be non-zero')
        self.assertGreater(
            sum(one_gt_losses['loss_iou']).item(), 0,
            'centerness loss should be non-zero')
| 3,979
| 39.20202
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_atss_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import ATSSHead
class TestATSSHead(TestCase):
    """Unit tests for the loss computation of ``ATSSHead``."""

    def test_atss_head_loss(self):
        """Tests atss head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        # Train-time assigner config required by the head's loss path.
        cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # Minimal head (single channel, single conv, no norm) keeps the
        # forward pass cheap while still exercising the full loss path.
        atss_head = ATSSHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=1,
            norm_cfg=None,
            train_cfg=cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
        # One random feature map per level; down-ratios match the anchor
        # strides above.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [8, 16, 32, 64, 128]
        ]
        cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
                                                 centernesses, [gt_instances],
                                                 img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_centerness_loss.item(), 0,
            'there should be no centerness loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
                                               centernesses, [gt_instances],
                                               img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_centerness_loss.item(), 0,
                           'centerness loss should be non-zero')
| 3,776
| 38.757895
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_dense_heads/test_gfl_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import GFLHead
class TestGFLHead(TestCase):
    """Unit tests for the loss computation of ``GFLHead``."""

    def test_gfl_head_loss(self):
        """Tests gfl head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        # Train-time assigner config required by the head's loss path.
        train_cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        gfl_head = GFLHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]),
            loss_cls=dict(
                type='QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
        # One random feature map per level.
        # NOTE(review): the down-ratios [4, 8, 16, 32, 64] here do not
        # match the anchor strides [8, 16, 32, 64, 128] configured above
        # -- presumably harmless for a loss smoke test, but confirm it is
        # intentional.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds = gfl_head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,
                                                [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_dfl_loss.item(), 0,
            'there should be no dfl loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,
                                              [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_dfl_loss.item(), 0,
                           'dfl loss should be non-zero')
| 3,503
| 37.933333
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_single_stage.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import unittest
from unittest import TestCase
import torch
from mmengine.logging import MessageHub
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestSingleStageDetector(TestCase):
    """Smoke tests that build single-stage detectors and run them in
    loss / predict / tensor modes on CPU and, when available, CUDA."""

    def setUp(self):
        # Ensure all mmdet modules are registered before building models.
        register_all_modules()

    @parameterized.expand([
        'retinanet/retinanet_r18_fpn_1x_coco.py',
        'centernet/centernet_r18_8xb16-crop512-140e_coco.py',
        'fsaf/fsaf_r50_fpn_1x_coco.py',
        'yolox/yolox_tiny_8xb8-300e_coco.py',
        'yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py',
        'reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py',
    ])
    def test_init(self, cfg_file):
        """The detector builds and exposes backbone, neck and bbox_head."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.bbox_head)

    @parameterized.expand([
        ('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu',
                                                                'cuda')),
        ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')),
        ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')),
        ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu',
                                                                      'cuda')),
    ])
    def test_single_stage_forward_loss_mode(self, cfg_file, devices):
        """Forward in ``loss`` mode returns a dict of losses."""
        # A uniquely named MessageHub avoids cross-test state in mmengine.
        message_hub = MessageHub.get_instance(
            f'test_single_stage_forward_loss_mode-{time.time()}')
        message_hub.update_info('iter', 0)
        message_hub.update_info('epoch', 0)
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            detector.init_weights()
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # BUG FIX: ``return unittest.skip(...)`` in a test body
                    # is a no-op (the returned decorator is discarded and
                    # the return silently ends the test). Skip only the
                    # CUDA device and keep testing the remaining ones.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, True)
            losses = detector.forward(**data, mode='loss')
            self.assertIsInstance(losses, dict)

    @parameterized.expand([
        ('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu',
                                                                'cuda')),
        ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')),
        ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')),
        ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu',
                                                                      'cuda')),
    ])
    def test_single_stage_forward_predict_mode(self, cfg_file, devices):
        """Forward in ``predict`` mode returns one DetDataSample per image."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # BUG FIX: see test_single_stage_forward_loss_mode --
                    # skip only the unavailable CUDA device.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            # Test forward test
            detector.eval()
            with torch.no_grad():
                batch_results = detector.forward(**data, mode='predict')
                self.assertEqual(len(batch_results), 2)
                self.assertIsInstance(batch_results[0], DetDataSample)

    @parameterized.expand([
        ('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu',
                                                                'cuda')),
        ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')),
        ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')),
        ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu',
                                                                      'cuda')),
    ])
    def test_single_stage_forward_tensor_mode(self, cfg_file, devices):
        """Forward in ``tensor`` mode returns the raw head outputs."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # BUG FIX: see test_single_stage_forward_loss_mode --
                    # skip only the unavailable CUDA device.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            batch_results = detector.forward(**data, mode='tensor')
            self.assertIsInstance(batch_results, tuple)
| 5,791
| 41.588235
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_single_stage_instance_seg.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestSingleStageInstanceSegmentor(TestCase):
    """Smoke tests that build single-stage instance segmentors and run
    them in loss / predict / tensor modes on CPU and, if available, CUDA."""

    def setUp(self):
        # Ensure all mmdet modules are registered before building models.
        register_all_modules()

    @parameterized.expand([
        'solo/solo_r50_fpn_1x_coco.py',
        'solo/decoupled-solo_r50_fpn_1x_coco.py',
        'solo/decoupled-solo-light_r50_fpn_3x_coco.py',
        'solov2/solov2_r50_fpn_1x_coco.py',
        'solov2/solov2-light_r18_fpn_ms-3x_coco.py',
        'yolact/yolact_r50_1xb8-55e_coco.py',
    ])
    def test_init(self, cfg_file):
        """The segmentor builds and exposes its expected sub-modules."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.mask_head)
        if detector.with_bbox:
            self.assertTrue(detector.bbox_head)

    @parameterized.expand([
        ('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
        ('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),
        ('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),
    ])
    def test_single_stage_forward_loss_mode(self, cfg_file, devices):
        """Forward in ``loss`` mode returns a dict of losses."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            detector.init_weights()
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # BUG FIX: ``return unittest.skip(...)`` in a test body
                    # is a no-op (the returned decorator is discarded and
                    # the return silently ends the test). Skip only the
                    # CUDA device and keep testing the remaining ones.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(
                2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
            data = detector.data_preprocessor(packed_inputs, True)
            losses = detector.forward(**data, mode='loss')
            self.assertIsInstance(losses, dict)

    @parameterized.expand([
        ('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
        ('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),
        ('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),
    ])
    def test_single_stage_forward_predict_mode(self, cfg_file, devices):
        """Forward in ``predict`` mode returns one DetDataSample per image."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # BUG FIX: see test_single_stage_forward_loss_mode --
                    # skip only the unavailable CUDA device.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(
                2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
            data = detector.data_preprocessor(packed_inputs, False)
            # Test forward test
            detector.eval()
            with torch.no_grad():
                batch_results = detector.forward(**data, mode='predict')
                self.assertEqual(len(batch_results), 2)
                self.assertIsInstance(batch_results[0], DetDataSample)

    @parameterized.expand([
        ('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
        ('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
        ('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),
        ('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),
    ])
    def test_single_stage_forward_tensor_mode(self, cfg_file, devices):
        """Forward in ``tensor`` mode returns the raw head outputs."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # BUG FIX: see test_single_stage_forward_loss_mode --
                    # skip only the unavailable CUDA device.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            batch_results = detector.forward(**data, mode='tensor')
            self.assertIsInstance(batch_results, tuple)
| 5,403
| 39.328358
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_semi_base.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from mmengine.registry import MODELS
from parameterized import parameterized
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
class TestSemiBase(TestCase):
    """Builds a semi-supervised detector and checks its sub-modules."""

    @parameterized.expand([
        'soft_teacher/'
        'soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py',
    ])
    def test_init(self, cfg_file):
        """Teacher and student each carry the full detector stack."""
        cfg = get_detector_cfg(cfg_file)
        # Shrink the backbone to ResNet-18 so the build stays light.
        cfg.detector.backbone.depth = 18
        cfg.detector.neck.in_channels = [64, 128, 256, 512]
        cfg.detector.backbone.init_cfg = None
        model = MODELS.build(cfg)
        for branch in (model.teacher, model.student):
            self.assertTrue(branch.backbone)
            self.assertTrue(branch.neck)
            self.assertTrue(branch.rpn_head)
            self.assertTrue(branch.roi_head)
| 1,116
| 30.914286
| 71
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_panoptic_two_stage_segmentor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStagePanopticSegmentor(unittest.TestCase):
    """Smoke tests for the two-stage panoptic segmentor (Panoptic FPN)."""

    def setUp(self):
        # Ensure all mmdet modules are registered before building models.
        register_all_modules()

    def _create_model_cfg(self):
        # Panoptic FPN config shrunk to a ResNet-18 backbone for speed.
        cfg_file = 'panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py'
        model_cfg = get_detector_cfg(cfg_file)
        model_cfg.backbone.depth = 18
        model_cfg.neck.in_channels = [64, 128, 256, 512]
        model_cfg.backbone.init_cfg = None
        return model_cfg

    def test_init(self):
        """All expected sub-modules are present after build."""
        model_cfg = self._create_model_cfg()
        detector = MODELS.build(model_cfg)
        assert detector.backbone
        assert detector.neck
        assert detector.rpn_head
        assert detector.roi_head
        assert detector.roi_head.mask_head
        assert detector.with_semantic_head
        assert detector.with_panoptic_fusion_head

    @parameterized.expand([('cpu', ), ('cuda', )])
    def test_forward_loss_mode(self, device):
        """Forward in ``loss`` mode returns a dict of losses."""
        model_cfg = self._create_model_cfg()
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            # BUG FIX: ``return unittest.skip(...)`` in a test body is a
            # no-op (the returned decorator is discarded), which made the
            # CUDA case vacuously pass. ``skipTest`` reports it correctly.
            self.skipTest('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2,
            image_shapes=[(3, 128, 127), (3, 91, 92)],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=True)
        data = detector.data_preprocessor(packed_inputs, True)
        # Test loss mode
        losses = detector.forward(**data, mode='loss')
        self.assertIsInstance(losses, dict)

    @parameterized.expand([('cpu', ), ('cuda', )])
    def test_forward_predict_mode(self, device):
        """Forward in ``predict`` mode returns one DetDataSample per image."""
        model_cfg = self._create_model_cfg()
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            # BUG FIX: report the missing GPU as a skip (see above).
            self.skipTest('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2,
            image_shapes=[(3, 128, 127), (3, 91, 92)],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=True)
        data = detector.data_preprocessor(packed_inputs, False)
        # Test forward test
        detector.eval()
        with torch.no_grad():
            batch_results = detector.forward(**data, mode='predict')
            self.assertEqual(len(batch_results), 2)
        self.assertIsInstance(batch_results[0], DetDataSample)

    @parameterized.expand([('cpu', ), ('cuda', )])
    def test_forward_tensor_mode(self, device):
        """Forward in ``tensor`` mode returns the raw head outputs."""
        model_cfg = self._create_model_cfg()
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            # BUG FIX: report the missing GPU as a skip (see above).
            self.skipTest('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 125, 130]],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=True)
        data = detector.data_preprocessor(packed_inputs, False)
        out = detector.forward(**data, mode='tensor')
        self.assertIsInstance(out, tuple)
| 3,467
| 35.893617
| 68
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_two_stage.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStageBBox(TestCase):
    def setUp(self):
        # Ensure all mmdet modules are registered before building models.
        register_all_modules()
    @parameterized.expand([
        'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
        'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
        'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
    ])
    def test_init(self, cfg_file):
        """Detector builds; an RPN ``num_classes`` > 1 is coerced to 1."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.rpn_head)
        self.assertTrue(detector.roi_head)
        # if rpn.num_classes > 1, force set rpn.num_classes = 1
        if hasattr(model.rpn_head, 'num_classes'):
            model.rpn_head.num_classes = 2
            detector = MODELS.build(model)
            self.assertEqual(detector.rpn_head.num_classes, 1)
@parameterized.expand([
'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
])
def test_two_stage_forward_loss_mode(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
])
def test_two_stage_forward_predict_mode(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
# TODO: Awaiting refactoring
# @parameterized.expand([
# 'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
# 'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
# 'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
# ])
# def test_two_stage_forward_tensor_mode(self, cfg_file):
# model = get_detector_cfg(cfg_file)
# # backbone convert to ResNet18
# model.backbone.depth = 18
# model.neck.in_channels = [64, 128, 256, 512]
# model.backbone.init_cfg = None
#
# from mmdet.models import build_detector
# detector = build_detector(model)
#
# if not torch.cuda.is_available():
# return unittest.skip('test requires GPU and torch+cuda')
# detector = detector.cuda()
#
# packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# data = detector.data_preprocessor(packed_inputs, False)
# out = detector.forward(**data, mode='tensor')
# self.assertIsInstance(out, tuple)
class TestTwoStageMask(TestCase):
    """Smoke tests for two-stage instance-segmentation detectors.

    Covers Mask R-CNN, Cascade Mask R-CNN and QueryInst in init, loss and
    predict modes; the backbone is shrunk to ResNet-18 in every test.
    """
    def setUp(self):
        # Populate mmdet's registries so configs can be built by name.
        register_all_modules()
    @parameterized.expand([
        'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
        'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
        'queryinst/queryinst_r50_fpn_1x_coco.py'
    ])
    def test_init(self, cfg_file):
        """Detector builds from config and exposes the expected modules."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.rpn_head)
        self.assertTrue(detector.roi_head)
        self.assertTrue(detector.roi_head.mask_head)
        # if rpn.num_classes > 1, force set rpn.num_classes = 1
        if hasattr(model.rpn_head, 'num_classes'):
            model.rpn_head.num_classes = 2
            detector = MODELS.build(model)
            self.assertEqual(detector.rpn_head.num_classes, 1)
    @parameterized.expand([
        'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
        'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
        'queryinst/queryinst_r50_fpn_1x_coco.py'
    ])
    def test_two_stage_forward_loss_mode(self, cfg_file):
        """forward(mode='loss') returns a dict of losses (GPU only)."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        if not torch.cuda.is_available():
            return unittest.skip('test requires GPU and torch+cuda')
        detector = detector.cuda()
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
        data = detector.data_preprocessor(packed_inputs, True)
        # Test loss mode
        losses = detector.forward(**data, mode='loss')
        self.assertIsInstance(losses, dict)
    @parameterized.expand([
        'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
        'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
        'queryinst/queryinst_r50_fpn_1x_coco.py'
    ])
    def test_two_stage_forward_predict_mode(self, cfg_file):
        """forward(mode='predict') returns one DetDataSample per image."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        if not torch.cuda.is_available():
            return unittest.skip('test requires GPU and torch+cuda')
        detector = detector.cuda()
        packed_inputs = demo_mm_inputs(2, [[3, 256, 256], [3, 255, 260]])
        data = detector.data_preprocessor(packed_inputs, False)
        # Test forward test
        detector.eval()
        with torch.no_grad():
            batch_results = detector.forward(**data, mode='predict')
            self.assertEqual(len(batch_results), 2)
            self.assertIsInstance(batch_results[0], DetDataSample)
    # TODO: Awaiting refactoring
    # @parameterized.expand([
    #     'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
    #     'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
    #     'queryinst/queryinst_r50_fpn_1x_coco.py'
    # ])
    # def test_two_stage_forward_tensor_mode(self, cfg_file):
    #     model = get_detector_cfg(cfg_file)
    #     # backbone convert to ResNet18
    #     model.backbone.depth = 18
    #     model.neck.in_channels = [64, 128, 256, 512]
    #     model.backbone.init_cfg = None
    #
    #     from mmdet.models import build_detector
    #     detector = build_detector(model)
    #
    #     if not torch.cuda.is_available():
    #         return unittest.skip('test requires GPU and torch+cuda')
    #     detector = detector.cuda()
    #
    #     packed_inputs = demo_mm_inputs(
    #         2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
    #     data = detector.data_preprocessor(packed_inputs, False)
    #
    #     # out = detector.forward(**data, mode='tensor')
    #     # self.assertIsInstance(out, tuple)
| 8,710
| 35.755274
| 75
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_kd_single_stage.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
    """Behavioral tests for the LD knowledge-distillation detector."""

    def setUp(self):
        # Make every mmdet module discoverable through the registries.
        register_all_modules()

    @parameterized.expand(['ld/ld_r18-gflv1-r101_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Building the config yields backbone, neck and bbox head."""
        cfg = get_detector_cfg(cfg_file)
        cfg.backbone.init_cfg = None
        from mmdet.registry import MODELS
        built = MODELS.build(cfg)
        for component in (built.backbone, built.neck, built.bbox_head):
            self.assertTrue(component)

    @parameterized.expand([('ld/ld_r18-gflv1-r101_fpn_1x_coco.py', ('cpu',
                                                                    'cuda'))])
    def test_single_stage_forward_train(self, cfg_file, devices):
        """Training-mode forward produces a loss dict on each device."""
        cfg = get_detector_cfg(cfg_file)
        cfg.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all(dev in ['cpu', 'cuda'] for dev in devices)
        for dev in devices:
            net = MODELS.build(cfg)
            if dev == 'cuda':
                if not torch.cuda.is_available():
                    return unittest.skip('test requires GPU and torch+cuda')
                net = net.cuda()
            fake_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            batch = net.data_preprocessor(fake_inputs, True)
            # Test forward train
            loss_dict = net.forward(**batch, mode='loss')
            self.assertIsInstance(loss_dict, dict)

    @parameterized.expand([('ld/ld_r18-gflv1-r101_fpn_1x_coco.py', ('cpu',
                                                                    'cuda'))])
    def test_single_stage_forward_test(self, cfg_file, devices):
        """Predict-mode forward yields one DetDataSample per input image."""
        cfg = get_detector_cfg(cfg_file)
        cfg.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all(dev in ['cpu', 'cuda'] for dev in devices)
        for dev in devices:
            net = MODELS.build(cfg)
            if dev == 'cuda':
                if not torch.cuda.is_available():
                    return unittest.skip('test requires GPU and torch+cuda')
                net = net.cuda()
            fake_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            batch = net.data_preprocessor(fake_inputs, False)
            # Test forward test
            net.eval()
            with torch.no_grad():
                predictions = net.forward(**batch, mode='predict')
                self.assertEqual(len(predictions), 2)
                self.assertIsInstance(predictions[0], DetDataSample)
| 2,932
| 36.126582
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDETR(TestCase):
    """Loss/forward/predict behavior checks for the DETR detector."""

    def setUp(self) -> None:
        register_all_modules()

    def test_detr_head_loss(self):
        """Tests transformer head loss when truth is empty and non-empty."""
        s = 256
        metainfo = {
            'img_shape': (s, s),
            'scale_factor': (1, 1),
            'pad_shape': (s, s),
            'batch_input_shape': (s, s)
        }
        img_metas = DetDataSample()
        img_metas.set_metainfo(metainfo)
        # Fix: removed a dead `batch_data_samples` list that was built here
        # but never used (batch_data_samples1/2 below are what gets passed).
        config = get_detector_cfg('detr/detr_r50_8xb2-150e_coco.py')
        model = MODELS.build(config)
        model.init_weights()
        random_image = torch.rand(1, 3, s, s)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        img_metas.gt_instances = gt_instances
        batch_data_samples1 = []
        batch_data_samples1.append(img_metas)
        empty_gt_losses = model.loss(
            random_image, batch_data_samples=batch_data_samples1)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        for key, loss in empty_gt_losses.items():
            if 'cls' in key:
                self.assertGreater(loss.item(), 0,
                                   'cls loss should be non-zero')
            elif 'bbox' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no box loss when no ground true boxes')
            elif 'iou' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no iou loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        img_metas.gt_instances = gt_instances
        batch_data_samples2 = []
        batch_data_samples2.append(img_metas)
        one_gt_losses = model.loss(
            random_image, batch_data_samples=batch_data_samples2)
        for loss in one_gt_losses.values():
            self.assertGreater(
                loss.item(), 0,
                'cls loss, or box loss, or iou loss should be non-zero')
        model.eval()
        # test _forward
        model._forward(random_image, batch_data_samples=batch_data_samples2)
        # test only predict
        model.predict(
            random_image, batch_data_samples=batch_data_samples2, rescale=True)
| 3,116
| 35.670588
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_maskformer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestMaskFormer(unittest.TestCase):
    """Smoke tests for the MaskFormer panoptic detector.

    A shared helper shrinks the ResNet backbone and every transformer
    embedding dimension so the model stays small enough for CI.
    """
    def setUp(self):
        # Populate mmdet's registries so configs can be built by name.
        register_all_modules()
    def _create_model_cfg(self):
        """Return the MaskFormer config scaled down to 32 base channels."""
        cfg_path = 'maskformer/maskformer_r50_ms-16xb1-75e_coco.py'
        model_cfg = get_detector_cfg(cfg_path)
        base_channels = 32
        model_cfg.backbone.depth = 18
        model_cfg.backbone.init_cfg = None
        model_cfg.backbone.base_channels = base_channels
        # Pyramid channels produced by the 4-stage ResNet backbone.
        model_cfg.panoptic_head.in_channels = [
            base_channels * 2**i for i in range(4)
        ]
        model_cfg.panoptic_head.feat_channels = base_channels
        model_cfg.panoptic_head.out_channels = base_channels
        # Shrink encoder/decoder embed dims to match the reduced channels.
        model_cfg.panoptic_head.pixel_decoder.encoder.\
            layer_cfg.self_attn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.pixel_decoder.encoder.\
            layer_cfg.ffn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.pixel_decoder.encoder.\
            layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8
        model_cfg.panoptic_head.pixel_decoder.\
            positional_encoding.num_feats = base_channels // 2
        model_cfg.panoptic_head.positional_encoding.\
            num_feats = base_channels // 2
        model_cfg.panoptic_head.transformer_decoder.\
            layer_cfg.self_attn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.transformer_decoder. \
            layer_cfg.cross_attn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.transformer_decoder.\
            layer_cfg.ffn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.transformer_decoder.\
            layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8
        return model_cfg
    def test_init(self):
        """Detector builds, initializes and exposes its main modules."""
        model_cfg = self._create_model_cfg()
        detector = MODELS.build(model_cfg)
        detector.init_weights()
        assert detector.backbone
        assert detector.panoptic_head
    @parameterized.expand([('cpu', ), ('cuda', )])
    def test_forward_loss_mode(self, device):
        """forward(mode='loss') returns a dict of losses on each device."""
        model_cfg = self._create_model_cfg()
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            return unittest.skip('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2,
            image_shapes=[(3, 128, 127), (3, 91, 92)],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=True)
        data = detector.data_preprocessor(packed_inputs, True)
        # Test loss mode
        losses = detector.forward(**data, mode='loss')
        self.assertIsInstance(losses, dict)
    @parameterized.expand([('cpu', ), ('cuda', )])
    def test_forward_predict_mode(self, device):
        """forward(mode='predict') yields one DetDataSample per image."""
        model_cfg = self._create_model_cfg()
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            return unittest.skip('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2,
            image_shapes=[(3, 128, 127), (3, 91, 92)],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=True)
        data = detector.data_preprocessor(packed_inputs, False)
        # Test forward test
        detector.eval()
        with torch.no_grad():
            batch_results = detector.forward(**data, mode='predict')
            self.assertEqual(len(batch_results), 2)
            self.assertIsInstance(batch_results[0], DetDataSample)
    @parameterized.expand([('cpu', ), ('cuda', )])
    def test_forward_tensor_mode(self, device):
        """forward(mode='tensor') returns the raw tuple of head outputs."""
        model_cfg = self._create_model_cfg()
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            return unittest.skip('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 125, 130]],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=True)
        data = detector.data_preprocessor(packed_inputs, False)
        out = detector.forward(**data, mode='tensor')
        self.assertIsInstance(out, tuple)
class TestMask2Former(unittest.TestCase):
    """Smoke tests for Mask2Former (panoptic and instance variants).

    A shared helper shrinks the backbone and all transformer embedding
    dimensions; each test runs on CPU and, when available, CUDA.
    """

    def setUp(self):
        # Populate mmdet's registries so configs can be built by name.
        register_all_modules()

    def _create_model_cfg(self, cfg_path):
        """Return the config at *cfg_path* scaled down to 32 base channels."""
        model_cfg = get_detector_cfg(cfg_path)
        base_channels = 32
        model_cfg.backbone.depth = 18
        model_cfg.backbone.init_cfg = None
        model_cfg.backbone.base_channels = base_channels
        # Pyramid channels produced by the 4-stage ResNet backbone.
        model_cfg.panoptic_head.in_channels = [
            base_channels * 2**i for i in range(4)
        ]
        model_cfg.panoptic_head.feat_channels = base_channels
        model_cfg.panoptic_head.out_channels = base_channels
        # Shrink encoder/decoder embed dims to match the reduced channels.
        model_cfg.panoptic_head.pixel_decoder.encoder.\
            layer_cfg.self_attn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.pixel_decoder.encoder.\
            layer_cfg.ffn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.pixel_decoder.encoder.\
            layer_cfg.ffn_cfg.feedforward_channels = base_channels * 4
        model_cfg.panoptic_head.pixel_decoder.\
            positional_encoding.num_feats = base_channels // 2
        model_cfg.panoptic_head.positional_encoding.\
            num_feats = base_channels // 2
        model_cfg.panoptic_head.transformer_decoder.\
            layer_cfg.self_attn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.transformer_decoder. \
            layer_cfg.cross_attn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.transformer_decoder.\
            layer_cfg.ffn_cfg.embed_dims = base_channels
        model_cfg.panoptic_head.transformer_decoder.\
            layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8
        return model_cfg

    def test_init(self):
        """Detector builds, initializes and exposes its main modules."""
        model_cfg = self._create_model_cfg(
            'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py')
        detector = MODELS.build(model_cfg)
        detector.init_weights()
        assert detector.backbone
        assert detector.panoptic_head

    @parameterized.expand([
        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),
        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
    ])
    def test_forward_loss_mode(self, device, cfg_path):
        """forward(mode='loss') returns a dict of losses."""
        # Fix: removed stray debug `print(device, cfg_path)` left in the test.
        with_semantic = 'panoptic' in cfg_path
        model_cfg = self._create_model_cfg(cfg_path)
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            return unittest.skip('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2,
            image_shapes=[(3, 128, 127), (3, 91, 92)],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=with_semantic)
        data = detector.data_preprocessor(packed_inputs, True)
        # Test loss mode
        losses = detector.forward(**data, mode='loss')
        self.assertIsInstance(losses, dict)

    @parameterized.expand([
        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),
        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
    ])
    def test_forward_predict_mode(self, device, cfg_path):
        """forward(mode='predict') yields one DetDataSample per image."""
        with_semantic = 'panoptic' in cfg_path
        model_cfg = self._create_model_cfg(cfg_path)
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            return unittest.skip('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2,
            image_shapes=[(3, 128, 127), (3, 91, 92)],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=with_semantic)
        data = detector.data_preprocessor(packed_inputs, False)
        # Test forward test
        detector.eval()
        with torch.no_grad():
            batch_results = detector.forward(**data, mode='predict')
            self.assertEqual(len(batch_results), 2)
            self.assertIsInstance(batch_results[0], DetDataSample)

    @parameterized.expand([
        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
        ('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),
        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
        ('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
    ])
    def test_forward_tensor_mode(self, device, cfg_path):
        """forward(mode='tensor') returns the raw tuple of head outputs."""
        with_semantic = 'panoptic' in cfg_path
        model_cfg = self._create_model_cfg(cfg_path)
        detector = MODELS.build(model_cfg)
        if device == 'cuda' and not torch.cuda.is_available():
            return unittest.skip('test requires GPU and torch+cuda')
        detector = detector.to(device)
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 125, 130]],
            sem_seg_output_strides=1,
            with_mask=True,
            with_semantic=with_semantic)
        data = detector.data_preprocessor(packed_inputs, False)
        out = detector.forward(**data, mode='tensor')
        self.assertIsInstance(out, tuple)
| 9,982
| 41.122363
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_dino.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDINO(TestCase):
    """Loss behavior checks for the DINO detector (incl. denoising terms)."""
    def setUp(self):
        # Populate mmdet's registries so configs can be built by name.
        register_all_modules()
    def test_dino_head_loss(self):
        """Tests transformer head loss when truth is empty and non-empty."""
        s = 256
        metainfo = {
            'img_shape': (s, s),
            'scale_factor': (1, 1),
            'pad_shape': (s, s),
            'batch_input_shape': (s, s)
        }
        data_sample = DetDataSample()
        data_sample.set_metainfo(metainfo)
        configs = [get_detector_cfg('dino/dino-4scale_r50_8xb2-12e_coco.py')]
        for config in configs:
            model = MODELS.build(config)
            model.init_weights()
            random_image = torch.rand(1, 3, s, s)
            # Test that empty ground truth encourages the network to
            # predict background
            gt_instances = InstanceData()
            gt_instances.bboxes = torch.empty((0, 4))
            gt_instances.labels = torch.LongTensor([])
            data_sample.gt_instances = gt_instances
            batch_data_samples_1 = [data_sample]
            empty_gt_losses = model.loss(
                random_image, batch_data_samples=batch_data_samples_1)
            # When there is no truth, the cls loss should be nonzero but there
            # should be no box loss. 'dn' covers DINO's denoising losses.
            for key, loss in empty_gt_losses.items():
                _loss = loss.item()
                if 'bbox' in key or 'iou' in key or 'dn' in key:
                    self.assertEqual(
                        _loss, 0, f'there should be no {key}({_loss}) '
                        f'when no ground true boxes')
                elif 'cls' in key:
                    self.assertGreater(_loss, 0,
                                       f'{key}({_loss}) should be non-zero')
            # When truth is non-empty then both cls and box loss should
            # be nonzero for random inputs
            gt_instances = InstanceData()
            gt_instances.bboxes = torch.Tensor(
                [[23.6667, 23.8757, 238.6326, 151.8874]])
            gt_instances.labels = torch.LongTensor([2])
            data_sample.gt_instances = gt_instances
            batch_data_samples_2 = [data_sample]
            one_gt_losses = model.loss(
                random_image, batch_data_samples=batch_data_samples_2)
            for loss in one_gt_losses.values():
                self.assertGreater(
                    loss.item(), 0,
                    'cls loss, or box loss, or iou loss should be non-zero')
            model.eval()
            # test _forward
            model._forward(
                random_image, batch_data_samples=batch_data_samples_2)
            # test only predict
            model.predict(
                random_image,
                batch_data_samples=batch_data_samples_2,
                rescale=True)
| 3,124
| 37.109756
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_rpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestRPN(TestCase):
    """Smoke tests for the standalone RPN detector in all forward modes."""
    def setUp(self):
        # Populate mmdet's registries so configs can be built by name.
        register_all_modules()
    @parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Detector builds from config and exposes the expected modules."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.bbox_head)
        # if rpn.num_classes > 1, force set rpn.num_classes = 1
        model.rpn_head.num_classes = 2
        detector = MODELS.build(model)
        self.assertEqual(detector.bbox_head.num_classes, 1)
    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_loss_mode(self, cfg_file, devices):
        """forward(mode='loss') returns a dict of losses on each device."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, True)
            # Test forward train
            losses = detector.forward(**data, mode='loss')
            self.assertIsInstance(losses, dict)
    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_predict_mode(self, cfg_file, devices):
        """forward(mode='predict') yields one DetDataSample per image."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            # Test forward test
            detector.eval()
            with torch.no_grad():
                batch_results = detector.forward(**data, mode='predict')
                self.assertEqual(len(batch_results), 2)
                self.assertIsInstance(batch_results[0], DetDataSample)
    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_tensor_mode(self, cfg_file, devices):
        """forward(mode='tensor') returns the raw tuple of head outputs."""
        model = get_detector_cfg(cfg_file)
        # backbone convert to ResNet18
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            batch_results = detector.forward(**data, mode='tensor')
            self.assertIsInstance(batch_results, tuple)
| 4,309
| 37.141593
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_dab_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDABDETR(TestCase):
    """Loss/forward/predict behavior checks for the DAB-DETR detector."""

    def setUp(self) -> None:
        register_all_modules()

    def test_dab_detr_head_loss(self):
        """Tests transformer head loss when truth is empty and non-empty."""
        s = 256
        metainfo = {
            'img_shape': (s, s),
            'scale_factor': (1, 1),
            'pad_shape': (s, s),
            'batch_input_shape': (s, s)
        }
        img_metas = DetDataSample()
        img_metas.set_metainfo(metainfo)
        # Fix: removed a dead `batch_data_samples` list that was built here
        # but never used (batch_data_samples1/2 below are what gets passed).
        config = get_detector_cfg('dab_detr/dab-detr_r50_8xb2-50e_coco.py')
        model = MODELS.build(config)
        model.init_weights()
        random_image = torch.rand(1, 3, s, s)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        img_metas.gt_instances = gt_instances
        batch_data_samples1 = []
        batch_data_samples1.append(img_metas)
        empty_gt_losses = model.loss(
            random_image, batch_data_samples=batch_data_samples1)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        for key, loss in empty_gt_losses.items():
            if 'cls' in key:
                self.assertGreater(loss.item(), 0,
                                   'cls loss should be non-zero')
            elif 'bbox' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no box loss when no ground true boxes')
            elif 'iou' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no iou loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        img_metas.gt_instances = gt_instances
        batch_data_samples2 = []
        batch_data_samples2.append(img_metas)
        one_gt_losses = model.loss(
            random_image, batch_data_samples=batch_data_samples2)
        for loss in one_gt_losses.values():
            self.assertGreater(
                loss.item(), 0,
                'cls loss, or box loss, or iou loss should be non-zero')
        model.eval()
        # test _forward
        model._forward(random_image, batch_data_samples=batch_data_samples2)
        # test only predict
        model.predict(
            random_image, batch_data_samples=batch_data_samples2, rescale=True)
| 3,130
| 35.835294
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_cornernet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase):
    """Smoke tests for CornerNet with a lightweight ResNet-18 + FPN swap.

    The original hourglass backbone is replaced in ``setUp`` with a small
    ResNet-18/FPN pair so the GPU tests stay cheap.
    """
    def setUp(self) -> None:
        register_all_modules()
        model_cfg = get_detector_cfg(
            'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
        # Lightweight replacement backbone for testing.
        backbone = dict(
            type='ResNet',
            depth=18,
            num_stages=4,
            out_indices=(3, ),
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            style='pytorch')
        # Single-output FPN feeding the corner head.
        neck = dict(
            type='FPN',
            in_channels=[512],
            out_channels=256,
            start_level=0,
            add_extra_convs='on_input',
            num_outs=1)
        model_cfg.backbone = ConfigDict(**backbone)
        model_cfg.neck = ConfigDict(**neck)
        # One feature level to match the single FPN output.
        model_cfg.bbox_head.num_feat_levels = 1
        self.model_cfg = model_cfg
    def test_init(self):
        """Original (unmodified) config builds and has head but no neck."""
        model = get_detector_cfg(
            'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.bbox_head is not None)
        self.assertTrue(detector.backbone is not None)
        self.assertTrue(not hasattr(detector, 'neck'))
    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_cornernet_forward_loss_mode(self):
        """forward(mode='loss') returns a dict of losses (GPU only)."""
        from mmdet.registry import MODELS
        detector = MODELS.build(self.model_cfg)
        detector.init_weights()
        packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
        data = detector.data_preprocessor(packed_inputs, True)
        losses = detector.forward(**data, mode='loss')
        assert isinstance(losses, dict)
    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_cornernet_forward_predict_mode(self):
        """forward(mode='predict') yields one DetDataSample per image."""
        from mmdet.registry import MODELS
        detector = MODELS.build(self.model_cfg)
        detector.init_weights()
        packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
        data = detector.data_preprocessor(packed_inputs, False)
        # Test forward test
        detector.eval()
        with torch.no_grad():
            batch_results = detector.forward(**data, mode='predict')
            assert len(batch_results) == 2
            assert isinstance(batch_results[0], DetDataSample)
    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_cornernet_forward_tensor_mode(self):
        """forward(mode='tensor') returns the raw tuple of head outputs."""
        from mmdet.registry import MODELS
        detector = MODELS.build(self.model_cfg)
        detector.init_weights()
        packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
        data = detector.data_preprocessor(packed_inputs, False)
        batch_results = detector.forward(**data, mode='tensor')
        assert isinstance(batch_results, tuple)
| 3,271
| 34.182796
| 73
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_conditional_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestConditionalDETR(TestCase):
    """Tests for the Conditional-DETR detector built from its config."""

    def setUp(self) -> None:
        register_all_modules()

    def test_conditional_detr_head_loss(self):
        """Tests transformer head loss when truth is empty and non-empty."""
        s = 256
        metainfo = {
            'img_shape': (s, s),
            'scale_factor': (1, 1),
            'pad_shape': (s, s),
            'batch_input_shape': (s, s)
        }
        img_metas = DetDataSample()
        img_metas.set_metainfo(metainfo)
        config = get_detector_cfg(
            'conditional_detr/conditional-detr_r50_8xb2-50e_coco.py')
        model = MODELS.build(config)
        model.init_weights()
        random_image = torch.rand(1, 3, s, s)

        # Test that empty ground truth encourages the network to
        # predict background.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        img_metas.gt_instances = gt_instances
        batch_data_samples1 = [img_metas]
        empty_gt_losses = model.loss(
            random_image, batch_data_samples=batch_data_samples1)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        for key, loss in empty_gt_losses.items():
            if 'cls' in key:
                self.assertGreater(loss.item(), 0,
                                   'cls loss should be non-zero')
            elif 'bbox' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no box loss when no ground true boxes')
            elif 'iou' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no iou loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        img_metas.gt_instances = gt_instances
        batch_data_samples2 = [img_metas]
        one_gt_losses = model.loss(
            random_image, batch_data_samples=batch_data_samples2)
        for loss in one_gt_losses.values():
            self.assertGreater(
                loss.item(), 0,
                'cls loss, or box loss, or iou loss should be non-zero')

        model.eval()
        # test _forward
        model._forward(random_image, batch_data_samples=batch_data_samples2)
        # test only predict
        model.predict(
            random_image, batch_data_samples=batch_data_samples2, rescale=True)
| 3,175
| 35.930233
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_detectors/test_deformable_detr.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDeformableDETR(TestCase):
    """Tests covering the three Deformable-DETR variants."""

    def setUp(self):
        register_all_modules()

    def test_deformable_detr_head_loss(self):
        """Tests transformer head loss when truth is empty and non-empty."""
        s = 256
        metainfo = {
            'img_shape': (s, s),
            'scale_factor': (1, 1),
            'pad_shape': (s, s),
            'batch_input_shape': (s, s)
        }
        img_metas = DetDataSample()
        img_metas.set_metainfo(metainfo)
        # Plain, iterative-refinement and two-stage configurations.
        configs = [
            get_detector_cfg(
                'deformable_detr/deformable-detr_r50_16xb2-50e_coco.py'),
            get_detector_cfg(
                'deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco.py'  # noqa
            ),
            get_detector_cfg(
                'deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py'  # noqa
            )
        ]
        for config in configs:
            model = MODELS.build(config)
            model.init_weights()
            random_image = torch.rand(1, 3, s, s)

            # Test that empty ground truth encourages the network to
            # predict background.
            gt_instances = InstanceData()
            gt_instances.bboxes = torch.empty((0, 4))
            gt_instances.labels = torch.LongTensor([])
            img_metas.gt_instances = gt_instances
            batch_data_samples1 = [img_metas]
            empty_gt_losses = model.loss(
                random_image, batch_data_samples=batch_data_samples1)
            # When there is no truth, the cls loss should be nonzero but there
            # should be no box loss.
            for key, loss in empty_gt_losses.items():
                if 'cls' in key:
                    self.assertGreater(loss.item(), 0,
                                       'cls loss should be non-zero')
                elif 'bbox' in key:
                    self.assertEqual(
                        loss.item(), 0,
                        'there should be no box loss when no ground true boxes'
                    )
                elif 'iou' in key:
                    self.assertEqual(
                        loss.item(), 0,
                        'there should be no iou loss when no ground true boxes'
                    )

            # When truth is non-empty then both cls and box loss should
            # be nonzero for random inputs.
            gt_instances = InstanceData()
            gt_instances.bboxes = torch.Tensor(
                [[23.6667, 23.8757, 238.6326, 151.8874]])
            gt_instances.labels = torch.LongTensor([2])
            img_metas.gt_instances = gt_instances
            batch_data_samples2 = [img_metas]
            one_gt_losses = model.loss(
                random_image, batch_data_samples=batch_data_samples2)
            for loss in one_gt_losses.values():
                self.assertGreater(
                    loss.item(), 0,
                    'cls loss, or box loss, or iou loss should be non-zero')

            model.eval()
            # test _forward
            model._forward(
                random_image, batch_data_samples=batch_data_samples2)
            # test only predict
            model.predict(
                random_image,
                batch_data_samples=batch_data_samples2,
                rescale=True)
| 3,777
| 36.78
| 95
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/__init__.py
| 0
| 0
| 0
|
py
|
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_iou2d_calculator.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.evaluation import bbox_overlaps as recall_overlaps
from mmdet.models.task_modules import BboxOverlaps2D
from mmdet.structures.bbox import bbox_overlaps
def test_bbox_overlaps_2d(eps=1e-7):
    """Exercise :class:`BboxOverlaps2D` / ``bbox_overlaps`` across modes
    ('giou', 'iof'), aligned/unaligned inputs, extra batch dims, score
    columns and empty boxes."""
    def _construct_bbox(num_bbox=None):
        # Random well-formed boxes (x2 >= x1, y2 >= y1) scaled to a
        # random image size.
        img_h = int(np.random.randint(3, 1000))
        img_w = int(np.random.randint(3, 1000))
        if num_bbox is None:
            num_bbox = np.random.randint(1, 10)
        x1y1 = torch.rand((num_bbox, 2))
        x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
        bboxes = torch.cat((x1y1, x2y2), -1)
        bboxes[:, 0::2] *= img_w
        bboxes[:, 1::2] *= img_h
        return bboxes, num_bbox
    # is_aligned is True, bboxes.size(-1) == 5 (include score)
    self = BboxOverlaps2D()
    bboxes1, num_bbox = _construct_bbox()
    bboxes2, _ = _construct_bbox(num_bbox)
    bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)
    bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)
    gious = self(bboxes1, bboxes2, 'giou', True)
    assert gious.size() == (num_bbox, ), gious.size()
    # GIoU is bounded in [-1, 1] by definition.
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    # is_aligned is True, bboxes1.size(-2) == 0
    bboxes1 = torch.empty((0, 4))
    bboxes2 = torch.empty((0, 4))
    gious = self(bboxes1, bboxes2, 'giou', True)
    assert gious.size() == (0, ), gious.size()
    assert torch.all(gious == torch.empty((0, )))
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    # is_aligned is True, and bboxes.ndims > 2
    bboxes1, num_bbox = _construct_bbox()
    bboxes2, _ = _construct_bbox(num_bbox)
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
    # test assertion when batch dim is not the same
    with pytest.raises(AssertionError):
        self(bboxes1, bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
    gious = self(bboxes1, bboxes2, 'giou', True)
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, num_bbox)
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)
    gious = self(bboxes1, bboxes2, 'giou', True)
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, 2, num_bbox)
    # is_aligned is False
    bboxes1, num_bbox1 = _construct_bbox()
    bboxes2, num_bbox2 = _construct_bbox()
    gious = self(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (num_bbox1, num_bbox2)
    # is_aligned is False, and bboxes.ndims > 2
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
    gious = self(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, num_bbox1, num_bbox2)
    bboxes1 = bboxes1.unsqueeze(0)
    bboxes2 = bboxes2.unsqueeze(0)
    gious = self(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (1, 2, num_bbox1, num_bbox2)
    # is_aligned is False, bboxes1.size(-2) == 0
    gious = self(torch.empty(1, 2, 0, 4), bboxes2, 'giou')
    assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    # test allclose between bbox_overlaps and the original official
    # implementation.
    bboxes1 = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [32, 32, 38, 42],
    ])
    bboxes2 = torch.FloatTensor([
        [0, 0, 10, 20],
        [0, 10, 10, 19],
        [10, 10, 20, 20],
    ])
    gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)
    gious = gious.numpy().round(4)
    # the gt is got with four decimal precision.
    expected_gious = np.array([0.5000, -0.0500, -0.8214])
    assert np.allclose(gious, expected_gious, rtol=0, atol=eps)
    # test mode 'iof'
    ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)
    assert torch.all(ious >= -1) and torch.all(ious <= 1)
    assert ious.size() == (bboxes1.size(0), )
    ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)
    assert torch.all(ious >= -1) and torch.all(ious <= 1)
    assert ious.size() == (bboxes1.size(0), bboxes2.size(0))
def test_voc_recall_overlaps():
    """Check VOC-recall ``bbox_overlaps`` in both coordinate conventions."""

    def _random_boxes(num_boxes=None):
        # Random well-formed boxes (x2 >= x1, y2 >= y1) inside a random image.
        height = int(np.random.randint(3, 1000))
        width = int(np.random.randint(3, 1000))
        if num_boxes is None:
            num_boxes = np.random.randint(1, 10)
        top_left = torch.rand((num_boxes, 2))
        bottom_right = torch.max(torch.rand((num_boxes, 2)), top_left)
        boxes = torch.cat((top_left, bottom_right), -1)
        boxes[:, 0::2] *= width
        boxes[:, 1::2] *= height
        return boxes.numpy(), num_boxes

    boxes_a, num_boxes = _random_boxes()
    boxes_b, _ = _random_boxes(num_boxes)
    # Same checks for both the new and the legacy coordinate systems.
    for legacy in (False, True):
        ious = recall_overlaps(
            boxes_a, boxes_b, 'iou', use_legacy_coordinate=legacy)
        assert ious.shape == (num_boxes, num_boxes)
        assert np.all(ious >= -1) and np.all(ious <= 1)
| 5,339
| 38.555556
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_samplers/test_pesudo_sampler.py
|
# TODO: follow up
| 18
| 8.5
| 17
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_coder/test_delta_xywh_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder
def test_delta_bbox_coder():
    """Decode deltas with :class:`DeltaXYWHBBoxCoder`, checking clamping to
    ``max_shape``, batched inputs and the ``add_ctr_clamp`` option."""
    coder = DeltaXYWHBBoxCoder()
    rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
                         [5., 5., 5., 5.]])
    deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
    # Reference values computed with the default delta means/stds.
    expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
                                           [0.1409, 0.1409, 2.8591, 2.8591],
                                           [0.0000, 0.3161, 4.1945, 0.6839],
                                           [5.0000, 5.0000, 5.0000, 5.0000]])
    out = coder.decode(rois, deltas, max_shape=(32, 32))
    assert expected_decode_bboxes.allclose(out, atol=1e-04)
    # max_shape may also be given as a tensor.
    out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
    assert expected_decode_bboxes.allclose(out, atol=1e-04)
    # Batched decode must agree with the unbatched result.
    batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
    batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
    batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
    assert out.allclose(batch_out)
    batch_out = coder.decode(
        batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
    assert out.allclose(batch_out)
    # test max_shape is not equal to batch
    with pytest.raises(AssertionError):
        coder.decode(
            batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
    # Empty input decodes to an empty output of the same shape.
    rois = torch.zeros((0, 4))
    deltas = torch.zeros((0, 4))
    out = coder.decode(rois, deltas, max_shape=(32, 32))
    assert rois.shape == out.shape
    # test add_ctr_clamp
    coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
    rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
                         [5., 5., 5., 5.]])
    deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
    expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
                                           [0.1409, 0.1409, 2.8591, 2.8591],
                                           [0.0000, 0.3161, 4.1945, 0.6839],
                                           [5.0000, 5.0000, 5.0000, 5.0000]])
    out = coder.decode(rois, deltas, max_shape=(32, 32))
    assert expected_decode_bboxes.allclose(out, atol=1e-04)
| 2,494
| 42.77193
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_point_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import PointAssigner
class TestPointAssigner(unittest.TestCase):
    """Unit tests for :class:`PointAssigner`."""

    def test_point_assigner(self):
        """Points inside a gt box get assigned to that box."""
        assigner = PointAssigner()
        preds = InstanceData()
        preds.priors = torch.FloatTensor([
            # [x, y, stride]
            [0, 0, 1],
            [10, 10, 1],
            [5, 5, 1],
            [32, 32, 1],
        ])
        gts = InstanceData()
        gts.bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gts.labels = torch.LongTensor([0, 1])
        result = assigner.assign(preds, gts)
        assert_allclose(result.gt_inds, torch.LongTensor([1, 2, 1, 0]))

    def test_point_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        assigner = PointAssigner()
        preds = InstanceData()
        preds.priors = torch.FloatTensor([
            # [x, y, stride]
            [0, 0, 1],
            [10, 10, 1],
            [5, 5, 1],
            [32, 32, 1],
        ])
        gts = InstanceData()
        gts.bboxes = torch.FloatTensor([])
        gts.labels = torch.LongTensor([])
        result = assigner.assign(preds, gts)
        # Every point stays at background index 0.
        assert_allclose(result.gt_inds, torch.LongTensor([0, 0, 0, 0]))

    def test_point_assigner_with_empty_boxes_and_gt(self):
        """Test corner case where an image might predict no points and no
        gt."""
        assigner = PointAssigner()
        preds = InstanceData()
        preds.priors = torch.FloatTensor([])
        gts = InstanceData()
        gts.bboxes = torch.FloatTensor([])
        gts.labels = torch.LongTensor([])
        result = assigner.assign(preds, gts)
        self.assertEqual(len(result.gt_inds), 0)
| 2,249
| 34.714286
| 76
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_hungarian_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import HungarianAssigner
class TestHungarianAssigner(TestCase):
    """Tests for :class:`HungarianAssigner` with the supported match costs."""

    def _assert_full_match(self, assign_result, num_gts):
        """Assert every gt is matched exactly once and labels are filled."""
        # No prediction may stay at the "unprocessed" index (-1).
        self.assertTrue(torch.all(assign_result.gt_inds > -1))
        # Exactly ``num_gts`` predictions are matched to a foreground gt.
        self.assertEqual((assign_result.gt_inds > 0).sum(), num_gts)
        self.assertEqual((assign_result.labels > -1).sum(), num_gts)

    def test_init(self):
        # An empty match-cost list must be rejected at construction time.
        with self.assertRaises(AssertionError):
            HungarianAssigner([])

    def test_hungarian_match_assigner(self):
        assigner = HungarianAssigner([
            dict(type='ClassificationCost', weight=1.),
            dict(type='BBoxL1Cost', weight=5.0),
            dict(type='IoUCost', iou_mode='giou', weight=2.0)
        ])
        # test no gt bboxes
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4)).float()
        gt_instances.labels = torch.empty((0, )).long()
        pred_instances = InstanceData()
        pred_instances.scores = torch.rand((10, 81))
        pred_instances.bboxes = torch.rand((10, 4))
        img_meta = dict(img_shape=(10, 8))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        # With no gts, every prediction is background (gt_ind 0, label -1).
        self.assertTrue(torch.all(assign_result.gt_inds == 0))
        self.assertTrue(torch.all(assign_result.labels == -1))
        # test with gt bboxes
        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
        gt_instances.labels = torch.LongTensor([1, 20])
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_full_match(assign_result, gt_instances.bboxes.size(0))

    def test_bbox_match_cost(self):
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
        gt_instances.labels = torch.LongTensor([1, 20])
        pred_instances = InstanceData()
        pred_instances.scores = torch.rand((10, 81))
        pred_instances.bboxes = torch.rand((10, 4))
        img_meta = dict(img_shape=(10, 8))
        # test IoUCost
        assigner = HungarianAssigner(
            ConfigDict(dict(type='IoUCost', iou_mode='iou')))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_full_match(assign_result, gt_instances.bboxes.size(0))
        # test BBoxL1Cost
        assigner = HungarianAssigner(ConfigDict(dict(type='BBoxL1Cost')))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_full_match(assign_result, gt_instances.bboxes.size(0))

    def test_cls_match_cost(self):
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
        gt_instances.labels = torch.LongTensor([1, 20])
        pred_instances = InstanceData()
        pred_instances.scores = torch.rand((10, 81))
        pred_instances.bboxes = torch.rand((10, 4))
        img_meta = dict(img_shape=(10, 8))
        # test FocalLossCost
        assigner = HungarianAssigner(dict(type='FocalLossCost'))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_full_match(assign_result, gt_instances.bboxes.size(0))
        # test ClassificationCost
        assigner = HungarianAssigner(dict(type='ClassificationCost'))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_full_match(assign_result, gt_instances.bboxes.size(0))

    def test_mask_match_cost(self):
        gt_instances = InstanceData()
        gt_instances.masks = torch.randint(0, 2, (2, 10, 10)).long()
        gt_instances.labels = torch.LongTensor([1, 20])
        pred_instances = InstanceData()
        pred_instances.masks = torch.rand((4, 10, 10))
        pred_instances.scores = torch.rand((4, 25))
        img_meta = dict(img_shape=(10, 10))
        # test DiceCost
        assigner = HungarianAssigner(dict(type='DiceCost'))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_full_match(assign_result, gt_instances.masks.size(0))
        # test CrossEntropyLossCost
        assigner = HungarianAssigner(dict(type='CrossEntropyLossCost'))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_full_match(assign_result, gt_instances.masks.size(0))
        # test FocalLossCost
        assigner = HungarianAssigner(
            dict(type='FocalLossCost', binary_input=True))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_full_match(assign_result, gt_instances.masks.size(0))
| 6,481
| 42.503356
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_simota_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import SimOTAAssigner
class TestSimOTAAssigner(TestCase):
    """Tests for :class:`SimOTAAssigner`."""

    def test_assign(self):
        assigner = SimOTAAssigner(
            center_radius=2.5,
            candidate_topk=1,
            iou_weight=3.0,
            cls_weight=1.0)
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]]))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43]]),
            labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        # The first prediction coincides with the gt (index 1);
        # the second stays background (index 0).
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_assign_with_no_valid_bboxes(self):
        assigner = SimOTAAssigner(
            center_radius=2.5,
            candidate_topk=1,
            iou_weight=3.0,
            cls_weight=1.0)
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[123, 123, 143, 143], [114, 151, 161, 171]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[30, 30, 8, 8], [55, 55, 8, 8]]))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[0, 0, 1, 1]]), labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        # No prior center falls inside the tiny gt: everything is background.
        expected_gt_inds = torch.LongTensor([0, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_assign_with_empty_gt(self):
        assigner = SimOTAAssigner(
            center_radius=2.5,
            candidate_topk=1,
            iou_weight=3.0,
            cls_weight=1.0)
        pred_instances = InstanceData(
            # (num_preds, 4) boxes; fixed from a malformed (2, 1, 4) fixture
            # so the fixture matches the sibling tests above.
            bboxes=torch.Tensor([[30, 40, 50, 60], [4, 5, 6, 7]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]))
        gt_instances = InstanceData(
            bboxes=torch.empty(0, 4), labels=torch.empty(0))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        # With no gts, every prediction is background.
        expected_gt_inds = torch.LongTensor([0, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)
| 2,586
| 37.61194
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_atss_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import ATSSAssigner
class TestATSSAssigner(TestCase):
    """Unit tests for :class:`ATSSAssigner`, including empty-gt,
    empty-prediction and ignore-region corner cases."""
    def test_atss_assigner(self):
        """Basic assignment: one prior matches the first gt box."""
        atss_assigner = ATSSAssigner(topk=9)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        # All priors live on a single feature level.
        num_level_bboxes = [4]
        assign_result = atss_assigner.assign(pred_instances, num_level_bboxes,
                                             gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)
        # gt_ind 0 means background; 1-based indices point into gt_bboxes.
        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
    def test_atss_assigner_with_ignore(self):
        """Assignment result is unchanged by a far-away ignore region."""
        atss_assigner = ATSSAssigner(topk=9)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [30, 32, 40, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        gt_bboxes_ignore = torch.Tensor([
            [30, 30, 40, 40],
        ])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
        num_level_bboxes = [4]
        assign_result = atss_assigner.assign(
            pred_instances,
            num_level_bboxes,
            gt_instances,
            gt_instances_ignore=gt_instances_ignore)
        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
    def test_atss_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        atss_assigner = ATSSAssigner(topk=9)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.empty(0, 4)
        gt_labels = torch.empty(0)
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        num_level_bboxes = [4]
        assign_result = atss_assigner.assign(pred_instances, num_level_bboxes,
                                             gt_instances)
        # Everything stays background.
        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
    def test_atss_assigner_with_empty_boxes(self):
        """Test corner case where a network might predict no boxes."""
        atss_assigner = ATSSAssigner(topk=9)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        num_level_bboxes = [0]
        assign_result = atss_assigner.assign(pred_instances, num_level_bboxes,
                                             gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)
        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))
    def test_atss_assigner_with_empty_boxes_and_ignore(self):
        """Test corner case where a network might predict no boxes and
        ignore_iof_thr is on."""
        atss_assigner = ATSSAssigner(topk=9)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_bboxes_ignore = torch.Tensor([
            [30, 30, 40, 40],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
        num_level_bboxes = [0]
        assign_result = atss_assigner.assign(
            pred_instances,
            num_level_bboxes,
            gt_instances,
            gt_instances_ignore=gt_instances_ignore)
        self.assertEqual(len(assign_result.gt_inds), 0)
        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))
    def test_atss_assigner_with_empty_boxes_and_gt(self):
        """Test corner case where a network might predict no boxes and no
        gt."""
        atss_assigner = ATSSAssigner(topk=9)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.empty((0, 4))
        gt_labels = torch.empty(0)
        num_level_bboxes = [0]
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = atss_assigner.assign(pred_instances, num_level_bboxes,
                                             gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)
| 5,500
| 36.168919
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_region_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import RegionAssigner
class TestRegionAssigner(TestCase):
    """Unit tests for :class:`RegionAssigner`."""

    def setUp(self):
        # Shared geometry used by every test case.
        self.img_meta = ConfigDict(dict(img_shape=(256, 256)))
        self.featmap_sizes = [(64, 64)]
        self.anchor_scale = 10
        self.anchor_strides = [1]

    def test_region_assigner(self):
        region_assigner = RegionAssigner(center_ratio=0.5, ignore_ratio=0.8)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        valid_flags = torch.BoolTensor([1, 1, 1, 1])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        num_level_anchors = [4]
        assign_result = region_assigner.assign(
            pred_instances, gt_instances, self.img_meta, self.featmap_sizes,
            num_level_anchors, self.anchor_scale, self.anchor_strides)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)
        # gt_ind 0 means background; 1-based indices point into gt_bboxes.
        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))

    def test_region_assigner_with_ignore(self):
        """RegionAssigner does not support ignore regions and must raise."""
        region_assigner = RegionAssigner(center_ratio=0.5)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [30, 32, 40, 42],
        ])
        valid_flags = torch.BoolTensor([1, 1, 1, 1])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        gt_bboxes_ignore = torch.Tensor([
            [30, 30, 40, 40],
        ])
        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
        num_level_anchors = [4]
        with self.assertRaises(NotImplementedError):
            region_assigner.assign(
                pred_instances,
                gt_instances,
                self.img_meta,
                self.featmap_sizes,
                num_level_anchors,
                self.anchor_scale,
                self.anchor_strides,
                gt_instances_ignore=gt_instances_ignore)

    def test_region_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        region_assigner = RegionAssigner(center_ratio=0.5)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        valid_flags = torch.BoolTensor([1, 1, 1, 1])
        gt_bboxes = torch.empty(0, 4)
        gt_labels = torch.empty(0)
        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        num_level_anchors = [4]
        assign_result = region_assigner.assign(
            pred_instances, gt_instances, self.img_meta, self.featmap_sizes,
            num_level_anchors, self.anchor_scale, self.anchor_strides)
        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))

    # Renamed from ``test_atss_assigner_with_empty_boxes`` — a copy-paste
    # leftover from the ATSS test file; this class tests RegionAssigner.
    def test_region_assigner_with_empty_boxes(self):
        """Test corner case where a network might predict no boxes."""
        region_assigner = RegionAssigner(center_ratio=0.5)
        priors = torch.empty((0, 4))
        valid_flags = torch.BoolTensor([])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        num_level_anchors = [0]
        assign_result = region_assigner.assign(
            pred_instances, gt_instances, self.img_meta, self.featmap_sizes,
            num_level_anchors, self.anchor_scale, self.anchor_strides)
        self.assertEqual(len(assign_result.gt_inds), 0)
        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))

    # Renamed from ``test_atss_assigner_with_empty_boxes_and_gt`` (see above).
    def test_region_assigner_with_empty_boxes_and_gt(self):
        """Test corner case where a network might predict no boxes and no
        gt."""
        region_assigner = RegionAssigner(center_ratio=0.5)
        priors = torch.empty((0, 4))
        valid_flags = torch.BoolTensor([])
        gt_bboxes = torch.empty((0, 4))
        gt_labels = torch.empty(0)
        num_level_anchors = [0]
        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = region_assigner.assign(
            pred_instances, gt_instances, self.img_meta, self.featmap_sizes,
            num_level_anchors, self.anchor_scale, self.anchor_strides)
        self.assertEqual(len(assign_result.gt_inds), 0)
| 5,479
| 38.42446
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_center_region_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import CenterRegionAssigner
class TestCenterRegionAssigner(TestCase):
    def test_center_region_assigner(self):
        """Basic assignment plus the shadowed-labels extra property."""
        center_region_assigner = CenterRegionAssigner(
            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances,
                                                      gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)
        # gt_ind 0 means background; 1-based indices point into gt_bboxes.
        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
        expected_shadowed_labels = torch.LongTensor([[2, 3]])
        shadowed_labels = assign_result.get_extra_property('shadowed_labels')
        self.assertTrue(torch.all(shadowed_labels == expected_shadowed_labels))
def test_center_region_assigner_with_ignore(self):
center_region_assigner = CenterRegionAssigner(
pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
priors = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[30, 32, 40, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
pred_instances = InstanceData(priors=priors)
gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
assign_result = center_region_assigner.assign(
pred_instances,
gt_instances,
gt_instances_ignore=gt_instances_ignore)
expected_gt_inds = torch.LongTensor([1, 0, 0, -1])
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
def test_center_region_assigner_with_empty_gt(self):
"""Test corner case where an image might have no true detections."""
center_region_assigner = CenterRegionAssigner(
pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
priors = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.empty(0)
pred_instances = InstanceData(priors=priors)
gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
assign_result = center_region_assigner.assign(pred_instances,
gt_instances)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
def test_center_region_assigner_with_empty_boxes(self):
"""Test corner case where a network might predict no boxes."""
center_region_assigner = CenterRegionAssigner(
pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
priors = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
pred_instances = InstanceData(priors=priors)
gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
assign_result = center_region_assigner.assign(pred_instances,
gt_instances)
self.assertEqual(len(assign_result.gt_inds), 0)
self.assertTrue(tuple(assign_result.labels.shape) == (0, ))
def test_center_region_assigner_with_empty_boxes_and_ignore(self):
"""Test corner case where a network might predict no boxes and
ignore_iof_thr is on."""
center_region_assigner = CenterRegionAssigner(
pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
priors = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
gt_labels = torch.LongTensor([2, 3])
pred_instances = InstanceData(priors=priors)
gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
assign_result = center_region_assigner.assign(
pred_instances,
gt_instances,
gt_instances_ignore=gt_instances_ignore)
self.assertEqual(len(assign_result.gt_inds), 0)
self.assertTrue(tuple(assign_result.labels.shape) == (0, ))
def test_center_region_assigner_with_empty_boxes_and_gt(self):
"""Test corner case where a network might predict no boxes and no
gt."""
center_region_assigner = CenterRegionAssigner(
pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
priors = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
gt_labels = torch.empty(0)
pred_instances = InstanceData(priors=priors)
gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
assign_result = center_region_assigner.assign(pred_instances,
gt_instances)
self.assertEqual(len(assign_result.gt_inds), 0)
| 6,061
| 39.413333
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_grid_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import GridAssigner
class TestGridAssigner(TestCase):
    """Unit tests for ``GridAssigner``."""

    def test_assign(self):
        """Check assignment results under several threshold settings."""
        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
        pred_instances = InstanceData(
            priors=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),
            responsible_flags=torch.BoolTensor([1, 1]))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43]]),
            labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)
        # invalid neg_iou_thr: only a scalar or a 2-tuple is accepted
        with self.assertRaises(AssertionError):
            assigner = GridAssigner(
                pos_iou_thr=0.5, neg_iou_thr=[0.3, 0.1, 0.4])
            assigner.assign(
                pred_instances=pred_instances, gt_instances=gt_instances)
        # multi-neg_iou_thr
        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=(0.1, 0.3))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, -1])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)
        # gt_max_assign_all=False
        assigner = GridAssigner(
            pos_iou_thr=0.5, neg_iou_thr=0.3, gt_max_assign_all=False)
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)
        # large min_pos_iou
        assigner = GridAssigner(
            pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=1)
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_assign_with_empty_gt(self):
        """No gt: every prior becomes background (index 0)."""
        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
        pred_instances = InstanceData(
            priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]),
            responsible_flags=torch.BoolTensor([1, 1]))
        gt_instances = InstanceData(
            bboxes=torch.empty(0, 4), labels=torch.empty(0))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([0, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_assign_with_empty_priors(self):
        """No priors: the assignment result is empty."""
        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
        # torch.empty already returns a Tensor; the original wrapped it in
        # torch.Tensor(...) redundantly.
        pred_instances = InstanceData(
            priors=torch.empty(0, 4),
            responsible_flags=torch.empty(0))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43]]),
            labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)
| 3,428
| 40.817073
| 73
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_dynamic_soft_label_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import DynamicSoftLabelAssigner
from mmdet.structures.bbox import HorizontalBoxes
class TestDynamicSoftLabelAssigner(TestCase):
    """Behaviour checks for ``DynamicSoftLabelAssigner``."""

    def test_assign(self):
        """A prediction overlapping the gt box is matched to it."""
        soft_assigner = DynamicSoftLabelAssigner(
            soft_center_radius=3.0, topk=1, iou_weight=3.0)
        preds = InstanceData()
        preds.bboxes = torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]])
        preds.scores = torch.FloatTensor([[0.2], [0.8]])
        preds.priors = torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]])
        gts = InstanceData()
        gts.bboxes = torch.Tensor([[23, 23, 43, 43]])
        gts.labels = torch.LongTensor([0])
        result = soft_assigner.assign(pred_instances=preds, gt_instances=gts)
        assert_allclose(result.gt_inds, torch.LongTensor([1, 0]))

    def test_assign_with_no_valid_bboxes(self):
        """Predictions far from the gt are all left as background."""
        soft_assigner = DynamicSoftLabelAssigner(
            soft_center_radius=3.0, topk=1, iou_weight=3.0)
        preds = InstanceData()
        preds.bboxes = torch.Tensor([[123, 123, 143, 143],
                                     [114, 151, 161, 171]])
        preds.scores = torch.FloatTensor([[0.2], [0.8]])
        preds.priors = torch.Tensor([[30, 30, 8, 8], [55, 55, 8, 8]])
        gts = InstanceData()
        gts.bboxes = torch.Tensor([[0, 0, 1, 1]])
        gts.labels = torch.LongTensor([0])
        result = soft_assigner.assign(pred_instances=preds, gt_instances=gts)
        assert_allclose(result.gt_inds, torch.LongTensor([0, 0]))

    def test_assign_with_empty_gt(self):
        """No gt: every prediction becomes background (index 0)."""
        soft_assigner = DynamicSoftLabelAssigner(
            soft_center_radius=3.0, topk=1, iou_weight=3.0)
        preds = InstanceData()
        # NOTE(review): this bboxes literal is 3-D ([[...]] per box), unlike
        # the 2-D boxes in the sibling tests -- presumably harmless when gt
        # is empty; confirm before reusing this fixture elsewhere.
        preds.bboxes = torch.Tensor([[[30, 40, 50, 60]], [[4, 5, 6, 7]]])
        preds.scores = torch.FloatTensor([[0.2], [0.8]])
        preds.priors = torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]])
        gts = InstanceData()
        gts.bboxes = torch.empty(0, 4)
        gts.labels = torch.empty(0)
        result = soft_assigner.assign(pred_instances=preds, gt_instances=gts)
        assert_allclose(result.gt_inds, torch.LongTensor([0, 0]))

    def test_box_type_input(self):
        """``HorizontalBoxes`` gt input behaves like a raw tensor."""
        soft_assigner = DynamicSoftLabelAssigner(
            soft_center_radius=3.0, topk=1, iou_weight=3.0)
        preds = InstanceData()
        preds.bboxes = torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]])
        preds.scores = torch.FloatTensor([[0.2], [0.8]])
        preds.priors = torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]])
        gts = InstanceData()
        gts.bboxes = HorizontalBoxes(torch.Tensor([[23, 23, 43, 43]]))
        gts.labels = torch.LongTensor([0])
        result = soft_assigner.assign(pred_instances=preds, gt_instances=gts)
        assert_allclose(result.gt_inds, torch.LongTensor([1, 0]))
| 3,259
| 42.466667
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_max_iou_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Tests the Assigner objects.
CommandLine:
pytest tests/test_core/test_bbox/test_assigners/test_max_iou_assigner.py
xdoctest tests/test_core/test_bbox/test_assigners/test_max_iou_assigner.py zero
""" # noqa
import pytest
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import MaxIoUAssigner
@pytest.mark.parametrize('neg_iou_thr', [0.5, (0, 0.5)])
def test_max_iou_assigner(neg_iou_thr):
    """Basic assignment with both scalar and interval ``neg_iou_thr``."""
    # Name the local ``assigner`` -- ``self`` is misleading outside a class.
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=neg_iou_thr,
    )
    priors = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    pred_instances = InstanceData(priors=priors)
    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
    assign_result = assigner.assign(pred_instances, gt_instances)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4
    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_ignore():
    """Priors overlapping an ignore region above ``ignore_iof_thr`` get -1."""
    # Name the local ``assigner`` -- ``self`` is misleading outside a class.
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    priors = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [30, 32, 40, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    pred_instances = InstanceData(priors=priors)
    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
    gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
    assign_result = assigner.assign(
        pred_instances, gt_instances, gt_instances_ignore=gt_instances_ignore)
    # The last prior lies in the ignored region and is marked -1.
    expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    # Name the local ``assigner`` -- ``self`` is misleading outside a class.
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    priors = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.empty(0)
    pred_instances = InstanceData(priors=priors)
    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
    assign_result = assigner.assign(pred_instances, gt_instances)
    # Without gt every prior is assigned to background (index 0).
    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_priors():
    """Test corner case where a network might predict no boxes."""
    # Name the local ``assigner`` -- ``self`` is misleading outside a class.
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    priors = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    pred_instances = InstanceData(priors=priors)
    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
    # Test with gt_labels
    assign_result = assigner.assign(pred_instances, gt_instances)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )
def test_max_iou_assigner_with_empty_boxes_and_ignore():
    """Test corner case where a network might predict no boxes and
    ignore_iof_thr is on."""
    # Name the local ``assigner`` -- ``self`` is misleading outside a class.
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
    )
    priors = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    gt_labels = torch.LongTensor([2, 3])
    pred_instances = InstanceData(priors=priors)
    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
    gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
    # Test with gt_labels
    assign_result = assigner.assign(
        pred_instances, gt_instances, gt_instances_ignore=gt_instances_ignore)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )
def test_max_iou_assigner_with_empty_priors_and_gt():
    """Test corner case where a network might predict no boxes and no gt."""
    # Name the local ``assigner`` -- ``self`` is misleading outside a class.
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    priors = torch.empty(0, 4)
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.empty(0)
    pred_instances = InstanceData(priors=priors)
    gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
    assign_result = assigner.assign(pred_instances, gt_instances)
    assert len(assign_result.gt_inds) == 0
| 5,025
| 29.834356
| 84
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_approx_max_iou_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import ApproxMaxIoUAssigner
class TestApproxIoUAssigner(TestCase):
    """Unit tests for ``ApproxMaxIoUAssigner``."""

    def test_approx_iou_assigner(self):
        """Each prior is assigned to the gt with the highest approx IoU."""
        assigner = ApproxMaxIoUAssigner(
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
        )
        bboxes = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData()
        pred_instances.priors = bboxes
        # One approx box per prior: shape (num_priors, 1, 4).
        pred_instances.approxs = bboxes[:, None, :]
        gt_instances = InstanceData()
        gt_instances.bboxes = gt_bboxes
        gt_instances.labels = gt_labels
        assign_result = assigner.assign(pred_instances, gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))

    def test_approx_iou_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        assigner = ApproxMaxIoUAssigner(
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
        )
        bboxes = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.FloatTensor([])
        gt_labels = torch.LongTensor([])
        pred_instances = InstanceData()
        pred_instances.priors = bboxes
        pred_instances.approxs = bboxes[:, None, :]
        gt_instances = InstanceData()
        gt_instances.bboxes = gt_bboxes
        gt_instances.labels = gt_labels
        assign_result = assigner.assign(pred_instances, gt_instances)
        # Without gt every prior is assigned to background (index 0).
        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))

    def test_approx_iou_assigner_with_empty_boxes(self):
        """Test corner case where a network might predict no boxes."""
        assigner = ApproxMaxIoUAssigner(
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
        )
        bboxes = torch.empty((0, 4))
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData()
        pred_instances.priors = bboxes
        pred_instances.approxs = bboxes[:, None, :]
        gt_instances = InstanceData()
        gt_instances.bboxes = gt_bboxes
        gt_instances.labels = gt_labels
        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)

    def test_approx_iou_assigner_with_empty_boxes_and_gt(self):
        """Test corner case where a network might predict no boxes and no
        gt."""
        assigner = ApproxMaxIoUAssigner(
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
        )
        bboxes = torch.empty((0, 4))
        gt_bboxes = torch.empty((0, 4))
        gt_labels = torch.LongTensor([])
        pred_instances = InstanceData()
        pred_instances.priors = bboxes
        pred_instances.approxs = bboxes[:, None, :]
        gt_instances = InstanceData()
        gt_instances.bboxes = gt_bboxes
        gt_instances.labels = gt_labels
        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)
| 3,680
| 32.770642
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_task_uniform_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import UniformAssigner
class TestUniformAssigner(TestCase):
    """Behaviour checks for ``UniformAssigner``."""

    def test_uniform_assigner(self):
        """Assignment uses both anchors and decoded predictions."""
        assigner = UniformAssigner(0.15, 0.7, 1)
        decoded = torch.FloatTensor([
            [1, 1, 12, 8],
            [4, 4, 20, 20],
            [1, 5, 15, 15],
            [30, 5, 32, 42],
        ])
        anchors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        pred_instances = InstanceData(
            priors=anchors, decoder_priors=decoded)
        gt_instances = InstanceData(
            bboxes=torch.FloatTensor([
                [0, 0, 10, 9],
                [0, 10, 10, 19],
            ]),
            labels=torch.LongTensor([2, 3]))
        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)
        assert_allclose(assign_result.gt_inds,
                        torch.LongTensor([-1, 0, 2, 0]))

    def test_uniform_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        assigner = UniformAssigner(0.15, 0.7, 1)
        decoded = torch.FloatTensor([
            [1, 1, 12, 8],
            [4, 4, 20, 20],
            [1, 5, 15, 15],
            [30, 5, 32, 42],
        ])
        anchors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        pred_instances = InstanceData(
            priors=anchors, decoder_priors=decoded)
        gt_instances = InstanceData(
            bboxes=torch.empty(0, 4), labels=torch.empty(0))
        assign_result = assigner.assign(pred_instances, gt_instances)
        # No gt: everything is background (index 0).
        assert_allclose(assign_result.gt_inds,
                        torch.LongTensor([0, 0, 0, 0]))

    def test_uniform_assigner_with_empty_boxes(self):
        """Test corner case where a network might predict no boxes."""
        assigner = UniformAssigner(0.15, 0.7, 1)
        pred_instances = InstanceData(
            priors=torch.empty((0, 4)),
            decoder_priors=torch.empty((0, 4)))
        gt_instances = InstanceData(
            bboxes=torch.FloatTensor([
                [0, 0, 10, 9],
                [0, 10, 10, 19],
            ]),
            labels=torch.LongTensor([2, 3]))
        # Test with gt_labels
        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)
        self.assertEqual(tuple(assign_result.labels.shape), (0, ))
| 3,299
| 34.106383
| 76
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_assigners/test_task_aligned_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import TaskAlignedAssigner
class TestTaskAlignedAssigner(TestCase):
    """Behaviour checks for ``TaskAlignedAssigner``."""

    def test_task_aligned_assigner(self):
        # topk must be a positive integer.
        with self.assertRaises(AssertionError):
            TaskAlignedAssigner(topk=0)
        assigner = TaskAlignedAssigner(topk=13)
        scores = torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],
                                    [0.4, 0.5]])
        decoded = torch.FloatTensor([
            [1, 1, 12, 8],
            [4, 4, 20, 20],
            [1, 5, 15, 15],
            [30, 5, 32, 42],
        ])
        anchors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        pred_instances = InstanceData(
            priors=anchors, bboxes=decoded, scores=scores)
        gt_instances = InstanceData(
            bboxes=torch.FloatTensor([
                [0, 0, 10, 9],
                [0, 10, 10, 19],
            ]),
            labels=torch.LongTensor([0, 1]))
        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)
        # test empty gt: every prior falls back to background (index 0)
        gt_instances = InstanceData(
            bboxes=torch.empty(0, 4),
            labels=torch.empty(0, 2).long())
        assign_result = assigner.assign(pred_instances, gt_instances)
        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
| 1,934
| 32.362069
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_task_modules/test_prior_generators/test_anchor_generator.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""
CommandLine:
pytest tests/test_utils/test_anchor.py
xdoctest tests/test_utils/test_anchor.py zero
"""
import pytest
import torch
def _check_standard_points_generator(device):
    """Shared ``MlvlPointGenerator`` assertions for one device.

    The original test duplicated this entire body for 'cpu' and 'cuda';
    the logic is identical up to the ``device`` argument.
    """
    from mmdet.models.task_modules.prior_generators import MlvlPointGenerator
    # Square strides
    mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
    mlvl_points_half_stride_generator = MlvlPointGenerator(
        strides=[4, 10], offset=0.5)
    assert mlvl_points.num_levels == 2
    # assert self.num_levels == len(featmap_sizes)
    with pytest.raises(AssertionError):
        mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device=device)
    priors = mlvl_points.grid_priors(
        featmap_sizes=[(2, 2), (4, 8)], device=device)
    priors_with_stride = mlvl_points.grid_priors(
        featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device=device)
    assert len(priors) == 2
    # assert last dimension is (coord_x, coord_y, stride_w, stride_h).
    assert priors_with_stride[0].size(1) == 4
    assert priors_with_stride[0][0][2] == 4
    assert priors_with_stride[0][0][3] == 4
    assert priors_with_stride[1][0][2] == 10
    assert priors_with_stride[1][0][3] == 10
    stride_4_feat_2_2 = priors[0]
    assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4
    assert stride_4_feat_2_2.size(0) == 4
    assert stride_4_feat_2_2.size(1) == 2
    stride_10_feat_4_8 = priors[1]
    assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10
    assert stride_10_feat_4_8.size(0) == 4 * 8
    assert stride_10_feat_4_8.size(1) == 2
    # assert the offset of 0.5 * stride
    priors_half_offset = mlvl_points_half_stride_generator.grid_priors(
        featmap_sizes=[(2, 2), (4, 8)], device=device)
    assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2
    assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2


def test_standard_points_generator():
    """Build ``MlvlPointGenerator`` from config and verify grid priors."""
    from mmdet.models.task_modules import build_prior_generator
    # test init
    anchor_generator_cfg = dict(
        type='MlvlPointGenerator', strides=[4, 8], offset=0)
    anchor_generator = build_prior_generator(anchor_generator_cfg)
    assert anchor_generator is not None
    assert anchor_generator.num_base_priors == [1, 1]
    # test stride behaviour on CPU and, when available, on GPU
    _check_standard_points_generator('cpu')
    if torch.cuda.is_available():
        anchor_generator = build_prior_generator(anchor_generator_cfg)
        assert anchor_generator is not None
        _check_standard_points_generator('cuda')
def test_sparse_prior():
    """``sparse_priors`` must pick exactly the rows that ``grid_priors``
    produces at the given indices, for every generator type (point, anchor,
    SSD, YOLO), on CPU and -- when available -- on CUDA."""
    from mmdet.models.task_modules.prior_generators import MlvlPointGenerator
    mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
    prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
    featmap_sizes = [(3, 5), (6, 4)]
    grid_anchors = mlvl_points.grid_priors(
        featmap_sizes=featmap_sizes, with_stride=False, device='cpu')
    # Level 0: sparse selection must equal indexing into the dense grid.
    sparse_prior = mlvl_points.sparse_priors(
        prior_idxs=prior_indexs,
        featmap_size=featmap_sizes[0],
        level_idx=0,
        device='cpu')
    assert not sparse_prior.is_cuda
    assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
    # Level 1: same check with the second feature-map size.
    sparse_prior = mlvl_points.sparse_priors(
        prior_idxs=prior_indexs,
        featmap_size=featmap_sizes[1],
        level_idx=1,
        device='cpu')
    assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
    from mmdet.models.task_modules.prior_generators import AnchorGenerator
    mlvl_anchors = AnchorGenerator(
        strides=[16, 32], ratios=[1.], scales=[1.], base_sizes=[4, 8])
    prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
    featmap_sizes = [(3, 5), (6, 4)]
    grid_anchors = mlvl_anchors.grid_priors(
        featmap_sizes=featmap_sizes, device='cpu')
    sparse_prior = mlvl_anchors.sparse_priors(
        prior_idxs=prior_indexs,
        featmap_size=featmap_sizes[0],
        level_idx=0,
        device='cpu')
    assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
    sparse_prior = mlvl_anchors.sparse_priors(
        prior_idxs=prior_indexs,
        featmap_size=featmap_sizes[1],
        level_idx=1,
        device='cpu')
    assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
    # for ssd
    from mmdet.models.task_modules.prior_generators import SSDAnchorGenerator
    featmap_sizes = [(38, 38), (19, 19), (10, 10)]
    anchor_generator = SSDAnchorGenerator(
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        strides=[8, 16, 32],
        ratios=[[2], [2, 3], [2, 3]])
    ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
    for i in range(len(featmap_sizes)):
        sparse_ssd_anchors = anchor_generator.sparse_priors(
            prior_idxs=prior_indexs,
            level_idx=i,
            featmap_size=featmap_sizes[i],
            device='cpu')
        assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
    # for yolo
    from mmdet.models.task_modules.prior_generators import YOLOAnchorGenerator
    featmap_sizes = [(38, 38), (19, 19), (10, 10)]
    anchor_generator = YOLOAnchorGenerator(
        strides=[32, 16, 8],
        base_sizes=[
            [(116, 90), (156, 198), (373, 326)],
            [(30, 61), (62, 45), (59, 119)],
            [(10, 13), (16, 30), (33, 23)],
        ])
    yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
    for i in range(len(featmap_sizes)):
        sparse_yolo_anchors = anchor_generator.sparse_priors(
            prior_idxs=prior_indexs,
            level_idx=i,
            featmap_size=featmap_sizes[i],
            device='cpu')
        assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
    # Repeat the whole suite on GPU (with different indices/sizes/configs).
    if torch.cuda.is_available():
        mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
        prior_indexs = torch.Tensor([0, 3, 4, 5, 6, 7, 1, 2, 4, 5, 6,
                                     9]).long().cuda()
        featmap_sizes = [(6, 8), (6, 4)]
        grid_anchors = mlvl_points.grid_priors(
            featmap_sizes=featmap_sizes, with_stride=False, device='cuda')
        sparse_prior = mlvl_points.sparse_priors(
            prior_idxs=prior_indexs,
            featmap_size=featmap_sizes[0],
            level_idx=0,
            device='cuda')
        assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
        sparse_prior = mlvl_points.sparse_priors(
            prior_idxs=prior_indexs,
            featmap_size=featmap_sizes[1],
            level_idx=1,
            device='cuda')
        assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
        assert sparse_prior.is_cuda
        mlvl_anchors = AnchorGenerator(
            strides=[16, 32],
            ratios=[1., 2.5],
            scales=[1., 5.],
            base_sizes=[4, 8])
        prior_indexs = torch.Tensor([4, 5, 6, 7, 0, 2, 50, 4, 5, 6,
                                     9]).long().cuda()
        featmap_sizes = [(13, 5), (16, 4)]
        grid_anchors = mlvl_anchors.grid_priors(
            featmap_sizes=featmap_sizes, device='cuda')
        sparse_prior = mlvl_anchors.sparse_priors(
            prior_idxs=prior_indexs,
            featmap_size=featmap_sizes[0],
            level_idx=0,
            device='cuda')
        assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
        sparse_prior = mlvl_anchors.sparse_priors(
            prior_idxs=prior_indexs,
            featmap_size=featmap_sizes[1],
            level_idx=1,
            device='cuda')
        assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
        # for ssd
        from mmdet.models.task_modules.prior_generators import \
            SSDAnchorGenerator
        featmap_sizes = [(38, 38), (19, 19), (10, 10)]
        anchor_generator = SSDAnchorGenerator(
            scale_major=False,
            input_size=300,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32],
            ratios=[[2], [2, 3], [2, 3]])
        ssd_anchors = anchor_generator.grid_anchors(
            featmap_sizes, device='cuda')
        for i in range(len(featmap_sizes)):
            sparse_ssd_anchors = anchor_generator.sparse_priors(
                prior_idxs=prior_indexs,
                level_idx=i,
                featmap_size=featmap_sizes[i],
                device='cuda')
            assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
        # for yolo
        from mmdet.models.task_modules.prior_generators import \
            YOLOAnchorGenerator
        featmap_sizes = [(38, 38), (19, 19), (10, 10)]
        anchor_generator = YOLOAnchorGenerator(
            strides=[32, 16, 8],
            base_sizes=[
                [(116, 90), (156, 198), (373, 326)],
                [(30, 61), (62, 45), (59, 119)],
                [(10, 13), (16, 30), (33, 23)],
            ])
        yolo_anchors = anchor_generator.grid_anchors(
            featmap_sizes, device='cuda')
        for i in range(len(featmap_sizes)):
            sparse_yolo_anchors = anchor_generator.sparse_priors(
                prior_idxs=prior_indexs,
                level_idx=i,
                featmap_size=featmap_sizes[i],
                device='cuda')
            assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
def test_standard_anchor_generator():
    """AnchorGenerator built from config exposes consistent base counts."""
    from mmdet.models.task_modules import build_anchor_generator
    cfg = dict(
        type='AnchorGenerator',
        scales=[8],
        ratios=[0.5, 1.0, 2.0],
        strides=[4, 8])
    generator = build_anchor_generator(cfg)
    assert generator is not None
    # num_base_priors aliases num_base_anchors: 3 anchors per level here.
    assert generator.num_base_priors == generator.num_base_anchors
    assert generator.num_base_priors == [3, 3]
def test_strides():
    """grid_anchors places base anchors at stride-spaced centers."""
    from mmdet.models.task_modules.prior_generators import AnchorGenerator
    # Name the local ``generator`` -- ``self`` is misleading outside a class.
    # Square strides
    generator = AnchorGenerator([10], [1.], [1.], [10])
    anchors = generator.grid_anchors([(2, 2)], device='cpu')
    expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
                                     [-5., 5., 5., 15.], [5., 5., 15., 15.]])
    assert torch.equal(anchors[0], expected_anchors)
    # Different strides in x and y direction
    generator = AnchorGenerator([(10, 20)], [1.], [1.], [10])
    anchors = generator.grid_anchors([(2, 2)], device='cpu')
    expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
                                     [-5., 15., 5., 25.], [5., 15., 15., 25.]])
    assert torch.equal(anchors[0], expected_anchors)
def test_ssd_anchor_generator():
    """Check ``SSDAnchorGenerator`` config validation, base anchors, valid
    flags and grid-anchor generation for both the manual min/max-size
    configuration and the legacy VGG ``basesize_ratio_range`` configuration.
    """
    from mmdet.models.task_modules import build_anchor_generator
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'

    # min_sizes max_sizes must set at the same time
    with pytest.raises(AssertionError):
        anchor_generator_cfg = dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            min_sizes=[48, 100, 150, 202, 253, 300],
            max_sizes=None,
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
        build_anchor_generator(anchor_generator_cfg)

    # length of min_sizes max_sizes must be the same
    with pytest.raises(AssertionError):
        anchor_generator_cfg = dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            min_sizes=[48, 100, 150, 202, 253, 300],
            max_sizes=[100, 150, 202, 253],
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
        build_anchor_generator(anchor_generator_cfg)

    # test setting anchor size manually
    anchor_generator_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        min_sizes=[48, 100, 150, 202, 253, 304],
        max_sizes=[100, 150, 202, 253, 304, 320],
        strides=[16, 32, 64, 107, 160, 320],
        ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
    anchor_generator = build_anchor_generator(anchor_generator_cfg)
    # Golden base anchors per level, precomputed from the config above.
    expected_base_anchors = [
        torch.Tensor([[-16.0000, -16.0000, 32.0000, 32.0000],
                      [-26.6410, -26.6410, 42.6410, 42.6410],
                      [-25.9411, -8.9706, 41.9411, 24.9706],
                      [-8.9706, -25.9411, 24.9706, 41.9411],
                      [-33.5692, -5.8564, 49.5692, 21.8564],
                      [-5.8564, -33.5692, 21.8564, 49.5692]]),
        torch.Tensor([[-34.0000, -34.0000, 66.0000, 66.0000],
                      [-45.2372, -45.2372, 77.2372, 77.2372],
                      [-54.7107, -19.3553, 86.7107, 51.3553],
                      [-19.3553, -54.7107, 51.3553, 86.7107],
                      [-70.6025, -12.8675, 102.6025, 44.8675],
                      [-12.8675, -70.6025, 44.8675, 102.6025]]),
        torch.Tensor([[-43.0000, -43.0000, 107.0000, 107.0000],
                      [-55.0345, -55.0345, 119.0345, 119.0345],
                      [-74.0660, -21.0330, 138.0660, 85.0330],
                      [-21.0330, -74.0660, 85.0330, 138.0660],
                      [-97.9038, -11.3013, 161.9038, 75.3013],
                      [-11.3013, -97.9038, 75.3013, 161.9038]]),
        torch.Tensor([[-47.5000, -47.5000, 154.5000, 154.5000],
                      [-59.5332, -59.5332, 166.5332, 166.5332],
                      [-89.3356, -17.9178, 196.3356, 124.9178],
                      [-17.9178, -89.3356, 124.9178, 196.3356],
                      [-121.4371, -4.8124, 228.4371, 111.8124],
                      [-4.8124, -121.4371, 111.8124, 228.4371]]),
        torch.Tensor([[-46.5000, -46.5000, 206.5000, 206.5000],
                      [-58.6651, -58.6651, 218.6651, 218.6651],
                      [-98.8980, -9.4490, 258.8980, 169.4490],
                      [-9.4490, -98.8980, 169.4490, 258.8980],
                      [-139.1044, 6.9652, 299.1044, 153.0348],
                      [6.9652, -139.1044, 153.0348, 299.1044]]),
        torch.Tensor([[8.0000, 8.0000, 312.0000, 312.0000],
                      [4.0513, 4.0513, 315.9487, 315.9487],
                      [-54.9605, 52.5198, 374.9604, 267.4802],
                      [52.5198, -54.9605, 267.4802, 374.9604],
                      [-103.2717, 72.2428, 423.2717, 247.7572],
                      [72.2428, -103.2717, 247.7572, 423.2717]])
    ]
    base_anchors = anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])

    # check valid flags (pad shape 320x320; counts are per-level fixtures)
    expected_valid_pixels = [2400, 600, 150, 54, 24, 6]
    multi_level_valid_flags = anchor_generator.valid_flags(
        featmap_sizes, (320, 320), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]

    # check number of base anchors for each level
    assert anchor_generator.num_base_anchors == [6, 6, 6, 6, 6, 6]

    # check anchor generation
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 6

    # test vgg ssd anchor setting (sizes derived from basesize_ratio_range)
    anchor_generator_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        strides=[8, 16, 32, 64, 100, 300],
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
    anchor_generator = build_anchor_generator(anchor_generator_cfg)

    # check base anchors (levels with one ratio get 4 anchors, others 6)
    expected_base_anchors = [
        torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000],
                      [-11.3704, -11.3704, 19.3704, 19.3704],
                      [-10.8492, -3.4246, 18.8492, 11.4246],
                      [-3.4246, -10.8492, 11.4246, 18.8492]]),
        torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000],
                      [-25.3729, -25.3729, 41.3729, 41.3729],
                      [-23.8198, -7.9099, 39.8198, 23.9099],
                      [-7.9099, -23.8198, 23.9099, 39.8198],
                      [-30.9711, -4.9904, 46.9711, 20.9904],
                      [-4.9904, -30.9711, 20.9904, 46.9711]]),
        torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000],
                      [-45.5366, -45.5366, 77.5366, 77.5366],
                      [-54.0036, -19.0018, 86.0036, 51.0018],
                      [-19.0018, -54.0036, 51.0018, 86.0036],
                      [-69.7365, -12.5788, 101.7365, 44.5788],
                      [-12.5788, -69.7365, 44.5788, 101.7365]]),
        torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000],
                      [-56.9817, -56.9817, 120.9817, 120.9817],
                      [-76.1873, -22.0937, 140.1873, 86.0937],
                      [-22.0937, -76.1873, 86.0937, 140.1873],
                      [-100.5019, -12.1673, 164.5019, 76.1673],
                      [-12.1673, -100.5019, 76.1673, 164.5019]]),
        torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000],
                      [-66.2185, -66.2185, 166.2185, 166.2185],
                      [-96.3711, -23.1855, 196.3711, 123.1855],
                      [-23.1855, -96.3711, 123.1855, 196.3711]]),
        torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000],
                      [6.6342, 6.6342, 293.3658, 293.3658],
                      [-34.5549, 57.7226, 334.5549, 242.2774],
                      [57.7226, -34.5549, 242.2774, 334.5549]]),
    ]
    base_anchors = anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])

    # check valid flags (pad shape 300x300)
    expected_valid_pixels = [5776, 2166, 600, 150, 36, 4]
    multi_level_valid_flags = anchor_generator.valid_flags(
        featmap_sizes, (300, 300), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]

    # check number of base anchors for each level
    assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4]

    # check anchor generation
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 6
def test_anchor_generator_with_tuples():
    """Scalar strides and equivalent ``(w, h)`` tuple strides must yield
    identical grid anchors."""
    from mmdet.models.task_modules import build_anchor_generator

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]

    common = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])

    # Same generator configured once with scalar strides ...
    scalar_gen = build_anchor_generator(
        dict(common, strides=[8, 16, 32, 64, 100, 300]))
    scalar_anchors = scalar_gen.grid_anchors(featmap_sizes, device)

    # ... and once with the equivalent square (w, h) tuples.
    tuple_gen = build_anchor_generator(
        dict(
            common,
            strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100),
                     (300, 300)]))
    tuple_anchors = tuple_gen.grid_anchors(featmap_sizes, device)

    for lvl_scalar, lvl_tuple in zip(scalar_anchors, tuple_anchors):
        assert torch.equal(lvl_scalar, lvl_tuple)
def test_yolo_anchor_generator():
    """Check YOLO base anchors, per-level anchor counts and grid anchors."""
    from mmdet.models.task_modules import build_anchor_generator

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    anchor_generator = build_anchor_generator(
        dict(
            type='YOLOAnchorGenerator',
            strides=[32, 16, 8],
            base_sizes=[
                [(116, 90), (156, 198), (373, 326)],
                [(30, 61), (62, 45), (59, 119)],
                [(10, 13), (16, 30), (33, 23)],
            ]))
    featmap_sizes = [(14, 18), (28, 36), (56, 72)]

    # Golden base anchors per level for the base sizes/strides above.
    expected_base_anchors = [
        torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000],
                      [-62.0000, -83.0000, 94.0000, 115.0000],
                      [-170.5000, -147.0000, 202.5000, 179.0000]]),
        torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000],
                      [-23.0000, -14.5000, 39.0000, 30.5000],
                      [-21.5000, -51.5000, 37.5000, 67.5000]]),
        torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000],
                      [-4.0000, -11.0000, 12.0000, 19.0000],
                      [-12.5000, -7.5000, 20.5000, 15.5000]])
    ]
    for level_anchors, expected in zip(anchor_generator.base_anchors,
                                       expected_base_anchors):
        assert level_anchors.allclose(expected)

    # Three base anchors per level, one anchor grid per feature map.
    assert anchor_generator.num_base_anchors == [3, 3, 3]
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 3
def test_retina_anchor():
    """Check the anchor generator owned by a ``RetinaSepBNHead``: base
    anchors, valid flags and multi-level grid-anchor generation."""
    from mmdet.registry import MODELS
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'

    # head configs modified from
    # configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py
    bbox_head = dict(
        type='RetinaSepBNHead',
        num_classes=4,
        num_ins=5,
        in_channels=4,
        stacked_convs=1,
        feat_channels=4,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]))

    retina_head = MODELS.build(bbox_head)
    assert retina_head.anchor_generator is not None

    # use the featmap sizes in NASFPN setting to test retina head
    featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]

    # check base anchors: 9 per level (3 ratios x 3 scales per octave),
    # golden values precomputed for the config above
    expected_base_anchors = [
        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
                      [-28.5088, -14.2544, 28.5088, 14.2544],
                      [-35.9188, -17.9594, 35.9188, 17.9594],
                      [-16.0000, -16.0000, 16.0000, 16.0000],
                      [-20.1587, -20.1587, 20.1587, 20.1587],
                      [-25.3984, -25.3984, 25.3984, 25.3984],
                      [-11.3137, -22.6274, 11.3137, 22.6274],
                      [-14.2544, -28.5088, 14.2544, 28.5088],
                      [-17.9594, -35.9188, 17.9594, 35.9188]]),
        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
                      [-57.0175, -28.5088, 57.0175, 28.5088],
                      [-71.8376, -35.9188, 71.8376, 35.9188],
                      [-32.0000, -32.0000, 32.0000, 32.0000],
                      [-40.3175, -40.3175, 40.3175, 40.3175],
                      [-50.7968, -50.7968, 50.7968, 50.7968],
                      [-22.6274, -45.2548, 22.6274, 45.2548],
                      [-28.5088, -57.0175, 28.5088, 57.0175],
                      [-35.9188, -71.8376, 35.9188, 71.8376]]),
        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
                      [-114.0350, -57.0175, 114.0350, 57.0175],
                      [-143.6751, -71.8376, 143.6751, 71.8376],
                      [-64.0000, -64.0000, 64.0000, 64.0000],
                      [-80.6349, -80.6349, 80.6349, 80.6349],
                      [-101.5937, -101.5937, 101.5937, 101.5937],
                      [-45.2548, -90.5097, 45.2548, 90.5097],
                      [-57.0175, -114.0350, 57.0175, 114.0350],
                      [-71.8376, -143.6751, 71.8376, 143.6751]]),
        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
                      [-228.0701, -114.0350, 228.0701, 114.0350],
                      [-287.3503, -143.6751, 287.3503, 143.6751],
                      [-128.0000, -128.0000, 128.0000, 128.0000],
                      [-161.2699, -161.2699, 161.2699, 161.2699],
                      [-203.1873, -203.1873, 203.1873, 203.1873],
                      [-90.5097, -181.0193, 90.5097, 181.0193],
                      [-114.0350, -228.0701, 114.0350, 228.0701],
                      [-143.6751, -287.3503, 143.6751, 287.3503]]),
        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
                      [-456.1401, -228.0701, 456.1401, 228.0701],
                      [-574.7006, -287.3503, 574.7006, 287.3503],
                      [-256.0000, -256.0000, 256.0000, 256.0000],
                      [-322.5398, -322.5398, 322.5398, 322.5398],
                      [-406.3747, -406.3747, 406.3747, 406.3747],
                      [-181.0193, -362.0387, 181.0193, 362.0387],
                      [-228.0701, -456.1401, 228.0701, 456.1401],
                      [-287.3503, -574.7006, 287.3503, 574.7006]])
    ]
    base_anchors = retina_head.anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])

    # check valid flags (pad shape 640x640)
    expected_valid_pixels = [57600, 14400, 3600, 900, 225]
    multi_level_valid_flags = retina_head.anchor_generator.valid_flags(
        featmap_sizes, (640, 640), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]

    # check number of base anchors for each level
    assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]

    # check anchor generation
    anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 5
def test_guided_anchor():
    """Check both prior generators of a ``GARetinaHead``: the dense approx
    anchor generator and the single square anchor generator."""
    from mmdet.registry import MODELS
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'

    # head configs modified from
    # configs/guided_anchoring/ga-retinanet_r50_fpn_1x_coco.py
    bbox_head = dict(
        type='GARetinaHead',
        num_classes=8,
        in_channels=4,
        stacked_convs=1,
        feat_channels=4,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]))

    ga_retina_head = MODELS.build(bbox_head)
    assert ga_retina_head.approx_anchor_generator is not None

    # use the featmap sizes in NASFPN setting to test ga_retina_head
    featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]

    # check base anchors of the approx generator: 9 per level
    # (3 ratios x 3 scales per octave), golden values for the config above
    expected_approxs = [
        torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
                      [-28.5088, -14.2544, 28.5088, 14.2544],
                      [-35.9188, -17.9594, 35.9188, 17.9594],
                      [-16.0000, -16.0000, 16.0000, 16.0000],
                      [-20.1587, -20.1587, 20.1587, 20.1587],
                      [-25.3984, -25.3984, 25.3984, 25.3984],
                      [-11.3137, -22.6274, 11.3137, 22.6274],
                      [-14.2544, -28.5088, 14.2544, 28.5088],
                      [-17.9594, -35.9188, 17.9594, 35.9188]]),
        torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
                      [-57.0175, -28.5088, 57.0175, 28.5088],
                      [-71.8376, -35.9188, 71.8376, 35.9188],
                      [-32.0000, -32.0000, 32.0000, 32.0000],
                      [-40.3175, -40.3175, 40.3175, 40.3175],
                      [-50.7968, -50.7968, 50.7968, 50.7968],
                      [-22.6274, -45.2548, 22.6274, 45.2548],
                      [-28.5088, -57.0175, 28.5088, 57.0175],
                      [-35.9188, -71.8376, 35.9188, 71.8376]]),
        torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
                      [-114.0350, -57.0175, 114.0350, 57.0175],
                      [-143.6751, -71.8376, 143.6751, 71.8376],
                      [-64.0000, -64.0000, 64.0000, 64.0000],
                      [-80.6349, -80.6349, 80.6349, 80.6349],
                      [-101.5937, -101.5937, 101.5937, 101.5937],
                      [-45.2548, -90.5097, 45.2548, 90.5097],
                      [-57.0175, -114.0350, 57.0175, 114.0350],
                      [-71.8376, -143.6751, 71.8376, 143.6751]]),
        torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
                      [-228.0701, -114.0350, 228.0701, 114.0350],
                      [-287.3503, -143.6751, 287.3503, 143.6751],
                      [-128.0000, -128.0000, 128.0000, 128.0000],
                      [-161.2699, -161.2699, 161.2699, 161.2699],
                      [-203.1873, -203.1873, 203.1873, 203.1873],
                      [-90.5097, -181.0193, 90.5097, 181.0193],
                      [-114.0350, -228.0701, 114.0350, 228.0701],
                      [-143.6751, -287.3503, 143.6751, 287.3503]]),
        torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
                      [-456.1401, -228.0701, 456.1401, 228.0701],
                      [-574.7006, -287.3503, 574.7006, 287.3503],
                      [-256.0000, -256.0000, 256.0000, 256.0000],
                      [-322.5398, -322.5398, 322.5398, 322.5398],
                      [-406.3747, -406.3747, 406.3747, 406.3747],
                      [-181.0193, -362.0387, 181.0193, 362.0387],
                      [-228.0701, -456.1401, 228.0701, 456.1401],
                      [-287.3503, -574.7006, 287.3503, 574.7006]])
    ]
    approxs = ga_retina_head.approx_anchor_generator.base_anchors
    for i, base_anchor in enumerate(approxs):
        assert base_anchor.allclose(expected_approxs[i])

    # check valid flags (pad shape 800x1216)
    expected_valid_pixels = [136800, 34200, 8550, 2223, 630]
    multi_level_valid_flags = ga_retina_head.approx_anchor_generator \
        .valid_flags(featmap_sizes, (800, 1216), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]

    # check number of base anchors for each level
    assert ga_retina_head.approx_anchor_generator.num_base_anchors == [
        9, 9, 9, 9, 9
    ]

    # check that grid anchors can be generated for the square generator
    squares = ga_retina_head.square_anchor_generator.grid_anchors(
        featmap_sizes, device)
    assert len(squares) == 5

    # check base anchors of the square generator: one square box per level
    expected_squares = [
        torch.Tensor([[-16., -16., 16., 16.]]),
        torch.Tensor([[-32., -32., 32., 32]]),
        torch.Tensor([[-64., -64., 64., 64.]]),
        torch.Tensor([[-128., -128., 128., 128.]]),
        torch.Tensor([[-256., -256., 256., 256.]])
    ]
    squares = ga_retina_head.square_anchor_generator.base_anchors
    for i, base_anchor in enumerate(squares):
        assert base_anchor.allclose(expected_squares[i])

    # square_anchor_generator does not check valid flags
    # check number of base anchors for each level
    assert (ga_retina_head.square_anchor_generator.num_base_anchors == [
        1, 1, 1, 1, 1
    ])

    # check square generation
    anchors = ga_retina_head.square_anchor_generator.grid_anchors(
        featmap_sizes, device)
    assert len(anchors) == 5
| 33,364
| 42.218912
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_dynamic_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestDynamicRoIHead(TestCase):
    """Tests for ``DynamicRoIHead`` built from the Dynamic R-CNN config."""

    def setUp(self):
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py')

    def test_init(self):
        """The built RoI head must own a bbox branch."""
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)

    @parameterized.expand(['cpu', 'cuda'])
    def test_dynamic_roi_head_loss(self, device):
        """Tests dynamic roi head loss when truth is non-empty and empty."""
        if not torch.cuda.is_available() and device == 'cuda':
            # RoI pooling only support in GPU. Use ``skipTest`` so the run
            # is reported as skipped; ``return unittest.skip(...)`` would
            # silently mark it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        # Four feature levels matching the RoI extractor's featmap strides.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))

        image_shapes = [(3, s, s)]
        # One GT instance: both cls and bbox losses should be non-zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)

        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_bbox = out['loss_bbox']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')

        # No GT instances: cls loss stays non-zero (background targets), but
        # there is nothing to regress, so the bbox loss must be zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)

        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        empty_bbox_loss = out['loss_bbox']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_bbox_loss.sum(), 0,
            'there should be no box loss when there are no true boxes')
| 2,733
| 36.972222
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_trident_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestTridentRoIHead(TestCase):
    """Tests for ``TridentRoIHead`` built from the TridentNet config."""

    def setUp(self):
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'tridentnet/tridentnet_r50-caffe_1x_coco.py')

    def test_init(self):
        """The built RoI head must own a bbox branch and a shared head."""
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
        self.assertTrue(roi_head.with_shared_head)

    def test_trident_roi_head_predict(self):
        """Tests trident roi head predict."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU. Use ``skipTest`` so the run
            # is reported as skipped; ``return unittest.skip(...)`` would
            # silently mark it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        roi_head_cfg = copy.deepcopy(self.roi_head_cfg)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        s = 256
        # Feature levels matching the RoI extractor's featmap strides.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 1024, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))

        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')

        # When `test_branch_idx == 1`
        roi_head.predict(feats, proposals_list, batch_data_samples)

        # When `test_branch_idx == -1`, rebuild the head and predict again.
        roi_head_cfg.test_branch_idx = -1
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        roi_head.predict(feats, proposals_list, batch_data_samples)
| 2,012
| 33.118644
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_pisa_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestPISARoIHead(TestCase):
    """Tests for ``PISARoIHead`` built from the PISA Faster R-CNN config."""

    def setUp(self):
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py')

    def test_init(self):
        """The built RoI head must own a bbox branch."""
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)

    @parameterized.expand(['cpu', 'cuda'])
    def test_pisa_roi_head(self, device):
        """Tests PISA roi head loss when truth is non-empty and empty."""
        if not torch.cuda.is_available() and device == 'cuda':
            # RoI pooling only support in GPU. Use ``skipTest`` so the run
            # is reported as skipped; ``return unittest.skip(...)`` would
            # silently mark it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        # Four feature levels matching the RoI extractor's featmap strides.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))

        image_shapes = [(3, s, s)]
        # One GT instance: both cls and bbox losses should be non-zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)

        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_bbox = out['loss_bbox']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')

        # No GT instances: cls loss stays non-zero (background targets), but
        # there is nothing to regress, so the bbox loss must be zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)

        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        empty_bbox_loss = out['loss_bbox']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_bbox_loss.sum(), 0,
            'there should be no box loss when there are no true boxes')
| 2,718
| 36.763889
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_multi_instance_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import Config
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals
from mmdet.utils import register_all_modules
register_all_modules()
def _fake_roi_head():
    """Set a fake roi head config."""
    # RoI feature extraction over a 4-level pyramid.
    bbox_roi_extractor = dict(
        type='SingleRoIExtractor',
        roi_layer=dict(
            type='RoIAlign',
            output_size=7,
            sampling_ratio=-1,
            aligned=True,
            use_torchvision=True),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32])

    # Multi-instance bbox head with per-element (reduction='none') losses.
    bbox_head = dict(
        type='MultiInstanceBBoxHead',
        with_refine=False,
        num_shared_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=1,
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0., 0., 0., 0.],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss',
            loss_weight=1.0,
            use_sigmoid=False,
            reduction='none'),
        loss_bbox=dict(
            type='SmoothL1Loss', loss_weight=1.0, reduction='none'))

    train_cfg = dict(
        assigner=dict(
            type='MultiInstanceAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.3,
            match_low_quality=False,
            ignore_iof_thr=-1),
        sampler=dict(
            type='MultiInsRandomSampler',
            num=512,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        pos_weight=-1,
        debug=False)

    test_cfg = dict(
        nms=dict(iou_threshold=0.5), score_thr=0.01, max_per_img=500)

    return Config(
        dict(
            type='MultiInstanceRoIHead',
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg))
class TestMultiInstanceRoIHead(TestCase):
    """Tests for ``MultiInstanceRoIHead`` built from ``_fake_roi_head``."""

    def test_init(self):
        """Test init multi instance RoI head."""
        roi_head_cfg = _fake_roi_head()
        roi_head = MODELS.build(roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)

    def test_standard_roi_head_loss(self):
        """Tests multi instance roi head loss when truth is empty and non-
        empty."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU. Use ``skipTest`` so the run
            # is reported as skipped; ``return unittest.skip(...)`` would
            # silently mark it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        roi_head_cfg = _fake_roi_head()
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # Single-channel feature levels matching the RoI extractor strides.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)

        # When truth is non-empty then emd loss should be nonzero for
        # random inputs
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=False,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')

        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss = out['loss_rcnn_emd']
        self.assertGreater(loss.sum(), 0, 'loss should be non-zero')

        # When there is no truth, the emd loss should be zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')

        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_loss = out['loss_rcnn_emd']
        self.assertEqual(
            empty_loss.sum(), 0,
            'there should be no emd loss when there are no true boxes')
| 4,627
| 34.6
| 79
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_mask_scoring_roI_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestMaskScoringRoiHead(TestCase):
    """Tests for ``MaskScoringRoIHead`` built from the MS R-CNN config."""

    def setUp(self):
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'ms_rcnn/ms-rcnn_r50_fpn_1x_coco.py')

    def test_init(self):
        """The built RoI head must own bbox, mask and mask-IoU branches."""
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
        self.assertTrue(roi_head.with_mask)
        self.assertTrue(roi_head.mask_iou_head)

    def _build_cuda_head_and_feats(self):
        """Build the RoI head on GPU plus a matching feature pyramid."""
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.cuda()
        s = 256
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        return roi_head, feats, s

    def test_mask_scoring_roi_head_loss(self):
        """Tests mask scoring roi head loss with non-empty and empty GT."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU. Use ``skipTest`` so the run
            # is reported as skipped; ``return unittest.skip(...)`` would
            # silently mark it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        roi_head, feats, s = self._build_cuda_head_and_feats()

        image_shapes = [(3, s, s)]
        # One GT instance: cls, bbox and mask losses should be non-zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')

        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_bbox = out['loss_bbox']
        loss_mask = out['loss_mask']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
        self.assertGreater(loss_mask.sum(), 0, 'mask loss should be non-zero')

        # No GT instances: only the cls loss (background) stays non-zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')

        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        empty_bbox_loss = out['loss_bbox']
        empty_mask_loss = out['loss_mask']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_bbox_loss.sum(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_mask_loss.sum(), 0,
            'there should be no mask loss when there are no true boxes')

    def test_mask_scoring_roi_head_predict(self):
        """Tests mask scoring roi head predict."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU.
            self.skipTest('test requires GPU and torch+cuda')
        roi_head, feats, s = self._build_cuda_head_and_feats()

        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        roi_head.predict(feats, proposals_list, batch_data_samples)

    def test_mask_scoring_roi_head_forward(self):
        """Tests mask scoring roi head forward."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU.
            self.skipTest('test requires GPU and torch+cuda')
        roi_head, feats, s = self._build_cuda_head_and_feats()

        image_shapes = [(3, s, s)]
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        roi_head.forward(feats, proposals_list)
| 4,844
| 37.76
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_htc_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import HybridTaskCascadeRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
    """Tests for ``HybridTaskCascadeRoIHead`` built from the HTC config."""

    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init htc RoI head."""
        # Normal HTC RoI head
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask
        assert roi_head.with_semantic

    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
    def test_htc_roi_head_loss(self, cfg_file):
        """Tests htc roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU. Use ``skipTest`` so the run
            # is reported as skipped; ``return unittest.skip(...)`` would
            # silently mark it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # Feature levels matching the RoI extractor's featmap strides.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)

        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            with_semantic=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')

        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            with_semantic=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss_cls' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
            elif 'loss_bbox' in name or 'loss_mask' in name:
                self.assertEqual(value.sum(), 0)

    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
    def test_htc_roi_head_predict(self, cfg_file):
        """Predicted masks must be rescaled back to the input resolution."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)

        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        results = roi_head.predict(
            feats, proposal_list, batch_data_samples, rescale=True)
        self.assertEqual(results[0].masks.shape[-2:], (s, s))
| 4,493
| 38.078261
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_standard_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import Config
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals
from mmdet.utils import register_all_modules
register_all_modules()
def _fake_roi_head(with_shared_head=False):
    """Set a fake roi head config.

    Args:
        with_shared_head (bool): Whether the config includes a shared
            ``ResLayer`` head. Defaults to False.

    Returns:
        Config: A ``StandardRoIHead`` config for building a test model.
    """
    # Pieces identical (or nearly so) between the two variants.
    sampler = dict(
        type='RandomSampler',
        num=512,
        pos_fraction=0.25,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    test_cfg = dict(
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100,
        mask_thr_binary=0.5)

    def _assigner(match_low_quality):
        # The assigner differs between variants only in match_low_quality.
        return dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            match_low_quality=match_low_quality,
            ignore_iof_thr=-1)

    if with_shared_head:
        roi_head = Config(
            dict(
                type='StandardRoIHead',
                shared_head=dict(
                    type='ResLayer',
                    depth=50,
                    stage=3,
                    stride=2,
                    dilation=1,
                    style='caffe',
                    norm_cfg=dict(type='BN', requires_grad=False),
                    norm_eval=True),
                bbox_roi_extractor=dict(
                    type='SingleRoIExtractor',
                    roi_layer=dict(
                        type='RoIAlign', output_size=14, sampling_ratio=0),
                    out_channels=1,
                    featmap_strides=[16]),
                bbox_head=dict(
                    type='BBoxHead',
                    with_avg_pool=True,
                    in_channels=2048,
                    roi_feat_size=7,
                    num_classes=4),
                mask_roi_extractor=None,
                mask_head=dict(
                    type='FCNMaskHead',
                    num_convs=0,
                    in_channels=2048,
                    conv_out_channels=1,
                    num_classes=4),
                train_cfg=dict(
                    assigner=_assigner(False),
                    sampler=sampler,
                    mask_size=14,
                    pos_weight=-1,
                    debug=False),
                test_cfg=test_cfg))
    else:
        roi_head = Config(
            dict(
                type='StandardRoIHead',
                bbox_roi_extractor=dict(
                    type='SingleRoIExtractor',
                    roi_layer=dict(
                        type='RoIAlign', output_size=7, sampling_ratio=0),
                    out_channels=1,
                    featmap_strides=[4, 8, 16, 32]),
                bbox_head=dict(
                    type='Shared2FCBBoxHead',
                    in_channels=1,
                    fc_out_channels=1,
                    num_classes=4),
                mask_roi_extractor=dict(
                    type='SingleRoIExtractor',
                    roi_layer=dict(
                        type='RoIAlign', output_size=14, sampling_ratio=0),
                    out_channels=1,
                    featmap_strides=[4, 8, 16, 32]),
                mask_head=dict(
                    type='FCNMaskHead',
                    num_convs=1,
                    in_channels=1,
                    conv_out_channels=1,
                    num_classes=4),
                train_cfg=dict(
                    assigner=_assigner(True),
                    sampler=sampler,
                    mask_size=28,
                    pos_weight=-1,
                    debug=False),
                test_cfg=test_cfg))
    return roi_head
class TestStandardRoIHead(TestCase):
    """Unit tests for ``StandardRoIHead`` built from the fake configs."""

    def test_init(self):
        """Test init standard RoI head."""
        # Normal Mask R-CNN RoI head
        roi_head_cfg = _fake_roi_head()
        roi_head = MODELS.build(roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
        self.assertTrue(roi_head.with_mask)
        # Mask R-CNN RoI head with shared_head
        roi_head_cfg = _fake_roi_head(with_shared_head=True)
        roi_head = MODELS.build(roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
        self.assertTrue(roi_head.with_mask)
        self.assertTrue(roi_head.with_shared_head)

    @parameterized.expand([(False, ), (True, )])
    def test_standard_roi_head_loss(self, with_shared_head):
        """Tests standard roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256  # square input-image size used throughout the test
        roi_head_cfg = _fake_roi_head(with_shared_head=with_shared_head)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        feats = []
        # The plain head takes 1-channel features; the shared-head variant
        # is fed 1024-channel features instead.
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            if not with_shared_head:
                feats.append(
                    torch.rand(1, 1, s // (2**(i + 2)),
                               s // (2**(i + 2))).to(device='cuda'))
            else:
                feats.append(
                    torch.rand(1, 1024, s // (2**(i + 2)),
                               s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_bbox = out['loss_bbox']
        loss_mask = out['loss_mask']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
        self.assertGreater(loss_mask.sum(), 0, 'mask loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        empty_bbox_loss = out['loss_bbox']
        empty_mask_loss = out['loss_mask']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_bbox_loss.sum(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_mask_loss.sum(), 0,
            'there should be no mask loss when there are no true boxes')
| 8,093
| 38.101449
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_grid_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestGridRoIHead(TestCase):
    """Unit tests for ``GridRoIHead`` (Grid R-CNN)."""

    def setUp(self):
        # Register mmdet modules so MODELS.build can resolve the config.
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py')

    def test_init(self):
        """Test init grid RoI head."""
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_roi_head_loss(self, device):
        """Tests grid roi head loss when truth is empty and non-empty."""
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256  # square input-image size used throughout the test
        # One random 256-channel feature map per configured FPN stride.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))
        image_shapes = [(3, s, s)]
        # When truth is non-empty, both the cls and grid losses should be
        # non-zero for random inputs.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_grid = out['loss_grid']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_grid.sum(), 0, 'grid loss should be non-zero')
        # When there is no truth, the cls loss should still be non-zero and
        # the grid loss must be absent from the loss dict.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertNotIn(
            'loss_grid', out,
            'grid loss should not be present when there are no true boxes')

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_roi_head_predict(self, device):
        """Tests grid roi head predict."""
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        # Smoke test: predict must run without error on empty ground truth.
        roi_head.predict(feats, proposals_list, batch_data_samples)

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_roi_head_forward(self, device):
        """Tests grid roi head forward."""
        if device == 'cuda':
            if not torch.cuda.is_available():
                return unittest.skip('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))
        image_shapes = [(3, s, s)]
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        # Smoke test: forward must run without error.
        roi_head.forward(feats, proposals_list)
| 4,587
| 36.606557
| 78
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_point_rend_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import PointRendRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
    """Tests for ``PointRendRoIHead``.

    NOTE(review): the class name says HTC but every config under test is
    PointRend — looks like a copy-paste; confirm before renaming, since
    test discovery uses the class name.
    """

    @parameterized.expand(
        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init Point rend RoI head."""
        # Normal PointRend RoI head
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask

    @parameterized.expand(
        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
    def test_point_rend_roi_head_loss(self, cfg_file):
        """Tests point rend roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256  # square input-image size used throughout the test
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One random 256-channel feature map per configured FPN stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                # NOTE(review): assertGreaterEqual(…, 0) only checks
                # non-negativity despite the "non-zero" message.
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # Positive rois must not be empty
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        # PointRend asserts when there are no positive RoIs at all.
        with self.assertRaises(AssertionError):
            out = roi_head.loss(feats, proposal_list, batch_data_samples)

    @parameterized.expand(
        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
    def test_point_rend_roi_head_predict(self, cfg_file):
        """Tests point rend roi head predict and the rescaled mask size."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        results = roi_head.predict(
            feats, proposal_list, batch_data_samples, rescale=True)
        # Rescaled instance masks must match the original image resolution.
        self.assertEqual(results[0].masks.shape[-2:], (s, s))
| 4,222
| 37.743119
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_scnet_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import SCNetRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestSCNetRoIHead(TestCase):
    """Unit tests for ``SCNetRoIHead``."""

    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init scnet RoI head."""
        # Normal Cascade Mask R-CNN RoI head
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask
        assert roi_head.with_semantic
        assert roi_head.with_feat_relay
        assert roi_head.with_glbctx

    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
    def test_scnet_roi_head_loss(self, cfg_file):
        """Tests scnet roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256  # square input-image size used throughout the test
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One random 256-channel feature map per configured FPN stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            with_semantic=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                # NOTE(review): assertGreaterEqual(…, 0) only checks
                # non-negativity despite the "non-zero" message.
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            with_semantic=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss_cls' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
            elif 'loss_bbox' in name or 'loss_mask' in name:
                # Empty ground truth must produce exactly-zero box/mask loss.
                self.assertEqual(value.sum(), 0)

    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
    def test_scnet_roi_head_predict(self, cfg_file):
        """Tests scnet roi head predict and the rescaled mask size."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        results = roi_head.predict(
            feats, proposal_list, batch_data_samples, rescale=True)
        # Rescaled instance masks must match the original image resolution.
        self.assertEqual(results[0].masks.shape[-2:], (s, s))
| 4,592
| 38.25641
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_cascade_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
    """Unit tests for ``CascadeRoIHead`` (Cascade Mask R-CNN)."""

    @parameterized.expand(
        ['cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init cascade RoI head."""
        # Normal Cascade Mask R-CNN RoI head
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask

    @parameterized.expand(
        ['cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'])
    def test_cascade_roi_head_loss(self, cfg_file):
        """Tests cascade roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256  # square input-image size used throughout the test
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One random 1-channel feature map per configured FPN stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                # NOTE(review): assertGreaterEqual(…, 0) only checks
                # non-negativity despite the "non-zero" message.
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss_cls' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
            elif 'loss_bbox' in name or 'loss_mask' in name:
                # Empty ground truth must produce exactly-zero box/mask loss.
                self.assertEqual(value.sum(), 0)
| 3,156
| 37.975309
| 77
|
py
|
ERD
|
ERD-main/tests/test_models/test_roi_heads/test_sparse_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
    """Tests for the QueryInst sparse RoI head.

    NOTE(review): class name says Cascade but the config under test is
    QueryInst (sparse RoI head) — looks like a copy-paste; confirm before
    renaming, since test discovery uses the class name.
    """

    @parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init sparse (QueryInst) RoI head."""
        # Normal QueryInst RoI head
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head.init_weights()
        assert roi_head.with_bbox
        assert roi_head.with_mask

    @parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])
    def test_cascade_roi_head_loss(self, cfg_file):
        """Tests sparse roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256  # square input-image size used throughout the test
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One random 1-channel feature map per configured FPN stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        # add important elements into proposal: per-proposal features and
        # the image whwh tensor that QueryInst expects
        init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()
        for proposal in proposal_list:
            proposal.features = init_proposal_features
            proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,
                                                       s]]).repeat(100, 1)
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                # NOTE(review): assertGreaterEqual(…, 0) only checks
                # non-negativity despite the "non-zero" message.
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        # add important elements into proposal (same as above)
        init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()
        for proposal in proposal_list:
            proposal.features = init_proposal_features
            proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,
                                                       s]]).repeat(100, 1)
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss_cls' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
            elif 'loss_bbox' in name or 'loss_mask' in name:
                # Empty ground truth must produce exactly-zero box/mask loss.
                self.assertEqual(value.sum(), 0)
| 3,880
| 40.731183
| 77
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.