repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ERD | ERD-main/tests/test_models/test_detectors/test_single_stage_instance_seg.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestSingleStageInstanceSegmentor(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand([
'solo/solo_r50_fpn_1x_coco.py',
'solo/decoupled-solo_r50_fpn_1x_coco.py',
'solo/decoupled-solo-light_r50_fpn_3x_coco.py',
'solov2/solov2_r50_fpn_1x_coco.py',
'solov2/solov2-light_r18_fpn_ms-3x_coco.py',
'yolact/yolact_r50_1xb8-55e_coco.py',
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.mask_head)
if detector.with_bbox:
self.assertTrue(detector.bbox_head)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),
('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_loss_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),
('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([
('solo/solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled-solo_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solo/decoupled-solo-light_r50_fpn_3x_coco.py', ('cpu', 'cuda')),
('solov2/solov2_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
('solov2/solov2-light_r18_fpn_ms-3x_coco.py', ('cpu', 'cuda')),
('yolact/yolact_r50_1xb8-55e_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
self.assertIsInstance(batch_results, tuple)
| 5,403 | 39.328358 | 77 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_semi_base.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from mmengine.registry import MODELS
from parameterized import parameterized
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
register_all_modules()
class TestSemiBase(TestCase):
@parameterized.expand([
'soft_teacher/'
'soft-teacher_faster-rcnn_r50-caffe_fpn_180k_semi-0.1-coco.py',
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.detector.backbone.depth = 18
model.detector.neck.in_channels = [64, 128, 256, 512]
model.detector.backbone.init_cfg = None
model = MODELS.build(model)
self.assertTrue(model.teacher.backbone)
self.assertTrue(model.teacher.neck)
self.assertTrue(model.teacher.rpn_head)
self.assertTrue(model.teacher.roi_head)
self.assertTrue(model.student.backbone)
self.assertTrue(model.student.neck)
self.assertTrue(model.student.rpn_head)
self.assertTrue(model.student.roi_head)
| 1,116 | 30.914286 | 71 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_panoptic_two_stage_segmentor.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStagePanopticSegmentor(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_file = 'panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py'
model_cfg = get_detector_cfg(cfg_file)
model_cfg.backbone.depth = 18
model_cfg.neck.in_channels = [64, 128, 256, 512]
model_cfg.backbone.init_cfg = None
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.roi_head
assert detector.roi_head.mask_head
assert detector.with_semantic_head
assert detector.with_panoptic_fusion_head
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple)
| 3,467 | 35.893617 | 68 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_two_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStageBBox(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand([
'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.rpn_head)
self.assertTrue(detector.roi_head)
# if rpn.num_classes > 1, force set rpn.num_classes = 1
if hasattr(model.rpn_head, 'num_classes'):
model.rpn_head.num_classes = 2
detector = MODELS.build(model)
self.assertEqual(detector.rpn_head.num_classes, 1)
@parameterized.expand([
'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
])
def test_two_stage_forward_loss_mode(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
])
def test_two_stage_forward_predict_mode(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
# TODO: Awaiting refactoring
# @parameterized.expand([
# 'faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
# 'cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
# 'sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
# ])
# def test_two_stage_forward_tensor_mode(self, cfg_file):
# model = get_detector_cfg(cfg_file)
# # backbone convert to ResNet18
# model.backbone.depth = 18
# model.neck.in_channels = [64, 128, 256, 512]
# model.backbone.init_cfg = None
#
# from mmdet.models import build_detector
# detector = build_detector(model)
#
# if not torch.cuda.is_available():
# return unittest.skip('test requires GPU and torch+cuda')
# detector = detector.cuda()
#
# packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# data = detector.data_preprocessor(packed_inputs, False)
# out = detector.forward(**data, mode='tensor')
# self.assertIsInstance(out, tuple)
class TestTwoStageMask(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand([
'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
'queryinst/queryinst_r50_fpn_1x_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.rpn_head)
self.assertTrue(detector.roi_head)
self.assertTrue(detector.roi_head.mask_head)
# if rpn.num_classes > 1, force set rpn.num_classes = 1
if hasattr(model.rpn_head, 'num_classes'):
model.rpn_head.num_classes = 2
detector = MODELS.build(model)
self.assertEqual(detector.rpn_head.num_classes, 1)
@parameterized.expand([
'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
'queryinst/queryinst_r50_fpn_1x_coco.py'
])
def test_two_stage_forward_loss_mode(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
'queryinst/queryinst_r50_fpn_1x_coco.py'
])
def test_two_stage_forward_predict_mode(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 256, 256], [3, 255, 260]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
# TODO: Awaiting refactoring
# @parameterized.expand([
# 'mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
# 'cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
# 'queryinst/queryinst_r50_fpn_1x_coco.py'
# ])
# def test_two_stage_forward_tensor_mode(self, cfg_file):
# model = get_detector_cfg(cfg_file)
# # backbone convert to ResNet18
# model.backbone.depth = 18
# model.neck.in_channels = [64, 128, 256, 512]
# model.backbone.init_cfg = None
#
# from mmdet.models import build_detector
# detector = build_detector(model)
#
# if not torch.cuda.is_available():
# return unittest.skip('test requires GPU and torch+cuda')
# detector = detector.cuda()
#
# packed_inputs = demo_mm_inputs(
# 2, [[3, 128, 128], [3, 125, 130]], with_mask=True)
# data = detector.data_preprocessor(packed_inputs, False)
#
# # out = detector.forward(**data, mode='tensor')
# # self.assertIsInstance(out, tuple)
| 8,710 | 35.755274 | 75 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_kd_single_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18-gflv1-r101_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18-gflv1-r101_fpn_1x_coco.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
# Test forward train
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18-gflv1-r101_fpn_1x_coco.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
| 2,932 | 36.126582 | 78 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_detr.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDETR(TestCase):
def setUp(self) -> None:
register_all_modules()
def test_detr_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
img_metas = DetDataSample()
img_metas.set_metainfo(metainfo)
batch_data_samples = []
batch_data_samples.append(img_metas)
config = get_detector_cfg('detr/detr_r50_8xb2-150e_coco.py')
model = MODELS.build(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
img_metas.gt_instances = gt_instances
batch_data_samples1 = []
batch_data_samples1.append(img_metas)
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
self.assertGreater(loss.item(), 0,
'cls loss should be non-zero')
elif 'bbox' in key:
self.assertEqual(
loss.item(), 0,
'there should be no box loss when no ground true boxes')
elif 'iou' in key:
self.assertEqual(
loss.item(), 0,
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
img_metas.gt_instances = gt_instances
batch_data_samples2 = []
batch_data_samples2.append(img_metas)
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(random_image, batch_data_samples=batch_data_samples2)
# test only predict
model.predict(
random_image, batch_data_samples=batch_data_samples2, rescale=True)
| 3,116 | 35.670588 | 79 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_maskformer.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestMaskFormer(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_path = 'maskformer/maskformer_r50_ms-16xb1-75e_coco.py'
model_cfg = get_detector_cfg(cfg_path)
base_channels = 32
model_cfg.backbone.depth = 18
model_cfg.backbone.init_cfg = None
model_cfg.backbone.base_channels = base_channels
model_cfg.panoptic_head.in_channels = [
base_channels * 2**i for i in range(4)
]
model_cfg.panoptic_head.feat_channels = base_channels
model_cfg.panoptic_head.out_channels = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
layer_cfg.self_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
layer_cfg.ffn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8
model_cfg.panoptic_head.pixel_decoder.\
positional_encoding.num_feats = base_channels // 2
model_cfg.panoptic_head.positional_encoding.\
num_feats = base_channels // 2
model_cfg.panoptic_head.transformer_decoder.\
layer_cfg.self_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder. \
layer_cfg.cross_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.\
layer_cfg.ffn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.\
layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
detector.init_weights()
assert detector.backbone
assert detector.panoptic_head
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple)
class TestMask2Former(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self, cfg_path):
model_cfg = get_detector_cfg(cfg_path)
base_channels = 32
model_cfg.backbone.depth = 18
model_cfg.backbone.init_cfg = None
model_cfg.backbone.base_channels = base_channels
model_cfg.panoptic_head.in_channels = [
base_channels * 2**i for i in range(4)
]
model_cfg.panoptic_head.feat_channels = base_channels
model_cfg.panoptic_head.out_channels = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
layer_cfg.self_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
layer_cfg.ffn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
layer_cfg.ffn_cfg.feedforward_channels = base_channels * 4
model_cfg.panoptic_head.pixel_decoder.\
positional_encoding.num_feats = base_channels // 2
model_cfg.panoptic_head.positional_encoding.\
num_feats = base_channels // 2
model_cfg.panoptic_head.transformer_decoder.\
layer_cfg.self_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder. \
layer_cfg.cross_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.\
layer_cfg.ffn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.\
layer_cfg.ffn_cfg.feedforward_channels = base_channels * 8
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg(
'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py')
detector = MODELS.build(model_cfg)
detector.init_weights()
assert detector.backbone
assert detector.panoptic_head
@parameterized.expand([
('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),
('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
])
def test_forward_loss_mode(self, device, cfg_path):
print(device, cfg_path)
with_semantic = 'panoptic' in cfg_path
model_cfg = self._create_model_cfg(cfg_path)
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=with_semantic)
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),
('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
])
def test_forward_predict_mode(self, device, cfg_path):
with_semantic = 'panoptic' in cfg_path
model_cfg = self._create_model_cfg(cfg_path)
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=with_semantic)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([
('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
('cpu', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py'),
('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'),
('cuda', 'mask2former/mask2former_r50_8xb2-lsj-50e_coco.py')
])
def test_forward_tensor_mode(self, device, cfg_path):
with_semantic = 'panoptic' in cfg_path
model_cfg = self._create_model_cfg(cfg_path)
detector = MODELS.build(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=with_semantic)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple)
| 9,982 | 41.122363 | 78 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_dino.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDINO(TestCase):
def setUp(self):
register_all_modules()
def test_dino_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
data_sample = DetDataSample()
data_sample.set_metainfo(metainfo)
configs = [get_detector_cfg('dino/dino-4scale_r50_8xb2-12e_coco.py')]
for config in configs:
model = MODELS.build(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
data_sample.gt_instances = gt_instances
batch_data_samples_1 = [data_sample]
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples_1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
_loss = loss.item()
if 'bbox' in key or 'iou' in key or 'dn' in key:
self.assertEqual(
_loss, 0, f'there should be no {key}({_loss}) '
f'when no ground true boxes')
elif 'cls' in key:
self.assertGreater(_loss, 0,
f'{key}({_loss}) should be non-zero')
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
data_sample.gt_instances = gt_instances
batch_data_samples_2 = [data_sample]
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples_2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(
random_image, batch_data_samples=batch_data_samples_2)
# test only predict
model.predict(
random_image,
batch_data_samples=batch_data_samples_2,
rescale=True)
| 3,124 | 37.109756 | 78 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_rpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestRPN(TestCase):
    """Smoke tests for the RPN detector wrapper."""

    def setUp(self):
        register_all_modules()

    def _lightweight_cfg(self, cfg_file):
        """Load ``cfg_file`` and shrink the backbone to ResNet-18 so the
        test runs fast on CPU/CI."""
        cfg = get_detector_cfg(cfg_file)
        cfg.backbone.depth = 18
        cfg.neck.in_channels = [64, 128, 256, 512]
        cfg.backbone.init_cfg = None
        return cfg

    @parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        cfg = self._lightweight_cfg(cfg_file)
        from mmdet.registry import MODELS
        detector = MODELS.build(cfg)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.bbox_head)
        # An RPN is class-agnostic: a configured num_classes > 1 must be
        # forced back down to 1 at build time.
        cfg.rpn_head.num_classes = 2
        detector = MODELS.build(cfg)
        self.assertEqual(detector.bbox_head.num_classes, 1)

    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_loss_mode(self, cfg_file, devices):
        cfg = self._lightweight_cfg(cfg_file)
        from mmdet.registry import MODELS
        assert all(device in ['cpu', 'cuda'] for device in devices)
        for device in devices:
            detector = MODELS.build(cfg)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, True)
            # loss mode must return a dict of loss terms
            losses = detector.forward(**data, mode='loss')
            self.assertIsInstance(losses, dict)

    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_predict_mode(self, cfg_file, devices):
        cfg = self._lightweight_cfg(cfg_file)
        from mmdet.registry import MODELS
        assert all(device in ['cpu', 'cuda'] for device in devices)
        for device in devices:
            detector = MODELS.build(cfg)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            # predict mode returns one DetDataSample per input image
            detector.eval()
            with torch.no_grad():
                batch_results = detector.forward(**data, mode='predict')
                self.assertEqual(len(batch_results), 2)
                self.assertIsInstance(batch_results[0], DetDataSample)

    @parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_tensor_mode(self, cfg_file, devices):
        cfg = self._lightweight_cfg(cfg_file)
        from mmdet.registry import MODELS
        assert all(device in ['cpu', 'cuda'] for device in devices)
        for device in devices:
            detector = MODELS.build(cfg)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            # tensor mode returns the raw multi-level head outputs
            batch_results = detector.forward(**data, mode='tensor')
            self.assertIsInstance(batch_results, tuple)
| 4,309 | 37.141593 | 77 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_dab_detr.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDABDETR(TestCase):
    """Smoke tests for the DAB-DETR detector and its loss."""

    def setUp(self) -> None:
        register_all_modules()

    def test_dab_detr_head_loss(self):
        """Tests transformer head loss when truth is empty and non-empty."""
        s = 256
        data_sample = DetDataSample()
        data_sample.set_metainfo(
            dict(
                img_shape=(s, s),
                scale_factor=(1, 1),
                pad_shape=(s, s),
                batch_input_shape=(s, s)))
        model = MODELS.build(
            get_detector_cfg('dab_detr/dab-detr_r50_8xb2-50e_coco.py'))
        model.init_weights()
        dummy_image = torch.rand(1, 3, s, s)

        # An empty ground truth should push predictions towards background:
        # classification loss is non-zero while box/iou losses are zero.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        data_sample.gt_instances = empty_gt
        empty_gt_losses = model.loss(
            dummy_image, batch_data_samples=[data_sample])
        for key, loss in empty_gt_losses.items():
            if 'cls' in key:
                self.assertGreater(loss.item(), 0,
                                   'cls loss should be non-zero')
            elif 'bbox' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no box loss when no ground true boxes')
            elif 'iou' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no iou loss when there are no true boxes')

        # With a real box, every loss term must be non-zero for random input.
        real_gt = InstanceData()
        real_gt.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        real_gt.labels = torch.LongTensor([2])
        data_sample.gt_instances = real_gt
        one_gt_losses = model.loss(
            dummy_image, batch_data_samples=[data_sample])
        for loss in one_gt_losses.values():
            self.assertGreater(
                loss.item(), 0,
                'cls loss, or box loss, or iou loss should be non-zero')

        model.eval()
        # test _forward
        model._forward(dummy_image, batch_data_samples=[data_sample])
        # test only predict
        model.predict(
            dummy_image, batch_data_samples=[data_sample], rescale=True)
| 3,130 | 35.835294 | 79 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_cornernet.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase):
    """Smoke tests for CornerNet with a lightweight ResNet-18 backbone."""

    def setUp(self) -> None:
        register_all_modules()
        model_cfg = get_detector_cfg(
            'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
        # Swap the heavy hourglass backbone for ResNet-18 plus a
        # single-level FPN so the test fits on a CI GPU.
        model_cfg.backbone = ConfigDict(
            type='ResNet',
            depth=18,
            num_stages=4,
            out_indices=(3, ),
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            style='pytorch')
        model_cfg.neck = ConfigDict(
            type='FPN',
            in_channels=[512],
            out_channels=256,
            start_level=0,
            add_extra_convs='on_input',
            num_outs=1)
        model_cfg.bbox_head.num_feat_levels = 1
        self.model_cfg = model_cfg

    def _build_detector(self):
        """Build and weight-initialize a detector from ``self.model_cfg``."""
        from mmdet.registry import MODELS
        detector = MODELS.build(self.model_cfg)
        detector.init_weights()
        return detector

    def test_init(self):
        cfg = get_detector_cfg(
            'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
        cfg.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(cfg)
        self.assertTrue(detector.bbox_head is not None)
        self.assertTrue(detector.backbone is not None)
        # CornerNet has no neck in its default config
        self.assertTrue(not hasattr(detector, 'neck'))

    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_cornernet_forward_loss_mode(self):
        detector = self._build_detector()
        packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
        data = detector.data_preprocessor(packed_inputs, True)
        losses = detector.forward(**data, mode='loss')
        assert isinstance(losses, dict)

    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_cornernet_forward_predict_mode(self):
        detector = self._build_detector()
        packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
        data = detector.data_preprocessor(packed_inputs, False)
        # Test forward test
        detector.eval()
        with torch.no_grad():
            batch_results = detector.forward(**data, mode='predict')
            assert len(batch_results) == 2
            assert isinstance(batch_results[0], DetDataSample)

    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_cornernet_forward_tensor_mode(self):
        detector = self._build_detector()
        packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
        data = detector.data_preprocessor(packed_inputs, False)
        batch_results = detector.forward(**data, mode='tensor')
        assert isinstance(batch_results, tuple)
| 3,271 | 34.182796 | 73 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_conditional_detr.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestConditionalDETR(TestCase):
    """Smoke tests for the Conditional-DETR detector and its loss."""

    def setUp(self) -> None:
        register_all_modules()

    def test_conditional_detr_head_loss(self):
        """Tests transformer head loss when truth is empty and non-empty."""
        s = 256
        data_sample = DetDataSample()
        data_sample.set_metainfo(
            dict(
                img_shape=(s, s),
                scale_factor=(1, 1),
                pad_shape=(s, s),
                batch_input_shape=(s, s)))
        model = MODELS.build(
            get_detector_cfg(
                'conditional_detr/conditional-detr_r50_8xb2-50e_coco.py'))
        model.init_weights()
        dummy_image = torch.rand(1, 3, s, s)

        # An empty ground truth should push predictions towards background:
        # classification loss is non-zero while box/iou losses are zero.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        data_sample.gt_instances = empty_gt
        empty_gt_losses = model.loss(
            dummy_image, batch_data_samples=[data_sample])
        for key, loss in empty_gt_losses.items():
            if 'cls' in key:
                self.assertGreater(loss.item(), 0,
                                   'cls loss should be non-zero')
            elif 'bbox' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no box loss when no ground true boxes')
            elif 'iou' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no iou loss when there are no true boxes')

        # With a real box, every loss term must be non-zero for random input.
        real_gt = InstanceData()
        real_gt.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        real_gt.labels = torch.LongTensor([2])
        data_sample.gt_instances = real_gt
        one_gt_losses = model.loss(
            dummy_image, batch_data_samples=[data_sample])
        for loss in one_gt_losses.values():
            self.assertGreater(
                loss.item(), 0,
                'cls loss, or box loss, or iou loss should be non-zero')

        model.eval()
        # test _forward
        model._forward(dummy_image, batch_data_samples=[data_sample])
        # test only predict
        model.predict(
            dummy_image, batch_data_samples=[data_sample], rescale=True)
| 3,175 | 35.930233 | 79 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_deformable_detr.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDeformableDETR(TestCase):
    """Smoke tests for the three Deformable-DETR variants."""

    def setUp(self):
        register_all_modules()

    def test_deformable_detr_head_loss(self):
        """Tests transformer head loss when truth is empty and non-empty."""
        s = 256
        data_sample = DetDataSample()
        data_sample.set_metainfo(
            dict(
                img_shape=(s, s),
                scale_factor=(1, 1),
                pad_shape=(s, s),
                batch_input_shape=(s, s)))
        cfg_files = [
            'deformable_detr/deformable-detr_r50_16xb2-50e_coco.py',
            'deformable_detr/deformable-detr-refine_r50_16xb2-50e_coco.py',
            'deformable_detr/deformable-detr-refine-twostage_r50_16xb2-50e_coco.py',  # noqa
        ]
        for cfg_file in cfg_files:
            model = MODELS.build(get_detector_cfg(cfg_file))
            model.init_weights()
            dummy_image = torch.rand(1, 3, s, s)

            # An empty ground truth should push predictions towards
            # background: cls loss non-zero, box/iou losses exactly zero.
            empty_gt = InstanceData()
            empty_gt.bboxes = torch.empty((0, 4))
            empty_gt.labels = torch.LongTensor([])
            data_sample.gt_instances = empty_gt
            empty_gt_losses = model.loss(
                dummy_image, batch_data_samples=[data_sample])
            for key, loss in empty_gt_losses.items():
                if 'cls' in key:
                    self.assertGreater(loss.item(), 0,
                                       'cls loss should be non-zero')
                elif 'bbox' in key:
                    self.assertEqual(
                        loss.item(), 0,
                        'there should be no box loss when no ground true boxes'
                    )
                elif 'iou' in key:
                    self.assertEqual(
                        loss.item(), 0,
                        'there should be no iou loss when no ground true boxes'
                    )

            # With a real box, every loss term must be non-zero.
            real_gt = InstanceData()
            real_gt.bboxes = torch.Tensor(
                [[23.6667, 23.8757, 238.6326, 151.8874]])
            real_gt.labels = torch.LongTensor([2])
            data_sample.gt_instances = real_gt
            one_gt_losses = model.loss(
                dummy_image, batch_data_samples=[data_sample])
            for loss in one_gt_losses.values():
                self.assertGreater(
                    loss.item(), 0,
                    'cls loss, or box loss, or iou loss should be non-zero')

            model.eval()
            # test _forward
            model._forward(dummy_image, batch_data_samples=[data_sample])
            # test only predict
            model.predict(
                dummy_image, batch_data_samples=[data_sample], rescale=True)
| 3,777 | 36.78 | 95 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_iou2d_calculator.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.evaluation import bbox_overlaps as recall_overlaps
from mmdet.models.task_modules import BboxOverlaps2D
from mmdet.structures.bbox import bbox_overlaps
def test_bbox_overlaps_2d(eps=1e-7):
    """Exercise ``BboxOverlaps2D`` on aligned/unaligned, batched, empty and
    score-augmented inputs, and check ``bbox_overlaps`` GIoU/IoF values
    against reference numbers.

    Args:
        eps (float): Numerical tolerance forwarded to ``bbox_overlaps``.
    """

    def _construct_bbox(num_bbox=None):
        # Random valid boxes (x1y1 <= x2y2) scaled into a random image size.
        img_h = int(np.random.randint(3, 1000))
        img_w = int(np.random.randint(3, 1000))
        if num_bbox is None:
            num_bbox = np.random.randint(1, 10)
        x1y1 = torch.rand((num_bbox, 2))
        x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
        bboxes = torch.cat((x1y1, x2y2), -1)
        bboxes[:, 0::2] *= img_w
        bboxes[:, 1::2] *= img_h
        return bboxes, num_bbox

    # Renamed from ``self`` (which misleadingly read like a method receiver
    # in a module-level function) to a descriptive name; behavior unchanged.
    iou2d_calculator = BboxOverlaps2D()
    # is_aligned is True, bboxes.size(-1) == 5 (include score)
    bboxes1, num_bbox = _construct_bbox()
    bboxes2, _ = _construct_bbox(num_bbox)
    bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)
    bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)
    gious = iou2d_calculator(bboxes1, bboxes2, 'giou', True)
    assert gious.size() == (num_bbox, ), gious.size()
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    # is_aligned is True, bboxes1.size(-2) == 0
    bboxes1 = torch.empty((0, 4))
    bboxes2 = torch.empty((0, 4))
    gious = iou2d_calculator(bboxes1, bboxes2, 'giou', True)
    assert gious.size() == (0, ), gious.size()
    assert torch.all(gious == torch.empty((0, )))
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    # is_aligned is True, and bboxes.ndims > 2
    bboxes1, num_bbox = _construct_bbox()
    bboxes2, _ = _construct_bbox(num_bbox)
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
    # test assertion when batch dim is not the same
    with pytest.raises(AssertionError):
        iou2d_calculator(bboxes1,
                         bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
    gious = iou2d_calculator(bboxes1, bboxes2, 'giou', True)
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, num_bbox)
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)
    gious = iou2d_calculator(bboxes1, bboxes2, 'giou', True)
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, 2, num_bbox)
    # is_aligned is False
    bboxes1, num_bbox1 = _construct_bbox()
    bboxes2, num_bbox2 = _construct_bbox()
    gious = iou2d_calculator(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (num_bbox1, num_bbox2)
    # is_aligned is False, and bboxes.ndims > 2
    bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
    bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
    gious = iou2d_calculator(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (2, num_bbox1, num_bbox2)
    bboxes1 = bboxes1.unsqueeze(0)
    bboxes2 = bboxes2.unsqueeze(0)
    gious = iou2d_calculator(bboxes1, bboxes2, 'giou')
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    assert gious.size() == (1, 2, num_bbox1, num_bbox2)
    # is_aligned is False, bboxes1.size(-2) == 0
    gious = iou2d_calculator(torch.empty(1, 2, 0, 4), bboxes2, 'giou')
    assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))
    assert torch.all(gious >= -1) and torch.all(gious <= 1)
    # test allclose between bbox_overlaps and the original official
    # implementation.
    bboxes1 = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [32, 32, 38, 42],
    ])
    bboxes2 = torch.FloatTensor([
        [0, 0, 10, 20],
        [0, 10, 10, 19],
        [10, 10, 20, 20],
    ])
    gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)
    gious = gious.numpy().round(4)
    # the gt is got with four decimal precision.
    expected_gious = np.array([0.5000, -0.0500, -0.8214])
    assert np.allclose(gious, expected_gious, rtol=0, atol=eps)
    # test mode 'iof'
    ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)
    assert torch.all(ious >= -1) and torch.all(ious <= 1)
    assert ious.size() == (bboxes1.size(0), )
    ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)
    assert torch.all(ious >= -1) and torch.all(ious <= 1)
    assert ious.size() == (bboxes1.size(0), bboxes2.size(0))
def test_voc_recall_overlaps():
    """Check the numpy ``bbox_overlaps`` used by VOC recall evaluation."""

    def _random_bboxes(num_bbox=None):
        # Random valid boxes scaled into a random image, returned as numpy.
        img_h = int(np.random.randint(3, 1000))
        img_w = int(np.random.randint(3, 1000))
        if num_bbox is None:
            num_bbox = np.random.randint(1, 10)
        top_left = torch.rand((num_bbox, 2))
        bottom_right = torch.max(torch.rand((num_bbox, 2)), top_left)
        boxes = torch.cat((top_left, bottom_right), -1)
        boxes[:, 0::2] *= img_w
        boxes[:, 1::2] *= img_h
        return boxes.numpy(), num_bbox

    bboxes1, num_bbox = _random_bboxes()
    bboxes2, _ = _random_bboxes(num_bbox)
    # Both coordinate conventions must produce a valid pairwise IoU matrix.
    for legacy in (False, True):
        ious = recall_overlaps(
            bboxes1, bboxes2, 'iou', use_legacy_coordinate=legacy)
        assert ious.shape == (num_bbox, num_bbox)
        assert np.all(ious >= -1) and np.all(ious <= 1)
| 5,339 | 38.555556 | 79 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_coder/test_delta_xywh_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder
def test_delta_bbox_coder():
    """Behaviour checks for ``DeltaXYWHBBoxCoder.decode``."""
    coder = DeltaXYWHBBoxCoder()
    rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
                         [5., 5., 5., 5.]])
    deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
    expected = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
                             [0.1409, 0.1409, 2.8591, 2.8591],
                             [0.0000, 0.3161, 4.1945, 0.6839],
                             [5.0000, 5.0000, 5.0000, 5.0000]])
    # max_shape may be given either as a tuple or as a tensor.
    for max_shape in ((32, 32), torch.Tensor((32, 32))):
        decoded = coder.decode(rois, deltas, max_shape=max_shape)
        assert expected.allclose(decoded, atol=1e-04)
    # Batched decode must agree with the single-image result.
    batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
    batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
    batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
    assert decoded.allclose(batch_out)
    batch_out = coder.decode(
        batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
    assert decoded.allclose(batch_out)
    # A per-image max_shape list must match the batch size.
    with pytest.raises(AssertionError):
        coder.decode(
            batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
    # Empty input decodes to an equally empty output.
    empty_rois = torch.zeros((0, 4))
    empty_deltas = torch.zeros((0, 4))
    empty_out = coder.decode(empty_rois, empty_deltas, max_shape=(32, 32))
    assert empty_rois.shape == empty_out.shape
    # test add_ctr_clamp
    clamp_coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
    rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
                         [5., 5., 5., 5.]])
    deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
                           [0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
    expected = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
                             [0.1409, 0.1409, 2.8591, 2.8591],
                             [0.0000, 0.3161, 4.1945, 0.6839],
                             [5.0000, 5.0000, 5.0000, 5.0000]])
    decoded = clamp_coder.decode(rois, deltas, max_shape=(32, 32))
    assert expected.allclose(decoded, atol=1e-04)
| 2,494 | 42.77193 | 79 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_point_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import PointAssigner
class TestPointAssigner(unittest.TestCase):
    """Tests for ``PointAssigner``, including empty-gt and empty-point
    corner cases."""

    @staticmethod
    def _grid_points():
        # Each row is [x, y, stride].
        return torch.FloatTensor([
            [0, 0, 1],
            [10, 10, 1],
            [5, 5, 1],
            [32, 32, 1],
        ])

    def test_point_assigner(self):
        assigner = PointAssigner()
        pred_instances = InstanceData()
        pred_instances.priors = self._grid_points()
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_instances.labels = torch.LongTensor([0, 1])
        result = assigner.assign(pred_instances, gt_instances)
        assert_allclose(result.gt_inds, torch.LongTensor([1, 2, 1, 0]))

    def test_point_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        assigner = PointAssigner()
        pred_instances = InstanceData()
        pred_instances.priors = self._grid_points()
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.FloatTensor([])
        gt_instances.labels = torch.LongTensor([])
        result = assigner.assign(pred_instances, gt_instances)
        # every point falls back to background (index 0)
        assert_allclose(result.gt_inds, torch.LongTensor([0, 0, 0, 0]))

    def test_point_assigner_with_empty_boxes_and_gt(self):
        """Test corner case where an image might predict no points and no
        gt."""
        assigner = PointAssigner()
        pred_instances = InstanceData()
        pred_instances.priors = torch.FloatTensor([])
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.FloatTensor([])
        gt_instances.labels = torch.LongTensor([])
        result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(result.gt_inds), 0)
| 2,249 | 34.714286 | 76 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_hungarian_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import HungarianAssigner
class TestHungarianAssigner(TestCase):
    """Tests for ``HungarianAssigner`` with different match-cost configs."""

    def _assert_all_gts_matched(self, assign_result, num_gts):
        # Hungarian matching is one-to-one, so exactly ``num_gts``
        # predictions become positives and carry a valid label.
        self.assertTrue(torch.all(assign_result.gt_inds > -1))
        self.assertEqual((assign_result.gt_inds > 0).sum(), num_gts)
        self.assertEqual((assign_result.labels > -1).sum(), num_gts)

    def test_init(self):
        # an empty cost list is rejected
        with self.assertRaises(AssertionError):
            HungarianAssigner([])

    def test_hungarian_match_assigner(self):
        assigner = HungarianAssigner([
            dict(type='ClassificationCost', weight=1.),
            dict(type='BBoxL1Cost', weight=5.0),
            dict(type='IoUCost', iou_mode='giou', weight=2.0)
        ])
        img_meta = dict(img_shape=(10, 8))
        pred_instances = InstanceData()
        pred_instances.scores = torch.rand((10, 81))
        pred_instances.bboxes = torch.rand((10, 4))
        # without gt boxes every prediction must stay background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4)).float()
        gt_instances.labels = torch.empty((0, )).long()
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self.assertTrue(torch.all(assign_result.gt_inds == 0))
        self.assertTrue(torch.all(assign_result.labels == -1))
        # with gt boxes every gt must be matched
        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
        gt_instances.labels = torch.LongTensor([1, 20])
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_all_gts_matched(assign_result,
                                     gt_instances.bboxes.size(0))

    def test_bbox_match_cost(self):
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
        gt_instances.labels = torch.LongTensor([1, 20])
        pred_instances = InstanceData()
        pred_instances.scores = torch.rand((10, 81))
        pred_instances.bboxes = torch.rand((10, 4))
        img_meta = dict(img_shape=(10, 8))
        num_gts = gt_instances.bboxes.size(0)
        # test IoUCost
        assigner = HungarianAssigner(
            ConfigDict(dict(type='IoUCost', iou_mode='iou')))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_all_gts_matched(assign_result, num_gts)
        # test BBoxL1Cost
        assigner = HungarianAssigner(ConfigDict(dict(type='BBoxL1Cost')))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_all_gts_matched(assign_result, num_gts)

    def test_cls_match_cost(self):
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
        gt_instances.labels = torch.LongTensor([1, 20])
        pred_instances = InstanceData()
        pred_instances.scores = torch.rand((10, 81))
        pred_instances.bboxes = torch.rand((10, 4))
        img_meta = dict(img_shape=(10, 8))
        num_gts = gt_instances.bboxes.size(0)
        # test FocalLossCost
        assigner = HungarianAssigner(dict(type='FocalLossCost'))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_all_gts_matched(assign_result, num_gts)
        # test ClassificationCost
        assigner = HungarianAssigner(dict(type='ClassificationCost'))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_all_gts_matched(assign_result, num_gts)

    def test_mask_match_cost(self):
        gt_instances = InstanceData()
        gt_instances.masks = torch.randint(0, 2, (2, 10, 10)).long()
        gt_instances.labels = torch.LongTensor([1, 20])
        pred_instances = InstanceData()
        pred_instances.masks = torch.rand((4, 10, 10))
        pred_instances.scores = torch.rand((4, 25))
        img_meta = dict(img_shape=(10, 10))
        num_gts = gt_instances.masks.size(0)
        # test DiceCost
        assigner = HungarianAssigner(dict(type='DiceCost'))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_all_gts_matched(assign_result, num_gts)
        # test CrossEntropyLossCost
        assigner = HungarianAssigner(dict(type='CrossEntropyLossCost'))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_all_gts_matched(assign_result, num_gts)
        # test FocalLossCost
        assigner = HungarianAssigner(
            dict(type='FocalLossCost', binary_input=True))
        assign_result = assigner.assign(
            pred_instances, gt_instances, img_meta=img_meta)
        self._assert_all_gts_matched(assign_result, num_gts)
| 6,481 | 42.503356 | 77 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_simota_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import SimOTAAssigner
class TestSimOTAAssigner(TestCase):
    """Tests for ``SimOTAAssigner`` covering matched, unmatched and
    empty-gt cases."""

    @staticmethod
    def _make_assigner():
        return SimOTAAssigner(
            center_radius=2.5,
            candidate_topk=1,
            iou_weight=3.0,
            cls_weight=1.0)

    def test_assign(self):
        assigner = self._make_assigner()
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]]))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43]]),
            labels=torch.LongTensor([0]))
        result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        # only the first prediction overlaps the single gt
        assert_allclose(result.gt_inds, torch.LongTensor([1, 0]))

    def test_assign_with_no_valid_bboxes(self):
        assigner = self._make_assigner()
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[123, 123, 143, 143], [114, 151, 161, 171]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[30, 30, 8, 8], [55, 55, 8, 8]]))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[0, 0, 1, 1]]), labels=torch.LongTensor([0]))
        result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        # no prior center falls inside the gt, so nothing is assigned
        assert_allclose(result.gt_inds, torch.LongTensor([0, 0]))

    def test_assign_with_empty_gt(self):
        assigner = self._make_assigner()
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[[30, 40, 50, 60]], [[4, 5, 6, 7]]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]))
        gt_instances = InstanceData(
            bboxes=torch.empty(0, 4), labels=torch.empty(0))
        result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        # no gt means every prediction stays background
        assert_allclose(result.gt_inds, torch.LongTensor([0, 0]))
| 2,586 | 37.61194 | 78 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_atss_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import ATSSAssigner
class TestATSSAssigner(TestCase):
    """Tests for ``ATSSAssigner`` covering regular and degenerate inputs."""

    @staticmethod
    def _assign(priors, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
        """Wrap raw tensors in ``InstanceData`` and run a topk=9 assignment.

        ``num_level_bboxes`` is always a single level that holds every prior.
        """
        assigner = ATSSAssigner(topk=9)
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        if gt_bboxes_ignore is None:
            return assigner.assign(pred_instances, [priors.size(0)],
                                   gt_instances)
        return assigner.assign(
            pred_instances, [priors.size(0)],
            gt_instances,
            gt_instances_ignore=InstanceData(bboxes=gt_bboxes_ignore))

    def test_atss_assigner(self):
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])

        result = self._assign(priors, gt_bboxes, gt_labels)

        self.assertEqual(len(result.gt_inds), 4)
        self.assertEqual(len(result.labels), 4)
        self.assertTrue(
            torch.all(result.gt_inds == torch.LongTensor([1, 0, 0, 0])))

    def test_atss_assigner_with_ignore(self):
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [30, 32, 40, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        gt_bboxes_ignore = torch.Tensor([
            [30, 30, 40, 40],
        ])

        result = self._assign(
            priors, gt_bboxes, gt_labels, gt_bboxes_ignore=gt_bboxes_ignore)

        self.assertTrue(
            torch.all(result.gt_inds == torch.LongTensor([1, 0, 0, 0])))

    def test_atss_assigner_with_empty_gt(self):
        """Corner case: an image with no ground-truth boxes."""
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        result = self._assign(priors, torch.empty(0, 4), torch.empty(0))
        self.assertTrue(
            torch.all(result.gt_inds == torch.LongTensor([0, 0, 0, 0])))

    def test_atss_assigner_with_empty_boxes(self):
        """Corner case: the network predicts no boxes."""
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        result = self._assign(torch.empty((0, 4)), gt_bboxes, gt_labels)
        self.assertEqual(len(result.gt_inds), 0)
        self.assertTrue(tuple(result.labels.shape) == (0, ))

    def test_atss_assigner_with_empty_boxes_and_ignore(self):
        """Corner case: no predicted boxes while ignore boxes are given."""
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        gt_bboxes_ignore = torch.Tensor([
            [30, 30, 40, 40],
        ])
        result = self._assign(
            torch.empty((0, 4)),
            gt_bboxes,
            gt_labels,
            gt_bboxes_ignore=gt_bboxes_ignore)
        self.assertEqual(len(result.gt_inds), 0)
        self.assertTrue(tuple(result.labels.shape) == (0, ))

    def test_atss_assigner_with_empty_boxes_and_gt(self):
        """Corner case: no predicted boxes and no ground truth."""
        result = self._assign(
            torch.empty((0, 4)), torch.empty((0, 4)), torch.empty(0))
        self.assertEqual(len(result.gt_inds), 0)
| 5,500 | 36.168919 | 78 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_region_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import RegionAssigner
class TestRegionAssigner(TestCase):
    """Tests for ``RegionAssigner`` covering regular and degenerate inputs."""

    def setUp(self):
        # Shared single-level feature-map geometry used by every test.
        self.img_meta = ConfigDict(dict(img_shape=(256, 256)))
        self.featmap_sizes = [(64, 64)]
        self.anchor_scale = 10
        self.anchor_strides = [1]

    def _assign(self, assigner, priors, valid_flags, gt_bboxes, gt_labels):
        """Wrap raw tensors in ``InstanceData`` and run a single-level assign."""
        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        return assigner.assign(pred_instances, gt_instances, self.img_meta,
                               self.featmap_sizes, [priors.size(0)],
                               self.anchor_scale, self.anchor_strides)

    def test_region_assigner(self):
        assigner = RegionAssigner(center_ratio=0.5, ignore_ratio=0.8)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        valid_flags = torch.BoolTensor([1, 1, 1, 1])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])

        result = self._assign(assigner, priors, valid_flags, gt_bboxes,
                              gt_labels)

        self.assertEqual(len(result.gt_inds), 4)
        self.assertEqual(len(result.labels), 4)
        self.assertTrue(
            torch.all(result.gt_inds == torch.LongTensor([1, 0, 0, 0])))

    def test_region_assigner_with_ignore(self):
        """Ignore regions are unsupported and must raise."""
        assigner = RegionAssigner(center_ratio=0.5)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [30, 32, 40, 42],
        ])
        valid_flags = torch.BoolTensor([1, 1, 1, 1])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors, valid_flags=valid_flags)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        gt_instances_ignore = InstanceData(
            bboxes=torch.Tensor([[30, 30, 40, 40]]))

        with self.assertRaises(NotImplementedError):
            assigner.assign(
                pred_instances,
                gt_instances,
                self.img_meta,
                self.featmap_sizes,
                [4],
                self.anchor_scale,
                self.anchor_strides,
                gt_instances_ignore=gt_instances_ignore)

    def test_region_assigner_with_empty_gt(self):
        """Corner case: an image with no ground-truth boxes."""
        assigner = RegionAssigner(center_ratio=0.5)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        valid_flags = torch.BoolTensor([1, 1, 1, 1])
        result = self._assign(assigner, priors, valid_flags,
                              torch.empty(0, 4), torch.empty(0))
        self.assertTrue(
            torch.all(result.gt_inds == torch.LongTensor([0, 0, 0, 0])))

    # NOTE(review): the two methods below exercise RegionAssigner but kept
    # an "atss" prefix, presumably from a copy-paste; the names are
    # preserved here so existing test IDs do not change.
    def test_atss_assigner_with_empty_boxes(self):
        """Corner case: the network predicts no boxes."""
        assigner = RegionAssigner(center_ratio=0.5)
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        result = self._assign(assigner, torch.empty((0, 4)),
                              torch.BoolTensor([]), gt_bboxes, gt_labels)
        self.assertEqual(len(result.gt_inds), 0)
        self.assertTrue(tuple(result.labels.shape) == (0, ))

    def test_atss_assigner_with_empty_boxes_and_gt(self):
        """Corner case: no predicted boxes and no ground truth."""
        assigner = RegionAssigner(center_ratio=0.5)
        result = self._assign(assigner, torch.empty((0, 4)),
                              torch.BoolTensor([]), torch.empty((0, 4)),
                              torch.empty(0))
        self.assertEqual(len(result.gt_inds), 0)
| 5,479 | 38.42446 | 77 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_center_region_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import CenterRegionAssigner
class TestCenterRegionAssigner(TestCase):
    """Tests for ``CenterRegionAssigner`` on regular and degenerate inputs.

    Fix over the previous revision: the ignore-region test asserted the
    same expectation twice in a row; the duplicate is removed.
    """

    def test_center_region_assigner(self):
        center_region_assigner = CenterRegionAssigner(
            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances,
                                                      gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)
        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
        # Shadowed labels are exposed through the extra-property API.
        expected_shadowed_labels = torch.LongTensor([[2, 3]])
        shadowed_labels = assign_result.get_extra_property('shadowed_labels')
        self.assertTrue(torch.all(shadowed_labels == expected_shadowed_labels))

    def test_center_region_assigner_with_ignore(self):
        """Priors overlapping an ignore region are assigned -1."""
        center_region_assigner = CenterRegionAssigner(
            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [30, 32, 40, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        gt_bboxes_ignore = torch.Tensor([
            [30, 30, 40, 40],
        ])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
        assign_result = center_region_assigner.assign(
            pred_instances,
            gt_instances,
            gt_instances_ignore=gt_instances_ignore)
        expected_gt_inds = torch.LongTensor([1, 0, 0, -1])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))

    def test_center_region_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        center_region_assigner = CenterRegionAssigner(
            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.empty(0, 4)
        gt_labels = torch.empty(0)
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances,
                                                      gt_instances)
        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))

    def test_center_region_assigner_with_empty_boxes(self):
        """Test corner case where a network might predict no boxes."""
        center_region_assigner = CenterRegionAssigner(
            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances,
                                                      gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)
        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))

    def test_center_region_assigner_with_empty_boxes_and_ignore(self):
        """Test corner case where a network might predict no boxes and
        ignore boxes are given."""
        center_region_assigner = CenterRegionAssigner(
            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_bboxes_ignore = torch.Tensor([
            [30, 30, 40, 40],
        ])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
        assign_result = center_region_assigner.assign(
            pred_instances,
            gt_instances,
            gt_instances_ignore=gt_instances_ignore)
        self.assertEqual(len(assign_result.gt_inds), 0)
        self.assertTrue(tuple(assign_result.labels.shape) == (0, ))

    def test_center_region_assigner_with_empty_boxes_and_gt(self):
        """Test corner case where a network might predict no boxes and no
        gt."""
        center_region_assigner = CenterRegionAssigner(
            pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.empty((0, 4))
        gt_labels = torch.empty(0)
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances,
                                                      gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)
| 6,061 | 39.413333 | 79 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_grid_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import GridAssigner
class TestGridAssigner(TestCase):
    """Tests for ``GridAssigner``: threshold variants and degenerate inputs.

    Fix over the previous revision: ``torch.Tensor(torch.empty(0, 4))``
    wrapped an already-constructed tensor (an extra copy); it is now
    simply ``torch.empty(0, 4)``.
    """

    def test_assign(self):
        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
        pred_instances = InstanceData(
            priors=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),
            responsible_flags=torch.BoolTensor([1, 1]))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43]]),
            labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

        # invalid neg_iou_thr: only scalars or 2-tuples are accepted
        with self.assertRaises(AssertionError):
            assigner = GridAssigner(
                pos_iou_thr=0.5, neg_iou_thr=[0.3, 0.1, 0.4])
            assigner.assign(
                pred_instances=pred_instances, gt_instances=gt_instances)

        # multi-neg_iou_thr: IoUs inside (0.1, 0.3) become ignored (-1)
        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=(0.1, 0.3))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, -1])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

        # gt_max_assign_all=False
        assigner = GridAssigner(
            pos_iou_thr=0.5, neg_iou_thr=0.3, gt_max_assign_all=False)
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

        # large min_pos_iou
        assigner = GridAssigner(
            pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=1)
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_assign_with_empty_gt(self):
        """Corner case: an image with no ground-truth boxes."""
        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
        pred_instances = InstanceData(
            priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]),
            responsible_flags=torch.BoolTensor([1, 1]))
        gt_instances = InstanceData(
            bboxes=torch.empty(0, 4), labels=torch.empty(0))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([0, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_assign_with_empty_priors(self):
        """Corner case: the network predicts no boxes."""
        assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
        pred_instances = InstanceData(
            priors=torch.empty(0, 4),
            responsible_flags=torch.empty(0))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43]]),
            labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)
| 3,428 | 40.817073 | 73 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_dynamic_soft_label_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import DynamicSoftLabelAssigner
from mmdet.structures.bbox import HorizontalBoxes
class TestDynamicSoftLabelAssigner(TestCase):
    """Tests for ``DynamicSoftLabelAssigner`` on regular and corner cases.

    Fix over the previous revision: the empty-gt test constructed
    ``bboxes`` with an extra nesting level (shape ``(2, 1, 4)``); it now
    uses the same ``(2, 4)`` layout as every other test. The gt set is
    empty, so the expected assignment is unchanged.
    """

    def test_assign(self):
        assigner = DynamicSoftLabelAssigner(
            soft_center_radius=3.0, topk=1, iou_weight=3.0)
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]]))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43]]),
            labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_assign_with_no_valid_bboxes(self):
        """No prior center falls inside the gt box: all background."""
        assigner = DynamicSoftLabelAssigner(
            soft_center_radius=3.0, topk=1, iou_weight=3.0)
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[123, 123, 143, 143], [114, 151, 161, 171]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[30, 30, 8, 8], [55, 55, 8, 8]]))
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[0, 0, 1, 1]]), labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([0, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_assign_with_empty_gt(self):
        """Corner case: an image with no ground-truth boxes."""
        assigner = DynamicSoftLabelAssigner(
            soft_center_radius=3.0, topk=1, iou_weight=3.0)
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[30, 40, 50, 60], [4, 5, 6, 7]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]]))
        gt_instances = InstanceData(
            bboxes=torch.empty(0, 4), labels=torch.empty(0))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([0, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)

    def test_box_type_input(self):
        """gt boxes wrapped in ``HorizontalBoxes`` behave like raw tensors."""
        assigner = DynamicSoftLabelAssigner(
            soft_center_radius=3.0, topk=1, iou_weight=3.0)
        pred_instances = InstanceData(
            bboxes=torch.Tensor([[23, 23, 43, 43], [4, 5, 6, 7]]),
            scores=torch.FloatTensor([[0.2], [0.8]]),
            priors=torch.Tensor([[30, 30, 8, 8], [4, 5, 6, 7]]))
        gt_instances = InstanceData(
            bboxes=HorizontalBoxes(torch.Tensor([[23, 23, 43, 43]])),
            labels=torch.LongTensor([0]))
        assign_result = assigner.assign(
            pred_instances=pred_instances, gt_instances=gt_instances)
        expected_gt_inds = torch.LongTensor([1, 0])
        assert_allclose(assign_result.gt_inds, expected_gt_inds)
| 3,259 | 42.466667 | 78 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_max_iou_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Tests the Assigner objects.
CommandLine:
pytest tests/test_core/test_bbox/test_assigners/test_max_iou_assigner.py
xdoctest tests/test_core/test_bbox/test_assigners/test_max_iou_assigner.py zero
""" # noqa
import pytest
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import MaxIoUAssigner
@pytest.mark.parametrize('neg_iou_thr', [0.5, (0, 0.5)])
def test_max_iou_assigner(neg_iou_thr):
    """Scalar and tuple ``neg_iou_thr`` produce the same assignment here."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=neg_iou_thr)
    pred_instances = InstanceData(
        priors=torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ]))
    gt_instances = InstanceData(
        bboxes=torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ]),
        labels=torch.LongTensor([2, 3]))

    assign_result = assigner.assign(pred_instances, gt_instances)

    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4
    assert torch.all(assign_result.gt_inds == torch.LongTensor([1, 0, 2, 0]))
def test_max_iou_assigner_with_ignore():
    """Priors overlapping an ignore region get the ignore index -1."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    pred_instances = InstanceData(
        priors=torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [30, 32, 40, 42],
        ]))
    gt_instances = InstanceData(
        bboxes=torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ]),
        labels=torch.LongTensor([2, 3]))
    gt_instances_ignore = InstanceData(
        bboxes=torch.Tensor([[30, 30, 40, 40]]))

    assign_result = assigner.assign(
        pred_instances, gt_instances, gt_instances_ignore=gt_instances_ignore)

    assert torch.all(assign_result.gt_inds == torch.LongTensor([1, 0, 2, -1]))
def test_max_iou_assigner_with_empty_gt():
    """Corner case: an image with no true detections assigns all to 0."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    pred_instances = InstanceData(
        priors=torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ]))
    gt_instances = InstanceData(
        bboxes=torch.empty(0, 4), labels=torch.empty(0))

    assign_result = assigner.assign(pred_instances, gt_instances)

    assert torch.all(assign_result.gt_inds == torch.LongTensor([0, 0, 0, 0]))
def test_max_iou_assigner_with_empty_priors():
    """Corner case: a network that predicts no boxes yields empty results."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    pred_instances = InstanceData(priors=torch.empty((0, 4)))
    gt_instances = InstanceData(
        bboxes=torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ]),
        labels=torch.LongTensor([2, 3]))

    assign_result = assigner.assign(pred_instances, gt_instances)

    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )
def test_max_iou_assigner_with_empty_boxes_and_ignore():
    """Corner case: no predicted boxes while ``ignore_iof_thr`` is on."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
    )
    pred_instances = InstanceData(priors=torch.empty((0, 4)))
    gt_instances = InstanceData(
        bboxes=torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ]),
        labels=torch.LongTensor([2, 3]))
    gt_instances_ignore = InstanceData(
        bboxes=torch.Tensor([[30, 30, 40, 40]]))

    assign_result = assigner.assign(
        pred_instances, gt_instances, gt_instances_ignore=gt_instances_ignore)

    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )
def test_max_iou_assigner_with_empty_priors_and_gt():
    """Corner case: no predicted boxes and no ground truth at all."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    pred_instances = InstanceData(priors=torch.empty(0, 4))
    gt_instances = InstanceData(
        bboxes=torch.empty(0, 4), labels=torch.empty(0))

    assign_result = assigner.assign(pred_instances, gt_instances)

    assert len(assign_result.gt_inds) == 0
| 5,025 | 29.834356 | 84 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_approx_max_iou_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import ApproxMaxIoUAssigner
class TestApproxIoUAssigner(TestCase):
    """Tests for ``ApproxMaxIoUAssigner`` on regular and degenerate inputs."""

    @staticmethod
    def _assign(bboxes, gt_bboxes, gt_labels):
        """Run a 0.5/0.5 assignment; each prior is its own single approx."""
        assigner = ApproxMaxIoUAssigner(
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
        )
        pred_instances = InstanceData(
            priors=bboxes, approxs=bboxes[:, None, :])
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        return assigner.assign(pred_instances, gt_instances)

    def test_approx_iou_assigner(self):
        bboxes = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        result = self._assign(bboxes, gt_bboxes, torch.LongTensor([2, 3]))
        self.assertTrue(
            torch.all(result.gt_inds == torch.LongTensor([1, 0, 2, 0])))

    def test_approx_iou_assigner_with_empty_gt(self):
        """Corner case: an image with no true detections."""
        bboxes = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        result = self._assign(bboxes, torch.FloatTensor([]),
                              torch.LongTensor([]))
        self.assertTrue(
            torch.all(result.gt_inds == torch.LongTensor([0, 0, 0, 0])))

    def test_approx_iou_assigner_with_empty_boxes(self):
        """Corner case: the network predicts no boxes."""
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        result = self._assign(
            torch.empty((0, 4)), gt_bboxes, torch.LongTensor([2, 3]))
        self.assertEqual(len(result.gt_inds), 0)

    def test_approx_iou_assigner_with_empty_boxes_and_gt(self):
        """Corner case: no predicted boxes and no ground truth."""
        result = self._assign(
            torch.empty((0, 4)), torch.empty((0, 4)), torch.LongTensor([]))
        self.assertEqual(len(result.gt_inds), 0)
| 3,680 | 32.770642 | 77 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_task_uniform_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.task_modules.assigners import UniformAssigner
class TestUniformAssigner(TestCase):
    """Tests for ``UniformAssigner`` on regular and degenerate inputs."""

    @staticmethod
    def _assign(pred_bbox, anchor, gt_bboxes, gt_labels):
        """Run a (0.15, 0.7, 1) uniform assignment over the given tensors."""
        assigner = UniformAssigner(0.15, 0.7, 1)
        pred_instances = InstanceData(
            priors=anchor, decoder_priors=pred_bbox)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        return assigner.assign(pred_instances, gt_instances)

    def test_uniform_assigner(self):
        pred_bbox = torch.FloatTensor([
            [1, 1, 12, 8],
            [4, 4, 20, 20],
            [1, 5, 15, 15],
            [30, 5, 32, 42],
        ])
        anchor = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])

        result = self._assign(pred_bbox, anchor, gt_bboxes, gt_labels)

        self.assertEqual(len(result.gt_inds), 4)
        self.assertEqual(len(result.labels), 4)
        assert_allclose(result.gt_inds, torch.LongTensor([-1, 0, 2, 0]))

    def test_uniform_assigner_with_empty_gt(self):
        """Corner case: an image with no true detections."""
        pred_bbox = torch.FloatTensor([
            [1, 1, 12, 8],
            [4, 4, 20, 20],
            [1, 5, 15, 15],
            [30, 5, 32, 42],
        ])
        anchor = torch.FloatTensor([
            [0, 0, 10, 10],
            [10, 10, 20, 20],
            [5, 5, 15, 15],
            [32, 32, 38, 42],
        ])

        result = self._assign(pred_bbox, anchor, torch.empty(0, 4),
                              torch.empty(0))

        assert_allclose(result.gt_inds, torch.LongTensor([0, 0, 0, 0]))

    def test_uniform_assigner_with_empty_boxes(self):
        """Corner case: the network predicts no boxes."""
        gt_bboxes = torch.FloatTensor([
            [0, 0, 10, 9],
            [0, 10, 10, 19],
        ])
        gt_labels = torch.LongTensor([2, 3])

        result = self._assign(
            torch.empty((0, 4)), torch.empty((0, 4)), gt_bboxes, gt_labels)

        self.assertEqual(len(result.gt_inds), 0)
        self.assertEqual(tuple(result.labels.shape), (0, ))
| 3,299 | 34.106383 | 76 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_assigners/test_task_aligned_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners import TaskAlignedAssigner
class TestTaskAlignedAssigner(TestCase):
    """Tests for ``TaskAlignedAssigner``: invalid topk, a regular case and
    an empty-gt corner case."""

    def test_task_aligned_assigner(self):
        # topk must be a positive integer.
        with self.assertRaises(AssertionError):
            TaskAlignedAssigner(topk=0)

        assigner = TaskAlignedAssigner(topk=13)
        pred_instances = InstanceData(
            priors=torch.FloatTensor([
                [0, 0, 10, 10],
                [10, 10, 20, 20],
                [5, 5, 15, 15],
                [32, 32, 38, 42],
            ]),
            bboxes=torch.FloatTensor([
                [1, 1, 12, 8],
                [4, 4, 20, 20],
                [1, 5, 15, 15],
                [30, 5, 32, 42],
            ]),
            scores=torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],
                                      [0.4, 0.5]]))
        gt_instances = InstanceData(
            bboxes=torch.FloatTensor([
                [0, 0, 10, 9],
                [0, 10, 10, 19],
            ]),
            labels=torch.LongTensor([0, 1]))

        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)

        # An empty gt set assigns every prior to background (index 0).
        gt_instances = InstanceData(
            bboxes=torch.empty(0, 4), labels=torch.empty(0, 2).long())
        assign_result = assigner.assign(pred_instances, gt_instances)
        self.assertTrue(
            torch.all(assign_result.gt_inds == torch.LongTensor([0, 0, 0, 0])))
| 1,934 | 32.362069 | 77 | py |
ERD | ERD-main/tests/test_models/test_task_modules/test_prior_generators/test_anchor_generator.py | # Copyright (c) OpenMMLab. All rights reserved.
"""
CommandLine:
pytest tests/test_utils/test_anchor.py
xdoctest tests/test_utils/test_anchor.py zero
"""
import pytest
import torch
def _check_mlvl_point_generator(device):
    """Exercise ``MlvlPointGenerator`` on ``device``.

    Shared by the CPU and CUDA runs of ``test_standard_points_generator``;
    the previous revision duplicated this whole body per device (and the
    CUDA copy dropped the ``num_base_priors`` check, restored here).
    """
    from mmdet.models.task_modules import build_prior_generator
    from mmdet.models.task_modules.prior_generators import MlvlPointGenerator

    # Building through the registry must work and expose one base prior
    # per level.
    anchor_generator_cfg = dict(
        type='MlvlPointGenerator', strides=[4, 8], offset=0)
    anchor_generator = build_prior_generator(anchor_generator_cfg)
    assert anchor_generator is not None
    assert anchor_generator.num_base_priors == [1, 1]

    # Square strides
    mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
    mlvl_points_half_stride_generator = MlvlPointGenerator(
        strides=[4, 10], offset=0.5)
    assert mlvl_points.num_levels == 2

    # assert self.num_levels == len(featmap_sizes)
    with pytest.raises(AssertionError):
        mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device=device)
    priors = mlvl_points.grid_priors(
        featmap_sizes=[(2, 2), (4, 8)], device=device)
    priors_with_stride = mlvl_points.grid_priors(
        featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device=device)
    assert len(priors) == 2

    # assert last dimension is (coord_x, coord_y, stride_w, stride_h).
    assert priors_with_stride[0].size(1) == 4
    assert priors_with_stride[0][0][2] == 4
    assert priors_with_stride[0][0][3] == 4
    assert priors_with_stride[1][0][2] == 10
    assert priors_with_stride[1][0][3] == 10

    stride_4_feat_2_2 = priors[0]
    assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4
    assert stride_4_feat_2_2.size(0) == 4
    assert stride_4_feat_2_2.size(1) == 2

    stride_10_feat_4_8 = priors[1]
    assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10
    assert stride_10_feat_4_8.size(0) == 4 * 8
    assert stride_10_feat_4_8.size(1) == 2

    # assert the offset of 0.5 * stride
    priors_half_offset = mlvl_points_half_stride_generator.grid_priors(
        featmap_sizes=[(2, 2), (4, 8)], device=device)
    assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2
    assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2


def test_standard_points_generator():
    """Run the point-generator checks on CPU and, when available, CUDA."""
    _check_mlvl_point_generator('cpu')
    if torch.cuda.is_available():
        _check_mlvl_point_generator('cuda')
def test_sparse_prior():
    """Check ``sparse_priors`` against fancy-indexing ``grid_priors``.

    For each prior-generator flavour (point, anchor, SSD, YOLO) looking up a
    subset of prior indices via ``sparse_priors`` must yield exactly the rows
    obtained by indexing the dense grid output with the same indices.  The
    whole battery is repeated on GPU when CUDA is available.
    """
    from mmdet.models.task_modules.prior_generators import MlvlPointGenerator
    mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
    prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
    featmap_sizes = [(3, 5), (6, 4)]
    grid_anchors = mlvl_points.grid_priors(
        featmap_sizes=featmap_sizes, with_stride=False, device='cpu')
    # Point priors, level 0: sparse lookup must stay on CPU and match
    # the dense grid rows selected by the same indices.
    sparse_prior = mlvl_points.sparse_priors(
        prior_idxs=prior_indexs,
        featmap_size=featmap_sizes[0],
        level_idx=0,
        device='cpu')
    assert not sparse_prior.is_cuda
    assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
    # Point priors, level 1.
    sparse_prior = mlvl_points.sparse_priors(
        prior_idxs=prior_indexs,
        featmap_size=featmap_sizes[1],
        level_idx=1,
        device='cpu')
    assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
    # Same consistency check for box anchors.
    from mmdet.models.task_modules.prior_generators import AnchorGenerator
    mlvl_anchors = AnchorGenerator(
        strides=[16, 32], ratios=[1.], scales=[1.], base_sizes=[4, 8])
    prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
    featmap_sizes = [(3, 5), (6, 4)]
    grid_anchors = mlvl_anchors.grid_priors(
        featmap_sizes=featmap_sizes, device='cpu')
    sparse_prior = mlvl_anchors.sparse_priors(
        prior_idxs=prior_indexs,
        featmap_size=featmap_sizes[0],
        level_idx=0,
        device='cpu')
    assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
    sparse_prior = mlvl_anchors.sparse_priors(
        prior_idxs=prior_indexs,
        featmap_size=featmap_sizes[1],
        level_idx=1,
        device='cpu')
    assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
    # for ssd
    from mmdet.models.task_modules.prior_generators import SSDAnchorGenerator
    featmap_sizes = [(38, 38), (19, 19), (10, 10)]
    anchor_generator = SSDAnchorGenerator(
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        strides=[8, 16, 32],
        ratios=[[2], [2, 3], [2, 3]])
    ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
    # Check every SSD level with the same index subset.
    for i in range(len(featmap_sizes)):
        sparse_ssd_anchors = anchor_generator.sparse_priors(
            prior_idxs=prior_indexs,
            level_idx=i,
            featmap_size=featmap_sizes[i],
            device='cpu')
        assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
    # for yolo
    from mmdet.models.task_modules.prior_generators import YOLOAnchorGenerator
    featmap_sizes = [(38, 38), (19, 19), (10, 10)]
    anchor_generator = YOLOAnchorGenerator(
        strides=[32, 16, 8],
        base_sizes=[
            [(116, 90), (156, 198), (373, 326)],
            [(30, 61), (62, 45), (59, 119)],
            [(10, 13), (16, 30), (33, 23)],
        ])
    yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
    for i in range(len(featmap_sizes)):
        sparse_yolo_anchors = anchor_generator.sparse_priors(
            prior_idxs=prior_indexs,
            level_idx=i,
            featmap_size=featmap_sizes[i],
            device='cpu')
        assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
    # Repeat the whole battery on GPU (with duplicated/shuffled indices
    # and different feature-map sizes) when CUDA is available.
    if torch.cuda.is_available():
        mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
        prior_indexs = torch.Tensor([0, 3, 4, 5, 6, 7, 1, 2, 4, 5, 6,
                                     9]).long().cuda()
        featmap_sizes = [(6, 8), (6, 4)]
        grid_anchors = mlvl_points.grid_priors(
            featmap_sizes=featmap_sizes, with_stride=False, device='cuda')
        sparse_prior = mlvl_points.sparse_priors(
            prior_idxs=prior_indexs,
            featmap_size=featmap_sizes[0],
            level_idx=0,
            device='cuda')
        assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
        sparse_prior = mlvl_points.sparse_priors(
            prior_idxs=prior_indexs,
            featmap_size=featmap_sizes[1],
            level_idx=1,
            device='cuda')
        assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
        # Result must land on the requested CUDA device.
        assert sparse_prior.is_cuda
        mlvl_anchors = AnchorGenerator(
            strides=[16, 32],
            ratios=[1., 2.5],
            scales=[1., 5.],
            base_sizes=[4, 8])
        prior_indexs = torch.Tensor([4, 5, 6, 7, 0, 2, 50, 4, 5, 6,
                                     9]).long().cuda()
        featmap_sizes = [(13, 5), (16, 4)]
        grid_anchors = mlvl_anchors.grid_priors(
            featmap_sizes=featmap_sizes, device='cuda')
        sparse_prior = mlvl_anchors.sparse_priors(
            prior_idxs=prior_indexs,
            featmap_size=featmap_sizes[0],
            level_idx=0,
            device='cuda')
        assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
        sparse_prior = mlvl_anchors.sparse_priors(
            prior_idxs=prior_indexs,
            featmap_size=featmap_sizes[1],
            level_idx=1,
            device='cuda')
        assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
        # for ssd
        from mmdet.models.task_modules.prior_generators import \
            SSDAnchorGenerator
        featmap_sizes = [(38, 38), (19, 19), (10, 10)]
        anchor_generator = SSDAnchorGenerator(
            scale_major=False,
            input_size=300,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32],
            ratios=[[2], [2, 3], [2, 3]])
        ssd_anchors = anchor_generator.grid_anchors(
            featmap_sizes, device='cuda')
        for i in range(len(featmap_sizes)):
            sparse_ssd_anchors = anchor_generator.sparse_priors(
                prior_idxs=prior_indexs,
                level_idx=i,
                featmap_size=featmap_sizes[i],
                device='cuda')
            assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
        # for yolo
        from mmdet.models.task_modules.prior_generators import \
            YOLOAnchorGenerator
        featmap_sizes = [(38, 38), (19, 19), (10, 10)]
        anchor_generator = YOLOAnchorGenerator(
            strides=[32, 16, 8],
            base_sizes=[
                [(116, 90), (156, 198), (373, 326)],
                [(30, 61), (62, 45), (59, 119)],
                [(10, 13), (16, 30), (33, 23)],
            ])
        yolo_anchors = anchor_generator.grid_anchors(
            featmap_sizes, device='cuda')
        for i in range(len(featmap_sizes)):
            sparse_yolo_anchors = anchor_generator.sparse_priors(
                prior_idxs=prior_indexs,
                level_idx=i,
                featmap_size=featmap_sizes[i],
                device='cuda')
            assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
def test_standard_anchor_generator():
    """Build a plain ``AnchorGenerator`` from config and sanity-check it."""
    from mmdet.models.task_modules import build_anchor_generator

    cfg = dict(
        type='AnchorGenerator',
        scales=[8],
        ratios=[0.5, 1.0, 2.0],
        strides=[4, 8])
    generator = build_anchor_generator(cfg)
    assert generator is not None
    # `num_base_priors` is the unified alias of `num_base_anchors`.
    assert generator.num_base_priors == generator.num_base_anchors
    # 1 scale x 3 ratios = 3 base anchors per level, for both levels.
    assert generator.num_base_priors == [3, 3]
def test_strides():
    """Anchor grids must honor both square and rectangular (x, y) strides."""
    from mmdet.models.task_modules.prior_generators import AnchorGenerator
    # NOTE: the original bound the generator to a local named `self`, which
    # shadows the conventional instance name inside a plain function and is
    # misleading; renamed to `anchor_generator`.
    # Square stride: anchors advance by 10 px in both directions.
    anchor_generator = AnchorGenerator([10], [1.], [1.], [10])
    anchors = anchor_generator.grid_anchors([(2, 2)], device='cpu')
    expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
                                     [-5., 5., 5., 15.], [5., 5., 15., 15.]])
    assert torch.equal(anchors[0], expected_anchors)
    # Rectangular stride: 10 px along x, 20 px along y.
    anchor_generator = AnchorGenerator([(10, 20)], [1.], [1.], [10])
    anchors = anchor_generator.grid_anchors([(2, 2)], device='cpu')
    expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
                                     [-5., 15., 5., 25.], [5., 15., 15., 25.]])
    assert torch.equal(anchors[0], expected_anchors)
def test_ssd_anchor_generator():
    """Validate ``SSDAnchorGenerator`` configs and outputs.

    Covers: (1) invalid min/max-size configurations must assert, (2) anchors
    from manually-set sizes, (3) the classic VGG-SSD300 setting — checking
    base anchors, valid flags, per-level anchor counts and grid generation.
    """
    from mmdet.models.task_modules import build_anchor_generator
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    # min_sizes max_sizes must set at the same time
    with pytest.raises(AssertionError):
        anchor_generator_cfg = dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            min_sizes=[48, 100, 150, 202, 253, 300],
            max_sizes=None,
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
        build_anchor_generator(anchor_generator_cfg)
    # length of min_sizes max_sizes must be the same
    with pytest.raises(AssertionError):
        anchor_generator_cfg = dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            min_sizes=[48, 100, 150, 202, 253, 300],
            max_sizes=[100, 150, 202, 253],
            strides=[8, 16, 32, 64, 100, 300],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
        build_anchor_generator(anchor_generator_cfg)
    # test setting anchor size manually
    anchor_generator_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        min_sizes=[48, 100, 150, 202, 253, 304],
        max_sizes=[100, 150, 202, 253, 304, 320],
        strides=[16, 32, 64, 107, 160, 320],
        ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
    anchor_generator = build_anchor_generator(anchor_generator_cfg)
    # Reference (x1, y1, x2, y2) base anchors for all six levels.
    expected_base_anchors = [
        torch.Tensor([[-16.0000, -16.0000, 32.0000, 32.0000],
                      [-26.6410, -26.6410, 42.6410, 42.6410],
                      [-25.9411, -8.9706, 41.9411, 24.9706],
                      [-8.9706, -25.9411, 24.9706, 41.9411],
                      [-33.5692, -5.8564, 49.5692, 21.8564],
                      [-5.8564, -33.5692, 21.8564, 49.5692]]),
        torch.Tensor([[-34.0000, -34.0000, 66.0000, 66.0000],
                      [-45.2372, -45.2372, 77.2372, 77.2372],
                      [-54.7107, -19.3553, 86.7107, 51.3553],
                      [-19.3553, -54.7107, 51.3553, 86.7107],
                      [-70.6025, -12.8675, 102.6025, 44.8675],
                      [-12.8675, -70.6025, 44.8675, 102.6025]]),
        torch.Tensor([[-43.0000, -43.0000, 107.0000, 107.0000],
                      [-55.0345, -55.0345, 119.0345, 119.0345],
                      [-74.0660, -21.0330, 138.0660, 85.0330],
                      [-21.0330, -74.0660, 85.0330, 138.0660],
                      [-97.9038, -11.3013, 161.9038, 75.3013],
                      [-11.3013, -97.9038, 75.3013, 161.9038]]),
        torch.Tensor([[-47.5000, -47.5000, 154.5000, 154.5000],
                      [-59.5332, -59.5332, 166.5332, 166.5332],
                      [-89.3356, -17.9178, 196.3356, 124.9178],
                      [-17.9178, -89.3356, 124.9178, 196.3356],
                      [-121.4371, -4.8124, 228.4371, 111.8124],
                      [-4.8124, -121.4371, 111.8124, 228.4371]]),
        torch.Tensor([[-46.5000, -46.5000, 206.5000, 206.5000],
                      [-58.6651, -58.6651, 218.6651, 218.6651],
                      [-98.8980, -9.4490, 258.8980, 169.4490],
                      [-9.4490, -98.8980, 169.4490, 258.8980],
                      [-139.1044, 6.9652, 299.1044, 153.0348],
                      [6.9652, -139.1044, 153.0348, 299.1044]]),
        torch.Tensor([[8.0000, 8.0000, 312.0000, 312.0000],
                      [4.0513, 4.0513, 315.9487, 315.9487],
                      [-54.9605, 52.5198, 374.9604, 267.4802],
                      [52.5198, -54.9605, 267.4802, 374.9604],
                      [-103.2717, 72.2428, 423.2717, 247.7572],
                      [72.2428, -103.2717, 247.7572, 423.2717]])
    ]
    base_anchors = anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])
    # check valid flags
    expected_valid_pixels = [2400, 600, 150, 54, 24, 6]
    multi_level_valid_flags = anchor_generator.valid_flags(
        featmap_sizes, (320, 320), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]
    # check number of base anchors for each level
    assert anchor_generator.num_base_anchors == [6, 6, 6, 6, 6, 6]
    # check anchor generation
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 6
    # test vgg ssd anchor setting
    anchor_generator_cfg = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        strides=[8, 16, 32, 64, 100, 300],
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
    anchor_generator = build_anchor_generator(anchor_generator_cfg)
    # check base anchors
    expected_base_anchors = [
        torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000],
                      [-11.3704, -11.3704, 19.3704, 19.3704],
                      [-10.8492, -3.4246, 18.8492, 11.4246],
                      [-3.4246, -10.8492, 11.4246, 18.8492]]),
        torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000],
                      [-25.3729, -25.3729, 41.3729, 41.3729],
                      [-23.8198, -7.9099, 39.8198, 23.9099],
                      [-7.9099, -23.8198, 23.9099, 39.8198],
                      [-30.9711, -4.9904, 46.9711, 20.9904],
                      [-4.9904, -30.9711, 20.9904, 46.9711]]),
        torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000],
                      [-45.5366, -45.5366, 77.5366, 77.5366],
                      [-54.0036, -19.0018, 86.0036, 51.0018],
                      [-19.0018, -54.0036, 51.0018, 86.0036],
                      [-69.7365, -12.5788, 101.7365, 44.5788],
                      [-12.5788, -69.7365, 44.5788, 101.7365]]),
        torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000],
                      [-56.9817, -56.9817, 120.9817, 120.9817],
                      [-76.1873, -22.0937, 140.1873, 86.0937],
                      [-22.0937, -76.1873, 86.0937, 140.1873],
                      [-100.5019, -12.1673, 164.5019, 76.1673],
                      [-12.1673, -100.5019, 76.1673, 164.5019]]),
        torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000],
                      [-66.2185, -66.2185, 166.2185, 166.2185],
                      [-96.3711, -23.1855, 196.3711, 123.1855],
                      [-23.1855, -96.3711, 123.1855, 196.3711]]),
        torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000],
                      [6.6342, 6.6342, 293.3658, 293.3658],
                      [-34.5549, 57.7226, 334.5549, 242.2774],
                      [57.7226, -34.5549, 242.2774, 334.5549]]),
    ]
    base_anchors = anchor_generator.base_anchors
    for i, base_anchor in enumerate(base_anchors):
        assert base_anchor.allclose(expected_base_anchors[i])
    # check valid flags
    expected_valid_pixels = [5776, 2166, 600, 150, 36, 4]
    multi_level_valid_flags = anchor_generator.valid_flags(
        featmap_sizes, (300, 300), device)
    for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
        assert single_level_valid_flag.sum() == expected_valid_pixels[i]
    # check number of base anchors for each level
    assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4]
    # check anchor generation
    anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 6
def test_anchor_generator_with_tuples():
    """Scalar strides and equivalent (w, h) tuple strides must agree."""
    from mmdet.models.task_modules import build_anchor_generator

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
    # Shared SSD settings; only the stride spelling differs below.
    common = dict(
        type='SSDAnchorGenerator',
        scale_major=False,
        input_size=300,
        basesize_ratio_range=(0.15, 0.9),
        ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
    scalar_generator = build_anchor_generator(
        dict(common, strides=[8, 16, 32, 64, 100, 300]))
    tuple_generator = build_anchor_generator(
        dict(
            common,
            strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100),
                     (300, 300)]))
    scalar_anchors = scalar_generator.grid_anchors(featmap_sizes, device)
    tuple_anchors = tuple_generator.grid_anchors(featmap_sizes, device)
    for lvl_scalar, lvl_tuple in zip(scalar_anchors, tuple_anchors):
        assert torch.equal(lvl_scalar, lvl_tuple)
def test_yolo_anchor_generator():
    """Check YOLO base anchors and multi-level grid generation."""
    from mmdet.models.task_modules import build_anchor_generator

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    cfg = dict(
        type='YOLOAnchorGenerator',
        strides=[32, 16, 8],
        base_sizes=[
            [(116, 90), (156, 198), (373, 326)],
            [(30, 61), (62, 45), (59, 119)],
            [(10, 13), (16, 30), (33, 23)],
        ])
    featmap_sizes = [(14, 18), (28, 36), (56, 72)]
    generator = build_anchor_generator(cfg)
    # Reference centered (x1, y1, x2, y2) boxes derived from the base sizes.
    expected_base_anchors = [
        torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000],
                      [-62.0000, -83.0000, 94.0000, 115.0000],
                      [-170.5000, -147.0000, 202.5000, 179.0000]]),
        torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000],
                      [-23.0000, -14.5000, 39.0000, 30.5000],
                      [-21.5000, -51.5000, 37.5000, 67.5000]]),
        torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000],
                      [-4.0000, -11.0000, 12.0000, 19.0000],
                      [-12.5000, -7.5000, 20.5000, 15.5000]])
    ]
    for actual, expected in zip(generator.base_anchors,
                                expected_base_anchors):
        assert actual.allclose(expected)
    # Three anchors per location on every level.
    assert generator.num_base_anchors == [3, 3, 3]
    anchors = generator.grid_anchors(featmap_sizes, device)
    assert len(anchors) == 3
def test_retina_anchor():
from mmdet.registry import MODELS
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# head configs modified from
# configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py
bbox_head = dict(
type='RetinaSepBNHead',
num_classes=4,
num_ins=5,
in_channels=4,
stacked_convs=1,
feat_channels=4,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]))
retina_head = MODELS.build(bbox_head)
assert retina_head.anchor_generator is not None
# use the featmap sizes in NASFPN setting to test retina head
featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
# check base anchors
expected_base_anchors = [
torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
[-28.5088, -14.2544, 28.5088, 14.2544],
[-35.9188, -17.9594, 35.9188, 17.9594],
[-16.0000, -16.0000, 16.0000, 16.0000],
[-20.1587, -20.1587, 20.1587, 20.1587],
[-25.3984, -25.3984, 25.3984, 25.3984],
[-11.3137, -22.6274, 11.3137, 22.6274],
[-14.2544, -28.5088, 14.2544, 28.5088],
[-17.9594, -35.9188, 17.9594, 35.9188]]),
torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
[-57.0175, -28.5088, 57.0175, 28.5088],
[-71.8376, -35.9188, 71.8376, 35.9188],
[-32.0000, -32.0000, 32.0000, 32.0000],
[-40.3175, -40.3175, 40.3175, 40.3175],
[-50.7968, -50.7968, 50.7968, 50.7968],
[-22.6274, -45.2548, 22.6274, 45.2548],
[-28.5088, -57.0175, 28.5088, 57.0175],
[-35.9188, -71.8376, 35.9188, 71.8376]]),
torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
[-114.0350, -57.0175, 114.0350, 57.0175],
[-143.6751, -71.8376, 143.6751, 71.8376],
[-64.0000, -64.0000, 64.0000, 64.0000],
[-80.6349, -80.6349, 80.6349, 80.6349],
[-101.5937, -101.5937, 101.5937, 101.5937],
[-45.2548, -90.5097, 45.2548, 90.5097],
[-57.0175, -114.0350, 57.0175, 114.0350],
[-71.8376, -143.6751, 71.8376, 143.6751]]),
torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
[-228.0701, -114.0350, 228.0701, 114.0350],
[-287.3503, -143.6751, 287.3503, 143.6751],
[-128.0000, -128.0000, 128.0000, 128.0000],
[-161.2699, -161.2699, 161.2699, 161.2699],
[-203.1873, -203.1873, 203.1873, 203.1873],
[-90.5097, -181.0193, 90.5097, 181.0193],
[-114.0350, -228.0701, 114.0350, 228.0701],
[-143.6751, -287.3503, 143.6751, 287.3503]]),
torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
[-456.1401, -228.0701, 456.1401, 228.0701],
[-574.7006, -287.3503, 574.7006, 287.3503],
[-256.0000, -256.0000, 256.0000, 256.0000],
[-322.5398, -322.5398, 322.5398, 322.5398],
[-406.3747, -406.3747, 406.3747, 406.3747],
[-181.0193, -362.0387, 181.0193, 362.0387],
[-228.0701, -456.1401, 228.0701, 456.1401],
[-287.3503, -574.7006, 287.3503, 574.7006]])
]
base_anchors = retina_head.anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check valid flags
expected_valid_pixels = [57600, 14400, 3600, 900, 225]
multi_level_valid_flags = retina_head.anchor_generator.valid_flags(
featmap_sizes, (640, 640), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]
# check anchor generation
anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 5
def test_guided_anchor():
from mmdet.registry import MODELS
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# head configs modified from
# configs/guided_anchoring/ga-retinanet_r50_fpn_1x_coco.py
bbox_head = dict(
type='GARetinaHead',
num_classes=8,
in_channels=4,
stacked_convs=1,
feat_channels=4,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]))
ga_retina_head = MODELS.build(bbox_head)
assert ga_retina_head.approx_anchor_generator is not None
# use the featmap sizes in NASFPN setting to test ga_retina_head
featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]
# check base anchors
expected_approxs = [
torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
[-28.5088, -14.2544, 28.5088, 14.2544],
[-35.9188, -17.9594, 35.9188, 17.9594],
[-16.0000, -16.0000, 16.0000, 16.0000],
[-20.1587, -20.1587, 20.1587, 20.1587],
[-25.3984, -25.3984, 25.3984, 25.3984],
[-11.3137, -22.6274, 11.3137, 22.6274],
[-14.2544, -28.5088, 14.2544, 28.5088],
[-17.9594, -35.9188, 17.9594, 35.9188]]),
torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
[-57.0175, -28.5088, 57.0175, 28.5088],
[-71.8376, -35.9188, 71.8376, 35.9188],
[-32.0000, -32.0000, 32.0000, 32.0000],
[-40.3175, -40.3175, 40.3175, 40.3175],
[-50.7968, -50.7968, 50.7968, 50.7968],
[-22.6274, -45.2548, 22.6274, 45.2548],
[-28.5088, -57.0175, 28.5088, 57.0175],
[-35.9188, -71.8376, 35.9188, 71.8376]]),
torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
[-114.0350, -57.0175, 114.0350, 57.0175],
[-143.6751, -71.8376, 143.6751, 71.8376],
[-64.0000, -64.0000, 64.0000, 64.0000],
[-80.6349, -80.6349, 80.6349, 80.6349],
[-101.5937, -101.5937, 101.5937, 101.5937],
[-45.2548, -90.5097, 45.2548, 90.5097],
[-57.0175, -114.0350, 57.0175, 114.0350],
[-71.8376, -143.6751, 71.8376, 143.6751]]),
torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
[-228.0701, -114.0350, 228.0701, 114.0350],
[-287.3503, -143.6751, 287.3503, 143.6751],
[-128.0000, -128.0000, 128.0000, 128.0000],
[-161.2699, -161.2699, 161.2699, 161.2699],
[-203.1873, -203.1873, 203.1873, 203.1873],
[-90.5097, -181.0193, 90.5097, 181.0193],
[-114.0350, -228.0701, 114.0350, 228.0701],
[-143.6751, -287.3503, 143.6751, 287.3503]]),
torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
[-456.1401, -228.0701, 456.1401, 228.0701],
[-574.7006, -287.3503, 574.7006, 287.3503],
[-256.0000, -256.0000, 256.0000, 256.0000],
[-322.5398, -322.5398, 322.5398, 322.5398],
[-406.3747, -406.3747, 406.3747, 406.3747],
[-181.0193, -362.0387, 181.0193, 362.0387],
[-228.0701, -456.1401, 228.0701, 456.1401],
[-287.3503, -574.7006, 287.3503, 574.7006]])
]
approxs = ga_retina_head.approx_anchor_generator.base_anchors
for i, base_anchor in enumerate(approxs):
assert base_anchor.allclose(expected_approxs[i])
# check valid flags
expected_valid_pixels = [136800, 34200, 8550, 2223, 630]
multi_level_valid_flags = ga_retina_head.approx_anchor_generator \
.valid_flags(featmap_sizes, (800, 1216), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert ga_retina_head.approx_anchor_generator.num_base_anchors == [
9, 9, 9, 9, 9
]
# check approx generation
squares = ga_retina_head.square_anchor_generator.grid_anchors(
featmap_sizes, device)
assert len(squares) == 5
expected_squares = [
torch.Tensor([[-16., -16., 16., 16.]]),
torch.Tensor([[-32., -32., 32., 32]]),
torch.Tensor([[-64., -64., 64., 64.]]),
torch.Tensor([[-128., -128., 128., 128.]]),
torch.Tensor([[-256., -256., 256., 256.]])
]
squares = ga_retina_head.square_anchor_generator.base_anchors
for i, base_anchor in enumerate(squares):
assert base_anchor.allclose(expected_squares[i])
# square_anchor_generator does not check valid flags
# check number of base anchors for each level
assert (ga_retina_head.square_anchor_generator.num_base_anchors == [
1, 1, 1, 1, 1
])
# check square generation
anchors = ga_retina_head.square_anchor_generator.grid_anchors(
featmap_sizes, device)
assert len(anchors) == 5
| 33,364 | 42.218912 | 79 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_dynamic_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestDynamicRoIHead(TestCase):
    """Unit tests for ``DynamicRoIHead`` (Dynamic R-CNN)."""
    def setUp(self):
        # Register all mmdet modules so MODELS.build can resolve cfg types.
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py')
    def test_init(self):
        # The built head must at least carry a bbox branch.
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
    @parameterized.expand(['cpu', 'cuda'])
    def test_dynamic_roi_head_loss(self, device):
        """Test DynamicRoIHead.loss with non-empty and empty ground truth.

        (Docstring fixed: it was copy-pasted from the trident predict test.)
        """
        if not torch.cuda.is_available() and device == 'cuda':
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        # Fake FPN features: one 256-channel map per extractor level, with
        # resolution halving per level starting at s // 4.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))
        image_shapes = [(3, s, s)]
        # Case 1: one GT instance -> cls and bbox losses must both be > 0.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_bbox = out['loss_bbox']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
        # Case 2: no GT -> cls loss stays positive (background targets) but
        # the bbox regression loss must be exactly zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        empty_bbox_loss = out['loss_bbox']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_bbox_loss.sum(), 0,
            'there should be no box loss when there are no true boxes')
| 2,733 | 36.972222 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_trident_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestTridentRoIHead(TestCase):
    """Unit tests for ``TridentRoIHead``."""

    def setUp(self):
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'tridentnet/tridentnet_r50-caffe_1x_coco.py')

    def test_init(self):
        """The built head must expose the bbox branch and the shared head."""
        head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(head.with_bbox)
        self.assertTrue(head.with_shared_head)

    def test_trident_roi_head_predict(self):
        """Tests trident roi head predict."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        cfg = copy.deepcopy(self.roi_head_cfg)
        head = MODELS.build(cfg).cuda()
        img_size = 256
        num_levels = len(head.bbox_roi_extractor.featmap_strides)
        # One 1024-channel feature map per extractor level, resolution
        # halving per level starting at img_size // 4.
        feats = [
            torch.rand(1, 1024, img_size // (2**(lvl + 2)),
                       img_size // (2**(lvl + 2))).to(device='cuda')
            for lvl in range(num_levels)
        ]
        image_shapes = [(3, img_size, img_size)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        # When `test_branch_idx == 1`
        head.predict(feats, proposals_list, batch_data_samples)
        # When `test_branch_idx == -1`
        cfg.test_branch_idx = -1
        head = MODELS.build(cfg).cuda()
        head.predict(feats, proposals_list, batch_data_samples)
| 2,012 | 33.118644 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_pisa_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestPISARoIHead(TestCase):
    """Unit tests for ``PISARoIHead`` (Prime Sample Attention)."""
    def setUp(self):
        # Register all mmdet modules so MODELS.build can resolve cfg types.
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py')
    def test_init(self):
        # The built head must at least carry a bbox branch.
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
    @parameterized.expand(['cpu', 'cuda'])
    def test_pisa_roi_head(self, device):
        """Test PISARoIHead.loss with non-empty and empty ground truth.

        (Docstring fixed: it was copy-pasted from the trident predict test.)
        """
        if not torch.cuda.is_available() and device == 'cuda':
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        # Fake FPN features: one 256-channel map per extractor level, with
        # resolution halving per level starting at s // 4.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))
        image_shapes = [(3, s, s)]
        # Case 1: one GT instance -> cls and bbox losses must both be > 0.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_bbox = out['loss_bbox']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
        # Case 2: no GT -> cls loss stays positive (background targets) but
        # the bbox regression loss must be exactly zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        empty_bbox_loss = out['loss_bbox']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_bbox_loss.sum(), 0,
            'there should be no box loss when there are no true boxes')
| 2,718 | 36.763889 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_multi_instance_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import Config
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals
from mmdet.utils import register_all_modules
register_all_modules()
def _fake_roi_head():
    """Set a fake roi head config.

    The config is assembled from named sub-configs so each section
    (extractor, head, train, test) is readable on its own.
    """
    bbox_roi_extractor = dict(
        type='SingleRoIExtractor',
        roi_layer=dict(
            type='RoIAlign',
            output_size=7,
            sampling_ratio=-1,
            aligned=True,
            use_torchvision=True),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32])
    bbox_head = dict(
        type='MultiInstanceBBoxHead',
        with_refine=False,
        num_shared_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=1,
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0., 0., 0., 0.],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss',
            loss_weight=1.0,
            use_sigmoid=False,
            reduction='none'),
        loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0, reduction='none'))
    train_cfg = dict(
        assigner=dict(
            type='MultiInstanceAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.3,
            match_low_quality=False,
            ignore_iof_thr=-1),
        sampler=dict(
            type='MultiInsRandomSampler',
            num=512,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        pos_weight=-1,
        debug=False)
    test_cfg = dict(
        nms=dict(iou_threshold=0.5), score_thr=0.01, max_per_img=500)
    return Config(
        dict(
            type='MultiInstanceRoIHead',
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg))
class TestMultiInstanceRoIHead(TestCase):
    """Unit tests for ``MultiInstanceRoIHead`` built from ``_fake_roi_head``."""

    def test_init(self):
        """Test init multi instance RoI head."""
        roi_head_cfg = _fake_roi_head()
        roi_head = MODELS.build(roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)

    def test_standard_roi_head_loss(self):
        """Tests multi instance roi head loss when truth is empty and non-
        empty."""
        if not torch.cuda.is_available():
            # RoI pooling only support in GPU
            return unittest.skip('test requires GPU and torch+cuda')
        s = 256
        roi_head_cfg = _fake_roi_head()
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # Fake FPN features must carry 256 channels to match the head's
        # `in_channels=256` / extractor `out_channels=256` in
        # `_fake_roi_head` (was 1, which cannot feed the fc layers).
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then emd loss should be nonzero for
        # random inputs
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=False,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss = out['loss_rcnn_emd']
        self.assertGreater(loss.sum(), 0, 'loss should be non-zero')
        # When there is no truth, the emd loss should be zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_loss = out['loss_rcnn_emd']
        self.assertEqual(
            empty_loss.sum(), 0,
            'there should be no emd loss when there are no true boxes')
| 4,627 | 34.6 | 79 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_scoring_roI_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestMaskScoringRoiHead(TestCase):
    """Tests for the Mask Scoring R-CNN RoI head."""

    def setUp(self):
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'ms_rcnn/ms-rcnn_r50_fpn_1x_coco.py')

    def test_init(self):
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
        self.assertTrue(roi_head.with_mask)
        self.assertTrue(roi_head.mask_iou_head)

    def test_mask_scoring_roi_head_loss(self):
        """Tests mask scoring roi head loss when truth is empty and
        non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU. ``self.skipTest`` reports
            # the test as skipped; the previous ``return unittest.skip(...)``
            # silently marked it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.cuda()
        s = 256
        # One fake FPN feature map per RoI-extractor stride
        # (spatial sizes s/4, s/8, s/16, s/32).
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        image_shapes = [(3, s, s)]
        # When truth is non-empty, cls, box and mask losses should all be
        # non-zero for random inputs.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_bbox = out['loss_bbox']
        loss_mask = out['loss_mask']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
        self.assertGreater(loss_mask.sum(), 0, 'mask loss should be non-zero')
        # When there is no truth, the cls loss stays non-zero but box and
        # mask losses must vanish.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        empty_bbox_loss = out['loss_bbox']
        empty_mask_loss = out['loss_mask']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_bbox_loss.sum(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_mask_loss.sum(), 0,
            'there should be no mask loss when there are no true boxes')

    def test_mask_scoring_roi_head_predict(self):
        """Tests mask scoring roi head predict."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU (see note above).
            self.skipTest('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.cuda()
        s = 256
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        roi_head.predict(feats, proposals_list, batch_data_samples)

    def test_mask_scoring_roi_head_forward(self):
        """Tests mask scoring roi head forward."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU (see note above).
            self.skipTest('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.cuda()
        s = 256
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        image_shapes = [(3, s, s)]
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        roi_head.forward(feats, proposals_list)
| 4,844 | 37.76 | 78 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_htc_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import HybridTaskCascadeRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
    """Tests for the Hybrid Task Cascade RoI head."""

    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init htc RoI head."""
        # Normal HTC RoI head
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask
        assert roi_head.with_semantic

    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
    def test_htc_roi_head_loss(self, cfg_file):
        """Tests htc roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU. ``self.skipTest`` reports
            # the test as skipped; the previous ``return unittest.skip(...)``
            # silently marked it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One fake FPN feature map per RoI-extractor stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            with_semantic=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            with_semantic=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss_cls' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
            elif 'loss_bbox' in name or 'loss_mask' in name:
                self.assertEqual(value.sum(), 0)

    @parameterized.expand(['htc/htc_r50_fpn_1x_coco.py'])
    def test_htc_roi_head_predict(self, cfg_file):
        """Tests htc roi head predict."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU (see note above).
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        results = roi_head.predict(
            feats, proposal_list, batch_data_samples, rescale=True)
        # With rescale=True masks are mapped back to original image size.
        self.assertEqual(results[0].masks.shape[-2:], (s, s))
| 4,493 | 38.078261 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_standard_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import Config
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals
from mmdet.utils import register_all_modules
register_all_modules()
def _fake_roi_head(with_shared_head: bool = False) -> Config:
    """Build a minimal ``StandardRoIHead`` config for testing.

    Args:
        with_shared_head (bool): If True, return a C4-style config with a
            ``ResLayer`` shared head (single-scale features at stride 16);
            otherwise return a lightweight FPN-style Mask R-CNN config with
            tiny channel counts to keep the test fast.

    Returns:
        Config: A config that ``MODELS.build`` can instantiate.
    """
    if not with_shared_head:
        # FPN-style Mask R-CNN RoI head with 1-channel features.
        roi_head = Config(
            dict(
                type='StandardRoIHead',
                bbox_roi_extractor=dict(
                    type='SingleRoIExtractor',
                    roi_layer=dict(
                        type='RoIAlign', output_size=7, sampling_ratio=0),
                    out_channels=1,
                    featmap_strides=[4, 8, 16, 32]),
                bbox_head=dict(
                    type='Shared2FCBBoxHead',
                    in_channels=1,
                    fc_out_channels=1,
                    num_classes=4),
                mask_roi_extractor=dict(
                    type='SingleRoIExtractor',
                    roi_layer=dict(
                        type='RoIAlign', output_size=14, sampling_ratio=0),
                    out_channels=1,
                    featmap_strides=[4, 8, 16, 32]),
                mask_head=dict(
                    type='FCNMaskHead',
                    num_convs=1,
                    in_channels=1,
                    conv_out_channels=1,
                    num_classes=4),
                train_cfg=dict(
                    assigner=dict(
                        type='MaxIoUAssigner',
                        pos_iou_thr=0.5,
                        neg_iou_thr=0.5,
                        min_pos_iou=0.5,
                        match_low_quality=True,
                        ignore_iof_thr=-1),
                    sampler=dict(
                        type='RandomSampler',
                        num=512,
                        pos_fraction=0.25,
                        neg_pos_ub=-1,
                        add_gt_as_proposals=True),
                    mask_size=28,
                    pos_weight=-1,
                    debug=False),
                test_cfg=dict(
                    score_thr=0.05,
                    nms=dict(type='nms', iou_threshold=0.5),
                    max_per_img=100,
                    mask_thr_binary=0.5)))
    else:
        # C4-style RoI head: a ResNet stage-4 shared head consumes
        # single-scale stride-16 features; the mask head reuses the
        # shared-head output (mask_roi_extractor=None).
        roi_head = Config(
            dict(
                type='StandardRoIHead',
                shared_head=dict(
                    type='ResLayer',
                    depth=50,
                    stage=3,
                    stride=2,
                    dilation=1,
                    style='caffe',
                    norm_cfg=dict(type='BN', requires_grad=False),
                    norm_eval=True),
                bbox_roi_extractor=dict(
                    type='SingleRoIExtractor',
                    roi_layer=dict(
                        type='RoIAlign', output_size=14, sampling_ratio=0),
                    out_channels=1,
                    featmap_strides=[16]),
                bbox_head=dict(
                    type='BBoxHead',
                    with_avg_pool=True,
                    in_channels=2048,
                    roi_feat_size=7,
                    num_classes=4),
                mask_roi_extractor=None,
                mask_head=dict(
                    type='FCNMaskHead',
                    num_convs=0,
                    in_channels=2048,
                    conv_out_channels=1,
                    num_classes=4),
                train_cfg=dict(
                    assigner=dict(
                        type='MaxIoUAssigner',
                        pos_iou_thr=0.5,
                        neg_iou_thr=0.5,
                        min_pos_iou=0.5,
                        match_low_quality=False,
                        ignore_iof_thr=-1),
                    sampler=dict(
                        type='RandomSampler',
                        num=512,
                        pos_fraction=0.25,
                        neg_pos_ub=-1,
                        add_gt_as_proposals=True),
                    mask_size=14,
                    pos_weight=-1,
                    debug=False),
                test_cfg=dict(
                    score_thr=0.05,
                    nms=dict(type='nms', iou_threshold=0.5),
                    max_per_img=100,
                    mask_thr_binary=0.5)))
    return roi_head
class TestStandardRoIHead(TestCase):
    """Tests for ``StandardRoIHead`` with and without a shared head."""

    def test_init(self):
        """Test init standard RoI head."""
        # Normal Mask R-CNN RoI head
        roi_head_cfg = _fake_roi_head()
        roi_head = MODELS.build(roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
        self.assertTrue(roi_head.with_mask)
        # Mask R-CNN RoI head with shared_head
        roi_head_cfg = _fake_roi_head(with_shared_head=True)
        roi_head = MODELS.build(roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)
        self.assertTrue(roi_head.with_mask)
        self.assertTrue(roi_head.with_shared_head)

    @parameterized.expand([(False, ), (True, )])
    def test_standard_roi_head_loss(self, with_shared_head):
        """Tests standard roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU. ``self.skipTest`` reports
            # the test as skipped; the previous ``return unittest.skip(...)``
            # silently marked it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        roi_head_cfg = _fake_roi_head(with_shared_head=with_shared_head)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One fake feature map per RoI-extractor stride. The shared-head
        # variant expects 1024-channel C4 features, the FPN variant
        # 1-channel features (matching _fake_roi_head).
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            if not with_shared_head:
                feats.append(
                    torch.rand(1, 1, s // (2**(i + 2)),
                               s // (2**(i + 2))).to(device='cuda'))
            else:
                feats.append(
                    torch.rand(1, 1024, s // (2**(i + 2)),
                               s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_bbox = out['loss_bbox']
        loss_mask = out['loss_mask']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
        self.assertGreater(loss_mask.sum(), 0, 'mask loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device='cuda')
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        empty_bbox_loss = out['loss_bbox']
        empty_mask_loss = out['loss_mask']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_bbox_loss.sum(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_mask_loss.sum(), 0,
            'there should be no mask loss when there are no true boxes')
| 8,093 | 38.101449 | 78 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_grid_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestGridRoIHead(TestCase):
    """Tests for the Grid R-CNN RoI head."""

    def setUp(self):
        register_all_modules()
        self.roi_head_cfg = get_roi_head_cfg(
            'grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py')

    def test_init(self):
        roi_head = MODELS.build(self.roi_head_cfg)
        self.assertTrue(roi_head.with_bbox)

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_roi_head_loss(self, device):
        """Tests grid roi head loss when truth is empty and non-empty."""
        if device == 'cuda':
            if not torch.cuda.is_available():
                # ``self.skipTest`` reports the test as skipped; the previous
                # ``return unittest.skip(...)`` silently marked it as passed.
                self.skipTest('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        # One fake FPN feature map per RoI-extractor stride.
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))
        image_shapes = [(3, s, s)]
        # Non-empty truth: cls and grid losses should be non-zero.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        loss_cls = out['loss_cls']
        loss_grid = out['loss_grid']
        self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
        self.assertGreater(loss_grid.sum(), 0, 'grid loss should be non-zero')
        # Empty truth: cls loss remains, grid loss is not computed at all.
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        out = roi_head.loss(feats, proposals_list, batch_data_samples)
        empty_cls_loss = out['loss_cls']
        self.assertGreater(empty_cls_loss.sum(), 0,
                           'cls loss should be non-zero')
        self.assertNotIn(
            'loss_grid', out,
            'grid loss should not be present when there are no true boxes')

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_roi_head_predict(self, device):
        """Tests grid roi head predict."""
        if device == 'cuda':
            if not torch.cuda.is_available():
                # See note in test_grid_roi_head_loss about skipTest.
                self.skipTest('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        roi_head.predict(feats, proposals_list, batch_data_samples)

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_roi_head_forward(self, device):
        """Tests grid roi head forward."""
        if device == 'cuda':
            if not torch.cuda.is_available():
                # See note in test_grid_roi_head_loss about skipTest.
                self.skipTest('test requires GPU and torch+cuda')
        roi_head = MODELS.build(self.roi_head_cfg)
        roi_head = roi_head.to(device=device)
        s = 256
        feats = []
        for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device=device))
        image_shapes = [(3, s, s)]
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        roi_head.forward(feats, proposals_list)
| 4,587 | 36.606557 | 78 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_point_rend_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import PointRendRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestHTCRoIHead(TestCase):
    """Tests for the PointRend RoI head.

    NOTE(review): the class name says HTC (copy-paste from the HTC test
    file) but it exercises ``PointRendRoIHead``; the name is kept to avoid
    changing the public test identifier.
    """

    @parameterized.expand(
        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init Point rend RoI head."""
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask

    @parameterized.expand(
        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
    def test_point_rend_roi_head_loss(self, cfg_file):
        """Tests point rend roi head loss when truth is empty and
        non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU. ``self.skipTest`` reports
            # the test as skipped; the previous ``return unittest.skip(...)``
            # silently marked it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One fake FPN feature map per RoI-extractor stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # Positive rois must not be empty: PointRend asserts on empty truth.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        with self.assertRaises(AssertionError):
            out = roi_head.loss(feats, proposal_list, batch_data_samples)

    @parameterized.expand(
        ['point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py'])
    def test_point_rend_roi_head_predict(self, cfg_file):
        """Tests point rend roi head predict."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU (see note above).
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        img_shape_list = [img_meta['img_shape'] for img_meta in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        results = roi_head.predict(
            feats, proposal_list, batch_data_samples, rescale=True)
        # With rescale=True masks are mapped back to original image size.
        self.assertEqual(results[0].masks.shape[-2:], (s, s))
| 4,222 | 37.743119 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_scnet_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import SCNetRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestSCNetRoIHead(TestCase):
    """Tests for the SCNet RoI head."""

    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init scnet RoI head."""
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask
        assert roi_head.with_semantic
        assert roi_head.with_feat_relay
        assert roi_head.with_glbctx

    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
    def test_scnet_roi_head_loss(self, cfg_file):
        """Tests scnet roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU. ``self.skipTest`` reports
            # the test as skipped; the previous ``return unittest.skip(...)``
            # silently marked it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One fake FPN feature map per RoI-extractor stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            with_semantic=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            with_semantic=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss_cls' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
            elif 'loss_bbox' in name or 'loss_mask' in name:
                self.assertEqual(value.sum(), 0)

    @parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
    def test_scnet_roi_head_predict(self, cfg_file):
        """Tests scnet roi head predict."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU (see note above).
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 256, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        results = roi_head.predict(
            feats, proposal_list, batch_data_samples, rescale=True)
        # With rescale=True masks are mapped back to original image size.
        self.assertEqual(results[0].masks.shape[-2:], (s, s))
| 4,592 | 38.25641 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_cascade_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
    """Tests for the Cascade Mask R-CNN RoI head."""

    @parameterized.expand(
        ['cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init cascade RoI head."""
        # Normal Cascade Mask R-CNN RoI head
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        assert roi_head.with_bbox
        assert roi_head.with_mask

    @parameterized.expand(
        ['cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'])
    def test_cascade_roi_head_loss(self, cfg_file):
        """Tests cascade roi head loss when truth is empty and non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU. ``self.skipTest`` reports
            # the test as skipped; the previous ``return unittest.skip(...)``
            # silently marked it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One fake FPN feature map per RoI-extractor stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss_cls' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
            elif 'loss_bbox' in name or 'loss_mask' in name:
                self.assertEqual(value.sum(), 0)
| 3,156 | 37.975309 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_sparse_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
    """Tests for the QueryInst (sparse) RoI head.

    NOTE(review): the class name says Cascade (copy-paste from the cascade
    test file) but it exercises the QueryInst sparse RoI head; the name is
    kept to avoid changing the public test identifier.
    """

    @parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        """Test init QueryInst RoI head."""
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head.init_weights()
        assert roi_head.with_bbox
        assert roi_head.with_mask

    @parameterized.expand(['queryinst/queryinst_r50_fpn_1x_coco.py'])
    def test_cascade_roi_head_loss(self, cfg_file):
        """Tests QueryInst roi head loss when truth is empty and
        non-empty."""
        if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU. ``self.skipTest`` reports
            # the test as skipped; the previous ``return unittest.skip(...)``
            # silently marked it as passed.
            self.skipTest('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        roi_head_cfg = get_roi_head_cfg(cfg_file)
        roi_head = MODELS.build(roi_head_cfg)
        roi_head = roi_head.cuda()
        # One fake FPN feature map per RoI-extractor stride.
        feats = []
        for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 2)),
                           s // (2**(i + 2))).to(device='cuda'))
        feats = tuple(feats)
        # When truth is non-empty then both cls, box, and mask loss
        # should be nonzero for random inputs
        img_shape_list = [(3, s, s) for _ in img_metas]
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        # QueryInst proposals carry learnable query features and the
        # per-image whwh normalizer in addition to the boxes.
        init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()
        for proposal in proposal_list:
            proposal.features = init_proposal_features
            proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,
                                                       s]]).repeat(100, 1)
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box and mask loss.
        proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
        init_proposal_features = nn.Embedding(100, 256).cuda().weight.clone()
        for proposal in proposal_list:
            proposal.features = init_proposal_features
            proposal.imgs_whwh = feats[0].new_tensor([[s, s, s,
                                                       s]]).repeat(100, 1)
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=[(3, s, s)],
            num_items=[0],
            num_classes=4,
            with_mask=True,
            device='cuda')['data_samples']
        out = roi_head.loss(feats, proposal_list, batch_data_samples)
        for name, value in out.items():
            if 'loss_cls' in name:
                self.assertGreaterEqual(
                    value.sum(), 0, msg='loss should be non-zero')
            elif 'loss_bbox' in name or 'loss_mask' in name:
                self.assertEqual(value.sum(), 0)
| 3,880 | 40.731183 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_scnet_bbox_head.py | import unittest
import torch
from mmdet.models.roi_heads.bbox_heads import SCNetBBoxHead
class TestSCNetBBoxHead(unittest.TestCase):
    """Forward-pass smoke test for ``SCNetBBoxHead``."""

    def test_forward(self):
        """The head returns one extra shared feature when requested."""
        head = SCNetBBoxHead(
            num_shared_fcs=2,
            in_channels=1,
            roi_feat_size=16,
            conv_out_channels=1,
            fc_out_channels=256,
        )
        roi_feats = torch.rand((2, 1, 16, 16))
        # (return_shared_feat flag, expected number of outputs)
        for with_shared, expected_len in ((False, 2), (True, 3)):
            outputs = head(roi_feats, return_shared_feat=with_shared)
            self.assertEqual(len(outputs), expected_len)
| 600 | 25.130435 | 59 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_double_bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads.bbox_heads import DoubleConvFCBBoxHead
class TestDoubleBboxHead(TestCase):
    """Forward smoke test for ``DoubleConvFCBBoxHead`` on CPU and GPU."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda':
            if not torch.cuda.is_available():
                # ``self.skipTest`` reports the test as skipped; the previous
                # ``return unittest.skip(...)`` silently marked it as passed.
                self.skipTest('test requires GPU and torch+cuda')
        double_bbox_head = DoubleConvFCBBoxHead(
            num_convs=4,
            num_fcs=2,
            in_channels=1,
            conv_out_channels=4,
            fc_out_channels=4)
        double_bbox_head = double_bbox_head.to(device=device)
        num_samples = 4
        feats = torch.rand((num_samples, 1, 7, 7)).to(device)
        # The double head takes separate cls / reg feature branches.
        double_bbox_head(x_cls=feats, x_reg=feats)
| 876 | 28.233333 | 72 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_sabl_bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.roi_heads.bbox_heads import SABLHead
from mmdet.models.task_modules.samplers import SamplingResult
class TestSABLBboxHead(TestCase):
    """Tests for ``SABLHead``: construction, result decoding, refinement."""

    def test_init(self):
        """The head builds side-aware branches instead of a plain fc_reg."""
        bbox_head = SABLHead(
            cls_in_channels=1,
            cls_out_channels=1,
            reg_in_channels=1,
            reg_offset_out_channels=1,
            reg_cls_out_channels=1,
            num_classes=4)
        self.assertTrue(bbox_head.fc_cls)
        self.assertTrue(hasattr(bbox_head, 'reg_cls_fcs'))
        self.assertTrue(hasattr(bbox_head, 'reg_offset_fcs'))
        self.assertFalse(hasattr(bbox_head, 'fc_reg'))

    def test_bbox_head_get_results(self):
        """``predict_by_feat`` with/without NMS and with empty proposals."""
        num_classes = 6
        bbox_head = SABLHead(reg_class_agnostic=True, num_classes=num_classes)
        s = 128
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        num_samples = 2
        rois = [torch.rand((num_samples, 5))]
        cls_scores = [torch.rand((num_samples, num_classes + 1))]
        # SABL predicts a (bucket cls, bucket offset) pair per proposal.
        bbox_preds = [(torch.rand(
            (num_samples, 28)), torch.rand((num_samples, 28)))]

        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertLessEqual(len(result_list[0]), num_samples * num_classes)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        self.assertEqual(len(result_list[0].scores.shape), 1)
        self.assertEqual(len(result_list[0].labels.shape), 1)

        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].scores.shape, cls_scores[0].shape)
        # Fixed typo: the field is ``labels``; checking ``label`` was vacuous
        # since that key never exists on the results.
        self.assertIsNone(result_list[0].get('labels', None))

        # num_samples is 0
        num_samples = 0
        rois = [torch.rand((num_samples, 5))]
        cls_scores = [torch.rand((num_samples, num_classes + 1))]
        bbox_preds = [(torch.rand(
            (num_samples, 28)), torch.rand((num_samples, 28)))]
        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        # Same ``labels`` typo fix as above.
        self.assertIsNone(result_list[0].get('labels', None))

    def test_bbox_head_refine_bboxes(self):
        """``refine_bboxes`` should drop background rois and return boxes."""
        num_classes = 8
        bbox_head = SABLHead(reg_class_agnostic=True, num_classes=num_classes)
        s = 20
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        sampling_results = [SamplingResult.random()]
        num_samples = 20
        rois = torch.rand((num_samples, 4))
        # Prepend the batch-image index column expected by refine_bboxes.
        roi_img_ids = torch.zeros(num_samples, 1)
        rois = torch.cat((roi_img_ids, rois), dim=1)
        cls_scores = torch.rand((num_samples, num_classes + 1))
        cls_preds = torch.rand((num_samples, 28))
        offset_preds = torch.rand((num_samples, 28))
        labels = torch.randint(0, num_classes + 1, (num_samples, )).long()
        bbox_targets = (labels, None, None, None)
        bbox_results = dict(
            rois=rois,
            bbox_pred=(cls_preds, offset_preds),
            cls_score=cls_scores,
            bbox_targets=bbox_targets)
        bbox_list = bbox_head.refine_bboxes(
            sampling_results=sampling_results,
            bbox_results=bbox_results,
            batch_img_metas=img_metas)
        self.assertGreaterEqual(num_samples, len(bbox_list[0]))
        self.assertIsInstance(bbox_list[0], InstanceData)
        self.assertEqual(bbox_list[0].bboxes.shape[1], 4)
| 5,052 | 35.615942 | 78 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.roi_heads.bbox_heads import (BBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from mmdet.models.task_modules.samplers import SamplingResult
class TestBboxHead(TestCase):
    """Tests for ``BBoxHead`` variants: construction and result decoding."""

    def test_init(self):
        # Shared2FCBBoxHead: two shared fcs, separate cls/reg heads.
        bbox_head = Shared2FCBBoxHead(
            in_channels=1, fc_out_channels=1, num_classes=4)
        self.assertTrue(bbox_head.fc_cls)
        self.assertTrue(bbox_head.fc_reg)
        self.assertEqual(len(bbox_head.shared_fcs), 2)

        # Shared4Conv1FCBBoxHead: four shared convs followed by one fc.
        bbox_head = Shared4Conv1FCBBoxHead(
            in_channels=1, fc_out_channels=1, num_classes=4)
        self.assertTrue(bbox_head.fc_cls)
        self.assertTrue(bbox_head.fc_reg)
        self.assertEqual(len(bbox_head.shared_convs), 4)
        self.assertEqual(len(bbox_head.shared_fcs), 1)

    def test_bbox_head_get_results(self):
        """``predict_by_feat`` with/without NMS and with empty proposals."""
        num_classes = 6
        bbox_head = BBoxHead(reg_class_agnostic=True, num_classes=num_classes)
        s = 128
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        num_samples = 2
        rois = [torch.rand((num_samples, 5))]
        cls_scores = [torch.rand((num_samples, num_classes + 1))]
        bbox_preds = [torch.rand((num_samples, 4))]

        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertLessEqual(len(result_list[0]), num_samples * num_classes)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        self.assertEqual(len(result_list[0].scores.shape), 1)
        self.assertEqual(len(result_list[0].labels.shape), 1)

        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].bboxes.shape, bbox_preds[0].shape)
        self.assertEqual(result_list[0].scores.shape, cls_scores[0].shape)
        # Fixed typo: the field is ``labels``; checking ``label`` was vacuous
        # since that key never exists on the results.
        self.assertIsNone(result_list[0].get('labels', None))

        # num_samples is 0
        num_samples = 0
        rois = [torch.rand((num_samples, 5))]
        cls_scores = [torch.rand((num_samples, num_classes + 1))]
        bbox_preds = [torch.rand((num_samples, 4))]
        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertEqual(result_list[0].bboxes.shape, bbox_preds[0].shape)
        # Same ``labels`` typo fix as above.
        self.assertIsNone(result_list[0].get('labels', None))

    def test_bbox_head_refine_bboxes(self):
        """``refine_bboxes`` should drop background rois and return boxes."""
        num_classes = 6
        bbox_head = BBoxHead(reg_class_agnostic=True, num_classes=num_classes)
        s = 128
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        sampling_results = [SamplingResult.random()]
        num_samples = 20
        rois = torch.rand((num_samples, 4))
        # Prepend the batch-image index column expected by refine_bboxes.
        roi_img_ids = torch.zeros(num_samples, 1)
        rois = torch.cat((roi_img_ids, rois), dim=1)
        cls_scores = torch.rand((num_samples, num_classes + 1))
        bbox_preds = torch.rand((num_samples, 4))
        labels = torch.randint(0, num_classes + 1, (num_samples, )).long()
        bbox_targets = (labels, None, None, None)
        bbox_results = dict(
            rois=rois,
            bbox_pred=bbox_preds,
            cls_score=cls_scores,
            bbox_targets=bbox_targets)
        bbox_list = bbox_head.refine_bboxes(
            sampling_results=sampling_results,
            bbox_results=bbox_results,
            batch_img_metas=img_metas)
        self.assertGreaterEqual(num_samples, len(bbox_list[0]))
        self.assertIsInstance(bbox_list[0], InstanceData)
        self.assertEqual(bbox_list[0].bboxes.shape[1], 4)
| 5,287 | 36.503546 | 78 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_bbox_heads/test_multi_instance_bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.roi_heads.bbox_heads import MultiInstanceBBoxHead
class TestMultiInstanceBBoxHead(TestCase):
    """Tests ``MultiInstanceBBoxHead`` construction and result decoding."""

    def test_init(self):
        bbox_head = MultiInstanceBBoxHead(
            num_instance=2,
            with_refine=True,
            num_shared_fcs=2,
            in_channels=1,
            fc_out_channels=1,
            num_classes=4)
        self.assertTrue(bbox_head.shared_fcs_ref)
        self.assertTrue(bbox_head.fc_reg)
        self.assertTrue(bbox_head.fc_cls)
        self.assertEqual(len(bbox_head.shared_fcs), 2)
        # One cls/reg branch per predicted instance.
        self.assertEqual(len(bbox_head.fc_reg), 2)
        self.assertEqual(len(bbox_head.fc_cls), 2)

    def test_bbox_head_get_results(self):
        """``predict_by_feat`` with/without NMS and with empty proposals."""
        num_classes = 1
        num_instance = 2
        bbox_head = MultiInstanceBBoxHead(
            num_instance=num_instance,
            num_shared_fcs=2,
            reg_class_agnostic=True,
            num_classes=num_classes)
        s = 128
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        num_samples = 2
        rois = [torch.rand((num_samples, 5))]
        cls_scores = []
        bbox_preds = []
        # Per-instance predictions are concatenated along dim 1.
        for _ in range(num_instance):
            cls_scores.append(torch.rand((num_samples, num_classes + 1)))
            bbox_preds.append(torch.rand((num_samples, 4)))
        cls_scores = [torch.cat(cls_scores, dim=1)]
        bbox_preds = [torch.cat(bbox_preds, dim=1)]

        # with nms
        rcnn_test_cfg = ConfigDict(
            nms=dict(type='nms', iou_threshold=0.5),
            score_thr=0.01,
            max_per_img=500)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertLessEqual(
            len(result_list[0]), num_samples * num_instance * num_classes)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        self.assertEqual(len(result_list[0].scores.shape), 1)
        self.assertEqual(len(result_list[0].labels.shape), 1)

        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples * num_instance)
        # Fixed typo: the field is ``labels``; checking ``label`` was vacuous
        # since that key never exists on the results.
        self.assertIsNone(result_list[0].get('labels', None))

        # num_samples is 0
        num_samples = 0
        rois = [torch.rand((num_samples, 5))]
        cls_scores = []
        bbox_preds = []
        for _ in range(num_instance):
            cls_scores.append(torch.rand((num_samples, num_classes + 1)))
            bbox_preds.append(torch.rand((num_samples, 4)))
        cls_scores = [torch.cat(cls_scores, dim=1)]
        bbox_preds = [torch.cat(bbox_preds, dim=1)]
        # with nms
        rcnn_test_cfg = ConfigDict(
            score_thr=0.,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas,
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), 0)
        self.assertEqual(result_list[0].bboxes.shape[1], 4)
        # without nms
        result_list = bbox_head.predict_by_feat(
            rois=tuple(rois),
            cls_scores=tuple(cls_scores),
            bbox_preds=tuple(bbox_preds),
            batch_img_metas=img_metas)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples * num_instance)
        # Same ``labels`` typo fix as above.
        self.assertIsNone(result_list[0].get('labels', None))
| 4,203 | 34.327731 | 74 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_fused_semantic_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from torch import Tensor
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
class TestFusedSemanticHead(TestCase):
    """Forward + loss smoke test for ``FusedSemanticHead``."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        # Move the module and all inputs to the parameterized device (the
        # original ran entirely on CPU even for the 'cuda' case).
        semantic_head = FusedSemanticHead(
            num_ins=5,
            fusion_level=1,
            in_channels=4,
            conv_out_channels=4,
            num_classes=6).to(device)
        feats = [
            torch.rand((1, 4, 32 // 2**(i + 1), 32 // 2**(i + 1))).to(device)
            for i in range(5)
        ]
        mask_pred, x = semantic_head(feats)
        labels = torch.randint(0, 6, (1, 1, 64, 64)).to(device)
        loss = semantic_head.loss(mask_pred, labels)
        self.assertIsInstance(loss, Tensor)
| 1,013 | 28.823529 | 72 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_maskiou_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import MaskIoUHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.structures.mask import mask_target
from mmdet.testing import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results)
class TestMaskIoUHead(TestCase):
    """Tests ``MaskIoUHead`` loss/target computation and result decoding."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_mask_iou_head_loss_and_target(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        mask_iou_head = MaskIoUHead(num_classes=4)
        mask_iou_head.to(device=device)
        s = 256
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        train_cfg = ConfigDict(dict(mask_size=28, mask_thr_binary=0.5))

        # prepare ground truth
        (batch_gt_instances, batch_gt_instances_ignore,
         _) = unpack_gt_instances(batch_data_samples)
        sampling_results = demo_mm_sampling_results(
            proposals_list=proposals_list,
            batch_gt_instances=batch_gt_instances,
            batch_gt_instances_ignore=batch_gt_instances_ignore)

        # prepare mask feats, pred and target
        pos_proposals = [res.pos_priors for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        gt_masks = [res.masks for res in batch_gt_instances]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
                                   gt_masks, train_cfg)
        mask_feats = torch.rand((mask_targets.size(0), 256, 14, 14)).to(device)
        mask_preds = torch.rand((mask_targets.size(0), 4, 28, 28)).to(device)
        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
        # Select each positive sample's prediction for its own gt class.
        pos_mask_pred = mask_preds[range(mask_preds.size(0)), pos_labels]
        mask_iou_pred = mask_iou_head(mask_feats, pos_mask_pred)
        pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
                                          pos_labels]
        mask_iou_head.loss_and_target(pos_mask_iou_pred, pos_mask_pred,
                                      mask_targets, sampling_results,
                                      batch_gt_instances, train_cfg)

    @parameterized.expand(['cpu', 'cuda'])
    def test_mask_iou_head_predict_by_feat(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # Same skip-idiom fix as above.
            self.skipTest('test requires GPU and torch+cuda')
        mask_iou_head = MaskIoUHead(num_classes=4)
        mask_iou_head.to(device=device)
        s = 128
        num_samples = 2
        num_classes = 4
        img_metas = {
            'img_shape': (s, s, 3),
            'scale_factor': (1, 1),
            'ori_shape': (s, s, 3)
        }
        results = InstanceData(metainfo=img_metas)
        results.bboxes = torch.rand((num_samples, 4)).to(device)
        results.scores = torch.rand((num_samples, )).to(device)
        results.labels = torch.randint(
            num_classes, (num_samples, ), dtype=torch.long).to(device)
        mask_feats = torch.rand((num_samples, 256, 14, 14)).to(device)
        mask_preds = torch.rand((num_samples, num_classes, 28, 28)).to(device)
        mask_iou_preds = mask_iou_head(
            mask_feats, mask_preds[range(results.labels.size(0)),
                                   results.labels])
        mask_iou_head.predict_by_feat(
            mask_iou_preds=[mask_iou_preds], results_list=[results])
| 4,094 | 39.544554 | 79 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_scnet_mask_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from torch import Tensor
from mmdet.models.roi_heads.mask_heads import SCNetMaskHead
class TestSCNetMaskHead(TestCase):
    """Forward smoke test for ``SCNetMaskHead``."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        num_classes = 6
        # Move the module and input to the parameterized device (the
        # original ran entirely on CPU even for the 'cuda' case).
        mask_head = SCNetMaskHead(
            conv_to_res=True,
            num_convs=1,
            in_channels=1,
            conv_out_channels=1,
            num_classes=num_classes).to(device)
        x = torch.rand((1, 1, 10, 10)).to(device)
        results = mask_head(x)
        self.assertIsInstance(results, Tensor)
| 828 | 26.633333 | 72 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_global_context_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from torch import Tensor
from mmdet.models.roi_heads.mask_heads import GlobalContextHead
class TestGlobalContextHead(TestCase):
    """Forward + loss smoke test for ``GlobalContextHead``."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        # Move the module and all inputs to the parameterized device (the
        # original ran entirely on CPU even for the 'cuda' case).
        head = GlobalContextHead(
            num_convs=1, in_channels=4, conv_out_channels=4,
            num_classes=10).to(device)
        feats = [
            torch.rand((1, 4, 64 // 2**(i + 1), 64 // 2**(i + 1))).to(device)
            for i in range(5)
        ]
        mc_pred, x = head(feats)
        labels = [torch.randint(0, 10, (10, )).to(device)]
        loss = head.loss(mc_pred, labels)
        self.assertIsInstance(loss, Tensor)
| 917 | 28.612903 | 76 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_scnet_semantic_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from torch import Tensor
from mmdet.models.roi_heads.mask_heads import SCNetSemanticHead
class TestSCNetSemanticHead(TestCase):
    """Forward + loss smoke test for ``SCNetSemanticHead``."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward_loss(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        # Move the module and all inputs to the parameterized device (the
        # original ran entirely on CPU even for the 'cuda' case).
        semantic_head = SCNetSemanticHead(
            num_ins=5,
            fusion_level=1,
            in_channels=4,
            conv_out_channels=4,
            num_classes=6).to(device)
        feats = [
            torch.rand((1, 4, 32 // 2**(i + 1), 32 // 2**(i + 1))).to(device)
            for i in range(5)
        ]
        mask_pred, x = semantic_head(feats)
        labels = torch.randint(0, 6, (1, 1, 64, 64)).to(device)
        loss = semantic_head.loss(mask_pred, labels)
        self.assertIsInstance(loss, Tensor)
| 1,013 | 28.823529 | 72 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_htc_mask_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from torch import Tensor
from mmdet.models.roi_heads.mask_heads import HTCMaskHead
class TestHTCMaskHead(TestCase):
    """Tests every return_logits/return_feat combination of ``HTCMaskHead``."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        num_classes = 6
        # Move the module and inputs to the parameterized device (the
        # original ran entirely on CPU even for the 'cuda' case).
        mask_head = HTCMaskHead(
            with_conv_res=True,
            num_convs=1,
            in_channels=1,
            conv_out_channels=1,
            num_classes=num_classes).to(device)
        x = torch.rand((1, 1, 10, 10)).to(device)
        res_feat = torch.rand((1, 1, 10, 10)).to(device)

        # Requesting neither logits nor feature is rejected.
        with self.assertRaises(AssertionError):
            mask_head(x, return_logits=False, return_feat=False)

        results = mask_head(x)
        self.assertEqual(len(results), 2)
        results = mask_head(x, res_feat=res_feat)
        self.assertEqual(len(results), 2)
        results = mask_head(x, return_logits=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, return_feat=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, res_feat=res_feat, return_logits=False)
        self.assertIsInstance(results, Tensor)
        results = mask_head(x, res_feat=res_feat, return_feat=False)
        self.assertIsInstance(results, Tensor)
| 1,504 | 31.021277 | 72 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_fcn_mask_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import FCNMaskHead
class TestFCNMaskHead(TestCase):
    """Tests ``FCNMaskHead.predict_by_feat`` for raw, pre-activated and
    empty inputs."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_get_seg_masks(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        num_classes = 6
        mask_head = FCNMaskHead(
            num_convs=1,
            in_channels=1,
            conv_out_channels=1,
            num_classes=num_classes)
        rcnn_test_cfg = ConfigDict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)
        s = 128
        img_metas = {
            'img_shape': (s, s, 3),
            'scale_factor': (1, 1),
            'ori_shape': (s, s, 3)
        }
        result = InstanceData(metainfo=img_metas)
        num_samples = 2
        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
        result.bboxes = torch.rand((num_samples, 4)).to(device)
        result.labels = torch.randint(
            num_classes, (num_samples, ), dtype=torch.long).to(device)
        mask_head.to(device=device)
        result_list = mask_head.predict_by_feat(
            mask_preds=tuple(mask_pred),
            results_list=[result],
            batch_img_metas=[img_metas],
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        # Masks are pasted back to the full image resolution.
        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))

        # test with activate_map, `mask_pred` has been activated before
        num_samples = 2
        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
        mask_pred = [m.sigmoid().detach() for m in mask_pred]
        result.bboxes = torch.rand((num_samples, 4)).to(device)
        result.labels = torch.randint(
            num_classes, (num_samples, ), dtype=torch.long).to(device)
        mask_head.to(device=device)
        result_list = mask_head.predict_by_feat(
            mask_preds=tuple(mask_pred),
            results_list=[result],
            batch_img_metas=[img_metas],
            rcnn_test_cfg=rcnn_test_cfg,
            activate_map=True)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))

        # num_samples is 0
        num_samples = 0
        result = InstanceData(metainfo=img_metas)
        mask_pred = [torch.rand((num_samples, num_classes, 14, 14)).to(device)]
        result.bboxes = torch.zeros((num_samples, 4)).to(device)
        result.labels = torch.zeros((num_samples, )).to(device)
        result_list = mask_head.predict_by_feat(
            mask_preds=tuple(mask_pred),
            results_list=[result],
            batch_img_metas=[img_metas],
            rcnn_test_cfg=rcnn_test_cfg)
        self.assertIsInstance(result_list[0], InstanceData)
        self.assertEqual(len(result_list[0]), num_samples)
        self.assertEqual(result_list[0].masks.shape, (num_samples, s, s))
| 3,432 | 37.573034 | 79 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_feature_relay_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from torch import Tensor
from mmdet.models.roi_heads.mask_heads import FeatureRelayHead
class TestFeatureRelayHead(TestCase):
    """Forward smoke test for ``FeatureRelayHead``."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        # Move the module and inputs to the parameterized device (the
        # original ran entirely on CPU even for the 'cuda' case).
        mask_head = FeatureRelayHead(
            in_channels=10, out_conv_channels=10).to(device)
        x = torch.rand((1, 10)).to(device)
        results = mask_head(x)
        self.assertIsInstance(results, Tensor)

        # Empty input yields no feature at all.
        x = torch.empty((0, 10)).to(device)
        results = mask_head(x)
        self.assertIsNone(results)
| 795 | 28.481481 | 74 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_grid_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import GridHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.testing import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results)
class TestGridHead(TestCase):
    """Tests ``GridHead`` loss computation and grid-point decoding."""

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_head_loss(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        grid_head = GridHead()
        grid_head.to(device=device)
        s = 256
        image_shapes = [(3, s, s)]
        batch_data_samples = demo_mm_inputs(
            batch_size=1,
            image_shapes=image_shapes,
            num_items=[1],
            num_classes=4,
            with_mask=True,
            device=device)['data_samples']
        proposals_list = demo_mm_proposals(
            image_shapes=image_shapes, num_proposals=100, device=device)
        train_cfg = ConfigDict(dict(pos_radius=1))

        # prepare ground truth
        (batch_gt_instances, batch_gt_instances_ignore,
         _) = unpack_gt_instances(batch_data_samples)
        sampling_results = demo_mm_sampling_results(
            proposals_list=proposals_list,
            batch_gt_instances=batch_gt_instances,
            batch_gt_instances_ignore=batch_gt_instances_ignore)

        # prepare grid feats
        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results])
        grid_feats = torch.rand((pos_bboxes.size(0), 256, 14, 14)).to(device)
        sample_idx = torch.arange(0, pos_bboxes.size(0))
        grid_pred = grid_head(grid_feats)
        grid_head.loss(grid_pred, sample_idx, sampling_results, train_cfg)

    @parameterized.expand(['cpu', 'cuda'])
    def test_grid_head_predict_by_feat(self, device):
        # Renamed from ``test_mask_iou_head_predict_by_feat`` — the old name
        # was a copy-paste leftover from the MaskIoUHead test.
        if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
        grid_head = GridHead()
        grid_head.to(device=device)
        s = 128
        num_samples = 2
        num_classes = 4
        img_metas = {
            'img_shape': (s, s, 3),
            'scale_factor': (1, 1),
            'ori_shape': (s, s, 3)
        }
        results = InstanceData(metainfo=img_metas)
        results.bboxes = torch.rand((num_samples, 4)).to(device)
        results.scores = torch.rand((num_samples, )).to(device)
        results.labels = torch.randint(
            num_classes, (num_samples, ), dtype=torch.long).to(device)
        grid_feats = torch.rand((num_samples, 256, 14, 14)).to(device)
        grid_preds = grid_head(grid_feats)
        grid_head.predict_by_feat(
            grid_preds=grid_preds,
            results_list=[results],
            batch_img_metas=[img_metas])
| 3,034 | 34.290698 | 77 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_mask_heads/test_coarse_mask_head.py | import unittest
import torch
from parameterized import parameterized
from mmdet.models.roi_heads.mask_heads import CoarseMaskHead
class TestCoarseMaskHead(unittest.TestCase):
    """Tests ``CoarseMaskHead`` argument validation and output resolution."""

    def test_init(self):
        # Invalid constructor arguments must be rejected.
        with self.assertRaises(AssertionError):
            CoarseMaskHead(num_fcs=0)
        with self.assertRaises(AssertionError):
            CoarseMaskHead(downsample_factor=0.5)

    @parameterized.expand(['cpu', 'cuda'])
    def test_forward(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            # ``return unittest.skip(...)`` only returns a decorator object,
            # so the test would be reported as a false pass; raise SkipTest.
            self.skipTest('test requires GPU and torch+cuda')
        x = torch.rand((1, 32, 7, 7)).to(device)

        # downsample_factor=2 turns the 7x7 RoI feature into a 3x3 output.
        mask_head = CoarseMaskHead(
            downsample_factor=2,
            in_channels=32,
            conv_out_channels=32,
            roi_feat_size=7).to(device)
        mask_head.init_weights()
        res = mask_head(x)
        self.assertEqual(res.shape[-2:], (3, 3))

        # downsample_factor=1 keeps the input resolution.
        mask_head = CoarseMaskHead(
            downsample_factor=1,
            in_channels=32,
            conv_out_channels=32,
            roi_feat_size=7).to(device)
        mask_head.init_weights()
        res = mask_head(x)
        self.assertEqual(res.shape[-2:], (7, 7))
| 1,229 | 28.285714 | 72 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_roi_extractors/test_generic_roi_extractor.py | import unittest
import torch
from mmdet.models.roi_heads.roi_extractors import GenericRoIExtractor
class TestGenericRoIExtractor(unittest.TestCase):
    """Tests construction and forward of ``GenericRoIExtractor``."""

    def test_init(self):
        # An unknown aggregation mode must be rejected.
        with self.assertRaises(AssertionError):
            GenericRoIExtractor(
                aggregation='other',
                roi_layer=dict(
                    type='RoIAlign', output_size=7, sampling_ratio=2),
                out_channels=16,
                featmap_strides=[4, 8, 16, 32])

        # Without pre_cfg/post_cfg the extra conv stages are disabled.
        roi_extractor = GenericRoIExtractor(
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=16,
            featmap_strides=[4, 8, 16, 32])
        self.assertFalse(roi_extractor.with_pre)
        self.assertFalse(roi_extractor.with_post)

    def test_forward(self):
        # test with pre/post processing conv modules enabled
        cfg = dict(
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=16,
            featmap_strides=[4, 8, 16, 32],
            pre_cfg=dict(
                type='ConvModule',
                in_channels=16,
                out_channels=16,
                kernel_size=5,
                padding=2,
                inplace=False),
            post_cfg=dict(
                type='ConvModule',
                in_channels=16,
                out_channels=16,
                kernel_size=5,
                padding=2,
                inplace=False))
        roi_extractor = GenericRoIExtractor(**cfg)

        # empty rois -> empty output
        feats = (
            torch.rand((1, 16, 200, 336)),
            torch.rand((1, 16, 100, 168)),
        )
        rois = torch.empty((0, 5), dtype=torch.float32)
        res = roi_extractor(feats, rois)
        self.assertEqual(len(res), 0)

        # single scale feature; rois are (batch_idx, x1, y1, x2, y2)
        rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
        feats = (torch.rand((1, 16, 200, 336)), )
        res = roi_extractor(feats, rois)
        self.assertEqual(res.shape, (1, 16, 7, 7))

        # multi-scale features: one level per featmap stride
        feats = (
            torch.rand((1, 16, 200, 336)),
            torch.rand((1, 16, 100, 168)),
            torch.rand((1, 16, 50, 84)),
            torch.rand((1, 16, 25, 42)),
        )
        res = roi_extractor(feats, rois)
        self.assertEqual(res.shape, (1, 16, 7, 7))

        # test without pre/post, using 'concat' aggregation:
        # channels of all 4 levels are stacked -> 16 * 4 = 64
        cfg = dict(
            aggregation='concat',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=16 * 4,
            featmap_strides=[4, 8, 16, 32])
        roi_extractor = GenericRoIExtractor(**cfg)
        res = roi_extractor(feats, rois)
        self.assertEqual(res.shape, (1, 64, 7, 7))

        # test concat channel-count validation
        cfg = dict(
            aggregation='concat',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256 * 5,  # deliberately wrong: feats sum to 256 * 4
            featmap_strides=[4, 8, 16, 32])
        roi_extractor = GenericRoIExtractor(**cfg)
        feats = (
            torch.rand((1, 256, 200, 336)),
            torch.rand((1, 256, 100, 168)),
            torch.rand((1, 256, 50, 84)),
            torch.rand((1, 256, 25, 42)),
        )
        # out_channels does not equal the sum of feature channels -> error
        with self.assertRaises(AssertionError):
            roi_extractor(feats, rois)
| 3,384 | 32.186275 | 78 | py |
ERD | ERD-main/tests/test_models/test_roi_heads/test_roi_extractors/test_single_level_roi_extractor.py | import unittest
import torch
from mmdet.models.roi_heads.roi_extractors import SingleRoIExtractor
class TestSingleRoIExtractor(unittest.TestCase):
    """Exercises ``SingleRoIExtractor`` with empty, single-scale and
    multi-scale inputs."""

    def test_forward(self):
        roi_extractor = SingleRoIExtractor(
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=16,
            featmap_strides=[4, 8, 16, 32])

        single_scale_feats = (torch.rand((1, 16, 200, 336)), )

        # No proposals at all -> an empty output.
        empty_rois = torch.empty((0, 5), dtype=torch.float32)
        out = roi_extractor(single_scale_feats, empty_rois)
        self.assertEqual(len(out), 0)

        # One proposal on a single feature level.
        rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
        out = roi_extractor(single_scale_feats, rois)
        self.assertEqual(out.shape, (1, 16, 7, 7))

        # The same proposal against a 4-level feature pyramid.
        sizes = [(200, 336), (100, 168), (50, 84), (25, 42)]
        pyramid = tuple(torch.rand((1, 16, h, w)) for h, w in sizes)
        out = roi_extractor(pyramid, rois)
        self.assertEqual(out.shape, (1, 16, 7, 7))

        # Rescaling the RoIs must not change the output resolution.
        out = roi_extractor(pyramid, rois, roi_scale_factor=2.0)
        self.assertEqual(out.shape, (1, 16, 7, 7))
| 1,246 | 30.175 | 78 | py |
ERD | ERD-main/tests/test_datasets/test_samplers/test_multi_source_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import bisect
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from torch.utils.data import ConcatDataset, Dataset
from mmdet.datasets.samplers import GroupMultiSourceSampler, MultiSourceSampler
class DummyDataset(Dataset):
    """Minimal map-style dataset: random (width, height) pairs plus a
    source flag used by the sampler tests."""

    def __init__(self, length, flag):
        self.length = length
        self.flag = flag
        # One random (width, height) pair per sample.
        self.shapes = np.random.random((length, 2))

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        return self.shapes[idx]

    def get_data_info(self, idx):
        width, height = self.shapes[idx]
        return dict(width=width, height=height, flag=self.flag)
class DummyConcatDataset(ConcatDataset):
    """ConcatDataset that can map a flat index back to its source dataset."""

    def _get_ori_dataset_idx(self, idx):
        # Locate the source dataset holding the flat index, then convert
        # the flat index into a local one inside that dataset.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx > 0:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        else:
            sample_idx = idx
        return dataset_idx, sample_idx

    def get_data_info(self, idx: int):
        dataset_idx, sample_idx = self._get_ori_dataset_idx(idx)
        return self.datasets[dataset_idx].get_data_info(sample_idx)
class TestMultiSourceSampler(TestCase):
    """Tests argument validation and the per-batch source ratio of
    ``MultiSourceSampler``."""

    @patch('mmengine.dist.get_dist_info', return_value=(7, 8))
    def setUp(self, mock):
        # Two sources of very different size, concatenated together.
        self.length_a = 100
        self.dataset_a = DummyDataset(self.length_a, flag='a')
        self.length_b = 1000
        self.dataset_b = DummyDataset(self.length_b, flag='b')
        self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])

    def test_multi_source_sampler(self):
        # A plain (non-concat) dataset must be rejected.
        with self.assertRaises(AssertionError):
            MultiSourceSampler(
                self.dataset_a, batch_size=5, source_ratio=[1, 4])
        # A negative batch size must be rejected.
        with self.assertRaises(AssertionError):
            MultiSourceSampler(
                self.dataset, batch_size=-5, source_ratio=[1, 4])
        # The ratio list must match the number of sources.
        with self.assertRaises(AssertionError):
            MultiSourceSampler(
                self.dataset, batch_size=5, source_ratio=[1, 2, 4])

        sampler_iter = iter(
            MultiSourceSampler(
                self.dataset, batch_size=5, source_ratio=[1, 4]))
        flags = [
            self.dataset.get_data_info(next(sampler_iter))['flag']
            for _ in range(100)
        ]
        # Every batch of 5 holds one sample from 'a' and four from 'b'.
        self.assertEqual(flags, ['a', 'b', 'b', 'b', 'b'] * 20)
class TestGroupMultiSourceSampler(TestCase):
    """Tests that GroupMultiSourceSampler keeps both the source ratio and
    aspect-ratio grouping within every batch."""

    @patch('mmengine.dist.get_dist_info', return_value=(7, 8))
    def setUp(self, mock):
        self.length_a = 100
        self.dataset_a = DummyDataset(self.length_a, flag='a')
        self.length_b = 1000
        self.dataset_b = DummyDataset(self.length_b, flag='b')
        self.dataset = DummyConcatDataset([self.dataset_a, self.dataset_b])

    def test_group_multi_source_sampler(self):
        sampler = iter(
            GroupMultiSourceSampler(
                self.dataset, batch_size=5, source_ratio=[1, 4]))
        flags, groups = [], []
        for _ in range(100):
            data_info = self.dataset.get_data_info(next(sampler))
            flags.append(data_info['flag'])
            # Group 0: portrait (w < h); group 1: landscape.
            groups.append(
                0 if data_info['width'] < data_info['height'] else 1)
        # Source ratio must hold within every batch of 5.
        self.assertEqual(flags, ['a', 'b', 'b', 'b', 'b'] * 20)
        # All samples inside one batch must share the same aspect-ratio
        # group, so each batch's group sum is either 0 or 5.
        batch_sums = set(
            sum(groups[start:start + 5]) for start in range(0, 100, 5))
        self.assertEqual(batch_sums, set([0, 5]))
| 3,732 | 33.564815 | 79 | py |
ERD | ERD-main/tests/test_datasets/test_samplers/test_batch_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from mmengine.dataset import DefaultSampler
from torch.utils.data import Dataset
from mmdet.datasets.samplers import AspectRatioBatchSampler
class DummyDataset(Dataset):
    """Dataset stub whose samples are random (width, height) pairs."""

    def __init__(self, length):
        self.length = length
        self.shapes = np.random.random((length, 2))

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        return self.shapes[idx]

    def get_data_info(self, idx):
        # Metadata dict consumed by AspectRatioBatchSampler.
        width, height = self.shapes[idx]
        return dict(width=width, height=height)
class TestAspectRatioBatchSampler(TestCase):
    """Tests for AspectRatioBatchSampler, which batches samples so that
    every batch shares one orientation (all portrait or all landscape)."""
    @patch('mmengine.dist.get_dist_info', return_value=(0, 1))
    def setUp(self, mock):
        # Single-process setup: rank 0 of world size 1.
        self.length = 100
        self.dataset = DummyDataset(self.length)
        self.sampler = DefaultSampler(self.dataset, shuffle=False)
    def test_invalid_inputs(self):
        # batch_size must be a positive integer.
        with self.assertRaisesRegex(
                ValueError, 'batch_size should be a positive integer value'):
            AspectRatioBatchSampler(self.sampler, batch_size=-1)
        # sampler must be a Sampler instance, not None.
        with self.assertRaisesRegex(
                TypeError, 'sampler should be an instance of ``Sampler``'):
            AspectRatioBatchSampler(None, batch_size=1)
    def test_divisible_batch(self):
        # 100 samples split evenly into batches of 5.
        batch_size = 5
        batch_sampler = AspectRatioBatchSampler(
            self.sampler, batch_size=batch_size, drop_last=True)
        self.assertEqual(len(batch_sampler), self.length // batch_size)
        for batch_idxs in batch_sampler:
            self.assertEqual(len(batch_idxs), batch_size)
            batch = [self.dataset[idx] for idx in batch_idxs]
            # All samples in a batch must share the orientation
            # (width < height) of the batch's first sample.
            flag = batch[0][0] < batch[0][1]
            for i in range(1, batch_size):
                self.assertEqual(batch[i][0] < batch[i][1], flag)
    def test_indivisible_batch(self):
        # 100 samples with batch_size 7: drop_last=False keeps the
        # remainder as a short final batch, drop_last=True discards it.
        batch_size = 7
        batch_sampler = AspectRatioBatchSampler(
            self.sampler, batch_size=batch_size, drop_last=False)
        all_batch_idxs = list(batch_sampler)
        self.assertEqual(
            len(batch_sampler), (self.length + batch_size - 1) // batch_size)
        self.assertEqual(
            len(all_batch_idxs), (self.length + batch_size - 1) // batch_size)
        batch_sampler = AspectRatioBatchSampler(
            self.sampler, batch_size=batch_size, drop_last=True)
        all_batch_idxs = list(batch_sampler)
        self.assertEqual(len(batch_sampler), self.length // batch_size)
        self.assertEqual(len(all_batch_idxs), self.length // batch_size)
        # the last batch may not have the same aspect ratio
        for batch_idxs in all_batch_idxs[:-1]:
            self.assertEqual(len(batch_idxs), batch_size)
            batch = [self.dataset[idx] for idx in batch_idxs]
            flag = batch[0][0] < batch[0][1]
            for i in range(1, batch_size):
                self.assertEqual(batch[i][0] < batch[i][1], flag)
| 2,989 | 35.91358 | 78 | py |
ERD | ERD-main/tests/test_datasets/test_transforms/test_formatting.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
    """Tests for PackDetInputs, which packs pipeline results into a
    DetDataSample (gt/ignored instances, seg map, proposals, metainfo)."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        data_prefix = osp.join(osp.dirname(__file__), '../../data')
        img_path = osp.join(data_prefix, 'color.jpg')
        rng = np.random.RandomState(0)
        # results1 carries gt_ignore_flags (last instance is ignored);
        # results2 is identical except the flags are absent.
        self.results1 = {
            'img_id': 1,
            'img_path': img_path,
            'ori_shape': (300, 400),
            'img_shape': (600, 800),
            'scale_factor': 2.0,
            'flip': False,
            'img': rng.rand(300, 400),
            'gt_seg_map': rng.rand(300, 400),
            'gt_masks':
            BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
            'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'proposals': rng.rand(2, 4),
            'proposals_scores': rng.rand(2, )
        }
        self.results2 = {
            'img_id': 1,
            'img_path': img_path,
            'ori_shape': (300, 400),
            'img_shape': (600, 800),
            'scale_factor': 2.0,
            'flip': False,
            'img': rng.rand(300, 400),
            'gt_seg_map': rng.rand(300, 400),
            'gt_masks':
            BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
            'gt_bboxes_labels': rng.rand(3, ),
            'proposals': rng.rand(2, 4),
            'proposals_scores': rng.rand(2, )
        }
        self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
                          'flip')

    def _assert_packed(self, results, num_gt, num_ignored):
        """Shared assertions on a packed result dict.

        Args:
            results (dict): Output of ``PackDetInputs``.
            num_gt (int): Expected number of regular gt instances.
            num_ignored (int): Expected number of ignored instances.
        """
        self.assertIn('data_samples', results)
        data_sample = results['data_samples']
        self.assertIsInstance(data_sample, DetDataSample)
        self.assertIsInstance(data_sample.gt_instances, InstanceData)
        self.assertIsInstance(data_sample.ignored_instances, InstanceData)
        self.assertEqual(len(data_sample.gt_instances), num_gt)
        self.assertEqual(len(data_sample.ignored_instances), num_ignored)
        self.assertIsInstance(data_sample.gt_sem_seg, PixelData)
        # Proposals are packed as tensors with their scores.
        self.assertIsInstance(data_sample.proposals, InstanceData)
        self.assertEqual(len(data_sample.proposals), 2)
        self.assertIsInstance(data_sample.proposals.bboxes, torch.Tensor)
        self.assertIsInstance(data_sample.proposals.scores, torch.Tensor)

    def test_transform(self):
        transform = PackDetInputs(meta_keys=self.meta_keys)
        results = transform(copy.deepcopy(self.results1))
        # One of the three instances is flagged as ignored.
        self._assert_packed(results, num_gt=2, num_ignored=1)

    def test_transform_without_ignore(self):
        transform = PackDetInputs(meta_keys=self.meta_keys)
        results = transform(copy.deepcopy(self.results2))
        # Without ignore flags every instance is a regular gt.
        self._assert_packed(results, num_gt=3, num_ignored=0)

    def test_repr(self):
        transform = PackDetInputs(meta_keys=self.meta_keys)
        self.assertEqual(
            repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
| 4,415 | 42.294118 | 78 | py |
ERD | ERD-main/tests/test_datasets/test_transforms/test_transforms.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import mmcv
import numpy as np
import torch
from mmcv.transforms import LoadImageFromFile
# yapf:disable
from mmdet.datasets.transforms import (CopyPaste, CutOut, Expand,
FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad,
RandomCrop, RandomErasing, RandomFlip,
RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
# yapf:enable
from mmdet.evaluation import bbox_overlaps
from mmdet.registry import TRANSFORMS
from mmdet.structures.bbox import HorizontalBoxes, bbox_project
from mmdet.structures.mask import BitmapMasks
from .utils import construct_toy_data, create_full_masks, create_random_bboxes
try:
import albumentations
from albumentations import Compose
except ImportError:
albumentations = None
Compose = None
# yapf:enable
class TestResize(unittest.TestCase):
    """Tests for the Resize transform (image, bboxes, seg map, masks)."""
    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod()
        -> tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # Full sample: image + seg map + one bbox + one bitmap mask.
        self.data_info1 = dict(
            img=np.random.random((1333, 800, 3)),
            gt_seg_map=np.random.random((1333, 800, 3)),
            gt_bboxes=np.array([[0, 0, 112, 112]], dtype=np.float32),
            gt_masks=BitmapMasks(
                rng.rand(1, 1333, 800), height=1333, width=800))
        # Sample whose bbox extends past the image after resizing.
        # NOTE(review): the trailing `dtype=np.float32` is a stray dict
        # key (likely a copy-paste leftover); Resize never reads it.
        self.data_info2 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[200, 150, 600, 450]], dtype=np.float32),
            dtype=np.float32)
        # Image-only sample.
        self.data_info3 = dict(img=np.random.random((300, 400, 3)))
    def test_resize(self):
        # test keep_ratio is True
        transform = Resize(scale=(2000, 2000), keep_ratio=True)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (2000, 1200))
        self.assertEqual(results['scale_factor'], (1200 / 800, 2000 / 1333))
        # test resize_bboxes/seg/masks
        transform = Resize(scale_factor=(1.5, 2))
        results = transform(copy.deepcopy(self.data_info1))
        self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 168,
                                                            224]])).all())
        self.assertEqual(results['gt_masks'].height, 2666)
        self.assertEqual(results['gt_masks'].width, 1200)
        self.assertEqual(results['gt_seg_map'].shape[:2], (2666, 1200))
        # test clip_object_border = False
        # (the out-of-bounds bbox must be kept unclipped)
        transform = Resize(scale=(200, 150), clip_object_border=False)
        results = transform(self.data_info2)
        self.assertTrue((results['gt_bboxes'] == np.array([100, 75, 300,
                                                           225])).all())
        # test only with image
        transform = Resize(scale=(200, 150), clip_object_border=False)
        results = transform(self.data_info3)
        self.assertTupleEqual(results['img'].shape[:2], (150, 200))
        # test geometric transformation with homography matrix
        # (projecting the original boxes through the reported matrix must
        # reproduce the transformed boxes)
        transform = Resize(scale_factor=(1.5, 2))
        results = transform(copy.deepcopy(self.data_info1))
        self.assertTrue((bbox_project(
            copy.deepcopy(self.data_info1['gt_bboxes']),
            results['homography_matrix']) == results['gt_bboxes']).all())
    def test_resize_use_box_type(self):
        # Same checks as above, but with boxes wrapped in HorizontalBoxes.
        data_info1 = copy.deepcopy(self.data_info1)
        data_info1['gt_bboxes'] = HorizontalBoxes(data_info1['gt_bboxes'])
        data_info2 = copy.deepcopy(self.data_info2)
        data_info2['gt_bboxes'] = HorizontalBoxes(data_info2['gt_bboxes'])
        # test keep_ratio is True
        transform = Resize(scale=(2000, 2000), keep_ratio=True)
        results = transform(copy.deepcopy(data_info1))
        self.assertEqual(results['img_shape'], (2000, 1200))
        self.assertEqual(results['scale_factor'], (1200 / 800, 2000 / 1333))
        # test resize_bboxes/seg/masks
        transform = Resize(scale_factor=(1.5, 2))
        results = transform(copy.deepcopy(data_info1))
        self.assertTrue(
            (results['gt_bboxes'].numpy() == np.array([[0, 0, 168,
                                                        224]])).all())
        self.assertEqual(results['gt_masks'].height, 2666)
        self.assertEqual(results['gt_masks'].width, 1200)
        self.assertEqual(results['gt_seg_map'].shape[:2], (2666, 1200))
        # test clip_object_border = False
        transform = Resize(scale=(200, 150), clip_object_border=False)
        results = transform(data_info2)
        self.assertTrue(
            (results['gt_bboxes'].numpy() == np.array([100, 75, 300,
                                                       225])).all())
        # test geometric transformation with homography matrix
        transform = Resize(scale_factor=(1.5, 2))
        results = transform(copy.deepcopy(data_info1))
        self.assertTrue((bbox_project(
            copy.deepcopy(data_info1['gt_bboxes'].numpy()),
            results['homography_matrix']) == results['gt_bboxes'].numpy()
                         ).all())
    def test_repr(self):
        # Pins the exact repr emitted by the Resize implementation.
        transform = Resize(scale=(2000, 2000), keep_ratio=True)
        self.assertEqual(
            repr(transform), ('Resize(scale=(2000, 2000), '
                              'scale_factor=None, keep_ratio=True, '
                              'clip_object_border=True), backend=cv2), '
                              'interpolation=bilinear)'))
class TestFIXShapeResize(unittest.TestCase):
    """Tests for FixShapeResize, which resizes to a fixed (width, height)."""
    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # Full sample: image + seg map + one tall bbox + one bitmap mask.
        self.data_info1 = dict(
            img=np.random.random((1333, 800, 3)),
            gt_seg_map=np.random.random((1333, 800, 3)),
            gt_bboxes=np.array([[0, 0, 112, 1333]], dtype=np.float32),
            gt_masks=BitmapMasks(
                rng.rand(1, 1333, 800), height=1333, width=800))
        # Sample whose bbox extends past the image after resizing.
        self.data_info2 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[200, 150, 600, 450]], dtype=np.float32),
            dtype=np.float32)
        # Image-only sample.
        self.data_info3 = dict(img=np.random.random((300, 400, 3)))
        # Sample used for the homography-matrix check.
        self.data_info4 = dict(
            img=np.random.random((600, 800, 3)),
            gt_bboxes=np.array([[200, 150, 300, 400]], dtype=np.float32),
            dtype=np.float32)
    def test_resize(self):
        # test keep_ratio is True
        transform = FixShapeResize(width=2000, height=800, keep_ratio=True)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (800, 2000))
        self.assertEqual(results['scale_factor'], (800 / 1333, 800 / 1333))
        # test resize_bboxes/seg/masks
        transform = FixShapeResize(width=2000, height=800, keep_ratio=False)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertTrue((results['gt_bboxes'] == np.array([[0, 0, 280,
                                                            800]])).all())
        self.assertEqual(results['gt_masks'].height, 800)
        self.assertEqual(results['gt_masks'].width, 2000)
        self.assertEqual(results['gt_seg_map'].shape[:2], (800, 2000))
        # test clip_object_border = False
        transform = FixShapeResize(
            width=200, height=150, clip_object_border=False)
        results = transform(copy.deepcopy(self.data_info2))
        self.assertTrue((results['gt_bboxes'] == np.array([100, 75, 300,
                                                           225])).all())
        # test only with image
        transform = FixShapeResize(
            width=200, height=150, clip_object_border=False)
        results = transform(self.data_info3)
        self.assertTupleEqual(results['img'].shape[:2], (150, 200))
        # test geometric transformation with homography matrix
        transform = FixShapeResize(width=400, height=300)
        results = transform(copy.deepcopy(self.data_info4))
        self.assertTrue((bbox_project(
            copy.deepcopy(self.data_info4['gt_bboxes']),
            results['homography_matrix']) == results['gt_bboxes']).all())
    def test_resize_with_boxlist(self):
        # Same checks as test_resize but with HorizontalBoxes inputs.
        data_info1 = copy.deepcopy(self.data_info1)
        data_info1['gt_bboxes'] = HorizontalBoxes(data_info1['gt_bboxes'])
        data_info2 = copy.deepcopy(self.data_info2)
        data_info2['gt_bboxes'] = HorizontalBoxes(data_info2['gt_bboxes'])
        data_info4 = copy.deepcopy(self.data_info4)
        data_info4['gt_bboxes'] = HorizontalBoxes(data_info4['gt_bboxes'])
        # test keep_ratio is True
        transform = FixShapeResize(width=2000, height=800, keep_ratio=True)
        results = transform(copy.deepcopy(data_info1))
        self.assertEqual(results['img_shape'], (800, 2000))
        self.assertEqual(results['scale_factor'], (800 / 1333, 800 / 1333))
        # test resize_bboxes/seg/masks
        transform = FixShapeResize(width=2000, height=800, keep_ratio=False)
        results = transform(copy.deepcopy(data_info1))
        self.assertTrue(
            (results['gt_bboxes'].numpy() == np.array([[0, 0, 280,
                                                        800]])).all())
        self.assertEqual(results['gt_masks'].height, 800)
        self.assertEqual(results['gt_masks'].width, 2000)
        self.assertEqual(results['gt_seg_map'].shape[:2], (800, 2000))
        # test clip_object_border = False
        transform = FixShapeResize(
            width=200, height=150, clip_object_border=False)
        results = transform(copy.deepcopy(data_info2))
        self.assertTrue(
            (results['gt_bboxes'].numpy() == np.array([100, 75, 300,
                                                       225])).all())
        # test only with image
        transform = FixShapeResize(
            width=200, height=150, clip_object_border=False)
        results = transform(self.data_info3)
        self.assertTupleEqual(results['img'].shape[:2], (150, 200))
        # test geometric transformation with homography matrix
        transform = FixShapeResize(width=400, height=300)
        results = transform(copy.deepcopy(data_info4))
        self.assertTrue((bbox_project(
            copy.deepcopy(self.data_info4['gt_bboxes']),
            results['homography_matrix']) == results['gt_bboxes'].numpy()
                         ).all())
    def test_repr(self):
        # Pins the exact repr emitted by the FixShapeResize implementation.
        transform = FixShapeResize(width=2000, height=2000, keep_ratio=True)
        self.assertEqual(
            repr(transform), ('FixShapeResize(width=2000, height=2000, '
                              'keep_ratio=True, '
                              'clip_object_border=True), backend=cv2), '
                              'interpolation=bilinear)'))
class TestRandomFlip(unittest.TestCase):
    """Tests for RandomFlip on images, bboxes, masks and seg maps."""
    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # results1: full annotation set; results2: image only.
        self.results1 = {
            'img': np.random.random((224, 224, 3)),
            'gt_bboxes': np.array([[0, 1, 100, 101]], dtype=np.float32),
            'gt_masks':
            BitmapMasks(rng.rand(1, 224, 224), height=224, width=224),
            'gt_seg_map': np.random.random((224, 224))
        }
        self.results2 = {'img': self.results1['img']}
    def test_transform(self):
        # test with image, gt_bboxes, gt_masks, gt_seg_map
        # prob=1.0 makes the flip deterministic.
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(self.results1))
        self.assertTrue(
            (results_update['gt_bboxes'] == np.array([[124, 1, 224,
                                                       101]])).all())
        # test only with image
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(self.results2))
        self.assertTrue(
            (results_update['img'] == self.results2['img'][:, ::-1]).all())
        # test geometric transformation with homography matrix:
        # projecting the original boxes through the reported matrix must
        # reproduce the flipped boxes, for every flip direction.
        # (1) Horizontal Flip
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(self.results1))
        bboxes = copy.deepcopy(self.results1['gt_bboxes'])
        self.assertTrue((bbox_project(
            bboxes,
            results_update['homography_matrix']) == results_update['gt_bboxes']
                         ).all())
        # (2) Vertical Flip
        transform = RandomFlip(1.0, direction='vertical')
        results_update = transform.transform(copy.deepcopy(self.results1))
        bboxes = copy.deepcopy(self.results1['gt_bboxes'])
        self.assertTrue((bbox_project(
            bboxes,
            results_update['homography_matrix']) == results_update['gt_bboxes']
                         ).all())
        # (3) Diagonal Flip
        transform = RandomFlip(1.0, direction='diagonal')
        results_update = transform.transform(copy.deepcopy(self.results1))
        bboxes = copy.deepcopy(self.results1['gt_bboxes'])
        self.assertTrue((bbox_project(
            bboxes,
            results_update['homography_matrix']) == results_update['gt_bboxes']
                         ).all())
    def test_transform_use_box_type(self):
        # Same checks as test_transform, with HorizontalBoxes inputs.
        results1 = copy.deepcopy(self.results1)
        results1['gt_bboxes'] = HorizontalBoxes(results1['gt_bboxes'])
        # test with image, gt_bboxes, gt_masks, gt_seg_map
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(results1))
        self.assertTrue((results_update['gt_bboxes'].numpy() == np.array(
            [[124, 1, 224, 101]])).all())
        # test geometric transformation with homography matrix
        # (1) Horizontal Flip
        transform = RandomFlip(1.0)
        results_update = transform.transform(copy.deepcopy(results1))
        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())
        self.assertTrue((bbox_project(bboxes,
                                      results_update['homography_matrix']) ==
                         results_update['gt_bboxes'].numpy()).all())
        # (2) Vertical Flip
        transform = RandomFlip(1.0, direction='vertical')
        results_update = transform.transform(copy.deepcopy(results1))
        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())
        self.assertTrue((bbox_project(bboxes,
                                      results_update['homography_matrix']) ==
                         results_update['gt_bboxes'].numpy()).all())
        # (3) Diagonal Flip
        transform = RandomFlip(1.0, direction='diagonal')
        results_update = transform.transform(copy.deepcopy(results1))
        bboxes = copy.deepcopy(results1['gt_bboxes'].numpy())
        self.assertTrue((bbox_project(bboxes,
                                      results_update['homography_matrix']) ==
                         results_update['gt_bboxes'].numpy()).all())
    def test_repr(self):
        # repr() must return a string without raising.
        transform = RandomFlip(0.1)
        transform_str = str(transform)
        self.assertIsInstance(transform_str, str)
class TestPad(unittest.TestCase):
    """Tests for Pad: fixed size, size_divisor and pad_to_square modes."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.results = {
            'img': np.random.random((1333, 800, 3)),
            'gt_masks':
            BitmapMasks(rng.rand(4, 1333, 800), height=1333, width=800)
        }

    def test_transform(self):
        # test pad img/gt_masks with size
        transform = Pad(size=(1200, 2000))
        results = transform(copy.deepcopy(self.results))
        self.assertEqual(results['img'].shape[:2], (2000, 1200))
        self.assertEqual(results['gt_masks'].masks.shape[1:], (2000, 1200))
        # test pad img/gt_masks with size_divisor
        transform = Pad(size_divisor=11)
        results = transform(copy.deepcopy(self.results))
        self.assertEqual(results['img'].shape[:2], (1342, 803))
        self.assertEqual(results['gt_masks'].masks.shape[1:], (1342, 803))
        # test pad img/gt_masks with pad_to_square
        transform = Pad(pad_to_square=True)
        results = transform(copy.deepcopy(self.results))
        self.assertEqual(results['img'].shape[:2], (1333, 1333))
        self.assertEqual(results['gt_masks'].masks.shape[1:], (1333, 1333))
        # test pad img/gt_masks with pad_to_square and size_divisor
        # (a verbatim copy of this case was duplicated here in an earlier
        # revision; the duplicate has been removed)
        transform = Pad(pad_to_square=True, size_divisor=11)
        results = transform(copy.deepcopy(self.results))
        self.assertEqual(results['img'].shape[:2], (1342, 1342))
        self.assertEqual(results['gt_masks'].masks.shape[1:], (1342, 1342))

    def test_repr(self):
        # Pins the exact repr emitted by the Pad implementation.
        transform = Pad(
            pad_to_square=True, size_divisor=11, padding_mode='edge')
        self.assertEqual(
            repr(transform),
            ('Pad(size=None, size_divisor=11, pad_to_square=True, '
             "pad_val={'img': 0, 'seg': 255}), padding_mode=edge)"))
class TestMinIoURandomCrop(unittest.TestCase):
    """Tests for MinIoURandomCrop, which crops a patch whose IoU with the
    remaining gt boxes meets the randomly-chosen minimum."""
    def test_transform(self):
        results = dict()
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        results['img'] = img
        results['img_shape'] = img.shape[:2]
        gt_bboxes = create_random_bboxes(1, results['img_shape'][1],
                                         results['img_shape'][0])
        results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
        results['gt_bboxes'] = gt_bboxes
        transform = MinIoURandomCrop()
        results = transform.transform(copy.deepcopy(results))
        # Labels and boxes stay in sync and keep their dtypes.
        self.assertEqual(results['gt_labels'].shape[0],
                         results['gt_bboxes'].shape[0])
        self.assertEqual(results['gt_labels'].dtype, np.int64)
        self.assertEqual(results['gt_bboxes'].dtype, np.float32)
        self.assertEqual(results['img_shape'], results['img'].shape[:2])
        # The whole cropped image acts as the reference patch for IoU.
        patch = np.array(
            [0, 0, results['img_shape'][1], results['img_shape'][0]])
        ious = bbox_overlaps(patch.reshape(-1, 4),
                             results['gt_bboxes']).reshape(-1)
        # mode is the sampled minimum IoU; mode == 1 means "no crop".
        mode = transform.mode
        if mode == 1:
            self.assertTrue(np.equal(results['gt_bboxes'], gt_bboxes).all())
        else:
            self.assertTrue((ious >= mode).all())
    def test_transform_use_box_type(self):
        # Same checks as test_transform, with HorizontalBoxes inputs.
        results = dict()
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        results['img'] = img
        results['img_shape'] = img.shape[:2]
        gt_bboxes = create_random_bboxes(1, results['img_shape'][1],
                                         results['img_shape'][0])
        results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
        results['gt_bboxes'] = HorizontalBoxes(gt_bboxes)
        transform = MinIoURandomCrop()
        results = transform.transform(copy.deepcopy(results))
        self.assertEqual(results['gt_labels'].shape[0],
                         results['gt_bboxes'].shape[0])
        self.assertEqual(results['gt_labels'].dtype, np.int64)
        self.assertEqual(results['gt_bboxes'].dtype, torch.float32)
        patch = np.array(
            [0, 0, results['img_shape'][1], results['img_shape'][0]])
        ious = bbox_overlaps(
            patch.reshape(-1, 4), results['gt_bboxes'].numpy()).reshape(-1)
        mode = transform.mode
        if mode == 1:
            self.assertTrue((results['gt_bboxes'].numpy() == gt_bboxes).all())
        else:
            self.assertTrue((ious >= mode).all())
    def test_repr(self):
        # Pins the exact repr emitted by the implementation.
        transform = MinIoURandomCrop()
        self.assertEqual(
            repr(transform), ('MinIoURandomCrop'
                              '(min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), '
                              'min_crop_size=0.3, '
                              'bbox_clip_border=True)'))
class TestPhotoMetricDistortion(unittest.TestCase):
    """PhotoMetricDistortion must accept both uint8 and float32 images and
    always produce a float32 image."""

    def test_transform(self):
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        transform = PhotoMetricDistortion()
        # Run the transform on a uint8 image and on its float32 copy; the
        # output dtype must be float32 in both cases.
        for src_img in (img, img.astype(np.float32)):
            results = transform.transform(copy.deepcopy({'img': src_img}))
            self.assertEqual(results['img'].dtype, np.float32)

    def test_repr(self):
        transform = PhotoMetricDistortion()
        self.assertEqual(
            repr(transform), ('PhotoMetricDistortion'
                              '(brightness_delta=32, '
                              'contrast_range=(0.5, 1.5), '
                              'saturation_range=(0.5, 1.5), '
                              'hue_delta=18)'))
class TestExpand(unittest.TestCase):
    """Expand pastes the sample onto a larger canvas; image, masks and
    segmentation map must all end up with the same expanded shape."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes': np.array([[0, 1, 100, 101]]),
            'gt_masks':
            BitmapMasks(rng.rand(1, 224, 224), height=224, width=224),
            'gt_seg_map': np.random.random((224, 224))
        }

    def _check_consistent_shapes(self, results):
        # Masks and seg map must agree with the expanded img_shape.
        self.assertEqual(
            results['img_shape'],
            (results['gt_masks'].height, results['gt_masks'].width))
        self.assertEqual(results['img_shape'], results['gt_seg_map'].shape)

    def test_transform(self):
        results = Expand().transform(copy.deepcopy(self.results))
        self.assertEqual(results['img_shape'], results['img'].shape[:2])
        self._check_consistent_shapes(results)

    def test_transform_use_box_type(self):
        # Same check with gt boxes wrapped in HorizontalBoxes.
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = Expand().transform(results)
        self._check_consistent_shapes(results)

    def test_repr(self):
        self.assertEqual(
            repr(Expand()), ('Expand'
                             '(mean=(0, 0, 0), to_rgb=True, '
                             'ratio_range=(1, 4), '
                             'seg_ignore_label=None, '
                             'prob=0.5)'))
class TestSegRescale(unittest.TestCase):
    """SegRescale resizes only the semantic segmentation map."""

    def setUp(self) -> None:
        seg_map = np.random.randint(0, 255, size=(32, 32), dtype=np.int32)
        self.results = {'gt_seg_map': seg_map}

    def test_transform(self):
        # A factor of 2 doubles both spatial dimensions ...
        results = SegRescale(scale_factor=2)(copy.deepcopy(self.results))
        self.assertEqual(results['gt_seg_map'].shape[:2], (64, 64))
        # ... while a factor of 1 leaves the map untouched.
        results = SegRescale(scale_factor=1)(copy.deepcopy(self.results))
        self.assertEqual(results['gt_seg_map'].shape[:2], (32, 32))

    def test_repr(self):
        self.assertEqual(
            repr(SegRescale(scale_factor=2)),
            ('SegRescale(scale_factor=2, backend=cv2)'))
class TestRandomCrop(unittest.TestCase):
def test_init(self):
# test invalid crop_type
with self.assertRaisesRegex(ValueError, 'Invalid crop_type'):
RandomCrop(crop_size=(10, 10), crop_type='unknown')
crop_type_list = ['absolute', 'absolute_range']
for crop_type in crop_type_list:
# test h > 0 and w > 0
for crop_size in [(0, 0), (0, 1), (1, 0)]:
with self.assertRaises(AssertionError):
RandomCrop(crop_size=crop_size, crop_type=crop_type)
# test type(h) = int and type(w) = int
for crop_size in [(1.0, 1), (1, 1.0), (1.0, 1.0)]:
with self.assertRaises(AssertionError):
RandomCrop(crop_size=crop_size, crop_type=crop_type)
# test crop_size[0] <= crop_size[1]
with self.assertRaises(AssertionError):
RandomCrop(crop_size=(10, 5), crop_type='absolute_range')
# test h in (0, 1] and w in (0, 1]
crop_type_list = ['relative_range', 'relative']
for crop_type in crop_type_list:
for crop_size in [(0, 1), (1, 0), (1.1, 0.5), (0.5, 1.1)]:
with self.assertRaises(AssertionError):
RandomCrop(crop_size=crop_size, crop_type=crop_type)
def test_transform(self):
# test relative and absolute crop
src_results = {
'img': np.random.randint(0, 255, size=(24, 32), dtype=np.int32)
}
target_shape = (12, 16)
for crop_type, crop_size in zip(['relative', 'absolute'], [(0.5, 0.5),
(16, 12)]):
transform = RandomCrop(crop_size=crop_size, crop_type=crop_type)
results = transform(copy.deepcopy(src_results))
print(results['img'].shape[:2])
self.assertEqual(results['img'].shape[:2], target_shape)
# test absolute_range crop
transform = RandomCrop(crop_size=(10, 20), crop_type='absolute_range')
results = transform(copy.deepcopy(src_results))
h, w = results['img'].shape
self.assertTrue(10 <= w <= 20)
self.assertTrue(10 <= h <= 20)
self.assertEqual(results['img_shape'], results['img'].shape[:2])
# test relative_range crop
transform = RandomCrop(
crop_size=(0.5, 0.5), crop_type='relative_range')
results = transform(copy.deepcopy(src_results))
h, w = results['img'].shape
self.assertTrue(16 <= w <= 32)
self.assertTrue(12 <= h <= 24)
self.assertEqual(results['img_shape'], results['img'].shape[:2])
# test with gt_bboxes, gt_bboxes_labels, gt_ignore_flags,
# gt_masks, gt_seg_map
img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
gt_bboxes = np.array([[0, 0, 7, 7], [2, 3, 9, 9]], dtype=np.float32)
gt_bboxes_labels = np.array([0, 1], dtype=np.int64)
gt_ignore_flags = np.array([0, 1], dtype=bool)
gt_masks_ = np.zeros((2, 10, 10), np.uint8)
gt_masks_[0, 0:7, 0:7] = 1
gt_masks_[1, 2:7, 3:8] = 1
gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)
gt_seg_map = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
src_results = {
'img': img,
'gt_bboxes': gt_bboxes,
'gt_bboxes_labels': gt_bboxes_labels,
'gt_ignore_flags': gt_ignore_flags,
'gt_masks': gt_masks,
'gt_seg_map': gt_seg_map
}
transform = RandomCrop(
crop_size=(7, 5),
allow_negative_crop=False,
recompute_bbox=False,
bbox_clip_border=True)
results = transform(copy.deepcopy(src_results))
h, w = results['img'].shape
self.assertEqual(h, 5)
self.assertEqual(w, 7)
self.assertEqual(results['gt_bboxes'].shape[0], 2)
self.assertEqual(results['gt_bboxes_labels'].shape[0], 2)
self.assertEqual(results['gt_ignore_flags'].shape[0], 2)
self.assertTupleEqual(results['gt_seg_map'].shape[:2], (5, 7))
self.assertEqual(results['img_shape'], results['img'].shape[:2])
# test geometric transformation with homography matrix
bboxes = copy.deepcopy(src_results['gt_bboxes'])
self.assertTrue((bbox_project(bboxes, results['homography_matrix'],
(5, 7)) == results['gt_bboxes']).all())
# test recompute_bbox = True
gt_masks_ = np.zeros((2, 10, 10), np.uint8)
gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)
gt_bboxes = np.array([[0.1, 0.1, 0.2, 0.2]])
src_results = {
'img': img,
'gt_bboxes': gt_bboxes,
'gt_masks': gt_masks
}
target_gt_bboxes = np.zeros((1, 4), dtype=np.float32)
transform = RandomCrop(
crop_size=(10, 11),
allow_negative_crop=False,
recompute_bbox=True,
bbox_clip_border=True)
results = transform(copy.deepcopy(src_results))
self.assertTrue((results['gt_bboxes'] == target_gt_bboxes).all())
# test bbox_clip_border = False
src_results = {'img': img, 'gt_bboxes': gt_bboxes}
transform = RandomCrop(
crop_size=(10, 11),
allow_negative_crop=False,
recompute_bbox=True,
bbox_clip_border=False)
results = transform(copy.deepcopy(src_results))
self.assertTrue(
(results['gt_bboxes'] == src_results['gt_bboxes']).all())
# test the crop does not contain any gt-bbox
# allow_negative_crop = False
img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
src_results = {'img': img, 'gt_bboxes': gt_bboxes}
transform = RandomCrop(crop_size=(5, 3), allow_negative_crop=False)
results = transform(copy.deepcopy(src_results))
self.assertIsNone(results)
# allow_negative_crop = True
img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
src_results = {'img': img, 'gt_bboxes': gt_bboxes}
transform = RandomCrop(crop_size=(5, 3), allow_negative_crop=True)
results = transform(copy.deepcopy(src_results))
self.assertTrue(isinstance(results, dict))
def test_transform_use_box_type(self):
# test with gt_bboxes, gt_bboxes_labels, gt_ignore_flags,
# gt_masks, gt_seg_map
img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
gt_bboxes = np.array([[0, 0, 7, 7], [2, 3, 9, 9]], dtype=np.float32)
gt_bboxes_labels = np.array([0, 1], dtype=np.int64)
gt_ignore_flags = np.array([0, 1], dtype=bool)
gt_masks_ = np.zeros((2, 10, 10), np.uint8)
gt_masks_[0, 0:7, 0:7] = 1
gt_masks_[1, 2:7, 3:8] = 1
gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)
gt_seg_map = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
src_results = {
'img': img,
'gt_bboxes': HorizontalBoxes(gt_bboxes),
'gt_bboxes_labels': gt_bboxes_labels,
'gt_ignore_flags': gt_ignore_flags,
'gt_masks': gt_masks,
'gt_seg_map': gt_seg_map
}
transform = RandomCrop(
crop_size=(7, 5),
allow_negative_crop=False,
recompute_bbox=False,
bbox_clip_border=True)
results = transform(copy.deepcopy(src_results))
h, w = results['img'].shape
self.assertEqual(h, 5)
self.assertEqual(w, 7)
self.assertEqual(results['gt_bboxes'].shape[0], 2)
self.assertEqual(results['gt_bboxes_labels'].shape[0], 2)
self.assertEqual(results['gt_ignore_flags'].shape[0], 2)
self.assertTupleEqual(results['gt_seg_map'].shape[:2], (5, 7))
# test geometric transformation with homography matrix
bboxes = copy.deepcopy(src_results['gt_bboxes'].numpy())
print(bboxes, results['gt_bboxes'])
self.assertTrue(
(bbox_project(bboxes, results['homography_matrix'],
(5, 7)) == results['gt_bboxes'].numpy()).all())
# test recompute_bbox = True
gt_masks_ = np.zeros((2, 10, 10), np.uint8)
gt_masks = BitmapMasks(gt_masks_.copy(), height=10, width=10)
gt_bboxes = HorizontalBoxes(np.array([[0.1, 0.1, 0.2, 0.2]]))
src_results = {
'img': img,
'gt_bboxes': gt_bboxes,
'gt_masks': gt_masks
}
target_gt_bboxes = np.zeros((1, 4), dtype=np.float32)
transform = RandomCrop(
crop_size=(10, 11),
allow_negative_crop=False,
recompute_bbox=True,
bbox_clip_border=True)
results = transform(copy.deepcopy(src_results))
self.assertTrue(
(results['gt_bboxes'].numpy() == target_gt_bboxes).all())
# test bbox_clip_border = False
src_results = {'img': img, 'gt_bboxes': gt_bboxes}
transform = RandomCrop(
crop_size=(10, 10),
allow_negative_crop=False,
recompute_bbox=True,
bbox_clip_border=False)
results = transform(copy.deepcopy(src_results))
self.assertTrue(
(results['gt_bboxes'].numpy() == src_results['gt_bboxes'].numpy()
).all())
# test the crop does not contain any gt-bbox
# allow_negative_crop = False
img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
gt_bboxes = HorizontalBoxes(np.zeros((0, 4), dtype=np.float32))
src_results = {'img': img, 'gt_bboxes': gt_bboxes}
transform = RandomCrop(crop_size=(5, 2), allow_negative_crop=False)
results = transform(copy.deepcopy(src_results))
self.assertIsNone(results)
# allow_negative_crop = True
img = np.random.randint(0, 255, size=(10, 10), dtype=np.uint8)
gt_bboxes = HorizontalBoxes(np.zeros((0, 4), dtype=np.float32))
src_results = {'img': img, 'gt_bboxes': gt_bboxes}
transform = RandomCrop(crop_size=(5, 2), allow_negative_crop=True)
results = transform(copy.deepcopy(src_results))
self.assertTrue(isinstance(results, dict))
def test_repr(self):
crop_type = 'absolute'
crop_size = (10, 5)
allow_negative_crop = False
recompute_bbox = True
bbox_clip_border = False
transform = RandomCrop(
crop_size=crop_size,
crop_type=crop_type,
allow_negative_crop=allow_negative_crop,
recompute_bbox=recompute_bbox,
bbox_clip_border=bbox_clip_border)
self.assertEqual(
repr(transform),
f'RandomCrop(crop_size={crop_size}, crop_type={crop_type}, '
f'allow_negative_crop={allow_negative_crop}, '
f'recompute_bbox={recompute_bbox}, '
f'bbox_clip_border={bbox_clip_border})')
class TestCutOut(unittest.TestCase):
    """Unit tests for the ``CutOut`` transform (random rectangular erasing)."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        # A real color image; CutOut only requires the ``img`` key.
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        self.results = {'img': img}

    def test_transform(self):
        """Constructor validation plus pixel-sum sanity checks."""
        # test n_holes
        # (5, 3) is a decreasing range; (3, 4, 5) is not a pair
        with self.assertRaises(AssertionError):
            transform = CutOut(n_holes=(5, 3), cutout_shape=(8, 8))
        with self.assertRaises(AssertionError):
            transform = CutOut(n_holes=(3, 4, 5), cutout_shape=(8, 8))
        # test cutout_shape and cutout_ratio
        # (bare scalars are rejected)
        with self.assertRaises(AssertionError):
            transform = CutOut(n_holes=1, cutout_shape=8)
        with self.assertRaises(AssertionError):
            transform = CutOut(n_holes=1, cutout_ratio=0.2)
        # either of cutout_shape and cutout_ratio should be given
        with self.assertRaises(AssertionError):
            transform = CutOut(n_holes=1)
        with self.assertRaises(AssertionError):
            transform = CutOut(
                n_holes=1, cutout_shape=(2, 2), cutout_ratio=(0.4, 0.4))
        # default fill is (0, 0, 0) (see test_repr), so the sum can only drop
        transform = CutOut(n_holes=1, cutout_shape=(10, 10))
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].sum() < self.results['img'].sum())
        # filling with white (255) must raise the sum instead
        transform = CutOut(
            n_holes=(2, 4),
            cutout_shape=[(10, 10), (15, 15)],
            fill_in=(255, 255, 255))
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].sum() > self.results['img'].sum())
        # same expectation when the hole size is given as a ratio
        transform = CutOut(
            n_holes=1, cutout_ratio=(0.8, 0.8), fill_in=(255, 255, 255))
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].sum() > self.results['img'].sum())

    def test_repr(self):
        """``repr`` normalizes scalars into ranges / candidate lists."""
        transform = CutOut(n_holes=1, cutout_shape=(10, 10))
        self.assertEqual(
            repr(transform), ('CutOut(n_holes=(1, 1), '
                              'cutout_shape=[(10, 10)], '
                              'fill_in=(0, 0, 0))'))
        transform = CutOut(
            n_holes=1, cutout_ratio=(0.8, 0.8), fill_in=(255, 255, 255))
        self.assertEqual(
            repr(transform), ('CutOut(n_holes=(1, 1), '
                              'cutout_ratio=[(0.8, 0.8)], '
                              'fill_in=(255, 255, 255))'))
class TestMosaic(unittest.TestCase):
    """Tests for the ``Mosaic`` transform (stitches four samples).

    Fix: ``test_transform_use_box_type`` previously did
    ``results['mix_results'] = [results] * 3``, inserting the sample dict into
    its own ``mix_results`` list. That made the sample self-referential, so
    the transform's in-place updates also mutated its own mix inputs. Deep
    copies are used now, consistent with ``test_transform``.
    """

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.results = {
            'img':
            np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels':
            np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes':
            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                     dtype=np.float32),
            'gt_ignore_flags':
            np.array([0, 0, 1], dtype=bool),
            'gt_masks':
            BitmapMasks(rng.rand(3, 224, 224), height=224, width=224),
        }

    def test_transform(self):
        """Argument validation and output shape/dtype checks."""
        # test assertion for invalid img_scale
        with self.assertRaises(AssertionError):
            transform = Mosaic(img_scale=640)
        # test assertion for invalid probability
        with self.assertRaises(AssertionError):
            transform = Mosaic(prob=1.5)
        transform = Mosaic(img_scale=(12, 10))
        # test assertion for invalid mix_results
        with self.assertRaises(AssertionError):
            results = transform(copy.deepcopy(self.results))
        self.results['mix_results'] = [copy.deepcopy(self.results)] * 3
        results = transform(copy.deepcopy(self.results))
        # the mosaic canvas doubles img_scale: (12, 10) -> (20, 24)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
        self.assertEqual(results['img_shape'], results['img'].shape[:2])

    def test_transform_with_no_gt(self):
        """A mosaic of four empty-annotation samples stays empty but valid."""
        self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
        self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64)
        self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool)
        transform = Mosaic(img_scale=(12, 10))
        self.results['mix_results'] = [copy.deepcopy(self.results)] * 3
        results = transform(copy.deepcopy(self.results))
        self.assertIsInstance(results, dict)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(
            results['gt_bboxes_labels'].shape[0] == results['gt_bboxes'].
            shape[0] == results['gt_ignore_flags'].shape[0] == 0)
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_use_box_type(self):
        """Same checks with ``gt_bboxes`` wrapped in ``HorizontalBoxes``."""
        transform = Mosaic(img_scale=(12, 10))
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        # deep copies avoid a self-referential sample (see class docstring)
        results['mix_results'] = [copy.deepcopy(results)] * 3
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_repr(self):
        """``repr`` lists scale, center ratio range, pad value and prob."""
        transform = Mosaic(img_scale=(640, 640), )
        self.assertEqual(
            repr(transform), ('Mosaic(img_scale=(640, 640), '
                              'center_ratio_range=(0.5, 1.5), '
                              'pad_val=114.0, '
                              'prob=1.0)'))
class TestMixUp(unittest.TestCase):
    """Tests for the ``MixUp`` transform (blend of two training samples).

    Fix: ``test_transform_use_box_type`` previously did
    ``results['mix_results'] = [results]``, inserting the sample dict into its
    own ``mix_results`` list. That made the sample self-referential, so the
    transform's in-place updates also mutated its own mix input. A deep copy
    is used now, consistent with ``test_transform``.
    """

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.results = {
            'img':
            np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels':
            np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes':
            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                     dtype=np.float32),
            'gt_ignore_flags':
            np.array([0, 0, 1], dtype=bool),
            'gt_masks':
            BitmapMasks(rng.rand(3, 224, 224), height=224, width=224),
        }

    def test_transform(self):
        """MixUp needs exactly one ``mix_results`` entry; 0 or 2 raise."""
        # test assertion for invalid img_scale
        with self.assertRaises(AssertionError):
            transform = MixUp(img_scale=640)
        transform = MixUp(img_scale=(12, 10))
        # test assertion for invalid mix_results (missing entirely)
        with self.assertRaises(AssertionError):
            results = transform(copy.deepcopy(self.results))
        # two mix results are also rejected
        with self.assertRaises(AssertionError):
            self.results['mix_results'] = [copy.deepcopy(self.results)] * 2
            results = transform(copy.deepcopy(self.results))
        self.results['mix_results'] = [copy.deepcopy(self.results)]
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
        self.assertEqual(results['img_shape'], results['img'].shape[:2])

    def test_transform_use_box_type(self):
        """Same checks with ``gt_bboxes`` wrapped in ``HorizontalBoxes``."""
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        transform = MixUp(img_scale=(12, 10))
        # deep copy avoids a self-referential sample (see class docstring)
        results['mix_results'] = [copy.deepcopy(results)]
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_repr(self):
        """``repr`` reports the full MixUp configuration."""
        transform = MixUp(
            img_scale=(640, 640),
            ratio_range=(0.8, 1.6),
            pad_val=114.0,
        )
        self.assertEqual(
            repr(transform), ('MixUp(dynamic_scale=(640, 640), '
                              'ratio_range=(0.8, 1.6), '
                              'flip_ratio=0.5, '
                              'pad_val=114.0, '
                              'max_iters=15, '
                              'bbox_clip_border=True)'))
class TestRandomAffine(unittest.TestCase):
    """Tests for the ``RandomAffine`` transform."""

    def setUp(self):
        """Create a toy sample: random image plus three boxes, last ignored."""
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }

    def test_transform(self):
        """Bad arguments raise; a valid call preserves shapes and dtypes."""
        # translate ratio must not exceed 1
        with self.assertRaises(AssertionError):
            RandomAffine(max_translate_ratio=1.5)
        # scaling range must be increasing and strictly positive
        with self.assertRaises(AssertionError):
            RandomAffine(scaling_ratio_range=(1.5, 0.5))
        with self.assertRaises(AssertionError):
            RandomAffine(scaling_ratio_range=(0, 0.5))
        out = RandomAffine()(copy.deepcopy(self.results))
        self.assertEqual(out['img'].shape[:2], (224, 224))
        self.assertEqual(out['gt_bboxes_labels'].shape[0],
                         out['gt_bboxes'].shape[0])
        self.assertEqual(out['gt_bboxes_labels'].dtype, np.int64)
        self.assertEqual(out['gt_bboxes'].dtype, np.float32)
        self.assertEqual(out['gt_ignore_flags'].dtype, bool)
        self.assertEqual(out['img_shape'], out['img'].shape[:2])

    def test_transform_use_box_type(self):
        """Same checks with ``gt_bboxes`` given as ``HorizontalBoxes``."""
        sample = copy.deepcopy(self.results)
        sample['gt_bboxes'] = HorizontalBoxes(sample['gt_bboxes'])
        out = RandomAffine()(copy.deepcopy(sample))
        self.assertEqual(out['img'].shape[:2], (224, 224))
        self.assertEqual(out['gt_bboxes_labels'].shape[0],
                         out['gt_bboxes'].shape[0])
        self.assertEqual(out['gt_bboxes_labels'].dtype, np.int64)
        self.assertEqual(out['gt_bboxes'].dtype, torch.float32)
        self.assertEqual(out['gt_ignore_flags'].dtype, bool)

    def test_repr(self):
        """``repr`` reflects overridden and default constructor arguments."""
        transform = RandomAffine(
            scaling_ratio_range=(0.1, 2),
            border=(-320, -320),
        )
        self.assertEqual(
            repr(transform), ('RandomAffine(max_rotate_degree=10.0, '
                              'max_translate_ratio=0.1, '
                              'scaling_ratio_range=(0.1, 2), '
                              'max_shear_degree=2.0, '
                              'border=(-320, -320), '
                              'border_val=(114, 114, 114), '
                              'bbox_clip_border=True)'))
class TestYOLOXHSVRandomAug(unittest.TestCase):
    """Tests for the ``YOLOXHSVRandomAug`` color jitter transform."""

    def setUp(self):
        """Load a real color image and attach toy annotations."""
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        self.results = {
            'img': img,
            'img_shape': (224, 224),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }

    def test_transform(self):
        """HSV jitter must keep image size and all annotation dtypes."""
        aug = YOLOXHSVRandomAug()
        out = aug(copy.deepcopy(self.results))
        self.assertEqual(out['img'].shape[:2], self.results['img'].shape[:2])
        self.assertEqual(out['gt_bboxes_labels'].shape[0],
                         out['gt_bboxes'].shape[0])
        self.assertEqual(out['gt_bboxes_labels'].dtype, np.int64)
        self.assertEqual(out['gt_bboxes'].dtype, np.float32)
        self.assertEqual(out['gt_ignore_flags'].dtype, bool)

    def test_repr(self):
        """``repr`` lists the three default jitter deltas."""
        aug = YOLOXHSVRandomAug()
        self.assertEqual(
            repr(aug), ('YOLOXHSVRandomAug(hue_delta=5, '
                        'saturation_delta=30, '
                        'value_delta=30)'))
class TestRandomCenterCropPad(unittest.TestCase):
    """Tests for ``RandomCenterCropPad`` in both train and test modes."""

    def test_init(self):
        """Each invalid train/test-mode argument combination must assert."""
        # test assertion for invalid crop_size while test_mode=False
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=(-1, 0), test_mode=False, test_pad_mode=None)
        # test assertion for invalid ratios while test_mode=False
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=(511, 511),
                ratios=(1.0, 1.0),
                test_mode=False,
                test_pad_mode=None)
        # test assertion for invalid mean, std and to_rgb
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=(511, 511),
                mean=None,
                std=None,
                to_rgb=None,
                test_mode=False,
                test_pad_mode=None)
        # test assertion for invalid crop_size while test_mode=True
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=(511, 511),
                ratios=None,
                border=None,
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True,
                test_mode=True,
                test_pad_mode=('logical_or', 127))
        # test assertion for invalid ratios while test_mode=True
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=None,
                ratios=(0.9, 1.0, 1.1),
                border=None,
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True,
                test_mode=True,
                test_pad_mode=('logical_or', 127))
        # test assertion for invalid border while test_mode=True
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=None,
                ratios=None,
                border=128,
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True,
                test_mode=True,
                test_pad_mode=('logical_or', 127))
        # test assertion for invalid test_pad_mode while test_mode=True
        with self.assertRaises(AssertionError):
            RandomCenterCropPad(
                crop_size=None,
                ratios=None,
                border=None,
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True,
                test_mode=True,
                test_pad_mode=('do_nothing', 100))

    def test_transform(self):
        """Train mode crops to ``crop_size`` keeping all boxes; test mode
        pads to ``(h | 127, w | 127)`` via the ``logical_or`` pad mode."""
        results = dict(
            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))
        load = LoadImageFromFile(to_float32=True)
        results = load(results)
        test_results = copy.deepcopy(results)
        h, w = results['img_shape']
        gt_bboxes = create_random_bboxes(4, w, h)
        gt_bboxes_labels = np.array([1, 2, 3, 1], dtype=np.int64)
        gt_ignore_flags = np.array([0, 0, 1, 1], dtype=bool)
        results['gt_bboxes'] = gt_bboxes
        results['gt_bboxes_labels'] = gt_bboxes_labels
        results['gt_ignore_flags'] = gt_ignore_flags
        crop_module = RandomCenterCropPad(
            crop_size=(w - 20, h - 20),
            ratios=(1.0, ),
            border=128,
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=False,
            test_pad_mode=None)
        train_results = crop_module(results)
        assert train_results['img'].shape[:2] == (h - 20, w - 20)
        # All bboxes should be reserved after crop
        assert train_results['img_shape'][:2] == (h - 20, w - 20)
        assert train_results['gt_bboxes'].shape[0] == 4
        assert train_results['gt_bboxes'].dtype == np.float32
        self.assertEqual(results['img_shape'], results['img'].shape[:2])
        crop_module = RandomCenterCropPad(
            crop_size=None,
            ratios=None,
            border=None,
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=True,
            test_pad_mode=('logical_or', 127))
        test_results = crop_module(test_results)
        # logical_or padding rounds each dimension up to ``dim | 127``
        assert test_results['img'].shape[:2] == (h | 127, w | 127)
        assert test_results['img_shape'][:2] == (h | 127, w | 127)
        assert 'border' in test_results

    def test_transform_use_box_type(self):
        """Same as ``test_transform`` with ``HorizontalBoxes`` inputs."""
        results = dict(
            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))
        load = LoadImageFromFile(to_float32=True)
        results = load(results)
        test_results = copy.deepcopy(results)
        h, w = results['img_shape']
        gt_bboxes = create_random_bboxes(4, w, h)
        gt_bboxes_labels = np.array([1, 2, 3, 1], dtype=np.int64)
        gt_ignore_flags = np.array([0, 0, 1, 1], dtype=bool)
        results['gt_bboxes'] = HorizontalBoxes(gt_bboxes)
        results['gt_bboxes_labels'] = gt_bboxes_labels
        results['gt_ignore_flags'] = gt_ignore_flags
        crop_module = RandomCenterCropPad(
            crop_size=(w - 20, h - 20),
            ratios=(1.0, ),
            border=128,
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=False,
            test_pad_mode=None)
        train_results = crop_module(results)
        assert train_results['img'].shape[:2] == (h - 20, w - 20)
        # All bboxes should be reserved after crop
        assert train_results['img_shape'][:2] == (h - 20, w - 20)
        assert train_results['gt_bboxes'].shape[0] == 4
        # boxlist storage is a torch tensor, hence torch dtype here
        assert train_results['gt_bboxes'].dtype == torch.float32
        crop_module = RandomCenterCropPad(
            crop_size=None,
            ratios=None,
            border=None,
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            to_rgb=True,
            test_mode=True,
            test_pad_mode=('logical_or', 127))
        test_results = crop_module(test_results)
        assert test_results['img'].shape[:2] == (h | 127, w | 127)
        assert test_results['img_shape'][:2] == (h | 127, w | 127)
        assert 'border' in test_results
class TestCopyPaste(unittest.TestCase):
    """Tests for ``CopyPaste``: paste objects from a source sample onto a
    destination sample; fully occluded destination objects must be dropped."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        h, w, _ = img.shape
        # The second destination box lies entirely inside the second source
        # box, so it becomes fully occluded after pasting; the first is only
        # partially overlapped by the first source box.
        dst_bboxes = np.array([[0.2 * w, 0.2 * h, 0.4 * w, 0.4 * h],
                               [0.5 * w, 0.5 * h, 0.6 * w, 0.6 * h]],
                              dtype=np.float32)
        src_bboxes = np.array([[0.1 * w, 0.1 * h, 0.3 * w, 0.5 * h],
                               [0.4 * w, 0.4 * h, 0.7 * w, 0.7 * h],
                               [0.8 * w, 0.8 * h, 0.9 * w, 0.9 * h]],
                              dtype=np.float32)
        # create_full_masks is a helper defined elsewhere in this file;
        # presumably it rasterizes each box into a full-size binary mask.
        self.dst_results = {
            'img': img.copy(),
            'gt_bboxes': dst_bboxes,
            'gt_bboxes_labels': np.ones(dst_bboxes.shape[0], dtype=np.int64),
            'gt_masks': create_full_masks(dst_bboxes, w, h),
            'gt_ignore_flags': np.array([0, 1], dtype=bool),
        }
        self.src_results = {
            'img': img.copy(),
            'gt_bboxes': src_bboxes,
            'gt_bboxes_labels':
            np.ones(src_bboxes.shape[0], dtype=np.int64) * 2,
            'gt_masks': create_full_masks(src_bboxes, w, h),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }

    def test_transform(self):
        """Pasting all source objects removes exactly one fully occluded
        destination object and shrinks a partially occluded one."""
        transform = CopyPaste(selected=False)
        # test assertion for invalid mix_results
        with self.assertRaises(AssertionError):
            results = transform(copy.deepcopy(self.dst_results))
        results = copy.deepcopy(self.dst_results)
        results['mix_results'] = [copy.deepcopy(self.src_results)]
        results = transform(results)
        self.assertEqual(results['img'].shape[:2],
                         self.dst_results['img'].shape[:2])
        # one object of destination image is totally occluded
        self.assertEqual(
            results['gt_bboxes'].shape[0],
            self.dst_results['gt_bboxes'].shape[0] +
            self.src_results['gt_bboxes'].shape[0] - 1)
        self.assertEqual(
            results['gt_bboxes_labels'].shape[0],
            self.dst_results['gt_bboxes_labels'].shape[0] +
            self.src_results['gt_bboxes_labels'].shape[0] - 1)
        self.assertEqual(
            results['gt_masks'].masks.shape[0],
            self.dst_results['gt_masks'].masks.shape[0] +
            self.src_results['gt_masks'].masks.shape[0] - 1)
        self.assertEqual(
            results['gt_ignore_flags'].shape[0],
            self.dst_results['gt_ignore_flags'].shape[0] +
            self.src_results['gt_ignore_flags'].shape[0] - 1)
        # the object of destination image is partially occluded
        ori_bbox = self.dst_results['gt_bboxes'][0]
        occ_bbox = results['gt_bboxes'][0]
        ori_mask = self.dst_results['gt_masks'].masks[0]
        occ_mask = results['gt_masks'].masks[0]
        self.assertTrue(ori_mask.sum() > occ_mask.sum())
        self.assertTrue(
            np.all(np.abs(occ_bbox - ori_bbox) <= transform.bbox_occluded_thr)
            or occ_mask.sum() > transform.mask_occluded_thr)
        # test copypaste with selected objects
        transform = CopyPaste()
        results = copy.deepcopy(self.dst_results)
        results['mix_results'] = [copy.deepcopy(self.src_results)]
        results = transform(results)
        # test copypaste with an empty source image
        results = copy.deepcopy(self.dst_results)
        valid_inds = [False] * self.src_results['gt_bboxes'].shape[0]
        results['mix_results'] = [{
            'img':
            self.src_results['img'].copy(),
            'gt_bboxes':
            self.src_results['gt_bboxes'][valid_inds],
            'gt_bboxes_labels':
            self.src_results['gt_bboxes_labels'][valid_inds],
            'gt_masks':
            self.src_results['gt_masks'][valid_inds],
            'gt_ignore_flags':
            self.src_results['gt_ignore_flags'][valid_inds],
        }]
        results = transform(results)

    def test_transform_use_box_type(self):
        """Same scenario with ``gt_bboxes`` stored as ``HorizontalBoxes``."""
        src_results = copy.deepcopy(self.src_results)
        src_results['gt_bboxes'] = HorizontalBoxes(src_results['gt_bboxes'])
        dst_results = copy.deepcopy(self.dst_results)
        dst_results['gt_bboxes'] = HorizontalBoxes(dst_results['gt_bboxes'])
        transform = CopyPaste(selected=False)
        results = copy.deepcopy(dst_results)
        results['mix_results'] = [copy.deepcopy(src_results)]
        results = transform(results)
        self.assertEqual(results['img'].shape[:2],
                         self.dst_results['img'].shape[:2])
        # one object of destination image is totally occluded
        self.assertEqual(
            results['gt_bboxes'].shape[0],
            self.dst_results['gt_bboxes'].shape[0] +
            self.src_results['gt_bboxes'].shape[0] - 1)
        self.assertEqual(
            results['gt_bboxes_labels'].shape[0],
            self.dst_results['gt_bboxes_labels'].shape[0] +
            self.src_results['gt_bboxes_labels'].shape[0] - 1)
        self.assertEqual(
            results['gt_masks'].masks.shape[0],
            self.dst_results['gt_masks'].masks.shape[0] +
            self.src_results['gt_masks'].masks.shape[0] - 1)
        self.assertEqual(
            results['gt_ignore_flags'].shape[0],
            self.dst_results['gt_ignore_flags'].shape[0] +
            self.src_results['gt_ignore_flags'].shape[0] - 1)
        # the object of destination image is partially occluded
        ori_bbox = dst_results['gt_bboxes'][0].numpy()
        occ_bbox = results['gt_bboxes'][0].numpy()
        ori_mask = dst_results['gt_masks'].masks[0]
        occ_mask = results['gt_masks'].masks[0]
        self.assertTrue(ori_mask.sum() > occ_mask.sum())
        self.assertTrue(
            np.all(np.abs(occ_bbox - ori_bbox) <= transform.bbox_occluded_thr)
            or occ_mask.sum() > transform.mask_occluded_thr)
        # test copypaste with selected objects
        transform = CopyPaste()
        results = copy.deepcopy(dst_results)
        results['mix_results'] = [copy.deepcopy(src_results)]
        results = transform(results)
        # test copypaste with an empty source image
        results = copy.deepcopy(dst_results)
        valid_inds = [False] * self.src_results['gt_bboxes'].shape[0]
        results['mix_results'] = [{
            'img':
            src_results['img'].copy(),
            'gt_bboxes':
            src_results['gt_bboxes'][valid_inds],
            'gt_bboxes_labels':
            src_results['gt_bboxes_labels'][valid_inds],
            'gt_masks':
            src_results['gt_masks'][valid_inds],
            'gt_ignore_flags':
            src_results['gt_ignore_flags'][valid_inds],
        }]
        results = transform(results)

    def test_repr(self):
        """``repr`` shows the default CopyPaste thresholds."""
        transform = CopyPaste()
        self.assertEqual(
            repr(transform), ('CopyPaste(max_num_pasted=100, '
                              'bbox_occluded_thr=10, '
                              'mask_occluded_thr=300, '
                              'selected=True)'))
class TestAlbu(unittest.TestCase):
    """Tests for the ``Albu`` wrapper around albumentations transforms."""

    @unittest.skipIf(albumentations is None, 'albumentations is not installed')
    def test_transform(self):
        """The wrapper keeps dtypes for plain images and for bbox targets."""
        sample = dict(
            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))
        loader = TRANSFORMS.build(dict(type='LoadImageFromFile'))
        shuffle = TRANSFORMS.build(
            dict(type='Albu', transforms=[dict(type='ChannelShuffle', p=1)]))
        sample = shuffle(loader(sample))
        self.assertEqual(sample['img'].dtype, np.uint8)
        # test bbox
        shuffle = TRANSFORMS.build(
            dict(
                type='Albu',
                transforms=[dict(type='ChannelShuffle', p=1)],
                bbox_params=dict(
                    type='BboxParams',
                    format='pascal_voc',
                    label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
                keymap={
                    'img': 'image',
                    'gt_bboxes': 'bboxes'
                }))
        sample = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }
        sample = shuffle(sample)
        self.assertEqual(sample['img'].dtype, np.float64)
        self.assertEqual(sample['gt_bboxes'].dtype, np.float32)
        self.assertEqual(sample['gt_ignore_flags'].dtype, bool)
        self.assertEqual(sample['gt_bboxes_labels'].dtype, np.int64)
        self.assertEqual(sample['img_shape'], sample['img'].shape[:2])

    @unittest.skipIf(albumentations is None, 'albumentations is not installed')
    def test_repr(self):
        """``repr`` embeds the wrapped albumentations transform list."""
        albu = TRANSFORMS.build(
            dict(type='Albu', transforms=[dict(type='ChannelShuffle', p=1)]))
        self.assertEqual(
            repr(albu),
            "Albu(transforms=[{'type': 'ChannelShuffle', 'p': 1}])")
class TestCorrupt(unittest.TestCase):
    """Tests for the ``Corrupt`` transform."""

    def test_transform(self):
        """Applying a corruption keeps the image as uint8."""
        sample = dict(
            img_path=osp.join(osp.dirname(__file__), '../../data/color.jpg'))
        loader = TRANSFORMS.build(dict(type='LoadImageFromFile'))
        corrupt = TRANSFORMS.build(
            dict(type='Corrupt', corruption='gaussian_blur'))
        sample = corrupt(loader(sample))
        self.assertEqual(sample['img'].dtype, np.uint8)

    def test_repr(self):
        """``repr`` reports the corruption name and the default severity."""
        corrupt = TRANSFORMS.build(
            dict(type='Corrupt', corruption='gaussian_blur'))
        self.assertEqual(
            repr(corrupt), 'Corrupt(corruption=gaussian_blur, '
            'severity=1)')
class TestRandomShift(unittest.TestCase):
    """Tests for the ``RandomShift`` transform."""

    def test_init(self):
        """Constructor validates ``prob`` and ``max_shift_px``."""
        with self.assertRaises(AssertionError):
            RandomShift(prob=1.5)
        with self.assertRaises(AssertionError):
            RandomShift(max_shift_px=-1)

    def _shift_and_check(self, use_box_type):
        """Run ``RandomShift(prob=1.0)`` on a real image and check the
        invariants shared by both ndarray and boxlist inputs."""
        img = mmcv.imread(
            osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
        h, w, _ = img.shape
        raw_bboxes = create_random_bboxes(8, w, h)
        sample = {
            'img': img,
            'gt_bboxes_labels': np.ones(raw_bboxes.shape[0], dtype=np.int64),
            'gt_bboxes':
            HorizontalBoxes(raw_bboxes) if use_box_type else raw_bboxes,
        }
        out = RandomShift(prob=1.0)(sample)
        self.assertEqual(out['img'].shape[:2], (h, w))
        self.assertEqual(out['gt_bboxes_labels'].shape[0],
                         out['gt_bboxes'].shape[0])
        self.assertEqual(out['gt_bboxes_labels'].dtype, np.int64)
        return out

    def test_transform(self):
        out = self._shift_and_check(use_box_type=False)
        self.assertEqual(out['gt_bboxes'].dtype, np.float32)

    def test_transform_use_box_type(self):
        out = self._shift_and_check(use_box_type=True)
        self.assertEqual(out['gt_bboxes'].dtype, torch.float32)

    def test_repr(self):
        """``repr`` lists the default shift parameters."""
        self.assertEqual(
            repr(RandomShift()), ('RandomShift(prob=0.5, '
                                  'max_shift_px=32, '
                                  'filter_thr_px=1)'))
class TestRandomErasing(unittest.TestCase):
    """Tests for ``RandomErasing`` (random patch erasing with annotations)."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        # construct_toy_data is a helper defined elsewhere in this file;
        # poly2mask=True presumably yields bitmap masks — confirm there.
        self.results = construct_toy_data(poly2mask=True)

    def test_transform(self):
        """Erasing with 0 lowers the pixel sum; erasing with 255 raises it."""
        transform = RandomErasing(
            n_patches=(1, 5), ratio=(0.4, 0.8), img_border_value=0)
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].sum() < self.results['img'].sum())
        # ratio close to 1 erases (almost) the full image with value 255
        transform = RandomErasing(
            n_patches=1, ratio=0.999, img_border_value=255)
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].sum() > self.results['img'].sum())
        # test empty results
        empty_results = copy.deepcopy(self.results)
        empty_results['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32)
        empty_results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64)
        empty_results['gt_masks'] = empty_results['gt_masks'][False]
        empty_results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool)
        empty_results['gt_seg_map'] = np.ones_like(
            empty_results['gt_seg_map']) * 255
        results = transform(copy.deepcopy(empty_results))
        self.assertTrue(results['img'].sum() > self.results['img'].sum())

    def test_transform_use_box_type(self):
        """Same checks with ``gt_bboxes`` stored as ``HorizontalBoxes``."""
        src_results = copy.deepcopy(self.results)
        src_results['gt_bboxes'] = HorizontalBoxes(src_results['gt_bboxes'])
        transform = RandomErasing(
            n_patches=(1, 5), ratio=(0.4, 0.8), img_border_value=0)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(results['img'].sum() < src_results['img'].sum())
        transform = RandomErasing(
            n_patches=1, ratio=0.999, img_border_value=255)
        results = transform(copy.deepcopy(src_results))
        self.assertTrue(results['img'].sum() > src_results['img'].sum())
        # test empty results
        empty_results = copy.deepcopy(src_results)
        empty_results['gt_bboxes'] = HorizontalBoxes([], dtype=torch.float32)
        empty_results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64)
        empty_results['gt_masks'] = empty_results['gt_masks'][False]
        empty_results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool)
        empty_results['gt_seg_map'] = np.ones_like(
            empty_results['gt_seg_map']) * 255
        results = transform(copy.deepcopy(empty_results))
        self.assertTrue(results['img'].sum() > src_results['img'].sum())

    def test_repr(self):
        """``repr`` lists patch count, ratio and all border/fill values."""
        transform = RandomErasing(n_patches=(1, 5), ratio=(0, 0.2))
        self.assertEqual(
            repr(transform), ('RandomErasing(n_patches=(1, 5), '
                              'ratio=(0, 0.2), '
                              'squared=True, '
                              'bbox_erased_thr=0.9, '
                              'img_border_value=128, '
                              'mask_border_value=0, '
                              'seg_ignore_label=255)'))
| 72,142 | 41.139603 | 79 | py |
ERD | ERD-main/tests/test_apis/test_inference.py | import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
# TODO: Waiting to fix multiple call error bug
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
    """Smoke-test ``init_detector`` with str, Path and invalid config input."""
    assert all(device in ['cpu', 'cuda'] for device in devices)

    # Resolve the config path relative to the repository root.
    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    project_dir = os.path.join(project_dir, '..')
    config_file = os.path.join(project_dir, config)

    # Override the backbone so a lightweight ResNet-18 is built instead.
    cfg_options = dict(
        model=dict(
            backbone=dict(
                depth=18,
                init_cfg=dict(
                    type='Pretrained', checkpoint='torchvision://resnet18'))))

    for device in devices:
        if device == 'cuda' and not torch.cuda.is_available():
            pytest.skip('test requires GPU and torch+cuda')

        # config given as a plain string, together with cfg_options
        model = init_detector(
            config_file, device=device, cfg_options=cfg_options)

        # config given as a :obj:`Path`
        config_path_object = Path(config_file)
        model = init_detector(config_path_object, device=device)

        # any other type must be rejected with a TypeError
        with pytest.raises(TypeError):
            model = init_detector([config_file])  # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
    """Run inference on a single image and on a batch of two images."""
    assert all(device in ['cpu', 'cuda'] for device in devices)

    project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    project_dir = os.path.join(project_dir, '..')
    config_file = os.path.join(project_dir, config)

    # Two small, seeded random test images.
    rng = np.random.RandomState(0)
    img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)

    for device in devices:
        if device == 'cuda' and not torch.cuda.is_available():
            pytest.skip('test requires GPU and torch+cuda')
        model = init_detector(config_file, device=device)

        # a single image yields a single DetDataSample
        single = inference_detector(model, img1)
        assert isinstance(single, DetDataSample)

        # a list of images yields one result per image
        batch = inference_detector(model, [img1, img2])
        assert isinstance(batch, list) and len(batch) == 2
| 2,737 | 34.558442 | 78 | py |
ERD | ERD-main/tests/test_apis/test_det_inferencer.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase, mock
from unittest.mock import Mock, patch
import mmcv
import mmengine
import numpy as np
import torch
from mmengine.structures import InstanceData
from mmengine.utils import is_list_of
from parameterized import parameterized
from mmdet.apis import DetInferencer
from mmdet.evaluation.functional import get_classes
from mmdet.structures import DetDataSample
class TestDetInferencer(TestCase):
    """Tests for :class:`DetInferencer`.

    Checkpoint loading is patched to a no-op throughout, so no real weights
    or network access are required.
    """

    @mock.patch('mmengine.infer.infer._load_checkpoint', return_value=None)
    def test_init(self, mock_load_checkpoint):
        # NOTE: the injected patch argument was previously named ``mock``,
        # which shadowed the ``unittest.mock`` module imported at file level.
        # init from metafile
        DetInferencer('rtmdet-t')
        # init from cfg
        DetInferencer('configs/yolox/yolox_tiny_8xb8-300e_coco.py')

    def assert_predictions_equal(self, preds1, preds2):
        """Loosely compare two prediction lists (0.1 tolerance on floats)."""
        for pred1, pred2 in zip(preds1, preds2):
            if 'bboxes' in pred1:
                self.assertTrue(
                    np.allclose(pred1['bboxes'], pred2['bboxes'], 0.1))
            if 'scores' in pred1:
                self.assertTrue(
                    np.allclose(pred1['scores'], pred2['scores'], 0.1))
            if 'labels' in pred1:
                self.assertTrue(np.allclose(pred1['labels'], pred2['labels']))
            if 'panoptic_seg_path' in pred1:
                self.assertTrue(
                    pred1['panoptic_seg_path'] == pred2['panoptic_seg_path'])

    @parameterized.expand([
        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'
    ])
    def test_call(self, model):
        """Path/ndarray/dir inputs and batch sizes must agree on results."""
        # single img
        img_path = 'tests/data/color.jpg'

        mock_load = Mock(return_value=None)
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)

        # In the case of not loading the pretrained weight, the category
        # defaults to COCO 80, so it needs to be replaced.
        if model == 'panoptic_fpn_r50_fpn_1x_coco':
            inferencer.visualizer.dataset_meta = {
                'classes': get_classes('coco_panoptic'),
                'palette': 'random'
            }

        res_path = inferencer(img_path, return_vis=True)
        # ndarray
        img = mmcv.imread(img_path)
        res_ndarray = inferencer(img, return_vis=True)
        self.assert_predictions_equal(res_path['predictions'],
                                      res_ndarray['predictions'])
        self.assertIn('visualization', res_path)
        self.assertIn('visualization', res_ndarray)

        # multiple images
        img_paths = ['tests/data/color.jpg', 'tests/data/gray.jpg']
        res_path = inferencer(img_paths, return_vis=True)
        # list of ndarray
        imgs = [mmcv.imread(p) for p in img_paths]
        res_ndarray = inferencer(imgs, return_vis=True)
        self.assert_predictions_equal(res_path['predictions'],
                                      res_ndarray['predictions'])
        self.assertIn('visualization', res_path)
        self.assertIn('visualization', res_ndarray)

        # img dir, test different batch sizes
        img_dir = 'tests/data/VOCdevkit/VOC2007/JPEGImages/'
        res_bs1 = inferencer(img_dir, batch_size=1, return_vis=True)
        res_bs3 = inferencer(img_dir, batch_size=3, return_vis=True)
        self.assert_predictions_equal(res_bs1['predictions'],
                                      res_bs3['predictions'])

        # There is a jitter operation when the mask is drawn,
        # so it cannot be asserted.
        if model == 'rtmdet-t':
            for res_bs1_vis, res_bs3_vis in zip(res_bs1['visualization'],
                                                res_bs3['visualization']):
                self.assertTrue(np.allclose(res_bs1_vis, res_bs3_vis))

    @parameterized.expand([
        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'
    ])
    def test_visualize(self, model):
        """Visualizations must be written under ``<out_dir>/vis/``."""
        img_paths = ['tests/data/color.jpg', 'tests/data/gray.jpg']

        mock_load = Mock(return_value=None)
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)

        # In the case of not loading the pretrained weight, the category
        # defaults to COCO 80, so it needs to be replaced.
        if model == 'panoptic_fpn_r50_fpn_1x_coco':
            inferencer.visualizer.dataset_meta = {
                'classes': get_classes('coco_panoptic'),
                'palette': 'random'
            }

        with tempfile.TemporaryDirectory() as tmp_dir:
            inferencer(img_paths, out_dir=tmp_dir)
            # ``img_name`` holds an output file name (it was previously
            # named ``img_dir``, which was misleading).
            for img_name in ['color.jpg', 'gray.jpg']:
                self.assertTrue(osp.exists(osp.join(tmp_dir, 'vis', img_name)))

    @parameterized.expand([
        'rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'
    ])
    def test_postprocess(self, model):
        """Datasample return mode and dumped JSON predictions must match."""
        # return_datasample
        img_path = 'tests/data/color.jpg'

        mock_load = Mock(return_value=None)
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)

        # In the case of not loading the pretrained weight, the category
        # defaults to COCO 80, so it needs to be replaced.
        if model == 'panoptic_fpn_r50_fpn_1x_coco':
            inferencer.visualizer.dataset_meta = {
                'classes': get_classes('coco_panoptic'),
                'palette': 'random'
            }

        res = inferencer(img_path, return_datasample=True)
        self.assertTrue(is_list_of(res['predictions'], DetDataSample))

        with tempfile.TemporaryDirectory() as tmp_dir:
            res = inferencer(img_path, out_dir=tmp_dir, no_save_pred=False)
            dumped_res = mmengine.load(
                osp.join(tmp_dir, 'preds', 'color.json'))
            self.assertEqual(res['predictions'][0], dumped_res)

    @mock.patch('mmengine.infer.infer._load_checkpoint', return_value=None)
    def test_pred2dict(self, mock_load_checkpoint):
        """pred2dict must convert an InstanceData sample to plain lists."""
        data_sample = DetDataSample()
        data_sample.pred_instances = InstanceData()
        data_sample.pred_instances.bboxes = np.array([[0, 0, 1, 1]])
        data_sample.pred_instances.labels = np.array([0])
        data_sample.pred_instances.scores = torch.FloatTensor([0.9])
        res = DetInferencer('rtmdet-t').pred2dict(data_sample)
        self.assertListAlmostEqual(res['bboxes'], [[0, 0, 1, 1]])
        self.assertListAlmostEqual(res['labels'], [0])
        self.assertListAlmostEqual(res['scores'], [0.9])

    def assertListAlmostEqual(self, list1, list2, places=7):
        """Recursively compare (possibly nested) lists of numbers."""
        for i in range(len(list1)):
            if isinstance(list1[i], list):
                self.assertListAlmostEqual(list1[i], list2[i], places=places)
            else:
                self.assertAlmostEqual(list1[i], list2[i], places=places)
| 6,856 | 40.307229 | 79 | py |
ERD | ERD-main/tests/test_utils/test_memory.py | import numpy as np
import pytest
import torch
from mmdet.utils import AvoidOOM
from mmdet.utils.memory import cast_tensor_type
def test_avoidoom():
    """retry_if_cuda_oom should match a plain torch.mm on CPU and GPU."""
    tensor = torch.from_numpy(np.random.random((20, 20)))
    if torch.cuda.is_available():
        tensor = tensor.cuda()
        expected = torch.mm(tensor, tensor.transpose(1, 0))

        def _guarded_mm(oom_handler):
            # Run the matmul through the OOM-retry wrapper.
            return oom_handler.retry_if_cuda_oom(torch.mm)(
                tensor, tensor.transpose(1, 0))

        # no OOM path taken: output is bitwise identical
        result = _guarded_mm(AvoidOOM())
        assert result.device == expected.device
        assert result.dtype == expected.dtype
        assert torch.equal(expected, result)

        # test mode: fallback computation, converted back to source type
        result = _guarded_mm(AvoidOOM(test=True))
        assert result.device == expected.device
        assert result.dtype == expected.dtype
        assert torch.allclose(expected, result, 1e-3)

        # test mode again, checked with default tolerances
        result = _guarded_mm(AvoidOOM(test=True))
        assert result.dtype == expected.dtype
        assert result.device == expected.device
        assert torch.allclose(expected, result)

        # to_cpu disabled: output keeps the input dtype/device unchanged
        result = _guarded_mm(AvoidOOM(test=True, to_cpu=False))
        assert result.dtype == expected.dtype
        assert result.device == expected.device
    else:
        expected = torch.mm(tensor, tensor.transpose(1, 0))
        result = AvoidOOM().retry_if_cuda_oom(torch.mm)(
            tensor, tensor.transpose(1, 0))
        assert result.device == expected.device
        assert result.dtype == expected.dtype
        assert torch.equal(expected, result)
def test_cast_tensor_type():
    """cast_tensor_type should convert dtypes/devices through containers."""
    inputs = torch.rand(10)
    if torch.cuda.is_available():
        inputs = inputs.cuda()

    # both src_type and dst_type missing is rejected
    with pytest.raises(AssertionError):
        cast_tensor_type(inputs, src_type=None, dst_type=None)

    # a plain Python float passes through untouched
    out = cast_tensor_type(10., dst_type=torch.half)
    assert out == 10. and isinstance(out, float)

    # Tensor round-trip fp32 -> fp16 -> fp32
    half = cast_tensor_type(inputs, dst_type=torch.half)
    assert half.dtype == torch.half
    restored = cast_tensor_type(half, dst_type=torch.float32)
    assert restored.dtype == torch.float32

    # lists are converted element-wise
    as_list = [inputs, inputs]
    list_out = cast_tensor_type(as_list, dst_type=torch.half)
    assert isinstance(list_out, list) and len(list_out) == len(as_list)
    for item in list_out:
        assert item.dtype == torch.half

    # dicts are converted value-wise
    as_dict = {'test1': inputs, 'test2': inputs}
    dict_out = cast_tensor_type(as_dict, dst_type=torch.half)
    assert isinstance(dict_out, dict) and len(dict_out) == len(as_dict)

    # device round-trip GPU -> CPU -> GPU
    if torch.cuda.is_available():
        cpu_device = torch.empty(0).device
        gpu_device = inputs.device
        assert cast_tensor_type(inputs, dst_type=cpu_device).device == \
            cpu_device
        assert cast_tensor_type(inputs, dst_type=gpu_device).device == \
            gpu_device
| 4,261 | 42.050505 | 75 | py |
ERD | ERD-main/tests/test_utils/test_benchmark.py | import copy
import os
import tempfile
import unittest
import torch
from mmengine import Config, MMLogger
from mmengine.dataset import Compose
from mmengine.model import BaseModel
from torch.utils.data import Dataset
from mmdet.registry import DATASETS, MODELS
from mmdet.utils import register_all_modules
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
InferenceBenchmark)
@MODELS.register_module()
class ToyDetector(BaseModel):
    """Minimal detector stub for benchmarks; accepts anything, does nothing."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, *args, **kwargs):
        """No-op forward; the benchmarks only measure the call overhead."""
        pass
@DATASETS.register_module()
class ToyDataset(Dataset):
    """Tiny in-memory dataset: 12 random 2-d points with all-ones labels."""

    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def __init__(self):
        # Identity pipeline; benchmarks only need a callable here.
        self.pipeline = Compose([lambda x: x])

    def __len__(self):
        return self.data.size(0)

    def get_data_info(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])

    def __getitem__(self, index):
        # Same payload as ``get_data_info``.
        return self.get_data_info(index)
@DATASETS.register_module()
class ToyFullInitDataset(Dataset):
    """Like ``ToyDataset`` but also exposes a no-op ``full_init`` hook."""

    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def __init__(self):
        # Identity pipeline; benchmarks only need a callable here.
        self.pipeline = Compose([lambda x: x])

    def __len__(self):
        return self.data.size(0)

    def get_data_info(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])

    def full_init(self):
        # Deliberately empty: exercises the lazy-init code path.
        pass

    def __getitem__(self, index):
        # Same payload as ``get_data_info``.
        return self.get_data_info(index)
class TestInferenceBenchmark(unittest.TestCase):
    """Exercise ``InferenceBenchmark`` end to end on the toy detector."""

    def setUp(self) -> None:
        register_all_modules()
        # Minimal config: toy model + 12-sample toy dataset.
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1),
                env_cfg=dict(dist_cfg=dict(backend='nccl'))))
        self.max_iter = 10
        self.log_interval = 5

    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_init_and_run(self):
        """Run the benchmark with various env/fp16/logger configurations."""
        # The benchmark needs a real checkpoint file, so dump toy weights.
        checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
        torch.save(ToyDetector().state_dict(), checkpoint_path)

        cfg = copy.deepcopy(self.cfg)
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        results = inference_benchmark.run()
        self.assertTrue(isinstance(results, dict))
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        # Inference benchmarking forces single-image, single-worker loading.
        self.assertTrue(inference_benchmark.data_loader.num_workers == 0)
        self.assertTrue(inference_benchmark.data_loader.batch_size == 1)

        results = inference_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        self.assertTrue(inference_benchmark.data_loader.num_workers == 0)
        self.assertTrue(inference_benchmark.data_loader.batch_size == 1)

        # test repeat: one fps entry per repeat
        results = inference_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)

        # test cudnn_benchmark
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.cudnn_benchmark = True
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)

        # test mp_cfg (multiprocessing start method / OpenCV threads)
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.cudnn_benchmark = True
        cfg.env_cfg.mp_cfg = {
            'mp_start_method': 'fork',
            'opencv_num_threads': 1
        }
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)

        # test fp16
        cfg = copy.deepcopy(self.cfg)
        cfg.fp16 = True
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)

        # test logger: results should also land in the given log file
        logger = MMLogger.get_instance(
            'mmdet', log_file='temp.log', log_level='INFO')
        inference_benchmark = InferenceBenchmark(
            cfg,
            checkpoint_path,
            False,
            False,
            self.max_iter,
            self.log_interval,
            logger=logger)
        inference_benchmark.run(1)
        self.assertTrue(os.path.exists('temp.log'))

        # clean up the temporary checkpoint and log file
        os.remove(checkpoint_path)
        os.remove('temp.log')
class TestDataLoaderBenchmark(unittest.TestCase):
    """Exercise ``DataLoaderBenchmark`` over train/val/test dataloaders."""

    def setUp(self) -> None:
        register_all_modules()
        # Three dataloaders with distinct batch_size/num_workers so the
        # assertions below can tell which one was actually built.
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                train_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=True),
                    batch_size=2,
                    num_workers=1),
                val_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=1,
                    num_workers=2),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1),
                env_cfg=dict(dist_cfg=dict(backend='nccl'))))
        self.max_iter = 5
        self.log_interval = 1
        self.num_warmup = 1

    def test_init_and_run(self):
        """Each dataset split must yield the dataloader configured for it."""
        cfg = copy.deepcopy(self.cfg)
        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'train',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        results = dataloader_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        # built from train_dataloader above
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 1)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 2)

        # test repeat: one fps entry per repeat
        results = dataloader_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)

        # test dataset_type input parameters error
        with self.assertRaises(AssertionError):
            DataLoaderBenchmark(cfg, False, 'training', self.max_iter,
                                self.log_interval, self.num_warmup)

        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'val',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        # built from val_dataloader above
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 2)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 1)

        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'test',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        # built from test_dataloader above
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 1)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 3)

        # test mp_cfg (multiprocessing start method / OpenCV threads)
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.mp_cfg = {
            'mp_start_method': 'fork',
            'opencv_num_threads': 1
        }
        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'train',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        dataloader_benchmark.run(1)
class TestDatasetBenchmark(unittest.TestCase):
    """Exercise ``DatasetBenchmark`` over the three dataset splits."""

    def setUp(self) -> None:
        register_all_modules()
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                train_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=True),
                    batch_size=2,
                    num_workers=1),
                val_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=1,
                    num_workers=2),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1)))
        self.max_iter = 5
        self.log_interval = 1
        self.num_warmup = 1

    def test_init_and_run(self):
        """Benchmark each split, invalid split names, and lazy-init data."""
        cfg = copy.deepcopy(self.cfg)
        dataset_benchmark = DatasetBenchmark(cfg, 'train', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        results = dataset_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)

        # test repeat: one fps entry per repeat
        results = dataset_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)

        # test test dataset
        dataset_benchmark = DatasetBenchmark(cfg, 'test', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)

        # test val dataset
        dataset_benchmark = DatasetBenchmark(cfg, 'val', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)

        # test dataset_type input parameters error
        with self.assertRaises(AssertionError):
            DatasetBenchmark(cfg, 'training', self.max_iter, self.log_interval,
                             self.num_warmup)

        # test full_init: a dataset exposing the lazy-init hook also works
        cfg = copy.deepcopy(self.cfg)
        cfg.test_dataloader.dataset = dict(type='ToyFullInitDataset')
        dataset_benchmark = DatasetBenchmark(cfg, 'train', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)
| 11,696 | 36.732258 | 79 | py |
ERD | ERD-main/tests/test_visualization/test_local_visualizer.py | import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
    """Build a random (1, h, w) panoptic id map with ``num_boxes`` instances.

    The background is filled with semantic class 2; each instance writes
    ``(i + 1) * INSTANCE_OFFSET + label`` into its box region.
    """
    sem_seg = np.zeros((h, w), dtype=np.int64) + 2
    bboxes = _rand_bboxes(num_boxes, h, w).int()
    labels = torch.randint(2, (num_boxes, ))
    for i in range(num_boxes):
        # _rand_bboxes returns corner-format (x1, y1, x2, y2) boxes; the
        # previous code unpacked them as (x, y, w, h) — treating corners as
        # sizes — and also shadowed the function's ``h``/``w`` parameters.
        x1, y1, x2, y2 = bboxes[i]
        sem_seg[y1:y2, x1:x2] = (i + 1) * INSTANCE_OFFSET + labels[i]

    # Add the leading channel dimension expected by PixelData.
    return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
    """Tests for ``DetLocalVisualizer.add_datasample`` output images."""

    def test_add_datasample(self):
        h = 12
        w = 10
        num_class = 3
        num_bboxes = 5
        out_file = 'out_file.jpg'

        image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')

        # test gt_instances
        gt_instances = InstanceData()
        gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
        gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
        det_data_sample = DetDataSample()
        det_data_sample.gt_instances = gt_instances

        det_local_visualizer = DetLocalVisualizer()
        det_local_visualizer.add_datasample(
            'image', image, det_data_sample, draw_pred=False)

        # test out_file: drawing only GT keeps the original image size
        det_local_visualizer.add_datasample(
            'image',
            image,
            det_data_sample,
            draw_pred=False,
            out_file=out_file)
        assert os.path.exists(out_file)
        drawn_img = cv2.imread(out_file)
        assert drawn_img.shape == (h, w, 3)
        os.remove(out_file)

        # test gt_instances and pred_instances
        pred_instances = InstanceData()
        pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
        pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
        pred_instances.scores = torch.rand((num_bboxes, ))
        det_data_sample.pred_instances = pred_instances

        # GT and prediction are drawn side by side -> doubled width
        det_local_visualizer.add_datasample(
            'image', image, det_data_sample, out_file=out_file)
        self._assert_image_and_shape(out_file, (h, w * 2, 3))

        det_local_visualizer.add_datasample(
            'image', image, det_data_sample, draw_gt=False, out_file=out_file)
        self._assert_image_and_shape(out_file, (h, w, 3))

        det_local_visualizer.add_datasample(
            'image',
            image,
            det_data_sample,
            draw_pred=False,
            out_file=out_file)
        self._assert_image_and_shape(out_file, (h, w, 3))

        # test gt_panoptic_seg and pred_panoptic_seg
        det_local_visualizer.dataset_meta = dict(classes=('1', '2'))
        gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
        panoptic_seg = PixelData(sem_seg=gt_sem_seg)
        det_data_sample = DetDataSample()
        det_data_sample.gt_panoptic_seg = panoptic_seg

        pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
        panoptic_seg = PixelData(sem_seg=pred_sem_seg)
        det_data_sample.pred_panoptic_seg = panoptic_seg
        det_local_visualizer.add_datasample(
            'image', image, det_data_sample, out_file=out_file)
        self._assert_image_and_shape(out_file, (h, w * 2, 3))

        # class information must be provided
        det_local_visualizer.dataset_meta = {}
        with self.assertRaises(AssertionError):
            det_local_visualizer.add_datasample(
                'image', image, det_data_sample, out_file=out_file)

    def _assert_image_and_shape(self, out_file, out_shape):
        """Assert ``out_file`` exists with shape ``out_shape``, then delete."""
        assert os.path.exists(out_file)
        drawn_img = cv2.imread(out_file)
        assert drawn_img.shape == out_shape
        os.remove(out_file)
ERD | ERD-main/demo/webcam_demo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
    """Build and evaluate the command-line parser for the webcam demo."""
    parser = argparse.ArgumentParser(description='MMDetection webcam demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--device', type=str, default='cuda:0', help='CPU/CUDA device option')
    parser.add_argument(
        '--camera-id', type=int, default=0, help='camera device id')
    parser.add_argument(
        '--score-thr', type=float, default=0.5, help='bbox score threshold')
    return parser.parse_args()
def main():
    """Run the detector on webcam frames until Esc/q/Q is pressed."""
    args = parse_args()

    # build the model from a config file and a checkpoint file
    model = init_detector(
        args.config, args.checkpoint, device=torch.device(args.device))

    # The visualizer reuses the class/palette metadata that init_detector
    # loaded from the checkpoint.
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    visualizer.dataset_meta = model.dataset_meta

    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        result = inference_detector(model, img)

        img = mmcv.imconvert(img, 'bgr', 'rgb')
        visualizer.add_datasample(
            name='result',
            image=img,
            data_sample=result,
            draw_gt=False,
            pred_score_thr=args.score_thr,
            show=False)

        img = visualizer.get_image()
        img = mmcv.imconvert(img, 'bgr', 'rgb')
        cv2.imshow('result', img)

        if cv2.waitKey(1) in (27, ord('q'), ord('Q')):
            break
if __name__ == '__main__':
main()
| 1,930 | 28.257576 | 78 | py |
ERD | ERD-main/demo/video_gpuaccel_demo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
    """Build and evaluate the CLI parser for the GPU-accelerated demo."""
    parser = argparse.ArgumentParser(
        description='MMDetection video demo with GPU acceleration')
    parser.add_argument('video', help='Video file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    parser.add_argument('--out', type=str, help='Output video file')
    parser.add_argument('--show', action='store_true', help='Show video')
    parser.add_argument(
        '--nvdecode', action='store_true', help='Use NVIDIA decoder')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='The interval of show (s), 0 is block')
    return parser.parse_args()
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
                                                               int]) -> dict:
    """Run one dummy frame through the pipeline to learn the padded shape.

    Returns the ``batch_input_shape`` the model will receive, so the video
    reader can resize every frame once, up front.
    """
    cfg = model.cfg
    width, height = ori_wh
    # Feed an in-memory array instead of reading from disk.
    cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
    test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
    dummy = test_pipeline({
        'img': np.zeros((height, width, 3), dtype=np.uint8),
        'img_id': 0
    })
    _, data_samples = model.data_preprocessor([dummy], False)
    return data_samples[0].batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
              ori_shape: Tuple[int, int]) -> dict:
    """Wrap one pre-resized frame into the dict ``model.test_step`` expects.

    Args:
        frame_resize: frame already resized to ``batch_input_shape``, HWC.
        batch_input_shape: padded network input size as (h, w).
        ori_shape: original frame size as (h, w).
    """
    assert frame_resize.shape[:2] == batch_input_shape
    data_sample = DetDataSample()
    # NOTE(review): scale_factor is stored here as (h_ratio, w_ratio);
    # confirm this matches the (w_scale, h_scale) ordering that mmdet's
    # Resize transforms normally record.
    data_sample.set_metainfo({
        'img_shape':
        batch_input_shape,
        'ori_shape':
        ori_shape,
        'scale_factor': (batch_input_shape[0] / ori_shape[0],
                         batch_input_shape[1] / ori_shape[1])
    })
    # HWC uint8 ndarray -> CHW tensor expected by the data preprocessor.
    frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
    data = {'inputs': frame_resize, 'data_sample': data_sample}
    return data
def main():
    """Run GPU-accelerated video inference and save/show the drawn result."""
    args = parse_args()
    # At least one sink is required, otherwise inference would be discarded.
    assert args.out or args.show, \
        ('Please specify at least one operation (save/show the '
         'video) with the argument "--out" or "--show"')
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    # the dataset_meta is loaded from the checkpoint and
    # then pass to the model in init_detector
    visualizer.dataset_meta = model.dataset_meta
    # Choose the hardware-accelerated NVIDIA decoder when requested.
    if args.nvdecode:
        VideoCapture = ffmpegcv.VideoCaptureNV
    else:
        VideoCapture = ffmpegcv.VideoCapture
    video_origin = VideoCapture(args.video)
    # Probe the (h, w) shape the preprocessor will produce for this video.
    batch_input_shape = prefetch_batch_input_shape(
        model, (video_origin.width, video_origin.height))
    ori_shape = (video_origin.height, video_origin.width)
    # ffmpegcv takes (w, h); batch_input_shape is (h, w).
    resize_wh = batch_input_shape[::-1]
    # Second capture stream yields frames pre-resized by ffmpeg itself.
    video_resize = VideoCapture(
        args.video,
        resize=resize_wh,
        resize_keepratio=True,
        resize_keepratioalign='topleft')
    video_writer = None
    if args.out:
        video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
    with torch.no_grad():
        # Iterate the resized stream (model input) and the original stream
        # (visualization background) in lockstep.
        for i, (frame_resize, frame_origin) in enumerate(
                zip(track_iter_progress(video_resize), video_origin)):
            data = pack_data(frame_resize, batch_input_shape, ori_shape)
            result = model.test_step([data])[0]
            visualizer.add_datasample(
                name='video',
                image=frame_origin,
                data_sample=result,
                draw_gt=False,
                show=False,
                pred_score_thr=args.score_thr)
            frame_mask = visualizer.get_image()
            if args.show:
                cv2.namedWindow('video', 0)
                mmcv.imshow(frame_mask, 'video', args.wait_time)
            if args.out:
                video_writer.write(frame_mask)
    # Release all ffmpeg handles before tearing down any display windows.
    if video_writer:
        video_writer.release()
    video_origin.release()
    video_resize.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
| 4,725 | 32.048951 | 77 | py |
ERD | ERD-main/configs/ghm/retinanet_x101-32x4d_fpn_ghm-1x_coco.py | _base_ = './retinanet_r50_fpn_ghm-1x_coco.py'
# Variant config: swap the base config's backbone for an ImageNet-pretrained
# ResNeXt-101 (32x4d); everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 423 | 27.266667 | 76 | py |
ERD | ERD-main/configs/ghm/retinanet_x101-64x4d_fpn_ghm-1x_coco.py | _base_ = './retinanet_r50_fpn_ghm-1x_coco.py'
# Variant config: swap the base config's backbone for an ImageNet-pretrained
# ResNeXt-101 (64x4d); everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 423 | 27.266667 | 76 | py |
ERD | ERD-main/configs/ghm/retinanet_r101_fpn_ghm-1x_coco.py | _base_ = './retinanet_r50_fpn_ghm-1x_coco.py'
# Variant config: deepen the inherited backbone to ResNet-101 with
# torchvision pretrained weights; everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 201 | 27.857143 | 61 | py |
ERD | ERD-main/configs/dcn/faster-rcnn_x101-32x4d-dconv-c3-c5_fpn_1x_coco.py | _base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
# Variant config: ResNeXt-101-32x4d backbone with deformable convolutions
# (DCN) enabled in stages conv3-conv5; the rest inherits from `_base_`.
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        # DCN in the last three stages only (stage 1 stays plain conv).
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 557 | 31.823529 | 76 | py |
ERD | ERD-main/configs/htc/htc-without-semantic_r50_fpn_1x_coco.py | _base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
# Hybrid Task Cascade (HTC) detector without the semantic segmentation
# branch: three cascaded bbox stages with progressively tighter IoU
# thresholds (0.5 / 0.6 / 0.7) and interleaved mask heads with mask
# information flow.
model = dict(
    type='HybridTaskCascade',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='HybridTaskCascadeRoIHead',
        interleaved=True,
        mask_info_flow=True,
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # One bbox head per cascade stage; regression target stds shrink
        # stage by stage as the boxes get more accurate.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # One mask head per stage; only the first disables the conv residual
        # connection (with_conv_res=False).
        mask_head=[
            dict(
                type='HTCMaskHead',
                with_conv_res=False,
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
            dict(
                type='HTCMaskHead',
                num_convs=4,
                in_channels=256,
                conv_out_channels=256,
                num_classes=80,
                loss_mask=dict(
                    type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
        ]),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        # Per-stage RCNN settings; positive IoU threshold rises 0.5->0.6->0.7.
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.001,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
| 7,857 | 34.080357 | 79 | py |
ERD | ERD-main/configs/htc/htc_x101-32x4d_fpn_16xb1-20e_coco.py | _base_ = './htc_r50_fpn_1x_coco.py'
# HTC variant: ResNeXt-101-32x4d backbone, 1 image per GPU, and a 20-epoch
# schedule with LR drops at epochs 16 and 19.
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
train_dataloader = dict(batch_size=1, num_workers=1)
# learning policy
max_epochs = 20
param_scheduler = [
    # Linear warmup over the first 500 iterations.
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[16, 19],
        gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
| 828 | 24.121212 | 79 | py |
ERD | ERD-main/configs/htc/htc_r101_fpn_20e_coco.py | _base_ = './htc_r50_fpn_20e_coco.py'
# Variant config: deepen the inherited backbone to ResNet-101 with
# torchvision pretrained weights; everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 192 | 26.571429 | 61 | py |
ERD | ERD-main/configs/dino/dino-4scale_r50_8xb2-12e_coco.py | _base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
# DINO (4-scale, ResNet-50, 12-epoch) detection config. Inline comments
# record where values deliberately differ from Deformable DETR / DN-DETR.
model = dict(
    type='DINO',
    num_queries=900,  # num_matching_queries
    with_box_refine=True,
    as_two_stage=True,
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=1),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='ChannelMapper',
        in_channels=[512, 1024, 2048],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=dict(type='GN', num_groups=32),
        num_outs=4),
    encoder=dict(
        num_layers=6,
        layer_cfg=dict(
            self_attn_cfg=dict(embed_dims=256, num_levels=4,
                               dropout=0.0),  # 0.1 for DeformDETR
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,  # 1024 for DeformDETR
                ffn_drop=0.0))),  # 0.1 for DeformDETR
    decoder=dict(
        num_layers=6,
        return_intermediate=True,
        layer_cfg=dict(
            self_attn_cfg=dict(embed_dims=256, num_heads=8,
                               dropout=0.0),  # 0.1 for DeformDETR
            cross_attn_cfg=dict(embed_dims=256, num_levels=4,
                                dropout=0.0),  # 0.1 for DeformDETR
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,  # 1024 for DeformDETR
                ffn_drop=0.0)),  # 0.1 for DeformDETR
        post_norm_cfg=None),
    positional_encoding=dict(
        num_feats=128,
        normalize=True,
        offset=0.0,  # -0.5 for DeformDETR
        temperature=20),  # 10000 for DeformDETR
    bbox_head=dict(
        type='DINOHead',
        num_classes=80,
        sync_cls_avg_factor=True,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),  # 2.0 in DeformDETR
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # Denoising-query settings, DINO's key addition over DN-DETR.
    dn_cfg=dict(  # TODO: Move to model.train_cfg ?
        label_noise_scale=0.5,
        box_noise_scale=1.0,  # 0.4 for DN-DETR
        group_cfg=dict(dynamic=True, num_groups=None,
                       num_dn_queries=100)),  # TODO: half num_dn_queries
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            match_costs=[
                dict(type='FocalLossCost', weight=2.0),
                dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
                dict(type='IoUCost', iou_mode='giou', weight=2.0)
            ])),
    test_cfg=dict(max_per_img=300))  # 100 for DeformDETR
# train_pipeline. NOTE: the img_scale and the Pad's size_divisor differ from
# the default setting in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomChoice',
        transforms=[
            [
                dict(
                    type='RandomChoiceResize',
                    scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                            (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                            (736, 1333), (768, 1333), (800, 1333)],
                    keep_ratio=True)
            ],
            [
                dict(
                    type='RandomChoiceResize',
                    # The aspect ratio of every image in the train dataset
                    # is < 7; follow the original implementation.
                    scales=[(400, 4200), (500, 4200), (600, 4200)],
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='RandomChoiceResize',
                    scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                            (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                            (736, 1333), (768, 1333), (800, 1333)],
                    keep_ratio=True)
            ]
        ]),
    dict(type='PackDetInputs')
]
train_dataloader = dict(
    dataset=dict(
        filter_cfg=dict(filter_empty_gt=False), pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(
        type='AdamW',
        lr=0.0001,  # 0.0002 for DeformDETR
        weight_decay=0.0001),
    clip_grad=dict(max_norm=0.1, norm_type=2),
    paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})
)  # custom_keys contains sampling_offsets and reference_points in DeformDETR # noqa
# learning policy
max_epochs = 12
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[11],
        gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
| 5,783 | 34.268293 | 85 | py |
ERD | ERD-main/configs/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_amp-lsj-100e_coco.py | _base_ = 'mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' # noqa
# Enable automatic-mixed-precision training by replacing the base config's
# optimizer wrapper with AmpOptimWrapper; all other settings inherit.
optim_wrapper = dict(type='AmpOptimWrapper')
| 202 | 39.6 | 89 | py |
ERD | ERD-main/configs/strong_baselines/mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py | _base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
# Strong-baseline Mask R-CNN: trained from scratch (no backbone init) with
# SyncBN everywhere and large-scale-jitter batch padding to 1024x1024.
image_size = (1024, 1024)
batch_augments = [
    dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
    # the model is trained from scratch, so init_cfg is None
    data_preprocessor=dict(
        # pad_size_divisor=32 is unnecessary in training but necessary
        # in testing.
        pad_size_divisor=32,
        batch_augments=batch_augments),
    backbone=dict(
        frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
    neck=dict(norm_cfg=norm_cfg),
    rpn_head=dict(num_convs=2),  # leads to 0.1+ mAP
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=head_norm_cfg),
        mask_head=dict(norm_cfg=head_norm_cfg)))
| 1,123 | 35.258065 | 77 | py |
ERD | ERD-main/configs/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py | _base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
# Strong-baseline Mask R-CNN (caffe-style): BGR input with caffe mean/std,
# SyncBN, scratch training, and large-scale-jitter (0.1x-2.0x) augmentation.
image_size = (1024, 1024)
batch_augments = [
    dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
    # use caffe norm
    data_preprocessor=dict(
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False,
        # pad_size_divisor=32 is unnecessary in training but necessary
        # in testing.
        pad_size_divisor=32,
        batch_augments=batch_augments),
    backbone=dict(
        frozen_stages=-1,
        norm_eval=False,
        norm_cfg=norm_cfg,
        init_cfg=None,
        style='caffe'),
    neck=dict(norm_cfg=norm_cfg),
    rpn_head=dict(num_convs=2),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=head_norm_cfg),
        mask_head=dict(norm_cfg=head_norm_cfg)))
# Large-scale jitter: random resize in a 0.1x-2.0x range, then crop back to
# the fixed training resolution.
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomResize',
        scale=image_size,
        ratio_range=(0.1, 2.0),
        keep_ratio=True),
    dict(
        type='RandomCrop',
        crop_type='absolute_range',
        crop_size=image_size,
        recompute_bbox=True,
        allow_negative_crop=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
| 2,276 | 32 | 76 | py |
ERD | ERD-main/configs/strong_baselines/mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-400e_coco.py | _base_ = './mask-rcnn_r50-caffe_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py' # noqa
# 400-epoch variant of the 100-epoch base config: multiply the dataset
# repeat factor and the warmup length by 4.
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs)
train_dataloader = dict(dataset=dict(times=4 * 4))
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=0.067,
        by_epoch=False,
        begin=0,
        end=500 * 4),
    dict(
        type='MultiStepLR',
        begin=0,
        end=12,
        by_epoch=True,
        milestones=[22, 24],
        gamma=0.1)
]
| 543 | 24.904762 | 91 | py |
ERD | ERD-main/configs/reppoints/reppoints-moment_x101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py | _base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
# Variant config: ResNeXt-101-32x4d backbone with deformable convolutions
# in stages conv3-conv5; everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 560 | 32 | 76 | py |
ERD | ERD-main/configs/reppoints/reppoints-moment_r101_fpn-gn_head-gn_2x_coco.py | _base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
# Variant config: deepen the inherited backbone to ResNet-101 with
# torchvision pretrained weights; everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 215 | 29.857143 | 61 | py |
ERD | ERD-main/configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# RepPoints detector (moment-based point-to-box transform) with a ResNet-50
# + FPN backbone. Two-stage point refinement: 'init' uses a PointAssigner,
# 'refine' uses IoU-based assignment.
model = dict(
    type='RepPointsDetector',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RepPointsHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        point_feat_channels=256,
        stacked_convs=3,
        num_points=9,
        gradient_mul=0.1,
        point_strides=[8, 16, 32, 64, 128],
        point_base_scale=4,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        # Initial point loss is down-weighted relative to the refine stage.
        loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
        loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
        transform_method='moment'),
    # training and testing settings
    train_cfg=dict(
        init=dict(
            assigner=dict(type='PointAssigner', scale=4, pos_num=1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        refine=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
optim_wrapper = dict(optimizer=dict(lr=0.01))
| 2,282 | 29.44 | 79 | py |
ERD | ERD-main/configs/reppoints/reppoints-moment_r101-dconv-c3-c5_fpn-gn_head-gn_2x_coco.py | _base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
# Variant config: ResNet-101 backbone with deformable convolutions in stages
# conv3-conv5; everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        depth=101,
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 338 | 36.666667 | 72 | py |
ERD | ERD-main/configs/gfl/gfl_x101-32x4d-dconv-c4-c5_fpn_ms-2x_coco.py | _base_ = './gfl_r50_fpn_ms-2x_coco.py'
# GFL variant: ResNeXt-101-32x4d backbone with deformable convolutions in
# stages conv4-conv5; everything else inherits from `_base_`.
model = dict(
    type='GFL',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, False, True, True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 580 | 29.578947 | 76 | py |
ERD | ERD-main/configs/gfl/gfl_r101_fpn_ms-2x_coco.py | _base_ = './gfl_r50_fpn_ms-2x_coco.py'
# GFL variant: ResNet-101 backbone with torchvision pretrained weights;
# everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 401 | 27.714286 | 61 | py |
ERD | ERD-main/configs/gfl/gfl_x101-32x4d_fpn_ms-2x_coco.py | _base_ = './gfl_r50_fpn_ms-2x_coco.py'
# GFL variant: ResNeXt-101-32x4d backbone (no DCN); everything else
# inherits from `_base_`.
model = dict(
    type='GFL',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 456 | 25.882353 | 76 | py |
ERD | ERD-main/configs/gfl/gfl_r50_fpn_1x_coco.py | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Generalized Focal Loss (GFL) detector: ResNet-50 + FPN, anchor-free-style
# single anchor per location, quality focal + distribution focal losses.
model = dict(
    type='GFL',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='GFLHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        # Single square anchor per location across five FPN levels.
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
        reg_max=16,
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
| 1,986 | 28.656716 | 79 | py |
ERD | ERD-main/configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py | _base_ = './gfl_r50_fpn_ms-2x_coco.py'
# GFL variant: ResNet-101 backbone with deformable convolutions in stages
# conv3-conv5; everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 524 | 31.8125 | 72 | py |
ERD | ERD-main/configs/tridentnet/tridentnet_r50-caffe_1x_coco.py | _base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TridentNet on top of the Faster R-CNN C4 base: three parallel branches
# with dilations 1/2/3; only branch index 1 is used at test time.
model = dict(
    type='TridentFasterRCNN',
    backbone=dict(
        type='TridentResNet',
        trident_dilations=(1, 2, 3),
        num_branch=3,
        test_branch_idx=1,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1),
    train_cfg=dict(
        rpn_proposal=dict(max_per_img=500),
        rcnn=dict(
            sampler=dict(num=128, pos_fraction=0.5,
                         add_gt_as_proposals=False))))
| 748 | 31.565217 | 74 | py |
ERD | ERD-main/configs/tridentnet/tridentnet_r50-caffe_ms-1x_coco.py | _base_ = 'tridentnet_r50-caffe_1x_coco.py'
# Multi-scale training variant: randomly pick one of six short-side scales
# (640-800) each iteration; everything else inherits from `_base_`.
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 505 | 30.625 | 73 | py |
ERD | ERD-main/configs/tridentnet/tridentnet_r50-caffe_ms-3x_coco.py | _base_ = 'tridentnet_r50-caffe_ms-1x_coco.py'
# 3x (36-epoch) schedule variant: warmup for 500 iterations, then LR drops
# at epochs 28 and 34.
# learning rate
max_epochs = 36
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[28, 34],
        gamma=0.1)
]
| 431 | 21.736842 | 79 | py |
ERD | ERD-main/configs/paa/paa_r50_fpn_1x_coco.py | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
# PAA (Probabilistic Anchor Assignment) detector: ResNet-50 + FPN, IoU-based
# score voting, GIoU regression on decoded boxes.
model = dict(
    type='PAA',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='PAAHead',
        reg_decoded_bbox=True,
        score_voting=True,
        topk=9,
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
    # training and testing settings
    train_cfg=dict(
        # Deliberately loose 0.1 IoU gate: PAA's probabilistic assignment
        # does the real positive/negative split downstream.
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.1,
            neg_iou_thr=0.1,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
| 2,384 | 28.444444 | 79 | py |
ERD | ERD-main/configs/paa/paa_r101_fpn_ms-3x_coco.py | _base_ = './paa_r50_fpn_ms-3x_coco.py'
# Variant config: deepen the inherited backbone to ResNet-101 with
# torchvision pretrained weights; everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 194 | 26.857143 | 61 | py |
ERD | ERD-main/configs/paa/paa_r101_fpn_1x_coco.py | _base_ = './paa_r50_fpn_1x_coco.py'
# Variant config: deepen the inherited backbone to ResNet-101 with
# torchvision pretrained weights; everything else inherits from `_base_`.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 191 | 26.428571 | 61 | py |
ERD | ERD-main/configs/yolact/yolact_r50_1xb8-55e_coco.py | _base_ = [
'../_base_/datasets/coco_instance.py', '../_base_/default_runtime.py'
]
# YOLACT real-time instance segmentation: ResNet-50 + FPN, 550x550 fixed
# input, prototype-mask head, single-GPU 55-epoch schedule.
img_norm_cfg = dict(
    mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True)
# model settings
input_size = 550
model = dict(
    type='YOLACT',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=img_norm_cfg['mean'],
        std=img_norm_cfg['std'],
        bgr_to_rgb=img_norm_cfg['to_rgb'],
        pad_mask=True),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,  # do not freeze stem
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=False,  # update the statistics of bn
        zero_init_residual=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5,
        upsample_cfg=dict(mode='bilinear')),
    bbox_head=dict(
        type='YOLACTHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        # Anchor strides/centers are derived from the 550-pixel input and
        # the five feature-map sizes [69, 35, 18, 9, 5].
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=3,
            scales_per_octave=1,
            base_sizes=[8, 16, 32, 64, 128],
            ratios=[0.5, 1.0, 2.0],
            strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
            centers=[(550 * 0.5 / x, 550 * 0.5 / x)
                     for x in [69, 35, 18, 9, 5]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
        num_head_convs=1,
        num_protos=32,
        use_ohem=True),
    mask_head=dict(
        type='YOLACTProtonet',
        in_channels=256,
        num_protos=32,
        num_classes=80,
        max_masks_to_train=100,
        loss_mask_weight=6.125,
        with_seg_branch=True,
        loss_segm=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        sampler=dict(type='PseudoSampler'),  # YOLACT should use PseudoSampler
        # smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        mask_thr=0.5,
        iou_thr=0.5,
        top_k=200,
        max_per_img=100,
        mask_thr_binary=0.5))
# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=8,
    num_workers=4,
    batch_sampler=None,
    dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
max_epochs = 55
# training schedule for 55e
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
    dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[20, 42, 49, 52],
        gamma=0.1)
]
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
    dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (1 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
| 5,373 | 30.426901 | 79 | py |
ERD | ERD-main/configs/yolact/yolact_r101_1xb8-55e_coco.py | _base_ = './yolact_r50_1xb8-55e_coco.py'
# Override the inherited YOLACT-R50 config: swap the backbone to a
# torchvision-pretrained ResNet-101; everything else is inherited unchanged.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 197 | 23.75 | 61 | py |
ERD | ERD-main/configs/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py | _base_ = ['../_base_/default_runtime.py']
# CrowdDet (R50-FPN) detector definition for crowded pedestrian detection.
model = dict(
    type='CrowdDet',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        # Caffe-style mean/std in BGR channel order (bgr_to_rgb=False).
        mean=[103.53, 116.28, 123.675],
        std=[57.375, 57.12, 58.395],
        bgr_to_rgb=False,
        pad_size_divisor=64,
        # This option is set according to https://github.com/Purkialo/CrowdDet/
        # blob/master/lib/data/CrowdHuman.py The images in the entire batch are
        # resize together.
        batch_augments=[
            dict(type='BatchResize', scale=(1400, 800), pad_size_divisor=64)
        ]),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        upsample_cfg=dict(mode='bilinear', align_corners=False)),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            # Tall aspect ratios only (1:1 to 3:1) for person-shaped boxes.
            ratios=[1.0, 2.0, 3.0],
            strides=[4, 8, 16, 32, 64],
            centers=[(8, 8), (8, 8), (8, 8), (8, 8), (8, 8)]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0.0, 0.0, 0.0, 0.0],
            target_stds=[1.0, 1.0, 1.0, 1.0],
            # Do not clip proposals at image borders (crowded scenes often
            # have heavily truncated people).
            clip_border=False),
        loss_cls=dict(type='CrossEntropyLoss', loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        # RoI head that predicts multiple instances per proposal (CrowdDet's
        # core idea for overlapping objects).
        type='MultiInstanceRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                type='RoIAlign',
                output_size=7,
                sampling_ratio=-1,
                aligned=True,
                use_torchvision=True),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='MultiInstanceBBoxHead',
            with_refine=False,
            num_shared_fcs=2,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            # Single foreground class (CrowdHuman is person-only).
            num_classes=1,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            # reduction='none': per-sample losses are kept so the head can
            # combine them over its multiple instance predictions.
            loss_cls=dict(
                type='CrossEntropyLoss',
                loss_weight=1.0,
                use_sigmoid=False,
                reduction='none'),
            loss_bbox=dict(
                type='SmoothL1Loss', loss_weight=1.0, reduction='none'))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=(0.3, 0.7),
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2400,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=2),
        rcnn=dict(
            # Assigner/sampler variants that hand each proposal a *set* of
            # ground-truth instances instead of a single match.
            assigner=dict(
                type='MultiInstanceAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.3,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='MultiInsRandomSampler',
                num=512,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1200,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=2),
        rcnn=dict(
            nms=dict(type='nms', iou_threshold=0.5),
            score_thr=0.01,
            # High detection cap per image — crowded scenes can contain
            # hundreds of people.
            max_per_img=500)))
# Dataset settings for CrowdHuman (.odgt annotation format).
dataset_type = 'CrowdHumanDataset'
data_root = 'data/CrowdHuman/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/tracking/CrowdHuman/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/tracking/',
#         'data/': 's3://openmmlab/datasets/tracking/'
#     }))
backend_args = None
# NOTE: there is deliberately no Resize step here — resizing is done on the
# whole batch by the model's `BatchResize` batch augment (see the model's
# data_preprocessor above).
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1400, 800), keep_ratio=True),
    # avoid bboxes being resized
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=2,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=None,  # The 'batch_sampler' may decrease the precision
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotation_train.odgt',
        data_prefix=dict(img='Images/'),
        # Skip images without GT and those smaller than 32 px.
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotation_val.odgt',
        data_prefix=dict(img='Images/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader
# CrowdHuman benchmark metrics: AP, MR (log-average miss rate) and
# JI (Jaccard Index), as defined by the CrowdHuman evaluation protocol.
val_evaluator = dict(
    type='CrowdHumanMetric',
    ann_file=data_root + 'annotation_val.odgt',
    metric=['AP', 'MR', 'JI'],
    backend_args=backend_args)
test_evaluator = val_evaluator
# 30-epoch schedule with validation every epoch.
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=30, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# Linear warmup for the first 800 iterations, then x0.1 decay at epochs 24/27.
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=800),
    dict(
        type='MultiStepLR',
        begin=0,
        end=30,
        by_epoch=True,
        milestones=[24, 27],
        gamma=0.1)
]
# optimizer
# base_batch_size = (8 GPUs) x (2 samples per GPU); LR is auto-scaled
# proportionally when the effective batch size differs.
auto_scale_lr = dict(base_batch_size=16)
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001))
| 7,480 | 31.811404 | 79 | py |
ERD | ERD-main/configs/mask2former/mask2former_r101_8xb2-lsj-50e_coco.py | _base_ = ['./mask2former_r50_8xb2-lsj-50e_coco.py']
# Override the inherited Mask2Former-R50 (instance) config: use a
# torchvision-pretrained ResNet-101 backbone; everything else is inherited.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 208 | 25.125 | 61 | py |
ERD | ERD-main/configs/mask2former/mask2former_r101_8xb2-lsj-50e_coco-panoptic.py | _base_ = './mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'
# Override the inherited Mask2Former-R50 (panoptic) config: use a
# torchvision-pretrained ResNet-101 backbone; everything else is inherited.
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
| 215 | 26 | 61 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.