ERD | ERD-main/configs/solov2/solov2_r101-dcn_fpn_ms-3x_coco.py
_base_ = './solov2_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(checkpoint='torchvision://resnet101'),
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
mask_head=dict(
mask_feature_head=dict(conv_cfg=dict(type='DCNv2')),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=True))
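For context, a minimal sketch (not part of the repo) of how a config like this is typically consumed through mmengine's `Runner` API; the `work_dir` value is a placeholder:

from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile('configs/solov2/solov2_r101-dcn_fpn_ms-3x_coco.py')
cfg.work_dir = './work_dirs/solov2_r101_dcn'  # hypothetical output directory
runner = Runner.from_cfg(cfg)  # builds model, dataloaders, hooks from the config
runner.train()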
ERD | ERD-main/configs/solov2/solov2_r50_fpn_1x_coco.py
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='SOLOv2',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_mask=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOV2Head',
num_classes=80,
in_channels=256,
feat_channels=512,
stacked_convs=4,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
mask_feature_head=dict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01), clip_grad=dict(max_norm=35, norm_type=2))
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator
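The head above selects `DiceLoss` with `use_sigmoid=True` and `loss_weight=3.0`. A simplified stand-in (an illustrative sketch, not mmdet's `DiceLoss` implementation) showing what the dice objective computes:

import torch

def dice_loss(pred, target, eps=1e-3, loss_weight=3.0):
    # pred: raw mask logits (N, H, W); target: binary masks (N, H, W)
    pred = pred.sigmoid().flatten(1)        # use_sigmoid=True
    target = target.flatten(1).float()
    inter = (pred * target).sum(dim=1)
    union = pred.sum(dim=1) + target.sum(dim=1)
    dice = (2 * inter + eps) / (union + eps)
    return loss_weight * (1 - dice).mean()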
ERD | ERD-main/configs/solov2/solov2-light_r34_fpn_ms-3x_coco.py
_base_ = './solov2-light_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=34, init_cfg=dict(checkpoint='torchvision://resnet34')),
neck=dict(in_channels=[64, 128, 256, 512]))
ERD | ERD-main/configs/dab_detr/dab-detr_r50_8xb2-50e_coco.py
_base_ = [
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
type='DABDETR',
num_queries=300,
with_random_refpoints=False,
num_patterns=0,
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=1),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='ChannelMapper',
in_channels=[2048],
kernel_size=1,
out_channels=256,
act_cfg=None,
norm_cfg=None,
num_outs=1),
encoder=dict(
num_layers=6,
layer_cfg=dict(
self_attn_cfg=dict(
embed_dims=256, num_heads=8, dropout=0., batch_first=True),
ffn_cfg=dict(
embed_dims=256,
feedforward_channels=2048,
num_fcs=2,
ffn_drop=0.,
act_cfg=dict(type='PReLU')))),
decoder=dict(
num_layers=6,
query_dim=4,
query_scale_type='cond_elewise',
with_modulated_hw_attn=True,
layer_cfg=dict(
self_attn_cfg=dict(
embed_dims=256,
num_heads=8,
attn_drop=0.,
proj_drop=0.,
cross_attn=False),
cross_attn_cfg=dict(
embed_dims=256,
num_heads=8,
attn_drop=0.,
proj_drop=0.,
cross_attn=True),
ffn_cfg=dict(
embed_dims=256,
feedforward_channels=2048,
num_fcs=2,
ffn_drop=0.,
act_cfg=dict(type='PReLU'))),
return_intermediate=True),
positional_encoding=dict(num_feats=128, temperature=20, normalize=True),
bbox_head=dict(
type='DABDETRHead',
num_classes=80,
embed_dims=256,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='HungarianAssigner',
match_costs=[
dict(type='FocalLossCost', weight=2., eps=1e-8),
dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
])),
test_cfg=dict(max_per_img=300))
# train_pipeline, NOTE: the img_scale and the Pad's size_divisor are
# different from the default settings in mmdet.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(
custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))
# learning policy
max_epochs = 50
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[40],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16, enable=False)
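The `paramwise_cfg` above gives backbone parameters a 10x smaller learning rate than the rest of the model. A rough plain-PyTorch sketch of the equivalent parameter grouping (an assumption about the effect, not mmengine's actual implementation):

import torch

def build_param_groups(model, base_lr=1e-4, backbone_lr_mult=0.1):
    backbone, rest = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        (backbone if name.startswith('backbone') else rest).append(param)
    return [
        {'params': backbone, 'lr': base_lr * backbone_lr_mult},
        {'params': rest, 'lr': base_lr},
    ]

# optimizer = torch.optim.AdamW(build_param_groups(model), lr=1e-4, weight_decay=1e-4)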
ERD | ERD-main/configs/lad/lad_r101-paa-r50_fpn_2xb8_coco_1x.py
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
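`teacher_ckpt` points LAD at a pretrained PAA teacher whose label assignments guide the student. A hedged sketch of the teacher-loading idea (intent only, not mmdet's `LAD` code):

import torch

def load_frozen_teacher(teacher: torch.nn.Module, ckpt_path: str) -> torch.nn.Module:
    state = torch.load(ckpt_path, map_location='cpu')
    teacher.load_state_dict(state.get('state_dict', state), strict=False)
    teacher.eval()  # the teacher only produces label assignments
    for p in teacher.parameters():
        p.requires_grad = False
    return teacher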
ERD | ERD-main/configs/lad/lad_r50-paa-r101_fpn_2xb8_coco_1x.py
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
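Both LAD configs train with `AmpOptimWrapper`. A simplified sketch of what a mixed-precision step amounts to, using plain `torch.cuda.amp` rather than mmengine's wrapper:

import torch

scaler = torch.cuda.amp.GradScaler()

def amp_step(model, optimizer, loss_fn, inputs, targets):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = loss_fn(model(inputs), targets)
    scaler.scale(loss).backward()  # scale to avoid fp16 gradient underflow
    scaler.step(optimizer)         # unscales gradients, then steps
    scaler.update()
    return loss.detach()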
ERD | ERD-main/configs/sparse_rcnn/sparse-rcnn_r101_fpn_ms-480-800-3x_coco.py
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD | ERD-main/configs/sparse_rcnn/sparse-rcnn_r101_fpn_300-proposals_crop-ms-480-800-3x_coco.py
_base_ = './sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
ERD | ERD-main/configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 100
model = dict(
type='SparseRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=4),
rpn_head=dict(
type='EmbeddingRPNHead',
num_proposals=num_proposals,
proposal_feature_channel=256),
roi_head=dict(
type='SparseRoIHead',
num_stages=num_stages,
stage_loss_weights=[1] * num_stages,
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='DIIHead',
num_classes=80,
num_ffn_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
in_channels=256,
dropout=0.0,
ffn_act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=False,
target_means=[0., 0., 0., 0.],
target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
]),
# training and testing settings
train_cfg=dict(
rpn=None,
rcnn=[
dict(
assigner=dict(
type='HungarianAssigner',
match_costs=[
dict(type='FocalLossCost', weight=2.0),
dict(type='BBoxL1Cost', weight=5.0, box_format='xyxy'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
]),
sampler=dict(type='PseudoSampler'),
pos_weight=1) for _ in range(num_stages)
]),
test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001),
clip_grad=dict(max_norm=1, norm_type=2))
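The `HungarianAssigner` above combines three weighted costs and solves a bipartite matching between proposals and ground truths. A minimal sketch with SciPy (the cost matrices here are random placeholders; the weights follow the config):

import numpy as np
from scipy.optimize import linear_sum_assignment

def hungarian_assign(cls_cost, bbox_l1_cost, iou_cost):
    # each cost matrix: (num_proposals, num_gts)
    cost = 2.0 * cls_cost + 5.0 * bbox_l1_cost + 2.0 * iou_cost
    pred_idx, gt_idx = linear_sum_assignment(cost)
    return pred_idx, gt_idx  # proposal pred_idx[k] is matched to gt gt_idx[k]

pred_idx, gt_idx = hungarian_assign(
    np.random.rand(100, 5), np.random.rand(100, 5), np.random.rand(100, 5))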
ERD | ERD-main/docs/en/conf.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/main/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The main toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
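`get_version()` above relies on `mmdet/version.py` defining a plain `__version__` string that `exec()` drops into the local namespace. An illustrative sketch of the mechanism (the file contents here are made up):

source = "__version__ = '3.0.0'"  # hypothetical contents of mmdet/version.py
namespace = {}
exec(compile(source, 'version.py', 'exec'), namespace)
print(namespace['__version__'])  # -> 3.0.0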
ERD | ERD-main/docs/zh_cn/conf.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The main toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
ERD | ERD-main/mmdet/apis/inference.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from pathlib import Path
from typing import Optional, Sequence, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.ops import RoIPool
from mmcv.transforms import Compose
from mmengine.config import Config
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner import load_checkpoint
from mmdet.registry import DATASETS
from ..evaluation import get_classes
from ..registry import MODELS
from ..structures import DetDataSample, SampleList
from ..utils import get_test_pipeline_cfg
def init_detector(
config: Union[str, Path, Config],
checkpoint: Optional[str] = None,
palette: str = 'none',
device: str = 'cuda:0',
cfg_options: Optional[dict] = None,
) -> nn.Module:
"""Initialize a detector from config file.
Args:
config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path,
:obj:`Path`, or the config object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
palette (str): Color palette used for visualization. If palette
is stored in checkpoint, use checkpoint's palette first, otherwise
use externally passed palette. Currently, supports 'coco', 'voc',
            'citys' and 'random'. Defaults to 'none'.
device (str): The device where the anchors will be put on.
Defaults to cuda:0.
cfg_options (dict, optional): Options to override some settings in
the used config.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, (str, Path)):
config = Config.fromfile(config)
elif not isinstance(config, Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if cfg_options is not None:
config.merge_from_dict(cfg_options)
elif 'init_cfg' in config.model.backbone:
config.model.backbone.init_cfg = None
init_default_scope(config.get('default_scope', 'mmdet'))
model = MODELS.build(config.model)
model = revert_sync_batchnorm(model)
if checkpoint is None:
warnings.simplefilter('once')
warnings.warn('checkpoint is None, use COCO classes by default.')
model.dataset_meta = {'classes': get_classes('coco')}
else:
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
# Weights converted from elsewhere may not have meta fields.
checkpoint_meta = checkpoint.get('meta', {})
# save the dataset_meta in the model for convenience
if 'dataset_meta' in checkpoint_meta:
# mmdet 3.x, all keys should be lowercase
model.dataset_meta = {
k.lower(): v
for k, v in checkpoint_meta['dataset_meta'].items()
}
elif 'CLASSES' in checkpoint_meta:
# < mmdet 3.x
classes = checkpoint_meta['CLASSES']
model.dataset_meta = {'classes': classes}
else:
warnings.simplefilter('once')
warnings.warn(
'dataset_meta or class names are not saved in the '
'checkpoint\'s meta data, use COCO classes by default.')
model.dataset_meta = {'classes': get_classes('coco')}
# Priority: args.palette -> config -> checkpoint
if palette != 'none':
model.dataset_meta['palette'] = palette
else:
test_dataset_cfg = copy.deepcopy(config.test_dataloader.dataset)
# lazy init. We only need the metainfo.
test_dataset_cfg['lazy_init'] = True
metainfo = DATASETS.build(test_dataset_cfg).metainfo
cfg_palette = metainfo.get('palette', None)
if cfg_palette is not None:
model.dataset_meta['palette'] = cfg_palette
else:
if 'palette' not in model.dataset_meta:
warnings.warn(
'palette does not exist, random is used by default. '
'You can also set the palette to customize.')
model.dataset_meta['palette'] = 'random'
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
ImagesType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]]
def inference_detector(
model: nn.Module,
imgs: ImagesType,
test_pipeline: Optional[Compose] = None
) -> Union[DetDataSample, SampleList]:
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str, ndarray, Sequence[str/ndarray]):
Either image files or loaded images.
test_pipeline (:obj:`Compose`): Test pipeline.
Returns:
:obj:`DetDataSample` or list[:obj:`DetDataSample`]:
If imgs is a list or tuple, the same length list type results
will be returned, otherwise return the detection results directly.
"""
if isinstance(imgs, (list, tuple)):
is_batch = True
else:
imgs = [imgs]
is_batch = False
cfg = model.cfg
if test_pipeline is None:
cfg = cfg.copy()
test_pipeline = get_test_pipeline_cfg(cfg)
if isinstance(imgs[0], np.ndarray):
# Calling this method across libraries will result
# in module unregistered error if not prefixed with mmdet.
test_pipeline[0].type = 'mmdet.LoadImageFromNDArray'
test_pipeline = Compose(test_pipeline)
if model.data_preprocessor.device.type == 'cpu':
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
result_list = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# TODO: remove img_id.
data_ = dict(img=img, img_id=0)
else:
# TODO: remove img_id.
data_ = dict(img_path=img, img_id=0)
# build the data pipeline
data_ = test_pipeline(data_)
data_['inputs'] = [data_['inputs']]
data_['data_samples'] = [data_['data_samples']]
# forward the model
with torch.no_grad():
results = model.test_step(data_)[0]
result_list.append(results)
if not is_batch:
return result_list[0]
else:
return result_list
# TODO: Awaiting refactoring
async def async_inference_detector(model, imgs):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
        imgs (str | ndarray): Either image files or loaded images.
Returns:
Awaitable detection results.
"""
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
cfg = model.cfg
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromNDArray'
# cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
for m in model.modules():
assert not isinstance(
m,
RoIPool), 'CPU inference with RoIPool is not supported currently.'
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
results = await model.aforward_test(data, rescale=True)
return results
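Typical usage of the two APIs above (paths are placeholders; with `checkpoint=None` the model is randomly initialized and COCO class names are assumed):

from mmdet.apis import inference_detector, init_detector

model = init_detector(
    'configs/solov2/solov2_r50_fpn_1x_coco.py', checkpoint=None, device='cpu')
result = inference_detector(model, 'demo.jpg')  # single image -> DetDataSample
print(result.pred_instances.scores.shape)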
ERD | ERD-main/mmdet/apis/det_inferencer.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import warnings
from typing import Dict, Iterable, List, Optional, Sequence, Union
import mmcv
import mmengine
import numpy as np
import torch.nn as nn
from mmengine.dataset import Compose
from mmengine.fileio import (get_file_backend, isdir, join_path,
list_dir_or_file)
from mmengine.infer.infer import BaseInferencer, ModelType
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner.checkpoint import _load_checkpoint_to_model
from mmengine.visualization import Visualizer
from rich.progress import track
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.registry import DATASETS
from mmdet.structures import DetDataSample
from mmdet.structures.mask import encode_mask_results, mask2bbox
from mmdet.utils import ConfigType
from ..evaluation import get_classes
try:
from panopticapi.evaluation import VOID
from panopticapi.utils import id2rgb
except ImportError:
id2rgb = None
VOID = None
InputType = Union[str, np.ndarray]
InputsType = Union[InputType, Sequence[InputType]]
PredType = List[DetDataSample]
ImgType = Union[np.ndarray, Sequence[np.ndarray]]
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
class DetInferencer(BaseInferencer):
"""Object Detection Inferencer.
Args:
model (str, optional): Path to the config file or the model name
defined in metafile. For example, it could be
"rtmdet-s" or 'rtmdet_s_8xb32-300e_coco' or
"configs/rtmdet/rtmdet_s_8xb32-300e_coco.py".
If model is not specified, user must provide the
`weights` saved by MMEngine which contains the config string.
Defaults to None.
weights (str, optional): Path to the checkpoint. If it is not specified
and model is a model name of metafile, the weights will be loaded
from metafile. Defaults to None.
device (str, optional): Device to run inference. If None, the available
device will be automatically used. Defaults to None.
scope (str, optional): The scope of the model. Defaults to mmdet.
palette (str): Color palette used for visualization. The order of
priority is palette -> config -> checkpoint. Defaults to 'none'.
"""
preprocess_kwargs: set = set()
forward_kwargs: set = set()
visualize_kwargs: set = {
'return_vis',
'show',
'wait_time',
'draw_pred',
'pred_score_thr',
'img_out_dir',
'no_save_vis',
}
postprocess_kwargs: set = {
'print_result',
'pred_out_dir',
'return_datasample',
'no_save_pred',
}
def __init__(self,
model: Optional[Union[ModelType, str]] = None,
weights: Optional[str] = None,
device: Optional[str] = None,
scope: Optional[str] = 'mmdet',
palette: str = 'none') -> None:
# A global counter tracking the number of images processed, for
# naming of the output images
self.num_visualized_imgs = 0
self.num_predicted_imgs = 0
self.palette = palette
init_default_scope(scope)
super().__init__(
model=model, weights=weights, device=device, scope=scope)
self.model = revert_sync_batchnorm(self.model)
def _load_weights_to_model(self, model: nn.Module,
checkpoint: Optional[dict],
cfg: Optional[ConfigType]) -> None:
"""Loading model weights and meta information from cfg and checkpoint.
Args:
model (nn.Module): Model to load weights and meta information.
checkpoint (dict, optional): The loaded checkpoint.
cfg (Config or ConfigDict, optional): The loaded config.
"""
if checkpoint is not None:
_load_checkpoint_to_model(model, checkpoint)
checkpoint_meta = checkpoint.get('meta', {})
# save the dataset_meta in the model for convenience
if 'dataset_meta' in checkpoint_meta:
# mmdet 3.x, all keys should be lowercase
model.dataset_meta = {
k.lower(): v
for k, v in checkpoint_meta['dataset_meta'].items()
}
elif 'CLASSES' in checkpoint_meta:
# < mmdet 3.x
classes = checkpoint_meta['CLASSES']
model.dataset_meta = {'classes': classes}
else:
warnings.warn(
'dataset_meta or class names are not saved in the '
'checkpoint\'s meta data, use COCO classes by default.')
model.dataset_meta = {'classes': get_classes('coco')}
else:
warnings.warn('Checkpoint is not loaded, and the inference '
'result is calculated by the randomly initialized '
'model!')
warnings.warn('weights is None, use COCO classes by default.')
model.dataset_meta = {'classes': get_classes('coco')}
# Priority: args.palette -> config -> checkpoint
if self.palette != 'none':
model.dataset_meta['palette'] = self.palette
else:
test_dataset_cfg = copy.deepcopy(cfg.test_dataloader.dataset)
# lazy init. We only need the metainfo.
test_dataset_cfg['lazy_init'] = True
metainfo = DATASETS.build(test_dataset_cfg).metainfo
cfg_palette = metainfo.get('palette', None)
if cfg_palette is not None:
model.dataset_meta['palette'] = cfg_palette
else:
if 'palette' not in model.dataset_meta:
warnings.warn(
'palette does not exist, random is used by default. '
'You can also set the palette to customize.')
model.dataset_meta['palette'] = 'random'
def _init_pipeline(self, cfg: ConfigType) -> Compose:
"""Initialize the test pipeline."""
pipeline_cfg = cfg.test_dataloader.dataset.pipeline
# For inference, the key of ``img_id`` is not used.
if 'meta_keys' in pipeline_cfg[-1]:
pipeline_cfg[-1]['meta_keys'] = tuple(
meta_key for meta_key in pipeline_cfg[-1]['meta_keys']
if meta_key != 'img_id')
load_img_idx = self._get_transform_idx(pipeline_cfg,
'LoadImageFromFile')
if load_img_idx == -1:
raise ValueError(
'LoadImageFromFile is not found in the test pipeline')
pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'
return Compose(pipeline_cfg)
def _get_transform_idx(self, pipeline_cfg: ConfigType, name: str) -> int:
"""Returns the index of the transform in a pipeline.
If the transform is not found, returns -1.
"""
for i, transform in enumerate(pipeline_cfg):
if transform['type'] == name:
return i
return -1
def _init_visualizer(self, cfg: ConfigType) -> Optional[Visualizer]:
"""Initialize visualizers.
Args:
cfg (ConfigType): Config containing the visualizer information.
Returns:
Visualizer or None: Visualizer initialized with config.
"""
visualizer = super()._init_visualizer(cfg)
visualizer.dataset_meta = self.model.dataset_meta
return visualizer
def _inputs_to_list(self, inputs: InputsType) -> list:
"""Preprocess the inputs to a list.
Preprocess inputs to a list according to its type:
- list or tuple: return inputs
- str:
- Directory path: return all files in the directory
- other cases: return a list containing the string. The string
could be a path to file, a url or other types of string according
to the task.
Args:
inputs (InputsType): Inputs for the inferencer.
Returns:
list: List of input for the :meth:`preprocess`.
"""
if isinstance(inputs, str):
backend = get_file_backend(inputs)
if hasattr(backend, 'isdir') and isdir(inputs):
# Backends like HttpsBackend do not implement `isdir`, so only
# those backends that implement `isdir` could accept the inputs
# as a directory
filename_list = list_dir_or_file(
inputs, list_dir=False, suffix=IMG_EXTENSIONS)
inputs = [
join_path(inputs, filename) for filename in filename_list
]
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
return list(inputs)
def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):
"""Process the inputs into a model-feedable format.
Customize your preprocess by overriding this method. Preprocess should
return an iterable object, of which each item will be used as the
input of ``model.test_step``.
        ``BaseInferencer.preprocess`` will return an iterable of chunked data,
which will be used in __call__ like this:
.. code-block:: python
def __call__(self, inputs, batch_size=1, **kwargs):
chunked_data = self.preprocess(inputs, batch_size, **kwargs)
for batch in chunked_data:
preds = self.forward(batch, **kwargs)
Args:
inputs (InputsType): Inputs given by user.
batch_size (int): batch size. Defaults to 1.
Yields:
Any: Data processed by the ``pipeline`` and ``collate_fn``.
"""
chunked_data = self._get_chunk_data(inputs, batch_size)
yield from map(self.collate_fn, chunked_data)
def _get_chunk_data(self, inputs: Iterable, chunk_size: int):
"""Get batch data from inputs.
Args:
inputs (Iterable): An iterable dataset.
chunk_size (int): Equivalent to batch size.
Yields:
list: batch data.
"""
inputs_iter = iter(inputs)
while True:
try:
chunk_data = []
for _ in range(chunk_size):
inputs_ = next(inputs_iter)
chunk_data.append((inputs_, self.pipeline(inputs_)))
yield chunk_data
except StopIteration:
if chunk_data:
yield chunk_data
break
    # TODO: Video and webcam inputs are currently not supported, and they
    # may consume too much memory if your input folder has a lot of images.
    # This will be optimized later.
def __call__(self,
inputs: InputsType,
batch_size: int = 1,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
no_save_vis: bool = False,
draw_pred: bool = True,
pred_score_thr: float = 0.3,
return_datasample: bool = False,
print_result: bool = False,
no_save_pred: bool = True,
out_dir: str = '',
**kwargs) -> dict:
"""Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer.
batch_size (int): Inference batch size. Defaults to 1.
show (bool): Whether to display the visualization results in a
popup window. Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
no_save_vis (bool): Whether to force not to save prediction
vis results. Defaults to False.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
pred_score_thr (float): Minimum score of bboxes to draw.
Defaults to 0.3.
return_datasample (bool): Whether to return results as
:obj:`DetDataSample`. Defaults to False.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
no_save_pred (bool): Whether to force not to save prediction
results. Defaults to True.
            out_dir (str): Dir to save the inference results or
                visualization. If left as empty, no file will be saved.
                Defaults to ''.
**kwargs: Other keyword arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results.
"""
(
preprocess_kwargs,
forward_kwargs,
visualize_kwargs,
postprocess_kwargs,
) = self._dispatch_kwargs(**kwargs)
ori_inputs = self._inputs_to_list(inputs)
inputs = self.preprocess(
ori_inputs, batch_size=batch_size, **preprocess_kwargs)
results_dict = {'predictions': [], 'visualization': []}
for ori_inputs, data in track(inputs, description='Inference'):
preds = self.forward(data, **forward_kwargs)
visualization = self.visualize(
ori_inputs,
preds,
return_vis=return_vis,
show=show,
wait_time=wait_time,
draw_pred=draw_pred,
pred_score_thr=pred_score_thr,
no_save_vis=no_save_vis,
img_out_dir=out_dir,
**visualize_kwargs)
results = self.postprocess(
preds,
visualization,
return_datasample=return_datasample,
print_result=print_result,
no_save_pred=no_save_pred,
pred_out_dir=out_dir,
**postprocess_kwargs)
results_dict['predictions'].extend(results['predictions'])
if results['visualization'] is not None:
results_dict['visualization'].extend(results['visualization'])
return results_dict
def visualize(self,
inputs: InputsType,
preds: PredType,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
draw_pred: bool = True,
pred_score_thr: float = 0.3,
no_save_vis: bool = False,
img_out_dir: str = '',
**kwargs) -> Union[List[np.ndarray], None]:
"""Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[:obj:`DetDataSample`]): Predictions of the model.
return_vis (bool): Whether to return the visualization result.
Defaults to False.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
pred_score_thr (float): Minimum score of bboxes to draw.
Defaults to 0.3.
no_save_vis (bool): Whether to force not to save prediction
vis results. Defaults to False.
img_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
Returns:
List[np.ndarray] or None: Returns visualization results only if
applicable.
"""
if no_save_vis is True:
img_out_dir = ''
if not show and img_out_dir == '' and not return_vis:
return None
if self.visualizer is None:
            raise ValueError('Visualization needs the "visualizer" term '
                             'defined in the config, but got None.')
results = []
for single_input, pred in zip(inputs, preds):
if isinstance(single_input, str):
img_bytes = mmengine.fileio.get(single_input)
img = mmcv.imfrombytes(img_bytes)
img = img[:, :, ::-1]
img_name = osp.basename(single_input)
elif isinstance(single_input, np.ndarray):
img = single_input.copy()
img_num = str(self.num_visualized_imgs).zfill(8)
img_name = f'{img_num}.jpg'
else:
raise ValueError('Unsupported input type: '
f'{type(single_input)}')
out_file = osp.join(img_out_dir, 'vis',
img_name) if img_out_dir != '' else None
self.visualizer.add_datasample(
img_name,
img,
pred,
show=show,
wait_time=wait_time,
draw_gt=False,
draw_pred=draw_pred,
pred_score_thr=pred_score_thr,
out_file=out_file,
)
results.append(self.visualizer.get_image())
self.num_visualized_imgs += 1
return results
def postprocess(
self,
preds: PredType,
visualization: Optional[List[np.ndarray]] = None,
return_datasample: bool = False,
print_result: bool = False,
no_save_pred: bool = False,
pred_out_dir: str = '',
**kwargs,
) -> Dict:
"""Process the predictions and visualization results from ``forward``
and ``visualize``.
This method should be responsible for the following tasks:
1. Convert datasamples into a json-serializable dict if needed.
2. Pack the predictions and visualization results and return them.
3. Dump or log the predictions.
Args:
preds (List[:obj:`DetDataSample`]): Predictions of the model.
visualization (Optional[np.ndarray]): Visualized predictions.
return_datasample (bool): Whether to use Datasample to store
inference results. If False, dict will be used.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
no_save_pred (bool): Whether to force not to save prediction
results. Defaults to False.
pred_out_dir: Dir to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
Returns:
dict: Inference and visualization results with key ``predictions``
and ``visualization``.
- ``visualization`` (Any): Returned by :meth:`visualize`.
- ``predictions`` (dict or DataSample): Returned by
:meth:`forward` and processed in :meth:`postprocess`.
If ``return_datasample=False``, it usually should be a
json-serializable dict containing only basic data elements such
as strings and numbers.
"""
if no_save_pred is True:
pred_out_dir = ''
result_dict = {}
results = preds
if not return_datasample:
results = []
for pred in preds:
result = self.pred2dict(pred, pred_out_dir)
results.append(result)
elif pred_out_dir != '':
warnings.warn('Currently does not support saving datasample '
'when return_datasample is set to True. '
'Prediction results are not saved!')
# Add img to the results after printing and dumping
result_dict['predictions'] = results
if print_result:
print(result_dict)
result_dict['visualization'] = visualization
return result_dict
# TODO: The data format and fields saved in json need further discussion.
# Maybe should include model name, timestamp, filename, image info etc.
def pred2dict(self,
data_sample: DetDataSample,
pred_out_dir: str = '') -> Dict:
"""Extract elements necessary to represent a prediction into a
dictionary.
It's better to contain only basic data elements such as strings and
numbers in order to guarantee it's json-serializable.
Args:
data_sample (:obj:`DetDataSample`): Predictions of the model.
pred_out_dir: Dir to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
Returns:
dict: Prediction results.
"""
is_save_pred = True
if pred_out_dir == '':
is_save_pred = False
if is_save_pred and 'img_path' in data_sample:
img_path = osp.basename(data_sample.img_path)
img_path = osp.splitext(img_path)[0]
out_img_path = osp.join(pred_out_dir, 'preds',
img_path + '_panoptic_seg.png')
out_json_path = osp.join(pred_out_dir, 'preds', img_path + '.json')
elif is_save_pred:
out_img_path = osp.join(
pred_out_dir, 'preds',
f'{self.num_predicted_imgs}_panoptic_seg.png')
out_json_path = osp.join(pred_out_dir, 'preds',
f'{self.num_predicted_imgs}.json')
self.num_predicted_imgs += 1
result = {}
if 'pred_instances' in data_sample:
masks = data_sample.pred_instances.get('masks')
pred_instances = data_sample.pred_instances.numpy()
result = {
'bboxes': pred_instances.bboxes.tolist(),
'labels': pred_instances.labels.tolist(),
'scores': pred_instances.scores.tolist()
}
if masks is not None:
if pred_instances.bboxes.sum() == 0:
# Fake bbox, such as the SOLO.
bboxes = mask2bbox(masks.cpu()).numpy().tolist()
result['bboxes'] = bboxes
encode_masks = encode_mask_results(pred_instances.masks)
for encode_mask in encode_masks:
if isinstance(encode_mask['counts'], bytes):
encode_mask['counts'] = encode_mask['counts'].decode()
result['masks'] = encode_masks
if 'pred_panoptic_seg' in data_sample:
if VOID is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
pan = data_sample.pred_panoptic_seg.sem_seg.cpu().numpy()[0]
pan[pan % INSTANCE_OFFSET == len(
self.model.dataset_meta['classes'])] = VOID
pan = id2rgb(pan).astype(np.uint8)
if is_save_pred:
mmcv.imwrite(pan[:, :, ::-1], out_img_path)
result['panoptic_seg_path'] = out_img_path
else:
result['panoptic_seg'] = pan
if is_save_pred:
mmengine.dump(result, out_json_path)
return result
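A usage sketch for `DetInferencer`, following the model-name convention from its own docstring (the image path is a placeholder):

from mmdet.apis import DetInferencer

inferencer = DetInferencer(model='rtmdet_s_8xb32-300e_coco', device='cpu')
outputs = inferencer('demo.jpg', out_dir='outputs/', no_save_pred=False)
print(outputs['predictions'][0]['labels'][:5])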
ERD | ERD-main/mmdet/evaluation/metrics/coco_metric.py
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import itertools
import os.path as osp
import tempfile
from collections import OrderedDict
from typing import Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump, get_local_path, load
from mmengine.logging import MMLogger
from terminaltables import AsciiTable
from mmdet.datasets.api_wrappers import COCO, COCOeval
from mmdet.registry import METRICS
from mmdet.structures.mask import encode_mask_results
from ..functional import eval_recalls
@METRICS.register_module()
class CocoMetric(BaseMetric):
"""COCO evaluation metric.
Evaluate AR, AP, and mAP for detection tasks including proposal/box
detection and instance segmentation. Please refer to
https://cocodataset.org/#detection-eval for more details.
Args:
ann_file (str, optional): Path to the coco format annotation file.
If not specified, ground truth annotations from the dataset will
be converted to coco format. Defaults to None.
metric (str | List[str]): Metrics to be evaluated. Valid metrics
include 'bbox', 'segm', 'proposal', and 'proposal_fast'.
Defaults to 'bbox'.
classwise (bool): Whether to evaluate the metric class-wise.
Defaults to False.
proposal_nums (Sequence[int]): Numbers of proposals to be evaluated.
Defaults to (100, 300, 1000).
iou_thrs (float | List[float], optional): IoU threshold to compute AP
and AR. If not specified, IoUs from 0.5 to 0.95 will be used.
Defaults to None.
metric_items (List[str], optional): Metric result names to be
recorded in the evaluation result. Defaults to None.
        format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
Defaults to False.
outfile_prefix (str, optional): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Defaults to None.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
sort_categories (bool): Whether sort categories in annotations. Only
used for `Objects365V1Dataset`. Defaults to False.
"""
default_prefix: Optional[str] = 'coco'
def __init__(self,
ann_file: Optional[str] = None,
metric: Union[str, List[str]] = 'bbox',
classwise: bool = False,
proposal_nums: Sequence[int] = (100, 300, 1000),
iou_thrs: Optional[Union[float, Sequence[float]]] = None,
metric_items: Optional[Sequence[str]] = None,
format_only: bool = False,
outfile_prefix: Optional[str] = None,
file_client_args: dict = None,
backend_args: dict = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None,
sort_categories: bool = False) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
# coco evaluation metrics
self.metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in self.metrics:
if metric not in allowed_metrics:
raise KeyError(
"metric should be one of 'bbox', 'segm', 'proposal', "
f"'proposal_fast', but got {metric}.")
# do class wise evaluation, default False
self.classwise = classwise
# proposal_nums used to compute recall or precision.
self.proposal_nums = list(proposal_nums)
# iou_thrs used to compute recall or precision.
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.iou_thrs = iou_thrs
self.metric_items = metric_items
self.format_only = format_only
if self.format_only:
            assert outfile_prefix is not None, (
                'outfile_prefix must not be None when format_only is True, '
                'otherwise the result files will be saved to a temp '
                'directory which will be cleaned up at the end.')
self.outfile_prefix = outfile_prefix
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
                'please use `backend_args` instead, please refer to '
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
# if ann_file is not specified,
# initialize coco api with the converted dataset
if ann_file is not None:
with get_local_path(
ann_file, backend_args=self.backend_args) as local_path:
self._coco_api = COCO(local_path)
if sort_categories:
# 'categories' list in objects365_train.json and
# objects365_val.json is inconsistent, need sort
# list(or dict) before get cat_ids.
cats = self._coco_api.cats
sorted_cats = {i: cats[i] for i in sorted(cats)}
self._coco_api.cats = sorted_cats
categories = self._coco_api.dataset['categories']
sorted_categories = sorted(
categories, key=lambda i: i['id'])
self._coco_api.dataset['categories'] = sorted_categories
else:
self._coco_api = None
# handle dataset lazy init
self.cat_ids = None
self.img_ids = None
def fast_eval_recall(self,
results: List[dict],
proposal_nums: Sequence[int],
iou_thrs: Sequence[float],
logger: Optional[MMLogger] = None) -> np.ndarray:
"""Evaluate proposal recall with COCO's fast_eval_recall.
Args:
results (List[dict]): Results of the dataset.
proposal_nums (Sequence[int]): Proposal numbers used for
evaluation.
iou_thrs (Sequence[float]): IoU thresholds used for evaluation.
logger (MMLogger, optional): Logger used for logging the recall
summary.
Returns:
np.ndarray: Averaged recall results.
"""
gt_bboxes = []
pred_bboxes = [result['bboxes'] for result in results]
for i in range(len(self.img_ids)):
ann_ids = self._coco_api.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self._coco_api.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def xyxy2xywh(self, bbox: np.ndarray) -> list:
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox: List = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
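    # Worked example (comment added for illustration, not in the original
    # file): xyxy2xywh converts corner-format boxes to COCO's [x, y, w, h],
    # e.g. np.array([10., 20., 50., 80.]) -> [10.0, 20.0, 40.0, 60.0]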
def results2json(self, results: Sequence[dict],
outfile_prefix: str) -> dict:
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (Sequence[dict]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict: Possible keys are "bbox", "segm", "proposal", and
values are corresponding filenames.
"""
bbox_json_results = []
segm_json_results = [] if 'masks' in results[0] else None
for idx, result in enumerate(results):
image_id = result.get('img_id', idx)
labels = result['labels']
bboxes = result['bboxes']
scores = result['scores']
# bbox results
for i, label in enumerate(labels):
data = dict()
data['image_id'] = image_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(scores[i])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
if segm_json_results is None:
continue
# segm results
masks = result['masks']
mask_scores = result.get('mask_scores', scores)
for i, label in enumerate(labels):
data = dict()
data['image_id'] = image_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_scores[i])
data['category_id'] = self.cat_ids[label]
if isinstance(masks[i]['counts'], bytes):
masks[i]['counts'] = masks[i]['counts'].decode()
data['segmentation'] = masks[i]
segm_json_results.append(data)
result_files = dict()
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
dump(bbox_json_results, result_files['bbox'])
if segm_json_results is not None:
result_files['segm'] = f'{outfile_prefix}.segm.json'
dump(segm_json_results, result_files['segm'])
return result_files
def gt_to_coco_json(self, gt_dicts: Sequence[dict],
outfile_prefix: str) -> str:
"""Convert ground truth to coco format json file.
Args:
gt_dicts (Sequence[dict]): Ground truth of the dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json file will be named
"somepath/xxx.gt.json".
Returns:
str: The filename of the json file.
"""
categories = [
dict(id=id, name=name)
for id, name in enumerate(self.dataset_meta['classes'])
]
image_infos = []
annotations = []
for idx, gt_dict in enumerate(gt_dicts):
img_id = gt_dict.get('img_id', idx)
image_info = dict(
id=img_id,
width=gt_dict['width'],
height=gt_dict['height'],
file_name='')
image_infos.append(image_info)
for ann in gt_dict['anns']:
label = ann['bbox_label']
bbox = ann['bbox']
coco_bbox = [
bbox[0],
bbox[1],
bbox[2] - bbox[0],
bbox[3] - bbox[1],
]
annotation = dict(
                    id=len(annotations) + 1,  # COCO API requires ids to start from 1
image_id=img_id,
bbox=coco_bbox,
iscrowd=ann.get('ignore_flag', 0),
category_id=int(label),
area=coco_bbox[2] * coco_bbox[3])
if ann.get('mask', None):
mask = ann['mask']
# area = mask_util.area(mask)
if isinstance(mask, dict) and isinstance(
mask['counts'], bytes):
mask['counts'] = mask['counts'].decode()
annotation['segmentation'] = mask
# annotation['area'] = float(area)
annotations.append(annotation)
info = dict(
date_created=str(datetime.datetime.now()),
description='Coco json file converted by mmdet CocoMetric.')
coco_json = dict(
info=info,
images=image_infos,
categories=categories,
licenses=None,
)
if len(annotations) > 0:
coco_json['annotations'] = annotations
converted_json_path = f'{outfile_prefix}.gt.json'
dump(coco_json, converted_json_path)
return converted_json_path
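    # Hedged sketch (added comment, not upstream code): the resulting
    # ``*.gt.json`` is a standard COCO-style dict such as
    #
    #   {"info": {...},
    #    "images": [{"id": 0, "width": W, "height": H, "file_name": ""}],
    #    "categories": [{"id": 0, "name": "person"}, ...],
    #    "annotations": [{"id": 1, "image_id": 0, "bbox": [x, y, w, h], ...}]}
    #
    # which is exactly the layout ``pycocotools.coco.COCO`` expects to load.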
# TODO: data_batch is no longer needed, consider adjusting the
# parameter position
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
result = dict()
pred = data_sample['pred_instances']
result['img_id'] = data_sample['img_id']
result['bboxes'] = pred['bboxes'].cpu().numpy()
result['scores'] = pred['scores'].cpu().numpy()
result['labels'] = pred['labels'].cpu().numpy()
# encode mask to RLE
if 'masks' in pred:
result['masks'] = encode_mask_results(
pred['masks'].detach().cpu().numpy()) if isinstance(
pred['masks'], torch.Tensor) else pred['masks']
# some detectors use different scores for bbox and mask
if 'mask_scores' in pred:
result['mask_scores'] = pred['mask_scores'].cpu().numpy()
# parse gt
gt = dict()
gt['width'] = data_sample['ori_shape'][1]
gt['height'] = data_sample['ori_shape'][0]
gt['img_id'] = data_sample['img_id']
if self._coco_api is None:
# TODO: Need to refactor to support LoadAnnotations
assert 'instances' in data_sample, \
'ground truth is required for evaluation when ' \
'`ann_file` is not provided'
gt['anns'] = data_sample['instances']
# add converted result to the results list
self.results.append((gt, result))
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
# split gt and prediction list
gts, preds = zip(*results)
tmp_dir = None
if self.outfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
outfile_prefix = osp.join(tmp_dir.name, 'results')
else:
outfile_prefix = self.outfile_prefix
if self._coco_api is None:
# use converted gt json file to initialize coco api
logger.info('Converting ground truth to coco format...')
coco_json_path = self.gt_to_coco_json(
gt_dicts=gts, outfile_prefix=outfile_prefix)
self._coco_api = COCO(coco_json_path)
# handle lazy init
if self.cat_ids is None:
self.cat_ids = self._coco_api.get_cat_ids(
cat_names=self.dataset_meta['classes'])
if self.img_ids is None:
self.img_ids = self._coco_api.get_img_ids()
# convert predictions to coco format and dump to json file
result_files = self.results2json(preds, outfile_prefix)
eval_results = OrderedDict()
if self.format_only:
logger.info('results are saved in '
f'{osp.dirname(outfile_prefix)}')
return eval_results
for metric in self.metrics:
logger.info(f'Evaluating {metric}...')
# TODO: May refactor fast_eval_recall to an independent metric?
# fast eval recall
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
preds, self.proposal_nums, self.iou_thrs, logger=logger)
log_msg = []
for i, num in enumerate(self.proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
logger.info(log_msg)
continue
# evaluate proposal, bbox and segm
iou_type = 'bbox' if metric == 'proposal' else metric
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
predictions = load(result_files[metric])
if iou_type == 'segm':
# Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa
# When evaluating mask AP, if the results contain bbox,
# cocoapi will use the box area instead of the mask area
# for calculating the instance area. Though the overall AP
# is not affected, this leads to different
# small/medium/large mask AP results.
for x in predictions:
x.pop('bbox')
coco_dt = self._coco_api.loadRes(predictions)
except IndexError:
logger.error(
                    'The testing results of the whole dataset are empty.')
break
coco_eval = COCOeval(self._coco_api, coco_dt, iou_type)
coco_eval.params.catIds = self.cat_ids
coco_eval.params.imgIds = self.img_ids
coco_eval.params.maxDets = list(self.proposal_nums)
coco_eval.params.iouThrs = self.iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
metric_items = self.metric_items
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item "{metric_item}" is not supported')
if metric == 'proposal':
coco_eval.params.useCats = 0
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{coco_eval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
                if self.classwise:
                    # Compute per-category AP, adapted from
                    # https://github.com/facebookresearch/detectron2/
precisions = coco_eval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, cat_id in enumerate(self.cat_ids):
t = []
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self._coco_api.loadCats(cat_id)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
t.append(f'{nm["name"]}')
t.append(f'{round(ap, 3)}')
eval_results[f'{nm["name"]}_precision'] = round(ap, 3)
# indexes of IoU @50 and @75
for iou in [0, 5]:
precision = precisions[iou, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
t.append(f'{round(ap, 3)}')
                        # indexes of area of small, medium and large
for area in [1, 2, 3]:
precision = precisions[:, :, idx, area, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
t.append(f'{round(ap, 3)}')
results_per_category.append(tuple(t))
num_columns = len(results_per_category[0])
results_flatten = list(
itertools.chain(*results_per_category))
headers = [
'category', 'mAP', 'mAP_50', 'mAP_75', 'mAP_s',
'mAP_m', 'mAP_l'
]
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
logger.info('\n' + table.table)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = coco_eval.stats[coco_metric_names[metric_item]]
eval_results[key] = float(f'{round(val, 3)}')
ap = coco_eval.stats[:6]
logger.info(f'{metric}_mAP_copypaste: {ap[0]:.3f} '
f'{ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
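    # Minimal end-to-end usage sketch (added for illustration; the metric
    # class name and the ``dataset_meta`` wiring are assumptions inferred
    # from the surrounding code, not upstream documentation):
    #
    #   metric = CocoMetric(ann_file=None, metric='bbox')
    #   metric.dataset_meta = dict(classes=('cat', 'dog'))
    #   metric.process(data_batch={}, data_samples=[sample_dict])
    #   print(metric.compute_metrics(metric.results))  # e.g. {'bbox_mAP': ...}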
| 25,325 | 41.852792 | 125 | py |
ERD | ERD-main/mmdet/testing/_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from os.path import dirname, exists, join
import numpy as np
import torch
from mmengine.config import Config
from mmengine.dataset import pseudo_collate
from mmengine.structures import InstanceData, PixelData
from ..registry import TASK_UTILS
from ..structures import DetDataSample
from ..structures.bbox import HorizontalBoxes
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def get_roi_head_cfg(fname):
"""Grab configs necessary to create a roi_head.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
roi_head = model.roi_head
train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn
test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn
roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg))
return roi_head
def _rand_bboxes(rng, num_boxes, w, h):
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
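# Illustrative check (added comment; not part of the module): the generator
# above yields valid ``xyxy`` boxes clipped to the image, e.g.
#
#   rng = np.random.RandomState(0)
#   boxes = _rand_bboxes(rng, num_boxes=4, w=128, h=128)
#   assert (boxes[:, 2:] >= boxes[:, :2]).all() and (boxes <= 128).all()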
def _rand_masks(rng, num_boxes, bboxes, img_w, img_h):
from mmdet.structures.mask import BitmapMasks
masks = np.zeros((num_boxes, img_h, img_w))
for i, bbox in enumerate(bboxes):
bbox = bbox.astype(np.int32)
mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
0.3).astype(np.int64)
masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
return BitmapMasks(masks, height=img_h, width=img_w)
def demo_mm_inputs(batch_size=2,
image_shapes=(3, 128, 128),
num_items=None,
num_classes=10,
sem_seg_output_strides=1,
with_mask=False,
with_semantic=False,
use_box_type=False,
device='cpu'):
"""Create a superset of inputs needed to run test or train batches.
    Args:
        batch_size (int): batch size. Defaults to 2.
        image_shapes (List[tuple] | tuple): image shape(s) in (C, H, W)
            order. Defaults to (3, 128, 128).
        num_items (None | List[int]): specifies the number
            of boxes in each batch item. Defaults to None.
        num_classes (int): number of different labels a
            box might have. Defaults to 10.
        sem_seg_output_strides (int): stride of the semantic segmentation
            map relative to the image. Defaults to 1.
        with_mask (bool): Whether to return mask annotation.
            Defaults to False.
        with_semantic (bool): whether to return semantic segmentation maps.
            Defaults to False.
        use_box_type (bool): whether to wrap gt bboxes with
            ``HorizontalBoxes``. Defaults to False.
        device (str): Destination device type. Defaults to cpu.
    """
rng = np.random.RandomState(0)
if isinstance(image_shapes, list):
assert len(image_shapes) == batch_size
else:
image_shapes = [image_shapes] * batch_size
if isinstance(num_items, list):
assert len(num_items) == batch_size
packed_inputs = []
for idx in range(batch_size):
image_shape = image_shapes[idx]
c, h, w = image_shape
image = rng.randint(0, 255, size=image_shape, dtype=np.uint8)
mm_inputs = dict()
mm_inputs['inputs'] = torch.from_numpy(image).to(device)
img_meta = {
'img_id': idx,
'img_shape': image_shape[1:],
'ori_shape': image_shape[1:],
'filename': '<demo>.png',
'scale_factor': np.array([1.1, 1.2]),
'flip': False,
'flip_direction': None,
'border': [1, 1, 1, 1] # Only used by CenterNet
}
data_sample = DetDataSample()
data_sample.set_metainfo(img_meta)
# gt_instances
gt_instances = InstanceData()
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[idx]
bboxes = _rand_bboxes(rng, num_boxes, w, h)
labels = rng.randint(1, num_classes, size=num_boxes)
# TODO: remove this part when all model adapted with BaseBoxes
if use_box_type:
gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32)
else:
gt_instances.bboxes = torch.FloatTensor(bboxes)
gt_instances.labels = torch.LongTensor(labels)
if with_mask:
masks = _rand_masks(rng, num_boxes, bboxes, w, h)
gt_instances.masks = masks
# TODO: waiting for ci to be fixed
# masks = np.random.randint(0, 2, (len(bboxes), h, w), dtype=np.uint8)
# gt_instances.mask = BitmapMasks(masks, h, w)
data_sample.gt_instances = gt_instances
# ignore_instances
ignore_instances = InstanceData()
bboxes = _rand_bboxes(rng, num_boxes, w, h)
if use_box_type:
ignore_instances.bboxes = HorizontalBoxes(
bboxes, dtype=torch.float32)
else:
ignore_instances.bboxes = torch.FloatTensor(bboxes)
data_sample.ignored_instances = ignore_instances
# gt_sem_seg
if with_semantic:
# assume gt_semantic_seg using scale 1/8 of the img
gt_semantic_seg = torch.from_numpy(
np.random.randint(
0,
num_classes, (1, h // sem_seg_output_strides,
w // sem_seg_output_strides),
dtype=np.uint8))
gt_sem_seg_data = dict(sem_seg=gt_semantic_seg)
data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)
mm_inputs['data_samples'] = data_sample.to(device)
# TODO: gt_ignore
packed_inputs.append(mm_inputs)
data = pseudo_collate(packed_inputs)
return data
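# Hedged usage sketch (added for illustration): build a fake two-image batch,
# e.g.
#
#   data = demo_mm_inputs(batch_size=2, image_shapes=(3, 128, 128),
#                         num_items=[3, 5], num_classes=10)
#   # data['inputs'] is a list of two uint8 (3, 128, 128) tensors and
#   # data['data_samples'] holds the matching DetDataSample annotations,
#   # ready to be fed through a model's data_preprocessor and loss().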
def demo_mm_proposals(image_shapes, num_proposals, device='cpu'):
"""Create a list of fake porposals.
Args:
image_shapes (list[tuple[int]]): Batch image shapes.
num_proposals (int): The number of fake proposals.
"""
rng = np.random.RandomState(0)
results = []
for img_shape in image_shapes:
result = InstanceData()
        h, w = img_shape[1:]  # img_shape is (C, H, W)
proposals = _rand_bboxes(rng, num_proposals, w, h)
result.bboxes = torch.from_numpy(proposals).float()
result.scores = torch.from_numpy(rng.rand(num_proposals)).float()
result.labels = torch.zeros(num_proposals).long()
results.append(result.to(device))
return results
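# Illustrative usage (added comment): one ``InstanceData`` of random boxes,
# scores and zero labels per image shape, e.g.
#
#   proposals = demo_mm_proposals([(3, 128, 128)], num_proposals=100)
#   proposals[0].bboxes.shape  # torch.Size([100, 4])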
def demo_mm_sampling_results(proposals_list,
batch_gt_instances,
batch_gt_instances_ignore=None,
assigner_cfg=None,
sampler_cfg=None,
feats=None):
"""Create sample results that can be passed to BBoxHead.get_targets."""
assert len(proposals_list) == len(batch_gt_instances)
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None for _ in batch_gt_instances]
else:
assert len(batch_gt_instances_ignore) == len(batch_gt_instances)
default_assigner_cfg = dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1)
assigner_cfg = assigner_cfg if assigner_cfg is not None \
else default_assigner_cfg
default_sampler_cfg = dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True)
sampler_cfg = sampler_cfg if sampler_cfg is not None \
else default_sampler_cfg
bbox_assigner = TASK_UTILS.build(assigner_cfg)
bbox_sampler = TASK_UTILS.build(sampler_cfg)
sampling_results = []
for i in range(len(batch_gt_instances)):
if feats is not None:
feats = [lvl_feat[i][None] for lvl_feat in feats]
# rename proposals.bboxes to proposals.priors
proposals = proposals_list[i]
proposals.priors = proposals.pop('bboxes')
assign_result = bbox_assigner.assign(proposals, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = bbox_sampler.sample(
assign_result, proposals, batch_gt_instances[i], feats=feats)
sampling_results.append(sampling_result)
return sampling_results
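# Hedged usage sketch (added; combines the helpers above):
#
#   props = demo_mm_proposals([(3, 128, 128)], num_proposals=100)
#   data = demo_mm_inputs(batch_size=1)
#   gts = [s.gt_instances for s in data['data_samples']]
#   results = demo_mm_sampling_results(props, gts)
#   # each entry is a SamplingResult (e.g. ``pos_priors``/``neg_priors``)
#   # that can be passed straight to ``BBoxHead.get_targets``.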
# TODO: Support full ceph
def replace_to_ceph(cfg):
backend_args = dict(
backend='petrel',
path_mapping=dict({
'./data/': 's3://openmmlab/datasets/detection/',
'data/': 's3://openmmlab/datasets/detection/'
}))
# TODO: name is a reserved interface, which will be used later.
def _process_pipeline(dataset, name):
def replace_img(pipeline):
if pipeline['type'] == 'LoadImageFromFile':
pipeline['backend_args'] = backend_args
def replace_ann(pipeline):
if pipeline['type'] == 'LoadAnnotations' or pipeline[
'type'] == 'LoadPanopticAnnotations':
pipeline['backend_args'] = backend_args
if 'pipeline' in dataset:
replace_img(dataset.pipeline[0])
replace_ann(dataset.pipeline[1])
if 'dataset' in dataset:
# dataset wrapper
replace_img(dataset.dataset.pipeline[0])
replace_ann(dataset.dataset.pipeline[1])
else:
# dataset wrapper
replace_img(dataset.dataset.pipeline[0])
replace_ann(dataset.dataset.pipeline[1])
def _process_evaluator(evaluator, name):
if evaluator['type'] == 'CocoPanopticMetric':
evaluator['backend_args'] = backend_args
# half ceph
_process_pipeline(cfg.train_dataloader.dataset, cfg.filename)
_process_pipeline(cfg.val_dataloader.dataset, cfg.filename)
_process_pipeline(cfg.test_dataloader.dataset, cfg.filename)
_process_evaluator(cfg.val_evaluator, cfg.filename)
_process_evaluator(cfg.test_evaluator, cfg.filename)
| 11,178 | 34.154088 | 78 | py |
ERD | ERD-main/mmdet/models/data_preprocessors/data_preprocessor.py | # Copyright (c) OpenMMLab. All rights reserved.
import random
from numbers import Number
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.dist import barrier, broadcast, get_dist_info
from mmengine.logging import MessageHub
from mmengine.model import BaseDataPreprocessor, ImgDataPreprocessor
from mmengine.structures import PixelData
from mmengine.utils import is_seq_of
from torch import Tensor
from mmdet.models.utils import unfold_wo_center
from mmdet.models.utils.misc import samplelist_boxtype2tensor
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
from mmdet.utils import ConfigType
try:
import skimage
except ImportError:
skimage = None
@MODELS.register_module()
class DetDataPreprocessor(ImgDataPreprocessor):
"""Image pre-processor for detection tasks.
    Compared with the :class:`mmengine.ImgDataPreprocessor`,
1. It supports batch augmentations.
2. It will additionally append batch_input_shape and pad_shape
to data_samples considering the object detection task.
It provides the data pre-processing as follows
- Collate and move data to the target device.
- Pad inputs to the maximum size of current batch with defined
``pad_value``. The padding size can be divisible by a defined
``pad_size_divisor``
- Stack inputs to batch_inputs.
- Convert inputs from bgr to rgb if the shape of input is (3, H, W).
- Normalize image with defined std and mean.
- Do batch augmentations during training.
Args:
mean (Sequence[Number], optional): The pixel mean of R, G, B channels.
Defaults to None.
std (Sequence[Number], optional): The pixel standard deviation of
R, G, B channels. Defaults to None.
pad_size_divisor (int): The size of padded image should be
divisible by ``pad_size_divisor``. Defaults to 1.
pad_value (Number): The padded pixel value. Defaults to 0.
pad_mask (bool): Whether to pad instance masks. Defaults to False.
mask_pad_value (int): The padded pixel value for instance masks.
Defaults to 0.
pad_seg (bool): Whether to pad semantic segmentation maps.
Defaults to False.
seg_pad_value (int): The padded pixel value for semantic
segmentation maps. Defaults to 255.
bgr_to_rgb (bool): whether to convert image from BGR to RGB.
Defaults to False.
        rgb_to_bgr (bool): whether to convert image from RGB to BGR.
Defaults to False.
boxtype2tensor (bool): Whether to keep the ``BaseBoxes`` type of
bboxes data or not. Defaults to True.
        non_blocking (bool): Whether to block the current process when
            transferring data to the device. Defaults to False.
        batch_augments (list[dict], optional): Batch-level augmentations.
            Defaults to None.
    """
def __init__(self,
mean: Sequence[Number] = None,
std: Sequence[Number] = None,
pad_size_divisor: int = 1,
pad_value: Union[float, int] = 0,
pad_mask: bool = False,
mask_pad_value: int = 0,
pad_seg: bool = False,
seg_pad_value: int = 255,
bgr_to_rgb: bool = False,
rgb_to_bgr: bool = False,
boxtype2tensor: bool = True,
non_blocking: Optional[bool] = False,
batch_augments: Optional[List[dict]] = None):
super().__init__(
mean=mean,
std=std,
pad_size_divisor=pad_size_divisor,
pad_value=pad_value,
bgr_to_rgb=bgr_to_rgb,
rgb_to_bgr=rgb_to_bgr,
non_blocking=non_blocking)
if batch_augments is not None:
self.batch_augments = nn.ModuleList(
[MODELS.build(aug) for aug in batch_augments])
else:
self.batch_augments = None
self.pad_mask = pad_mask
self.mask_pad_value = mask_pad_value
self.pad_seg = pad_seg
self.seg_pad_value = seg_pad_value
self.boxtype2tensor = boxtype2tensor
def forward(self, data: dict, training: bool = False) -> dict:
"""Perform normalization、padding and bgr2rgb conversion based on
``BaseDataPreprocessor``.
Args:
data (dict): Data sampled from dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict: Data in the same format as the model input.
"""
batch_pad_shape = self._get_pad_shape(data)
data = super().forward(data=data, training=training)
inputs, data_samples = data['inputs'], data['data_samples']
if data_samples is not None:
# NOTE the batched image size information may be useful, e.g.
# in DETR, this is needed for the construction of masks, which is
# then used for the transformer_head.
batch_input_shape = tuple(inputs[0].size()[-2:])
for data_sample, pad_shape in zip(data_samples, batch_pad_shape):
data_sample.set_metainfo({
'batch_input_shape': batch_input_shape,
'pad_shape': pad_shape
})
if self.boxtype2tensor:
samplelist_boxtype2tensor(data_samples)
if self.pad_mask and training:
self.pad_gt_masks(data_samples)
if self.pad_seg and training:
self.pad_gt_sem_seg(data_samples)
if training and self.batch_augments is not None:
for batch_aug in self.batch_augments:
inputs, data_samples = batch_aug(inputs, data_samples)
return {'inputs': inputs, 'data_samples': data_samples}
def _get_pad_shape(self, data: dict) -> List[tuple]:
"""Get the pad_shape of each image based on data and
pad_size_divisor."""
_batch_inputs = data['inputs']
# Process data with `pseudo_collate`.
if is_seq_of(_batch_inputs, torch.Tensor):
batch_pad_shape = []
for ori_input in _batch_inputs:
pad_h = int(
np.ceil(ori_input.shape[1] /
self.pad_size_divisor)) * self.pad_size_divisor
pad_w = int(
np.ceil(ori_input.shape[2] /
self.pad_size_divisor)) * self.pad_size_divisor
batch_pad_shape.append((pad_h, pad_w))
# Process data with `default_collate`.
elif isinstance(_batch_inputs, torch.Tensor):
assert _batch_inputs.dim() == 4, (
'The input of `ImgDataPreprocessor` should be a NCHW tensor '
'or a list of tensor, but got a tensor with shape: '
f'{_batch_inputs.shape}')
            # For an NCHW tensor, the height and width are dims 2 and 3.
            pad_h = int(
                np.ceil(_batch_inputs.shape[2] /
                        self.pad_size_divisor)) * self.pad_size_divisor
            pad_w = int(
                np.ceil(_batch_inputs.shape[3] /
                        self.pad_size_divisor)) * self.pad_size_divisor
batch_pad_shape = [(pad_h, pad_w)] * _batch_inputs.shape[0]
else:
raise TypeError('Output of `cast_data` should be a dict '
                            'or a tuple with inputs and data_samples, but got '
f'{type(data)}: {data}')
return batch_pad_shape
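    # Worked example of the divisor rounding above (added comment): with
    # ``pad_size_divisor=32`` an image of shape (3, 300, 500) gets a pad
    # shape of (ceil(300 / 32) * 32, ceil(500 / 32) * 32) == (320, 512).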
def pad_gt_masks(self,
batch_data_samples: Sequence[DetDataSample]) -> None:
"""Pad gt_masks to shape of batch_input_shape."""
if 'masks' in batch_data_samples[0].gt_instances:
for data_samples in batch_data_samples:
masks = data_samples.gt_instances.masks
data_samples.gt_instances.masks = masks.pad(
data_samples.batch_input_shape,
pad_val=self.mask_pad_value)
def pad_gt_sem_seg(self,
batch_data_samples: Sequence[DetDataSample]) -> None:
"""Pad gt_sem_seg to shape of batch_input_shape."""
if 'gt_sem_seg' in batch_data_samples[0]:
for data_samples in batch_data_samples:
gt_sem_seg = data_samples.gt_sem_seg.sem_seg
h, w = gt_sem_seg.shape[-2:]
pad_h, pad_w = data_samples.batch_input_shape
gt_sem_seg = F.pad(
gt_sem_seg,
pad=(0, max(pad_w - w, 0), 0, max(pad_h - h, 0)),
mode='constant',
value=self.seg_pad_value)
data_samples.gt_sem_seg = PixelData(sem_seg=gt_sem_seg)
@MODELS.register_module()
class BatchSyncRandomResize(nn.Module):
"""Batch random resize which synchronizes the random size across ranks.
Args:
random_size_range (tuple): The multi-scale random range during
multi-scale training.
interval (int): The iter interval of change
image size. Defaults to 10.
size_divisor (int): Image size divisible factor.
Defaults to 32.
"""
def __init__(self,
random_size_range: Tuple[int, int],
interval: int = 10,
size_divisor: int = 32) -> None:
super().__init__()
self.rank, self.world_size = get_dist_info()
self._input_size = None
self._random_size_range = (round(random_size_range[0] / size_divisor),
round(random_size_range[1] / size_divisor))
self._interval = interval
self._size_divisor = size_divisor
def forward(
self, inputs: Tensor, data_samples: List[DetDataSample]
) -> Tuple[Tensor, List[DetDataSample]]:
"""resize a batch of images and bboxes to shape ``self._input_size``"""
h, w = inputs.shape[-2:]
if self._input_size is None:
self._input_size = (h, w)
scale_y = self._input_size[0] / h
scale_x = self._input_size[1] / w
if scale_x != 1 or scale_y != 1:
inputs = F.interpolate(
inputs,
size=self._input_size,
mode='bilinear',
align_corners=False)
for data_sample in data_samples:
img_shape = (int(data_sample.img_shape[0] * scale_y),
int(data_sample.img_shape[1] * scale_x))
pad_shape = (int(data_sample.pad_shape[0] * scale_y),
int(data_sample.pad_shape[1] * scale_x))
data_sample.set_metainfo({
'img_shape': img_shape,
'pad_shape': pad_shape,
'batch_input_shape': self._input_size
})
data_sample.gt_instances.bboxes[
...,
0::2] = data_sample.gt_instances.bboxes[...,
0::2] * scale_x
data_sample.gt_instances.bboxes[
...,
1::2] = data_sample.gt_instances.bboxes[...,
1::2] * scale_y
if 'ignored_instances' in data_sample:
data_sample.ignored_instances.bboxes[
..., 0::2] = data_sample.ignored_instances.bboxes[
..., 0::2] * scale_x
data_sample.ignored_instances.bboxes[
..., 1::2] = data_sample.ignored_instances.bboxes[
..., 1::2] * scale_y
message_hub = MessageHub.get_current_instance()
if (message_hub.get_info('iter') + 1) % self._interval == 0:
self._input_size = self._get_random_size(
aspect_ratio=float(w / h), device=inputs.device)
return inputs, data_samples
def _get_random_size(self, aspect_ratio: float,
device: torch.device) -> Tuple[int, int]:
"""Randomly generate a shape in ``_random_size_range`` and broadcast to
all ranks."""
tensor = torch.LongTensor(2).to(device)
if self.rank == 0:
size = random.randint(*self._random_size_range)
size = (self._size_divisor * size,
self._size_divisor * int(aspect_ratio * size))
tensor[0] = size[0]
tensor[1] = size[1]
barrier()
broadcast(tensor, 0)
input_size = (tensor[0].item(), tensor[1].item())
return input_size
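    # Illustrative note (added): with ``random_size_range=(480, 800)`` and
    # ``size_divisor=32``, rank 0 draws an integer in [15, 25] and rescales
    # it, so the sampled heights fall on {480, 512, ..., 800}; the broadcast
    # guarantees every rank resizes the batch to the same shape.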
@MODELS.register_module()
class BatchFixedSizePad(nn.Module):
"""Fixed size padding for batch images.
Args:
        size (Tuple[int, int]): Fixed padding size. Expected padding
            shape (h, w).
img_pad_value (int): The padded pixel value for images.
Defaults to 0.
pad_mask (bool): Whether to pad instance masks. Defaults to False.
mask_pad_value (int): The padded pixel value for instance masks.
Defaults to 0.
pad_seg (bool): Whether to pad semantic segmentation maps.
Defaults to False.
seg_pad_value (int): The padded pixel value for semantic
segmentation maps. Defaults to 255.
"""
def __init__(self,
size: Tuple[int, int],
img_pad_value: int = 0,
pad_mask: bool = False,
mask_pad_value: int = 0,
pad_seg: bool = False,
seg_pad_value: int = 255) -> None:
super().__init__()
self.size = size
self.pad_mask = pad_mask
self.pad_seg = pad_seg
self.img_pad_value = img_pad_value
self.mask_pad_value = mask_pad_value
self.seg_pad_value = seg_pad_value
def forward(
self,
inputs: Tensor,
data_samples: Optional[List[dict]] = None
) -> Tuple[Tensor, Optional[List[dict]]]:
"""Pad image, instance masks, segmantic segmentation maps."""
src_h, src_w = inputs.shape[-2:]
dst_h, dst_w = self.size
if src_h >= dst_h and src_w >= dst_w:
return inputs, data_samples
inputs = F.pad(
inputs,
pad=(0, max(0, dst_w - src_w), 0, max(0, dst_h - src_h)),
mode='constant',
value=self.img_pad_value)
if data_samples is not None:
# update batch_input_shape
for data_sample in data_samples:
data_sample.set_metainfo({
'batch_input_shape': (dst_h, dst_w),
'pad_shape': (dst_h, dst_w)
})
if self.pad_mask:
for data_sample in data_samples:
masks = data_sample.gt_instances.masks
data_sample.gt_instances.masks = masks.pad(
(dst_h, dst_w), pad_val=self.mask_pad_value)
if self.pad_seg:
for data_sample in data_samples:
gt_sem_seg = data_sample.gt_sem_seg.sem_seg
h, w = gt_sem_seg.shape[-2:]
gt_sem_seg = F.pad(
gt_sem_seg,
pad=(0, max(0, dst_w - w), 0, max(0, dst_h - h)),
mode='constant',
value=self.seg_pad_value)
data_sample.gt_sem_seg = PixelData(sem_seg=gt_sem_seg)
return inputs, data_samples
@MODELS.register_module()
class MultiBranchDataPreprocessor(BaseDataPreprocessor):
"""DataPreprocessor wrapper for multi-branch data.
Take semi-supervised object detection as an example, assume that
the ratio of labeled data and unlabeled data in a batch is 1:2,
`sup` indicates the branch where the labeled data is augmented,
`unsup_teacher` and `unsup_student` indicate the branches where
the unlabeled data is augmented by different pipeline.
    The input format of multi-branch data is shown as below:
.. code-block:: none
{
'inputs':
{
'sup': [Tensor, None, None],
'unsup_teacher': [None, Tensor, Tensor],
'unsup_student': [None, Tensor, Tensor],
},
'data_sample':
{
'sup': [DetDataSample, None, None],
'unsup_teacher': [None, DetDataSample, DetDataSample],
            'unsup_student': [None, DetDataSample, DetDataSample],
}
}
    The format of multi-branch data after filtering None is shown as below:
.. code-block:: none
{
'inputs':
{
'sup': [Tensor],
'unsup_teacher': [Tensor, Tensor],
'unsup_student': [Tensor, Tensor],
},
'data_sample':
{
'sup': [DetDataSample],
'unsup_teacher': [DetDataSample, DetDataSample],
'unsup_student': [DetDataSample, DetDataSample],
}
}
In order to reuse `DetDataPreprocessor` for the data
from different branches, the format of multi-branch data
    grouped by branch is as below:
.. code-block:: none
{
'sup':
{
'inputs': [Tensor]
'data_sample': [DetDataSample, DetDataSample]
},
'unsup_teacher':
{
'inputs': [Tensor, Tensor]
'data_sample': [DetDataSample, DetDataSample]
},
'unsup_student':
{
'inputs': [Tensor, Tensor]
'data_sample': [DetDataSample, DetDataSample]
},
}
After preprocessing data from different branches,
the multi-branch data needs to be reformatted as:
.. code-block:: none
{
'inputs':
{
'sup': [Tensor],
'unsup_teacher': [Tensor, Tensor],
'unsup_student': [Tensor, Tensor],
},
'data_sample':
{
'sup': [DetDataSample],
'unsup_teacher': [DetDataSample, DetDataSample],
'unsup_student': [DetDataSample, DetDataSample],
}
}
Args:
data_preprocessor (:obj:`ConfigDict` or dict): Config of
:class:`DetDataPreprocessor` to process the input data.
"""
def __init__(self, data_preprocessor: ConfigType) -> None:
super().__init__()
self.data_preprocessor = MODELS.build(data_preprocessor)
def forward(self, data: dict, training: bool = False) -> dict:
"""Perform normalization、padding and bgr2rgb conversion based on
``BaseDataPreprocessor`` for multi-branch data.
Args:
data (dict): Data sampled from dataloader.
training (bool): Whether to enable training time augmentation.
Returns:
dict:
- 'inputs' (Dict[str, obj:`torch.Tensor`]): The forward data of
models from different branches.
- 'data_sample' (Dict[str, obj:`DetDataSample`]): The annotation
info of the sample from different branches.
"""
if training is False:
return self.data_preprocessor(data, training)
# Filter out branches with a value of None
for key in data.keys():
for branch in data[key].keys():
data[key][branch] = list(
filter(lambda x: x is not None, data[key][branch]))
# Group data by branch
multi_branch_data = {}
for key in data.keys():
for branch in data[key].keys():
if multi_branch_data.get(branch, None) is None:
multi_branch_data[branch] = {key: data[key][branch]}
elif multi_branch_data[branch].get(key, None) is None:
multi_branch_data[branch][key] = data[key][branch]
else:
multi_branch_data[branch][key].append(data[key][branch])
# Preprocess data from different branches
for branch, _data in multi_branch_data.items():
multi_branch_data[branch] = self.data_preprocessor(_data, training)
# Format data by inputs and data_samples
format_data = {}
for branch in multi_branch_data.keys():
for key in multi_branch_data[branch].keys():
if format_data.get(key, None) is None:
format_data[key] = {branch: multi_branch_data[branch][key]}
elif format_data[key].get(branch, None) is None:
format_data[key][branch] = multi_branch_data[branch][key]
else:
format_data[key][branch].append(
multi_branch_data[branch][key])
return format_data
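    # Hedged toy sketch of the regrouping above (added; tensors and branch
    # names are placeholders):
    #
    #   data = {'inputs': {'sup': [t1, None], 'unsup_student': [None, t2]}}
    #   # After None-filtering and grouping, the wrapped preprocessor sees
    #   #   {'sup': {'inputs': [t1]}, 'unsup_student': {'inputs': [t2]}}
    #   # and its outputs are regrouped back into the top-level
    #   # 'inputs'/'data_samples' layout that detectors expect.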
@property
def device(self):
return self.data_preprocessor.device
def to(self, device: Optional[Union[int, torch.device]], *args,
**kwargs) -> nn.Module:
"""Overrides this method to set the :attr:`device`
Args:
device (int or torch.device, optional): The desired device of the
parameters and buffers in this module.
Returns:
nn.Module: The model itself.
"""
return self.data_preprocessor.to(device, *args, **kwargs)
def cuda(self, *args, **kwargs) -> nn.Module:
"""Overrides this method to set the :attr:`device`
Returns:
nn.Module: The model itself.
"""
return self.data_preprocessor.cuda(*args, **kwargs)
def cpu(self, *args, **kwargs) -> nn.Module:
"""Overrides this method to set the :attr:`device`
Returns:
nn.Module: The model itself.
"""
return self.data_preprocessor.cpu(*args, **kwargs)
@MODELS.register_module()
class BatchResize(nn.Module):
"""Batch resize during training. This implementation is modified from
https://github.com/Purkialo/CrowdDet/blob/master/lib/data/CrowdHuman.py.
It provides the data pre-processing as follows:
    - A batch of images is padded to a uniform size and stacked into a
        torch.Tensor by `DetDataPreprocessor`.
    - `BatchResize` resizes all images to the target size.
    - Images are padded so that the size of each image is divisible by
        ``pad_size_divisor``.
Args:
scale (tuple): Images scales for resizing.
pad_size_divisor (int): Image size divisible factor.
Defaults to 1.
pad_value (Number): The padded pixel value. Defaults to 0.
"""
def __init__(
self,
scale: tuple,
pad_size_divisor: int = 1,
pad_value: Union[float, int] = 0,
) -> None:
super().__init__()
self.min_size = min(scale)
self.max_size = max(scale)
self.pad_size_divisor = pad_size_divisor
self.pad_value = pad_value
def forward(
self, inputs: Tensor, data_samples: List[DetDataSample]
) -> Tuple[Tensor, List[DetDataSample]]:
"""resize a batch of images and bboxes."""
batch_height, batch_width = inputs.shape[-2:]
target_height, target_width, scale = self.get_target_size(
batch_height, batch_width)
inputs = F.interpolate(
inputs,
size=(target_height, target_width),
mode='bilinear',
align_corners=False)
inputs = self.get_padded_tensor(inputs, self.pad_value)
if data_samples is not None:
batch_input_shape = tuple(inputs.size()[-2:])
for data_sample in data_samples:
img_shape = [
int(scale * _) for _ in list(data_sample.img_shape)
]
data_sample.set_metainfo({
'img_shape': tuple(img_shape),
'batch_input_shape': batch_input_shape,
'pad_shape': batch_input_shape,
'scale_factor': (scale, scale)
})
data_sample.gt_instances.bboxes *= scale
data_sample.ignored_instances.bboxes *= scale
return inputs, data_samples
def get_target_size(self, height: int,
width: int) -> Tuple[int, int, float]:
"""Get the target size of a batch of images based on data and scale."""
im_size_min = np.min([height, width])
im_size_max = np.max([height, width])
scale = self.min_size / im_size_min
if scale * im_size_max > self.max_size:
scale = self.max_size / im_size_max
target_height, target_width = int(round(height * scale)), int(
round(width * scale))
return target_height, target_width, scale
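    # Worked example (added comment): with ``scale=(800, 1333)`` and a
    # 600x1400 batch, ``800 / 600 ~= 1.333`` would overshoot the long side
    # (1400 * 1.333 > 1333), so the fallback ``1333 / 1400 ~= 0.952`` is
    # used, giving a target of roughly 571x1333.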
def get_padded_tensor(self, tensor: Tensor, pad_value: int) -> Tensor:
"""Pad images according to pad_size_divisor."""
assert tensor.ndim == 4
target_height, target_width = tensor.shape[-2], tensor.shape[-1]
divisor = self.pad_size_divisor
padded_height = (target_height + divisor - 1) // divisor * divisor
padded_width = (target_width + divisor - 1) // divisor * divisor
padded_tensor = torch.ones([
tensor.shape[0], tensor.shape[1], padded_height, padded_width
]) * pad_value
padded_tensor = padded_tensor.type_as(tensor)
padded_tensor[:, :, :target_height, :target_width] = tensor
return padded_tensor
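    # Worked example (added comment): with ``pad_size_divisor=64`` a
    # 571x1333 tensor is padded to ((571 + 63) // 64 * 64,
    # (1333 + 63) // 64 * 64) == (576, 1344), the new border filled with
    # ``pad_value``.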
@MODELS.register_module()
class BoxInstDataPreprocessor(DetDataPreprocessor):
"""Pseudo mask pre-processor for BoxInst.
    Compared with the :class:`mmdet.DetDataPreprocessor`,
1. It generates masks using box annotations.
2. It computes the images color similarity in LAB color space.
Args:
mask_stride (int): The mask output stride in boxinst. Defaults to 4.
pairwise_size (int): The size of neighborhood for each pixel.
Defaults to 3.
pairwise_dilation (int): The dilation of neighborhood for each pixel.
Defaults to 2.
pairwise_color_thresh (float): The thresh of image color similarity.
Defaults to 0.3.
        bottom_pixels_removed (int): The number of rows of pixels removed
            at the image bottom, which compensates for annotation errors in
            the COCO dataset. Defaults to 10.
"""
def __init__(self,
*arg,
mask_stride: int = 4,
pairwise_size: int = 3,
pairwise_dilation: int = 2,
pairwise_color_thresh: float = 0.3,
bottom_pixels_removed: int = 10,
**kwargs) -> None:
super().__init__(*arg, **kwargs)
self.mask_stride = mask_stride
self.pairwise_size = pairwise_size
self.pairwise_dilation = pairwise_dilation
self.pairwise_color_thresh = pairwise_color_thresh
self.bottom_pixels_removed = bottom_pixels_removed
if skimage is None:
            raise RuntimeError('skimage is not installed, '
                               'please install it by: pip install scikit-image')
def get_images_color_similarity(self, inputs: Tensor,
image_masks: Tensor) -> Tensor:
"""Compute the image color similarity in LAB color space."""
assert inputs.dim() == 4
assert inputs.size(0) == 1
unfolded_images = unfold_wo_center(
inputs,
kernel_size=self.pairwise_size,
dilation=self.pairwise_dilation)
diff = inputs[:, :, None] - unfolded_images
similarity = torch.exp(-torch.norm(diff, dim=1) * 0.5)
unfolded_weights = unfold_wo_center(
image_masks[None, None],
kernel_size=self.pairwise_size,
dilation=self.pairwise_dilation)
unfolded_weights = torch.max(unfolded_weights, dim=1)[0]
return similarity * unfolded_weights
def forward(self, data: dict, training: bool = False) -> dict:
"""Get pseudo mask labels using color similarity."""
det_data = super().forward(data, training)
inputs, data_samples = det_data['inputs'], det_data['data_samples']
if training:
# get image masks and remove bottom pixels
b_img_h, b_img_w = data_samples[0].batch_input_shape
img_masks = []
for i in range(inputs.shape[0]):
img_h, img_w = data_samples[i].img_shape
img_mask = inputs.new_ones((img_h, img_w))
pixels_removed = int(self.bottom_pixels_removed *
float(img_h) / float(b_img_h))
if pixels_removed > 0:
img_mask[-pixels_removed:, :] = 0
pad_w = b_img_w - img_w
pad_h = b_img_h - img_h
img_mask = F.pad(img_mask, (0, pad_w, 0, pad_h), 'constant',
0.)
img_masks.append(img_mask)
img_masks = torch.stack(img_masks, dim=0)
start = int(self.mask_stride // 2)
img_masks = img_masks[:, start::self.mask_stride,
start::self.mask_stride]
# Get origin rgb image for color similarity
ori_imgs = inputs * self.std + self.mean
downsampled_imgs = F.avg_pool2d(
ori_imgs.float(),
kernel_size=self.mask_stride,
stride=self.mask_stride,
padding=0)
# Compute color similarity for pseudo mask generation
for im_i, data_sample in enumerate(data_samples):
# TODO: Support rgb2lab in mmengine?
images_lab = skimage.color.rgb2lab(
downsampled_imgs[im_i].byte().permute(1, 2,
0).cpu().numpy())
images_lab = torch.as_tensor(
images_lab, device=ori_imgs.device, dtype=torch.float32)
images_lab = images_lab.permute(2, 0, 1)[None]
images_color_similarity = self.get_images_color_similarity(
images_lab, img_masks[im_i])
pairwise_mask = (images_color_similarity >=
self.pairwise_color_thresh).float()
per_im_bboxes = data_sample.gt_instances.bboxes
if per_im_bboxes.shape[0] > 0:
per_im_masks = []
for per_box in per_im_bboxes:
mask_full = torch.zeros((b_img_h, b_img_w),
device=self.device).float()
mask_full[int(per_box[1]):int(per_box[3] + 1),
int(per_box[0]):int(per_box[2] + 1)] = 1.0
per_im_masks.append(mask_full)
per_im_masks = torch.stack(per_im_masks, dim=0)
pairwise_masks = torch.cat(
[pairwise_mask for _ in range(per_im_bboxes.shape[0])],
dim=0)
else:
per_im_masks = torch.zeros((0, b_img_h, b_img_w))
pairwise_masks = torch.zeros(
(0, self.pairwise_size**2 - 1, b_img_h, b_img_w))
# TODO: Support BitmapMasks with tensor?
data_sample.gt_instances.masks = BitmapMasks(
per_im_masks.cpu().numpy(), b_img_h, b_img_w)
data_sample.gt_instances.pairwise_masks = pairwise_masks
return {'inputs': inputs, 'data_samples': data_samples}
| 32,074 | 39.396725 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/conditional_detr.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from ..layers import (ConditionalDetrTransformerDecoder,
DetrTransformerEncoder, SinePositionalEncoding)
from .detr import DETR
@MODELS.register_module()
class ConditionalDETR(DETR):
r"""Implementation of `Conditional DETR for Fast Training Convergence.
<https://arxiv.org/abs/2108.06152>`_.
Code is modified from the `official github repo
<https://github.com/Atten4Vis/ConditionalDETR>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DetrTransformerEncoder(**self.encoder)
self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
# NOTE The embed_dims is typically passed from the inside out.
# For example in DETR, The embed_dims is passed as
# self_attn -> the first encoder layer -> encoder -> detector.
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
- hidden_states (Tensor): Has shape
(num_decoder_layers, bs, num_queries, dim)
- references (Tensor): Has shape
(bs, num_queries, 2)
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
| 3,029 | 39.4 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/two_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from typing import List, Tuple, Union
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
@MODELS.register_module()
class TwoStageDetector(BaseDetector):
"""Base class for two-stage detectors.
    Two-stage detectors typically consist of a region proposal network and a
    task-specific regression head.
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
rpn_head_num_classes = rpn_head_.get('num_classes', None)
if rpn_head_num_classes is None:
rpn_head_.update(num_classes=1)
else:
if rpn_head_num_classes != 1:
warnings.warn(
'The `num_classes` should be 1 in RPN, but get '
f'{rpn_head_num_classes}, please set '
'rpn_head.num_classes = 1 in your config file.')
rpn_head_.update(num_classes=1)
self.rpn_head = MODELS.build(rpn_head_)
if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = MODELS.build(roi_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def _load_from_state_dict(self, state_dict: dict, prefix: str,
local_metadata: dict, strict: bool,
missing_keys: Union[List[str], str],
unexpected_keys: Union[List[str], str],
error_msgs: Union[List[str], str]) -> None:
"""Exchange bbox_head key to rpn_head key when loading single-stage
weights into two-stage model."""
bbox_head_prefix = prefix + '.bbox_head' if prefix else 'bbox_head'
bbox_head_keys = [
k for k in state_dict.keys() if k.startswith(bbox_head_prefix)
]
rpn_head_prefix = prefix + '.rpn_head' if prefix else 'rpn_head'
rpn_head_keys = [
k for k in state_dict.keys() if k.startswith(rpn_head_prefix)
]
if len(bbox_head_keys) != 0 and len(rpn_head_keys) == 0:
for bbox_head_key in bbox_head_keys:
rpn_head_key = rpn_head_prefix + \
bbox_head_key[len(bbox_head_prefix):]
state_dict[rpn_head_key] = state_dict.pop(bbox_head_key)
super()._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs)
@property
def with_rpn(self) -> bool:
"""bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self) -> bool:
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
Returns:
tuple: A tuple of features from ``rpn_head`` and ``roi_head``
forward.
"""
results = ()
x = self.extract_feat(batch_inputs)
if self.with_rpn:
rpn_results_list = self.rpn_head.predict(
x, batch_data_samples, rescale=False)
else:
assert batch_data_samples[0].get('proposals', None) is not None
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
roi_outs = self.roi_head.forward(x, rpn_results_list,
batch_data_samples)
results = results + (roi_outs, )
return results
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components
"""
x = self.extract_feat(batch_inputs)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_data_samples = copy.deepcopy(batch_data_samples)
# set cat_id of gt_labels to 0 in RPN
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict(
x, rpn_data_samples, proposal_cfg=proposal_cfg)
# avoid get same name with roi_head loss
keys = rpn_losses.keys()
for key in list(keys):
if 'loss' in key and 'rpn' not in key:
rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)
losses.update(rpn_losses)
else:
assert batch_data_samples[0].get('proposals', None) is not None
# use pre-defined proposals in InstanceData for the second stage
# to extract ROI features.
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
roi_losses = self.roi_head.loss(x, rpn_results_list,
batch_data_samples)
losses.update(roi_losses)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Return the detection results of the
input images. The returns value is DetDataSample,
which usually contain 'pred_instances'. And the
``pred_instances`` usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(batch_inputs)
# If there are no pre-defined proposals, use RPN to get proposals
if batch_data_samples[0].get('proposals', None) is None:
rpn_results_list = self.rpn_head.predict(
x, batch_data_samples, rescale=False)
else:
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
results_list = self.roi_head.predict(
x, rpn_results_list, batch_data_samples, rescale=rescale)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
| 9,942 | 39.75 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/base.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch
from mmengine.model import BaseModel
from torch import Tensor
from mmdet.structures import DetDataSample, OptSampleList, SampleList
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig
from ..utils import samplelist_boxtype2tensor
ForwardResults = Union[Dict[str, torch.Tensor], List[DetDataSample],
Tuple[torch.Tensor], torch.Tensor]
class BaseDetector(BaseModel, metaclass=ABCMeta):
"""Base class for detectors.
Args:
data_preprocessor (dict or ConfigDict, optional): The pre-process
config of :class:`BaseDataPreprocessor`. it usually includes,
``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
init_cfg (dict or ConfigDict, optional): the config to control the
initialization. Defaults to None.
"""
def __init__(self,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
@property
def with_neck(self) -> bool:
"""bool: whether the detector has a neck"""
return hasattr(self, 'neck') and self.neck is not None
# TODO: these properties need to be carefully handled
# for both single stage & two stage detectors
@property
def with_shared_head(self) -> bool:
"""bool: whether the detector has a shared head in the RoI Head"""
return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
@property
def with_bbox(self) -> bool:
"""bool: whether the detector has a bbox head"""
return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
@property
def with_mask(self) -> bool:
"""bool: whether the detector has a mask head"""
return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
or (hasattr(self, 'mask_head') and self.mask_head is not None))
def forward(self,
inputs: torch.Tensor,
data_samples: OptSampleList = None,
mode: str = 'tensor') -> ForwardResults:
"""The unified entry for a forward process in both training and test.
The method should accept three modes: "tensor", "predict" and "loss":
- "tensor": Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- "predict": Forward and return the predictions, which are fully
processed to a list of :obj:`DetDataSample`.
- "loss": Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method doesn't handle either back propagation or
parameter update, which are supposed to be done in :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (list[:obj:`DetDataSample`], optional): A batch of
data samples that contain annotations and predictions.
Defaults to None.
mode (str): Return what kind of value. Defaults to 'tensor'.
Returns:
The return type depends on ``mode``.
- If ``mode="tensor"``, return a tensor or a tuple of tensor.
- If ``mode="predict"``, return a list of :obj:`DetDataSample`.
- If ``mode="loss"``, return a dict of tensor.
"""
if mode == 'loss':
return self.loss(inputs, data_samples)
elif mode == 'predict':
return self.predict(inputs, data_samples)
elif mode == 'tensor':
return self._forward(inputs, data_samples)
else:
raise RuntimeError(f'Invalid mode "{mode}". '
'Only supports loss, predict and tensor mode')
@abstractmethod
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, tuple]:
"""Calculate losses from a batch of inputs and data samples."""
pass
@abstractmethod
def predict(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing."""
pass
@abstractmethod
def _forward(self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None):
"""Network forward process.
Usually includes backbone, neck and head forward without any post-
processing.
"""
pass
@abstractmethod
def extract_feat(self, batch_inputs: Tensor):
"""Extract features from images."""
pass
def add_pred_to_datasample(self, data_samples: SampleList,
results_list: InstanceList) -> SampleList:
"""Add predictions to `DetDataSample`.
Args:
data_samples (list[:obj:`DetDataSample`], optional): A batch of
data samples that contain annotations and predictions.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
for data_sample, pred_instances in zip(data_samples, results_list):
data_sample.pred_instances = pred_instances
samplelist_boxtype2tensor(data_samples)
return data_samples
| 6,255 | 38.847134 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/single_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList, SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
@MODELS.register_module()
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = MODELS.build(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def _load_from_state_dict(self, state_dict: dict, prefix: str,
local_metadata: dict, strict: bool,
missing_keys: Union[List[str], str],
unexpected_keys: Union[List[str], str],
error_msgs: Union[List[str], str]) -> None:
"""Exchange bbox_head key to rpn_head key when loading two-stage
weights into single-stage model."""
bbox_head_prefix = prefix + '.bbox_head' if prefix else 'bbox_head'
bbox_head_keys = [
k for k in state_dict.keys() if k.startswith(bbox_head_prefix)
]
rpn_head_prefix = prefix + '.rpn_head' if prefix else 'rpn_head'
rpn_head_keys = [
k for k in state_dict.keys() if k.startswith(rpn_head_prefix)
]
if len(bbox_head_keys) == 0 and len(rpn_head_keys) != 0:
for rpn_head_key in rpn_head_keys:
bbox_head_key = bbox_head_prefix + \
rpn_head_key[len(rpn_head_prefix):]
state_dict[bbox_head_key] = state_dict.pop(rpn_head_key)
super()._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs)
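    # Renaming sketch (illustrative): when a two-stage checkpoint provides
    # only ``rpn_head.*`` keys, they are remapped so the dense head can be
    # warm-started from the RPN weights, e.g.
    #
    #   'rpn_head.rpn_cls.weight' -> 'bbox_head.rpn_cls.weight'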
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, list]:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, batch_data_samples)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
            list[:obj:`DetDataSample`]: Detection results of the
            input images. Each DetDataSample usually contains
            'pred_instances', and the ``pred_instances`` usually
            contains the following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
x = self.extract_feat(batch_inputs)
results_list = self.bbox_head.predict(
x, batch_data_samples, rescale=rescale)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
def _forward(
self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
Returns:
tuple[list]: A tuple of features from ``bbox_head`` forward.
"""
x = self.extract_feat(batch_inputs)
results = self.bbox_head.forward(x)
return results
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
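    # Shape sketch (illustrative; assumes a typical ResNet-50 + FPN config
    # with five output levels and strides 8/16/32/64/128):
    #
    #   x = detector.extract_feat(torch.rand(2, 3, 800, 800))
    #   [tuple(f.shape) for f in x]
    #   # [(2, 256, 100, 100), (2, 256, 50, 50), (2, 256, 25, 25),
    #   #  (2, 256, 13, 13), (2, 256, 7, 7)]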
| 6,124 | 39.833333 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/gfl.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
from torch import Tensor
import torch
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class GFL(SingleStageDetector):
"""Implementation of `GFL <https://arxiv.org/abs/2006.04388>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of GFL. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of GFL. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
    # for the replay method based on minimum cost
def compute_cost_for_memory(self, batch_inputs: Tensor,
batch_data_samples: SampleList, cur_class_num) -> Union[dict, list]:
"""Calculate cost for a batch images
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            The per-image cost used for replay-memory (exemplar) selection.
"""
x = self.extract_feat(batch_inputs)
batch_cost = self.bbox_head.compute_cost_for_memory(x, batch_data_samples, cur_class_num)
return batch_cost
def tensor2numpy(self, x):
return x.cpu().data.numpy() if x.is_cuda else x.data.numpy()
    # for the replay method based on iCaRL
def compute_cost_for_memory_icarl(self, batch_inputs: Tensor,
batch_data_samples: SampleList, cur_class_num) -> Union[dict, list]:
"""Calculate cost for a batch images
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            numpy.ndarray: The mean-pooled multi-level feature of each
                image, with shape (N, C).
"""
x = self.extract_feat(batch_inputs)
# batch_cost = self.bbox_head.compute_cost_for_memory_icarl(x, batch_data_samples, cur_class_num)
batch_cost = torch.cat([per_x.reshape(per_x.shape[0], per_x.shape[1], -1) for per_x in x], dim=2).mean(-1)
return self.tensor2numpy(batch_cost)
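    # Pooling sketch for ``compute_cost_for_memory_icarl`` (illustrative):
    # each FPN level of shape (N, C, H_l, W_l) is flattened to
    # (N, C, H_l*W_l), the levels are concatenated along the spatial axis
    # and mean-reduced, yielding one (N, C) global descriptor per image for
    # iCaRL-style exemplar selection.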
# importance metric
def compute_importance_for_replay_v3(self, batch_inputs: Tensor,
batch_data_samples: SampleList, cur_class_num) -> Union[dict, list]:
"""Calculate cost for a batch images
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            The importance score of each image in the batch.
"""
x = self.extract_feat(batch_inputs)
batch_importance = self.bbox_head.compute_importance_for_replay_v3(x, batch_data_samples, cur_class_num)
return batch_importance
def compute_cost_and_feats_for_replay_v4(self, batch_inputs: Tensor,
batch_data_samples: SampleList, cur_class_num) -> Union[dict, list]:
"""Calculate cost and feats for a batch images
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            tuple: The mean-pooled features (a numpy.ndarray of shape
                (N, C)) and the per-image importance scores.
"""
x = self.extract_feat(batch_inputs)
batch_feats = torch.cat([per_x.reshape(per_x.shape[0], per_x.shape[1], -1) for per_x in x], dim=2).mean(-1)
batch_importances = self.bbox_head.compute_importance_for_replay_v4(x, batch_data_samples, cur_class_num)
return self.tensor2numpy(batch_feats), batch_importances
| 5,661 | 43.582677 | 115 | py |
ERD | ERD-main/mmdet/models/detectors/detr.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList
from ..layers import (DetrTransformerDecoder, DetrTransformerEncoder,
SinePositionalEncoding)
from .base_detr import DetectionTransformer
@MODELS.register_module()
class DETR(DetectionTransformer):
r"""Implementation of `DETR: End-to-End Object Detection with Transformers.
<https://arxiv.org/pdf/2005.12872>`_.
Code is modified from the `official github repo
<https://github.com/facebookresearch/detr>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DetrTransformerEncoder(**self.encoder)
self.decoder = DetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
# NOTE The embed_dims is typically passed from the inside out.
# For example in DETR, The embed_dims is passed as
# self_attn -> the first encoder layer -> encoder -> detector.
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super().init_weights()
for coder in self.encoder, self.decoder:
for p in coder.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def pre_transformer(
self,
img_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None) -> Tuple[Dict, Dict]:
"""Prepare the inputs of the Transformer.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
img_feats (Tuple[Tensor]): Tuple of features output from the neck,
has shape (bs, c, h, w).
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such as
`gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[dict, dict]: The first dict contains the inputs of encoder
and the second dict contains the inputs of decoder.
- encoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_encoder()`, which includes 'feat', 'feat_mask',
and 'feat_pos'.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'memory_mask',
and 'memory_pos'.
"""
feat = img_feats[-1] # NOTE img_feats contains only one feature.
batch_size, feat_dim, _, _ = feat.shape
        # construct binary masks for the transformer.
assert batch_data_samples is not None
batch_input_shape = batch_data_samples[0].batch_input_shape
img_shape_list = [sample.img_shape for sample in batch_data_samples]
input_img_h, input_img_w = batch_input_shape
masks = feat.new_ones((batch_size, input_img_h, input_img_w))
for img_id in range(batch_size):
img_h, img_w = img_shape_list[img_id]
masks[img_id, :img_h, :img_w] = 0
# NOTE following the official DETR repo, non-zero values represent
# ignored positions, while zero values mean valid positions.
masks = F.interpolate(
masks.unsqueeze(1), size=feat.shape[-2:]).to(torch.bool).squeeze(1)
# [batch_size, embed_dim, h, w]
pos_embed = self.positional_encoding(masks)
# use `view` instead of `flatten` for dynamically exporting to ONNX
# [bs, c, h, w] -> [bs, h*w, c]
feat = feat.view(batch_size, feat_dim, -1).permute(0, 2, 1)
pos_embed = pos_embed.view(batch_size, feat_dim, -1).permute(0, 2, 1)
# [bs, h, w] -> [bs, h*w]
masks = masks.view(batch_size, -1)
# prepare transformer_inputs_dict
encoder_inputs_dict = dict(
feat=feat, feat_mask=masks, feat_pos=pos_embed)
decoder_inputs_dict = dict(memory_mask=masks, memory_pos=pos_embed)
return encoder_inputs_dict, decoder_inputs_dict
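    # Mask semantics sketch (illustrative): for image 0 of valid size
    # 600x800 inside an 800x800 padded canvas,
    #
    #   masks[0, :600, :800] == 0  # valid pixels
    #   masks[0, 600:, :] == 1     # padding, ignored by attention
    #
    # after interpolation to the feature resolution and flattening, the mask
    # becomes the ``key_padding_mask`` consumed by encoder self-attention.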
def forward_encoder(self, feat: Tensor, feat_mask: Tensor,
feat_pos: Tensor) -> Dict:
"""Forward with Transformer encoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
feat (Tensor): Sequential features, has shape (bs, num_feat_points,
dim).
feat_mask (Tensor): ByteTensor, the padding mask of the features,
has shape (bs, num_feat_points).
feat_pos (Tensor): The positional embeddings of the features, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of encoder outputs, which includes the
`memory` of the encoder output.
"""
memory = self.encoder(
query=feat, query_pos=feat_pos,
key_padding_mask=feat_mask) # for self_attn
encoder_outputs_dict = dict(memory=memory)
return encoder_outputs_dict
def pre_decoder(self, memory: Tensor) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
tuple[dict, dict]: The first dict contains the inputs of decoder
and the second dict contains the inputs of the bbox_head function.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory'.
- head_inputs_dict (dict): The keyword args dictionary of the
bbox_head functions, which is usually empty, or includes
              `enc_outputs_class` and `enc_outputs_coord` when the detector
              supports 'two stage' or 'query selection' strategies.
"""
batch_size = memory.size(0) # (bs, num_feat_points, dim)
query_pos = self.query_embedding.weight
# (num_queries, dim) -> (bs, num_queries, dim)
query_pos = query_pos.unsqueeze(0).repeat(batch_size, 1, 1)
query = torch.zeros_like(query_pos)
decoder_inputs_dict = dict(
query_pos=query_pos, query=query, memory=memory)
head_inputs_dict = dict()
return decoder_inputs_dict, head_inputs_dict
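    # Query initialization sketch (illustrative): in DETR the decoder
    # content queries start as zeros, and all query-specific information
    # enters through the learned positional queries, e.g. with
    # num_queries=300 and dim=256:
    #
    #   query_pos.shape  # (bs, 300, 256), broadcast from nn.Embedding
    #   query.shape      # (bs, 300, 256), all zeros before the first layer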
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` of the decoder output.
- hidden_states (Tensor): Has shape
(num_decoder_layers, bs, num_queries, dim)
"""
hidden_states = self.decoder(
query=query,
key=memory,
value=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask) # for cross_attn
head_inputs_dict = dict(hidden_states=hidden_states)
return head_inputs_dict
| 9,383 | 42.24424 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/semi_base.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
from mmdet.models.utils import (filter_gt_instances, rename_loss_dict,
reweight_loss_dict)
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox_project
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
@MODELS.register_module()
class SemiBaseDetector(BaseDetector):
"""Base class for semi-supervised detectors.
    Semi-supervised detectors typically consist of a teacher model
updated by exponential moving average and a student model updated
by gradient descent.
Args:
detector (:obj:`ConfigDict` or dict): The detector config.
semi_train_cfg (:obj:`ConfigDict` or dict, optional):
The semi-supervised training config.
semi_test_cfg (:obj:`ConfigDict` or dict, optional):
The semi-supervised testing config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
detector: ConfigType,
semi_train_cfg: OptConfigType = None,
semi_test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.student = MODELS.build(detector)
self.teacher = MODELS.build(detector)
self.semi_train_cfg = semi_train_cfg
self.semi_test_cfg = semi_test_cfg
if self.semi_train_cfg.get('freeze_teacher', True) is True:
self.freeze(self.teacher)
@staticmethod
def freeze(model: nn.Module):
"""Freeze the model."""
model.eval()
for param in model.parameters():
param.requires_grad = False
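    # EMA sketch (assumption: the teacher update itself happens outside this
    # class, typically in a mean-teacher hook; a common update rule is):
    #
    #   for t, s in zip(teacher.parameters(), student.parameters()):
    #       t.data.mul_(momentum).add_(s.data, alpha=1 - momentum)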
def loss(self, multi_batch_inputs: Dict[str, Tensor],
multi_batch_data_samples: Dict[str, SampleList]) -> dict:
"""Calculate losses from multi-branch inputs and data samples.
Args:
multi_batch_inputs (Dict[str, Tensor]): The dict of multi-branch
input images, each value with shape (N, C, H, W).
Each value should usually be mean centered and std scaled.
multi_batch_data_samples (Dict[str, List[:obj:`DetDataSample`]]):
The dict of multi-branch data samples.
Returns:
dict: A dictionary of loss components
"""
losses = dict()
losses.update(**self.loss_by_gt_instances(
multi_batch_inputs['sup'], multi_batch_data_samples['sup']))
origin_pseudo_data_samples, batch_info = self.get_pseudo_instances(
multi_batch_inputs['unsup_teacher'],
multi_batch_data_samples['unsup_teacher'])
multi_batch_data_samples[
'unsup_student'] = self.project_pseudo_instances(
origin_pseudo_data_samples,
multi_batch_data_samples['unsup_student'])
losses.update(**self.loss_by_pseudo_instances(
multi_batch_inputs['unsup_student'],
multi_batch_data_samples['unsup_student'], batch_info))
return losses
def loss_by_gt_instances(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and ground-truth data
samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components
"""
losses = self.student.loss(batch_inputs, batch_data_samples)
sup_weight = self.semi_train_cfg.get('sup_weight', 1.)
return rename_loss_dict('sup_', reweight_loss_dict(losses, sup_weight))
def loss_by_pseudo_instances(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
batch_info: Optional[dict] = None) -> dict:
"""Calculate losses from a batch of inputs and pseudo data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
batch_info (dict): Batch information of teacher model
forward propagation process. Defaults to None.
Returns:
dict: A dictionary of loss components
"""
batch_data_samples = filter_gt_instances(
batch_data_samples, score_thr=self.semi_train_cfg.cls_pseudo_thr)
losses = self.student.loss(batch_inputs, batch_data_samples)
pseudo_instances_num = sum([
len(data_samples.gt_instances)
for data_samples in batch_data_samples
])
unsup_weight = self.semi_train_cfg.get(
'unsup_weight', 1.) if pseudo_instances_num > 0 else 0.
return rename_loss_dict('unsup_',
reweight_loss_dict(losses, unsup_weight))
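    # Weighting sketch (illustrative; the threshold and weight values are
    # assumptions): with cls_pseudo_thr=0.9 and unsup_weight=4.0, only
    # pseudo boxes scoring >= 0.9 survive the filter, and every remaining
    # loss term is scaled and renamed, e.g.
    #
    #   {'loss_cls': l} -> {'unsup_loss_cls': 4.0 * l}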
@torch.no_grad()
def get_pseudo_instances(
self, batch_inputs: Tensor, batch_data_samples: SampleList
) -> Tuple[SampleList, Optional[dict]]:
"""Get pseudo instances from teacher model."""
self.teacher.eval()
results_list = self.teacher.predict(
batch_inputs, batch_data_samples, rescale=False)
batch_info = {}
for data_samples, results in zip(batch_data_samples, results_list):
data_samples.gt_instances = results.pred_instances
data_samples.gt_instances.bboxes = bbox_project(
data_samples.gt_instances.bboxes,
torch.from_numpy(data_samples.homography_matrix).inverse().to(
self.data_preprocessor.device), data_samples.ori_shape)
return batch_data_samples, batch_info
def project_pseudo_instances(self, batch_pseudo_instances: SampleList,
batch_data_samples: SampleList) -> SampleList:
"""Project pseudo instances."""
for pseudo_instances, data_samples in zip(batch_pseudo_instances,
batch_data_samples):
data_samples.gt_instances = copy.deepcopy(
pseudo_instances.gt_instances)
data_samples.gt_instances.bboxes = bbox_project(
data_samples.gt_instances.bboxes,
torch.tensor(data_samples.homography_matrix).to(
self.data_preprocessor.device), data_samples.img_shape)
wh_thr = self.semi_train_cfg.get('min_pseudo_bbox_wh', (1e-2, 1e-2))
return filter_gt_instances(batch_data_samples, wh_thr=wh_thr)
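    # Projection sketch (illustrative): ``bbox_project`` applies the 3x3
    # homography that maps teacher-view pixel coordinates to student-view
    # coordinates and clips the result to ``img_shape``, so pseudo boxes
    # produced on one augmented view can supervise the other augmented view.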
def predict(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
            list[:obj:`DetDataSample`]: Detection results of the
            input images. Each returned DetDataSample usually
            contains 'pred_instances', and the ``pred_instances``
            usually contains the following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
if self.semi_test_cfg.get('predict_on', 'teacher') == 'teacher':
return self.teacher(
batch_inputs, batch_data_samples, mode='predict')
else:
return self.student(
batch_inputs, batch_data_samples, mode='predict')
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> SampleList:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
Returns:
            tuple: The raw outputs of the wrapped detector's ``tensor``
                mode forward.
"""
if self.semi_test_cfg.get('forward_on', 'teacher') == 'teacher':
return self.teacher(
batch_inputs, batch_data_samples, mode='tensor')
else:
return self.student(
batch_inputs, batch_data_samples, mode='tensor')
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have
different resolutions.
"""
if self.semi_test_cfg.get('extract_feat_on', 'teacher') == 'teacher':
return self.teacher.extract_feat(batch_inputs)
else:
return self.student.extract_feat(batch_inputs)
def _load_from_state_dict(self, state_dict: dict, prefix: str,
local_metadata: dict, strict: bool,
missing_keys: Union[List[str], str],
unexpected_keys: Union[List[str], str],
error_msgs: Union[List[str], str]) -> None:
"""Add teacher and student prefixes to model parameter names."""
if not any([
'student' in key or 'teacher' in key
for key in state_dict.keys()
]):
keys = list(state_dict.keys())
state_dict.update({'teacher.' + k: state_dict[k] for k in keys})
state_dict.update({'student.' + k: state_dict[k] for k in keys})
for k in keys:
state_dict.pop(k)
return super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
| 11,647 | 42.625468 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/kd_one_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
from typing import Any, Optional, Union
import torch
import torch.nn as nn
from mmengine.config import Config
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType
from .single_stage import SingleStageDetector
@MODELS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
teacher_config (:obj:`ConfigDict` | dict | str | Path): Config file
            path or the config object of the teacher model.
        teacher_ckpt (str, optional): Checkpoint path of the teacher model.
            If left as None, the model will not load any weights.
            Defaults to None.
eval_teacher (bool): Set the train mode for teacher.
Defaults to True.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of the detector. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of the detector. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
"""
def __init__(
self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_config: Union[ConfigType, str, Path],
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = Config.fromfile(teacher_config)
self.teacher_model = MODELS.build(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(batch_inputs)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.loss(x, out_teacher, batch_data_samples)
return losses
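    # Distillation sketch (assumption: the actual distillation term is
    # implemented inside ``self.bbox_head.loss``; a common form is a
    # temperature-scaled KL divergence between student and teacher logits):
    #
    #   import torch.nn.functional as F
    #   T = 2.0  # hypothetical temperature
    #   loss_kd = F.kl_div((logits_s / T).log_softmax(-1),
    #                      (logits_t / T).softmax(-1),
    #                      reduction='batchmean') * T * T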
def cuda(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling ``cuda`` function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def to(self, device: Optional[str] = None) -> nn.Module:
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to other device when calling ``to``
function."""
self.teacher_model.to(device=device)
return super().to(device=device)
def train(self, mode: bool = True) -> None:
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name: str, value: Any) -> None:
"""Set attribute, i.e. self.name = value
        This overload prevents the teacher model from being registered as an
        nn.Module. The teacher module is registered as a plain object, so
        that the teacher parameters do not show up when calling the
        ``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
| 4,987 | 39.552846 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/deformable_detr.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Dict, Tuple
import torch
import torch.nn.functional as F
from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention
from mmengine.model import xavier_init
from torch import Tensor, nn
from torch.nn.init import normal_
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList
from mmdet.utils import OptConfigType
from ..layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerEncoder, SinePositionalEncoding)
from .base_detr import DetectionTransformer
@MODELS.register_module()
class DeformableDETR(DetectionTransformer):
r"""Implementation of `Deformable DETR: Deformable Transformers for
End-to-End Object Detection <https://arxiv.org/abs/2010.04159>`_
Code is modified from the `official github repo
<https://github.com/fundamentalvision/Deformable-DETR>`_.
Args:
decoder (:obj:`ConfigDict` or dict, optional): Config of the
Transformer decoder. Defaults to None.
bbox_head (:obj:`ConfigDict` or dict, optional): Config for the
bounding box head module. Defaults to None.
with_box_refine (bool, optional): Whether to refine the references
in the decoder. Defaults to `False`.
as_two_stage (bool, optional): Whether to generate the proposal
from the outputs of encoder. Defaults to `False`.
num_feature_levels (int, optional): Number of feature levels.
Defaults to 4.
"""
def __init__(self,
*args,
decoder: OptConfigType = None,
bbox_head: OptConfigType = None,
with_box_refine: bool = False,
as_two_stage: bool = False,
num_feature_levels: int = 4,
**kwargs) -> None:
self.with_box_refine = with_box_refine
self.as_two_stage = as_two_stage
self.num_feature_levels = num_feature_levels
if bbox_head is not None:
assert 'share_pred_layer' not in bbox_head and \
'num_pred_layer' not in bbox_head and \
'as_two_stage' not in bbox_head, \
'The two keyword args `share_pred_layer`, `num_pred_layer`, ' \
                'and `as_two_stage` are set in `detector.__init__()`, users ' \
'should not set them in `bbox_head` config.'
# The last prediction layer is used to generate proposal
# from encode feature map when `as_two_stage` is `True`.
# And all the prediction layers should share parameters
# when `with_box_refine` is `True`.
bbox_head['share_pred_layer'] = not with_box_refine
bbox_head['num_pred_layer'] = (decoder['num_layers'] + 1) \
if self.as_two_stage else decoder['num_layers']
bbox_head['as_two_stage'] = as_two_stage
super().__init__(*args, decoder=decoder, bbox_head=bbox_head, **kwargs)
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DeformableDetrTransformerEncoder(**self.encoder)
self.decoder = DeformableDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
if not self.as_two_stage:
self.query_embedding = nn.Embedding(self.num_queries,
self.embed_dims * 2)
# NOTE The query_embedding will be split into query and query_pos
# in self.pre_decoder, hence, the embed_dims are doubled.
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
self.level_embed = nn.Parameter(
torch.Tensor(self.num_feature_levels, self.embed_dims))
if self.as_two_stage:
self.memory_trans_fc = nn.Linear(self.embed_dims, self.embed_dims)
self.memory_trans_norm = nn.LayerNorm(self.embed_dims)
self.pos_trans_fc = nn.Linear(self.embed_dims * 2,
self.embed_dims * 2)
self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2)
else:
self.reference_points_fc = nn.Linear(self.embed_dims, 2)
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super().init_weights()
for coder in self.encoder, self.decoder:
for p in coder.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MultiScaleDeformableAttention):
m.init_weights()
if self.as_two_stage:
nn.init.xavier_uniform_(self.memory_trans_fc.weight)
nn.init.xavier_uniform_(self.pos_trans_fc.weight)
else:
xavier_init(
self.reference_points_fc, distribution='uniform', bias=0.)
normal_(self.level_embed)
def pre_transformer(
self,
mlvl_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None) -> Tuple[Dict]:
"""Process image features before feeding them to the transformer.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
mlvl_feats (tuple[Tensor]): Multi-level features that may have
different resolutions, output from neck. Each feature has
shape (bs, dim, h_lvl, w_lvl), where 'lvl' means 'layer'.
batch_data_samples (list[:obj:`DetDataSample`], optional): The
batch data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[dict]: The first dict contains the inputs of encoder and the
second dict contains the inputs of decoder.
- encoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_encoder()`, which includes 'feat', 'feat_mask',
and 'feat_pos'.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'memory_mask'.
"""
batch_size = mlvl_feats[0].size(0)
# construct binary masks for the transformer.
assert batch_data_samples is not None
batch_input_shape = batch_data_samples[0].batch_input_shape
img_shape_list = [sample.img_shape for sample in batch_data_samples]
input_img_h, input_img_w = batch_input_shape
masks = mlvl_feats[0].new_ones((batch_size, input_img_h, input_img_w))
for img_id in range(batch_size):
img_h, img_w = img_shape_list[img_id]
masks[img_id, :img_h, :img_w] = 0
        # NOTE following the official DETR repo, non-zero values represent
        # ignored positions, while zero values mean valid positions.
mlvl_masks = []
mlvl_pos_embeds = []
for feat in mlvl_feats:
mlvl_masks.append(
F.interpolate(masks[None],
size=feat.shape[-2:]).to(torch.bool).squeeze(0))
mlvl_pos_embeds.append(self.positional_encoding(mlvl_masks[-1]))
feat_flatten = []
lvl_pos_embed_flatten = []
mask_flatten = []
spatial_shapes = []
for lvl, (feat, mask, pos_embed) in enumerate(
zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)):
batch_size, c, h, w = feat.shape
# [bs, c, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl, c]
feat = feat.view(batch_size, c, -1).permute(0, 2, 1)
pos_embed = pos_embed.view(batch_size, c, -1).permute(0, 2, 1)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
# [bs, h_lvl, w_lvl] -> [bs, h_lvl*w_lvl]
mask = mask.flatten(1)
spatial_shape = (h, w)
feat_flatten.append(feat)
lvl_pos_embed_flatten.append(lvl_pos_embed)
mask_flatten.append(mask)
spatial_shapes.append(spatial_shape)
# (bs, num_feat_points, dim)
feat_flatten = torch.cat(feat_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
# (bs, num_feat_points), where num_feat_points = sum_lvl(h_lvl*w_lvl)
mask_flatten = torch.cat(mask_flatten, 1)
spatial_shapes = torch.as_tensor( # (num_level, 2)
spatial_shapes,
dtype=torch.long,
device=feat_flatten.device)
level_start_index = torch.cat((
spatial_shapes.new_zeros((1, )), # (num_level)
spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack( # (bs, num_level, 2)
[self.get_valid_ratio(m) for m in mlvl_masks], 1)
encoder_inputs_dict = dict(
feat=feat_flatten,
feat_mask=mask_flatten,
feat_pos=lvl_pos_embed_flatten,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios)
decoder_inputs_dict = dict(
memory_mask=mask_flatten,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios)
return encoder_inputs_dict, decoder_inputs_dict
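    # Flattening sketch (illustrative): with four levels of spatial shapes
    # (100, 100), (50, 50), (25, 25) and (13, 13), the flattened sequence
    # holds 10000 + 2500 + 625 + 169 = 13294 tokens per image, and
    #
    #   level_start_index == tensor([0, 10000, 12500, 13125])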
def forward_encoder(self, feat: Tensor, feat_mask: Tensor,
feat_pos: Tensor, spatial_shapes: Tensor,
level_start_index: Tensor,
valid_ratios: Tensor) -> Dict:
"""Forward with Transformer encoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
feat (Tensor): Sequential features, has shape (bs, num_feat_points,
dim).
feat_mask (Tensor): ByteTensor, the padding mask of the features,
has shape (bs, num_feat_points).
feat_pos (Tensor): The positional embeddings of the features, has
shape (bs, num_feat_points, dim).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
Returns:
dict: The dictionary of encoder outputs, which includes the
`memory` of the encoder output.
"""
memory = self.encoder(
query=feat,
query_pos=feat_pos,
key_padding_mask=feat_mask, # for self_attn
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios)
encoder_outputs_dict = dict(
memory=memory,
memory_mask=feat_mask,
spatial_shapes=spatial_shapes)
return encoder_outputs_dict
def pre_decoder(self, memory: Tensor, memory_mask: Tensor,
spatial_shapes: Tensor) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`, and `reference_points`.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points). It will only be used when
`as_two_stage` is `True`.
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
It will only be used when `as_two_stage` is `True`.
Returns:
tuple[dict, dict]: The decoder_inputs_dict and head_inputs_dict.
- decoder_inputs_dict (dict): The keyword dictionary args of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory', and `reference_points`. The reference_points of
decoder input here are 4D boxes when `as_two_stage` is `True`,
otherwise 2D points, although it has `points` in its name.
The reference_points in encoder is always 2D points.
- head_inputs_dict (dict): The keyword dictionary args of the
bbox_head functions, which includes `enc_outputs_class` and
`enc_outputs_coord`. They are both `None` when 'as_two_stage'
is `False`. The dict is empty when `self.training` is `False`.
"""
batch_size, _, c = memory.shape
if self.as_two_stage:
output_memory, output_proposals = \
self.gen_encoder_output_proposals(
memory, memory_mask, spatial_shapes)
enc_outputs_class = self.bbox_head.cls_branches[
self.decoder.num_layers](
output_memory)
enc_outputs_coord_unact = self.bbox_head.reg_branches[
self.decoder.num_layers](output_memory) + output_proposals
enc_outputs_coord = enc_outputs_coord_unact.sigmoid()
# We only use the first channel in enc_outputs_class as foreground,
# the other (num_classes - 1) channels are actually not used.
# Its targets are set to be 0s, which indicates the first
# class (foreground) because we use [0, num_classes - 1] to
# indicate class labels, background class is indicated by
# num_classes (similar convention in RPN).
# See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa
# This follows the official implementation of Deformable DETR.
topk_proposals = torch.topk(
enc_outputs_class[..., 0], self.num_queries, dim=1)[1]
topk_coords_unact = torch.gather(
enc_outputs_coord_unact, 1,
topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
topk_coords_unact = topk_coords_unact.detach()
reference_points = topk_coords_unact.sigmoid()
pos_trans_out = self.pos_trans_fc(
self.get_proposal_pos_embed(topk_coords_unact))
pos_trans_out = self.pos_trans_norm(pos_trans_out)
query_pos, query = torch.split(pos_trans_out, c, dim=2)
else:
enc_outputs_class, enc_outputs_coord = None, None
query_embed = self.query_embedding.weight
query_pos, query = torch.split(query_embed, c, dim=1)
query_pos = query_pos.unsqueeze(0).expand(batch_size, -1, -1)
query = query.unsqueeze(0).expand(batch_size, -1, -1)
reference_points = self.reference_points_fc(query_pos).sigmoid()
decoder_inputs_dict = dict(
query=query,
query_pos=query_pos,
memory=memory,
reference_points=reference_points)
head_inputs_dict = dict(
enc_outputs_class=enc_outputs_class,
enc_outputs_coord=enc_outputs_coord) if self.training else dict()
return decoder_inputs_dict, head_inputs_dict
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, reference_points: Tensor,
spatial_shapes: Tensor, level_start_index: Tensor,
valid_ratios: Tensor) -> Dict:
"""Forward with Transformer decoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
More details can be found at `TransformerDetector.forward_transformer`
in `mmdet/detector/base_detr.py`.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
reference_points (Tensor): The initial reference, has shape
(bs, num_queries, 4) with the last dimension arranged as
(cx, cy, w, h) when `as_two_stage` is `True`, otherwise has
shape (bs, num_queries, 2) with the last dimension arranged as
(cx, cy).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` of the decoder output and `references` including
the initial and intermediate reference_points.
"""
inter_states, inter_references = self.decoder(
query=query,
value=memory,
query_pos=query_pos,
key_padding_mask=memory_mask, # for cross_attn
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reg_branches=self.bbox_head.reg_branches
if self.with_box_refine else None)
references = [reference_points, *inter_references]
decoder_outputs_dict = dict(
hidden_states=inter_states, references=references)
return decoder_outputs_dict
@staticmethod
def get_valid_ratio(mask: Tensor) -> Tensor:
"""Get the valid radios of feature map in a level.
.. code:: text
|---> valid_W <---|
---+-----------------+-----+---
A | | | A
| | | | |
| | | | |
valid_H | | | |
| | | | H
| | | | |
V | | | |
---+-----------------+ | |
| | V
+-----------------------+---
|---------> W <---------|
The valid_ratios are defined as:
r_h = valid_H / H, r_w = valid_W / W
They are the factors to re-normalize the relative coordinates of the
image to the relative coordinates of the current level feature map.
Args:
mask (Tensor): Binary mask of a feature map, has shape (bs, H, W).
Returns:
            Tensor: valid ratios [r_w, r_h] of a feature map, has shape (bs, 2).
"""
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
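    # Numeric sketch (illustrative): for a (1, 4, 6) mask whose valid region
    # spans 3 rows x 5 cols (zeros) with ones elsewhere,
    #
    #   valid_H, valid_W = 3, 5
    #   valid_ratio == tensor([[5 / 6, 3 / 4]])  # [r_w, r_h]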
def gen_encoder_output_proposals(
self, memory: Tensor, memory_mask: Tensor,
spatial_shapes: Tensor) -> Tuple[Tensor, Tensor]:
"""Generate proposals from encoded memory. The function will only be
used when `as_two_stage` is `True`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
Returns:
tuple: A tuple of transformed memory and proposals.
- output_memory (Tensor): The transformed memory for obtaining
top-k proposals, has shape (bs, num_feat_points, dim).
- output_proposals (Tensor): The inverse-normalized proposal, has
shape (batch_size, num_keys, 4) with the last dimension arranged
as (cx, cy, w, h).
"""
bs = memory.size(0)
proposals = []
_cur = 0 # start index in the sequence of the current level
for lvl, (H, W) in enumerate(spatial_shapes):
mask_flatten_ = memory_mask[:,
_cur:(_cur + H * W)].view(bs, H, W, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1).unsqueeze(-1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1).unsqueeze(-1)
grid_y, grid_x = torch.meshgrid(
torch.linspace(
0, H - 1, H, dtype=torch.float32, device=memory.device),
torch.linspace(
0, W - 1, W, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_W, valid_H], 1).view(bs, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(bs, -1, -1, -1) + 0.5) / scale
wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
proposal = torch.cat((grid, wh), -1).view(bs, -1, 4)
proposals.append(proposal)
_cur += (H * W)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) &
(output_proposals < 0.99)).all(
-1, keepdim=True)
# inverse_sigmoid
output_proposals = torch.log(output_proposals / (1 - output_proposals))
output_proposals = output_proposals.masked_fill(
memory_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(
~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(
memory_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid,
float(0))
output_memory = self.memory_trans_fc(output_memory)
output_memory = self.memory_trans_norm(output_memory)
        # output_memory: [bs, sum(hw), dim]; output_proposals: [bs, sum(hw), 4]
return output_memory, output_proposals
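    # Proposal prior sketch (illustrative): at level ``lvl`` every feature
    # cell becomes a box centered on the cell with normalized size
    # 0.05 * 2**lvl, so coarser levels propose larger boxes; proposals whose
    # coordinates fall outside (0.01, 0.99) or lie on padding are pushed to
    # +inf so they never rank among the top-k.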
@staticmethod
def get_proposal_pos_embed(proposals: Tensor,
num_pos_feats: int = 128,
temperature: int = 10000) -> Tensor:
"""Get the position embedding of the proposal.
Args:
proposals (Tensor): Not normalized proposals, has shape
(bs, num_queries, 4) with the last dimension arranged as
(cx, cy, w, h).
num_pos_feats (int, optional): The feature dimension for each
position along x, y, w, and h-axis. Note the final returned
                dimension for each position is 4 times num_pos_feats.
                Defaults to 128.
temperature (int, optional): The temperature used for scaling the
position embedding. Defaults to 10000.
Returns:
Tensor: The position embedding of proposal, has shape
(bs, num_queries, num_pos_feats * 4), with the last dimension
arranged as (cx, cy, w, h)
"""
scale = 2 * math.pi
dim_t = torch.arange(
num_pos_feats, dtype=torch.float32, device=proposals.device)
dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats)
# N, L, 4
proposals = proposals.sigmoid() * scale
# N, L, 4, 128
pos = proposals[:, :, :, None] / dim_t
# N, L, 4, 64, 2
pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()),
dim=4).flatten(2)
return pos
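    # Shape sketch (illustrative): with proposals of shape (bs, 300, 4) and
    # num_pos_feats=128, each of the four box coordinates is expanded into a
    # 128-d interleaved sin/cos embedding, giving an output of shape
    # (bs, 300, 512) that matches ``embed_dims * 2``.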
| 25,779 | 46.564576 | 132 | py |
ERD | ERD-main/mmdet/models/detectors/soft_teacher.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List, Optional, Tuple
import torch
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.utils import (filter_gt_instances, rename_loss_dict,
reweight_loss_dict)
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi, bbox_project
from mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig
from ..utils.misc import unpack_gt_instances
from .semi_base import SemiBaseDetector
@MODELS.register_module()
class SoftTeacher(SemiBaseDetector):
r"""Implementation of `End-to-End Semi-Supervised Object Detection
with Soft Teacher <https://arxiv.org/abs/2106.09018>`_
Args:
detector (:obj:`ConfigDict` or dict): The detector config.
semi_train_cfg (:obj:`ConfigDict` or dict, optional):
The semi-supervised training config.
semi_test_cfg (:obj:`ConfigDict` or dict, optional):
The semi-supervised testing config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
detector: ConfigType,
semi_train_cfg: OptConfigType = None,
semi_test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
detector=detector,
semi_train_cfg=semi_train_cfg,
semi_test_cfg=semi_test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
def loss_by_pseudo_instances(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
batch_info: Optional[dict] = None) -> dict:
"""Calculate losses from a batch of inputs and pseudo data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
batch_info (dict): Batch information of teacher model
forward propagation process. Defaults to None.
Returns:
dict: A dictionary of loss components
"""
x = self.student.extract_feat(batch_inputs)
losses = {}
rpn_losses, rpn_results_list = self.rpn_loss_by_pseudo_instances(
x, batch_data_samples)
losses.update(**rpn_losses)
losses.update(**self.rcnn_cls_loss_by_pseudo_instances(
x, rpn_results_list, batch_data_samples, batch_info))
losses.update(**self.rcnn_reg_loss_by_pseudo_instances(
x, rpn_results_list, batch_data_samples))
unsup_weight = self.semi_train_cfg.get('unsup_weight', 1.)
return rename_loss_dict('unsup_',
reweight_loss_dict(losses, unsup_weight))
@torch.no_grad()
def get_pseudo_instances(
self, batch_inputs: Tensor, batch_data_samples: SampleList
) -> Tuple[SampleList, Optional[dict]]:
"""Get pseudo instances from teacher model."""
assert self.teacher.with_bbox, 'Bbox head must be implemented.'
x = self.teacher.extract_feat(batch_inputs)
# If there are no pre-defined proposals, use RPN to get proposals
if batch_data_samples[0].get('proposals', None) is None:
rpn_results_list = self.teacher.rpn_head.predict(
x, batch_data_samples, rescale=False)
else:
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
results_list = self.teacher.roi_head.predict(
x, rpn_results_list, batch_data_samples, rescale=False)
for data_samples, results in zip(batch_data_samples, results_list):
data_samples.gt_instances = results
batch_data_samples = filter_gt_instances(
batch_data_samples,
score_thr=self.semi_train_cfg.pseudo_label_initial_score_thr)
reg_uncs_list = self.compute_uncertainty_with_aug(
x, batch_data_samples)
for data_samples, reg_uncs in zip(batch_data_samples, reg_uncs_list):
data_samples.gt_instances['reg_uncs'] = reg_uncs
data_samples.gt_instances.bboxes = bbox_project(
data_samples.gt_instances.bboxes,
torch.from_numpy(data_samples.homography_matrix).inverse().to(
self.data_preprocessor.device), data_samples.ori_shape)
batch_info = {
'feat': x,
'img_shape': [],
'homography_matrix': [],
'metainfo': []
}
for data_samples in batch_data_samples:
batch_info['img_shape'].append(data_samples.img_shape)
batch_info['homography_matrix'].append(
torch.from_numpy(data_samples.homography_matrix).to(
self.data_preprocessor.device))
batch_info['metainfo'].append(data_samples.metainfo)
return batch_data_samples, batch_info
def rpn_loss_by_pseudo_instances(self, x: Tuple[Tensor],
batch_data_samples: SampleList) -> dict:
"""Calculate rpn loss from a batch of inputs and pseudo data samples.
Args:
x (tuple[Tensor]): Features from FPN.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
Returns:
dict: A dictionary of rpn loss components
"""
rpn_data_samples = copy.deepcopy(batch_data_samples)
rpn_data_samples = filter_gt_instances(
rpn_data_samples, score_thr=self.semi_train_cfg.rpn_pseudo_thr)
proposal_cfg = self.student.train_cfg.get('rpn_proposal',
self.student.test_cfg.rpn)
# set cat_id of gt_labels to 0 in RPN
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
rpn_losses, rpn_results_list = self.student.rpn_head.loss_and_predict(
x, rpn_data_samples, proposal_cfg=proposal_cfg)
for key in rpn_losses.keys():
if 'loss' in key and 'rpn' not in key:
rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)
return rpn_losses, rpn_results_list
def rcnn_cls_loss_by_pseudo_instances(self, x: Tuple[Tensor],
unsup_rpn_results_list: InstanceList,
batch_data_samples: SampleList,
batch_info: dict) -> dict:
"""Calculate classification loss from a batch of inputs and pseudo data
samples.
Args:
x (tuple[Tensor]): List of multi-level img features.
unsup_rpn_results_list (list[:obj:`InstanceData`]):
List of region proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
batch_info (dict): Batch information of teacher model
forward propagation process.
Returns:
dict[str, Tensor]: A dictionary of rcnn
classification loss components
"""
rpn_results_list = copy.deepcopy(unsup_rpn_results_list)
cls_data_samples = copy.deepcopy(batch_data_samples)
cls_data_samples = filter_gt_instances(
cls_data_samples, score_thr=self.semi_train_cfg.cls_pseudo_thr)
outputs = unpack_gt_instances(cls_data_samples)
batch_gt_instances, batch_gt_instances_ignore, _ = outputs
# assign gts and sample proposals
num_imgs = len(cls_data_samples)
sampling_results = []
for i in range(num_imgs):
# rename rpn_results.bboxes to rpn_results.priors
rpn_results = rpn_results_list[i]
rpn_results.priors = rpn_results.pop('bboxes')
assign_result = self.student.roi_head.bbox_assigner.assign(
rpn_results, batch_gt_instances[i],
batch_gt_instances_ignore[i])
sampling_result = self.student.roi_head.bbox_sampler.sample(
assign_result,
rpn_results,
batch_gt_instances[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
selected_bboxes = [res.priors for res in sampling_results]
rois = bbox2roi(selected_bboxes)
bbox_results = self.student.roi_head._bbox_forward(x, rois)
# cls_reg_targets is a tuple of labels, label_weights,
# and bbox_targets, bbox_weights
cls_reg_targets = self.student.roi_head.bbox_head.get_targets(
sampling_results, self.student.train_cfg.rcnn)
selected_results_list = []
for bboxes, data_samples, teacher_matrix, teacher_img_shape in zip(
selected_bboxes, batch_data_samples,
batch_info['homography_matrix'], batch_info['img_shape']):
student_matrix = torch.tensor(
data_samples.homography_matrix, device=teacher_matrix.device)
homography_matrix = teacher_matrix @ student_matrix.inverse()
projected_bboxes = bbox_project(bboxes, homography_matrix,
teacher_img_shape)
selected_results_list.append(InstanceData(bboxes=projected_bboxes))
with torch.no_grad():
results_list = self.teacher.roi_head.predict_bbox(
batch_info['feat'],
batch_info['metainfo'],
selected_results_list,
rcnn_test_cfg=None,
rescale=False)
bg_score = torch.cat(
[results.scores[:, -1] for results in results_list])
# cls_reg_targets[0] is labels
neg_inds = cls_reg_targets[
0] == self.student.roi_head.bbox_head.num_classes
# cls_reg_targets[1] is label_weights
cls_reg_targets[1][neg_inds] = bg_score[neg_inds].detach()
losses = self.student.roi_head.bbox_head.loss(
bbox_results['cls_score'], bbox_results['bbox_pred'], rois,
*cls_reg_targets)
# cls_reg_targets[1] is label_weights
losses['loss_cls'] = losses['loss_cls'] * len(
cls_reg_targets[1]) / max(sum(cls_reg_targets[1]), 1.0)
return losses
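    # The rescaling above keeps the classification loss on the same scale as
    # with hard labels: soft background weights shrink the effective number
    # of samples, so the loss is multiplied by N / sum(weights). A tiny
    # sketch with made-up numbers:
    #
    #   >>> import torch
    #   >>> label_weights = torch.tensor([1.0, 1.0, 0.3, 0.1])
    #   >>> scale = len(label_weights) / max(sum(label_weights),
    #   ...                                  torch.tensor(1.0))
    #   >>> round(float(scale), 3)
    #   1.667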
def rcnn_reg_loss_by_pseudo_instances(
self, x: Tuple[Tensor], unsup_rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> dict:
"""Calculate rcnn regression loss from a batch of inputs and pseudo
data samples.
Args:
x (tuple[Tensor]): List of multi-level img features.
unsup_rpn_results_list (list[:obj:`InstanceData`]):
List of region proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
Returns:
dict[str, Tensor]: A dictionary of rcnn
regression loss components
"""
rpn_results_list = copy.deepcopy(unsup_rpn_results_list)
reg_data_samples = copy.deepcopy(batch_data_samples)
for data_samples in reg_data_samples:
if data_samples.gt_instances.bboxes.shape[0] > 0:
data_samples.gt_instances = data_samples.gt_instances[
data_samples.gt_instances.reg_uncs <
self.semi_train_cfg.reg_pseudo_thr]
roi_losses = self.student.roi_head.loss(x, rpn_results_list,
reg_data_samples)
return {'loss_bbox': roi_losses['loss_bbox']}
def compute_uncertainty_with_aug(
self, x: Tuple[Tensor],
batch_data_samples: SampleList) -> List[Tensor]:
"""Compute uncertainty with augmented bboxes.
Args:
x (tuple[Tensor]): List of multi-level img features.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,
which are `pseudo_instance` or `pseudo_panoptic_seg`
or `pseudo_sem_seg` in fact.
Returns:
list[Tensor]: A list of uncertainty for pseudo bboxes.
"""
auged_results_list = self.aug_box(batch_data_samples,
self.semi_train_cfg.jitter_times,
self.semi_train_cfg.jitter_scale)
# flatten
auged_results_list = [
InstanceData(bboxes=auged.reshape(-1, auged.shape[-1]))
for auged in auged_results_list
]
self.teacher.roi_head.test_cfg = None
results_list = self.teacher.roi_head.predict(
x, auged_results_list, batch_data_samples, rescale=False)
self.teacher.roi_head.test_cfg = self.teacher.test_cfg.rcnn
reg_channel = max(
[results.bboxes.shape[-1] for results in results_list]) // 4
bboxes = [
results.bboxes.reshape(self.semi_train_cfg.jitter_times, -1,
results.bboxes.shape[-1])
if results.bboxes.numel() > 0 else results.bboxes.new_zeros(
self.semi_train_cfg.jitter_times, 0, 4 * reg_channel).float()
for results in results_list
]
box_unc = [bbox.std(dim=0) for bbox in bboxes]
bboxes = [bbox.mean(dim=0) for bbox in bboxes]
labels = [
data_samples.gt_instances.labels
for data_samples in batch_data_samples
]
if reg_channel != 1:
bboxes = [
bbox.reshape(bbox.shape[0], reg_channel,
4)[torch.arange(bbox.shape[0]), label]
for bbox, label in zip(bboxes, labels)
]
box_unc = [
unc.reshape(unc.shape[0], reg_channel,
4)[torch.arange(unc.shape[0]), label]
for unc, label in zip(box_unc, labels)
]
box_shape = [(bbox[:, 2:4] - bbox[:, :2]).clamp(min=1.0)
for bbox in bboxes]
box_unc = [
torch.mean(
unc / wh[:, None, :].expand(-1, 2, 2).reshape(-1, 4), dim=-1)
if wh.numel() > 0 else unc for unc, wh in zip(box_unc, box_shape)
]
return box_unc
@staticmethod
def aug_box(batch_data_samples, times, frac):
"""Augment bboxes with jitter."""
def _aug_single(box):
box_scale = box[:, 2:4] - box[:, :2]
box_scale = (
box_scale.clamp(min=1)[:, None, :].expand(-1, 2,
2).reshape(-1, 4))
aug_scale = box_scale * frac # [n,4]
offset = (
torch.randn(times, box.shape[0], 4, device=box.device) *
aug_scale[None, ...])
new_box = box.clone()[None, ...].expand(times, box.shape[0],
-1) + offset
return new_box
return [
_aug_single(data_samples.gt_instances.bboxes)
for data_samples in batch_data_samples
]
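    # A rough sketch of the jitter scale in `_aug_single` (illustrative
    # numbers): for a 100x50 box with frac=0.06, the per-coordinate noise
    # std is 0.06 * (w, h, w, h):
    #
    #   >>> import torch
    #   >>> box = torch.tensor([[10., 20., 110., 70.]])  # (x1, y1, x2, y2)
    #   >>> wh = (box[:, 2:4] - box[:, :2]).clamp(min=1)
    #   >>> 0.06 * wh[:, None, :].expand(-1, 2, 2).reshape(-1, 4)
    #   # ≈ tensor([[6., 3., 6., 3.]])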
| 16,860 | 43.488127 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/d2_wrapper.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import BaseBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from mmdet.utils import ConfigType
from .base import BaseDetector
try:
import detectron2
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.structures.masks import BitMasks as D2_BitMasks
from detectron2.structures.masks import PolygonMasks as D2_PolygonMasks
from detectron2.utils.events import EventStorage
except ImportError:
detectron2 = None
def _to_cfgnode_list(cfg: ConfigType,
                     config_list: Optional[list] = None,
                     father_name: str = 'MODEL') -> tuple:
"""Convert the key and value of mmengine.ConfigDict into a list.
Args:
cfg (ConfigDict): The detectron2 model config.
config_list (list): A list contains the key and value of ConfigDict.
Defaults to [].
father_name (str): The father name add before the key.
Defaults to "MODEL".
Returns:
tuple:
- config_list: A list contains the key and value of ConfigDict.
- father_name (str): The father name add before the key.
Defaults to "MODEL".
"""
    # avoid the shared mutable default pitfall: create a fresh list per call
    if config_list is None:
        config_list = []
    for key, value in cfg.items():
name = f'{father_name}.{key.upper()}'
if isinstance(value, ConfigDict) or isinstance(value, dict):
            config_list, _ = \
                _to_cfgnode_list(value, config_list, name)
else:
config_list.append(name)
config_list.append(value)
return config_list, father_name
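# A minimal sketch of the flattening above (hypothetical config values):
# nested mmengine keys become upper-cased dotted Detectron2 keys,
# interleaved with their values, ready for `cfg.merge_from_list`:
#
#   >>> from mmengine.config import ConfigDict
#   >>> cfg = ConfigDict(
#   ...     dict(weights='model.pkl', roi_heads=dict(num_classes=80)))
#   >>> _to_cfgnode_list(cfg, config_list=[])
#   (['MODEL.WEIGHTS', 'model.pkl', 'MODEL.ROI_HEADS.NUM_CLASSES', 80],
#    'MODEL')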
def convert_d2_pred_to_datasample(data_samples: SampleList,
d2_results_list: list) -> SampleList:
"""Convert the Detectron2's result to DetDataSample.
Args:
data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
d2_results_list (list): The list of the results of Detectron2's model.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
assert len(data_samples) == len(d2_results_list)
for data_sample, d2_results in zip(data_samples, d2_results_list):
d2_instance = d2_results['instances']
results = InstanceData()
results.bboxes = d2_instance.pred_boxes.tensor
results.scores = d2_instance.scores
results.labels = d2_instance.pred_classes
if d2_instance.has('pred_masks'):
results.masks = d2_instance.pred_masks
data_sample.pred_instances = results
return data_samples
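# Sketch of the field mapping performed above (Detectron2 names on the
# left, MMDetection names on the right):
#
#   Instances.pred_boxes.tensor -> pred_instances.bboxes
#   Instances.scores            -> pred_instances.scores
#   Instances.pred_classes      -> pred_instances.labels
#   Instances.pred_masks        -> pred_instances.masks  (if present)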
@MODELS.register_module()
class Detectron2Wrapper(BaseDetector):
"""Wrapper of a Detectron2 model. Input/output formats of this class follow
MMDetection's convention, so a Detectron2 model can be trained and
evaluated in MMDetection.
Args:
detector (:obj:`ConfigDict` or dict): The module config of
Detectron2.
bgr_to_rgb (bool): whether to convert image from BGR to RGB.
Defaults to False.
rgb_to_bgr (bool): whether to convert image from RGB to BGR.
Defaults to False.
"""
def __init__(self,
detector: ConfigType,
bgr_to_rgb: bool = False,
rgb_to_bgr: bool = False) -> None:
if detectron2 is None:
raise ImportError('Please install Detectron2 first')
        assert not (bgr_to_rgb and rgb_to_bgr), (
            '`bgr_to_rgb` and `rgb_to_bgr` cannot be set to True at the '
            'same time')
super().__init__()
self._channel_conversion = rgb_to_bgr or bgr_to_rgb
cfgnode_list, _ = _to_cfgnode_list(detector)
self.cfg = get_cfg()
self.cfg.merge_from_list(cfgnode_list)
self.d2_model = build_model(self.cfg)
self.storage = EventStorage()
def init_weights(self) -> None:
"""Initialization Backbone.
NOTE: The initialization of other layers are in Detectron2,
if users want to change the initialization way, please
change the code in Detectron2.
"""
from detectron2.checkpoint import DetectionCheckpointer
checkpointer = DetectionCheckpointer(model=self.d2_model)
checkpointer.load(self.cfg.MODEL.WEIGHTS, checkpointables=[])
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, tuple]:
"""Calculate losses from a batch of inputs and data samples.
        The inputs will first be converted to the Detectron2 type and fed
        into the D2 model.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
d2_batched_inputs = self._convert_to_d2_inputs(
batch_inputs=batch_inputs,
batch_data_samples=batch_data_samples,
training=True)
with self.storage as storage: # noqa
losses = self.d2_model(d2_batched_inputs)
            # `storage` gathers training statistics such as cls_accuracy;
            # use `storage.latest()` to retrieve the detailed values.
return losses
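    # Hypothetical sketch of reading those statistics
    # (`EventStorage.latest()` maps metric names to (value, iteration)):
    #
    #   stats = self.storage.latest()
    #   # e.g. stats.get('fast_rcnn/cls_accuracy') -> (0.93, 1200)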
def predict(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
        The inputs will first be converted to the Detectron2 type and fed
        into the D2 model. The results will then be converted back to the
        MMDet type.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
input images. Each DetDataSample usually contain
'pred_instances'. And the ``pred_instances`` usually
contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
d2_batched_inputs = self._convert_to_d2_inputs(
batch_inputs=batch_inputs,
batch_data_samples=batch_data_samples,
training=False)
        # results from Detectron2 have already been rescaled
d2_results_list = self.d2_model(d2_batched_inputs)
batch_data_samples = convert_d2_pred_to_datasample(
data_samples=batch_data_samples, d2_results_list=d2_results_list)
return batch_data_samples
def _forward(self, *args, **kwargs):
"""Network forward process.
Usually includes backbone, neck and head forward without any post-
processing.
"""
raise NotImplementedError(
f'`_forward` is not implemented in {self.__class__.__name__}')
def extract_feat(self, *args, **kwargs):
"""Extract features from images.
        `extract_feat` will not be used in :obj:`Detectron2Wrapper`.
"""
pass
def _convert_to_d2_inputs(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
training=True) -> list:
"""Convert inputs type to support Detectron2's model.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
training (bool): Whether to enable training time processing.
Returns:
list[dict]: A list of dict, which will be fed into Detectron2's
model. And the dict usually contains following keys.
- image (Tensor): Image in (C, H, W) format.
- instances (Instances): GT Instance.
- height (int): the output height resolution of the model
- width (int): the output width resolution of the model
"""
from detectron2.data.detection_utils import filter_empty_instances
from detectron2.structures import Boxes, Instances
batched_d2_inputs = []
for image, data_samples in zip(batch_inputs, batch_data_samples):
d2_inputs = dict()
# deal with metainfo
meta_info = data_samples.metainfo
d2_inputs['file_name'] = meta_info['img_path']
d2_inputs['height'], d2_inputs['width'] = meta_info['ori_shape']
d2_inputs['image_id'] = meta_info['img_id']
# deal with image
if self._channel_conversion:
image = image[[2, 1, 0], ...]
d2_inputs['image'] = image
# deal with gt_instances
gt_instances = data_samples.gt_instances
d2_instances = Instances(meta_info['img_shape'])
gt_boxes = gt_instances.bboxes
            # TODO: use mmdet.structures.box.get_box_tensor once PR 8658
            # is merged
if isinstance(gt_boxes, BaseBoxes):
gt_boxes = gt_boxes.tensor
d2_instances.gt_boxes = Boxes(gt_boxes)
d2_instances.gt_classes = gt_instances.labels
if gt_instances.get('masks', None) is not None:
gt_masks = gt_instances.masks
if isinstance(gt_masks, PolygonMasks):
d2_instances.gt_masks = D2_PolygonMasks(gt_masks.masks)
elif isinstance(gt_masks, BitmapMasks):
d2_instances.gt_masks = D2_BitMasks(gt_masks.masks)
else:
                    raise TypeError('The type of `gt_masks` can be '
                                    '`PolygonMasks` or `BitmapMasks`, '
                                    f'but got {type(gt_masks)}.')
            # filter on cpu and move back to the original device to avoid
            # potential errors
if training:
device = gt_boxes.device
d2_instances = filter_empty_instances(
d2_instances.to('cpu')).to(device)
d2_inputs['instances'] = d2_instances
batched_d2_inputs.append(d2_inputs)
return batched_d2_inputs
| 11,772 | 39.318493 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/rtmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.dist import get_world_size
from mmengine.logging import print_log
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RTMDet(SingleStageDetector):
"""Implementation of RTMDet.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of RTMDet. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of RTMDet. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
use_syncbn (bool): Whether to use SyncBatchNorm. Defaults to True.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
use_syncbn: bool = True) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
# TODO: Waiting for mmengine support
if use_syncbn and get_world_size() > 1:
torch.nn.SyncBatchNorm.convert_sync_batchnorm(self)
print_log('Using SyncBatchNorm()', 'current')
| 2,073 | 38.132075 | 77 | py |
ERD | ERD-main/mmdet/models/detectors/gfl_increment_erd.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
from collections import OrderedDict
from typing import List, Union
import torch
from mmengine import Config
from mmengine.registry import (MODELS)
from mmengine.runner.checkpoint import load_checkpoint, load_state_dict
from torch import Tensor
from ..utils import multi_apply
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .gfl import GFL
@MODELS.register_module()
class GFLIncrementERD(GFL):
"""Implementation of `GFL <https://arxiv.org/abs/2006.04388>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of GFL. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of GFL. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
ori_setting: ConfigType,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
latest_model_flag=True,
top_k=100,
dist_loss_weight=1,
) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
self.top_k = top_k
self.dist_loss_weight = dist_loss_weight
if latest_model_flag:
self.load_base_detector(ori_setting)
self._is_init = True
def _load_checkpoint_for_new_model(self, checkpoint_file, map_location=None, strict=True, logger=None):
# load ckpt
checkpoint = torch.load(checkpoint_file, map_location=map_location)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(checkpoint_file))
# strip prefix of state_dict
        if list(state_dict.keys())[0].startswith('module.'):
            state_dict = {k[7:]: v for k, v in state_dict.items()}
# modify cls head size of state_dict
added_branch_weight = self.bbox_head.gfl_cls.weight[self.ori_num_classes:, ...]
added_branch_bias = self.bbox_head.gfl_cls.bias[self.ori_num_classes:, ...]
state_dict['bbox_head.gfl_cls.weight'] = torch.cat(
(state_dict['bbox_head.gfl_cls.weight'], added_branch_weight), dim=0)
state_dict['bbox_head.gfl_cls.bias'] = torch.cat(
(state_dict['bbox_head.gfl_cls.bias'], added_branch_bias), dim=0)
# load state_dict
if hasattr(self, 'module'):
load_state_dict(self.module, state_dict, strict, logger)
else:
load_state_dict(self, state_dict, strict, logger)
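    # Shape sketch of the classifier expansion above (illustrative numbers):
    # with 40 base classes and 40 novel classes, the old 40-way `gfl_cls`
    # weights are kept and the freshly initialized rows for the new classes
    # are appended, e.g.
    #
    #   old weight (40, 256, 3, 3) + new rows (40, 256, 3, 3)
    #   -> state_dict['bbox_head.gfl_cls.weight']: (80, 256, 3, 3)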
def load_base_detector(self, ori_setting):
"""
Initialize detector from config file.
:param ori_setting:
:return:
"""
assert os.path.isfile(ori_setting['ori_checkpoint_file']), '{} is not a valid file'.format(
ori_setting['ori_checkpoint_file'])
##### init original model & frozen it #####
# build model
ori_cfg = Config.fromfile(ori_setting['ori_config_file'])
if hasattr(ori_cfg.model, 'latest_model_flag'):
ori_cfg.model.latest_model_flag = False
ori_model = MODELS.build(ori_cfg.model)
# load checkpoint
load_checkpoint(ori_model, ori_setting.ori_checkpoint_file, strict=True)
        # set to eval mode
        ori_model.eval()
        # set requires_grad of all parameters to False
        for param in ori_model.parameters():
            param.requires_grad = False
        ##### init the original branches of the new model #####
self.ori_num_classes = ori_setting.ori_num_classes
self._load_checkpoint_for_new_model(ori_setting.ori_checkpoint_file)
print('======> load base checkpoint for new model from {}'.format(ori_setting.ori_checkpoint_file))
self.ori_model = ori_model
def forward_ori_model(self, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
img (Tensor): Input to the model.
Returns:
outs (Tuple(List[Tensor])): Three model outputs.
# cls_scores (List[Tensor]): Classification scores for each FPN level.
# bbox_preds (List[Tensor]): BBox predictions for each FPN level.
# centernesses (List[Tensor]): Centernesses predictions for each FPN level.
"""
# forward the model without gradients
with torch.no_grad():
outs = self.ori_model(img)
return outs
def sel_pos_single(self, cat_cls_scores, cat_bbox_preds):
        # select confident positions for classification (mean + 2 * std gating)
cat_conf = cat_cls_scores.sigmoid()
max_scores, _ = cat_conf.max(dim=-1)
cls_thr = max_scores.mean() + 2 * max_scores.std()
valid_mask = max_scores > cls_thr
topk_cls_inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
topk_cls_scores = cat_cls_scores.gather( # shape:(N,dim)
0, topk_cls_inds.unsqueeze(-1).expand(-1, cat_cls_scores.size(-1)))
        # select confident positions for regression (mean + 2 * std gating)
max_bbox, _ = cat_bbox_preds.max(dim=-1)
bbox_thr = max_bbox.mean() + 2 * max_bbox.std()
bbox_valid_mask = max_bbox > bbox_thr
topk_bbox_inds = bbox_valid_mask.nonzero(as_tuple=False).squeeze(1)
topk_bbox_preds = cat_bbox_preds.gather(
0, topk_bbox_inds.unsqueeze(-1).expand(-1, cat_bbox_preds.size(-1)))
return topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds
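    # Tiny sketch of the mean + 2 * std gating used above (made-up scores;
    # only the clear outlier survives):
    #
    #   >>> import torch
    #   >>> max_scores = torch.tensor([0.05] * 9 + [0.95])
    #   >>> thr = max_scores.mean() + 2 * max_scores.std()
    #   >>> (max_scores > thr).nonzero(as_tuple=False).squeeze(1)
    #   tensor([9])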
def sel_pos(self, cls_scores, bbox_preds):
"""Select positive predictions based on classification scores.
Args:
model (nn.Module): The loaded detector.
cls_scores (List[Tensor]): Classification scores for each FPN level.
bbox_preds (List[Tensor]): BBox predictions for each FPN level.
#centernesses (List[Tensor]): Centernesses predictions for each FPN level.
Returns:
cat_cls_scores (Tensor): FPN concatenated classification scores.
#cat_centernesses (Tensor): FPN concatenated centernesses.
topk_bbox_preds (Tensor): Selected top-k bbox predictions.
topk_inds (Tensor): Selected top-k indices.
"""
assert len(cls_scores) == len(bbox_preds)
num_imgs = cls_scores[0].size(0)
cat_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(
num_imgs, -1, self.ori_model.bbox_head.cls_out_channels)
for cls_score in cls_scores
]
        # GFL predicts a discretized distribution with (reg_max + 1) bins per
        # box side, hence 4 * (reg_max + 1) channels instead of plain 4
        cat_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(
                num_imgs, -1, 4 * (self.ori_model.bbox_head.reg_max + 1))
            for bbox_pred in bbox_preds
        ]
cat_cls_scores = torch.cat(cat_cls_scores, dim=1)
cat_bbox_preds = torch.cat(cat_bbox_preds, dim=1)
topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds = multi_apply(
self.sel_pos_single,
cat_cls_scores,
cat_bbox_preds)
return topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds
def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> Union[dict, list]:
# get original model outputs
ori_outs = self.ori_model(batch_inputs)
# select positive predictions from original model
topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds = self.sel_pos(*ori_outs)
# get new model outputs
x = self.extract_feat(batch_inputs)
new_outs = self.bbox_head(x)
        # calculate losses: the standard losses of the new model plus the
        # distillation losses against the original model's responses
loss_inputs = (ori_outs, new_outs, batch_data_samples, \
topk_cls_inds, topk_cls_scores, topk_bbox_inds, topk_bbox_preds,
self.ori_num_classes, self.dist_loss_weight, self)
losses = self.bbox_head.loss(*loss_inputs)
return losses
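    # A hypothetical `ori_setting` sketch for the incremental step (paths and
    # the 40 + 40 class split are illustrative, not actual repo files):
    #
    #   ori_setting = dict(
    #       ori_config_file='configs/gfl/gfl_r50_fpn_1x_coco_first40.py',
    #       ori_checkpoint_file='work_dirs/gfl_first40/latest.pth',
    #       ori_num_classes=40)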
| 9,276 | 40.977376 | 116 | py |
ERD | ERD-main/mmdet/models/detectors/base_detr.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList, SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
@MODELS.register_module()
class DetectionTransformer(BaseDetector, metaclass=ABCMeta):
r"""Base class for Detection Transformer.
In Detection Transformer, an encoder is used to process output features of
neck, then several queries interact with the encoder features using a
decoder and do the regression and classification with the bounding box
head.
Args:
backbone (:obj:`ConfigDict` or dict): Config of the backbone.
neck (:obj:`ConfigDict` or dict, optional): Config of the neck.
Defaults to None.
encoder (:obj:`ConfigDict` or dict, optional): Config of the
Transformer encoder. Defaults to None.
decoder (:obj:`ConfigDict` or dict, optional): Config of the
Transformer decoder. Defaults to None.
bbox_head (:obj:`ConfigDict` or dict, optional): Config for the
bounding box head module. Defaults to None.
positional_encoding (:obj:`ConfigDict` or dict, optional): Config
of the positional encoding module. Defaults to None.
num_queries (int, optional): Number of decoder query in Transformer.
Defaults to 100.
train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
the bounding box head module. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
the bounding box head module. Defaults to None.
data_preprocessor (dict or ConfigDict, optional): The pre-process
config of :class:`BaseDataPreprocessor`. it usually includes,
``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
encoder: OptConfigType = None,
decoder: OptConfigType = None,
bbox_head: OptConfigType = None,
positional_encoding: OptConfigType = None,
num_queries: int = 100,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
# process args
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.encoder = encoder
self.decoder = decoder
self.positional_encoding = positional_encoding
self.num_queries = num_queries
# init model layers
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
self.bbox_head = MODELS.build(bbox_head)
self._init_layers()
@abstractmethod
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
pass
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Union[dict, list]:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (bs, dim, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components
"""
img_feats = self.extract_feat(batch_inputs)
head_inputs_dict = self.forward_transformer(img_feats,
batch_data_samples)
losses = self.bbox_head.loss(
**head_inputs_dict, batch_data_samples=batch_data_samples)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs, has shape (bs, dim, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Detection results of the input images.
Each DetDataSample usually contain 'pred_instances'. And the
`pred_instances` usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
img_feats = self.extract_feat(batch_inputs)
head_inputs_dict = self.forward_transformer(img_feats,
batch_data_samples)
results_list = self.bbox_head.predict(
**head_inputs_dict,
rescale=rescale,
batch_data_samples=batch_data_samples)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
def _forward(
self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs, has shape (bs, dim, H, W).
batch_data_samples (List[:obj:`DetDataSample`], optional): The
batch data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[Tensor]: A tuple of features from ``bbox_head`` forward.
"""
img_feats = self.extract_feat(batch_inputs)
head_inputs_dict = self.forward_transformer(img_feats,
batch_data_samples)
results = self.bbox_head.forward(**head_inputs_dict)
return results
def forward_transformer(self,
img_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None) -> Dict:
"""Forward process of Transformer, which includes four steps:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'. We
summarized the parameters flow of the existing DETR-like detector,
which can be illustrated as follow:
.. code:: text
img_feats & batch_data_samples
|
V
+-----------------+
| pre_transformer |
+-----------------+
| |
| V
| +-----------------+
| | forward_encoder |
| +-----------------+
| |
| V
| +---------------+
| | pre_decoder |
| +---------------+
| | |
V V |
+-----------------+ |
| forward_decoder | |
+-----------------+ |
| |
V V
head_inputs_dict
Args:
img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each
feature map has shape (bs, dim, H, W).
batch_data_samples (list[:obj:`DetDataSample`], optional): The
batch data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
dict: The dictionary of bbox_head function inputs, which always
includes the `hidden_states` of the decoder output and may contain
`references` including the initial and intermediate references.
"""
encoder_inputs_dict, decoder_inputs_dict = self.pre_transformer(
img_feats, batch_data_samples)
encoder_outputs_dict = self.forward_encoder(**encoder_inputs_dict)
tmp_dec_in, head_inputs_dict = self.pre_decoder(**encoder_outputs_dict)
decoder_inputs_dict.update(tmp_dec_in)
decoder_outputs_dict = self.forward_decoder(**decoder_inputs_dict)
head_inputs_dict.update(decoder_outputs_dict)
return head_inputs_dict
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor, has shape (bs, dim, H, W).
Returns:
tuple[Tensor]: Tuple of feature maps from neck. Each feature map
has shape (bs, dim, H, W).
"""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
@abstractmethod
def pre_transformer(
self,
img_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None) -> Tuple[Dict, Dict]:
"""Process image features before feeding them to the transformer.
Args:
img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each
feature map has shape (bs, dim, H, W).
batch_data_samples (list[:obj:`DetDataSample`], optional): The
batch data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[dict, dict]: The first dict contains the inputs of encoder
and the second dict contains the inputs of decoder.
- encoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_encoder()`, which includes 'feat', 'feat_mask',
'feat_pos', and other algorithm-specific arguments.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'memory_mask', and
other algorithm-specific arguments.
"""
pass
@abstractmethod
def forward_encoder(self, feat: Tensor, feat_mask: Tensor,
feat_pos: Tensor, **kwargs) -> Dict:
"""Forward with Transformer encoder.
Args:
feat (Tensor): Sequential features, has shape (bs, num_feat_points,
dim).
feat_mask (Tensor): ByteTensor, the padding mask of the features,
has shape (bs, num_feat_points).
feat_pos (Tensor): The positional embeddings of the features, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of encoder outputs, which includes the
`memory` of the encoder output and other algorithm-specific
arguments.
"""
pass
@abstractmethod
def pre_decoder(self, memory: Tensor, **kwargs) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`, and `reference_points`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
tuple[dict, dict]: The first dict contains the inputs of decoder
and the second dict contains the inputs of the bbox_head function.
- decoder_inputs_dict (dict): The keyword dictionary args of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory', and other algorithm-specific arguments.
- head_inputs_dict (dict): The keyword dictionary args of the
bbox_head functions, which is usually empty, or includes
`enc_outputs_class` and `enc_outputs_class` when the detector
support 'two stage' or 'query selection' strategies.
"""
pass
@abstractmethod
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
**kwargs) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` of the decoder output, `references` including
the initial and intermediate reference_points, and other
algorithm-specific arguments.
"""
pass
| 14,268 | 41.84985 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/rpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RPN(SingleStageDetector):
"""Implementation of Region Proposal Network.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
        rpn_head (:obj:`ConfigDict` or dict): The rpn head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
rpn_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
super(SingleStageDetector, self).__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
self.neck = MODELS.build(neck) if neck is not None else None
rpn_train_cfg = train_cfg['rpn'] if train_cfg is not None else None
rpn_head_num_classes = rpn_head.get('num_classes', 1)
if rpn_head_num_classes != 1:
            warnings.warn('The `num_classes` should be 1 in RPN, but got '
f'{rpn_head_num_classes}, please set '
'rpn_head.num_classes = 1 in your config file.')
rpn_head.update(num_classes=1)
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg['rpn'])
self.bbox_head = MODELS.build(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and data samples.
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
# set cat_id of gt_labels to 0 in RPN
rpn_data_samples = copy.deepcopy(batch_data_samples)
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
losses = self.bbox_head.loss(x, rpn_data_samples)
return losses
| 3,380 | 40.231707 | 77 | py |
ERD | ERD-main/mmdet/models/detectors/dino.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor, nn
from torch.nn.init import normal_
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList
from mmdet.utils import OptConfigType
from ..layers import (CdnQueryGenerator, DeformableDetrTransformerEncoder,
DinoTransformerDecoder, SinePositionalEncoding)
from .deformable_detr import DeformableDETR, MultiScaleDeformableAttention
@MODELS.register_module()
class DINO(DeformableDETR):
r"""Implementation of `DINO: DETR with Improved DeNoising Anchor Boxes
for End-to-End Object Detection <https://arxiv.org/abs/2203.03605>`_
Code is modified from the `official github repo
<https://github.com/IDEA-Research/DINO>`_.
Args:
dn_cfg (:obj:`ConfigDict` or dict, optional): Config of denoising
query generator. Defaults to `None`.
"""
def __init__(self, *args, dn_cfg: OptConfigType = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
assert self.as_two_stage, 'as_two_stage must be True for DINO'
assert self.with_box_refine, 'with_box_refine must be True for DINO'
if dn_cfg is not None:
            assert 'num_classes' not in dn_cfg and \
                   'embed_dims' not in dn_cfg and \
                   'num_matching_queries' not in dn_cfg, \
'The three keyword args `num_classes`, `embed_dims`, and ' \
'`num_matching_queries` are set in `detector.__init__()`, ' \
'users should not set them in `dn_cfg` config.'
dn_cfg['num_classes'] = self.bbox_head.num_classes
dn_cfg['embed_dims'] = self.embed_dims
dn_cfg['num_matching_queries'] = self.num_queries
self.dn_query_generator = CdnQueryGenerator(**dn_cfg)
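    # A typical `dn_cfg` sketch (values are illustrative; `num_classes`,
    # `embed_dims` and `num_matching_queries` are filled in above):
    #
    #   dn_cfg = dict(
    #       label_noise_ratio=0.5,
    #       box_noise_scale=1.0,
    #       group_cfg=dict(dynamic=True, num_groups=None,
    #                      num_dn_queries=100))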
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DeformableDetrTransformerEncoder(**self.encoder)
self.decoder = DinoTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
# NOTE In DINO, the query_embedding only contains content
# queries, while in Deformable DETR, the query_embedding
# contains both content and spatial queries, and in DETR,
# it only contains spatial queries.
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
self.level_embed = nn.Parameter(
torch.Tensor(self.num_feature_levels, self.embed_dims))
self.memory_trans_fc = nn.Linear(self.embed_dims, self.embed_dims)
self.memory_trans_norm = nn.LayerNorm(self.embed_dims)
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super(DeformableDETR, self).init_weights()
for coder in self.encoder, self.decoder:
for p in coder.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MultiScaleDeformableAttention):
m.init_weights()
nn.init.xavier_uniform_(self.memory_trans_fc.weight)
nn.init.xavier_uniform_(self.query_embedding.weight)
normal_(self.level_embed)
def forward_transformer(
self,
img_feats: Tuple[Tensor],
batch_data_samples: OptSampleList = None,
) -> Dict:
"""Forward process of Transformer.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
        More details can be found at
        `DetectionTransformer.forward_transformer` in
        `mmdet/models/detectors/base_detr.py`.
The difference is that the ground truth in `batch_data_samples` is
required for the `pre_decoder` to prepare the query of DINO.
Additionally, DINO inherits the `pre_transformer` method and the
`forward_encoder` method of DeformableDETR. More details about the
two methods can be found in `mmdet/detector/deformable_detr.py`.
Args:
img_feats (tuple[Tensor]): Tuple of feature maps from neck. Each
feature map has shape (bs, dim, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
dict: The dictionary of bbox_head function inputs, which always
includes the `hidden_states` of the decoder output and may contain
`references` including the initial and intermediate references.
"""
encoder_inputs_dict, decoder_inputs_dict = self.pre_transformer(
img_feats, batch_data_samples)
encoder_outputs_dict = self.forward_encoder(**encoder_inputs_dict)
tmp_dec_in, head_inputs_dict = self.pre_decoder(
**encoder_outputs_dict, batch_data_samples=batch_data_samples)
decoder_inputs_dict.update(tmp_dec_in)
decoder_outputs_dict = self.forward_decoder(**decoder_inputs_dict)
head_inputs_dict.update(decoder_outputs_dict)
return head_inputs_dict
def pre_decoder(
self,
memory: Tensor,
memory_mask: Tensor,
spatial_shapes: Tensor,
batch_data_samples: OptSampleList = None,
) -> Tuple[Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`, and `reference_points`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points). Will only be used when
`as_two_stage` is `True`.
spatial_shapes (Tensor): Spatial shapes of features in all levels.
With shape (num_levels, 2), last dimension represents (h, w).
Will only be used when `as_two_stage` is `True`.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Defaults to None.
Returns:
tuple[dict]: The decoder_inputs_dict and head_inputs_dict.
- decoder_inputs_dict (dict): The keyword dictionary args of
`self.forward_decoder()`, which includes 'query', 'memory',
`reference_points`, and `dn_mask`. The reference points of
decoder input here are 4D boxes, although it has `points`
in its name.
- head_inputs_dict (dict): The keyword dictionary args of the
bbox_head functions, which includes `topk_score`, `topk_coords`,
and `dn_meta` when `self.training` is `True`, else is empty.
"""
bs, _, c = memory.shape
cls_out_features = self.bbox_head.cls_branches[
self.decoder.num_layers].out_features
output_memory, output_proposals = self.gen_encoder_output_proposals(
memory, memory_mask, spatial_shapes)
enc_outputs_class = self.bbox_head.cls_branches[
self.decoder.num_layers](
output_memory)
enc_outputs_coord_unact = self.bbox_head.reg_branches[
self.decoder.num_layers](output_memory) + output_proposals
        # NOTE DINO selects top-k proposals according to the scores of
        # multi-class classification, while DeformDETR selects according
        # to the scores of binary classification, taking
        # `enc_outputs_class[..., 0]` as input.
topk_indices = torch.topk(
enc_outputs_class.max(-1)[0], k=self.num_queries, dim=1)[1]
topk_score = torch.gather(
enc_outputs_class, 1,
topk_indices.unsqueeze(-1).repeat(1, 1, cls_out_features))
topk_coords_unact = torch.gather(
enc_outputs_coord_unact, 1,
topk_indices.unsqueeze(-1).repeat(1, 1, 4))
topk_coords = topk_coords_unact.sigmoid()
topk_coords_unact = topk_coords_unact.detach()
query = self.query_embedding.weight[:, None, :]
query = query.repeat(1, bs, 1).transpose(0, 1)
if self.training:
dn_label_query, dn_bbox_query, dn_mask, dn_meta = \
self.dn_query_generator(batch_data_samples)
query = torch.cat([dn_label_query, query], dim=1)
reference_points = torch.cat([dn_bbox_query, topk_coords_unact],
dim=1)
else:
reference_points = topk_coords_unact
dn_mask, dn_meta = None, None
reference_points = reference_points.sigmoid()
decoder_inputs_dict = dict(
query=query,
memory=memory,
reference_points=reference_points,
dn_mask=dn_mask)
        # NOTE DINO calculates encoder losses on the scores and coordinates
        # of the selected top-k encoder queries, while DeformDETR computes
        # them over all encoder queries.
head_inputs_dict = dict(
enc_outputs_class=topk_score,
enc_outputs_coord=topk_coords,
dn_meta=dn_meta) if self.training else dict()
return decoder_inputs_dict, head_inputs_dict
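    # Tiny sketch of the top-k selection + gather pattern used above
    # (toy sizes: bs=1, 4 proposals, 2 classes, k=2):
    #
    #   >>> import torch
    #   >>> scores = torch.tensor(
    #   ...     [[[0.1, 0.2], [0.9, 0.3], [0.4, 0.8], [0.0, 0.1]]])
    #   >>> inds = torch.topk(scores.max(-1)[0], k=2, dim=1)[1]
    #   >>> torch.gather(scores, 1, inds.unsqueeze(-1).repeat(1, 1, 2))
    #   tensor([[[0.9000, 0.3000],
    #            [0.4000, 0.8000]]])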
def forward_decoder(self,
query: Tensor,
memory: Tensor,
memory_mask: Tensor,
reference_points: Tensor,
spatial_shapes: Tensor,
level_start_index: Tensor,
valid_ratios: Tensor,
dn_mask: Optional[Tensor] = None) -> Dict:
"""Forward with Transformer decoder.
The forward procedure of the transformer is defined as:
'pre_transformer' -> 'encoder' -> 'pre_decoder' -> 'decoder'
        More details can be found at
        `DetectionTransformer.forward_transformer` in
        `mmdet/models/detectors/base_detr.py`.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries_total, dim), where `num_queries_total` is the
sum of `num_denoising_queries` and `num_matching_queries` when
`self.training` is `True`, else `num_matching_queries`.
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
reference_points (Tensor): The initial reference, has shape
(bs, num_queries_total, 4) with the last dimension arranged as
(cx, cy, w, h).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
dn_mask (Tensor, optional): The attention mask to prevent
information leakage from different denoising groups and
matching parts, will be used as `self_attn_mask` of the
`self.decoder`, has shape (num_queries_total,
num_queries_total).
It is `None` when `self.training` is `False`.
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` of the decoder output and `references` including
the initial and intermediate reference_points.
"""
inter_states, references = self.decoder(
query=query,
value=memory,
key_padding_mask=memory_mask,
self_attn_mask=dn_mask,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reg_branches=self.bbox_head.reg_branches)
if len(query) == self.num_queries:
            # NOTE: This is to make sure label_embedding can be involved
            # in producing a loss even if there is no denoising query (no
            # ground truth target on this GPU); otherwise, this will raise
            # a runtime error in distributed training.
inter_states[0] += \
self.dn_query_generator.label_embedding.weight[0, 0] * 0.0
decoder_outputs_dict = dict(
hidden_states=inter_states, references=list(references))
return decoder_outputs_dict
| 13,511 | 46.244755 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/trident_faster_rcnn.py | # Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super()._forward(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().loss(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().predict(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
rescale=rescale)
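    # Note: `batch_data_samples * num_branch` relies on plain Python list
    # repetition, so every branch sees the same samples, e.g.
    #
    #   >>> ['img0', 'img1'] * 3
    #   ['img0', 'img1', 'img0', 'img1', 'img0', 'img1']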
# TODO need to refactor
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        trident_img_metas = [img_meta * num_branch for img_meta in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
| 3,358 | 39.963415 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/panoptic_two_stage_segmentor.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List
import torch
from mmengine.structures import PixelData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class TwoStagePanopticSegmentor(TwoStageDetector):
"""Base class of Two-stage Panoptic Segmentor.
    In addition to the components in TwoStageDetector, a panoptic segmentor
    has an extra semantic_head and panoptic_fusion_head.
"""
def __init__(
self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
# for panoptic segmentation
semantic_head: OptConfigType = None,
panoptic_fusion_head: OptConfigType = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
if semantic_head is not None:
self.semantic_head = MODELS.build(semantic_head)
if panoptic_fusion_head is not None:
panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None
panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()
panoptic_fusion_head_.update(test_cfg=panoptic_cfg)
self.panoptic_fusion_head = MODELS.build(panoptic_fusion_head_)
self.num_things_classes = self.panoptic_fusion_head.\
num_things_classes
self.num_stuff_classes = self.panoptic_fusion_head.\
num_stuff_classes
self.num_classes = self.panoptic_fusion_head.num_classes
@property
def with_semantic_head(self) -> bool:
"""bool: whether the detector has semantic head"""
return hasattr(self,
'semantic_head') and self.semantic_head is not None
@property
def with_panoptic_fusion_head(self) -> bool:
"""bool: whether the detector has panoptic fusion head"""
return hasattr(self, 'panoptic_fusion_head') and \
self.panoptic_fusion_head is not None
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_data_samples = copy.deepcopy(batch_data_samples)
# set cat_id of gt_labels to 0 in RPN
for data_sample in rpn_data_samples:
data_sample.gt_instances.labels = \
torch.zeros_like(data_sample.gt_instances.labels)
rpn_losses, rpn_results_list = self.rpn_head.loss_and_predict(
x, rpn_data_samples, proposal_cfg=proposal_cfg)
            # avoid getting the same loss names as the roi_head losses
keys = rpn_losses.keys()
for key in list(keys):
if 'loss' in key and 'rpn' not in key:
rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)
losses.update(rpn_losses)
else:
# TODO: Not support currently, should have a check at Fast R-CNN
assert batch_data_samples[0].get('proposals', None) is not None
# use pre-defined proposals in InstanceData for the second stage
# to extract ROI features.
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
roi_losses = self.roi_head.loss(x, rpn_results_list,
batch_data_samples)
losses.update(roi_losses)
semantic_loss = self.semantic_head.loss(x, batch_data_samples)
losses.update(semantic_loss)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
List[:obj:`DetDataSample`]: Return the packed panoptic segmentation
results of input images. Each DetDataSample usually contains
'pred_panoptic_seg'. And the 'pred_panoptic_seg' has a key
``sem_seg``, which is a tensor of shape (1, h, w).
"""
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
x = self.extract_feat(batch_inputs)
# If there are no pre-defined proposals, use RPN to get proposals
if batch_data_samples[0].get('proposals', None) is None:
rpn_results_list = self.rpn_head.predict(
x, batch_data_samples, rescale=False)
else:
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
results_list = self.roi_head.predict(
x, rpn_results_list, batch_data_samples, rescale=rescale)
seg_preds = self.semantic_head.predict(x, batch_img_metas, rescale)
results_list = self.panoptic_fusion_head.predict(
results_list, seg_preds)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
# TODO the code has not been verified and needs to be refactored later.
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
Returns:
tuple: A tuple of features from ``rpn_head``, ``roi_head`` and
``semantic_head`` forward.
"""
results = ()
x = self.extract_feat(batch_inputs)
rpn_outs = self.rpn_head.forward(x)
        results = results + (rpn_outs, )
# If there are no pre-defined proposals, use RPN to get proposals
if batch_data_samples[0].get('proposals', None) is None:
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
rpn_results_list = self.rpn_head.predict_by_feat(
*rpn_outs, batch_img_metas=batch_img_metas, rescale=False)
else:
# TODO: Not checked currently.
rpn_results_list = [
data_sample.proposals for data_sample in batch_data_samples
]
# roi_head
roi_outs = self.roi_head(x, rpn_results_list)
        results = results + (roi_outs, )
# semantic_head
sem_outs = self.semantic_head.forward(x)
results = results + (sem_outs['seg_preds'], )
return results
def add_pred_to_datasample(self, data_samples: SampleList,
results_list: List[PixelData]) -> SampleList:
"""Add predictions to `DetDataSample`.
Args:
data_samples (list[:obj:`DetDataSample`]): The
annotation data of every samples.
results_list (List[PixelData]): Panoptic segmentation results of
each image.
Returns:
List[:obj:`DetDataSample`]: Return the packed panoptic segmentation
results of input images. Each DetDataSample usually contains
'pred_panoptic_seg'. And the 'pred_panoptic_seg' has a key
``sem_seg``, which is a tensor of shape (1, h, w).
"""
for data_sample, pred_panoptic_seg in zip(data_samples, results_list):
data_sample.pred_panoptic_seg = pred_panoptic_seg
return data_samples
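# --- Usage sketch (added for illustration; not part of the original
# detector). The RPN loss-key renaming in ``loss`` above avoids key
# collisions with roi_head losses: any key that contains 'loss' but not
# 'rpn' gets prefixed. A minimal, self-contained demo of that bookkeeping:
if __name__ == '__main__':
    rpn_losses = {'loss_cls': 1.0, 'loss_bbox': 2.0, 'num_pos': 8}
    for key in list(rpn_losses.keys()):
        if 'loss' in key and 'rpn' not in key:
            rpn_losses[f'rpn_{key}'] = rpn_losses.pop(key)
    assert set(rpn_losses) == {'rpn_loss_cls', 'rpn_loss_bbox', 'num_pos'}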
| 9,188 | 38.102128 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/maskformer.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Tuple
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Semantic Segmentation
<https://arxiv.org/pdf/2107.06278>`_."""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
panoptic_head: OptConfigType = None,
panoptic_fusion_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super(SingleStageDetector, self).__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
panoptic_head_ = panoptic_head.deepcopy()
panoptic_head_.update(train_cfg=train_cfg)
panoptic_head_.update(test_cfg=test_cfg)
self.panoptic_head = MODELS.build(panoptic_head_)
panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()
panoptic_fusion_head_.update(test_cfg=test_cfg)
self.panoptic_fusion_head = MODELS.build(panoptic_fusion_head_)
self.num_things_classes = self.panoptic_head.num_things_classes
self.num_stuff_classes = self.panoptic_head.num_stuff_classes
self.num_classes = self.panoptic_head.num_classes
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(batch_inputs)
losses = self.panoptic_head.loss(x, batch_data_samples)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
            input images. Each DetDataSample usually contains
            'pred_instances' and `pred_panoptic_seg`. And the
            ``pred_instances`` usually contains the following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
And the ``pred_panoptic_seg`` contains the following key
- sem_seg (Tensor): panoptic segmentation mask, has a
shape (1, h, w).
"""
feats = self.extract_feat(batch_inputs)
mask_cls_results, mask_pred_results = self.panoptic_head.predict(
feats, batch_data_samples)
results_list = self.panoptic_fusion_head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=rescale)
results = self.add_pred_to_datasample(batch_data_samples, results_list)
return results
def add_pred_to_datasample(self, data_samples: SampleList,
results_list: List[dict]) -> SampleList:
"""Add predictions to `DetDataSample`.
Args:
data_samples (list[:obj:`DetDataSample`], optional): A batch of
data samples that contain annotations and predictions.
results_list (List[dict]): Instance segmentation, segmantic
segmentation and panoptic segmentation results.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
            input images. Each DetDataSample usually contains
            'pred_instances' and `pred_panoptic_seg`. And the
            ``pred_instances`` usually contains the following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
And the ``pred_panoptic_seg`` contains the following key
- sem_seg (Tensor): panoptic segmentation mask, has a
shape (1, h, w).
"""
for data_sample, pred_results in zip(data_samples, results_list):
if 'pan_results' in pred_results:
data_sample.pred_panoptic_seg = pred_results['pan_results']
if 'ins_results' in pred_results:
data_sample.pred_instances = pred_results['ins_results']
            assert 'sem_results' not in pred_results, 'semantic ' \
                'segmentation results are not supported yet.'
return data_samples
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> Tuple[List[Tensor]]:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
tuple[List[Tensor]]: A tuple of features from ``panoptic_head``
forward.
"""
feats = self.extract_feat(batch_inputs)
results = self.panoptic_head.forward(feats, batch_data_samples)
return results
| 7,161 | 40.883041 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/single_stage_instance_seg.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Tuple
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList, SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .base import BaseDetector
INF = 1e8
@MODELS.register_module()
class SingleStageInstanceSegmentor(BaseDetector):
"""Base class for single-stage instance segmentors."""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)
if neck is not None:
self.neck = MODELS.build(neck)
else:
self.neck = None
if bbox_head is not None:
bbox_head.update(train_cfg=copy.deepcopy(train_cfg))
bbox_head.update(test_cfg=copy.deepcopy(test_cfg))
self.bbox_head = MODELS.build(bbox_head)
else:
self.bbox_head = None
assert mask_head, f'`mask_head` must ' \
f'be implemented in {self.__class__.__name__}'
mask_head.update(train_cfg=copy.deepcopy(train_cfg))
mask_head.update(test_cfg=copy.deepcopy(test_cfg))
self.mask_head = MODELS.build(mask_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
"""Extract features.
Args:
batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
Returns:
tuple[Tensor]: Multi-level features that may have different
resolutions.
"""
x = self.backbone(batch_inputs)
if self.with_neck:
x = self.neck(x)
return x
def _forward(self,
batch_inputs: Tensor,
batch_data_samples: OptSampleList = None,
**kwargs) -> tuple:
"""Network forward process. Usually includes backbone, neck and head
forward without any post-processing.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
Returns:
tuple: A tuple of features from ``bbox_head`` forward.
"""
outs = ()
# backbone
x = self.extract_feat(batch_inputs)
# bbox_head
positive_infos = None
if self.with_bbox:
assert batch_data_samples is not None
bbox_outs = self.bbox_head.forward(x)
outs = outs + (bbox_outs, )
# It is necessary to use `bbox_head.loss` to update
# `_raw_positive_infos` which will be used in `get_positive_infos`
# positive_infos will be used in the following mask head.
_ = self.bbox_head.loss(x, batch_data_samples, **kwargs)
positive_infos = self.bbox_head.get_positive_infos()
# mask_head
if positive_infos is None:
mask_outs = self.mask_head.forward(x)
else:
mask_outs = self.mask_head.forward(x, positive_infos)
outs = outs + (mask_outs, )
return outs
def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList,
**kwargs) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
x = self.extract_feat(batch_inputs)
losses = dict()
positive_infos = None
# CondInst and YOLACT have bbox_head
if self.with_bbox:
bbox_losses = self.bbox_head.loss(x, batch_data_samples, **kwargs)
losses.update(bbox_losses)
# get positive information from bbox head, which will be used
# in the following mask head.
positive_infos = self.bbox_head.get_positive_infos()
mask_loss = self.mask_head.loss(
x, batch_data_samples, positive_infos=positive_infos, **kwargs)
# avoid loss override
assert not set(mask_loss.keys()) & set(losses.keys())
losses.update(mask_loss)
return losses
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True,
**kwargs) -> SampleList:
"""Perform forward propagation of the mask head and predict mask
results on the features of the upstream network.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
                Defaults to True.
Returns:
list[:obj:`DetDataSample`]: Detection results of the
            input images. Each DetDataSample usually contains
            'pred_instances'. And the ``pred_instances`` usually
            contains the following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
x = self.extract_feat(batch_inputs)
if self.with_bbox:
# the bbox branch does not need to be scaled to the original
# image scale, because the mask branch will scale both bbox
# and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.bbox_head.predict(
x, batch_data_samples, rescale=bbox_rescale)
else:
results_list = None
results_list = self.mask_head.predict(
x, batch_data_samples, rescale=rescale, results_list=results_list)
batch_data_samples = self.add_pred_to_datasample(
batch_data_samples, results_list)
return batch_data_samples
| 6,915 | 37.209945 | 78 | py |
ERD | ERD-main/mmdet/models/detectors/dab_detr.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Tuple
from mmengine.model import uniform_init
from torch import Tensor, nn
from mmdet.registry import MODELS
from ..layers import SinePositionalEncoding
from ..layers.transformer import (DABDetrTransformerDecoder,
DABDetrTransformerEncoder, inverse_sigmoid)
from .detr import DETR
@MODELS.register_module()
class DABDETR(DETR):
r"""Implementation of `DAB-DETR:
Dynamic Anchor Boxes are Better Queries for DETR.
<https://arxiv.org/abs/2201.12329>`_.
Code is modified from the `official github repo
<https://github.com/IDEA-Research/DAB-DETR>`_.
Args:
with_random_refpoints (bool): Whether to randomly initialize query
embeddings and not update them during training.
Defaults to False.
num_patterns (int): Inspired by Anchor-DETR. Defaults to 0.
"""
def __init__(self,
*args,
with_random_refpoints: bool = False,
num_patterns: int = 0,
**kwargs) -> None:
self.with_random_refpoints = with_random_refpoints
assert isinstance(num_patterns, int), \
            f'num_patterns should be int but got {type(num_patterns)}.'
self.num_patterns = num_patterns
super().__init__(*args, **kwargs)
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding)
self.encoder = DABDetrTransformerEncoder(**self.encoder)
self.decoder = DABDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
self.query_dim = self.decoder.query_dim
self.query_embedding = nn.Embedding(self.num_queries, self.query_dim)
if self.num_patterns > 0:
self.patterns = nn.Embedding(self.num_patterns, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
f'embed_dims should be exactly 2 times of num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def init_weights(self) -> None:
"""Initialize weights for Transformer and other components."""
super(DABDETR, self).init_weights()
if self.with_random_refpoints:
uniform_init(self.query_embedding)
self.query_embedding.weight.data[:, :2] = \
inverse_sigmoid(self.query_embedding.weight.data[:, :2])
self.query_embedding.weight.data[:, :2].requires_grad = False
def pre_decoder(self, memory: Tensor) -> Tuple[Dict, Dict]:
"""Prepare intermediate variables before entering Transformer decoder,
such as `query`, `query_pos`.
Args:
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
Returns:
tuple[dict, dict]: The first dict contains the inputs of decoder
and the second dict contains the inputs of the bbox_head function.
- decoder_inputs_dict (dict): The keyword args dictionary of
`self.forward_decoder()`, which includes 'query', 'query_pos',
'memory' and 'reg_branches'.
- head_inputs_dict (dict): The keyword args dictionary of the
bbox_head functions, which is usually empty, or includes
              `enc_outputs_class` and `enc_outputs_coord` when the detector
support 'two stage' or 'query selection' strategies.
"""
batch_size = memory.size(0)
query_pos = self.query_embedding.weight
query_pos = query_pos.unsqueeze(0).repeat(batch_size, 1, 1)
if self.num_patterns == 0:
query = query_pos.new_zeros(batch_size, self.num_queries,
self.embed_dims)
else:
query = self.patterns.weight[:, None, None, :]\
.repeat(1, self.num_queries, batch_size, 1)\
.view(-1, batch_size, self.embed_dims)\
.permute(1, 0, 2)
query_pos = query_pos.repeat(1, self.num_patterns, 1)
decoder_inputs_dict = dict(
query_pos=query_pos, query=query, memory=memory)
head_inputs_dict = dict()
return decoder_inputs_dict, head_inputs_dict
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask,
reg_branches=self.bbox_head.
fc_reg # iterative refinement for anchor boxes
)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
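# --- Usage sketch (added for illustration; the sizes below are toy
# assumptions). With `with_random_refpoints=True`, `init_weights` treats the
# first two query-embedding dims as logits of normalized anchor centers:
# they are passed through `inverse_sigmoid` once and frozen, so a later
# `sigmoid` recovers the points. `inverse_sigmoid` clamps its input
# (eps ~ 1e-3), hence the loose tolerance.
if __name__ == '__main__':
    import torch
    points = torch.rand(16, 2)        # normalized (cx, cy) anchor centers
    logits = inverse_sigmoid(points)  # store the anchors in logit space
    assert torch.allclose(torch.sigmoid(logits), points, atol=1e-2)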
| 5,907 | 41.2 | 79 | py |
ERD | ERD-main/mmdet/models/detectors/lad.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_data_samples)
return losses
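# --- Design-note sketch (added for illustration; not from the original
# file). The teacher is wrapped in a bare ``nn.Module`` rather than a full
# detector, so its state_dict keys keep the usual 'backbone.'/'bbox_head.'
# prefixes and a normal detector checkpoint can be loaded into it directly.
if __name__ == '__main__':
    wrapper = nn.Module()
    wrapper.backbone = nn.Linear(4, 4)   # stand-in for a real backbone
    wrapper.bbox_head = nn.Linear(4, 2)  # stand-in for a real bbox head
    assert sorted(wrapper.state_dict()) == [
        'backbone.bias', 'backbone.weight',
        'bbox_head.bias', 'bbox_head.weight'
    ]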
| 3,880 | 40.287234 | 74 | py |
ERD | ERD-main/mmdet/models/seg_heads/base_semantic_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple, Union
import torch.nn.functional as F
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptMultiConfig
@MODELS.register_module()
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
        seg_rescale_factor (float): the rescale factor for ``gt_sem_seg``,
            which equals ``1 / output_strides``, where ``output_strides``
            is the output stride of ``seg_preds``. Defaults to 1 / 4.
init_cfg (Optional[Union[:obj:`ConfigDict`, dict]]): the initialization
config.
loss_seg (Union[:obj:`ConfigDict`, dict]): the loss of the semantic
head.
"""
def __init__(self,
num_classes: int,
seg_rescale_factor: float = 1 / 4.,
loss_seg: ConfigType = dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.loss_seg = MODELS.build(loss_seg)
self.num_classes = num_classes
self.seg_rescale_factor = seg_rescale_factor
@abstractmethod
def forward(self, x: Union[Tensor, Tuple[Tensor]]) -> Dict[str, Tensor]:
"""Placeholder of forward function.
Args:
x (Tensor): Feature maps.
Returns:
Dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
@abstractmethod
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
pass
def predict(self,
x: Union[Tensor, Tuple[Tensor]],
batch_img_metas: List[dict],
rescale: bool = False) -> List[Tensor]:
"""Test without Augmentation.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[Tensor]: semantic segmentation logits.
"""
seg_preds = self.forward(x)['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=batch_img_metas[0]['batch_input_shape'],
mode='bilinear',
align_corners=False)
seg_preds = [seg_preds[i] for i in range(len(batch_img_metas))]
if rescale:
seg_pred_list = []
for i in range(len(batch_img_metas)):
h, w = batch_img_metas[i]['img_shape']
seg_pred = seg_preds[i][:, :h, :w]
h, w = batch_img_metas[i]['ori_shape']
seg_pred = F.interpolate(
seg_pred[None],
size=(h, w),
mode='bilinear',
align_corners=False)[0]
seg_pred_list.append(seg_pred)
else:
seg_pred_list = seg_preds
return seg_pred_list
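# --- Usage sketch (added for illustration; all shapes are toy assumptions).
# The rescale path in ``predict`` first crops away the padding
# (``img_shape``), then interpolates back to the raw image resolution
# (``ori_shape``). A pure-tensor restatement:
if __name__ == '__main__':
    import torch
    seg_pred = torch.rand(54, 96, 128)  # logits on the padded batch input
    img_h, img_w = 96, 120              # img_shape before padding
    seg_pred = seg_pred[:, :img_h, :img_w]
    ori_h, ori_w = 60, 80               # ori_shape of the raw image
    seg_pred = F.interpolate(
        seg_pred[None], size=(ori_h, ori_w), mode='bilinear',
        align_corners=False)[0]
    assert seg_pred.shape == (54, 60, 80)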
| 3,866 | 32.921053 | 79 | py |
ERD | ERD-main/mmdet/models/seg_heads/panoptic_fpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model import ModuleList
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from ..layers import ConvUpsample
from ..utils import interpolate_as
from .base_semantic_head import BaseSemanticHead
@MODELS.register_module()
class PanopticFPNHead(BaseSemanticHead):
"""PanopticFPNHead used in Panoptic FPN.
In this head, the number of output channels is ``num_stuff_classes
+ 1``, including all stuff classes and one thing class. The stuff
classes will be reset from ``0`` to ``num_stuff_classes - 1``, the
thing classes will be merged to ``num_stuff_classes``-th channel.
    Args:
num_things_classes (int): Number of thing classes. Default: 80.
num_stuff_classes (int): Number of stuff classes. Default: 53.
in_channels (int): Number of channels in the input feature
map.
inner_channels (int): Number of channels in inner features.
start_level (int): The start level of the input features
used in PanopticFPN.
end_level (int): The end level of the used features, the
``end_level``-th layer will not be used.
conv_cfg (Optional[Union[ConfigDict, dict]]): Dictionary to construct
and config conv layer.
norm_cfg (Union[ConfigDict, dict]): Dictionary to construct and config
norm layer. Use ``GN`` by default.
init_cfg (Optional[Union[ConfigDict, dict]]): Initialization config
dict.
loss_seg (Union[ConfigDict, dict]): the loss of the semantic head.
"""
def __init__(self,
num_things_classes: int = 80,
num_stuff_classes: int = 53,
in_channels: int = 256,
inner_channels: int = 128,
start_level: int = 0,
end_level: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='GN', num_groups=32, requires_grad=True),
loss_seg: ConfigType = dict(
type='CrossEntropyLoss', ignore_index=-1,
loss_weight=1.0),
init_cfg: OptMultiConfig = None) -> None:
seg_rescale_factor = 1 / 2**(start_level + 2)
super().__init__(
num_classes=num_stuff_classes + 1,
seg_rescale_factor=seg_rescale_factor,
loss_seg=loss_seg,
init_cfg=init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
# Used feature layers are [start_level, end_level)
self.start_level = start_level
self.end_level = end_level
self.num_stages = end_level - start_level
self.inner_channels = inner_channels
self.conv_upsample_layers = ModuleList()
for i in range(start_level, end_level):
self.conv_upsample_layers.append(
ConvUpsample(
in_channels,
inner_channels,
num_layers=i if i > 0 else 1,
num_upsample=i if i > 0 else 0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
))
self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1)
def _set_things_to_void(self, gt_semantic_seg: Tensor) -> Tensor:
"""Merge thing classes to one class.
        In PanopticFPN, the background (stuff) labels are reset to the range
        `0` to `self.num_stuff_classes - 1`, and the foreground (thing)
        labels are merged into the `self.num_stuff_classes`-th channel.
"""
gt_semantic_seg = gt_semantic_seg.int()
fg_mask = gt_semantic_seg < self.num_things_classes
bg_mask = (gt_semantic_seg >= self.num_things_classes) * (
gt_semantic_seg < self.num_things_classes + self.num_stuff_classes)
new_gt_seg = torch.clone(gt_semantic_seg)
new_gt_seg = torch.where(bg_mask,
gt_semantic_seg - self.num_things_classes,
new_gt_seg)
new_gt_seg = torch.where(fg_mask,
fg_mask.int() * self.num_stuff_classes,
new_gt_seg)
return new_gt_seg
def loss(self, x: Union[Tensor, Tuple[Tensor]],
batch_data_samples: SampleList) -> Dict[str, Tensor]:
"""
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
Dict[str, Tensor]: The loss of semantic head.
"""
seg_preds = self(x)['seg_preds']
gt_semantic_segs = [
data_sample.gt_sem_seg.sem_seg
for data_sample in batch_data_samples
]
gt_semantic_segs = torch.stack(gt_semantic_segs)
if self.seg_rescale_factor != 1.0:
gt_semantic_segs = F.interpolate(
gt_semantic_segs.float(),
scale_factor=self.seg_rescale_factor,
mode='nearest').squeeze(1)
# Things classes will be merged to one class in PanopticFPN.
gt_semantic_segs = self._set_things_to_void(gt_semantic_segs)
if seg_preds.shape[-2:] != gt_semantic_segs.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_segs)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_segs.reshape(-1).long())
return dict(loss_seg=loss_seg)
def init_weights(self) -> None:
"""Initialize weights."""
super().init_weights()
nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
self.conv_logits.bias.data.zero_()
def forward(self, x: Tuple[Tensor]) -> Dict[str, Tensor]:
"""Forward.
Args:
x (Tuple[Tensor]): Multi scale Feature maps.
Returns:
dict[str, Tensor]: semantic segmentation predictions and
feature maps.
"""
        # the number of subnets must not be more than
# the length of features.
assert self.num_stages <= len(x)
feats = []
for i, layer in enumerate(self.conv_upsample_layers):
f = layer(x[self.start_level + i])
feats.append(f)
seg_feats = torch.sum(torch.stack(feats, dim=0), dim=0)
seg_preds = self.conv_logits(seg_feats)
out = dict(seg_preds=seg_preds, seg_feats=seg_feats)
return out
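# --- Usage sketch (added for illustration): a pure-tensor restatement of
# ``_set_things_to_void`` with toy numbers (3 thing classes, 2 stuff
# classes). Thing ids {0, 1, 2} collapse to 2 (= num_stuff_classes), stuff
# ids {3, 4} shift down to {0, 1}, and other values (e.g. 255) are kept.
if __name__ == '__main__':
    num_things, num_stuff = 3, 2
    gt = torch.tensor([[0, 1, 2, 3, 4, 255]]).int()
    fg_mask = gt < num_things
    bg_mask = (gt >= num_things) & (gt < num_things + num_stuff)
    new_gt = torch.where(bg_mask, gt - num_things, gt)
    new_gt = torch.where(fg_mask, torch.full_like(gt, num_stuff), new_gt)
    assert new_gt.tolist() == [[2, 2, 2, 0, 1, 255]]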
| 6,975 | 38.862857 | 79 | py |
ERD | ERD-main/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
import torch.nn.functional as F
from mmengine.structures import InstanceData, PixelData
from torch import Tensor
from mmdet.evaluation.functional import INSTANCE_OFFSET
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.mask import mask2bbox
from mmdet.utils import OptConfigType, OptMultiConfig
from .base_panoptic_fusion_head import BasePanopticFusionHead
@MODELS.register_module()
class MaskFormerFusionHead(BasePanopticFusionHead):
"""MaskFormer fusion head which postprocesses results for panoptic
segmentation, instance segmentation and semantic segmentation."""
def __init__(self,
num_things_classes: int = 80,
num_stuff_classes: int = 53,
test_cfg: OptConfigType = None,
loss_panoptic: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs):
super().__init__(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
test_cfg=test_cfg,
loss_panoptic=loss_panoptic,
init_cfg=init_cfg,
**kwargs)
def loss(self, **kwargs):
"""MaskFormerFusionHead has no training loss."""
return dict()
def panoptic_postprocess(self, mask_cls: Tensor,
mask_pred: Tensor) -> PixelData:
"""Panoptic segmengation inference.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
:obj:`PixelData`: Panoptic segment result of shape \
(h, w), each element in Tensor means: \
``segment_id = _cls + instance_id * INSTANCE_OFFSET``.
"""
object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8)
iou_thr = self.test_cfg.get('iou_thr', 0.8)
filter_low_score = self.test_cfg.get('filter_low_score', False)
scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
mask_pred = mask_pred.sigmoid()
keep = labels.ne(self.num_classes) & (scores > object_mask_thr)
cur_scores = scores[keep]
cur_classes = labels[keep]
cur_masks = mask_pred[keep]
cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks
h, w = cur_masks.shape[-2:]
panoptic_seg = torch.full((h, w),
self.num_classes,
dtype=torch.int32,
device=cur_masks.device)
if cur_masks.shape[0] == 0:
# We didn't detect any mask :(
pass
else:
cur_mask_ids = cur_prob_masks.argmax(0)
instance_id = 1
for k in range(cur_classes.shape[0]):
pred_class = int(cur_classes[k].item())
isthing = pred_class < self.num_things_classes
mask = cur_mask_ids == k
mask_area = mask.sum().item()
original_area = (cur_masks[k] >= 0.5).sum().item()
if filter_low_score:
mask = mask & (cur_masks[k] >= 0.5)
if mask_area > 0 and original_area > 0:
if mask_area / original_area < iou_thr:
continue
if not isthing:
                        # different stuff regions of the same class are
                        # merged here, and stuff shares the instance_id 0.
panoptic_seg[mask] = pred_class
else:
panoptic_seg[mask] = (
pred_class + instance_id * INSTANCE_OFFSET)
instance_id += 1
return PixelData(sem_seg=panoptic_seg[None])
def semantic_postprocess(self, mask_cls: Tensor,
mask_pred: Tensor) -> PixelData:
"""Semantic segmengation postprocess.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
:obj:`PixelData`: Semantic segment result.
"""
# TODO add semantic segmentation result
raise NotImplementedError
def instance_postprocess(self, mask_cls: Tensor,
mask_pred: Tensor) -> InstanceData:
"""Instance segmengation postprocess.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
:obj:`InstanceData`: Instance segmentation results.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
max_per_image = self.test_cfg.get('max_per_image', 100)
num_queries = mask_cls.shape[0]
# shape (num_queries, num_class)
scores = F.softmax(mask_cls, dim=-1)[:, :-1]
# shape (num_queries * num_class, )
labels = torch.arange(self.num_classes, device=mask_cls.device).\
unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
scores_per_image, top_indices = scores.flatten(0, 1).topk(
max_per_image, sorted=False)
labels_per_image = labels[top_indices]
query_indices = top_indices // self.num_classes
mask_pred = mask_pred[query_indices]
# extract things
is_thing = labels_per_image < self.num_things_classes
scores_per_image = scores_per_image[is_thing]
labels_per_image = labels_per_image[is_thing]
mask_pred = mask_pred[is_thing]
mask_pred_binary = (mask_pred > 0).float()
mask_scores_per_image = (mask_pred.sigmoid() *
mask_pred_binary).flatten(1).sum(1) / (
mask_pred_binary.flatten(1).sum(1) + 1e-6)
det_scores = scores_per_image * mask_scores_per_image
mask_pred_binary = mask_pred_binary.bool()
bboxes = mask2bbox(mask_pred_binary)
results = InstanceData()
results.bboxes = bboxes
results.labels = labels_per_image
results.scores = det_scores
results.masks = mask_pred_binary
return results
def predict(self,
mask_cls_results: Tensor,
mask_pred_results: Tensor,
batch_data_samples: SampleList,
rescale: bool = False,
**kwargs) -> List[dict]:
"""Test segment without test-time aumengtation.
Only the output of last decoder layers was used.
Args:
mask_cls_results (Tensor): Mask classification logits,
shape (batch_size, num_queries, cls_out_channels).
                Note `cls_out_channels` should include the background class.
mask_pred_results (Tensor): Mask logits, shape
(batch_size, num_queries, h, w).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): If True, return boxes in
original image space. Default False.
Returns:
list[dict]: Instance segmentation \
results and panoptic segmentation results for each \
image.
.. code-block:: none
[
{
'pan_results': PixelData,
'ins_results': InstanceData,
# semantic segmentation results are not supported yet
'sem_results': PixelData
},
...
]
"""
batch_img_metas = [
data_sample.metainfo for data_sample in batch_data_samples
]
panoptic_on = self.test_cfg.get('panoptic_on', True)
semantic_on = self.test_cfg.get('semantic_on', False)
instance_on = self.test_cfg.get('instance_on', False)
        assert not semantic_on, 'semantic segmentation '\
'results are not supported yet.'
results = []
for mask_cls_result, mask_pred_result, meta in zip(
mask_cls_results, mask_pred_results, batch_img_metas):
# remove padding
img_height, img_width = meta['img_shape'][:2]
mask_pred_result = mask_pred_result[:, :img_height, :img_width]
if rescale:
# return result in original resolution
ori_height, ori_width = meta['ori_shape'][:2]
mask_pred_result = F.interpolate(
mask_pred_result[:, None],
size=(ori_height, ori_width),
mode='bilinear',
align_corners=False)[:, 0]
result = dict()
if panoptic_on:
pan_results = self.panoptic_postprocess(
mask_cls_result, mask_pred_result)
result['pan_results'] = pan_results
if instance_on:
ins_results = self.instance_postprocess(
mask_cls_result, mask_pred_result)
result['ins_results'] = ins_results
if semantic_on:
sem_results = self.semantic_postprocess(
mask_cls_result, mask_pred_result)
result['sem_results'] = sem_results
results.append(result)
return results
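# --- Usage sketch (added for illustration; assumes INSTANCE_OFFSET = 1000
# as defined in ``mmdet.evaluation.functional``). Each panoptic pixel packs
# ``segment_id = class_id + instance_id * INSTANCE_OFFSET``, so both parts
# can be recovered with modulo and integer division.
if __name__ == '__main__':
    class_id, instance_id = 17, 3
    segment_id = class_id + instance_id * INSTANCE_OFFSET
    assert segment_id % INSTANCE_OFFSET == class_id
    assert segment_id // INSTANCE_OFFSET == instance_id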
| 10,522 | 38.411985 | 79 | py |
ERD | ERD-main/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
from mmengine.structures import InstanceData, PixelData
from torch import Tensor
from mmdet.evaluation.functional import INSTANCE_OFFSET
from mmdet.registry import MODELS
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig, PixelList
from .base_panoptic_fusion_head import BasePanopticFusionHead
@MODELS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes: int = 80,
num_stuff_classes: int = 53,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
super().__init__(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
test_cfg=test_cfg,
loss_panoptic=None,
init_cfg=init_cfg,
**kwargs)
def loss(self, **kwargs) -> dict:
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self,
mask_results: InstanceData,
overlap_thr: float = 0.5) -> Tensor:
"""Lay instance masks to a result map.
Args:
mask_results (:obj:`InstanceData`): Instance segmentation results,
each contains ``bboxes``, ``labels``, ``scores`` and ``masks``.
            overlap_thr (float): Threshold to determine whether two masks
                overlap. Default: 0.5.
        Returns:
            tuple[Tensor, Tensor]: The result map of shape (H, W) and the
                labels of the kept instances.
"""
bboxes = mask_results.bboxes
scores = mask_results.scores
labels = mask_results.labels
masks = mask_results.masks
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def _predict_single(self, mask_results: InstanceData, seg_preds: Tensor,
**kwargs) -> PixelData:
"""Fuse the results of instance and semantic segmentations.
Args:
mask_results (:obj:`InstanceData`): Instance segmentation results,
each contains ``bboxes``, ``labels``, ``scores`` and ``masks``.
seg_preds (Tensor): The semantic segmentation results,
(num_stuff + 1, H, W).
Returns:
            :obj:`PixelData`: The panoptic segmentation result of shape
                (H, W).
"""
id_map, labels = self._lay_masks(mask_results,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(len(mask_results)):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
pan_results = PixelData(sem_seg=pan_results[None].int())
return pan_results
def predict(self, mask_results_list: InstanceList,
seg_preds_list: List[Tensor], **kwargs) -> PixelList:
"""Predict results by fusing the results of instance and semantic
segmentations.
Args:
mask_results_list (list[:obj:`InstanceData`]): Instance
segmentation results, each contains ``bboxes``, ``labels``,
``scores`` and ``masks``.
            seg_preds_list (list[Tensor]): List of semantic segmentation
                results.
Returns:
List[PixelData]: Panoptic segmentation result.
"""
results_list = [
self._predict_single(mask_results_list[i], seg_preds_list[i])
for i in range(len(mask_results_list))
]
return results_list
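# --- Usage sketch (added for illustration; 1-D toy masks). The core of
# ``_lay_masks`` is score-ordered pasting with an overlap test: a
# lower-scoring mask is skipped if most of it is already claimed, otherwise
# it keeps only its unclaimed pixels.
if __name__ == '__main__':
    scores = torch.tensor([0.9, 0.6])
    masks = torch.tensor([[1, 1, 1, 0, 0],
                          [0, 0, 1, 1, 1]]).bool()
    id_map = torch.zeros(5, dtype=torch.long)
    instance_id = 1
    for idx in torch.argsort(-scores):
        mask = masks[idx]
        intersect = (mask & (id_map > 0)).sum()
        if intersect / (mask.sum() + 1e-5) > 0.5:  # overlap_thr
            continue
        id_map = torch.where(mask & (id_map == 0),
                             torch.full_like(id_map, instance_id), id_map)
        instance_id += 1
    assert id_map.tolist() == [1, 1, 1, 2, 2]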
| 5,702 | 34.64375 | 79 | py |
ERD | ERD-main/mmdet/models/necks/yolox_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from ..layers import CSPLayer
@MODELS.register_module()
class YOLOXPAFPN(BaseModule):
"""Path Aggregation Network used in YOLOX.
Args:
in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3.
        use_depthwise (bool): Whether to use depthwise separable convolution
            in blocks. Default: False.
upsample_cfg (dict): Config dict for interpolate layer.
Default: `dict(scale_factor=2, mode='nearest')`
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN')
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish')
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
num_csp_blocks=3,
use_depthwise=False,
upsample_cfg=dict(scale_factor=2, mode='nearest'),
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
init_cfg=dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')):
super(YOLOXPAFPN, self).__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
# build top-down blocks
self.upsample = nn.Upsample(**upsample_cfg)
self.reduce_layers = nn.ModuleList()
self.top_down_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1, 0, -1):
self.reduce_layers.append(
ConvModule(
in_channels[idx],
in_channels[idx - 1],
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.top_down_blocks.append(
CSPLayer(
in_channels[idx - 1] * 2,
in_channels[idx - 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
# build bottom-up blocks
self.downsamples = nn.ModuleList()
self.bottom_up_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1):
self.downsamples.append(
conv(
in_channels[idx],
in_channels[idx],
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.bottom_up_blocks.append(
CSPLayer(
in_channels[idx] * 2,
in_channels[idx + 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.out_convs = nn.ModuleList()
for i in range(len(in_channels)):
self.out_convs.append(
ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs):
"""
Args:
inputs (tuple[Tensor]): input features.
Returns:
tuple[Tensor]: YOLOXPAFPN features.
"""
assert len(inputs) == len(self.in_channels)
# top-down path
inner_outs = [inputs[-1]]
for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = inputs[idx - 1]
            feat_high = self.reduce_layers[len(self.in_channels) - 1 - idx](
                feat_high)
            inner_outs[0] = feat_high
            upsample_feat = self.upsample(feat_high)
inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
torch.cat([upsample_feat, feat_low], 1))
inner_outs.insert(0, inner_out)
# bottom-up path
outs = [inner_outs[0]]
for idx in range(len(self.in_channels) - 1):
feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
downsample_feat = self.downsamples[idx](feat_low)
out = self.bottom_up_blocks[idx](
                torch.cat([downsample_feat, feat_high], 1))
outs.append(out)
# out convs
for idx, conv in enumerate(self.out_convs):
outs[idx] = conv(outs[idx])
return tuple(outs)
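# --- Usage sketch (added for illustration; channel/stride choices are toy
# assumptions and mmcv must be available). The neck maps three input levels
# with different channel counts to ``out_channels`` each, preserving the
# spatial size of every level.
if __name__ == '__main__':
    neck = YOLOXPAFPN(in_channels=[128, 256, 512], out_channels=256)
    feats = (torch.rand(1, 128, 64, 64), torch.rand(1, 256, 32, 32),
             torch.rand(1, 512, 16, 16))
    outs = neck(feats)
    assert [tuple(o.shape) for o in outs] == [
        (1, 256, 64, 64), (1, 256, 32, 32), (1, 256, 16, 16)
    ]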
| 5,658 | 35.044586 | 78 | py |
ERD | ERD-main/mmdet/models/necks/ssd_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
l2_norm_scale (float|None): L2 normalization layer init scale.
            If None, L2 normalization is not applied to the first input
            feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
            scale (float, optional): Scaling factor applied after
                normalization. Defaults to 20.
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
        # the normalization layer is computed in FP32 during FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
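# --- Usage sketch (added for illustration). L2Norm rescales each spatial
# position to unit L2 norm across channels and multiplies by a learnable
# per-channel weight; initializing the weight to ``scale`` by hand below
# mirrors the ``init_cfg`` that SSDNeck registers for it.
if __name__ == '__main__':
    l2_norm = L2Norm(n_dims=4, scale=20.)
    nn.init.constant_(l2_norm.weight, l2_norm.scale)
    x = torch.rand(2, 4, 8, 8) + 0.1  # keep inputs away from zero
    out = l2_norm(x)
    norms = out.pow(2).sum(1).sqrt()
    assert torch.allclose(norms, torch.full_like(norms, 20.), atol=1e-4)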
| 4,901 | 36.707692 | 77 | py |
ERD | ERD-main/mmdet/models/necks/rfp.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model import BaseModule, ModuleList, constant_init, xavier_init
from mmdet.registry import MODELS
from .fpn import FPN
class ASPP(BaseModule):
"""ASPP (Atrous Spatial Pyramid Pooling)
This is an implementation of the ASPP module used in DetectoRS
(https://arxiv.org/pdf/2006.02334.pdf)
Args:
in_channels (int): Number of input channels.
        out_channels (int): Number of channels produced by this module.
dilations (tuple[int]): Dilations of the four branches.
Default: (1, 3, 6, 1)
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
dilations=(1, 3, 6, 1),
init_cfg=dict(type='Kaiming', layer='Conv2d')):
super().__init__(init_cfg)
assert dilations[-1] == 1
self.aspp = nn.ModuleList()
for dilation in dilations:
kernel_size = 3 if dilation > 1 else 1
padding = dilation if dilation > 1 else 0
conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
dilation=dilation,
padding=padding,
bias=True)
self.aspp.append(conv)
self.gap = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
avg_x = self.gap(x)
out = []
for aspp_idx in range(len(self.aspp)):
inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x
out.append(F.relu_(self.aspp[aspp_idx](inp)))
out[-1] = out[-1].expand_as(out[-2])
out = torch.cat(out, dim=1)
return out
@MODELS.register_module()
class RFP(FPN):
"""RFP (Recursive Feature Pyramid)
This is an implementation of RFP in `DetectoRS
<https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the
input of RFP should be multi level features along with origin input image
of backbone.
Args:
rfp_steps (int): Number of unrolled steps of RFP.
rfp_backbone (dict): Configuration of the backbone for RFP.
aspp_out_channels (int): Number of output channels of ASPP module.
        aspp_dilations (tuple[int]): Dilation rates of the four branches.
Default: (1, 3, 6, 1)
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
rfp_steps,
rfp_backbone,
aspp_out_channels,
aspp_dilations=(1, 3, 6, 1),
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(init_cfg=init_cfg, **kwargs)
self.rfp_steps = rfp_steps
        # Be careful! Pretrained weights cannot be loaded when using
# nn.ModuleList
self.rfp_modules = ModuleList()
for rfp_idx in range(1, rfp_steps):
rfp_module = MODELS.build(rfp_backbone)
self.rfp_modules.append(rfp_module)
self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
aspp_dilations)
self.rfp_weight = nn.Conv2d(
self.out_channels,
1,
kernel_size=1,
stride=1,
padding=0,
bias=True)
def init_weights(self):
# Avoid using super().init_weights(), which may alter the default
# initialization of the modules in self.rfp_modules that have missing
# keys in the pretrained checkpoint.
for convs in [self.lateral_convs, self.fpn_convs]:
for m in convs.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
for rfp_idx in range(self.rfp_steps - 1):
self.rfp_modules[rfp_idx].init_weights()
constant_init(self.rfp_weight, 0)
def forward(self, inputs):
inputs = list(inputs)
assert len(inputs) == len(self.in_channels) + 1 # +1 for input image
img = inputs.pop(0)
# FPN forward
x = super().forward(tuple(inputs))
for rfp_idx in range(self.rfp_steps - 1):
rfp_feats = [x[0]] + list(
self.rfp_aspp(x[i]) for i in range(1, len(x)))
x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
# FPN forward
x_idx = super().forward(x_idx)
x_new = []
for ft_idx in range(len(x_idx)):
add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
x_new.append(add_weight * x_idx[ft_idx] +
(1 - add_weight) * x[ft_idx])
x = x_new
return x
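# --- Usage sketch (added for illustration; toy channel sizes). ASPP
# concatenates its four branch outputs along channels, and the
# global-pooling branch is broadcast back to the spatial size of the other
# branches, so the output has ``4 * out_channels`` channels at the input
# resolution.
if __name__ == '__main__':
    aspp = ASPP(in_channels=64, out_channels=16)
    out = aspp(torch.rand(1, 64, 32, 32))
    assert tuple(out.shape) == (1, 64, 32, 32)  # 4 branches x 16 channels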
| 5,024 | 36.222222 | 78 | py |
ERD | ERD-main/mmdet/models/necks/dilated_encoder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, is_norm
from mmengine.model import caffe2_xavier_init, constant_init, normal_init
from torch.nn import BatchNorm2d
from mmdet.registry import MODELS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@MODELS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
        block_mid_channels (int): The number of middle block output channels.
        num_residual_blocks (int): The number of residual blocks.
        block_dilations (list): The dilation rates of the residual blocks.
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks, block_dilations):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = block_dilations
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
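# --- Usage sketch (added for illustration; YOLOF-like hyper-parameters are
# assumptions). The encoder consumes only the last backbone feature map and
# returns a single-element tuple with channels projected from
# ``in_channels`` to ``out_channels``.
if __name__ == '__main__':
    import torch
    encoder = DilatedEncoder(in_channels=512, out_channels=128,
                             block_mid_channels=32, num_residual_blocks=4,
                             block_dilations=[2, 4, 6, 8])
    encoder.init_weights()
    out, = encoder((torch.rand(1, 512, 16, 16), ))
    assert tuple(out.shape) == (1, 128, 16, 16)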
| 3,967 | 35.072727 | 79 | py |
ERD | ERD-main/mmdet/models/necks/fpg.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
class Transition(BaseModule):
"""Base class for transition.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
"""
def __init__(self, in_channels, out_channels, init_cfg=None):
super().__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
    def forward(self, x):
        pass
class UpInterpolationConv(Transition):
"""A transition used for up-sampling.
Up-sample the input by interpolation then refines the feature by
a convolution layer.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
scale_factor (int): Up-sampling factor. Default: 2.
mode (int): Interpolation mode. Default: nearest.
align_corners (bool): Whether align corners when interpolation.
Default: None.
kernel_size (int): Kernel size for the conv. Default: 3.
"""
def __init__(self,
in_channels,
out_channels,
scale_factor=2,
mode='nearest',
align_corners=None,
kernel_size=3,
init_cfg=None,
**kwargs):
super().__init__(in_channels, out_channels, init_cfg)
self.mode = mode
self.scale_factor = scale_factor
self.align_corners = align_corners
self.conv = ConvModule(
in_channels,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
**kwargs)
def forward(self, x):
x = F.interpolate(
x,
scale_factor=self.scale_factor,
mode=self.mode,
align_corners=self.align_corners)
x = self.conv(x)
return x
class LastConv(Transition):
"""A transition used for refining the output of the last stage.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
num_inputs (int): Number of inputs of the FPN features.
kernel_size (int): Kernel size for the conv. Default: 3.
"""
def __init__(self,
in_channels,
out_channels,
num_inputs,
kernel_size=3,
init_cfg=None,
**kwargs):
super().__init__(in_channels, out_channels, init_cfg)
self.num_inputs = num_inputs
self.conv_out = ConvModule(
in_channels,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
**kwargs)
def forward(self, inputs):
assert len(inputs) == self.num_inputs
return self.conv_out(inputs[-1])
@MODELS.register_module()
class FPG(BaseModule):
"""FPG.
Implementation of `Feature Pyramid Grids (FPG)
<https://arxiv.org/abs/2004.03580>`_.
This implementation only gives the basic structure stated in the paper.
    Users can implement different types of transitions to fully explore the
    potential power of the structure of FPG.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
stack_times (int): The number of times the pyramid architecture will
be stacked.
paths (list[str]): Specify the path order of each stack level.
Each element in the list should be either 'bu' (bottom-up) or
'td' (top-down).
inter_channels (int): Number of inter channels.
        same_up_trans (dict): Transition that goes up to a coarser level at
            the same stage (spatial downsampling); used in bottom-up paths.
        same_down_trans (dict): Transition that goes down to a finer level at
            the same stage (spatial upsampling); used in top-down paths.
        across_lateral_trans (dict): Across-pathway same-stage connection.
across_down_trans (dict): Across-pathway bottom-up connection.
across_up_trans (dict): Across-pathway top-down connection.
across_skip_trans (dict): Across-pathway skip connection.
output_trans (dict): Transition that trans the output of the
last stage.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): It decides whether to add conv layers on top
            of the original feature maps. Defaults to False. If True, the
            extra levels are produced by stride-2 convs instead of max
            pooling.
norm_cfg (dict): Config dict for normalization layer. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
transition_types = {
'conv': ConvModule,
'interpolation_conv': UpInterpolationConv,
'last_conv': LastConv,
}
def __init__(self,
in_channels,
out_channels,
num_outs,
stack_times,
paths,
inter_channels=None,
same_down_trans=None,
same_up_trans=dict(
type='conv', kernel_size=3, stride=2, padding=1),
across_lateral_trans=dict(type='conv', kernel_size=1),
across_down_trans=dict(type='conv', kernel_size=3),
across_up_trans=None,
                 # NOTE: 'identity' is not a key of `transition_types`, so
                 # configs must override `across_skip_trans` with a registered
                 # transition such as 'conv'.
                 across_skip_trans=dict(type='identity'),
output_trans=dict(type='last_conv', kernel_size=3),
start_level=0,
end_level=-1,
add_extra_convs=False,
norm_cfg=None,
skip_inds=None,
init_cfg=[
dict(type='Caffe2Xavier', layer='Conv2d'),
dict(
type='Constant',
layer=[
'_BatchNorm', '_InstanceNorm', 'GroupNorm',
'LayerNorm'
],
val=1.0)
]):
super(FPG, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
if inter_channels is None:
self.inter_channels = [out_channels for _ in range(num_outs)]
elif isinstance(inter_channels, int):
self.inter_channels = [inter_channels for _ in range(num_outs)]
else:
assert isinstance(inter_channels, list)
assert len(inter_channels) == num_outs
self.inter_channels = inter_channels
self.stack_times = stack_times
self.paths = paths
assert isinstance(paths, list) and len(paths) == stack_times
for d in paths:
assert d in ('bu', 'td')
self.same_down_trans = same_down_trans
self.same_up_trans = same_up_trans
self.across_lateral_trans = across_lateral_trans
self.across_down_trans = across_down_trans
self.across_up_trans = across_up_trans
self.output_trans = output_trans
self.across_skip_trans = across_skip_trans
self.with_bias = norm_cfg is None
# skip inds must be specified if across skip trans is not None
if self.across_skip_trans is not None:
            assert skip_inds is not None
self.skip_inds = skip_inds
assert len(self.skip_inds[0]) <= self.stack_times
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
# build lateral 1x1 convs to reduce channels
self.lateral_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = nn.Conv2d(self.in_channels[i],
self.inter_channels[i - self.start_level], 1)
self.lateral_convs.append(l_conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
if self.add_extra_convs:
fpn_idx = self.backbone_end_level - self.start_level + i
extra_conv = nn.Conv2d(
self.inter_channels[fpn_idx - 1],
self.inter_channels[fpn_idx],
3,
stride=2,
padding=1)
self.extra_downsamples.append(extra_conv)
else:
self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))
self.fpn_transitions = nn.ModuleList() # stack times
for s in range(self.stack_times):
stage_trans = nn.ModuleList() # num of feature levels
for i in range(self.num_outs):
# same, across_lateral, across_down, across_up
trans = nn.ModuleDict()
if s in self.skip_inds[i]:
stage_trans.append(trans)
continue
            # build same-stage up trans (used in bottom-up paths)
if i == 0 or self.same_up_trans is None:
same_up_trans = None
else:
same_up_trans = self.build_trans(
self.same_up_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['same_up'] = same_up_trans
            # build same-stage down trans (used in top-down paths)
if i == self.num_outs - 1 or self.same_down_trans is None:
same_down_trans = None
else:
same_down_trans = self.build_trans(
self.same_down_trans, self.inter_channels[i + 1],
self.inter_channels[i])
trans['same_down'] = same_down_trans
# build across lateral trans
across_lateral_trans = self.build_trans(
self.across_lateral_trans, self.inter_channels[i],
self.inter_channels[i])
trans['across_lateral'] = across_lateral_trans
# build across down trans
if i == self.num_outs - 1 or self.across_down_trans is None:
across_down_trans = None
else:
across_down_trans = self.build_trans(
self.across_down_trans, self.inter_channels[i + 1],
self.inter_channels[i])
trans['across_down'] = across_down_trans
# build across up trans
if i == 0 or self.across_up_trans is None:
across_up_trans = None
else:
across_up_trans = self.build_trans(
self.across_up_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['across_up'] = across_up_trans
                # build across skip trans
                if self.across_skip_trans is None:
across_skip_trans = None
else:
across_skip_trans = self.build_trans(
self.across_skip_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['across_skip'] = across_skip_trans
stage_trans.append(trans)
self.fpn_transitions.append(stage_trans)
self.output_transition = nn.ModuleList() # output levels
for i in range(self.num_outs):
trans = self.build_trans(
self.output_trans,
self.inter_channels[i],
self.out_channels,
num_inputs=self.stack_times + 1)
self.output_transition.append(trans)
self.relu = nn.ReLU(inplace=True)
def build_trans(self, cfg, in_channels, out_channels, **extra_args):
cfg_ = cfg.copy()
trans_type = cfg_.pop('type')
trans_cls = self.transition_types[trans_type]
return trans_cls(in_channels, out_channels, **cfg_, **extra_args)
def fuse(self, fuse_dict):
out = None
for item in fuse_dict.values():
if item is not None:
if out is None:
out = item
else:
out = out + item
return out
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build all levels from original feature maps
feats = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
for downsample in self.extra_downsamples:
feats.append(downsample(feats[-1]))
outs = [feats]
for i in range(self.stack_times):
current_outs = outs[-1]
next_outs = []
direction = self.paths[i]
for j in range(self.num_outs):
if i in self.skip_inds[j]:
next_outs.append(outs[-1][j])
continue
# feature level
if direction == 'td':
lvl = self.num_outs - j - 1
else:
lvl = j
# get transitions
if direction == 'td':
same_trans = self.fpn_transitions[i][lvl]['same_down']
else:
same_trans = self.fpn_transitions[i][lvl]['same_up']
across_lateral_trans = self.fpn_transitions[i][lvl][
'across_lateral']
across_down_trans = self.fpn_transitions[i][lvl]['across_down']
across_up_trans = self.fpn_transitions[i][lvl]['across_up']
across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
# init output
to_fuse = dict(
same=None, lateral=None, across_up=None, across_down=None)
# same downsample/upsample
if same_trans is not None:
to_fuse['same'] = same_trans(next_outs[-1])
# across lateral
if across_lateral_trans is not None:
to_fuse['lateral'] = across_lateral_trans(
current_outs[lvl])
# across downsample
if lvl > 0 and across_up_trans is not None:
to_fuse['across_up'] = across_up_trans(current_outs[lvl -
1])
# across upsample
if (lvl < self.num_outs - 1 and across_down_trans is not None):
to_fuse['across_down'] = across_down_trans(
current_outs[lvl + 1])
if across_skip_trans is not None:
to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
x = self.fuse(to_fuse)
next_outs.append(x)
if direction == 'td':
outs.append(next_outs[::-1])
else:
outs.append(next_outs)
# output trans
final_outs = []
for i in range(self.num_outs):
lvl_out_list = []
for s in range(len(outs)):
lvl_out_list.append(outs[s][i])
lvl_out = self.output_transition[i](lvl_out_list)
final_outs.append(lvl_out)
return final_outs
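# --- usage sketch (added; not part of the original mmdet file) -------------
# Minimal FPG smoke test. The transition configs below are illustrative
# assumptions chosen so that feature sizes line up (in particular,
# 'interpolation_conv' is used across-down so coarser features are upsampled
# before fusion); they are not the released FPG training configs.
def _demo_fpg():
    import torch
    neck = FPG(
        in_channels=[256, 512, 1024],
        out_channels=256,
        inter_channels=256,
        num_outs=5,
        stack_times=2,
        paths=['bu', 'td'],
        same_down_trans=None,
        same_up_trans=dict(type='conv', kernel_size=3, stride=2, padding=1),
        across_lateral_trans=dict(type='conv', kernel_size=1),
        across_down_trans=dict(
            type='interpolation_conv', mode='nearest', kernel_size=3),
        across_up_trans=None,
        across_skip_trans=dict(type='conv', kernel_size=1),
        output_trans=dict(type='last_conv', kernel_size=3),
        norm_cfg=None,
        skip_inds=[(), (), (), (), ()])  # no skipped stages in this sketch
    feats = [
        torch.rand(1, c, 64 >> i, 64 >> i)
        for i, c in enumerate([256, 512, 1024])
    ]
    outs = neck(feats)
    assert len(outs) == 5 and all(o.shape[1] == 256 for o in outs)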
| 16,397 | 39.289926 | 79 | py |
ERD | ERD-main/mmdet/models/necks/pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmdet.registry import MODELS
from .fpn import FPN
@MODELS.register_module()
class PAFPN(FPN):
"""Path Aggregation Network for Instance Segmentation.
This is an implementation of the `PAFPN in Path Aggregation Network
<https://arxiv.org/abs/1803.01534>`_.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Default to False.
If True, it is equivalent to `add_extra_convs='on_input'`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Default: False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (str): Config dict for activation layer in ConvModule.
Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(PAFPN, self).__init__(
in_channels,
out_channels,
num_outs,
start_level,
end_level,
add_extra_convs,
relu_before_extra_convs,
no_norm_on_lateral,
conv_cfg,
norm_cfg,
act_cfg,
init_cfg=init_cfg)
# add extra bottom up pathway
self.downsample_convs = nn.ModuleList()
self.pafpn_convs = nn.ModuleList()
for i in range(self.start_level + 1, self.backbone_end_level):
d_conv = ConvModule(
out_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
pafpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.downsample_convs.append(d_conv)
self.pafpn_convs.append(pafpn_conv)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], size=prev_shape, mode='nearest')
# build outputs
# part 1: from original levels
inter_outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add bottom-up path
for i in range(0, used_backbone_levels - 1):
inter_outs[i + 1] = inter_outs[i + 1] + \
self.downsample_convs[i](inter_outs[i])
outs = []
outs.append(inter_outs[0])
outs.extend([
self.pafpn_convs[i - 1](inter_outs[i])
for i in range(1, used_backbone_levels)
])
# part 3: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
elif self.add_extra_convs == 'on_lateral':
outs.append(self.fpn_convs[used_backbone_levels](
laterals[-1]))
elif self.add_extra_convs == 'on_output':
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
else:
raise NotImplementedError
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
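# --- usage sketch (added; not part of the original mmdet file) -------------
# Minimal PAFPN smoke test; the channel and size choices are illustrative
# assumptions, not values required by this module.
def _demo_pafpn():
    import torch
    in_channels = [256, 512, 1024, 2048]
    neck = PAFPN(in_channels, out_channels=256, num_outs=5)
    feats = [
        torch.rand(1, c, 64 >> i, 64 >> i)
        for i, c in enumerate(in_channels)
    ]
    outs = neck(feats)
    assert len(outs) == 5 and all(o.shape[1] == 256 for o in outs)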
| 6,277 | 38.734177 | 79 | py |
ERD | ERD-main/mmdet/models/necks/nasfcos_fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.ops.merge_cells import ConcatCell
from mmengine.model import BaseModule, caffe2_xavier_init
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOS_FPN(BaseModule):
"""FPN structure in NASFPN.
Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for
Object Detection <https://arxiv.org/abs/1906.04423>`_
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): It decides whether to add conv
            layers on top of the original feature maps. Default to False.
            (Unused here: extra levels are always produced by stride-2 convs.)
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=1,
end_level=-1,
add_extra_convs=False,
conv_cfg=None,
norm_cfg=None,
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(NASFCOS_FPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.adapt_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
adapt_conv = ConvModule(
in_channels[i],
out_channels,
1,
stride=1,
padding=0,
bias=False,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU', inplace=False))
self.adapt_convs.append(adapt_conv)
# C2 is omitted according to the paper
extra_levels = num_outs - self.backbone_end_level + self.start_level
def build_concat_cell(with_input1_conv, with_input2_conv):
cell_conv_cfg = dict(
kernel_size=1, padding=0, bias=False, groups=out_channels)
return ConcatCell(
in_channels=out_channels,
out_channels=out_channels,
with_out_conv=True,
out_conv_cfg=cell_conv_cfg,
out_norm_cfg=dict(type='BN'),
out_conv_order=('norm', 'act', 'conv'),
with_input1_conv=with_input1_conv,
with_input2_conv=with_input2_conv,
input_conv_cfg=conv_cfg,
input_norm_cfg=norm_cfg,
upsample_mode='nearest')
        # Denote c3=f0, c4=f1, c5=f2 for convenience
self.fpn = nn.ModuleDict()
self.fpn['c22_1'] = build_concat_cell(True, True)
self.fpn['c22_2'] = build_concat_cell(True, True)
self.fpn['c32'] = build_concat_cell(True, False)
self.fpn['c02'] = build_concat_cell(True, False)
self.fpn['c42'] = build_concat_cell(True, True)
self.fpn['c36'] = build_concat_cell(True, True)
self.fpn['c61'] = build_concat_cell(True, True) # f9
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
extra_act_cfg = None if i == 0 \
else dict(type='ReLU', inplace=False)
self.extra_downsamples.append(
ConvModule(
out_channels,
out_channels,
3,
stride=2,
padding=1,
act_cfg=extra_act_cfg,
order=('act', 'norm', 'conv')))
def forward(self, inputs):
"""Forward function."""
feats = [
adapt_conv(inputs[i + self.start_level])
for i, adapt_conv in enumerate(self.adapt_convs)
]
for (i, module_name) in enumerate(self.fpn):
idx_1, idx_2 = int(module_name[1]), int(module_name[2])
res = self.fpn[module_name](feats[idx_1], feats[idx_2])
feats.append(res)
ret = []
for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5
feats1, feats2 = feats[idx], feats[5]
feats2_resize = F.interpolate(
feats2,
size=feats1.size()[2:],
mode='bilinear',
align_corners=False)
feats_sum = feats1 + feats2_resize
ret.append(
F.interpolate(
feats_sum,
size=inputs[input_idx].size()[2:],
mode='bilinear',
align_corners=False))
for submodule in self.extra_downsamples:
ret.append(submodule(ret[-1]))
return tuple(ret)
def init_weights(self):
"""Initialize the weights of module."""
super(NASFCOS_FPN, self).init_weights()
for module in self.fpn.values():
            # ConcatCell stores its output conv as `out_conv`; checking for
            # 'conv_out' would always fail and silently skip this init
            if hasattr(module, 'out_conv'):
                caffe2_xavier_init(module.out_conv.conv)
for modules in [
self.adapt_convs.modules(),
self.extra_downsamples.modules()
]:
for module in modules:
if isinstance(module, nn.Conv2d):
caffe2_xavier_init(module)
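# --- usage sketch (added; not part of the original mmdet file) -------------
# Minimal smoke test with paper-style inputs (C2-C5; start_level=1 skips C2).
# conv_cfg is left as None here so the sketch does not require the DCNv2 op
# used in the released NAS-FCOS configs; shapes are illustrative assumptions.
def _demo_nasfcos_fpn():
    import torch
    in_channels = [256, 512, 1024, 2048]
    neck = NASFCOS_FPN(in_channels, out_channels=256, num_outs=5)
    neck.init_weights()
    feats = [
        torch.rand(1, c, 64 >> i, 64 >> i)
        for i, c in enumerate(in_channels)
    ]
    outs = neck(feats)
    assert len(outs) == 5 and all(o.shape[1] == 256 for o in outs)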
| 6,713 | 38.263158 | 79 | py |
ERD | ERD-main/mmdet/models/necks/fpn_carafe.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmengine.model import BaseModule, ModuleList, xavier_init
from mmdet.registry import MODELS
@MODELS.register_module()
class FPN_CARAFE(BaseModule):
"""FPN_CARAFE is a more flexible implementation of FPN. It allows more
choice for upsample methods during the top-down pathway.
It can reproduce the performance of ICCV 2019 paper
CARAFE: Content-Aware ReAssembly of FEatures
Please refer to https://arxiv.org/abs/1905.02188 for more details.
Args:
in_channels (list[int]): Number of channels for each input feature map.
out_channels (int): Output channels of feature pyramids.
num_outs (int): Number of output stages.
start_level (int): Start level of feature pyramids.
(Default: 0)
end_level (int): End level of feature pyramids.
(Default: -1 indicates the last level).
norm_cfg (dict): Dictionary to construct and config norm layer.
activate (str): Type of activation function in ConvModule
(Default: None indicates w/o activation).
order (dict): Order of components in ConvModule.
upsample (str): Type of upsample layer.
upsample_cfg (dict): Dictionary to construct and config upsample layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FPN_CARAFE, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.with_bias = norm_cfg is None
self.upsample_cfg = upsample_cfg.copy()
self.upsample = self.upsample_cfg.get('type')
self.relu = nn.ReLU(inplace=False)
self.order = order
assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
assert self.upsample in [
'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
]
if self.upsample in ['deconv', 'pixel_shuffle']:
assert hasattr(
self.upsample_cfg,
'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.lateral_convs = ModuleList()
self.fpn_convs = ModuleList()
self.upsample_modules = ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if i != self.backbone_end_level - 1:
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample == 'deconv':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsample_cfg_.update(channels=out_channels, scale_factor=2)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsample_cfg_.update(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsample_module = build_upsample_layer(upsample_cfg_)
self.upsample_modules.append(upsample_module)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_out_levels = (
num_outs - self.backbone_end_level + self.start_level)
if extra_out_levels >= 1:
for i in range(extra_out_levels):
in_channels = (
self.in_channels[self.backbone_end_level -
1] if i == 0 else out_channels)
extra_l_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if self.upsample == 'deconv':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsampler_cfg_ = dict(
channels=out_channels,
scale_factor=2,
**self.upsample_cfg)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsampler_cfg_ = dict(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsampler_cfg_['type'] = self.upsample
upsample_module = build_upsample_layer(upsampler_cfg_)
extra_fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
self.upsample_modules.append(upsample_module)
self.fpn_convs.append(extra_fpn_conv)
self.lateral_convs.append(extra_l_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
"""Initialize the weights of module."""
super(FPN_CARAFE, self).init_weights()
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
xavier_init(m, distribution='uniform')
for m in self.modules():
if isinstance(m, CARAFEPack):
m.init_weights()
def slice_as(self, src, dst):
"""Slice ``src`` as ``dst``
Note:
``src`` should have the same or larger size than ``dst``.
Args:
src (torch.Tensor): Tensors to be sliced.
dst (torch.Tensor): ``src`` will be sliced to have the same
size as ``dst``.
Returns:
torch.Tensor: Sliced tensor.
"""
assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
return src
else:
return src[:, :, :dst.size(2), :dst.size(3)]
def tensor_add(self, a, b):
"""Add tensors ``a`` and ``b`` that might have different sizes."""
if a.size() == b.size():
c = a + b
else:
c = a + self.slice_as(b, a)
return c
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = []
for i, lateral_conv in enumerate(self.lateral_convs):
if i <= self.backbone_end_level - self.start_level:
input = inputs[min(i + self.start_level, len(inputs) - 1)]
else:
input = laterals[-1]
lateral = lateral_conv(input)
laterals.append(lateral)
# build top-down path
for i in range(len(laterals) - 1, 0, -1):
if self.upsample is not None:
upsample_feat = self.upsample_modules[i - 1](laterals[i])
else:
upsample_feat = laterals[i]
laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
# build outputs
num_conv_outs = len(self.fpn_convs)
outs = []
for i in range(num_conv_outs):
out = self.fpn_convs[i](laterals[i])
outs.append(out)
return tuple(outs)
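# --- usage sketch (added; not part of the original mmdet file) -------------
# Minimal smoke test. `upsample_cfg=dict(type='nearest')` replaces the
# default CARAFE upsampler so the sketch runs without the CUDA CARAFE op;
# with a GPU build of mmcv, the default upsample_cfg can be kept instead.
def _demo_fpn_carafe():
    import torch
    in_channels = [64, 128, 256, 512]
    neck = FPN_CARAFE(
        in_channels,
        out_channels=64,
        num_outs=5,
        upsample_cfg=dict(type='nearest'))
    neck.init_weights()
    feats = [
        torch.rand(1, c, 64 >> i, 64 >> i)
        for i, c in enumerate(in_channels)
    ]
    outs = neck(feats)
    assert len(outs) == 5 and all(o.shape[1] == 64 for o in outs)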
| 11,159 | 39.434783 | 79 | py |
ERD | ERD-main/mmdet/models/necks/ct_resnet_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import OptMultiConfig
@MODELS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channels (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
num_deconv_kernels (tuple[int]): Number of kernels per stage.
use_dcn (bool): If True, use DCNv2. Defaults to True.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization
config dict.
"""
def __init__(self,
in_channels: int,
num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...],
use_dcn: bool = True,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channels = in_channels
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(
self, num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...]) -> nn.Sequential:
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channels = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channels,
feat_channels,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channels,
feat_channels,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channels = feat_channels
return nn.Sequential(*layers)
def init_weights(self) -> None:
"""Initialize the parameters."""
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
def forward(self, x: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]:
"""model forward."""
assert isinstance(x, (list, tuple))
outs = self.deconv_layers(x[-1])
return outs,
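# --- usage sketch (added; not part of the original mmdet file) -------------
# Minimal smoke test. `use_dcn=False` swaps the DCNv2 convs for plain convs
# so the sketch runs without the deformable-conv op; the filter/kernel
# choices below are illustrative assumptions. Each deconv stage (kernel 4,
# stride 2, padding 1) doubles the spatial size: 16 -> 32 -> 64 -> 128.
def _demo_ct_resnet_neck():
    import torch
    neck = CTResNetNeck(
        in_channels=512,
        num_deconv_filters=(256, 128, 64),
        num_deconv_kernels=(4, 4, 4),
        use_dcn=False)
    neck.init_weights()
    c5 = torch.rand(1, 512, 16, 16)  # simulated last backbone feature map
    outs = neck((c5, ))  # forward reads x[-1] and returns a 1-tuple
    assert outs[0].shape == (1, 64, 128, 128)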
| 4,038 | 38.213592 | 75 | py |
ERD | ERD-main/mmdet/models/necks/ssh.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
class SSHContextModule(BaseModule):
"""This is an implementation of `SSH context module` described in `SSH:
Single Stage Headless Face Detector.
<https://arxiv.org/pdf/1708.03979.pdf>`_.
Args:
in_channels (int): Number of input channels used at each scale.
out_channels (int): Number of output channels used at each scale.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to dict(type='BN').
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
in_channels: int,
out_channels: int,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
init_cfg: OptMultiConfig = None):
super().__init__(init_cfg=init_cfg)
assert out_channels % 4 == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.conv5x5_1 = ConvModule(
self.in_channels,
self.out_channels // 4,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
)
self.conv5x5_2 = ConvModule(
self.out_channels // 4,
self.out_channels // 4,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.conv7x7_2 = ConvModule(
self.out_channels // 4,
self.out_channels // 4,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
)
self.conv7x7_3 = ConvModule(
self.out_channels // 4,
self.out_channels // 4,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None,
)
def forward(self, x: torch.Tensor) -> tuple:
conv5x5_1 = self.conv5x5_1(x)
conv5x5 = self.conv5x5_2(conv5x5_1)
conv7x7_2 = self.conv7x7_2(conv5x5_1)
conv7x7 = self.conv7x7_3(conv7x7_2)
return (conv5x5, conv7x7)
class SSHDetModule(BaseModule):
"""This is an implementation of `SSH detection module` described in `SSH:
Single Stage Headless Face Detector.
<https://arxiv.org/pdf/1708.03979.pdf>`_.
Args:
in_channels (int): Number of input channels used at each scale.
out_channels (int): Number of output channels used at each scale.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to dict(type='BN').
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
in_channels: int,
out_channels: int,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
init_cfg: OptMultiConfig = None):
super().__init__(init_cfg=init_cfg)
assert out_channels % 4 == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.conv3x3 = ConvModule(
self.in_channels,
self.out_channels // 2,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.context_module = SSHContextModule(
in_channels=self.in_channels,
out_channels=self.out_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
def forward(self, x: torch.Tensor) -> torch.Tensor:
conv3x3 = self.conv3x3(x)
conv5x5, conv7x7 = self.context_module(x)
out = torch.cat([conv3x3, conv5x5, conv7x7], dim=1)
out = F.relu(out)
return out
@MODELS.register_module()
class SSH(BaseModule):
"""`SSH Neck` used in `SSH: Single Stage Headless Face Detector.
<https://arxiv.org/pdf/1708.03979.pdf>`_.
Args:
num_scales (int): The number of scales / stages.
in_channels (list[int]): The number of input channels per scale.
out_channels (list[int]): The number of output channels per scale.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to dict(type='BN').
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [8, 16, 32, 64]
>>> out_channels = [16, 32, 64, 128]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = SSH(num_scales=4, in_channels=in_channels,
... out_channels=out_channels)
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 16, 340, 340])
outputs[1].shape = torch.Size([1, 32, 170, 170])
outputs[2].shape = torch.Size([1, 64, 84, 84])
outputs[3].shape = torch.Size([1, 128, 43, 43])
"""
def __init__(self,
num_scales: int,
in_channels: List[int],
out_channels: List[int],
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super().__init__(init_cfg=init_cfg)
assert (num_scales == len(in_channels) == len(out_channels))
self.num_scales = num_scales
self.in_channels = in_channels
self.out_channels = out_channels
for idx in range(self.num_scales):
in_c, out_c = self.in_channels[idx], self.out_channels[idx]
self.add_module(
f'ssh_module{idx}',
SSHDetModule(
in_channels=in_c,
out_channels=out_c,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
def forward(self, inputs: Tuple[torch.Tensor]) -> tuple:
assert len(inputs) == self.num_scales
outs = []
for idx, x in enumerate(inputs):
ssh_module = getattr(self, f'ssh_module{idx}')
out = ssh_module(x)
outs.append(out)
return tuple(outs)
| 7,456 | 33.364055 | 77 | py |
ERD | ERD-main/mmdet/models/necks/cspnext_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from ..layers import CSPLayer
@MODELS.register_module()
class CSPNeXtPAFPN(BaseModule):
"""Path Aggregation Network with CSPNeXt blocks.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_csp_blocks (int): Number of bottlenecks in CSPLayer.
Defaults to 3.
use_depthwise (bool): Whether to use depthwise separable convolution in
blocks. Defaults to False.
expand_ratio (float): Ratio to adjust the number of channels of the
hidden layer. Default: 0.5
upsample_cfg (dict): Config dict for interpolate layer.
Default: `dict(scale_factor=2, mode='nearest')`
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN')
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish')
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(
self,
in_channels: Sequence[int],
out_channels: int,
num_csp_blocks: int = 3,
use_depthwise: bool = False,
expand_ratio: float = 0.5,
upsample_cfg: ConfigType = dict(scale_factor=2, mode='nearest'),
        conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='Swish'),
init_cfg: OptMultiConfig = dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')
) -> None:
super().__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
# build top-down blocks
self.upsample = nn.Upsample(**upsample_cfg)
self.reduce_layers = nn.ModuleList()
self.top_down_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1, 0, -1):
self.reduce_layers.append(
ConvModule(
in_channels[idx],
in_channels[idx - 1],
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.top_down_blocks.append(
CSPLayer(
in_channels[idx - 1] * 2,
in_channels[idx - 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
use_cspnext_block=True,
expand_ratio=expand_ratio,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
# build bottom-up blocks
self.downsamples = nn.ModuleList()
self.bottom_up_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1):
self.downsamples.append(
conv(
in_channels[idx],
in_channels[idx],
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.bottom_up_blocks.append(
CSPLayer(
in_channels[idx] * 2,
in_channels[idx + 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
use_cspnext_block=True,
expand_ratio=expand_ratio,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.out_convs = nn.ModuleList()
for i in range(len(in_channels)):
self.out_convs.append(
conv(
in_channels[i],
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:
"""
Args:
inputs (tuple[Tensor]): input features.
Returns:
tuple[Tensor]: YOLOXPAFPN features.
"""
assert len(inputs) == len(self.in_channels)
# top-down path
inner_outs = [inputs[-1]]
for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = inputs[idx - 1]
            feat_high = self.reduce_layers[len(self.in_channels) - 1 - idx](
                feat_high)
            inner_outs[0] = feat_high
            upsample_feat = self.upsample(feat_high)
inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
torch.cat([upsample_feat, feat_low], 1))
inner_outs.insert(0, inner_out)
# bottom-up path
outs = [inner_outs[0]]
for idx in range(len(self.in_channels) - 1):
feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](
                torch.cat([downsample_feat, feat_high], 1))
outs.append(out)
# out convs
for idx, conv in enumerate(self.out_convs):
outs[idx] = conv(outs[idx])
return tuple(outs)
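# --- usage sketch (added; not part of the original mmdet file) -------------
# Minimal CSPNeXtPAFPN smoke test; channel widths and spatial sizes are
# illustrative assumptions, not values fixed by this module.
def _demo_cspnext_pafpn():
    import torch
    in_channels = [128, 256, 512]
    neck = CSPNeXtPAFPN(in_channels=in_channels, out_channels=128)
    feats = tuple(
        torch.rand(1, c, 32 >> i, 32 >> i)
        for i, c in enumerate(in_channels))
    outs = neck(feats)
    assert len(outs) == 3 and all(o.shape[1] == 128 for o in outs)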
| 6,178 | 35.134503 | 79 | py |
ERD | ERD-main/mmdet/models/necks/fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, MultiConfig, OptConfigType
@MODELS.register_module()
class FPN(BaseModule):
r"""Feature Pyramid Network.
This is an implementation of paper `Feature Pyramid Networks for Object
Detection <https://arxiv.org/abs/1612.03144>`_.
Args:
in_channels (list[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Defaults to 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Defaults to -1, which means the
last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Defaults to False.
If True, it is equivalent to `add_extra_convs='on_input'`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Defaults to False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Defaults to False.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Defaults to None.
act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
activation layer in ConvModule. Defaults to None.
upsample_cfg (:obj:`ConfigDict` or dict, optional): Config dict
for interpolate layer. Defaults to dict(mode='nearest').
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = FPN(in_channels, 11, len(in_channels)).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
num_outs: int,
start_level: int = 0,
end_level: int = -1,
add_extra_convs: Union[bool, str] = False,
relu_before_extra_convs: bool = False,
no_norm_on_lateral: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
act_cfg: OptConfigType = None,
upsample_cfg: ConfigType = dict(mode='nearest'),
init_cfg: MultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.fp16_enabled = False
self.upsample_cfg = upsample_cfg.copy()
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
assert isinstance(add_extra_convs, (str, bool))
if isinstance(add_extra_convs, str):
# Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
elif add_extra_convs: # True
self.add_extra_convs = 'on_input'
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
act_cfg=act_cfg,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if self.add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.add_extra_convs == 'on_input':
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
def forward(self, inputs: Tuple[Tensor]) -> tuple:
"""Forward function.
Args:
inputs (tuple[Tensor]): Features from the upstream network, each
is a 4D-tensor.
Returns:
tuple: Feature maps, each is a 4D-tensor.
"""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
# In some cases, fixing `scale factor` (e.g. 2) is preferred, but
# it cannot co-exist with `size` in `F.interpolate`.
if 'scale_factor' in self.upsample_cfg:
# fix runtime error of "+=" inplace operation in PyTorch 1.10
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], **self.upsample_cfg)
else:
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], size=prev_shape, **self.upsample_cfg)
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
extra_source = inputs[self.backbone_end_level - 1]
elif self.add_extra_convs == 'on_lateral':
extra_source = laterals[-1]
elif self.add_extra_convs == 'on_output':
extra_source = outs[-1]
else:
raise NotImplementedError
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
| 9,420 | 41.436937 | 79 | py |
ERD | ERD-main/mmdet/models/necks/nas_fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell
from mmengine.model import BaseModule, ModuleList
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
@MODELS.register_module()
class NASFPN(BaseModule):
"""NAS-FPN.
Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture
for Object Detection <https://arxiv.org/abs/1904.07392>`_
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
stack_times (int): The number of times the pyramid architecture will
be stacked.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Defaults to 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Defaults to -1, which means the
last level.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict.
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
num_outs: int,
stack_times: int,
start_level: int = 0,
end_level: int = -1,
norm_cfg: OptConfigType = None,
init_cfg: MultiConfig = dict(type='Caffe2Xavier', layer='Conv2d')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels) # num of input feature levels
self.num_outs = num_outs # num of output feature levels
self.stack_times = stack_times
self.norm_cfg = norm_cfg
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
# add lateral connections
self.lateral_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
norm_cfg=norm_cfg,
act_cfg=None)
self.lateral_convs.append(l_conv)
# add extra downsample layers (stride-2 pooling or conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
extra_conv = ConvModule(
out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
self.extra_downsamples.append(
nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
# add NAS FPN connections
self.fpn_stages = ModuleList()
for _ in range(self.stack_times):
stage = nn.ModuleDict()
# gp(p6, p4) -> p4_1
stage['gp_64_4'] = GlobalPoolingCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p4_1, p4) -> p4_2
stage['sum_44_4'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p4_2, p3) -> p3_out
stage['sum_43_3'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p3_out, p4_2) -> p4_out
stage['sum_34_4'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p5, gp(p4_out, p3_out)) -> p5_out
stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
stage['sum_55_5'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p7, gp(p5_out, p4_2)) -> p7_out
stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
stage['sum_77_7'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# gp(p7_out, p5_out) -> p6_out
stage['gp_75_6'] = GlobalPoolingCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
self.fpn_stages.append(stage)
def forward(self, inputs: Tuple[Tensor]) -> tuple:
"""Forward function.
Args:
inputs (tuple[Tensor]): Features from the upstream network, each
is a 4D-tensor.
Returns:
tuple: Feature maps, each is a 4D-tensor.
"""
# build P3-P5
feats = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build P6-P7 on top of P5
for downsample in self.extra_downsamples:
feats.append(downsample(feats[-1]))
p3, p4, p5, p6, p7 = feats
for stage in self.fpn_stages:
# gp(p6, p4) -> p4_1
p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])
# sum(p4_1, p4) -> p4_2
p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])
# sum(p4_2, p3) -> p3_out
p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])
# sum(p3_out, p4_2) -> p4_out
p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])
# sum(p5, gp(p4_out, p3_out)) -> p5_out
p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])
p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])
# sum(p7, gp(p5_out, p4_2)) -> p7_out
p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])
p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])
# gp(p7_out, p5_out) -> p6_out
p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])
return p3, p4, p5, p6, p7
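# --- usage sketch (added; not part of the original mmdet file) -------------
# Minimal NAS-FPN smoke test: three backbone levels are mapped to P3-P5 and
# P6-P7 are built on top, then the stacked merge cells are applied. The
# channel/size choices are illustrative assumptions.
def _demo_nasfpn():
    import torch
    in_channels = [256, 512, 1024]
    neck = NASFPN(in_channels, out_channels=256, num_outs=5, stack_times=3)
    feats = [
        torch.rand(1, c, 64 >> i, 64 >> i)
        for i, c in enumerate(in_channels)
    ]
    outs = neck(feats)
    assert len(outs) == 5 and all(o.shape[1] == 256 for o in outs)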
| 6,878 | 38.994186 | 79 | py |
ERD | ERD-main/mmdet/models/necks/dyhead.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_activation_layer, build_norm_layer
from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d
from mmengine.model import BaseModule, constant_init, normal_init
from mmdet.registry import MODELS
from ..layers import DyReLU
# Reference:
# https://github.com/microsoft/DynamicHead
# https://github.com/jshilong/SEPC
class DyDCNv2(nn.Module):
"""ModulatedDeformConv2d with normalization layer used in DyHead.
This module cannot be configured with `conv_cfg=dict(type='DCNv2')`
because DyHead calculates offset and mask from middle-level feature.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
stride (int | tuple[int], optional): Stride of the convolution.
Default: 1.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: dict(type='GN', num_groups=16, requires_grad=True).
"""
def __init__(self,
in_channels,
out_channels,
stride=1,
norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)):
super().__init__()
self.with_norm = norm_cfg is not None
bias = not self.with_norm
self.conv = ModulatedDeformConv2d(
in_channels, out_channels, 3, stride=stride, padding=1, bias=bias)
if self.with_norm:
self.norm = build_norm_layer(norm_cfg, out_channels)[1]
def forward(self, x, offset, mask):
"""Forward function."""
x = self.conv(x.contiguous(), offset, mask)
if self.with_norm:
x = self.norm(x)
return x
class DyHeadBlock(nn.Module):
"""DyHead Block with three types of attention.
HSigmoid arguments in default act_cfg follow official code, not paper.
https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
zero_init_offset (bool, optional): Whether to use zero init for
`spatial_conv_offset`. Default: True.
act_cfg (dict, optional): Config dict for the last activation layer of
scale-aware attention. Default: dict(type='HSigmoid', bias=3.0,
divisor=6.0).
"""
def __init__(self,
in_channels,
out_channels,
zero_init_offset=True,
act_cfg=dict(type='HSigmoid', bias=3.0, divisor=6.0)):
super().__init__()
self.zero_init_offset = zero_init_offset
# (offset_x, offset_y, mask) * kernel_size_y * kernel_size_x
self.offset_and_mask_dim = 3 * 3 * 3
self.offset_dim = 2 * 3 * 3
self.spatial_conv_high = DyDCNv2(in_channels, out_channels)
self.spatial_conv_mid = DyDCNv2(in_channels, out_channels)
self.spatial_conv_low = DyDCNv2(in_channels, out_channels, stride=2)
self.spatial_conv_offset = nn.Conv2d(
in_channels, self.offset_and_mask_dim, 3, padding=1)
self.scale_attn_module = nn.Sequential(
nn.AdaptiveAvgPool2d(1), nn.Conv2d(out_channels, 1, 1),
nn.ReLU(inplace=True), build_activation_layer(act_cfg))
self.task_attn_module = DyReLU(out_channels)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, 0, 0.01)
if self.zero_init_offset:
constant_init(self.spatial_conv_offset, 0)
def forward(self, x):
"""Forward function."""
outs = []
for level in range(len(x)):
# calculate offset and mask of DCNv2 from middle-level feature
offset_and_mask = self.spatial_conv_offset(x[level])
offset = offset_and_mask[:, :self.offset_dim, :, :]
mask = offset_and_mask[:, self.offset_dim:, :, :].sigmoid()
mid_feat = self.spatial_conv_mid(x[level], offset, mask)
sum_feat = mid_feat * self.scale_attn_module(mid_feat)
summed_levels = 1
if level > 0:
low_feat = self.spatial_conv_low(x[level - 1], offset, mask)
sum_feat += low_feat * self.scale_attn_module(low_feat)
summed_levels += 1
if level < len(x) - 1:
# this upsample order is weird, but faster than natural order
# https://github.com/microsoft/DynamicHead/issues/25
high_feat = F.interpolate(
self.spatial_conv_high(x[level + 1], offset, mask),
size=x[level].shape[-2:],
mode='bilinear',
align_corners=True)
sum_feat += high_feat * self.scale_attn_module(high_feat)
summed_levels += 1
outs.append(self.task_attn_module(sum_feat / summed_levels))
return outs
@MODELS.register_module()
class DyHead(BaseModule):
"""DyHead neck consisting of multiple DyHead Blocks.
See `Dynamic Head: Unifying Object Detection Heads with Attentions
<https://arxiv.org/abs/2106.08322>`_ for details.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
num_blocks (int, optional): Number of DyHead Blocks. Default: 6.
zero_init_offset (bool, optional): Whether to use zero init for
`spatial_conv_offset`. Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
num_blocks=6,
zero_init_offset=True,
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_blocks = num_blocks
self.zero_init_offset = zero_init_offset
dyhead_blocks = []
for i in range(num_blocks):
in_channels = self.in_channels if i == 0 else self.out_channels
dyhead_blocks.append(
DyHeadBlock(
in_channels,
self.out_channels,
zero_init_offset=zero_init_offset))
self.dyhead_blocks = nn.Sequential(*dyhead_blocks)
def forward(self, inputs):
"""Forward function."""
assert isinstance(inputs, (tuple, list))
outs = self.dyhead_blocks(inputs)
return tuple(outs)
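# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# A minimal smoke test for DyHead over five FPN levels with halving
# resolution. Note that DyDCNv2 relies on mmcv's modulated deformable
# conv op, which may require a compiled mmcv build.
if __name__ == '__main__':
    import torch
    neck = DyHead(in_channels=256, out_channels=256, num_blocks=2)
    feats = [torch.rand(1, 256, 2**(6 - i), 2**(6 - i)) for i in range(5)]
    outs = neck(feats)
    assert len(outs) == 5 and outs[0].shape == feats[0].shape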
| 6,859 | 38.425287 | 78 | py |
ERD | ERD-main/mmdet/models/necks/bfp.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
    BFP takes multi-level features as inputs, gathers them into a single
    one, then refines the gathered feature and scatters the refined results
    back to the multi-level features. This module is used in Libra R-CNN
    (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
        refine_level (int): Index of the level whose resolution is used to
            gather and refine the multi-level features, counted from bottom
            to top.
refine_type (str): Type of the refine op, currently support
[None, 'conv', 'non_local'].
conv_cfg (:obj:`ConfigDict` or dict, optional): The config dict for
convolution layers.
norm_cfg (:obj:`ConfigDict` or dict, optional): The config dict for
normalization layers.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or
dict], optional): Initialization config dict.
"""
def __init__(
self,
in_channels: int,
num_levels: int,
refine_level: int = 2,
refine_type: str = None,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
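# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# BFP preserves the input shapes, so it can be dropped between any FPN
# and a detection head.
if __name__ == '__main__':
    import torch
    neck = BFP(in_channels=256, num_levels=5, refine_level=2,
               refine_type='conv')
    feats = [torch.rand(1, 256, 2**(6 - i), 2**(6 - i)) for i in range(5)]
    outs = neck(tuple(feats))
    assert all(o.shape == f.shape for o, f in zip(outs, feats))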
| 4,090 | 35.526786 | 79 | py |
ERD | ERD-main/mmdet/models/necks/yolo_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from typing import List, Tuple
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
class DetectionBlock(BaseModule):
"""Detection block in YOLO neck.
    Let out_channels = n; a full detection branch contains six ConvLayers,
    one Conv2D layer and one YoloLayer. The first six ConvLayers are shaped
    1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, and the Conv2D layer is
    1x1x255. Some blocks branch off after the fifth ConvLayer; accordingly,
    this module only builds the first five ConvLayers. The input channel
    count is arbitrary (in_channels).
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels: int,
out_channels: int,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
act_cfg: ConfigType = dict(
type='LeakyReLU', negative_slope=0.1),
init_cfg: OptMultiConfig = None) -> None:
super(DetectionBlock, self).__init__(init_cfg)
double_out_channels = out_channels * 2
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
self.conv2 = ConvModule(
out_channels, double_out_channels, 3, padding=1, **cfg)
self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
self.conv4 = ConvModule(
out_channels, double_out_channels, 3, padding=1, **cfg)
self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)
def forward(self, x: Tensor) -> Tensor:
tmp = self.conv1(x)
tmp = self.conv2(tmp)
tmp = self.conv3(tmp)
tmp = self.conv4(tmp)
out = self.conv5(tmp)
return out
@MODELS.register_module()
class YOLOV3Neck(BaseModule):
"""The neck of YOLOV3.
    It can be treated as a simplified version of FPN. It takes the features
    from the Darknet backbone, applies upsampling and concatenation, and
    finally outputs the feature maps used for detection.
    Note:
        The input feats should be ordered from low level to high level
        (i.e., from high resolution to low resolution). YOLOV3Neck
        processes them in reverse, starting from the last (high-level)
        feature map.
Args:
num_scales (int): The number of scales / stages.
in_channels (List[int]): The number of input channels per scale.
out_channels (List[int]): The number of output channels per scale.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None.
norm_cfg (dict, optional): Dictionary to construct and config norm
layer. Default: dict(type='BN', requires_grad=True)
act_cfg (dict, optional): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_scales: int,
in_channels: List[int],
out_channels: List[int],
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
act_cfg: ConfigType = dict(
type='LeakyReLU', negative_slope=0.1),
init_cfg: OptMultiConfig = None) -> None:
super(YOLOV3Neck, self).__init__(init_cfg)
assert (num_scales == len(in_channels) == len(out_channels))
self.num_scales = num_scales
self.in_channels = in_channels
self.out_channels = out_channels
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
# To support arbitrary scales, the code looks awful, but it works.
# Better solution is welcomed.
self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
for i in range(1, self.num_scales):
in_c, out_c = self.in_channels[i], self.out_channels[i]
inter_c = out_channels[i - 1]
self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg))
# in_c + out_c : High-lvl feats will be cat with low-lvl feats
self.add_module(f'detect{i+1}',
DetectionBlock(in_c + out_c, out_c, **cfg))
    def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
assert len(feats) == self.num_scales
# processed from bottom (high-lvl) to top (low-lvl)
outs = []
out = self.detect1(feats[-1])
outs.append(out)
for i, x in enumerate(reversed(feats[:-1])):
conv = getattr(self, f'conv{i+1}')
tmp = conv(out)
# Cat with low-lvl feats
tmp = F.interpolate(tmp, scale_factor=2)
tmp = torch.cat((tmp, x), 1)
detect = getattr(self, f'detect{i+2}')
out = detect(tmp)
outs.append(out)
return tuple(outs)
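# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# The channel layout mirrors the stock Darknet-53 setting: feats ordered
# from the high-resolution (low-level) map to the low-resolution
# (high-level) one.
if __name__ == '__main__':
    import torch
    neck = YOLOV3Neck(
        num_scales=3,
        in_channels=[1024, 512, 256],
        out_channels=[512, 256, 128])
    feats = (
        torch.rand(1, 256, 64, 64),   # stride 8
        torch.rand(1, 512, 32, 32),   # stride 16
        torch.rand(1, 1024, 16, 16),  # stride 32
    )
    outs = neck(feats)
    # outs are ordered high-level first: channels (512, 256, 128)
    assert [o.shape[1] for o in outs] == [512, 256, 128]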
| 5,835 | 38.972603 | 77 | py |
ERD | ERD-main/mmdet/models/necks/channel_mapper.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class ChannelMapper(BaseModule):
"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Default: None.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Default: None.
act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
activation layer in ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. Extra
            convs will be added when num_outs is larger than the length of
            in_channels.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or dict],
optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
kernel_size: int = 3,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
act_cfg: OptConfigType = dict(type='ReLU'),
num_outs: int = None,
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
if num_outs > len(in_channels):
self.extra_convs = nn.ModuleList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvModule(
in_channel,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
| 4,262 | 38.841121 | 79 | py |
ERD | ERD-main/mmdet/models/necks/hrfpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch.utils.checkpoint import checkpoint
from mmdet.registry import MODELS
@MODELS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
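# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# HRNet-W18-like branch channels. Branch i is expected at 1/2**i of the
# first branch's resolution, so the bilinear upsampling above aligns all
# branches before concatenation.
if __name__ == '__main__':
    import torch
    neck = HRFPN(in_channels=[18, 36, 72, 144], out_channels=256)
    inputs = [
        torch.rand(1, c, 64 // 2**i, 64 // 2**i)
        for i, c in enumerate([18, 36, 72, 144])
    ]
    outs = neck(inputs)
    # num_outs=5 pyramid levels: 64, 32, 16, 8 and 4 pixels wide.
    assert [o.shape[-1] for o in outs] == [64, 32, 16, 8, 4]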
| 3,519 | 33.851485 | 79 | py |
ERD | ERD-main/mmdet/models/layers/csp_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .se_layer import ChannelAttention
class DarknetBottleneck(BaseModule):
"""The basic bottleneck block used in Darknet.
Each ResBlock consists of two ConvModules and the input is added to the
final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
The first convLayer has filter size of 1x1 and the second one has the
filter size of 3x3.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
        expansion (float): The expand ratio of the hidden channel.
            Defaults to 0.5.
add_identity (bool): Whether to add identity to the out.
Defaults to True.
use_depthwise (bool): Whether to use depthwise separable convolution.
Defaults to False.
conv_cfg (dict): Config dict for convolution layer. Defaults to None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Defaults to dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Defaults to dict(type='Swish').
"""
def __init__(self,
in_channels: int,
out_channels: int,
expansion: float = 0.5,
add_identity: bool = True,
use_depthwise: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='Swish'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
hidden_channels = int(out_channels * expansion)
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
self.conv1 = ConvModule(
in_channels,
hidden_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = conv(
hidden_channels,
out_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.add_identity = \
add_identity and in_channels == out_channels
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.add_identity:
return out + identity
else:
return out
class CSPNeXtBlock(BaseModule):
"""The basic bottleneck block used in CSPNeXt.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
expansion (float): Expand ratio of the hidden channel. Defaults to 0.5.
add_identity (bool): Whether to add identity to the out. Only works
when in_channels == out_channels. Defaults to True.
use_depthwise (bool): Whether to use depthwise separable convolution.
Defaults to False.
kernel_size (int): The kernel size of the second convolution layer.
Defaults to 5.
conv_cfg (dict): Config dict for convolution layer. Defaults to None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Defaults to dict(type='BN', momentum=0.03, eps=0.001).
act_cfg (dict): Config dict for activation layer.
Defaults to dict(type='SiLU').
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
in_channels: int,
out_channels: int,
expansion: float = 0.5,
add_identity: bool = True,
use_depthwise: bool = False,
kernel_size: int = 5,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='SiLU'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
hidden_channels = int(out_channels * expansion)
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
self.conv1 = conv(
in_channels,
hidden_channels,
3,
stride=1,
padding=1,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = DepthwiseSeparableConvModule(
hidden_channels,
out_channels,
kernel_size,
stride=1,
padding=kernel_size // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.add_identity = \
add_identity and in_channels == out_channels
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.add_identity:
return out + identity
else:
return out
class CSPLayer(BaseModule):
"""Cross Stage Partial Layer.
Args:
in_channels (int): The input channels of the CSP layer.
out_channels (int): The output channels of the CSP layer.
expand_ratio (float): Ratio to adjust the number of channels of the
hidden layer. Defaults to 0.5.
num_blocks (int): Number of blocks. Defaults to 1.
add_identity (bool): Whether to add identity in blocks.
Defaults to True.
use_cspnext_block (bool): Whether to use CSPNeXt block.
Defaults to False.
use_depthwise (bool): Whether to use depthwise separable convolution in
blocks. Defaults to False.
channel_attention (bool): Whether to add channel attention in each
stage. Defaults to True.
conv_cfg (dict, optional): Config dict for convolution layer.
Defaults to None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Defaults to dict(type='BN')
act_cfg (dict): Config dict for activation layer.
Defaults to dict(type='Swish')
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
in_channels: int,
out_channels: int,
expand_ratio: float = 0.5,
num_blocks: int = 1,
add_identity: bool = True,
use_depthwise: bool = False,
use_cspnext_block: bool = False,
channel_attention: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='Swish'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
block = CSPNeXtBlock if use_cspnext_block else DarknetBottleneck
mid_channels = int(out_channels * expand_ratio)
self.channel_attention = channel_attention
self.main_conv = ConvModule(
in_channels,
mid_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.short_conv = ConvModule(
in_channels,
mid_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.final_conv = ConvModule(
2 * mid_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.blocks = nn.Sequential(*[
block(
mid_channels,
mid_channels,
1.0,
add_identity,
use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg) for _ in range(num_blocks)
])
if channel_attention:
self.attention = ChannelAttention(2 * mid_channels)
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
x_short = self.short_conv(x)
x_main = self.main_conv(x)
x_main = self.blocks(x_main)
x_final = torch.cat((x_main, x_short), dim=1)
if self.channel_attention:
x_final = self.attention(x_final)
return self.final_conv(x_final)
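# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# A CSP layer splits the input into a shortcut path and a stack of
# bottleneck blocks, then fuses both paths with a 1x1 conv.
if __name__ == '__main__':
    import torch
    layer = CSPLayer(in_channels=64, out_channels=128, num_blocks=2)
    x = torch.rand(1, 64, 32, 32)
    out = layer(x)
    assert out.shape == (1, 128, 32, 32)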
| 9,136 | 35.991903 | 79 | py |
ERD | ERD-main/mmdet/models/layers/msdeformattn_pixel_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Conv2d, ConvModule
from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention
from mmengine.model import (BaseModule, ModuleList, caffe2_xavier_init,
normal_init, xavier_init)
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from ..task_modules.prior_generators import MlvlPointGenerator
from .positional_encoding import SinePositionalEncoding
from .transformer import Mask2FormerTransformerEncoder
@MODELS.register_module()
class MSDeformAttnPixelDecoder(BaseModule):
"""Pixel decoder with multi-scale deformable attention.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
strides (list[int] | tuple[int]): Output strides of feature from
backbone.
feat_channels (int): Number of channels for feature.
out_channels (int): Number of channels for output.
num_outs (int): Number of output scales.
norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`ConfigDict` or dict): Config for activation.
Defaults to dict(type='ReLU').
encoder (:obj:`ConfigDict` or dict): Config for transformer
encoder. Defaults to None.
positional_encoding (:obj:`ConfigDict` or dict): Config for
transformer encoder position encoding. Defaults to
dict(num_feats=128, normalize=True).
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
in_channels: Union[List[int],
Tuple[int]] = [256, 512, 1024, 2048],
strides: Union[List[int], Tuple[int]] = [4, 8, 16, 32],
feat_channels: int = 256,
out_channels: int = 256,
num_outs: int = 3,
norm_cfg: ConfigType = dict(type='GN', num_groups=32),
act_cfg: ConfigType = dict(type='ReLU'),
encoder: ConfigType = None,
positional_encoding: ConfigType = dict(
num_feats=128, normalize=True),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.strides = strides
self.num_input_levels = len(in_channels)
self.num_encoder_levels = \
encoder.layer_cfg.self_attn_cfg.num_levels
assert self.num_encoder_levels >= 1, \
'num_levels in attn_cfgs must be at least one'
input_conv_list = []
# from top to down (low to high resolution)
for i in range(self.num_input_levels - 1,
self.num_input_levels - self.num_encoder_levels - 1,
-1):
input_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
norm_cfg=norm_cfg,
act_cfg=None,
bias=True)
input_conv_list.append(input_conv)
self.input_convs = ModuleList(input_conv_list)
self.encoder = Mask2FormerTransformerEncoder(**encoder)
        self.positional_encoding = SinePositionalEncoding(**positional_encoding)
# high resolution to low resolution
self.level_encoding = nn.Embedding(self.num_encoder_levels,
feat_channels)
# fpn-like structure
self.lateral_convs = ModuleList()
self.output_convs = ModuleList()
self.use_bias = norm_cfg is None
# from top to down (low to high resolution)
# fpn for the rest features that didn't pass in encoder
for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,
-1):
lateral_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=None)
output_conv = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.lateral_convs.append(lateral_conv)
self.output_convs.append(output_conv)
self.mask_feature = Conv2d(
feat_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.num_outs = num_outs
self.point_generator = MlvlPointGenerator(strides)
def init_weights(self) -> None:
"""Initialize weights."""
for i in range(0, self.num_encoder_levels):
xavier_init(
self.input_convs[i].conv,
gain=1,
bias=0,
distribution='uniform')
for i in range(0, self.num_input_levels - self.num_encoder_levels):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
normal_init(self.level_encoding, mean=0, std=1)
for p in self.encoder.parameters():
if p.dim() > 1:
nn.init.xavier_normal_(p)
# init_weights defined in MultiScaleDeformableAttention
for m in self.encoder.layers.modules():
if isinstance(m, MultiScaleDeformableAttention):
m.init_weights()
def forward(self, feats: List[Tensor]) -> Tuple[Tensor, Tensor]:
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
Returns:
tuple: A tuple containing the following:
- mask_feature (Tensor): shape (batch_size, c, h, w).
- multi_scale_features (list[Tensor]): Multi scale \
features, each in shape (batch_size, c, h, w).
"""
# generate padding mask for each level, for each image
batch_size = feats[0].shape[0]
encoder_input_list = []
padding_mask_list = []
level_positional_encoding_list = []
spatial_shapes = []
reference_points_list = []
for i in range(self.num_encoder_levels):
level_idx = self.num_input_levels - i - 1
feat = feats[level_idx]
feat_projected = self.input_convs[i](feat)
h, w = feat.shape[-2:]
# no padding
padding_mask_resized = feat.new_zeros(
(batch_size, ) + feat.shape[-2:], dtype=torch.bool)
            pos_embed = self.positional_encoding(padding_mask_resized)
level_embed = self.level_encoding.weight[i]
level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed
# (h_i * w_i, 2)
reference_points = self.point_generator.single_level_grid_priors(
feat.shape[-2:], level_idx, device=feat.device)
# normalize
factor = feat.new_tensor([[w, h]]) * self.strides[level_idx]
reference_points = reference_points / factor
# shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c)
feat_projected = feat_projected.flatten(2).permute(0, 2, 1)
level_pos_embed = level_pos_embed.flatten(2).permute(0, 2, 1)
padding_mask_resized = padding_mask_resized.flatten(1)
encoder_input_list.append(feat_projected)
padding_mask_list.append(padding_mask_resized)
level_positional_encoding_list.append(level_pos_embed)
spatial_shapes.append(feat.shape[-2:])
reference_points_list.append(reference_points)
        # shape (batch_size, total_num_queries),
        # total_num_queries = sum([..., h_i * w_i, ...])
padding_masks = torch.cat(padding_mask_list, dim=1)
# shape (total_num_queries, batch_size, c)
encoder_inputs = torch.cat(encoder_input_list, dim=1)
level_positional_encodings = torch.cat(
level_positional_encoding_list, dim=1)
device = encoder_inputs.device
# shape (num_encoder_levels, 2), from low
# resolution to high resolution
spatial_shapes = torch.as_tensor(
spatial_shapes, dtype=torch.long, device=device)
# shape (0, h_0*w_0, h_0*w_0+h_1*w_1, ...)
level_start_index = torch.cat((spatial_shapes.new_zeros(
(1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
reference_points = torch.cat(reference_points_list, dim=0)
reference_points = reference_points[None, :, None].repeat(
batch_size, 1, self.num_encoder_levels, 1)
        valid_ratios = reference_points.new_ones(
            (batch_size, self.num_encoder_levels, 2))
# shape (num_total_queries, batch_size, c)
memory = self.encoder(
query=encoder_inputs,
query_pos=level_positional_encodings,
key_padding_mask=padding_masks,
spatial_shapes=spatial_shapes,
reference_points=reference_points,
level_start_index=level_start_index,
            valid_ratios=valid_ratios)
# (batch_size, c, num_total_queries)
memory = memory.permute(0, 2, 1)
# from low resolution to high resolution
num_queries_per_level = [e[0] * e[1] for e in spatial_shapes]
outs = torch.split(memory, num_queries_per_level, dim=-1)
outs = [
x.reshape(batch_size, -1, spatial_shapes[i][0],
spatial_shapes[i][1]) for i, x in enumerate(outs)
]
for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,
-1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + F.interpolate(
outs[-1],
size=cur_feat.shape[-2:],
mode='bilinear',
align_corners=False)
y = self.output_convs[i](y)
outs.append(y)
multi_scale_features = outs[:self.num_outs]
mask_feature = self.mask_feature(outs[-1])
return mask_feature, multi_scale_features
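# --- Configuration sketch (added for illustration; not upstream code) ---
# This decoder is typically built with a Mask2Former-style encoder config;
# the exact field values below are assumptions to verify against your
# mmdet/mmcv version:
#   pixel_decoder = MSDeformAttnPixelDecoder(
#       in_channels=[256, 512, 1024, 2048],
#       strides=[4, 8, 16, 32],
#       feat_channels=256,
#       out_channels=256,
#       num_outs=3,
#       encoder=dict(
#           num_layers=6,
#           layer_cfg=dict(
#               self_attn_cfg=dict(embed_dims=256, num_heads=8,
#                                  num_levels=3, num_points=4,
#                                  batch_first=True),
#               ffn_cfg=dict(embed_dims=256,
#                            feedforward_channels=1024))),
#       positional_encoding=dict(num_feats=128, normalize=True))
#   mask_feature, multi_scale_features = pixel_decoder(backbone_feats)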
| 10,613 | 41.798387 | 79 | py |
ERD | ERD-main/mmdet/models/layers/dropblock.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
eps = 1e-6
@MODELS.register_module()
class DropBlock(nn.Module):
"""Randomly drop some regions of feature maps.
Please refer to the method proposed in `DropBlock
<https://arxiv.org/abs/1810.12890>`_ for details.
Args:
drop_prob (float): The probability of dropping each block.
block_size (int): The size of dropped blocks.
warmup_iters (int): The drop probability will linearly increase
from `0` to `drop_prob` during the first `warmup_iters` iterations.
Default: 2000.
"""
def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
super(DropBlock, self).__init__()
assert block_size % 2 == 1
assert 0 < drop_prob <= 1
assert warmup_iters >= 0
self.drop_prob = drop_prob
self.block_size = block_size
self.warmup_iters = warmup_iters
self.iter_cnt = 0
def forward(self, x):
"""
Args:
x (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
if not self.training:
return x
self.iter_cnt += 1
N, C, H, W = list(x.shape)
gamma = self._compute_gamma((H, W))
mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)
mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
mask = F.pad(mask, [self.block_size // 2] * 4, value=0)
mask = F.max_pool2d(
input=mask,
stride=(1, 1),
kernel_size=(self.block_size, self.block_size),
padding=self.block_size // 2)
mask = 1 - mask
x = x * mask * mask.numel() / (eps + mask.sum())
return x
def _compute_gamma(self, feat_size):
"""Compute the value of gamma according to paper. gamma is the
parameter of bernoulli distribution, which controls the number of
features to drop.
gamma = (drop_prob * fm_area) / (drop_area * keep_area)
Args:
feat_size (tuple[int, int]): The height and width of feature map.
Returns:
float: The value of gamma.
"""
gamma = (self.drop_prob * feat_size[0] * feat_size[1])
gamma /= ((feat_size[0] - self.block_size + 1) *
(feat_size[1] - self.block_size + 1))
gamma /= (self.block_size**2)
factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /
self.warmup_iters)
return gamma * factor
def extra_repr(self):
return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, '
f'warmup_iters={self.warmup_iters}')
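# --- Worked example (added for illustration; not part of upstream mmdet) ---
# For drop_prob=0.1, block_size=3 on a 32x32 map:
# gamma = 0.1 * 32 * 32 / (30 * 30) / 9 ~= 0.0126, i.e. about 1.3% of the
# candidate centers are dropped, each wiping out a 3x3 region.
if __name__ == '__main__':
    import torch
    layer = DropBlock(drop_prob=0.1, block_size=3, warmup_iters=0)
    layer.train()  # DropBlock is an identity mapping in eval mode
    x = torch.rand(2, 8, 32, 32)
    out = layer(x)
    assert out.shape == x.shape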
| 2,918 | 32.551724 | 79 | py |
ERD | ERD-main/mmdet/models/layers/se_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmengine.utils import digit_version, is_tuple_of
from torch import Tensor
from mmdet.utils import MultiConfig, OptConfigType, OptMultiConfig
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
``int(channels/ratio)``. Defaults to 16.
conv_cfg (None or dict): Config dict for convolution layer.
Defaults to None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Defaults to (dict(type='ReLU'), dict(type='Sigmoid'))
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None
"""
def __init__(self,
channels: int,
ratio: int = 16,
conv_cfg: OptConfigType = None,
act_cfg: MultiConfig = (dict(type='ReLU'),
dict(type='Sigmoid')),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=int(channels / ratio),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=int(channels / ratio),
out_channels=channels,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x: Tensor) -> Tensor:
"""Forward function for SELayer."""
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
class DyReLU(BaseModule):
"""Dynamic ReLU (DyReLU) module.
See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.
Current implementation is specialized for task-aware attention in DyHead.
HSigmoid arguments in default act_cfg follow DyHead official code.
https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py
Args:
channels (int): The input (and output) channels of DyReLU module.
ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module,
the intermediate channel will be ``int(channels/ratio)``.
Defaults to 4.
conv_cfg (None or dict): Config dict for convolution layer.
Defaults to None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Defaults to (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
divisor=6.0))
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None
"""
def __init__(self,
channels: int,
ratio: int = 4,
conv_cfg: OptConfigType = None,
act_cfg: MultiConfig = (dict(type='ReLU'),
dict(
type='HSigmoid',
bias=3.0,
divisor=6.0)),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert is_tuple_of(act_cfg, dict)
self.channels = channels
self.expansion = 4 # for a1, b1, a2, b2
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=int(channels / ratio),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=int(channels / ratio),
out_channels=channels * self.expansion,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
coeffs = self.global_avgpool(x)
coeffs = self.conv1(coeffs)
coeffs = self.conv2(coeffs) - 0.5 # value range: [-0.5, 0.5]
a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1)
a1 = a1 * 2.0 + 1.0 # [-1.0, 1.0] + 1.0
a2 = a2 * 2.0 # [-1.0, 1.0]
out = torch.max(x * a1 + b1, x * a2 + b2)
return out
class ChannelAttention(BaseModule):
"""Channel attention Module.
Args:
channels (int): The input (and output) channels of the attention layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Defaults to None
"""
def __init__(self, channels: int, init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True)
if digit_version(torch.__version__) < (1, 7, 0):
self.act = nn.Hardsigmoid()
else:
self.act = nn.Hardsigmoid(inplace=True)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for ChannelAttention."""
with torch.cuda.amp.autocast(enabled=False):
out = self.global_avgpool(x)
out = self.fc(out)
out = self.act(out)
return x * out
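# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# DyReLU computes four per-channel coefficients (a1, b1, a2, b2) from
# global context and takes the elementwise max of the two resulting
# linear functions of x.
if __name__ == '__main__':
    import torch
    layer = DyReLU(channels=64)
    x = torch.rand(2, 64, 8, 8)
    out = layer(x)
    assert out.shape == x.shape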
| 6,523 | 39.02454 | 79 | py |
ERD | ERD-main/mmdet/models/layers/normed_predictor.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.utils import digit_version
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
        power (float, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
"""
def __init__(self,
*args,
tempearture: float = 20,
                 power: float = 1.0,
eps: float = 1e-6,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self) -> None:
"""Initialize the weights."""
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedLinear`."""
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
        power (float, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Defaults to False.
"""
def __init__(self,
*args,
tempearture: float = 20,
                 power: float = 1.0,
eps: float = 1e-6,
norm_over_kernel: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedConv2d`."""
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
            if digit_version(torch.__version__) >= digit_version('1.8'):
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
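# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# With power=1, the layer computes a temperature-scaled cosine similarity
# between the L2-normalized input rows and weight rows (plus bias).
if __name__ == '__main__':
    import torch
    m = NormedLinear(16, 8, tempearture=20)
    x = torch.rand(4, 16)
    out = m(x)
    assert out.shape == (4, 8)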
| 3,343 | 32.777778 | 77 | py |
ERD | ERD-main/mmdet/models/layers/pixel_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Conv2d, ConvModule
from mmengine.model import BaseModule, ModuleList, caffe2_xavier_init
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from .positional_encoding import SinePositionalEncoding
from .transformer import DetrTransformerEncoder
@MODELS.register_module()
class PixelDecoder(BaseModule):
"""Pixel decoder with a structure like fpn.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
feat_channels (int): Number channels for feature.
out_channels (int): Number channels for output.
norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`ConfigDict` or dict): Config for activation.
Defaults to dict(type='ReLU').
        encoder (:obj:`ConfigDict` or dict): Config for transformer
            encoder. Defaults to None.
positional_encoding (:obj:`ConfigDict` or dict): Config for
transformer encoder position encoding. Defaults to
dict(type='SinePositionalEncoding', num_feats=128,
normalize=True).
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
in_channels: Union[List[int], Tuple[int]],
feat_channels: int,
out_channels: int,
norm_cfg: ConfigType = dict(type='GN', num_groups=32),
act_cfg: ConfigType = dict(type='ReLU'),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.num_inputs = len(in_channels)
self.lateral_convs = ModuleList()
self.output_convs = ModuleList()
self.use_bias = norm_cfg is None
for i in range(0, self.num_inputs - 1):
lateral_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=None)
output_conv = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.lateral_convs.append(lateral_conv)
self.output_convs.append(output_conv)
self.last_feat_conv = ConvModule(
in_channels[-1],
feat_channels,
kernel_size=3,
padding=1,
stride=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.mask_feature = Conv2d(
feat_channels, out_channels, kernel_size=3, stride=1, padding=1)
def init_weights(self) -> None:
"""Initialize weights."""
for i in range(0, self.num_inputs - 2):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
caffe2_xavier_init(self.last_feat_conv, bias=0)
def forward(self, feats: List[Tensor],
batch_img_metas: List[dict]) -> Tuple[Tensor, Tensor]:
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
batch_img_metas (list[dict]): List of image information.
Pass in for creating more accurate padding mask. Not
used here.
Returns:
tuple[Tensor, Tensor]: a tuple containing the following:
- mask_feature (Tensor): Shape (batch_size, c, h, w).
- memory (Tensor): Output of last stage of backbone.\
Shape (batch_size, c, h, w).
"""
y = self.last_feat_conv(feats[-1])
for i in range(self.num_inputs - 2, -1, -1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + \
F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')
y = self.output_convs[i](y)
mask_feature = self.mask_feature(y)
memory = feats[-1]
return mask_feature, memory
@MODELS.register_module()
class TransformerEncoderPixelDecoder(PixelDecoder):
"""Pixel decoder with transormer encoder inside.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
feat_channels (int): Number channels for feature.
out_channels (int): Number channels for output.
norm_cfg (:obj:`ConfigDict` or dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`ConfigDict` or dict): Config for activation.
Defaults to dict(type='ReLU').
encoder (:obj:`ConfigDict` or dict): Config for transformer encoder.
Defaults to None.
positional_encoding (:obj:`ConfigDict` or dict): Config for
transformer encoder position encoding. Defaults to
dict(num_feats=128, normalize=True).
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
in_channels: Union[List[int], Tuple[int]],
feat_channels: int,
out_channels: int,
norm_cfg: ConfigType = dict(type='GN', num_groups=32),
act_cfg: ConfigType = dict(type='ReLU'),
encoder: ConfigType = None,
positional_encoding: ConfigType = dict(
num_feats=128, normalize=True),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
in_channels=in_channels,
feat_channels=feat_channels,
out_channels=out_channels,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
init_cfg=init_cfg)
self.last_feat_conv = None
self.encoder = DetrTransformerEncoder(**encoder)
self.encoder_embed_dims = self.encoder.embed_dims
        assert self.encoder_embed_dims == feat_channels, \
            'embed_dims({}) of transformer encoder must equal ' \
            'feat_channels({})'.format(self.encoder_embed_dims, feat_channels)
self.positional_encoding = SinePositionalEncoding(
**positional_encoding)
self.encoder_in_proj = Conv2d(
in_channels[-1], feat_channels, kernel_size=1)
self.encoder_out_proj = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def init_weights(self) -> None:
"""Initialize weights."""
for i in range(0, self.num_inputs - 2):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
caffe2_xavier_init(self.encoder_in_proj, bias=0)
caffe2_xavier_init(self.encoder_out_proj.conv, bias=0)
for p in self.encoder.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, feats: List[Tensor],
batch_img_metas: List[dict]) -> Tuple[Tensor, Tensor]:
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
batch_img_metas (list[dict]): List of image information. Pass in
for creating more accurate padding mask.
Returns:
tuple: a tuple containing the following:
- mask_feature (Tensor): shape (batch_size, c, h, w).
- memory (Tensor): shape (batch_size, c, h, w).
"""
feat_last = feats[-1]
bs, c, h, w = feat_last.shape
input_img_h, input_img_w = batch_img_metas[0]['batch_input_shape']
padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w),
dtype=torch.float32)
for i in range(bs):
img_h, img_w = batch_img_metas[i]['img_shape']
padding_mask[i, :img_h, :img_w] = 0
padding_mask = F.interpolate(
padding_mask.unsqueeze(1),
size=feat_last.shape[-2:],
mode='nearest').to(torch.bool).squeeze(1)
pos_embed = self.positional_encoding(padding_mask)
feat_last = self.encoder_in_proj(feat_last)
# (batch_size, c, h, w) -> (batch_size, num_queries, c)
feat_last = feat_last.flatten(2).permute(0, 2, 1)
pos_embed = pos_embed.flatten(2).permute(0, 2, 1)
# (batch_size, h, w) -> (batch_size, h*w)
padding_mask = padding_mask.flatten(1)
memory = self.encoder(
query=feat_last,
query_pos=pos_embed,
key_padding_mask=padding_mask)
# (batch_size, num_queries, c) -> (batch_size, c, h, w)
memory = memory.permute(0, 2, 1).view(bs, self.encoder_embed_dims, h,
w)
y = self.encoder_out_proj(memory)
for i in range(self.num_inputs - 2, -1, -1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + \
F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')
y = self.output_convs[i](y)
mask_feature = self.mask_feature(y)
return mask_feature, memory
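# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# A minimal smoke test for the plain FPN-style PixelDecoder above;
# `batch_img_metas` is accepted but unused by PixelDecoder itself.
if __name__ == '__main__':
    import torch
    decoder = PixelDecoder(
        in_channels=[64, 128, 256, 512],
        feat_channels=128,
        out_channels=128)
    feats = [
        torch.rand(1, c, 64 // 2**i, 64 // 2**i)
        for i, c in enumerate([64, 128, 256, 512])
    ]
    mask_feature, memory = decoder(feats, batch_img_metas=[dict()])
    assert mask_feature.shape == (1, 128, 64, 64)
    assert memory.shape == feats[-1].shape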
| 10,136 | 39.548 | 79 | py |
ERD | ERD-main/mmdet/models/layers/conv_upsample.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule, ModuleList
class ConvUpsample(BaseModule):
"""ConvUpsample performs 2x upsampling after Conv.
There are several `ConvModule` layers. In the first few layers, upsampling
    will be applied after each layer of convolution. The number of
    upsampling operations must be no more than the number of ConvModule
    layers.
Args:
in_channels (int): Number of channels in the input feature map.
inner_channels (int): Number of channels produced by the convolution.
num_layers (int): Number of convolution layers.
num_upsample (int | optional): Number of upsampling layer. Must be no
more than num_layers. Upsampling will be applied after the first
``num_upsample`` layers of convolution. Default: ``num_layers``.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer. Default: None.
init_cfg (dict): Config dict for initialization. Default: None.
kwargs (key word augments): Other augments used in ConvModule.
"""
def __init__(self,
in_channels,
inner_channels,
num_layers=1,
num_upsample=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
super(ConvUpsample, self).__init__(init_cfg)
if num_upsample is None:
num_upsample = num_layers
assert num_upsample <= num_layers, \
            f'num_upsample({num_upsample}) must be no more than ' \
f'num_layers({num_layers})'
self.num_layers = num_layers
self.num_upsample = num_upsample
self.conv = ModuleList()
for i in range(num_layers):
self.conv.append(
ConvModule(
in_channels,
inner_channels,
3,
padding=1,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
in_channels = inner_channels
def forward(self, x):
num_upsample = self.num_upsample
for i in range(self.num_layers):
x = self.conv[i](x)
if num_upsample > 0:
num_upsample -= 1
x = F.interpolate(
x, scale_factor=2, mode='bilinear', align_corners=False)
return x
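# --- Usage sketch (added for illustration; not part of upstream mmdet) ---
# Three convs with 2x bilinear upsampling after the first two, giving a
# 4x larger output.
if __name__ == '__main__':
    import torch
    layer = ConvUpsample(
        in_channels=256, inner_channels=128, num_layers=3, num_upsample=2)
    x = torch.rand(1, 256, 16, 16)
    out = layer(x)
    assert out.shape == (1, 128, 64, 64)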
| 2,656 | 38.073529 | 78 | py |
ERD | ERD-main/mmdet/models/layers/bbox_nms.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union
import torch
from mmcv.ops.nms import batched_nms
from torch import Tensor
from mmdet.structures.bbox import bbox_overlaps
from mmdet.utils import ConfigType
def multiclass_nms(
multi_bboxes: Tensor,
multi_scores: Tensor,
score_thr: float,
nms_cfg: ConfigType,
max_num: int = -1,
score_factors: Optional[Tensor] = None,
return_inds: bool = False,
box_dim: int = 4
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class+1), where the last column
contains scores of the background class, but this will be ignored.
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
nms_cfg (Union[:obj:`ConfigDict`, dict]): a dict that contains
the arguments of nms operations.
max_num (int, optional): if there are more than max_num bboxes after
NMS, only top max_num will be kept. Default to -1.
score_factors (Tensor, optional): The factors multiplied to scores
before applying NMS. Default to None.
return_inds (bool, optional): Whether return the indices of kept
bboxes. Default to False.
box_dim (int): The dimension of boxes. Defaults to 4.
Returns:
Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:
(dets, labels, indices (optional)), tensors of shape (k, 5),
(k), and (k). Dets are boxes with scores. Labels are 0-based.
"""
num_classes = multi_scores.size(1) - 1
# exclude background category
if multi_bboxes.shape[1] > box_dim:
bboxes = multi_bboxes.view(multi_scores.size(0), -1, box_dim)
else:
bboxes = multi_bboxes[:, None].expand(
multi_scores.size(0), num_classes, box_dim)
scores = multi_scores[:, :-1]
labels = torch.arange(num_classes, dtype=torch.long, device=scores.device)
labels = labels.view(1, -1).expand_as(scores)
bboxes = bboxes.reshape(-1, box_dim)
scores = scores.reshape(-1)
labels = labels.reshape(-1)
if not torch.onnx.is_in_onnx_export():
# NonZero not supported in TensorRT
# remove low scoring boxes
valid_mask = scores > score_thr
# multiply score_factor after threshold to preserve more bboxes, improve
# mAP by 1% for YOLOv3
if score_factors is not None:
# expand the shape to match original shape of score
score_factors = score_factors.view(-1, 1).expand(
multi_scores.size(0), num_classes)
score_factors = score_factors.reshape(-1)
scores = scores * score_factors
if not torch.onnx.is_in_onnx_export():
# NonZero not supported in TensorRT
inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]
else:
# TensorRT NMS plugin has invalid output filled with -1
# add dummy data to make detection output correct.
bboxes = torch.cat([bboxes, bboxes.new_zeros(1, box_dim)], dim=0)
scores = torch.cat([scores, scores.new_zeros(1)], dim=0)
labels = torch.cat([labels, labels.new_zeros(1)], dim=0)
if bboxes.numel() == 0:
if torch.onnx.is_in_onnx_export():
raise RuntimeError('[ONNX Error] Can not record NMS '
'as it has not been executed this time')
dets = torch.cat([bboxes, scores[:, None]], -1)
if return_inds:
return dets, labels, inds
else:
return dets, labels
dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
if max_num > 0:
dets = dets[:max_num]
keep = keep[:max_num]
if return_inds:
return dets, labels[keep], inds[keep]
else:
return dets, labels[keep]
def fast_nms(
multi_bboxes: Tensor,
multi_scores: Tensor,
multi_coeffs: Tensor,
score_thr: float,
iou_thr: float,
top_k: int,
max_num: int = -1
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:
"""Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.
Fast NMS allows already-removed detections to suppress other detections so
that every instance can be decided to be kept or discarded in parallel,
which is not possible in traditional NMS. This relaxation allows us to
implement Fast NMS entirely in standard GPU-accelerated matrix operations.
Args:
        multi_bboxes (Tensor): shape (n, 4); the boxes are shared across
            all classes.
multi_scores (Tensor): shape (n, #class+1), where the last column
contains scores of the background class, but this will be ignored.
multi_coeffs (Tensor): shape (n, #class*coeffs_dim).
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
iou_thr (float): IoU threshold to be considered as conflicted.
top_k (int): if there are more than top_k bboxes before NMS,
only top top_k will be kept.
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept. If -1, keep all the bboxes.
Default: -1.
Returns:
Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:
(dets, labels, coefficients), tensors of shape (k, 5), (k, 1),
and (k, coeffs_dim). Dets are boxes with scores.
Labels are 0-based.
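    Example:
        A minimal sketch; shapes are illustrative (boxes are shared across
        classes and the coefficients have 32 dims):
        >>> multi_bboxes = torch.rand(100, 4) * 100
        >>> multi_scores = torch.rand(100, 81)  # 80 classes + background
        >>> multi_coeffs = torch.rand(100, 32)
        >>> dets, labels, coeffs = fast_nms(
        ...     multi_bboxes, multi_scores, multi_coeffs,
        ...     score_thr=0.05, iou_thr=0.5, top_k=200)
        >>> assert dets.size(1) == 5 and coeffs.size(1) == 32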
"""
scores = multi_scores[:, :-1].t() # [#class, n]
scores, idx = scores.sort(1, descending=True)
idx = idx[:, :top_k].contiguous()
scores = scores[:, :top_k] # [#class, topk]
num_classes, num_dets = idx.size()
boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)
iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk]
iou.triu_(diagonal=1)
iou_max, _ = iou.max(dim=1)
# Now just filter out the ones higher than the threshold
keep = iou_max <= iou_thr
# Second thresholding introduces 0.2 mAP gain at negligible time cost
keep *= scores > score_thr
# Assign each kept detection to its corresponding class
classes = torch.arange(
num_classes, device=boxes.device)[:, None].expand_as(keep)
classes = classes[keep]
boxes = boxes[keep]
coeffs = coeffs[keep]
scores = scores[keep]
# Only keep the top max_num highest scores across all classes
scores, idx = scores.sort(0, descending=True)
if max_num > 0:
idx = idx[:max_num]
scores = scores[:max_num]
classes = classes[idx]
boxes = boxes[idx]
coeffs = coeffs[idx]
cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
return cls_dets, classes, coeffs
| 6,987 | 36.772973 | 78 | py |
ERD | ERD-main/mmdet/models/layers/res_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule, Sequential
from torch import Tensor
from torch import nn as nn
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
class ResLayer(Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
        stride (int): stride of the first block. Defaults to 1.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Defaults to False.
        conv_cfg (dict): dictionary to construct and config conv layer.
            Defaults to None.
        norm_cfg (dict): dictionary to construct and config norm layer.
            Defaults to dict(type='BN').
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Defaults to True.
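    Example:
        A minimal sketch, assuming the ``Bottleneck`` block from
        ``mmdet.models.backbones.resnet`` (``expansion=4``):
        >>> from mmdet.models.backbones.resnet import Bottleneck
        >>> layer = ResLayer(Bottleneck, inplanes=64, planes=64, num_blocks=3)
        >>> x = torch.rand(1, 64, 56, 56)
        >>> assert layer(x).shape == (1, 256, 56, 56)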
"""
def __init__(self,
block: BaseModule,
inplanes: int,
planes: int,
num_blocks: int,
stride: int = 1,
avg_down: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
downsample_first: bool = True,
**kwargs) -> None:
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
if downsample_first:
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
inplanes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
else: # downsample_first=False is for HourglassModule
for _ in range(num_blocks - 1):
layers.append(
block(
inplanes=inplanes,
planes=inplanes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super().__init__(*layers)
class SimplifiedBasicBlock(BaseModule):
"""Simplified version of original basic residual block. This is used in
`SCNet <https://arxiv.org/abs/2012.10150>`_.
- Norm layer is now optional
- Last ReLU in forward function is removed
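    Example:
        A minimal sketch; channel sizes are illustrative:
        >>> block = SimplifiedBasicBlock(inplanes=64, planes=64)
        >>> x = torch.rand(1, 64, 56, 56)
        >>> assert block(x).shape == (1, 64, 56, 56)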
"""
expansion = 1
def __init__(self,
inplanes: int,
planes: int,
stride: int = 1,
dilation: int = 1,
downsample: Optional[Sequential] = None,
style: ConfigType = 'pytorch',
with_cp: bool = False,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN'),
dcn: OptConfigType = None,
plugins: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
assert not with_cp, 'Not implemented yet.'
self.with_norm = norm_cfg is not None
        with_bias = norm_cfg is None
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=with_bias)
if self.with_norm:
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, planes, postfix=1)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias=with_bias)
if self.with_norm:
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, planes, postfix=2)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self) -> Optional[BaseModule]:
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name) if self.with_norm else None
@property
def norm2(self) -> Optional[BaseModule]:
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name) if self.with_norm else None
def forward(self, x: Tensor) -> Tensor:
"""Forward function for SimplifiedBasicBlock."""
identity = x
out = self.conv1(x)
if self.with_norm:
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
if self.with_norm:
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
| 6,843 | 33.918367 | 79 | py |
ERD | ERD-main/mmdet/models/layers/brick_wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version
if torch.__version__ == 'parrots':
TORCH_VERSION = torch.__version__
else:
# torch.__version__ could be 1.3.1+cu92, we only need the first two
# for comparison
TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
def adaptive_avg_pool2d(input, output_size):
"""Handle empty batch dimension to adaptive_avg_pool2d.
Args:
input (tensor): 4D tensor.
output_size (int, tuple[int,int]): the target output size.
"""
if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
if isinstance(output_size, int):
output_size = [output_size, output_size]
output_size = [*input.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(input, output_size)
return empty
else:
return F.adaptive_avg_pool2d(input, output_size)
class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
"""Handle empty batch dimension to AdaptiveAvgPool2d."""
def forward(self, x):
# PyTorch 1.9 does not support empty tensor inference yet
if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
output_size = self.output_size
if isinstance(output_size, int):
output_size = [output_size, output_size]
else:
output_size = [
v if v is not None else d
for v, d in zip(output_size,
x.size()[-2:])
]
output_size = [*x.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(x, output_size)
return empty
return super().forward(x)
| 1,856 | 34.711538 | 77 | py |
ERD | ERD-main/mmdet/models/layers/matrix_nms.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
def mask_matrix_nms(masks,
labels,
scores,
filter_thr=-1,
nms_pre=-1,
max_num=-1,
kernel='gaussian',
sigma=2.0,
mask_area=None):
"""Matrix NMS for multi-class masks.
Args:
masks (Tensor): Has shape (num_instances, h, w)
labels (Tensor): Labels of corresponding masks,
has shape (num_instances,).
scores (Tensor): Mask scores of corresponding masks,
has shape (num_instances).
filter_thr (float): Score threshold to filter the masks
after matrix nms. Default: -1, which means do not
use filter_thr.
nms_pre (int): The max number of instances to do the matrix nms.
Default: -1, which means do not use nms_pre.
        max_num (int, optional): If there are more than max_num masks after
            matrix nms, only top max_num will be kept. Default: -1, which
            means do not use max_num.
kernel (str): 'linear' or 'gaussian'.
sigma (float): std in gaussian method.
mask_area (Tensor): The sum of seg_masks.
Returns:
tuple(Tensor): Processed mask results.
- scores (Tensor): Updated scores, has shape (n,).
- labels (Tensor): Remained labels, has shape (n,).
        - masks (Tensor): Remained masks, has shape (n, h, w).
        - keep_inds (Tensor): The indices of the remaining masks in the
            input masks, has shape (n,).
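    Example:
        A minimal sketch with illustrative shapes (5 binary masks over an
        80-class label space):
        >>> masks = torch.rand(5, 160, 160) > 0.5
        >>> labels = torch.randint(0, 80, (5,))
        >>> scores = torch.rand(5)
        >>> scores, labels, masks, keep_inds = mask_matrix_nms(
        ...     masks, labels, scores)
        >>> assert len(scores) == len(labels) == len(masks) == len(keep_inds)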
"""
assert len(labels) == len(masks) == len(scores)
if len(labels) == 0:
return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
0, *masks.shape[-2:]), labels.new_zeros(0)
if mask_area is None:
mask_area = masks.sum((1, 2)).float()
else:
assert len(masks) == len(mask_area)
# sort and keep top nms_pre
scores, sort_inds = torch.sort(scores, descending=True)
keep_inds = sort_inds
if nms_pre > 0 and len(sort_inds) > nms_pre:
sort_inds = sort_inds[:nms_pre]
keep_inds = keep_inds[:nms_pre]
scores = scores[:nms_pre]
masks = masks[sort_inds]
mask_area = mask_area[sort_inds]
labels = labels[sort_inds]
num_masks = len(labels)
flatten_masks = masks.reshape(num_masks, -1).float()
# inter.
inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))
expanded_mask_area = mask_area.expand(num_masks, num_masks)
# Upper triangle iou matrix.
iou_matrix = (inter_matrix /
(expanded_mask_area + expanded_mask_area.transpose(1, 0) -
inter_matrix)).triu(diagonal=1)
# label_specific matrix.
expanded_labels = labels.expand(num_masks, num_masks)
# Upper triangle label matrix.
label_matrix = (expanded_labels == expanded_labels.transpose(
1, 0)).triu(diagonal=1)
# IoU compensation
compensate_iou, _ = (iou_matrix * label_matrix).max(0)
compensate_iou = compensate_iou.expand(num_masks,
num_masks).transpose(1, 0)
# IoU decay
decay_iou = iou_matrix * label_matrix
# Calculate the decay_coefficient
if kernel == 'gaussian':
decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))
compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2))
decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
elif kernel == 'linear':
decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
decay_coefficient, _ = decay_matrix.min(0)
else:
raise NotImplementedError(
f'{kernel} kernel is not supported in matrix nms!')
# update the score.
scores = scores * decay_coefficient
if filter_thr > 0:
keep = scores >= filter_thr
keep_inds = keep_inds[keep]
if not keep.any():
return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
0, *masks.shape[-2:]), labels.new_zeros(0)
masks = masks[keep]
scores = scores[keep]
labels = labels[keep]
# sort and keep top max_num
scores, sort_inds = torch.sort(scores, descending=True)
keep_inds = keep_inds[sort_inds]
if max_num > 0 and len(sort_inds) > max_num:
sort_inds = sort_inds[:max_num]
keep_inds = keep_inds[:max_num]
scores = scores[:max_num]
masks = masks[sort_inds]
labels = labels[sort_inds]
return scores, labels, masks, keep_inds
| 4,622 | 36.893443 | 77 | py |
ERD | ERD-main/mmdet/models/layers/positional_encoding.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptMultiConfig
@MODELS.register_module()
class SinePositionalEncoding(BaseModule):
"""Position encoding with sine and cosine functions.
See `End-to-End Object Detection with Transformers
<https://arxiv.org/pdf/2005.12872>`_ for details.
Args:
num_feats (int): The feature dimension for each position
along x-axis or y-axis. Note the final returned dimension
            for each position is 2 times this value.
temperature (int, optional): The temperature used for scaling
the position embedding. Defaults to 10000.
normalize (bool, optional): Whether to normalize the position
embedding. Defaults to False.
scale (float, optional): A scale factor that scales the position
embedding. The scale will be used only when `normalize` is True.
Defaults to 2*pi.
eps (float, optional): A value added to the denominator for
numerical stability. Defaults to 1e-6.
offset (float): offset add to embed when do the normalization.
Defaults to 0.
init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
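    Example:
        A minimal sketch; an all-zero mask marks every position as valid:
        >>> pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
        >>> mask = torch.zeros(1, 32, 32, dtype=torch.uint8)
        >>> pos = pos_enc(mask)
        >>> assert pos.shape == (1, 256, 32, 32)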
"""
def __init__(self,
num_feats: int,
temperature: int = 10000,
normalize: bool = False,
scale: float = 2 * math.pi,
eps: float = 1e-6,
offset: float = 0.,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
if normalize:
assert isinstance(scale, (float, int)), 'when normalize is set,' \
'scale should be provided and in float or int type, ' \
f'found {type(scale)}'
self.num_feats = num_feats
self.temperature = temperature
self.normalize = normalize
self.scale = scale
self.eps = eps
self.offset = offset
def forward(self, mask: Tensor) -> Tensor:
"""Forward function for `SinePositionalEncoding`.
Args:
mask (Tensor): ByteTensor mask. Non-zero values representing
ignored positions, while zero values means valid positions
for this image. Shape [bs, h, w].
Returns:
pos (Tensor): Returned position embedding with shape
[bs, num_feats*2, h, w].
"""
# For convenience of exporting to ONNX, it's required to convert
# `masks` from bool to int.
mask = mask.to(torch.int)
not_mask = 1 - mask # logical_not
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
y_embed = (y_embed + self.offset) / \
(y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = (x_embed + self.offset) / \
(x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(
self.num_feats, dtype=torch.float32, device=mask.device)
dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
# use `view` instead of `flatten` for dynamically exporting to ONNX
B, H, W = mask.size()
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),
dim=4).view(B, H, W, -1)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),
dim=4).view(B, H, W, -1)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
def __repr__(self) -> str:
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(num_feats={self.num_feats}, '
repr_str += f'temperature={self.temperature}, '
repr_str += f'normalize={self.normalize}, '
repr_str += f'scale={self.scale}, '
repr_str += f'eps={self.eps})'
return repr_str
@MODELS.register_module()
class LearnedPositionalEncoding(BaseModule):
"""Position embedding with learnable embedding weights.
Args:
num_feats (int): The feature dimension for each position
along x-axis or y-axis. The final returned dimension for
            each position is 2 times this value.
row_num_embed (int, optional): The dictionary size of row embeddings.
Defaults to 50.
col_num_embed (int, optional): The dictionary size of col embeddings.
Defaults to 50.
init_cfg (dict or list[dict], optional): Initialization config dict.
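    Example:
        A minimal sketch; the 32x32 mask stays within the default embedding
        dictionary sizes (50):
        >>> pos_enc = LearnedPositionalEncoding(num_feats=128)
        >>> mask = torch.zeros(1, 32, 32)
        >>> pos = pos_enc(mask)
        >>> assert pos.shape == (1, 256, 32, 32)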
"""
def __init__(
self,
num_feats: int,
row_num_embed: int = 50,
col_num_embed: int = 50,
init_cfg: MultiConfig = dict(type='Uniform', layer='Embedding')
) -> None:
super().__init__(init_cfg=init_cfg)
self.row_embed = nn.Embedding(row_num_embed, num_feats)
self.col_embed = nn.Embedding(col_num_embed, num_feats)
self.num_feats = num_feats
self.row_num_embed = row_num_embed
self.col_num_embed = col_num_embed
def forward(self, mask: Tensor) -> Tensor:
"""Forward function for `LearnedPositionalEncoding`.
Args:
mask (Tensor): ByteTensor mask. Non-zero values representing
ignored positions, while zero values means valid positions
for this image. Shape [bs, h, w].
Returns:
pos (Tensor): Returned position embedding with shape
[bs, num_feats*2, h, w].
"""
h, w = mask.shape[-2:]
x = torch.arange(w, device=mask.device)
y = torch.arange(h, device=mask.device)
x_embed = self.col_embed(x)
y_embed = self.row_embed(y)
pos = torch.cat(
(x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat(
1, w, 1)),
dim=-1).permute(2, 0,
1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
return pos
def __repr__(self) -> str:
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(num_feats={self.num_feats}, '
repr_str += f'row_num_embed={self.row_num_embed}, '
repr_str += f'col_num_embed={self.col_num_embed})'
return repr_str
| 6,710 | 38.710059 | 79 | py |
ERD | ERD-main/mmdet/models/layers/inverted_residual.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import DropPath
from mmengine.model import BaseModule
from .se_layer import SELayer
class InvertedResidual(BaseModule):
"""Inverted Residual Block.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
with_expand_conv (bool): Use expand conv or not. If set False,
mid_channels must be the same with in_channels.
Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
drop_path_rate (float): stochastic depth rate. Defaults to 0.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Returns:
Tensor: The output tensor.
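    Example:
        A minimal sketch; with ``stride=1`` and matching in/out channels the
        residual shortcut is used (channel sizes are illustrative):
        >>> block = InvertedResidual(32, 32, mid_channels=96)
        >>> x = torch.rand(1, 32, 56, 56)
        >>> assert block(x).shape == (1, 32, 56, 56)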
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
drop_path_rate=0.,
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
            f'But received {stride}.'
self.with_cp = with_cp
self.drop_path = DropPath(
drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + self.drop_path(out)
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
| 4,383 | 32.465649 | 78 | py |
ERD | ERD-main/mmdet/models/layers/activations.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmengine.utils import digit_version
from mmdet.registry import MODELS
if digit_version(torch.__version__) >= digit_version('1.7.0'):
from torch.nn import SiLU
else:
class SiLU(nn.Module):
"""Sigmoid Weighted Liner Unit."""
def __init__(self, inplace=True):
super().__init__()
def forward(self, inputs) -> torch.Tensor:
return inputs * torch.sigmoid(inputs)
MODELS.register_module(module=SiLU, name='SiLU')
| 557 | 23.26087 | 62 | py |
ERD | ERD-main/mmdet/models/layers/ema.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
momentum (float): The momentum used for updating ema parameter.
Ema's parameter are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
gamma (int): Use a larger momentum early in training and gradually
annealing to a smaller value to update the ema model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
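    Example:
        A minimal construction sketch; the wrapped module is illustrative:
        >>> model = nn.Conv2d(3, 8, 3)
        >>> ema_model = ExpMomentumEMA(model, momentum=0.0002, gamma=2000)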
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> None:
"""Compute the moving average of the parameters using the exponential
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
"""
momentum = (1 - self.momentum) * math.exp(
-float(1 + steps) / self.gamma) + self.momentum
averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)
| 2,614 | 38.029851 | 78 | py |
ERD | ERD-main/mmdet/models/layers/transformer/dino_layers.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Tuple, Union
import torch
from mmengine.model import BaseModule
from torch import Tensor, nn
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox_xyxy_to_cxcywh
from mmdet.utils import OptConfigType
from .deformable_detr_layers import DeformableDetrTransformerDecoder
from .utils import MLP, coordinate_to_encoding, inverse_sigmoid
class DinoTransformerDecoder(DeformableDetrTransformerDecoder):
"""Transformer encoder of DINO."""
def _init_layers(self) -> None:
"""Initialize decoder layers."""
super()._init_layers()
self.ref_point_head = MLP(self.embed_dims * 2, self.embed_dims,
self.embed_dims, 2)
self.norm = nn.LayerNorm(self.embed_dims)
def forward(self, query: Tensor, value: Tensor, key_padding_mask: Tensor,
self_attn_mask: Tensor, reference_points: Tensor,
spatial_shapes: Tensor, level_start_index: Tensor,
valid_ratios: Tensor, reg_branches: nn.ModuleList,
**kwargs) -> Tensor:
"""Forward function of Transformer encoder.
Args:
query (Tensor): The input query, has shape (num_queries, bs, dim).
value (Tensor): The input values, has shape (num_value, bs, dim).
key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`
input. ByteTensor, has shape (num_queries, bs).
self_attn_mask (Tensor): The attention mask to prevent information
leakage from different denoising groups and matching parts, has
shape (num_queries_total, num_queries_total). It is `None` when
`self.training` is `False`.
reference_points (Tensor): The initial reference, has shape
(bs, num_queries, 4) with the last dimension arranged as
(cx, cy, w, h).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
reg_branches: (obj:`nn.ModuleList`): Used for refining the
regression results.
Returns:
            tuple[Tensor]: Output queries and reference points of the
            Transformer decoder. When `return_intermediate` is `True`, the
            intermediate queries and reference points of all decoder layers
            are stacked and returned.
"""
intermediate = []
intermediate_reference_points = [reference_points]
for lid, layer in enumerate(self.layers):
if reference_points.shape[-1] == 4:
reference_points_input = \
reference_points[:, :, None] * torch.cat(
[valid_ratios, valid_ratios], -1)[:, None]
else:
assert reference_points.shape[-1] == 2
reference_points_input = \
reference_points[:, :, None] * valid_ratios[:, None]
query_sine_embed = coordinate_to_encoding(
reference_points_input[:, :, 0, :])
query_pos = self.ref_point_head(query_sine_embed)
query = layer(
query,
query_pos=query_pos,
value=value,
key_padding_mask=key_padding_mask,
self_attn_mask=self_attn_mask,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reference_points=reference_points_input,
**kwargs)
if reg_branches is not None:
tmp = reg_branches[lid](query)
assert reference_points.shape[-1] == 4
new_reference_points = tmp + inverse_sigmoid(
reference_points, eps=1e-3)
new_reference_points = new_reference_points.sigmoid()
reference_points = new_reference_points.detach()
if self.return_intermediate:
intermediate.append(self.norm(query))
intermediate_reference_points.append(new_reference_points)
# NOTE this is for the "Look Forward Twice" module,
# in the DeformDETR, reference_points was appended.
if self.return_intermediate:
return torch.stack(intermediate), torch.stack(
intermediate_reference_points)
return query, reference_points
class CdnQueryGenerator(BaseModule):
"""Implement query generator of the Contrastive denoising (CDN) proposed in
`DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object
Detection <https://arxiv.org/abs/2203.03605>`_
Code is modified from the `official github repo
<https://github.com/IDEA-Research/DINO>`_.
Args:
num_classes (int): Number of object classes.
embed_dims (int): The embedding dimensions of the generated queries.
num_matching_queries (int): The queries number of the matching part.
Used for generating dn_mask.
label_noise_scale (float): The scale of label noise, defaults to 0.5.
box_noise_scale (float): The scale of box noise, defaults to 1.0.
group_cfg (:obj:`ConfigDict` or dict, optional): The config of the
denoising queries grouping, includes `dynamic`, `num_dn_queries`,
and `num_groups`. Two grouping strategies, 'static dn groups' and
'dynamic dn groups', are supported. When `dynamic` is `False`,
the `num_groups` should be set, and the number of denoising query
groups will always be `num_groups`. When `dynamic` is `True`, the
`num_dn_queries` should be set, and the group number will be
dynamic to ensure that the denoising queries number will not exceed
`num_dn_queries` to prevent large fluctuations of memory. Defaults
to `None`.
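    Example:
        A minimal construction sketch; the values below follow common DINO
        settings and are illustrative assumptions. Calling the generator on
        a batch of data samples returns the denoising label/bbox queries,
        the attention mask and the group meta info:
        >>> dn_generator = CdnQueryGenerator(
        ...     num_classes=80,
        ...     embed_dims=256,
        ...     num_matching_queries=900,
        ...     group_cfg=dict(dynamic=True, num_dn_queries=100))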
"""
def __init__(self,
num_classes: int,
embed_dims: int,
num_matching_queries: int,
label_noise_scale: float = 0.5,
box_noise_scale: float = 1.0,
group_cfg: OptConfigType = None) -> None:
super().__init__()
self.num_classes = num_classes
self.embed_dims = embed_dims
self.num_matching_queries = num_matching_queries
self.label_noise_scale = label_noise_scale
self.box_noise_scale = box_noise_scale
# prepare grouping strategy
group_cfg = {} if group_cfg is None else group_cfg
self.dynamic_dn_groups = group_cfg.get('dynamic', True)
if self.dynamic_dn_groups:
if 'num_dn_queries' not in group_cfg:
warnings.warn("'num_dn_queries' should be set when using "
'dynamic dn groups, use 100 as default.')
self.num_dn_queries = group_cfg.get('num_dn_queries', 100)
assert isinstance(self.num_dn_queries, int), \
f'Expected the num_dn_queries to have type int, but got ' \
f'{self.num_dn_queries}({type(self.num_dn_queries)}). '
else:
assert 'num_groups' in group_cfg, \
'num_groups should be set when using static dn groups'
self.num_groups = group_cfg['num_groups']
assert isinstance(self.num_groups, int), \
f'Expected the num_groups to have type int, but got ' \
f'{self.num_groups}({type(self.num_groups)}). '
# NOTE The original repo of DINO set the num_embeddings 92 for coco,
# 91 (0~90) of which represents target classes and the 92 (91)
# indicates `Unknown` class. However, the embedding of `unknown` class
# is not used in the original DINO.
# TODO: num_classes + 1 or num_classes ?
self.label_embedding = nn.Embedding(self.num_classes, self.embed_dims)
def __call__(self, batch_data_samples: SampleList) -> tuple:
"""Generate contrastive denoising (cdn) queries with ground truth.
Descriptions of the Number Values in code and comments:
- num_target_total: the total target number of the input batch
samples.
- max_num_target: the max target number of the input batch samples.
- num_noisy_targets: the total targets number after adding noise,
i.e., num_target_total * num_groups * 2.
- num_denoising_queries: the length of the output batched queries,
i.e., max_num_target * num_groups * 2.
NOTE The format of input bboxes in batch_data_samples is unnormalized
(x, y, x, y), and the output bbox queries are embedded by normalized
(cx, cy, w, h) format bboxes going through inverse_sigmoid.
Args:
batch_data_samples (list[:obj:`DetDataSample`]): List of the batch
data samples, each includes `gt_instance` which has attributes
`bboxes` and `labels`. The `bboxes` has unnormalized coordinate
format (x, y, x, y).
Returns:
tuple: The outputs of the dn query generator.
- dn_label_query (Tensor): The output content queries for denoising
part, has shape (bs, num_denoising_queries, dim), where
`num_denoising_queries = max_num_target * num_groups * 2`.
- dn_bbox_query (Tensor): The output reference bboxes as positions
of queries for denoising part, which are embedded by normalized
(cx, cy, w, h) format bboxes going through inverse_sigmoid, has
shape (bs, num_denoising_queries, 4) with the last dimension
arranged as (cx, cy, w, h).
- attn_mask (Tensor): The attention mask to prevent information
leakage from different denoising groups and matching parts,
will be used as `self_attn_mask` of the `decoder`, has shape
(num_queries_total, num_queries_total), where `num_queries_total`
is the sum of `num_denoising_queries` and `num_matching_queries`.
- dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
'num_denoising_groups'. It will be used for split outputs of
denoising and matching parts and loss calculation.
"""
# normalize bbox and collate ground truth (gt)
gt_labels_list = []
gt_bboxes_list = []
for sample in batch_data_samples:
img_h, img_w = sample.img_shape
bboxes = sample.gt_instances.bboxes
factor = bboxes.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0)
bboxes_normalized = bboxes / factor
gt_bboxes_list.append(bboxes_normalized)
gt_labels_list.append(sample.gt_instances.labels)
        gt_labels = torch.cat(gt_labels_list)  # (num_target_total, )
        gt_bboxes = torch.cat(gt_bboxes_list)  # (num_target_total, 4)
num_target_list = [len(bboxes) for bboxes in gt_bboxes_list]
max_num_target = max(num_target_list)
num_groups = self.get_num_groups(max_num_target)
dn_label_query = self.generate_dn_label_query(gt_labels, num_groups)
dn_bbox_query = self.generate_dn_bbox_query(gt_bboxes, num_groups)
# The `batch_idx` saves the batch index of the corresponding sample
# for each target, has shape (num_target_total).
batch_idx = torch.cat([
torch.full_like(t.long(), i) for i, t in enumerate(gt_labels_list)
])
dn_label_query, dn_bbox_query = self.collate_dn_queries(
dn_label_query, dn_bbox_query, batch_idx, len(batch_data_samples),
num_groups)
attn_mask = self.generate_dn_mask(
max_num_target, num_groups, device=dn_label_query.device)
dn_meta = dict(
num_denoising_queries=int(max_num_target * 2 * num_groups),
num_denoising_groups=num_groups)
return dn_label_query, dn_bbox_query, attn_mask, dn_meta
def get_num_groups(self, max_num_target: int = None) -> int:
"""Calculate denoising query groups number.
Two grouping strategies, 'static dn groups' and 'dynamic dn groups',
are supported. When `self.dynamic_dn_groups` is `False`, the number
of denoising query groups will always be `self.num_groups`. When
`self.dynamic_dn_groups` is `True`, the group number will be dynamic,
ensuring the denoising queries number will not exceed
`self.num_dn_queries` to prevent large fluctuations of memory.
NOTE The `num_group` is shared for different samples in a batch. When
the target numbers in the samples varies, the denoising queries of the
samples containing fewer targets are padded to the max length.
Args:
max_num_target (int, optional): The max target number of the batch
samples. It will only be used when `self.dynamic_dn_groups` is
`True`. Defaults to `None`.
Returns:
int: The denoising group number of the current batch.
"""
if self.dynamic_dn_groups:
            assert max_num_target is not None, \
                'max_num_target should be provided when using ' \
                'dynamic dn groups'
if max_num_target == 0:
num_groups = 1
else:
num_groups = self.num_dn_queries // max_num_target
else:
num_groups = self.num_groups
if num_groups < 1:
num_groups = 1
return int(num_groups)
def generate_dn_label_query(self, gt_labels: Tensor,
num_groups: int) -> Tensor:
"""Generate noisy labels and their query embeddings.
The strategy for generating noisy labels is: Randomly choose labels of
`self.label_noise_scale * 0.5` proportion and override each of them
with a random object category label.
NOTE Not add noise to all labels. Besides, the `self.label_noise_scale
* 0.5` arg is the ratio of the chosen positions, which is higher than
the actual proportion of noisy labels, because the labels to override
may be correct. And the gap becomes larger as the number of target
categories decreases. The users should notice this and modify the scale
arg or the corresponding logic according to specific dataset.
Args:
gt_labels (Tensor): The concatenated gt labels of all samples
in the batch, has shape (num_target_total, ) where
`num_target_total = sum(num_target_list)`.
num_groups (int): The number of denoising query groups.
Returns:
Tensor: The query embeddings of noisy labels, has shape
(num_noisy_targets, embed_dims), where `num_noisy_targets =
num_target_total * num_groups * 2`.
"""
assert self.label_noise_scale > 0
gt_labels_expand = gt_labels.repeat(2 * num_groups,
1).view(-1) # Note `* 2` # noqa
p = torch.rand_like(gt_labels_expand.float())
chosen_indice = torch.nonzero(p < (self.label_noise_scale * 0.5)).view(
-1) # Note `* 0.5`
new_labels = torch.randint_like(chosen_indice, 0, self.num_classes)
noisy_labels_expand = gt_labels_expand.scatter(0, chosen_indice,
new_labels)
dn_label_query = self.label_embedding(noisy_labels_expand)
return dn_label_query
def generate_dn_bbox_query(self, gt_bboxes: Tensor,
num_groups: int) -> Tensor:
"""Generate noisy bboxes and their query embeddings.
The strategy for generating noisy bboxes is as follow:
.. code:: text
+--------------------+
| negative |
| +----------+ |
| | positive | |
| | +-----|----+------------+
| | | | | |
| +----+-----+ | |
| | | |
+---------+----------+ |
| |
| gt bbox |
| |
| +---------+----------+
| | | |
| | +----+-----+ |
| | | | | |
+-------------|--- +----+ | |
| | positive | |
| +----------+ |
| negative |
+--------------------+
The random noise is added to the top-left and down-right point
positions, hence, normalized (x, y, x, y) format of bboxes are
required. The noisy bboxes of positive queries have the points
both within the inner square, while those of negative queries
have the points both between the inner and outer squares.
Besides, the length of outer square is twice as long as that of
the inner square, i.e., self.box_noise_scale * w_or_h / 2.
        NOTE The noise is added to all the bboxes. Moreover, there is still an
        unconsidered case where one point is within the positive square and
        the other is between the inner and outer squares.
Args:
gt_bboxes (Tensor): The concatenated gt bboxes of all samples
in the batch, has shape (num_target_total, 4) with the last
dimension arranged as (cx, cy, w, h) where
`num_target_total = sum(num_target_list)`.
num_groups (int): The number of denoising query groups.
Returns:
Tensor: The output noisy bboxes, which are embedded by normalized
(cx, cy, w, h) format bboxes going through inverse_sigmoid, has
shape (num_noisy_targets, 4) with the last dimension arranged as
(cx, cy, w, h), where
`num_noisy_targets = num_target_total * num_groups * 2`.
"""
assert self.box_noise_scale > 0
device = gt_bboxes.device
# expand gt_bboxes as groups
gt_bboxes_expand = gt_bboxes.repeat(2 * num_groups, 1) # xyxy
# obtain index of negative queries in gt_bboxes_expand
positive_idx = torch.arange(
len(gt_bboxes), dtype=torch.long, device=device)
positive_idx = positive_idx.unsqueeze(0).repeat(num_groups, 1)
positive_idx += 2 * len(gt_bboxes) * torch.arange(
num_groups, dtype=torch.long, device=device)[:, None]
positive_idx = positive_idx.flatten()
negative_idx = positive_idx + len(gt_bboxes)
# determine the sign of each element in the random part of the added
# noise to be positive or negative randomly.
rand_sign = torch.randint_like(
gt_bboxes_expand, low=0, high=2,
dtype=torch.float32) * 2.0 - 1.0 # [low, high), 1 or -1, randomly
# calculate the random part of the added noise
rand_part = torch.rand_like(gt_bboxes_expand) # [0, 1)
rand_part[negative_idx] += 1.0 # pos: [0, 1); neg: [1, 2)
rand_part *= rand_sign # pos: (-1, 1); neg: (-2, -1] U [1, 2)
# add noise to the bboxes
bboxes_whwh = bbox_xyxy_to_cxcywh(gt_bboxes_expand)[:, 2:].repeat(1, 2)
noisy_bboxes_expand = gt_bboxes_expand + torch.mul(
rand_part, bboxes_whwh) * self.box_noise_scale / 2 # xyxy
noisy_bboxes_expand = noisy_bboxes_expand.clamp(min=0.0, max=1.0)
noisy_bboxes_expand = bbox_xyxy_to_cxcywh(noisy_bboxes_expand)
dn_bbox_query = inverse_sigmoid(noisy_bboxes_expand, eps=1e-3)
return dn_bbox_query
def collate_dn_queries(self, input_label_query: Tensor,
input_bbox_query: Tensor, batch_idx: Tensor,
batch_size: int, num_groups: int) -> Tuple[Tensor]:
"""Collate generated queries to obtain batched dn queries.
The strategy for query collation is as follow:
.. code:: text
input_queries (num_target_total, query_dim)
P_A1 P_B1 P_B2 N_A1 N_B1 N_B2 P'A1 P'B1 P'B2 N'A1 N'B1 N'B2
|________ group1 ________| |________ group2 ________|
|
V
P_A1 Pad0 N_A1 Pad0 P'A1 Pad0 N'A1 Pad0
P_B1 P_B2 N_B1 N_B2 P'B1 P'B2 N'B1 N'B2
|____ group1 ____| |____ group2 ____|
batched_queries (batch_size, max_num_target, query_dim)
where query_dim is 4 for bbox and self.embed_dims for label.
Notation: _-group 1; '-group 2;
A-Sample1(has 1 target); B-sample2(has 2 targets)
Args:
input_label_query (Tensor): The generated label queries of all
targets, has shape (num_target_total, embed_dims) where
`num_target_total = sum(num_target_list)`.
input_bbox_query (Tensor): The generated bbox queries of all
targets, has shape (num_target_total, 4) with the last
dimension arranged as (cx, cy, w, h).
batch_idx (Tensor): The batch index of the corresponding sample
for each target, has shape (num_target_total).
batch_size (int): The size of the input batch.
num_groups (int): The number of denoising query groups.
Returns:
tuple[Tensor]: Output batched label and bbox queries.
- batched_label_query (Tensor): The output batched label queries,
has shape (batch_size, max_num_target, embed_dims).
- batched_bbox_query (Tensor): The output batched bbox queries,
has shape (batch_size, max_num_target, 4) with the last dimension
arranged as (cx, cy, w, h).
"""
device = input_label_query.device
num_target_list = [
torch.sum(batch_idx == idx) for idx in range(batch_size)
]
max_num_target = max(num_target_list)
num_denoising_queries = int(max_num_target * 2 * num_groups)
map_query_index = torch.cat([
torch.arange(num_target, device=device)
for num_target in num_target_list
])
map_query_index = torch.cat([
map_query_index + max_num_target * i for i in range(2 * num_groups)
]).long()
batch_idx_expand = batch_idx.repeat(2 * num_groups, 1).view(-1)
mapper = (batch_idx_expand, map_query_index)
batched_label_query = torch.zeros(
batch_size, num_denoising_queries, self.embed_dims, device=device)
batched_bbox_query = torch.zeros(
batch_size, num_denoising_queries, 4, device=device)
batched_label_query[mapper] = input_label_query
batched_bbox_query[mapper] = input_bbox_query
return batched_label_query, batched_bbox_query
def generate_dn_mask(self, max_num_target: int, num_groups: int,
device: Union[torch.device, str]) -> Tensor:
"""Generate attention mask to prevent information leakage from
different denoising groups and matching parts.
.. code:: text
0 0 0 0 1 1 1 1 0 0 0 0 0
0 0 0 0 1 1 1 1 0 0 0 0 0
0 0 0 0 1 1 1 1 0 0 0 0 0
0 0 0 0 1 1 1 1 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0 0 0 0
1 1 1 1 0 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 0 0 0 0 0
1 1 1 1 1 1 1 1 0 0 0 0 0
1 1 1 1 1 1 1 1 0 0 0 0 0
1 1 1 1 1 1 1 1 0 0 0 0 0
1 1 1 1 1 1 1 1 0 0 0 0 0
max_num_target |_| |_________| num_matching_queries
|_____________| num_denoising_queries
1 -> True (Masked), means 'can not see'.
0 -> False (UnMasked), means 'can see'.
Args:
max_num_target (int): The max target number of the input batch
samples.
num_groups (int): The number of denoising query groups.
device (obj:`device` or str): The device of generated mask.
Returns:
Tensor: The attention mask to prevent information leakage from
different denoising groups and matching parts, will be used as
`self_attn_mask` of the `decoder`, has shape (num_queries_total,
num_queries_total), where `num_queries_total` is the sum of
`num_denoising_queries` and `num_matching_queries`.
"""
num_denoising_queries = int(max_num_target * 2 * num_groups)
num_queries_total = num_denoising_queries + self.num_matching_queries
attn_mask = torch.zeros(
num_queries_total,
num_queries_total,
device=device,
dtype=torch.bool)
# Make the matching part cannot see the denoising groups
attn_mask[num_denoising_queries:, :num_denoising_queries] = True
# Make the denoising groups cannot see each other
for i in range(num_groups):
# Mask rows of one group per step.
row_scope = slice(max_num_target * 2 * i,
max_num_target * 2 * (i + 1))
left_scope = slice(max_num_target * 2 * i)
right_scope = slice(max_num_target * 2 * (i + 1),
num_denoising_queries)
attn_mask[row_scope, right_scope] = True
attn_mask[row_scope, left_scope] = True
return attn_mask
| 26,710 | 47.301989 | 79 | py |
ERD | ERD-main/mmdet/models/layers/transformer/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
from typing import Optional, Sequence, Tuple, Union
import torch
import torch.nn.functional as F
from mmcv.cnn import (Linear, build_activation_layer, build_conv_layer,
build_norm_layer)
from mmcv.cnn.bricks.drop import Dropout
from mmengine.model import BaseModule, ModuleList
from mmengine.utils import to_2tuple
from torch import Tensor, nn
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
def nlc_to_nchw(x: Tensor, hw_shape: Sequence[int]) -> Tensor:
"""Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor.
Args:
x (Tensor): The input tensor of shape [N, L, C] before conversion.
hw_shape (Sequence[int]): The height and width of output feature map.
Returns:
Tensor: The output tensor of shape [N, C, H, W] after conversion.
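    Example:
        A quick shape check with illustrative sizes:
        >>> x = torch.rand(2, 16 * 16, 64)
        >>> assert nlc_to_nchw(x, (16, 16)).shape == (2, 64, 16, 16)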
"""
H, W = hw_shape
assert len(x.shape) == 3
B, L, C = x.shape
assert L == H * W, 'The seq_len does not match H, W'
return x.transpose(1, 2).reshape(B, C, H, W).contiguous()
def nchw_to_nlc(x):
"""Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor.
Args:
x (Tensor): The input tensor of shape [N, C, H, W] before conversion.
Returns:
Tensor: The output tensor of shape [N, L, C] after conversion.
"""
assert len(x.shape) == 4
return x.flatten(2).transpose(1, 2).contiguous()
def coordinate_to_encoding(coord_tensor: Tensor,
num_feats: int = 128,
temperature: int = 10000,
scale: float = 2 * math.pi):
"""Convert coordinate tensor to positional encoding.
Args:
coord_tensor (Tensor): Coordinate tensor to be converted to
positional encoding. With the last dimension as 2 or 4.
num_feats (int, optional): The feature dimension for each position
along x-axis or y-axis. Note the final returned dimension
            for each position is 2 times this value. Defaults to 128.
temperature (int, optional): The temperature used for scaling
the position embedding. Defaults to 10000.
scale (float, optional): A scale factor that scales the position
            embedding. Defaults to 2*pi.
Returns:
Tensor: Returned encoded positional tensor.
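    Example:
        A minimal sketch; inputs are assumed to be normalized (cx, cy)
        pairs:
        >>> coords = torch.rand(2, 100, 2)
        >>> pos = coordinate_to_encoding(coords, num_feats=128)
        >>> assert pos.shape == (2, 100, 256)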
"""
dim_t = torch.arange(
num_feats, dtype=torch.float32, device=coord_tensor.device)
dim_t = temperature**(2 * (dim_t // 2) / num_feats)
x_embed = coord_tensor[..., 0] * scale
y_embed = coord_tensor[..., 1] * scale
pos_x = x_embed[..., None] / dim_t
pos_y = y_embed[..., None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()),
dim=-1).flatten(2)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()),
dim=-1).flatten(2)
if coord_tensor.size(-1) == 2:
pos = torch.cat((pos_y, pos_x), dim=-1)
elif coord_tensor.size(-1) == 4:
w_embed = coord_tensor[..., 2] * scale
pos_w = w_embed[..., None] / dim_t
pos_w = torch.stack((pos_w[..., 0::2].sin(), pos_w[..., 1::2].cos()),
dim=-1).flatten(2)
h_embed = coord_tensor[..., 3] * scale
pos_h = h_embed[..., None] / dim_t
pos_h = torch.stack((pos_h[..., 0::2].sin(), pos_h[..., 1::2].cos()),
dim=-1).flatten(2)
pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=-1)
else:
raise ValueError('Unknown pos_tensor shape(-1):{}'.format(
coord_tensor.size(-1)))
return pos
def inverse_sigmoid(x: Tensor, eps: float = 1e-5) -> Tensor:
"""Inverse function of sigmoid.
Args:
        x (Tensor): The tensor to apply the inverse sigmoid to.
        eps (float): EPS to avoid numerical overflow. Defaults to 1e-5.
    Returns:
        Tensor: The inverse sigmoid of the input, with the same shape as the
        input.
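    Example:
        A quick numerical check that the function inverts ``torch.sigmoid``:
        >>> x = torch.tensor([0.1, 0.5, 0.9])
        >>> assert torch.allclose(torch.sigmoid(inverse_sigmoid(x)), x,
        ...                       atol=1e-4)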
"""
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
class AdaptivePadding(nn.Module):
"""Applies padding to input (if needed) so that input can get fully covered
by filter you specified. It support two modes "same" and "corner". The
"same" mode is same with "SAME" padding mode in TensorFlow, pad zero around
input. The "corner" mode would pad zero to bottom right.
Args:
        kernel_size (int | tuple): Size of the kernel.
        stride (int | tuple): Stride of the filter. Default: 1.
        dilation (int | tuple): Spacing between kernel elements.
            Default: 1.
padding (str): Support "same" and "corner", "corner" mode
would pad zero to bottom right, and "same" mode would
pad zero around input. Default: "corner".
Example:
>>> kernel_size = 16
>>> stride = 16
>>> dilation = 1
>>> input = torch.rand(1, 1, 15, 17)
>>> adap_pad = AdaptivePadding(
>>> kernel_size=kernel_size,
>>> stride=stride,
>>> dilation=dilation,
>>> padding="corner")
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
>>> input = torch.rand(1, 1, 16, 17)
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
"""
def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
super(AdaptivePadding, self).__init__()
assert padding in ('same', 'corner')
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
padding = to_2tuple(padding)
dilation = to_2tuple(dilation)
self.padding = padding
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
def get_pad_shape(self, input_shape):
input_h, input_w = input_shape
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.stride
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
pad_h = max((output_h - 1) * stride_h +
(kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
pad_w = max((output_w - 1) * stride_w +
(kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
return pad_h, pad_w
def forward(self, x):
pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
if pad_h > 0 or pad_w > 0:
if self.padding == 'corner':
x = F.pad(x, [0, pad_w, 0, pad_h])
elif self.padding == 'same':
x = F.pad(x, [
pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2
])
return x
class PatchEmbed(BaseModule):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
        conv_type (str): The type selection of the embedding conv layer.
            Default: "Conv2d".
kernel_size (int): The kernel_size of embedding conv. Default: 16.
        stride (int): The slide stride of embedding conv.
            Default: 16. If None, it will be set to `kernel_size`.
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int): The dilation rate of embedding conv. Default: 1.
bias (bool): Bias of embed conv. Default: True.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
        input_size (int | tuple | None): The size of input, which will be
            used to calculate the out size. Only works when `dynamic_size`
            is False. Default: None.
init_cfg (`mmengine.ConfigDict`, optional): The Config for
initialization. Default: None.
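    Example:
        A minimal sketch with illustrative Swin-like settings (4x4
        non-overlapping patches):
        >>> patch_embed = PatchEmbed(
        ...     in_channels=3, embed_dims=96, kernel_size=4, stride=4)
        >>> x = torch.rand(1, 3, 224, 224)
        >>> out, out_size = patch_embed(x)
        >>> assert out.shape == (1, 56 * 56, 96) and out_size == (56, 56)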
"""
def __init__(self,
in_channels: int = 3,
embed_dims: int = 768,
conv_type: str = 'Conv2d',
kernel_size: int = 16,
stride: int = 16,
padding: Union[int, tuple, str] = 'corner',
dilation: int = 1,
bias: bool = True,
norm_cfg: OptConfigType = None,
input_size: Union[int, tuple] = None,
init_cfg: OptConfigType = None) -> None:
super(PatchEmbed, self).__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of conv
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.projection = build_conv_layer(
dict(type=conv_type),
in_channels=in_channels,
out_channels=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
if input_size:
input_size = to_2tuple(input_size)
            # `init_out_size` would be used outside to
            # calculate the num_patches
            # when `use_abs_pos_embed` is used outside
self.init_input_size = input_size
if self.adap_padding:
pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)
input_h, input_w = input_size
input_h = input_h + pad_h
input_w = input_w + pad_w
input_size = (input_h, input_w)
# https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
h_out = (input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
w_out = (input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.init_out_size = (h_out, w_out)
else:
self.init_input_size = None
self.init_out_size = None
def forward(self, x: Tensor) -> Tuple[Tensor, Tuple[int]]:
"""
Args:
x (Tensor): Has shape (B, C, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adap_padding:
x = self.adap_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3])
x = x.flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size
class PatchMerging(BaseModule):
"""Merge patch feature map.
    This layer groups the feature map by kernel_size, and applies norm and
    linear layers to the grouped feature map. Our implementation uses
    `nn.Unfold` to merge patches, which is about 25% faster than the original
    implementation. However, we need to modify pretrained models for
    compatibility.
Args:
        in_channels (int): The num of input channels.
out_channels (int): The num of output channels.
kernel_size (int | tuple, optional): the kernel size in the unfold
layer. Defaults to 2.
stride (int | tuple, optional): the stride of the sliding blocks in the
unfold layer. Default: None. (Would be set as `kernel_size`)
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int | tuple, optional): dilation parameter in the unfold
layer. Default: 1.
bias (bool, optional): Whether to add bias in linear layer or not.
Defaults: False.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: dict(type='LN').
init_cfg (dict, optional): The extra config for initialization.
Default: None.
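    Example:
        A minimal sketch; merging halves the spatial size while the channel
        choice below (96 -> 192) is an illustrative assumption:
        >>> patch_merging = PatchMerging(in_channels=96, out_channels=192)
        >>> x = torch.rand(1, 56 * 56, 96)
        >>> out, out_size = patch_merging(x, (56, 56))
        >>> assert out.shape == (1, 28 * 28, 192) and out_size == (28, 28)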
"""
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: Optional[Union[int, tuple]] = 2,
stride: Optional[Union[int, tuple]] = None,
padding: Union[int, tuple, str] = 'corner',
dilation: Optional[Union[int, tuple]] = 1,
bias: Optional[bool] = False,
norm_cfg: OptConfigType = dict(type='LN'),
init_cfg: OptConfigType = None) -> None:
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
        if stride is None:
            stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of unfold
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.sampler = nn.Unfold(
kernel_size=kernel_size,
dilation=dilation,
padding=padding,
stride=stride)
sample_dim = kernel_size[0] * kernel_size[1] * in_channels
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, sample_dim)[1]
else:
self.norm = None
self.reduction = nn.Linear(sample_dim, out_channels, bias=bias)
def forward(self, x: Tensor,
input_size: Tuple[int]) -> Tuple[Tensor, Tuple[int]]:
"""
Args:
x (Tensor): Has shape (B, H*W, C_in).
input_size (tuple[int]): The spatial shape of x, arrange as (H, W).
Default: None.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)
- out_size (tuple[int]): Spatial shape of x, arrange as
(Merged_H, Merged_W).
"""
B, L, C = x.shape
        assert isinstance(input_size, Sequence), \
            f'Expect input_size to be a `Sequence`, but got {input_size}'
H, W = input_size
assert L == H * W, 'input feature has wrong size'
x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W
# Use nn.Unfold to merge patch. About 25% faster than original method,
# but need to modify pretrained model for compatibility
if self.adap_padding:
x = self.adap_padding(x)
H, W = x.shape[-2:]
x = self.sampler(x)
# if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2)
out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] *
(self.sampler.kernel_size[0] - 1) -
1) // self.sampler.stride[0] + 1
out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] *
(self.sampler.kernel_size[1] - 1) -
1) // self.sampler.stride[1] + 1
output_size = (out_h, out_w)
x = x.transpose(1, 2) # B, H/2*W/2, 4*C
x = self.norm(x) if self.norm else x
x = self.reduction(x)
return x, output_size
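# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal, hedged example
# of `PatchMerging`. With the default 2x2 kernel and stride it halves a 14x14
# token grid while doubling the channel dim; the concrete sizes below are
# illustrative assumptions, and the shapes follow the `forward` docstring.
def _patch_merging_example() -> None:
    merger = PatchMerging(in_channels=96, out_channels=192)
    x = torch.rand(2, 14 * 14, 96)  # (B, H*W, C_in)
    out, out_size = merger(x, input_size=(14, 14))
    assert out.shape == (2, 7 * 7, 192)  # (B, Merged_H*Merged_W, C_out)
    assert out_size == (7, 7)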
class ConditionalAttention(BaseModule):
"""A wrapper of conditional attention, dropout and residual connection.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads.
attn_drop (float): A Dropout layer on attn_output_weights.
Default: 0.0.
        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
            Default: 0.0.
cross_attn (bool): Whether the attention module is for cross attention.
Default: False
keep_query_pos (bool): Whether to transform query_pos before cross
attention.
Default: False.
        batch_first (bool): When it is True, Key, Query and Value are of
            shape (batch, n, embed_dim), otherwise (n, batch, embed_dim).
            Default: True.
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims: int,
num_heads: int,
attn_drop: float = 0.,
proj_drop: float = 0.,
cross_attn: bool = False,
keep_query_pos: bool = False,
batch_first: bool = True,
init_cfg: OptMultiConfig = None):
super().__init__(init_cfg=init_cfg)
        assert batch_first is True, \
            'Setting `batch_first` to False is NOT supported in ' \
            'ConditionalAttention. The first dimension of all DETRs in ' \
            'mmdet is `batch`, please set `batch_first` to True.'
self.cross_attn = cross_attn
self.keep_query_pos = keep_query_pos
self.embed_dims = embed_dims
self.num_heads = num_heads
self.attn_drop = Dropout(attn_drop)
self.proj_drop = Dropout(proj_drop)
self._init_layers()
def _init_layers(self):
"""Initialize layers for qkv projection."""
embed_dims = self.embed_dims
self.qcontent_proj = Linear(embed_dims, embed_dims)
self.qpos_proj = Linear(embed_dims, embed_dims)
self.kcontent_proj = Linear(embed_dims, embed_dims)
self.kpos_proj = Linear(embed_dims, embed_dims)
self.v_proj = Linear(embed_dims, embed_dims)
if self.cross_attn:
self.qpos_sine_proj = Linear(embed_dims, embed_dims)
self.out_proj = Linear(embed_dims, embed_dims)
nn.init.constant_(self.out_proj.bias, 0.)
def forward_attn(self,
query: Tensor,
key: Tensor,
value: Tensor,
attn_mask: Tensor = None,
key_padding_mask: Tensor = None) -> Tuple[Tensor]:
"""Forward process for `ConditionalAttention`.
Args:
query (Tensor): The input query with shape [bs, num_queries,
embed_dims].
key (Tensor): The key tensor with shape [bs, num_keys,
embed_dims].
If None, the `query` will be used. Defaults to None.
value (Tensor): The value tensor with same shape as `key`.
Same in `nn.MultiheadAttention.forward`. Defaults to None.
If None, the `key` will be used.
attn_mask (Tensor): ByteTensor mask with shape [num_queries,
num_keys]. Same in `nn.MultiheadAttention.forward`.
Defaults to None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
Defaults to None.
Returns:
            Tuple[Tensor]: Attention outputs of shape :math:`(N, L, E)`,
                where :math:`N` is the batch size, :math:`L` is the target
                sequence length, and :math:`E` is the embedding dimension
                `embed_dim`, together with attention weights averaged over
                heads, of shape :math:`(N, L, S)`, where :math:`S` is the
                source sequence length.
"""
        assert key.size(1) == value.size(1), \
            'key and value must have the same sequence length'
        assert query.size(0) == key.size(0) == value.size(0), \
            'batch size must be equal for query, key and value'
        assert query.size(2) == key.size(2), \
            'q_dims and k_dims must be equal'
        assert value.size(2) == self.embed_dims, \
            'v_dims must be equal to embed_dims'
bs, tgt_len, hidden_dims = query.size()
_, src_len, _ = key.size()
head_dims = hidden_dims // self.num_heads
v_head_dims = self.embed_dims // self.num_heads
        assert head_dims * self.num_heads == hidden_dims, \
            'hidden_dims must be divisible by num_heads'
scaling = float(head_dims)**-0.5
q = query * scaling
k = key
v = value
if attn_mask is not None:
            assert attn_mask.dtype in (torch.float32, torch.float64,
                                       torch.float16, torch.uint8,
                                       torch.bool), \
                'Only float, byte, and bool types are supported for attn_mask'
            if attn_mask.dtype == torch.uint8:
                warnings.warn('Byte tensor for attn_mask is deprecated. '
                              'Use bool tensor instead.')
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(1), key.size(1)]:
raise RuntimeError(
'The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [
bs * self.num_heads,
query.size(1),
key.size(1)
]:
raise RuntimeError(
'The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError(
"attn_mask's dimension {} is not supported".format(
attn_mask.dim()))
# attn_mask's dim is 3 now.
if key_padding_mask is not None and key_padding_mask.dtype == int:
key_padding_mask = key_padding_mask.to(torch.bool)
q = q.contiguous().view(bs, tgt_len, self.num_heads,
head_dims).permute(0, 2, 1, 3).flatten(0, 1)
if k is not None:
k = k.contiguous().view(bs, src_len, self.num_heads,
head_dims).permute(0, 2, 1,
3).flatten(0, 1)
if v is not None:
v = v.contiguous().view(bs, src_len, self.num_heads,
v_head_dims).permute(0, 2, 1,
3).flatten(0, 1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bs
assert key_padding_mask.size(1) == src_len
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [
bs * self.num_heads, tgt_len, src_len
]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(
bs, self.num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(
bs * self.num_heads, tgt_len, src_len)
attn_output_weights = F.softmax(
attn_output_weights -
attn_output_weights.max(dim=-1, keepdim=True)[0],
dim=-1)
attn_output_weights = self.attn_drop(attn_output_weights)
attn_output = torch.bmm(attn_output_weights, v)
assert list(
attn_output.size()) == [bs * self.num_heads, tgt_len, v_head_dims]
attn_output = attn_output.view(bs, self.num_heads, tgt_len,
v_head_dims).permute(0, 2, 1,
3).flatten(2)
attn_output = self.out_proj(attn_output)
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bs, self.num_heads,
tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / self.num_heads
def forward(self,
query: Tensor,
key: Tensor,
query_pos: Tensor = None,
ref_sine_embed: Tensor = None,
key_pos: Tensor = None,
attn_mask: Tensor = None,
key_padding_mask: Tensor = None,
is_first: bool = False) -> Tensor:
"""Forward function for `ConditionalAttention`.
Args:
query (Tensor): The input query with shape [bs, num_queries,
embed_dims].
key (Tensor): The key tensor with shape [bs, num_keys,
embed_dims].
If None, the `query` will be used. Defaults to None.
query_pos (Tensor): The positional encoding for query in self
attention, with the same shape as `x`. If not None, it will
be added to `x` before forward function.
Defaults to None.
            ref_sine_embed (Tensor): The positional encoding for query in
                cross attention, with the same shape as `x`. If not None, it
                will be added to `x` before forward function.
                Defaults to None.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`. Defaults to None. If not None, it will
be added to `key` before forward function. If None, and
`query_pos` has the same shape as `key`, then `query_pos`
will be used for `key_pos`. Defaults to None.
attn_mask (Tensor): ByteTensor mask with shape [num_queries,
num_keys]. Same in `nn.MultiheadAttention.forward`.
Defaults to None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
Defaults to None.
            is_first (bool): An indicator to tell whether the current layer
                is the first layer of the decoder.
                Defaults to False.
Returns:
Tensor: forwarded results with shape
[bs, num_queries, embed_dims].
"""
if self.cross_attn:
q_content = self.qcontent_proj(query)
k_content = self.kcontent_proj(key)
v = self.v_proj(key)
bs, nq, c = q_content.size()
_, hw, _ = k_content.size()
k_pos = self.kpos_proj(key_pos)
if is_first or self.keep_query_pos:
q_pos = self.qpos_proj(query_pos)
q = q_content + q_pos
k = k_content + k_pos
else:
q = q_content
k = k_content
q = q.view(bs, nq, self.num_heads, c // self.num_heads)
query_sine_embed = self.qpos_sine_proj(ref_sine_embed)
query_sine_embed = query_sine_embed.view(bs, nq, self.num_heads,
c // self.num_heads)
q = torch.cat([q, query_sine_embed], dim=3).view(bs, nq, 2 * c)
k = k.view(bs, hw, self.num_heads, c // self.num_heads)
k_pos = k_pos.view(bs, hw, self.num_heads, c // self.num_heads)
k = torch.cat([k, k_pos], dim=3).view(bs, hw, 2 * c)
ca_output = self.forward_attn(
query=q,
key=k,
value=v,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask)[0]
query = query + self.proj_drop(ca_output)
else:
q_content = self.qcontent_proj(query)
q_pos = self.qpos_proj(query_pos)
k_content = self.kcontent_proj(query)
k_pos = self.kpos_proj(query_pos)
v = self.v_proj(query)
q = q_content if q_pos is None else q_content + q_pos
k = k_content if k_pos is None else k_content + k_pos
sa_output = self.forward_attn(
query=q,
key=k,
value=v,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask)[0]
query = query + self.proj_drop(sa_output)
return query
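# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): `ConditionalAttention` as
# decoder self-attention, where content and positional parts are projected by
# separate linear layers before attention. Tensor sizes are illustrative
# assumptions.
def _conditional_attention_example() -> None:
    self_attn = ConditionalAttention(embed_dims=256, num_heads=8)
    query = torch.rand(2, 300, 256)      # (bs, num_queries, embed_dims)
    query_pos = torch.rand(2, 300, 256)  # positional encoding of the query
    out = self_attn(query=query, key=query, query_pos=query_pos)
    assert out.shape == (2, 300, 256)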
class MLP(BaseModule):
"""Very simple multi-layer perceptron (also called FFN) with relu. Mostly
used in DETR series detectors.
Args:
input_dim (int): Feature dim of the input tensor.
hidden_dim (int): Feature dim of the hidden layer.
output_dim (int): Feature dim of the output tensor.
        num_layers (int): Number of FFN layers. ReLU is applied after every
            layer except the last one, which contains only a Linear layer.
"""
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int,
num_layers: int) -> None:
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = ModuleList(
Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x: Tensor) -> Tensor:
"""Forward function of MLP.
Args:
x (Tensor): The input feature, has shape
(num_queries, bs, input_dim).
Returns:
Tensor: The output feature, has shape
(num_queries, bs, output_dim).
"""
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
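# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a 3-layer `MLP` as used for
# DETR-style box regression, mapping 256-d query embeddings to 4 box
# parameters. Sizes are illustrative assumptions; `Linear` applies to the
# last dim, so the leading dims may also be (bs, num_queries).
def _mlp_example() -> None:
    reg_branch = MLP(
        input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    query_feats = torch.rand(2, 100, 256)
    assert reg_branch(query_feats).shape == (2, 100, 4)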
@MODELS.register_module()
class DynamicConv(BaseModule):
"""Implements Dynamic Convolution.
    This module generates parameters for each sample and
    uses bmm to implement 1x1 convolution. Code is modified
    from the `official github repo <https://github.com/PeizeSun/
    SparseR-CNN/blob/main/projects/SparseRCNN/sparsercnn/head.py#L258>`_ .
Args:
in_channels (int): The input feature channel.
Defaults to 256.
feat_channels (int): The inner feature channel.
Defaults to 64.
out_channels (int, optional): The output feature channel.
When not specified, it will be set to `in_channels`
by default
input_feat_shape (int): The shape of input feature.
Defaults to 7.
        with_proj (bool): Project two-dimensional feature to
            one-dimensional feature. Defaults to True.
        act_cfg (dict): The activation config for DynamicConv.
        norm_cfg (dict): Config dict for normalization layer. Defaults to
            layer normalization.
init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
in_channels: int = 256,
feat_channels: int = 64,
out_channels: Optional[int] = None,
input_feat_shape: int = 7,
with_proj: bool = True,
act_cfg: OptConfigType = dict(type='ReLU', inplace=True),
norm_cfg: OptConfigType = dict(type='LN'),
init_cfg: OptConfigType = None) -> None:
super(DynamicConv, self).__init__(init_cfg)
self.in_channels = in_channels
self.feat_channels = feat_channels
self.out_channels_raw = out_channels
self.input_feat_shape = input_feat_shape
self.with_proj = with_proj
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.out_channels = out_channels if out_channels else in_channels
self.num_params_in = self.in_channels * self.feat_channels
self.num_params_out = self.out_channels * self.feat_channels
self.dynamic_layer = nn.Linear(
self.in_channels, self.num_params_in + self.num_params_out)
self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1]
self.activation = build_activation_layer(act_cfg)
num_output = self.out_channels * input_feat_shape**2
if self.with_proj:
self.fc_layer = nn.Linear(num_output, self.out_channels)
self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]
def forward(self, param_feature: Tensor, input_feature: Tensor) -> Tensor:
"""Forward function for `DynamicConv`.
Args:
param_feature (Tensor): The feature can be used
to generate the parameter, has shape
(num_all_proposals, in_channels).
input_feature (Tensor): Feature that
interact with parameters, has shape
(num_all_proposals, in_channels, H, W).
Returns:
Tensor: The output feature has shape
(num_all_proposals, out_channels).
"""
        input_feature = input_feature.flatten(2).permute(0, 2, 1)
parameters = self.dynamic_layer(param_feature)
param_in = parameters[:, :self.num_params_in].view(
-1, self.in_channels, self.feat_channels)
param_out = parameters[:, -self.num_params_out:].view(
-1, self.feat_channels, self.out_channels)
# input_feature has shape (num_all_proposals, H*W, in_channels)
# param_in has shape (num_all_proposals, in_channels, feat_channels)
# feature has shape (num_all_proposals, H*W, feat_channels)
features = torch.bmm(input_feature, param_in)
features = self.norm_in(features)
features = self.activation(features)
# param_out has shape (batch_size, feat_channels, out_channels)
features = torch.bmm(features, param_out)
features = self.norm_out(features)
features = self.activation(features)
if self.with_proj:
features = features.flatten(1)
features = self.fc_layer(features)
features = self.fc_norm(features)
features = self.activation(features)
return features
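# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): `DynamicConv` generates
# per-proposal parameters from a 256-d feature and applies them to 7x7 RoI
# features as two bmm-based 1x1 convolutions. The proposal count of 100 is an
# illustrative assumption.
def _dynamic_conv_example() -> None:
    dyn_conv = DynamicConv(in_channels=256, feat_channels=64,
                           input_feat_shape=7)
    param_feature = torch.rand(100, 256)        # (num_all_proposals, C)
    input_feature = torch.rand(100, 256, 7, 7)  # (num_all_proposals, C, H, W)
    out = dyn_conv(param_feature, input_feature)
    assert out.shape == (100, 256)  # with_proj=True projects back to C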
| 35,539 | 39.524515 | 79 | py |
ERD | ERD-main/mmdet/models/layers/transformer/dab_detr_layers.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
import torch.nn as nn
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.transformer import FFN
from mmengine.model import ModuleList
from torch import Tensor
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .utils import (MLP, ConditionalAttention, coordinate_to_encoding,
inverse_sigmoid)
class DABDetrTransformerDecoderLayer(DetrTransformerDecoderLayer):
"""Implements decoder layer in DAB-DETR transformer."""
def _init_layers(self):
"""Initialize self-attention, cross-attention, FFN, normalization and
others."""
self.self_attn = ConditionalAttention(**self.self_attn_cfg)
self.cross_attn = ConditionalAttention(**self.cross_attn_cfg)
self.embed_dims = self.self_attn.embed_dims
self.ffn = FFN(**self.ffn_cfg)
norms_list = [
build_norm_layer(self.norm_cfg, self.embed_dims)[1]
for _ in range(3)
]
self.norms = ModuleList(norms_list)
self.keep_query_pos = self.cross_attn.keep_query_pos
def forward(self,
query: Tensor,
key: Tensor,
query_pos: Tensor,
key_pos: Tensor,
ref_sine_embed: Tensor = None,
self_attn_masks: Tensor = None,
cross_attn_masks: Tensor = None,
key_padding_mask: Tensor = None,
is_first: bool = False,
**kwargs) -> Tensor:
"""
Args:
query (Tensor): The input query with shape [bs, num_queries,
dim].
key (Tensor): The key tensor with shape [bs, num_keys,
dim].
query_pos (Tensor): The positional encoding for query in self
attention, with the same shape as `x`.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`.
ref_sine_embed (Tensor): The positional encoding for query in
cross attention, with the same shape as `x`.
Defaults to None.
self_attn_masks (Tensor): ByteTensor mask with shape [num_queries,
num_keys]. Same in `nn.MultiheadAttention.forward`.
Defaults to None.
cross_attn_masks (Tensor): ByteTensor mask with shape [num_queries,
num_keys]. Same in `nn.MultiheadAttention.forward`.
Defaults to None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
Defaults to None.
            is_first (bool): An indicator to tell whether the current layer
                is the first layer of the decoder.
                Defaults to False.
Returns:
Tensor: forwarded results with shape
[bs, num_queries, dim].
"""
query = self.self_attn(
query=query,
key=query,
query_pos=query_pos,
key_pos=query_pos,
attn_mask=self_attn_masks,
**kwargs)
query = self.norms[0](query)
query = self.cross_attn(
query=query,
key=key,
query_pos=query_pos,
key_pos=key_pos,
ref_sine_embed=ref_sine_embed,
attn_mask=cross_attn_masks,
key_padding_mask=key_padding_mask,
is_first=is_first,
**kwargs)
query = self.norms[1](query)
query = self.ffn(query)
query = self.norms[2](query)
return query
class DABDetrTransformerDecoder(DetrTransformerDecoder):
"""Decoder of DAB-DETR.
Args:
query_dim (int): The last dimension of query pos,
4 for anchor format, 2 for point format.
Defaults to 4.
query_scale_type (str): Type of transformation applied
to content query. Defaults to `cond_elewise`.
with_modulated_hw_attn (bool): Whether to inject h&w info
during cross conditional attention. Defaults to True.
"""
def __init__(self,
*args,
query_dim: int = 4,
query_scale_type: str = 'cond_elewise',
with_modulated_hw_attn: bool = True,
**kwargs):
self.query_dim = query_dim
self.query_scale_type = query_scale_type
self.with_modulated_hw_attn = with_modulated_hw_attn
super().__init__(*args, **kwargs)
def _init_layers(self):
"""Initialize decoder layers and other layers."""
        assert self.query_dim in [2, 4], \
            'dab-detr only supports anchor prior or reference point prior'
assert self.query_scale_type in [
'cond_elewise', 'cond_scalar', 'fix_elewise'
]
self.layers = ModuleList([
DABDetrTransformerDecoderLayer(**self.layer_cfg)
for _ in range(self.num_layers)
])
embed_dims = self.layers[0].embed_dims
self.embed_dims = embed_dims
self.post_norm = build_norm_layer(self.post_norm_cfg, embed_dims)[1]
if self.query_scale_type == 'cond_elewise':
self.query_scale = MLP(embed_dims, embed_dims, embed_dims, 2)
elif self.query_scale_type == 'cond_scalar':
self.query_scale = MLP(embed_dims, embed_dims, 1, 2)
elif self.query_scale_type == 'fix_elewise':
self.query_scale = nn.Embedding(self.num_layers, embed_dims)
else:
raise NotImplementedError('Unknown query_scale_type: {}'.format(
self.query_scale_type))
self.ref_point_head = MLP(self.query_dim // 2 * embed_dims, embed_dims,
embed_dims, 2)
if self.with_modulated_hw_attn and self.query_dim == 4:
self.ref_anchor_head = MLP(embed_dims, embed_dims, 2, 2)
self.keep_query_pos = self.layers[0].keep_query_pos
if not self.keep_query_pos:
for layer_id in range(self.num_layers - 1):
self.layers[layer_id + 1].cross_attn.qpos_proj = None
def forward(self,
query: Tensor,
key: Tensor,
query_pos: Tensor,
key_pos: Tensor,
reg_branches: nn.Module,
key_padding_mask: Tensor = None,
**kwargs) -> List[Tensor]:
"""Forward function of decoder.
Args:
query (Tensor): The input query with shape (bs, num_queries, dim).
key (Tensor): The input key with shape (bs, num_keys, dim).
query_pos (Tensor): The positional encoding for `query`, with the
same shape as `query`.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`.
reg_branches (nn.Module): The regression branch for dynamically
updating references in each layer.
key_padding_mask (Tensor): ByteTensor with shape (bs, num_keys).
Defaults to `None`.
Returns:
List[Tensor]: forwarded results with shape (num_decoder_layers,
bs, num_queries, dim) if `return_intermediate` is True, otherwise
with shape (1, bs, num_queries, dim). references with shape
(num_decoder_layers, bs, num_queries, 2/4).
"""
output = query
unsigmoid_references = query_pos
reference_points = unsigmoid_references.sigmoid()
intermediate_reference_points = [reference_points]
intermediate = []
for layer_id, layer in enumerate(self.layers):
obj_center = reference_points[..., :self.query_dim]
ref_sine_embed = coordinate_to_encoding(
coord_tensor=obj_center, num_feats=self.embed_dims // 2)
query_pos = self.ref_point_head(
ref_sine_embed) # [bs, nq, 2c] -> [bs, nq, c]
# For the first decoder layer, do not apply transformation
if self.query_scale_type != 'fix_elewise':
if layer_id == 0:
pos_transformation = 1
else:
pos_transformation = self.query_scale(output)
else:
pos_transformation = self.query_scale.weight[layer_id]
# apply transformation
ref_sine_embed = ref_sine_embed[
..., :self.embed_dims] * pos_transformation
            # modulated height and width attention
if self.with_modulated_hw_attn:
assert obj_center.size(-1) == 4
ref_hw = self.ref_anchor_head(output).sigmoid()
ref_sine_embed[..., self.embed_dims // 2:] *= \
(ref_hw[..., 0] / obj_center[..., 2]).unsqueeze(-1)
ref_sine_embed[..., : self.embed_dims // 2] *= \
(ref_hw[..., 1] / obj_center[..., 3]).unsqueeze(-1)
output = layer(
output,
key,
query_pos=query_pos,
ref_sine_embed=ref_sine_embed,
key_pos=key_pos,
key_padding_mask=key_padding_mask,
is_first=(layer_id == 0),
**kwargs)
# iter update
tmp_reg_preds = reg_branches(output)
tmp_reg_preds[..., :self.query_dim] += inverse_sigmoid(
reference_points)
new_reference_points = tmp_reg_preds[
..., :self.query_dim].sigmoid()
if layer_id != self.num_layers - 1:
intermediate_reference_points.append(new_reference_points)
reference_points = new_reference_points.detach()
if self.return_intermediate:
intermediate.append(self.post_norm(output))
output = self.post_norm(output)
if self.return_intermediate:
return [
torch.stack(intermediate),
torch.stack(intermediate_reference_points),
]
else:
return [
output.unsqueeze(0),
torch.stack(intermediate_reference_points)
]
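# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): the DAB decoder loop, where
# 4-d unsigmoided anchors act as `query_pos` and a shared regression branch
# refines them layer by layer. The attention configs mirror the arguments of
# `ConditionalAttention` and, like all tensor sizes here, are illustrative
# assumptions rather than the official DAB-DETR config.
def _dab_decoder_example() -> None:
    decoder = DABDetrTransformerDecoder(
        num_layers=2,
        layer_cfg=dict(
            self_attn_cfg=dict(embed_dims=256, num_heads=8, attn_drop=0.),
            cross_attn_cfg=dict(
                embed_dims=256, num_heads=8, attn_drop=0., cross_attn=True)))
    reg_branch = MLP(256, 256, 4, 3)  # shared across decoder layers
    query = torch.zeros(2, 30, 256)
    anchors = torch.rand(2, 30, 4)  # unsigmoided (cx, cy, w, h)
    memory = torch.rand(2, 475, 256)
    key_pos = torch.rand(2, 475, 256)
    out, refs = decoder(query, memory, query_pos=anchors, key_pos=key_pos,
                        reg_branches=reg_branch)
    assert out.shape == (2, 2, 30, 256)  # (num_layers, bs, nq, dim)
    assert refs.shape == (2, 2, 30, 4)   # initial + refined anchors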
class DABDetrTransformerEncoder(DetrTransformerEncoder):
"""Encoder of DAB-DETR."""
def _init_layers(self):
"""Initialize encoder layers."""
self.layers = ModuleList([
DetrTransformerEncoderLayer(**self.layer_cfg)
for _ in range(self.num_layers)
])
embed_dims = self.layers[0].embed_dims
self.embed_dims = embed_dims
self.query_scale = MLP(embed_dims, embed_dims, embed_dims, 2)
def forward(self, query: Tensor, query_pos: Tensor,
key_padding_mask: Tensor, **kwargs):
"""Forward function of encoder.
Args:
query (Tensor): Input queries of encoder, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional embeddings of the queries, has
shape (bs, num_feat_points, dim).
key_padding_mask (Tensor): ByteTensor, the key padding mask
of the queries, has shape (bs, num_feat_points).
Returns:
Tensor: With shape (num_queries, bs, dim).
"""
for layer in self.layers:
pos_scales = self.query_scale(query)
query = layer(
query,
query_pos=query_pos * pos_scales,
key_padding_mask=key_padding_mask,
**kwargs)
return query
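# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): the DAB-DETR encoder
# rescales `query_pos` with a learned per-layer MLP (`query_scale`). The
# default 256-d layer config and all sizes are illustrative assumptions.
def _dab_encoder_example() -> None:
    encoder = DABDetrTransformerEncoder(num_layers=2, layer_cfg=dict())
    feats = torch.rand(2, 19 * 25, 256)  # flattened image features
    pos = torch.rand(2, 19 * 25, 256)    # sine positional encoding
    mask = torch.zeros(2, 19 * 25, dtype=torch.bool)
    out = encoder(feats, query_pos=pos, key_padding_mask=mask)
    assert out.shape == (2, 19 * 25, 256)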
| 11,683 | 38.076923 | 79 | py |
ERD | ERD-main/mmdet/models/layers/transformer/deformable_detr_layers.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union
import torch
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmcv.ops import MultiScaleDeformableAttention
from mmengine.model import ModuleList
from torch import Tensor, nn
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .utils import inverse_sigmoid
class DeformableDetrTransformerEncoder(DetrTransformerEncoder):
"""Transformer encoder of Deformable DETR."""
def _init_layers(self) -> None:
"""Initialize encoder layers."""
self.layers = ModuleList([
DeformableDetrTransformerEncoderLayer(**self.layer_cfg)
for _ in range(self.num_layers)
])
self.embed_dims = self.layers[0].embed_dims
def forward(self, query: Tensor, query_pos: Tensor,
key_padding_mask: Tensor, spatial_shapes: Tensor,
level_start_index: Tensor, valid_ratios: Tensor,
**kwargs) -> Tensor:
"""Forward function of Transformer encoder.
Args:
query (Tensor): The input query, has shape (bs, num_queries, dim).
query_pos (Tensor): The positional encoding for query, has shape
(bs, num_queries, dim).
key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`
input. ByteTensor, has shape (bs, num_queries).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
Returns:
Tensor: Output queries of Transformer encoder, which is also
called 'encoder output embeddings' or 'memory', has shape
(bs, num_queries, dim)
"""
reference_points = self.get_encoder_reference_points(
spatial_shapes, valid_ratios, device=query.device)
for layer in self.layers:
query = layer(
query=query,
query_pos=query_pos,
key_padding_mask=key_padding_mask,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reference_points=reference_points,
**kwargs)
return query
@staticmethod
def get_encoder_reference_points(
spatial_shapes: Tensor, valid_ratios: Tensor,
device: Union[torch.device, str]) -> Tensor:
"""Get the reference points used in encoder.
Args:
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
device (obj:`device` or str): The device acquired by the
`reference_points`.
Returns:
Tensor: Reference points used in decoder, has shape (bs, length,
num_levels, 2).
"""
reference_points_list = []
for lvl, (H, W) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(
torch.linspace(
0.5, H - 0.5, H, dtype=torch.float32, device=device),
torch.linspace(
0.5, W - 0.5, W, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (
valid_ratios[:, None, lvl, 1] * H)
ref_x = ref_x.reshape(-1)[None] / (
valid_ratios[:, None, lvl, 0] * W)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
# [bs, sum(hw), num_level, 2]
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
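# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): reference points for a
# single 2x3 feature level on a fully valid image (valid ratios of 1), i.e.
# the normalized cell centers of every location. Sizes are illustrative.
def _encoder_reference_points_example() -> None:
    spatial_shapes = torch.tensor([[2, 3]])  # one level with (h, w) = (2, 3)
    valid_ratios = torch.ones(1, 1, 2)       # (bs, num_levels, 2)
    ref = DeformableDetrTransformerEncoder.get_encoder_reference_points(
        spatial_shapes, valid_ratios, device='cpu')
    assert ref.shape == (1, 6, 1, 2)  # (bs, sum(h*w), num_levels, 2)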
class DeformableDetrTransformerDecoder(DetrTransformerDecoder):
"""Transformer Decoder of Deformable DETR."""
def _init_layers(self) -> None:
"""Initialize decoder layers."""
self.layers = ModuleList([
DeformableDetrTransformerDecoderLayer(**self.layer_cfg)
for _ in range(self.num_layers)
])
self.embed_dims = self.layers[0].embed_dims
if self.post_norm_cfg is not None:
raise ValueError('There is not post_norm in '
f'{self._get_name()}')
def forward(self,
query: Tensor,
query_pos: Tensor,
value: Tensor,
key_padding_mask: Tensor,
reference_points: Tensor,
spatial_shapes: Tensor,
level_start_index: Tensor,
valid_ratios: Tensor,
reg_branches: Optional[nn.Module] = None,
**kwargs) -> Tuple[Tensor]:
"""Forward function of Transformer decoder.
Args:
query (Tensor): The input queries, has shape (bs, num_queries,
dim).
query_pos (Tensor): The input positional query, has shape
(bs, num_queries, dim). It will be added to `query` before
forward function.
value (Tensor): The input values, has shape (bs, num_value, dim).
key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn`
input. ByteTensor, has shape (bs, num_value).
reference_points (Tensor): The initial reference, has shape
(bs, num_queries, 4) with the last dimension arranged as
(cx, cy, w, h) when `as_two_stage` is `True`, otherwise has
shape (bs, num_queries, 2) with the last dimension arranged
as (cx, cy).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
reg_branches: (obj:`nn.ModuleList`, optional): Used for refining
the regression results. Only would be passed when
`with_box_refine` is `True`, otherwise would be `None`.
Returns:
tuple[Tensor]: Outputs of Deformable Transformer Decoder.
- output (Tensor): Output embeddings of the last decoder, has
shape (num_queries, bs, embed_dims) when `return_intermediate`
is `False`. Otherwise, Intermediate output embeddings of all
decoder layers, has shape (num_decoder_layers, num_queries, bs,
embed_dims).
- reference_points (Tensor): The reference of the last decoder
layer, has shape (bs, num_queries, 4) when `return_intermediate`
is `False`. Otherwise, Intermediate references of all decoder
layers, has shape (num_decoder_layers, bs, num_queries, 4). The
coordinates are arranged as (cx, cy, w, h)
"""
output = query
intermediate = []
intermediate_reference_points = []
for layer_id, layer in enumerate(self.layers):
if reference_points.shape[-1] == 4:
reference_points_input = \
reference_points[:, :, None] * \
torch.cat([valid_ratios, valid_ratios], -1)[:, None]
else:
assert reference_points.shape[-1] == 2
reference_points_input = \
reference_points[:, :, None] * \
valid_ratios[:, None]
output = layer(
output,
query_pos=query_pos,
value=value,
key_padding_mask=key_padding_mask,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reference_points=reference_points_input,
**kwargs)
if reg_branches is not None:
tmp_reg_preds = reg_branches[layer_id](output)
if reference_points.shape[-1] == 4:
new_reference_points = tmp_reg_preds + inverse_sigmoid(
reference_points)
new_reference_points = new_reference_points.sigmoid()
else:
assert reference_points.shape[-1] == 2
new_reference_points = tmp_reg_preds
new_reference_points[..., :2] = tmp_reg_preds[
..., :2] + inverse_sigmoid(reference_points)
new_reference_points = new_reference_points.sigmoid()
reference_points = new_reference_points.detach()
if self.return_intermediate:
intermediate.append(output)
intermediate_reference_points.append(reference_points)
if self.return_intermediate:
return torch.stack(intermediate), torch.stack(
intermediate_reference_points)
return output, reference_points
class DeformableDetrTransformerEncoderLayer(DetrTransformerEncoderLayer):
"""Encoder layer of Deformable DETR."""
def _init_layers(self) -> None:
"""Initialize self_attn, ffn, and norms."""
self.self_attn = MultiScaleDeformableAttention(**self.self_attn_cfg)
self.embed_dims = self.self_attn.embed_dims
self.ffn = FFN(**self.ffn_cfg)
norms_list = [
build_norm_layer(self.norm_cfg, self.embed_dims)[1]
for _ in range(2)
]
self.norms = ModuleList(norms_list)
class DeformableDetrTransformerDecoderLayer(DetrTransformerDecoderLayer):
"""Decoder layer of Deformable DETR."""
def _init_layers(self) -> None:
"""Initialize self_attn, cross-attn, ffn, and norms."""
self.self_attn = MultiheadAttention(**self.self_attn_cfg)
self.cross_attn = MultiScaleDeformableAttention(**self.cross_attn_cfg)
self.embed_dims = self.self_attn.embed_dims
self.ffn = FFN(**self.ffn_cfg)
norms_list = [
build_norm_layer(self.norm_cfg, self.embed_dims)[1]
for _ in range(3)
]
self.norms = ModuleList(norms_list)
| 11,274 | 43.920319 | 79 | py |
ERD | ERD-main/mmdet/models/layers/transformer/conditional_detr_layers.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.transformer import FFN
from torch import Tensor
from torch.nn import ModuleList
from .detr_layers import DetrTransformerDecoder, DetrTransformerDecoderLayer
from .utils import MLP, ConditionalAttention, coordinate_to_encoding
class ConditionalDetrTransformerDecoder(DetrTransformerDecoder):
"""Decoder of Conditional DETR."""
def _init_layers(self) -> None:
"""Initialize decoder layers and other layers."""
self.layers = ModuleList([
ConditionalDetrTransformerDecoderLayer(**self.layer_cfg)
for _ in range(self.num_layers)
])
self.embed_dims = self.layers[0].embed_dims
self.post_norm = build_norm_layer(self.post_norm_cfg,
self.embed_dims)[1]
        # conditional detr affine
self.query_scale = MLP(self.embed_dims, self.embed_dims,
self.embed_dims, 2)
self.ref_point_head = MLP(self.embed_dims, self.embed_dims, 2, 2)
        # We substitute 'qpos_proj' with 'qpos_sine_proj' in all decoder
        # layers except the first one, so 'qpos_proj' should be deleted
        # in the other layers.
for layer_id in range(self.num_layers - 1):
self.layers[layer_id + 1].cross_attn.qpos_proj = None
def forward(self,
query: Tensor,
key: Tensor = None,
query_pos: Tensor = None,
key_pos: Tensor = None,
key_padding_mask: Tensor = None):
"""Forward function of decoder.
Args:
query (Tensor): The input query with shape
(bs, num_queries, dim).
key (Tensor): The input key with shape (bs, num_keys, dim) If
`None`, the `query` will be used. Defaults to `None`.
query_pos (Tensor): The positional encoding for `query`, with the
same shape as `query`. If not `None`, it will be added to
`query` before forward function. Defaults to `None`.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`. If not `None`, it will be added to
`key` before forward function. If `None`, and `query_pos`
has the same shape as `key`, then `query_pos` will be used
as `key_pos`. Defaults to `None`.
key_padding_mask (Tensor): ByteTensor with shape (bs, num_keys).
Defaults to `None`.
Returns:
List[Tensor]: forwarded results with shape (num_decoder_layers,
bs, num_queries, dim) if `return_intermediate` is True, otherwise
with shape (1, bs, num_queries, dim). References with shape
(bs, num_queries, 2).
"""
reference_unsigmoid = self.ref_point_head(
query_pos) # [bs, num_queries, 2]
reference = reference_unsigmoid.sigmoid()
reference_xy = reference[..., :2]
intermediate = []
for layer_id, layer in enumerate(self.layers):
if layer_id == 0:
pos_transformation = 1
else:
pos_transformation = self.query_scale(query)
# get sine embedding for the query reference
ref_sine_embed = coordinate_to_encoding(coord_tensor=reference_xy)
# apply transformation
ref_sine_embed = ref_sine_embed * pos_transformation
query = layer(
query,
key=key,
query_pos=query_pos,
key_pos=key_pos,
key_padding_mask=key_padding_mask,
ref_sine_embed=ref_sine_embed,
is_first=(layer_id == 0))
if self.return_intermediate:
intermediate.append(self.post_norm(query))
if self.return_intermediate:
return torch.stack(intermediate), reference
query = self.post_norm(query)
return query.unsqueeze(0), reference
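# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): the Conditional DETR
# decoder predicts 2-d reference points from `query_pos` and reuses their
# sine embeddings in every layer. The attention configs mirror the arguments
# of `ConditionalAttention` and, like all tensor sizes here, are illustrative
# assumptions rather than the official config.
def _conditional_decoder_example() -> None:
    decoder = ConditionalDetrTransformerDecoder(
        num_layers=2,
        layer_cfg=dict(
            self_attn_cfg=dict(embed_dims=256, num_heads=8, attn_drop=0.),
            cross_attn_cfg=dict(
                embed_dims=256, num_heads=8, attn_drop=0., cross_attn=True)))
    query = torch.zeros(2, 30, 256)
    query_pos = torch.rand(2, 30, 256)
    memory = torch.rand(2, 475, 256)
    key_pos = torch.rand(2, 475, 256)
    out, reference = decoder(
        query, key=memory, query_pos=query_pos, key_pos=key_pos)
    assert out.shape == (2, 2, 30, 256)   # (num_layers, bs, nq, dim)
    assert reference.shape == (2, 30, 2)  # sigmoid reference points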
class ConditionalDetrTransformerDecoderLayer(DetrTransformerDecoderLayer):
"""Implements decoder layer in Conditional DETR transformer."""
def _init_layers(self):
"""Initialize self-attention, cross-attention, FFN, and
normalization."""
self.self_attn = ConditionalAttention(**self.self_attn_cfg)
self.cross_attn = ConditionalAttention(**self.cross_attn_cfg)
self.embed_dims = self.self_attn.embed_dims
self.ffn = FFN(**self.ffn_cfg)
norms_list = [
build_norm_layer(self.norm_cfg, self.embed_dims)[1]
for _ in range(3)
]
self.norms = ModuleList(norms_list)
def forward(self,
query: Tensor,
key: Tensor = None,
query_pos: Tensor = None,
key_pos: Tensor = None,
self_attn_masks: Tensor = None,
cross_attn_masks: Tensor = None,
key_padding_mask: Tensor = None,
ref_sine_embed: Tensor = None,
is_first: bool = False):
"""
Args:
query (Tensor): The input query, has shape (bs, num_queries, dim)
key (Tensor, optional): The input key, has shape (bs, num_keys,
dim). If `None`, the `query` will be used. Defaults to `None`.
query_pos (Tensor, optional): The positional encoding for `query`,
has the same shape as `query`. If not `None`, it will be
added to `query` before forward function. Defaults to `None`.
ref_sine_embed (Tensor): The positional encoding for query in
cross attention, with the same shape as `x`. Defaults to None.
key_pos (Tensor, optional): The positional encoding for `key`, has
the same shape as `key`. If not None, it will be added to
`key` before forward function. If None, and `query_pos` has
the same shape as `key`, then `query_pos` will be used for
`key_pos`. Defaults to None.
self_attn_masks (Tensor, optional): ByteTensor mask, has shape
(num_queries, num_keys), Same in `nn.MultiheadAttention.
forward`. Defaults to None.
cross_attn_masks (Tensor, optional): ByteTensor mask, has shape
(num_queries, num_keys), Same in `nn.MultiheadAttention.
forward`. Defaults to None.
key_padding_mask (Tensor, optional): ByteTensor, has shape
(bs, num_keys). Defaults to None.
            is_first (bool): An indicator to tell whether the current layer
                is the first layer of the decoder. Defaults to False.
Returns:
Tensor: Forwarded results, has shape (bs, num_queries, dim).
"""
query = self.self_attn(
query=query,
key=query,
query_pos=query_pos,
key_pos=query_pos,
attn_mask=self_attn_masks)
query = self.norms[0](query)
query = self.cross_attn(
query=query,
key=key,
query_pos=query_pos,
key_pos=key_pos,
attn_mask=cross_attn_masks,
key_padding_mask=key_padding_mask,
ref_sine_embed=ref_sine_embed,
is_first=is_first)
query = self.norms[1](query)
query = self.ffn(query)
query = self.norms[2](query)
return query
| 7,563 | 43.233918 | 78 | py |
ERD | ERD-main/mmdet/models/layers/transformer/mask2former_layers.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import build_norm_layer
from mmengine.model import ModuleList
from torch import Tensor
from .deformable_detr_layers import DeformableDetrTransformerEncoder
from .detr_layers import DetrTransformerDecoder, DetrTransformerDecoderLayer
class Mask2FormerTransformerEncoder(DeformableDetrTransformerEncoder):
"""Encoder in PixelDecoder of Mask2Former."""
def forward(self, query: Tensor, query_pos: Tensor,
key_padding_mask: Tensor, spatial_shapes: Tensor,
level_start_index: Tensor, valid_ratios: Tensor,
reference_points: Tensor, **kwargs) -> Tensor:
"""Forward function of Transformer encoder.
Args:
query (Tensor): The input query, has shape (bs, num_queries, dim).
query_pos (Tensor): The positional encoding for query, has shape
(bs, num_queries, dim). If not None, it will be added to the
`query` before forward function. Defaults to None.
key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`
input. ByteTensor, has shape (bs, num_queries).
spatial_shapes (Tensor): Spatial shapes of features in all levels,
has shape (num_levels, 2), last dimension represents (h, w).
level_start_index (Tensor): The start index of each level.
A tensor has shape (num_levels, ) and can be represented
as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
valid_ratios (Tensor): The ratios of the valid width and the valid
height relative to the width and the height of features in all
levels, has shape (bs, num_levels, 2).
reference_points (Tensor): The initial reference, has shape
(bs, num_queries, 2) with the last dimension arranged
as (cx, cy).
Returns:
Tensor: Output queries of Transformer encoder, which is also
called 'encoder output embeddings' or 'memory', has shape
(bs, num_queries, dim)
"""
for layer in self.layers:
query = layer(
query=query,
query_pos=query_pos,
key_padding_mask=key_padding_mask,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reference_points=reference_points,
**kwargs)
return query
class Mask2FormerTransformerDecoder(DetrTransformerDecoder):
"""Decoder of Mask2Former."""
def _init_layers(self) -> None:
"""Initialize decoder layers."""
self.layers = ModuleList([
Mask2FormerTransformerDecoderLayer(**self.layer_cfg)
for _ in range(self.num_layers)
])
self.embed_dims = self.layers[0].embed_dims
self.post_norm = build_norm_layer(self.post_norm_cfg,
self.embed_dims)[1]
class Mask2FormerTransformerDecoderLayer(DetrTransformerDecoderLayer):
"""Implements decoder layer in Mask2Former transformer."""
def forward(self,
query: Tensor,
key: Tensor = None,
value: Tensor = None,
query_pos: Tensor = None,
key_pos: Tensor = None,
self_attn_mask: Tensor = None,
cross_attn_mask: Tensor = None,
key_padding_mask: Tensor = None,
**kwargs) -> Tensor:
"""
Args:
query (Tensor): The input query, has shape (bs, num_queries, dim).
key (Tensor, optional): The input key, has shape (bs, num_keys,
dim). If `None`, the `query` will be used. Defaults to `None`.
value (Tensor, optional): The input value, has the same shape as
`key`, as in `nn.MultiheadAttention.forward`. If `None`, the
`key` will be used. Defaults to `None`.
query_pos (Tensor, optional): The positional encoding for `query`,
has the same shape as `query`. If not `None`, it will be added
to `query` before forward function. Defaults to `None`.
key_pos (Tensor, optional): The positional encoding for `key`, has
the same shape as `key`. If not `None`, it will be added to
`key` before forward function. If None, and `query_pos` has the
same shape as `key`, then `query_pos` will be used for
`key_pos`. Defaults to None.
self_attn_mask (Tensor, optional): ByteTensor mask, has shape
(num_queries, num_keys), as in `nn.MultiheadAttention.forward`.
Defaults to None.
cross_attn_mask (Tensor, optional): ByteTensor mask, has shape
(num_queries, num_keys), as in `nn.MultiheadAttention.forward`.
Defaults to None.
key_padding_mask (Tensor, optional): The `key_padding_mask` of
`self_attn` input. ByteTensor, has shape (bs, num_value).
Defaults to None.
Returns:
Tensor: forwarded results, has shape (bs, num_queries, dim).
"""
query = self.cross_attn(
query=query,
key=key,
value=value,
query_pos=query_pos,
key_pos=key_pos,
attn_mask=cross_attn_mask,
key_padding_mask=key_padding_mask,
**kwargs)
query = self.norms[0](query)
query = self.self_attn(
query=query,
key=query,
value=query,
query_pos=query_pos,
key_pos=query_pos,
attn_mask=self_attn_mask,
**kwargs)
query = self.norms[1](query)
query = self.ffn(query)
query = self.norms[2](query)
return query
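# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): the Mask2Former decoder
# layer runs cross-attention to pixel features *before* self-attention among
# object queries. The default 256-d config and all sizes are illustrative
# assumptions.
def _mask2former_decoder_layer_example() -> None:
    import torch  # local import; this module itself does not import torch
    layer = Mask2FormerTransformerDecoderLayer()
    query = torch.rand(2, 100, 256)      # object queries
    feats = torch.rand(2, 32 * 32, 256)  # pixel decoder features (one scale)
    out = layer(
        query,
        key=feats,
        value=feats,
        query_pos=torch.rand(2, 100, 256),
        key_pos=torch.rand(2, 32 * 32, 256))
    assert out.shape == (2, 100, 256)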
| 5,960 | 42.830882 | 79 | py |
ERD | ERD-main/mmdet/models/layers/transformer/detr_layers.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmengine import ConfigDict
from mmengine.model import BaseModule, ModuleList
from torch import Tensor
from mmdet.utils import ConfigType, OptConfigType
class DetrTransformerEncoder(BaseModule):
"""Encoder of DETR.
Args:
num_layers (int): Number of encoder layers.
layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder
layer. All the layers will share the same config.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
num_layers: int,
layer_cfg: ConfigType,
init_cfg: OptConfigType = None) -> None:
super().__init__(init_cfg=init_cfg)
self.num_layers = num_layers
self.layer_cfg = layer_cfg
self._init_layers()
def _init_layers(self) -> None:
"""Initialize encoder layers."""
self.layers = ModuleList([
DetrTransformerEncoderLayer(**self.layer_cfg)
for _ in range(self.num_layers)
])
self.embed_dims = self.layers[0].embed_dims
def forward(self, query: Tensor, query_pos: Tensor,
key_padding_mask: Tensor, **kwargs) -> Tensor:
"""Forward function of encoder.
Args:
query (Tensor): Input queries of encoder, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional embeddings of the queries, has
shape (bs, num_queries, dim).
key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`
input. ByteTensor, has shape (bs, num_queries).
Returns:
Tensor: Has shape (bs, num_queries, dim) if `batch_first` is
`True`, otherwise (num_queries, bs, dim).
"""
for layer in self.layers:
query = layer(query, query_pos, key_padding_mask, **kwargs)
return query
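# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a tiny 2-layer DETR encoder
# with the default 256-d layer config, run over flattened image features.
# All sizes are illustrative assumptions.
def _detr_encoder_example() -> None:
    encoder = DetrTransformerEncoder(num_layers=2, layer_cfg=dict())
    feats = torch.rand(2, 19 * 25, 256)  # flattened C5 feature map
    pos = torch.rand(2, 19 * 25, 256)    # sine positional encoding
    mask = torch.zeros(2, 19 * 25, dtype=torch.bool)
    memory = encoder(feats, query_pos=pos, key_padding_mask=mask)
    assert memory.shape == (2, 19 * 25, 256)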
class DetrTransformerDecoder(BaseModule):
"""Decoder of DETR.
Args:
num_layers (int): Number of decoder layers.
layer_cfg (:obj:`ConfigDict` or dict): the config of each encoder
layer. All the layers will share the same config.
post_norm_cfg (:obj:`ConfigDict` or dict, optional): Config of the
post normalization layer. Defaults to `LN`.
return_intermediate (bool, optional): Whether to return outputs of
intermediate layers. Defaults to `True`,
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
num_layers: int,
layer_cfg: ConfigType,
post_norm_cfg: OptConfigType = dict(type='LN'),
return_intermediate: bool = True,
init_cfg: Union[dict, ConfigDict] = None) -> None:
super().__init__(init_cfg=init_cfg)
self.layer_cfg = layer_cfg
self.num_layers = num_layers
self.post_norm_cfg = post_norm_cfg
self.return_intermediate = return_intermediate
self._init_layers()
def _init_layers(self) -> None:
"""Initialize decoder layers."""
self.layers = ModuleList([
DetrTransformerDecoderLayer(**self.layer_cfg)
for _ in range(self.num_layers)
])
self.embed_dims = self.layers[0].embed_dims
self.post_norm = build_norm_layer(self.post_norm_cfg,
self.embed_dims)[1]
def forward(self, query: Tensor, key: Tensor, value: Tensor,
query_pos: Tensor, key_pos: Tensor, key_padding_mask: Tensor,
**kwargs) -> Tensor:
"""Forward function of decoder
Args:
query (Tensor): The input query, has shape (bs, num_queries, dim).
key (Tensor): The input key, has shape (bs, num_keys, dim).
value (Tensor): The input value with the same shape as `key`.
query_pos (Tensor): The positional encoding for `query`, with the
same shape as `query`.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`.
key_padding_mask (Tensor): The `key_padding_mask` of `cross_attn`
input. ByteTensor, has shape (bs, num_value).
Returns:
Tensor: The forwarded results will have shape
(num_decoder_layers, bs, num_queries, dim) if
`return_intermediate` is `True` else (1, bs, num_queries, dim).
"""
intermediate = []
for layer in self.layers:
query = layer(
query,
key=key,
value=value,
query_pos=query_pos,
key_pos=key_pos,
key_padding_mask=key_padding_mask,
**kwargs)
if self.return_intermediate:
intermediate.append(self.post_norm(query))
query = self.post_norm(query)
if self.return_intermediate:
return torch.stack(intermediate)
return query.unsqueeze(0)
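# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): with the default
# `return_intermediate=True` the decoder stacks the post-norm output of every
# layer, which DETR supervises with auxiliary losses. All sizes are
# illustrative assumptions.
def _detr_decoder_example() -> None:
    decoder = DetrTransformerDecoder(num_layers=3, layer_cfg=dict())
    query = torch.zeros(2, 100, 256)  # initialized object queries
    memory = torch.rand(2, 475, 256)  # encoder output
    out = decoder(
        query,
        key=memory,
        value=memory,
        query_pos=torch.rand(2, 100, 256),
        key_pos=torch.rand(2, 475, 256),
        key_padding_mask=torch.zeros(2, 475, dtype=torch.bool))
    assert out.shape == (3, 2, 100, 256)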
class DetrTransformerEncoderLayer(BaseModule):
"""Implements encoder layer in DETR transformer.
Args:
self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self
attention.
ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config for
normalization layers. All the layers will share the same
config. Defaults to `LN`.
init_cfg (:obj:`ConfigDict` or dict, optional): Config to control
the initialization. Defaults to None.
"""
def __init__(self,
self_attn_cfg: OptConfigType = dict(
embed_dims=256, num_heads=8, dropout=0.0),
ffn_cfg: OptConfigType = dict(
embed_dims=256,
feedforward_channels=1024,
num_fcs=2,
ffn_drop=0.,
act_cfg=dict(type='ReLU', inplace=True)),
norm_cfg: OptConfigType = dict(type='LN'),
init_cfg: OptConfigType = None) -> None:
super().__init__(init_cfg=init_cfg)
self.self_attn_cfg = self_attn_cfg
if 'batch_first' not in self.self_attn_cfg:
self.self_attn_cfg['batch_first'] = True
else:
            assert self.self_attn_cfg['batch_first'] is True, \
                'First dimension of all DETRs in mmdet is `batch`, ' \
                'please set `batch_first` flag to True.'
self.ffn_cfg = ffn_cfg
self.norm_cfg = norm_cfg
self._init_layers()
def _init_layers(self) -> None:
"""Initialize self-attention, FFN, and normalization."""
self.self_attn = MultiheadAttention(**self.self_attn_cfg)
self.embed_dims = self.self_attn.embed_dims
self.ffn = FFN(**self.ffn_cfg)
norms_list = [
build_norm_layer(self.norm_cfg, self.embed_dims)[1]
for _ in range(2)
]
self.norms = ModuleList(norms_list)
def forward(self, query: Tensor, query_pos: Tensor,
key_padding_mask: Tensor, **kwargs) -> Tensor:
"""Forward function of an encoder layer.
Args:
query (Tensor): The input query, has shape (bs, num_queries, dim).
query_pos (Tensor): The positional encoding for query, with
the same shape as `query`.
key_padding_mask (Tensor): The `key_padding_mask` of `self_attn`
input. ByteTensor. has shape (bs, num_queries).
Returns:
Tensor: forwarded results, has shape (bs, num_queries, dim).
"""
query = self.self_attn(
query=query,
key=query,
value=query,
query_pos=query_pos,
key_pos=query_pos,
key_padding_mask=key_padding_mask,
**kwargs)
query = self.norms[0](query)
query = self.ffn(query)
query = self.norms[1](query)
return query
class DetrTransformerDecoderLayer(BaseModule):
"""Implements decoder layer in DETR transformer.
Args:
self_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for self
attention.
cross_attn_cfg (:obj:`ConfigDict` or dict, optional): Config for cross
attention.
ffn_cfg (:obj:`ConfigDict` or dict, optional): Config for FFN.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config for
normalization layers. All the layers will share the same
config. Defaults to `LN`.
init_cfg (:obj:`ConfigDict` or dict, optional): Config to control
the initialization. Defaults to None.
"""
def __init__(self,
self_attn_cfg: OptConfigType = dict(
embed_dims=256,
num_heads=8,
dropout=0.0,
batch_first=True),
cross_attn_cfg: OptConfigType = dict(
embed_dims=256,
num_heads=8,
dropout=0.0,
batch_first=True),
ffn_cfg: OptConfigType = dict(
embed_dims=256,
feedforward_channels=1024,
num_fcs=2,
ffn_drop=0.,
act_cfg=dict(type='ReLU', inplace=True),
),
norm_cfg: OptConfigType = dict(type='LN'),
init_cfg: OptConfigType = None) -> None:
super().__init__(init_cfg=init_cfg)
self.self_attn_cfg = self_attn_cfg
self.cross_attn_cfg = cross_attn_cfg
if 'batch_first' not in self.self_attn_cfg:
self.self_attn_cfg['batch_first'] = True
else:
            assert self.self_attn_cfg['batch_first'] is True, \
                'First dimension of all DETRs in mmdet is `batch`, ' \
                'please set `batch_first` flag to True.'
if 'batch_first' not in self.cross_attn_cfg:
self.cross_attn_cfg['batch_first'] = True
else:
            assert self.cross_attn_cfg['batch_first'] is True, \
                'First dimension of all DETRs in mmdet is `batch`, ' \
                'please set `batch_first` flag to True.'
self.ffn_cfg = ffn_cfg
self.norm_cfg = norm_cfg
self._init_layers()
def _init_layers(self) -> None:
"""Initialize self-attention, FFN, and normalization."""
self.self_attn = MultiheadAttention(**self.self_attn_cfg)
self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)
self.embed_dims = self.self_attn.embed_dims
self.ffn = FFN(**self.ffn_cfg)
norms_list = [
build_norm_layer(self.norm_cfg, self.embed_dims)[1]
for _ in range(3)
]
self.norms = ModuleList(norms_list)
def forward(self,
query: Tensor,
key: Tensor = None,
value: Tensor = None,
query_pos: Tensor = None,
key_pos: Tensor = None,
self_attn_mask: Tensor = None,
cross_attn_mask: Tensor = None,
key_padding_mask: Tensor = None,
**kwargs) -> Tensor:
"""
Args:
query (Tensor): The input query, has shape (bs, num_queries, dim).
key (Tensor, optional): The input key, has shape (bs, num_keys,
dim). If `None`, the `query` will be used. Defaults to `None`.
value (Tensor, optional): The input value, has the same shape as
`key`, as in `nn.MultiheadAttention.forward`. If `None`, the
`key` will be used. Defaults to `None`.
query_pos (Tensor, optional): The positional encoding for `query`,
has the same shape as `query`. If not `None`, it will be added
to `query` before forward function. Defaults to `None`.
key_pos (Tensor, optional): The positional encoding for `key`, has
the same shape as `key`. If not `None`, it will be added to
`key` before forward function. If None, and `query_pos` has the
same shape as `key`, then `query_pos` will be used for
`key_pos`. Defaults to None.
self_attn_mask (Tensor, optional): ByteTensor mask, has shape
(num_queries, num_keys), as in `nn.MultiheadAttention.forward`.
Defaults to None.
cross_attn_mask (Tensor, optional): ByteTensor mask, has shape
(num_queries, num_keys), as in `nn.MultiheadAttention.forward`.
Defaults to None.
key_padding_mask (Tensor, optional): The `key_padding_mask` of
`self_attn` input. ByteTensor, has shape (bs, num_value).
Defaults to None.
Returns:
Tensor: forwarded results, has shape (bs, num_queries, dim).
"""
query = self.self_attn(
query=query,
key=query,
value=query,
query_pos=query_pos,
key_pos=query_pos,
attn_mask=self_attn_mask,
**kwargs)
query = self.norms[0](query)
query = self.cross_attn(
query=query,
key=key,
value=value,
query_pos=query_pos,
key_pos=key_pos,
attn_mask=cross_attn_mask,
key_padding_mask=key_padding_mask,
**kwargs)
query = self.norms[1](query)
query = self.ffn(query)
query = self.norms[2](query)
return query
| 13,965 | 38.340845 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/nasfcos_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS
from mmdet.utils import OptMultiConfig
@MODELS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
    It is quite similar to the FCOS head, except for the searched structure of
classification branch and bbox regression branch, where a structure of
"dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
strides (Sequence[int] or Sequence[Tuple[int, int]]): Strides of points
in multiple feature levels. Defaults to (4, 8, 16, 32, 64).
regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling.
Defaults to False.
center_sample_radius (float): Radius of center sampling.
Defaults to 1.5.
norm_on_bbox (bool): If true, normalize the regression targets with
FPN strides. Defaults to False.
        centerness_on_reg (bool): If true, position centerness on the
            regression branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
            Defaults to False.
conv_bias (bool or str): If specified as `auto`, it will be decided by
the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
None, otherwise False. Defaults to "auto".
loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
loss_centerness (:obj:`ConfigDict`, or dict): Config of centerness
loss.
norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
config norm layer. Defaults to
``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
            dict], optional): Initialization config dict.
""" # noqa: E501
def __init__(self,
*args,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super().__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
| 4,776 | 40.53913 | 113 | py |
ERD | ERD-main/mmdet/models/dense_heads/reppoints_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Sequence, Tuple
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import DeformConv2d
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList
from ..task_modules.prior_generators import MlvlPointGenerator
from ..task_modules.samplers import PseudoSampler
from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply,
unmap)
from .anchor_free_head import AnchorFreeHead
@MODELS.register_module()
class RepPointsHead(AnchorFreeHead):
"""RepPoint head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
point_feat_channels (int): Number of channels of points features.
num_points (int): Number of points.
gradient_mul (float): The multiplier to gradients from
points refinement and recognition.
point_strides (Sequence[int]): points strides.
point_base_scale (int): bbox scale for assigning labels.
loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
loss_bbox_init (:obj:`ConfigDict` or dict): Config of initial points
loss.
loss_bbox_refine (:obj:`ConfigDict` or dict): Config of points loss in
refinement.
use_grid_points (bool): If we use bounding box representation, the
reppoints is represented as grid points on the bounding box.
center_init (bool): Whether to use center point assignment.
transform_method (str): The methods to transform RepPoints to bbox.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict.
""" # noqa: W605
def __init__(self,
num_classes: int,
in_channels: int,
point_feat_channels: int = 256,
num_points: int = 9,
gradient_mul: float = 0.1,
point_strides: Sequence[int] = [8, 16, 32, 64, 128],
point_base_scale: int = 4,
loss_cls: ConfigType = dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init: ConfigType = dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
loss_bbox_refine: ConfigType = dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
use_grid_points: bool = False,
center_init: bool = True,
transform_method: str = 'moment',
moment_mul: float = 0.01,
init_cfg: MultiConfig = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='reppoints_cls_out',
std=0.01,
bias_prob=0.01)),
**kwargs) -> None:
self.num_points = num_points
self.point_feat_channels = point_feat_channels
self.use_grid_points = use_grid_points
self.center_init = center_init
# we use deform conv to extract points features
self.dcn_kernel = int(np.sqrt(num_points))
self.dcn_pad = int((self.dcn_kernel - 1) / 2)
assert self.dcn_kernel * self.dcn_kernel == num_points, \
'The points number should be a square number.'
assert self.dcn_kernel % 2 == 1, \
'The points number should be an odd square number.'
dcn_base = np.arange(-self.dcn_pad,
self.dcn_pad + 1).astype(np.float64)
dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
(-1))
self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
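        # For num_points=9 the base offset built above is simply the 3x3
        # neighbourhood of a deformable conv, stored y-first:
        #   [(-1,-1), (-1,0), (-1,1), (0,-1), (0,0), (0,1),
        #    (1,-1), (1,0), (1,1)]
        # flattened to 18 scalars; the predicted points are expressed
        # relative to this grid when passed to DeformConv2d as offsets.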
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
loss_cls=loss_cls,
init_cfg=init_cfg,
**kwargs)
self.gradient_mul = gradient_mul
self.point_base_scale = point_base_scale
self.point_strides = point_strides
self.prior_generator = MlvlPointGenerator(
self.point_strides, offset=0.)
if self.train_cfg:
self.init_assigner = TASK_UTILS.build(
self.train_cfg['init']['assigner'])
self.refine_assigner = TASK_UTILS.build(
self.train_cfg['refine']['assigner'])
if self.train_cfg.get('sampler', None) is not None:
self.sampler = TASK_UTILS.build(
self.train_cfg['sampler'], default_args=dict(context=self))
else:
self.sampler = PseudoSampler(context=self)
self.transform_method = transform_method
if self.transform_method == 'moment':
self.moment_transfer = nn.Parameter(
data=torch.zeros(2), requires_grad=True)
self.moment_mul = moment_mul
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes
else:
self.cls_out_channels = self.num_classes + 1
self.loss_bbox_init = MODELS.build(loss_bbox_init)
self.loss_bbox_refine = MODELS.build(loss_bbox_refine)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
self.cls_out_channels, 1, 1, 0)
self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
self.point_feat_channels, 3,
1, 1)
self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
def points2bbox(self, pts: Tensor, y_first: bool = True) -> Tensor:
"""Converting the points set into bounding box.
Args:
pts (Tensor): the input points sets (fields), each points
set (fields) is represented as 2n scalar.
y_first (bool): if y_first=True, the point set is
represented as [y1, x1, y2, x2 ... yn, xn], otherwise
the point set is represented as
[x1, y1, x2, y2 ... xn, yn]. Defaults to True.
Returns:
Tensor: each points set is converting to a bbox [x1, y1, x2, y2].
"""
pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
...]
pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
...]
if self.transform_method == 'minmax':
bbox_left = pts_x.min(dim=1, keepdim=True)[0]
bbox_right = pts_x.max(dim=1, keepdim=True)[0]
bbox_up = pts_y.min(dim=1, keepdim=True)[0]
bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
dim=1)
elif self.transform_method == 'partial_minmax':
pts_y = pts_y[:, :4, ...]
pts_x = pts_x[:, :4, ...]
bbox_left = pts_x.min(dim=1, keepdim=True)[0]
bbox_right = pts_x.max(dim=1, keepdim=True)[0]
bbox_up = pts_y.min(dim=1, keepdim=True)[0]
bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
dim=1)
elif self.transform_method == 'moment':
pts_y_mean = pts_y.mean(dim=1, keepdim=True)
pts_x_mean = pts_x.mean(dim=1, keepdim=True)
pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
moment_transfer = (self.moment_transfer * self.moment_mul) + (
self.moment_transfer.detach() * (1 - self.moment_mul))
moment_width_transfer = moment_transfer[0]
moment_height_transfer = moment_transfer[1]
half_width = pts_x_std * torch.exp(moment_width_transfer)
half_height = pts_y_std * torch.exp(moment_height_transfer)
bbox = torch.cat([
pts_x_mean - half_width, pts_y_mean - half_height,
pts_x_mean + half_width, pts_y_mean + half_height
],
dim=1)
else:
raise NotImplementedError
return bbox
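    # Illustrative note (not part of the original file): with the 'moment'
    # method, points with x-mean 10, x-std 2 and moment_transfer[0] == 0
    # give half_width = 2 * exp(0) = 2, i.e. x1 = 8 and x2 = 12, so the
    # learnable moment_transfer controls how tightly the box hugs the points.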
def gen_grid_from_reg(self, reg: Tensor,
previous_boxes: Tensor) -> Tuple[Tensor]:
"""Base on the previous bboxes and regression values, we compute the
regressed bboxes and generate the grids on the bboxes.
Args:
reg (Tensor): the regression value to previous bboxes.
previous_boxes (Tensor): previous bboxes.
Returns:
Tuple[Tensor]: generate grids on the regressed bboxes.
"""
b, _, h, w = reg.shape
bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
bwh = (previous_boxes[:, 2:, ...] -
previous_boxes[:, :2, ...]).clamp(min=1e-6)
grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
reg[:, 2:, ...])
grid_wh = bwh * torch.exp(reg[:, 2:, ...])
grid_left = grid_topleft[:, [0], ...]
grid_top = grid_topleft[:, [1], ...]
grid_width = grid_wh[:, [0], ...]
grid_height = grid_wh[:, [1], ...]
        interval = torch.linspace(0., 1., self.dcn_kernel).view(
            1, self.dcn_kernel, 1, 1).type_as(reg)
        grid_x = grid_left + grid_width * interval
        grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
        grid_x = grid_x.view(b, -1, h, w)
        grid_y = grid_top + grid_height * interval
grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
grid_y = grid_y.view(b, -1, h, w)
grid_yx = torch.stack([grid_y, grid_x], dim=2)
grid_yx = grid_yx.view(b, -1, h, w)
regressed_bbox = torch.cat([
grid_left, grid_top, grid_left + grid_width, grid_top + grid_height
], 1)
return grid_yx, regressed_bbox
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
return multi_apply(self.forward_single, feats)
def forward_single(self, x: Tensor) -> Tuple[Tensor]:
"""Forward feature map of a single FPN level."""
dcn_base_offset = self.dcn_base_offset.type_as(x)
# If we use center_init, the initial reppoints is from center points.
# If we use bounding bbox representation, the initial reppoints is
# from regular grid placed on a pre-defined bbox.
if self.use_grid_points or not self.center_init:
scale = self.point_base_scale / 2
points_init = dcn_base_offset / dcn_base_offset.max() * scale
bbox_init = x.new_tensor([-scale, -scale, scale,
scale]).view(1, 4, 1, 1)
else:
points_init = 0
cls_feat = x
pts_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
pts_feat = reg_conv(pts_feat)
# initialize reppoints
pts_out_init = self.reppoints_pts_init_out(
self.relu(self.reppoints_pts_init_conv(pts_feat)))
if self.use_grid_points:
pts_out_init, bbox_out_init = self.gen_grid_from_reg(
pts_out_init, bbox_init.detach())
else:
pts_out_init = pts_out_init + points_init
# refine and classify reppoints
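        # Only a `gradient_mul` fraction of the gradient from the refinement
        # stage flows back into the initial points; the remainder is detached
        # so the init branch is trained mainly by its own loss.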
pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
) + self.gradient_mul * pts_out_init
dcn_offset = pts_out_init_grad_mul - dcn_base_offset
cls_out = self.reppoints_cls_out(
self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
pts_out_refine = self.reppoints_pts_refine_out(
self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
if self.use_grid_points:
pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
pts_out_refine, bbox_out_init.detach())
else:
pts_out_refine = pts_out_refine + pts_out_init.detach()
if self.training:
return cls_out, pts_out_init, pts_out_refine
else:
return cls_out, self.points2bbox(pts_out_refine)
def get_points(self, featmap_sizes: List[Tuple[int]],
batch_img_metas: List[dict], device: str) -> tuple:
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
            batch_img_metas (list[dict]): Image meta info.
            device (str): Device the tensors will be placed on.
Returns:
tuple: points of each image, valid flags of each image
"""
num_imgs = len(batch_img_metas)
        # since feature map sizes of all images are the same, we only compute
        # points center once
multi_level_points = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
points_list = [[point.clone() for point in multi_level_points]
for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level grids
valid_flag_list = []
for img_id, img_meta in enumerate(batch_img_metas):
multi_level_flags = self.prior_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'], device=device)
valid_flag_list.append(multi_level_flags)
return points_list, valid_flag_list
def centers_to_bboxes(self, point_list: List[Tensor]) -> List[Tensor]:
"""Get bboxes according to center points.
Only used in :class:`MaxIoUAssigner`.
"""
bbox_list = []
for i_img, point in enumerate(point_list):
bbox = []
for i_lvl in range(len(self.point_strides)):
scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
bbox_shift = torch.Tensor([-scale, -scale, scale,
scale]).view(1, 4).type_as(point[0])
bbox_center = torch.cat(
[point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
bbox.append(bbox_center + bbox_shift)
bbox_list.append(bbox)
return bbox_list
def offset_to_pts(self, center_list: List[Tensor],
pred_list: List[Tensor]) -> List[Tensor]:
"""Change from point offset to point coordinate."""
pts_list = []
for i_lvl in range(len(self.point_strides)):
pts_lvl = []
for i_img in range(len(center_list)):
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
1, self.num_points)
pts_shift = pred_list[i_lvl][i_img]
yx_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * self.num_points)
y_pts_shift = yx_pts_shift[..., 0::2]
x_pts_shift = yx_pts_shift[..., 1::2]
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
pts_lvl.append(pts)
pts_lvl = torch.stack(pts_lvl, 0)
pts_list.append(pts_lvl)
return pts_list
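    # Illustrative note (not part of the original file): the raw offsets are
    # stored y-first ([y1, x1, y2, x2, ...]); the swizzle above converts them
    # to x-first (x, y) pairs in absolute image coordinates via
    # `offset * stride + grid_center`.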
def _get_targets_single(self,
flat_proposals: Tensor,
valid_flags: Tensor,
gt_instances: InstanceData,
gt_instances_ignore: InstanceData,
stage: str = 'init',
unmap_outputs: bool = True) -> tuple:
"""Compute corresponding GT box and classification targets for
proposals.
Args:
            flat_proposals (Tensor): Multi level points of an image.
            valid_flags (Tensor): Multi level valid flags of an image.
gt_instances (InstanceData): It usually includes ``bboxes`` and
``labels`` attributes.
gt_instances_ignore (InstanceData): It includes ``bboxes``
attribute data that is ignored during training and testing.
stage (str): 'init' or 'refine'. Generate target for
init stage or refine stage. Defaults to 'init'.
unmap_outputs (bool): Whether to map outputs back to
the original set of anchors. Defaults to True.
Returns:
tuple:
- labels (Tensor): Labels of each level.
- label_weights (Tensor): Label weights of each level.
- bbox_targets (Tensor): BBox targets of each level.
- bbox_weights (Tensor): BBox weights of each level.
                - pos_inds (Tensor): Indexes of positive samples.
                - neg_inds (Tensor): Indexes of negative samples.
- sampling_result (:obj:`SamplingResult`): Sampling results.
"""
inside_flags = valid_flags
if not inside_flags.any():
raise ValueError(
'There is no valid proposal inside the image boundary. Please '
'check the image size.')
# assign gt and sample proposals
proposals = flat_proposals[inside_flags, :]
pred_instances = InstanceData(priors=proposals)
if stage == 'init':
assigner = self.init_assigner
pos_weight = self.train_cfg['init']['pos_weight']
else:
assigner = self.refine_assigner
pos_weight = self.train_cfg['refine']['pos_weight']
assign_result = assigner.assign(pred_instances, gt_instances,
gt_instances_ignore)
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
num_valid_proposals = proposals.shape[0]
bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
pos_proposals = torch.zeros_like(proposals)
proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
labels = proposals.new_full((num_valid_proposals, ),
self.num_classes,
dtype=torch.long)
label_weights = proposals.new_zeros(
num_valid_proposals, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
bbox_gt[pos_inds, :] = sampling_result.pos_gt_bboxes
pos_proposals[pos_inds, :] = proposals[pos_inds, :]
proposals_weights[pos_inds, :] = 1.0
labels[pos_inds] = sampling_result.pos_gt_labels
if pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of proposals
if unmap_outputs:
num_total_proposals = flat_proposals.size(0)
labels = unmap(
labels,
num_total_proposals,
inside_flags,
fill=self.num_classes) # fill bg label
label_weights = unmap(label_weights, num_total_proposals,
inside_flags)
bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
pos_proposals = unmap(pos_proposals, num_total_proposals,
inside_flags)
proposals_weights = unmap(proposals_weights, num_total_proposals,
inside_flags)
return (labels, label_weights, bbox_gt, pos_proposals,
proposals_weights, pos_inds, neg_inds, sampling_result)
def get_targets(self,
proposals_list: List[Tensor],
valid_flag_list: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
stage: str = 'init',
unmap_outputs: bool = True,
return_sampling_results: bool = False) -> tuple:
"""Compute corresponding GT box and classification targets for
proposals.
Args:
proposals_list (list[Tensor]): Multi level points/bboxes of each
image.
valid_flag_list (list[Tensor]): Multi level valid flags of each
image.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
stage (str): 'init' or 'refine'. Generate target for init stage or
refine stage.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
return_sampling_results (bool): Whether to return the sampling
results. Defaults to False.
Returns:
tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each
level.
- bbox_gt_list (list[Tensor]): Ground truth bbox of each level.
- proposals_list (list[Tensor]): Proposals(points/bboxes) of
each level.
- proposal_weights_list (list[Tensor]): Proposal weights of
each level.
- avg_factor (int): Average factor that is used to average
the loss. When using sampling method, avg_factor is usually
the sum of positive and negative priors. When using
`PseudoSampler`, `avg_factor` is usually equal to the number
of positive priors.
"""
assert stage in ['init', 'refine']
num_imgs = len(batch_img_metas)
assert len(proposals_list) == len(valid_flag_list) == num_imgs
# points number of multi levels
num_level_proposals = [points.size(0) for points in proposals_list[0]]
# concat all level points and flags to a single tensor
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None] * num_imgs
(all_labels, all_label_weights, all_bbox_gt, all_proposals,
all_proposal_weights, pos_inds_list, neg_inds_list,
sampling_results_list) = multi_apply(
self._get_targets_single,
proposals_list,
valid_flag_list,
batch_gt_instances,
batch_gt_instances_ignore,
stage=stage,
unmap_outputs=unmap_outputs)
# sampled points of all images
        avg_factor = sum(
            [results.avg_factor for results in sampling_results_list])
labels_list = images_to_levels(all_labels, num_level_proposals)
label_weights_list = images_to_levels(all_label_weights,
num_level_proposals)
bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
proposals_list = images_to_levels(all_proposals, num_level_proposals)
proposal_weights_list = images_to_levels(all_proposal_weights,
num_level_proposals)
        res = (labels_list, label_weights_list, bbox_gt_list, proposals_list,
               proposal_weights_list, avg_factor)
if return_sampling_results:
res = res + (sampling_results_list, )
return res
def loss_by_feat_single(self, cls_score: Tensor, pts_pred_init: Tensor,
pts_pred_refine: Tensor, labels: Tensor,
label_weights, bbox_gt_init: Tensor,
bbox_weights_init: Tensor, bbox_gt_refine: Tensor,
bbox_weights_refine: Tensor, stride: int,
avg_factor_init: int,
avg_factor_refine: int) -> Tuple[Tensor]:
"""Calculate the loss of a single scale level based on the features
extracted by the detection head.
Args:
cls_score (Tensor): Box scores for each scale level
Has shape (N, num_classes, h_i, w_i).
pts_pred_init (Tensor): Points of shape
(batch_size, h_i * w_i, num_points * 2).
pts_pred_refine (Tensor): Points refined of shape
(batch_size, h_i * w_i, num_points * 2).
labels (Tensor): Ground truth class indices with shape
(batch_size, h_i * w_i).
label_weights (Tensor): Label weights of shape
(batch_size, h_i * w_i).
bbox_gt_init (Tensor): BBox regression targets in the init stage
of shape (batch_size, h_i * w_i, 4).
bbox_weights_init (Tensor): BBox regression loss weights in the
init stage of shape (batch_size, h_i * w_i, 4).
bbox_gt_refine (Tensor): BBox regression targets in the refine
stage of shape (batch_size, h_i * w_i, 4).
bbox_weights_refine (Tensor): BBox regression loss weights in the
refine stage of shape (batch_size, h_i * w_i, 4).
stride (int): Point stride.
avg_factor_init (int): Average factor that is used to average
the loss in the init stage.
avg_factor_refine (int): Average factor that is used to average
the loss in the refine stage.
Returns:
Tuple[Tensor]: loss components.
"""
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
cls_score = cls_score.contiguous()
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=avg_factor_refine)
# points loss
bbox_gt_init = bbox_gt_init.reshape(-1, 4)
bbox_weights_init = bbox_weights_init.reshape(-1, 4)
bbox_pred_init = self.points2bbox(
pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
bbox_pred_refine = self.points2bbox(
pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
normalize_term = self.point_base_scale * stride
loss_pts_init = self.loss_bbox_init(
bbox_pred_init / normalize_term,
bbox_gt_init / normalize_term,
bbox_weights_init,
avg_factor=avg_factor_init)
loss_pts_refine = self.loss_bbox_refine(
bbox_pred_refine / normalize_term,
bbox_gt_refine / normalize_term,
bbox_weights_refine,
avg_factor=avg_factor_refine)
return loss_cls, loss_pts_init, loss_pts_refine
def loss_by_feat(
self,
cls_scores: List[Tensor],
pts_preds_init: List[Tensor],
pts_preds_refine: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None
) -> Dict[str, Tensor]:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, of shape (batch_size, num_classes, h, w).
pts_preds_init (list[Tensor]): Points for each scale level, each is
a 3D-tensor, of shape (batch_size, h_i * w_i, num_points * 2).
pts_preds_refine (list[Tensor]): Points refined for each scale
level, each is a 3D-tensor, of shape
(batch_size, h_i * w_i, num_points * 2).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
device = cls_scores[0].device
# target for initial stage
center_list, valid_flag_list = self.get_points(featmap_sizes,
batch_img_metas, device)
pts_coordinate_preds_init = self.offset_to_pts(center_list,
pts_preds_init)
if self.train_cfg['init']['assigner']['type'] == 'PointAssigner':
# Assign target for center list
candidate_list = center_list
else:
# transform center list to bbox list and
# assign target for bbox list
bbox_list = self.centers_to_bboxes(center_list)
candidate_list = bbox_list
cls_reg_targets_init = self.get_targets(
proposals_list=candidate_list,
valid_flag_list=valid_flag_list,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore,
stage='init',
return_sampling_results=False)
(*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
avg_factor_init) = cls_reg_targets_init
# target for refinement stage
center_list, valid_flag_list = self.get_points(featmap_sizes,
batch_img_metas, device)
pts_coordinate_preds_refine = self.offset_to_pts(
center_list, pts_preds_refine)
bbox_list = []
for i_img, center in enumerate(center_list):
bbox = []
for i_lvl in range(len(pts_preds_refine)):
bbox_preds_init = self.points2bbox(
pts_preds_init[i_lvl].detach())
bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
bbox_center = torch.cat(
[center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
bbox.append(bbox_center +
bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
bbox_list.append(bbox)
cls_reg_targets_refine = self.get_targets(
proposals_list=bbox_list,
valid_flag_list=valid_flag_list,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore,
stage='refine',
return_sampling_results=False)
(labels_list, label_weights_list, bbox_gt_list_refine,
candidate_list_refine, bbox_weights_list_refine,
avg_factor_refine) = cls_reg_targets_refine
# compute loss
losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
self.loss_by_feat_single,
cls_scores,
pts_coordinate_preds_init,
pts_coordinate_preds_refine,
labels_list,
label_weights_list,
bbox_gt_list_init,
bbox_weights_list_init,
bbox_gt_list_refine,
bbox_weights_list_refine,
self.point_strides,
avg_factor_init=avg_factor_init,
avg_factor_refine=avg_factor_refine)
loss_dict_all = {
'loss_cls': losses_cls,
'loss_pts_init': losses_pts_init,
'loss_pts_refine': losses_pts_refine
}
return loss_dict_all
# Same as base_dense_head/_get_bboxes_single except self._bbox_decode
def _predict_by_feat_single(self,
cls_score_list: List[Tensor],
bbox_pred_list: List[Tensor],
score_factor_list: List[Tensor],
mlvl_priors: List[Tensor],
img_meta: dict,
cfg: ConfigDict,
rescale: bool = False,
with_nms: bool = True) -> InstanceData:
"""Transform outputs of a single image into bbox predictions.
Args:
cls_score_list (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_priors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has shape
(num_priors * 4, H, W).
score_factor_list (list[Tensor]): Score factor from all scale
levels of a single image. RepPoints head does not need
this value.
mlvl_priors (list[Tensor]): Each element in the list is
the priors of a single level in feature pyramid, has shape
(num_priors, 2).
img_meta (dict): Image meta info.
cfg (:obj:`ConfigDict`): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_score_list) == len(bbox_pred_list)
img_shape = img_meta['img_shape']
nms_pre = cfg.get('nms_pre', -1)
mlvl_bboxes = []
mlvl_scores = []
mlvl_labels = []
for level_idx, (cls_score, bbox_pred, priors) in enumerate(
zip(cls_score_list, bbox_pred_list, mlvl_priors)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)[:, :-1]
# After https://github.com/open-mmlab/mmdetection/pull/6268/,
# this operation keeps fewer bboxes under the same `nms_pre`.
# There is no difference in performance for most models. If you
# find a slight drop in performance, you can set a larger
# `nms_pre` than before.
results = filter_scores_and_topk(
scores, cfg.score_thr, nms_pre,
dict(bbox_pred=bbox_pred, priors=priors))
scores, labels, _, filtered_results = results
bbox_pred = filtered_results['bbox_pred']
priors = filtered_results['priors']
bboxes = self._bbox_decode(priors, bbox_pred,
self.point_strides[level_idx],
img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_labels.append(labels)
results = InstanceData()
results.bboxes = torch.cat(mlvl_bboxes)
results.scores = torch.cat(mlvl_scores)
results.labels = torch.cat(mlvl_labels)
return self._bbox_post_process(
results=results,
cfg=cfg,
rescale=rescale,
with_nms=with_nms,
img_meta=img_meta)
def _bbox_decode(self, points: Tensor, bbox_pred: Tensor, stride: int,
max_shape: Tuple[int, int]) -> Tensor:
"""Decode the prediction to bounding box.
Args:
points (Tensor): shape (h_i * w_i, 2).
bbox_pred (Tensor): shape (h_i * w_i, 4).
stride (int): Stride for bbox_pred in different level.
max_shape (Tuple[int, int]): image shape.
Returns:
Tensor: Bounding boxes decoded.
"""
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
bboxes = bbox_pred * stride + bbox_pos_center
x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1])
y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0])
x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1])
y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0])
decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return decoded_bboxes
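# --- Illustrative sketch (not part of the original mmdet file) ------------
# RepPoints decodes boxes as ``bbox_pred * stride + (cx, cy, cx, cy)`` and
# then clamps them to the image, as in ``_bbox_decode`` above. A minimal
# numeric check, assuming stride 8 and a 100x100 image:
def _demo_reppoints_bbox_decode():
    import torch
    points = torch.tensor([[16., 16.]])  # one prior centre (x, y)
    bbox_pred = torch.tensor([[-1., -1., 1., 1.]])  # offsets in stride units
    stride, max_shape = 8, (100, 100)  # max_shape is (h, w)
    center = torch.cat([points, points], dim=1)  # (cx, cy, cx, cy)
    bboxes = bbox_pred * stride + center
    x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1])
    y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0])
    x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1])
    y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0])
    decoded = torch.stack([x1, y1, x2, y2], dim=-1)
    assert decoded.tolist() == [[8., 8., 24., 24.]]
    return decoded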
| 40,982 | 45.256208 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/solov2_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import List, Optional, Tuple
import mmcv
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.utils.misc import floordiv
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType
from ..layers import mask_matrix_nms
from ..utils import center_of_mass, generate_coordinate, multi_apply
from .solo_head import SOLOHead
class MaskFeatModule(BaseModule):
"""SOLOv2 mask feature map branch used in `SOLOv2: Dynamic and Fast
Instance Segmentation. <https://arxiv.org/pdf/2003.10152>`_
Args:
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels of the mask feature
map branch.
start_level (int): The starting feature map level from RPN that
will be used to predict the mask feature map.
        end_level (int): The ending feature map level from RPN that
will be used to predict the mask feature map.
out_channels (int): Number of output channels of the mask feature
map branch. This is the channel count of the mask
            feature map to be dynamically convolved with the predicted
kernel.
mask_stride (int): Downsample factor of the mask feature map output.
Defaults to 4.
        conv_cfg (dict): Config dict for convolution layer. Defaults to None.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to None.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(
self,
in_channels: int,
feat_channels: int,
start_level: int,
end_level: int,
out_channels: int,
mask_stride: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
init_cfg: MultiConfig = [
dict(type='Normal', layer='Conv2d', std=0.01)
]
) -> None:
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.feat_channels = feat_channels
self.start_level = start_level
self.end_level = end_level
self.mask_stride = mask_stride
assert start_level >= 0 and end_level >= start_level
self.out_channels = out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self._init_layers()
self.fp16_enabled = False
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.convs_all_levels = nn.ModuleList()
for i in range(self.start_level, self.end_level + 1):
convs_per_level = nn.Sequential()
if i == 0:
convs_per_level.add_module(
f'conv{i}',
ConvModule(
self.in_channels,
self.feat_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs_all_levels.append(convs_per_level)
continue
for j in range(i):
if j == 0:
if i == self.end_level:
chn = self.in_channels + 2
else:
chn = self.in_channels
convs_per_level.add_module(
f'conv{j}',
ConvModule(
chn,
self.feat_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
convs_per_level.add_module(
f'upsample{j}',
nn.Upsample(
scale_factor=2,
mode='bilinear',
align_corners=False))
continue
convs_per_level.add_module(
f'conv{j}',
ConvModule(
self.feat_channels,
self.feat_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
convs_per_level.add_module(
f'upsample{j}',
nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=False))
self.convs_all_levels.append(convs_per_level)
self.conv_pred = ConvModule(
self.feat_channels,
self.out_channels,
1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, x: Tuple[Tensor]) -> Tensor:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
Tensor: The predicted mask feature map.
"""
inputs = x[self.start_level:self.end_level + 1]
assert len(inputs) == (self.end_level - self.start_level + 1)
feature_add_all_level = self.convs_all_levels[0](inputs[0])
for i in range(1, len(inputs)):
input_p = inputs[i]
if i == len(inputs) - 1:
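                # CoordConv-style trick: append normalized x/y coordinate
                # maps to the deepest level so the mask branch is aware of
                # absolute position.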
coord_feat = generate_coordinate(input_p.size(),
input_p.device)
input_p = torch.cat([input_p, coord_feat], 1)
feature_add_all_level = feature_add_all_level + \
self.convs_all_levels[i](input_p)
feature_pred = self.conv_pred(feature_add_all_level)
return feature_pred
@MODELS.register_module()
class SOLOV2Head(SOLOHead):
"""SOLOv2 mask head used in `SOLOv2: Dynamic and Fast Instance
Segmentation. <https://arxiv.org/pdf/2003.10152>`_
Args:
mask_feature_head (dict): Config of SOLOv2MaskFeatHead.
dynamic_conv_size (int): Dynamic Conv kernel size. Defaults to 1.
dcn_cfg (dict): Dcn conv configurations in kernel_convs and cls_conv.
Defaults to None.
dcn_apply_to_all_conv (bool): Whether to use dcn in every layer of
kernel_convs and cls_convs, or only the last layer. It shall be set
`True` for the normal version of SOLOv2 and `False` for the
light-weight version. Defaults to True.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
*args,
mask_feature_head: ConfigType,
dynamic_conv_size: int = 1,
dcn_cfg: OptConfigType = None,
dcn_apply_to_all_conv: bool = True,
init_cfg: MultiConfig = [
dict(type='Normal', layer='Conv2d', std=0.01),
dict(
type='Normal',
std=0.01,
bias_prob=0.01,
override=dict(name='conv_cls'))
],
**kwargs) -> None:
assert dcn_cfg is None or isinstance(dcn_cfg, dict)
self.dcn_cfg = dcn_cfg
self.with_dcn = dcn_cfg is not None
self.dcn_apply_to_all_conv = dcn_apply_to_all_conv
self.dynamic_conv_size = dynamic_conv_size
mask_out_channels = mask_feature_head.get('out_channels')
self.kernel_out_channels = \
mask_out_channels * self.dynamic_conv_size * self.dynamic_conv_size
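        # e.g. with mask out_channels=256 and dynamic_conv_size=1, each grid
        # cell predicts a 256-d vector that is reshaped into a 1x1 dynamic
        # conv kernel and applied to the unified mask feature map.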
super().__init__(*args, init_cfg=init_cfg, **kwargs)
# update the in_channels of mask_feature_head
if mask_feature_head.get('in_channels', None) is not None:
if mask_feature_head.in_channels != self.in_channels:
warnings.warn('The `in_channels` of SOLOv2MaskFeatHead and '
'SOLOv2Head should be same, changing '
'mask_feature_head.in_channels to '
f'{self.in_channels}')
mask_feature_head.update(in_channels=self.in_channels)
else:
mask_feature_head.update(in_channels=self.in_channels)
self.mask_feature_head = MaskFeatModule(**mask_feature_head)
self.mask_stride = self.mask_feature_head.mask_stride
self.fp16_enabled = False
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.cls_convs = nn.ModuleList()
self.kernel_convs = nn.ModuleList()
conv_cfg = None
for i in range(self.stacked_convs):
if self.with_dcn:
if self.dcn_apply_to_all_conv:
conv_cfg = self.dcn_cfg
elif i == self.stacked_convs - 1:
# light head
conv_cfg = self.dcn_cfg
chn = self.in_channels + 2 if i == 0 else self.feat_channels
self.kernel_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_kernel = nn.Conv2d(
self.feat_channels, self.kernel_out_channels, 3, padding=1)
def forward(self, x):
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: A tuple of classification scores, mask prediction,
and mask features.
- mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel
prediction. The kernel is used to generate instance
segmentation masks by dynamic convolution. Each element in
the list has shape
(batch_size, kernel_out_channels, num_grids, num_grids).
- mlvl_cls_preds (list[Tensor]): Multi-level scores. Each
element in the list has shape
(batch_size, num_classes, num_grids, num_grids).
- mask_feats (Tensor): Unified mask feature map used to
generate instance segmentation masks by dynamic convolution.
Has shape (batch_size, mask_out_channels, h, w).
"""
assert len(x) == self.num_levels
mask_feats = self.mask_feature_head(x)
ins_kernel_feats = self.resize_feats(x)
mlvl_kernel_preds = []
mlvl_cls_preds = []
for i in range(self.num_levels):
ins_kernel_feat = ins_kernel_feats[i]
# ins branch
# concat coord
coord_feat = generate_coordinate(ins_kernel_feat.size(),
ins_kernel_feat.device)
ins_kernel_feat = torch.cat([ins_kernel_feat, coord_feat], 1)
# kernel branch
kernel_feat = ins_kernel_feat
kernel_feat = F.interpolate(
kernel_feat,
size=self.num_grids[i],
mode='bilinear',
align_corners=False)
cate_feat = kernel_feat[:, :-2, :, :]
kernel_feat = kernel_feat.contiguous()
            for kernel_conv in self.kernel_convs:
kernel_feat = kernel_conv(kernel_feat)
kernel_pred = self.conv_kernel(kernel_feat)
# cate branch
cate_feat = cate_feat.contiguous()
            for cls_conv in self.cls_convs:
cate_feat = cls_conv(cate_feat)
cate_pred = self.conv_cls(cate_feat)
mlvl_kernel_preds.append(kernel_pred)
mlvl_cls_preds.append(cate_pred)
return mlvl_kernel_preds, mlvl_cls_preds, mask_feats
def _get_targets_single(self,
gt_instances: InstanceData,
featmap_sizes: Optional[list] = None) -> tuple:
"""Compute targets for predictions of single image.
Args:
gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It should include ``bboxes``, ``labels``,
and ``masks`` attributes.
featmap_sizes (list[:obj:`torch.size`]): Size of each
feature map from feature pyramid, each element
means (feat_h, feat_w). Defaults to None.
Returns:
Tuple: Usually returns a tuple containing targets for predictions.
            - mlvl_pos_mask_targets (list[Tensor]): Each element represents
the binary mask targets for positive points in this
level, has shape (num_pos, out_h, out_w).
- mlvl_labels (list[Tensor]): Each element is
classification labels for all
points in this level, has shape
(num_grid, num_grid).
- mlvl_pos_masks (list[Tensor]): Each element is
a `BoolTensor` to represent whether the
corresponding point in single level
is positive, has shape (num_grid **2).
- mlvl_pos_indexes (list[list]): Each element
in the list contains the positive index in
corresponding level, has shape (num_pos).
"""
gt_labels = gt_instances.labels
device = gt_labels.device
gt_bboxes = gt_instances.bboxes
gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
(gt_bboxes[:, 3] - gt_bboxes[:, 1]))
gt_masks = gt_instances.masks.to_tensor(
dtype=torch.bool, device=device)
mlvl_pos_mask_targets = []
mlvl_pos_indexes = []
mlvl_labels = []
mlvl_pos_masks = []
for (lower_bound, upper_bound), num_grid \
in zip(self.scale_ranges, self.num_grids):
mask_target = []
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
pos_index = []
labels = torch.zeros([num_grid, num_grid],
dtype=torch.int64,
device=device) + self.num_classes
pos_mask = torch.zeros([num_grid**2],
dtype=torch.bool,
device=device)
gt_inds = ((gt_areas >= lower_bound) &
(gt_areas <= upper_bound)).nonzero().flatten()
if len(gt_inds) == 0:
mlvl_pos_mask_targets.append(
torch.zeros([0, featmap_sizes[0], featmap_sizes[1]],
dtype=torch.uint8,
device=device))
mlvl_labels.append(labels)
mlvl_pos_masks.append(pos_mask)
mlvl_pos_indexes.append([])
continue
hit_gt_bboxes = gt_bboxes[gt_inds]
hit_gt_labels = gt_labels[gt_inds]
hit_gt_masks = gt_masks[gt_inds, ...]
pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] -
hit_gt_bboxes[:, 0]) * self.pos_scale
pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] -
hit_gt_bboxes[:, 1]) * self.pos_scale
# Make sure hit_gt_masks has a value
valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0
for gt_mask, gt_label, pos_h_range, pos_w_range, \
valid_mask_flag in \
zip(hit_gt_masks, hit_gt_labels, pos_h_ranges,
pos_w_ranges, valid_mask_flags):
if not valid_mask_flag:
continue
upsampled_size = (featmap_sizes[0] * self.mask_stride,
featmap_sizes[1] * self.mask_stride)
center_h, center_w = center_of_mass(gt_mask)
coord_w = int(
floordiv((center_w / upsampled_size[1]), (1. / num_grid),
rounding_mode='trunc'))
coord_h = int(
floordiv((center_h / upsampled_size[0]), (1. / num_grid),
rounding_mode='trunc'))
                # top, down, left, right boxes of the positive region
top_box = max(
0,
int(
floordiv(
(center_h - pos_h_range) / upsampled_size[0],
(1. / num_grid),
rounding_mode='trunc')))
down_box = min(
num_grid - 1,
int(
floordiv(
(center_h + pos_h_range) / upsampled_size[0],
(1. / num_grid),
rounding_mode='trunc')))
left_box = max(
0,
int(
floordiv(
(center_w - pos_w_range) / upsampled_size[1],
(1. / num_grid),
rounding_mode='trunc')))
right_box = min(
num_grid - 1,
int(
floordiv(
(center_w + pos_w_range) / upsampled_size[1],
(1. / num_grid),
rounding_mode='trunc')))
top = max(top_box, coord_h - 1)
down = min(down_box, coord_h + 1)
left = max(coord_w - 1, left_box)
right = min(right_box, coord_w + 1)
labels[top:(down + 1), left:(right + 1)] = gt_label
# ins
gt_mask = np.uint8(gt_mask.cpu().numpy())
                # Follow the original implementation: mmcv.imrescale (cv2)
                # is used here because F.interpolate behaves differently
gt_mask = mmcv.imrescale(gt_mask, scale=1. / self.mask_stride)
gt_mask = torch.from_numpy(gt_mask).to(device=device)
for i in range(top, down + 1):
for j in range(left, right + 1):
index = int(i * num_grid + j)
this_mask_target = torch.zeros(
[featmap_sizes[0], featmap_sizes[1]],
dtype=torch.uint8,
device=device)
this_mask_target[:gt_mask.shape[0], :gt_mask.
shape[1]] = gt_mask
mask_target.append(this_mask_target)
pos_mask[index] = True
pos_index.append(index)
if len(mask_target) == 0:
mask_target = torch.zeros(
[0, featmap_sizes[0], featmap_sizes[1]],
dtype=torch.uint8,
device=device)
else:
mask_target = torch.stack(mask_target, 0)
mlvl_pos_mask_targets.append(mask_target)
mlvl_labels.append(labels)
mlvl_pos_masks.append(pos_mask)
mlvl_pos_indexes.append(pos_index)
return (mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks,
mlvl_pos_indexes)
def loss_by_feat(self, mlvl_kernel_preds: List[Tensor],
mlvl_cls_preds: List[Tensor], mask_feats: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict], **kwargs) -> dict:
"""Calculate the loss based on the features extracted by the mask head.
Args:
mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel
prediction. The kernel is used to generate instance
segmentation masks by dynamic convolution. Each element in the
list has shape
(batch_size, kernel_out_channels, num_grids, num_grids).
mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element
in the list has shape
(batch_size, num_classes, num_grids, num_grids).
mask_feats (Tensor): Unified mask feature map used to generate
instance segmentation masks by dynamic convolution. Has shape
(batch_size, mask_out_channels, h, w).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``masks``,
and ``labels`` attributes.
batch_img_metas (list[dict]): Meta information of multiple images.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = mask_feats.size()[-2:]
pos_mask_targets, labels, pos_masks, pos_indexes = multi_apply(
self._get_targets_single,
batch_gt_instances,
featmap_sizes=featmap_sizes)
mlvl_mask_targets = [
torch.cat(lvl_mask_targets, 0)
for lvl_mask_targets in zip(*pos_mask_targets)
]
mlvl_pos_kernel_preds = []
for lvl_kernel_preds, lvl_pos_indexes in zip(mlvl_kernel_preds,
zip(*pos_indexes)):
lvl_pos_kernel_preds = []
for img_lvl_kernel_preds, img_lvl_pos_indexes in zip(
lvl_kernel_preds, lvl_pos_indexes):
img_lvl_pos_kernel_preds = img_lvl_kernel_preds.view(
img_lvl_kernel_preds.shape[0], -1)[:, img_lvl_pos_indexes]
lvl_pos_kernel_preds.append(img_lvl_pos_kernel_preds)
mlvl_pos_kernel_preds.append(lvl_pos_kernel_preds)
# make multilevel mlvl_mask_pred
mlvl_mask_preds = []
for lvl_pos_kernel_preds in mlvl_pos_kernel_preds:
lvl_mask_preds = []
for img_id, img_lvl_pos_kernel_pred in enumerate(
lvl_pos_kernel_preds):
if img_lvl_pos_kernel_pred.size()[-1] == 0:
continue
img_mask_feats = mask_feats[[img_id]]
h, w = img_mask_feats.shape[-2:]
num_kernel = img_lvl_pos_kernel_pred.shape[1]
img_lvl_mask_pred = F.conv2d(
img_mask_feats,
img_lvl_pos_kernel_pred.permute(1, 0).view(
num_kernel, -1, self.dynamic_conv_size,
self.dynamic_conv_size),
stride=1).view(-1, h, w)
lvl_mask_preds.append(img_lvl_mask_pred)
if len(lvl_mask_preds) == 0:
lvl_mask_preds = None
else:
lvl_mask_preds = torch.cat(lvl_mask_preds, 0)
mlvl_mask_preds.append(lvl_mask_preds)
# dice loss
num_pos = 0
for img_pos_masks in pos_masks:
for lvl_img_pos_masks in img_pos_masks:
# Fix `Tensor` object has no attribute `count_nonzero()`
# in PyTorch 1.6, the type of `lvl_img_pos_masks`
# should be `torch.bool`.
num_pos += lvl_img_pos_masks.nonzero().numel()
loss_mask = []
for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds,
mlvl_mask_targets):
if lvl_mask_preds is None:
continue
loss_mask.append(
self.loss_mask(
lvl_mask_preds,
lvl_mask_targets,
reduction_override='none'))
if num_pos > 0:
loss_mask = torch.cat(loss_mask).sum() / num_pos
else:
loss_mask = mask_feats.sum() * 0
# cate
flatten_labels = [
torch.cat(
[img_lvl_labels.flatten() for img_lvl_labels in lvl_labels])
for lvl_labels in zip(*labels)
]
flatten_labels = torch.cat(flatten_labels)
flatten_cls_preds = [
lvl_cls_preds.permute(0, 2, 3, 1).reshape(-1, self.num_classes)
for lvl_cls_preds in mlvl_cls_preds
]
flatten_cls_preds = torch.cat(flatten_cls_preds)
loss_cls = self.loss_cls(
flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1)
return dict(loss_mask=loss_mask, loss_cls=loss_cls)
def predict_by_feat(self, mlvl_kernel_preds: List[Tensor],
mlvl_cls_scores: List[Tensor], mask_feats: Tensor,
batch_img_metas: List[dict], **kwargs) -> InstanceList:
"""Transform a batch of output features extracted from the head into
mask results.
Args:
mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel
prediction. The kernel is used to generate instance
segmentation masks by dynamic convolution. Each element in the
list has shape
(batch_size, kernel_out_channels, num_grids, num_grids).
mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element
in the list has shape
(batch_size, num_classes, num_grids, num_grids).
mask_feats (Tensor): Unified mask feature map used to generate
instance segmentation masks by dynamic convolution. Has shape
(batch_size, mask_out_channels, h, w).
batch_img_metas (list[dict]): Meta information of all images.
Returns:
list[:obj:`InstanceData`]: Processed results of multiple
                images. Each :obj:`InstanceData` usually contains the
                following keys.
- scores (Tensor): Classification scores, has shape
(num_instance,).
- labels (Tensor): Has shape (num_instances,).
- masks (Tensor): Processed mask results, has
shape (num_instances, h, w).
"""
num_levels = len(mlvl_cls_scores)
assert len(mlvl_kernel_preds) == len(mlvl_cls_scores)
for lvl in range(num_levels):
cls_scores = mlvl_cls_scores[lvl]
cls_scores = cls_scores.sigmoid()
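            # Point NMS: a 2x2 max-pool keeps only grid cells that are local
            # maxima of the category score map, suppressing duplicate
            # predictions from neighbouring cells.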
local_max = F.max_pool2d(cls_scores, 2, stride=1, padding=1)
keep_mask = local_max[:, :, :-1, :-1] == cls_scores
cls_scores = cls_scores * keep_mask
mlvl_cls_scores[lvl] = cls_scores.permute(0, 2, 3, 1)
result_list = []
for img_id in range(len(batch_img_metas)):
img_cls_pred = [
mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels)
for lvl in range(num_levels)
]
img_mask_feats = mask_feats[[img_id]]
img_kernel_pred = [
mlvl_kernel_preds[lvl][img_id].permute(1, 2, 0).view(
-1, self.kernel_out_channels) for lvl in range(num_levels)
]
img_cls_pred = torch.cat(img_cls_pred, dim=0)
img_kernel_pred = torch.cat(img_kernel_pred, dim=0)
result = self._predict_by_feat_single(
img_kernel_pred,
img_cls_pred,
img_mask_feats,
img_meta=batch_img_metas[img_id])
result_list.append(result)
return result_list
def _predict_by_feat_single(self,
kernel_preds: Tensor,
cls_scores: Tensor,
mask_feats: Tensor,
img_meta: dict,
cfg: OptConfigType = None) -> InstanceData:
"""Transform a single image's features extracted from the head into
mask results.
Args:
kernel_preds (Tensor): Dynamic kernel prediction of all points
                in a single image, has shape
                (num_points, kernel_out_channels).
            cls_scores (Tensor): Classification score of all points
                in a single image, has shape (num_points, num_classes).
            mask_feats (Tensor): Mask prediction of all points in
                a single image, has shape (num_points, feat_h, feat_w).
img_meta (dict): Meta information of corresponding image.
cfg (dict, optional): Config used in test phase.
Defaults to None.
Returns:
            :obj:`InstanceData`: Processed results of a single image.
                It usually contains the following keys.
- scores (Tensor): Classification scores, has shape
(num_instance,).
- labels (Tensor): Has shape (num_instances,).
- masks (Tensor): Processed mask results, has
shape (num_instances, h, w).
"""
def empty_results(cls_scores, ori_shape):
"""Generate a empty results."""
results = InstanceData()
results.scores = cls_scores.new_ones(0)
results.masks = cls_scores.new_zeros(0, *ori_shape)
results.labels = cls_scores.new_ones(0)
results.bboxes = cls_scores.new_zeros(0, 4)
return results
cfg = self.test_cfg if cfg is None else cfg
assert len(kernel_preds) == len(cls_scores)
featmap_size = mask_feats.size()[-2:]
# overall info
h, w = img_meta['img_shape'][:2]
upsampled_size = (featmap_size[0] * self.mask_stride,
featmap_size[1] * self.mask_stride)
# process.
score_mask = (cls_scores > cfg.score_thr)
cls_scores = cls_scores[score_mask]
if len(cls_scores) == 0:
return empty_results(cls_scores, img_meta['ori_shape'][:2])
# cate_labels & kernel_preds
inds = score_mask.nonzero()
cls_labels = inds[:, 1]
kernel_preds = kernel_preds[inds[:, 0]]
# trans vector.
lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0)
strides = kernel_preds.new_ones(lvl_interval[-1])
strides[:lvl_interval[0]] *= self.strides[0]
for lvl in range(1, self.num_levels):
strides[lvl_interval[lvl -
1]:lvl_interval[lvl]] *= self.strides[lvl]
strides = strides[inds[:, 0]]
# mask encoding.
kernel_preds = kernel_preds.view(
kernel_preds.size(0), -1, self.dynamic_conv_size,
self.dynamic_conv_size)
mask_preds = F.conv2d(
mask_feats, kernel_preds, stride=1).squeeze(0).sigmoid()
# mask.
masks = mask_preds > cfg.mask_thr
sum_masks = masks.sum((1, 2)).float()
keep = sum_masks > strides
if keep.sum() == 0:
return empty_results(cls_scores, img_meta['ori_shape'][:2])
masks = masks[keep]
mask_preds = mask_preds[keep]
sum_masks = sum_masks[keep]
cls_scores = cls_scores[keep]
cls_labels = cls_labels[keep]
# maskness.
mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks
cls_scores *= mask_scores
scores, labels, _, keep_inds = mask_matrix_nms(
masks,
cls_labels,
cls_scores,
mask_area=sum_masks,
nms_pre=cfg.nms_pre,
max_num=cfg.max_per_img,
kernel=cfg.kernel,
sigma=cfg.sigma,
filter_thr=cfg.filter_thr)
if len(keep_inds) == 0:
return empty_results(cls_scores, img_meta['ori_shape'][:2])
mask_preds = mask_preds[keep_inds]
mask_preds = F.interpolate(
mask_preds.unsqueeze(0),
size=upsampled_size,
mode='bilinear',
align_corners=False)[:, :, :h, :w]
mask_preds = F.interpolate(
mask_preds,
size=img_meta['ori_shape'][:2],
mode='bilinear',
align_corners=False).squeeze(0)
masks = mask_preds > cfg.mask_thr
results = InstanceData()
results.masks = masks
results.labels = labels
results.scores = scores
# create an empty bbox in InstanceData to avoid bugs when
# calculating metrics.
results.bboxes = results.scores.new_zeros(len(scores), 4)
return results
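# --- Illustrative sketch (not part of the original mmdet file) ------------
# The core of SOLOv2's dynamic convolution: each positive grid cell predicts
# a flat kernel that is reshaped and convolved with the shared mask feature
# map to yield one instance mask. Assumes dynamic_conv_size == 1; all names
# below are hypothetical.
def _demo_solov2_dynamic_conv(num_inst: int = 3,
                              mask_channels: int = 8,
                              h: int = 16,
                              w: int = 16):
    import torch
    import torch.nn.functional as F
    mask_feats = torch.randn(1, mask_channels, h, w)  # shared feature map
    kernels = torch.randn(num_inst, mask_channels)  # one kernel per instance
    mask_preds = F.conv2d(
        mask_feats, kernels.view(num_inst, mask_channels, 1, 1),
        stride=1).squeeze(0).sigmoid()
    assert mask_preds.shape == (num_inst, h, w)
    return mask_preds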
| 33,478 | 40.84875 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/cascade_rpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from __future__ import division
import copy
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from mmcv.ops import DeformConv2d
from mmengine.config import ConfigDict
from mmengine.model import BaseModule, ModuleList
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures import SampleList
from mmdet.utils import (ConfigType, InstanceList, MultiConfig,
OptInstanceList, OptMultiConfig)
from ..task_modules.assigners import RegionAssigner
from ..task_modules.samplers import PseudoSampler
from ..utils import (images_to_levels, multi_apply, select_single_mlvl,
unpack_gt_instances)
from .base_dense_head import BaseDenseHead
from .rpn_head import RPNHead
class AdaptiveConv(BaseModule):
"""AdaptiveConv used to adapt the sampling location with the anchors.
Args:
in_channels (int): Number of channels in the input image.
out_channels (int): Number of channels produced by the convolution.
kernel_size (int or tuple[int]): Size of the conv kernel.
Defaults to 3.
stride (int or tuple[int]): Stride of the convolution. Defaults to 1.
padding (int or tuple[int]): Zero-padding added to both sides of
the input. Defaults to 1.
dilation (int or tuple[int]): Spacing between kernel elements.
Defaults to 3.
groups (int): Number of blocked connections from input channels to
output channels. Defaults to 1.
bias (bool): If set True, adds a learnable bias to the output.
Defaults to False.
        adapt_type (str): Type of adaptive conv, can be either ``offset``
            (arbitrary anchors) or ``dilation`` (uniform anchors).
            Defaults to 'dilation'.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \
list[dict]): Initialization config dict.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int]] = 3,
stride: Union[int, Tuple[int]] = 1,
padding: Union[int, Tuple[int]] = 1,
dilation: Union[int, Tuple[int]] = 3,
groups: int = 1,
bias: bool = False,
adapt_type: str = 'dilation',
init_cfg: MultiConfig = dict(
type='Normal', std=0.01, override=dict(name='conv'))
) -> None:
super().__init__(init_cfg=init_cfg)
assert adapt_type in ['offset', 'dilation']
self.adapt_type = adapt_type
        assert kernel_size == 3, 'Adaptive conv only supports kernel size 3'
if self.adapt_type == 'offset':
            assert stride == 1 and padding == 1 and groups == 1, \
                'Adaptive conv offset mode only supports padding: 1, ' \
                'stride: 1, groups: 1'
self.conv = DeformConv2d(
in_channels,
out_channels,
kernel_size,
padding=padding,
stride=stride,
groups=groups,
bias=bias)
else:
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
padding=dilation,
dilation=dilation)
def forward(self, x: Tensor, offset: Tensor) -> Tensor:
"""Forward function."""
if self.adapt_type == 'offset':
N, _, H, W = x.shape
assert offset is not None
assert H * W == offset.shape[1]
# reshape [N, NA, 18] to (N, 18, H, W)
offset = offset.permute(0, 2, 1).reshape(N, -1, H, W)
offset = offset.contiguous()
x = self.conv(x, offset)
else:
assert offset is None
x = self.conv(x)
return x
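# --- Illustrative sketch (not part of the original mmdet file) ------------
# In 'offset' mode the per-anchor offsets arrive as (N, H*W, 18) and are
# permuted/reshaped into the (N, 18, H, W) layout expected by DeformConv2d,
# mirroring the reshape in ``AdaptiveConv.forward`` above.
def _demo_adaptive_conv_offset_layout(n: int = 2, h: int = 5, w: int = 7):
    import torch
    offset = torch.randn(n, h * w, 18)
    offset = offset.permute(0, 2, 1).reshape(n, -1, h, w).contiguous()
    assert offset.shape == (n, 18, h, w)
    return offset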
@MODELS.register_module()
class StageCascadeRPNHead(RPNHead):
"""Stage of CascadeRPNHead.
Args:
in_channels (int): Number of channels in the input feature map.
anchor_generator (:obj:`ConfigDict` or dict): anchor generator config.
adapt_cfg (:obj:`ConfigDict` or dict): adaptation config.
        bridged_feature (bool): whether to update the rpn feature.
            Defaults to False.
with_cls (bool): whether use classification branch. Defaults to True.
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
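    Example:
        >>> # Illustrative sketch only; channel sizes are assumptions and
        >>> # the default anchor generator / adapt_cfg are used.
        >>> self = StageCascadeRPNHead(in_channels=16, feat_channels=16)
        >>> feats = [torch.rand(1, 16, s, s) for s in [64, 32, 16, 8, 4]]
        >>> bridged_feats, cls_scores, bbox_preds = self(feats)
        >>> # one output per level; the default generator has one anchor,
        >>> # so the reg branch predicts 4 channels per location
        >>> assert bbox_preds[0].shape[1] == 4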
"""
def __init__(self,
in_channels: int,
anchor_generator: ConfigType = dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg: ConfigType = dict(type='dilation', dilation=3),
bridged_feature: bool = False,
with_cls: bool = True,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
self.with_cls = with_cls
self.anchor_strides = anchor_generator['strides']
self.anchor_scales = anchor_generator['scales']
self.bridged_feature = bridged_feature
self.adapt_cfg = adapt_cfg
super().__init__(
in_channels=in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
# override sampling and sampler
if self.train_cfg:
self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
            # use PseudoSampler when no sampler is specified in train_cfg
if self.train_cfg.get('sampler', None) is not None:
self.sampler = TASK_UTILS.build(
self.train_cfg['sampler'], default_args=dict(context=self))
else:
self.sampler = PseudoSampler(context=self)
if init_cfg is None:
self.init_cfg = dict(
type='Normal', std=0.01, override=[dict(name='rpn_reg')])
if self.with_cls:
self.init_cfg['override'].append(dict(name='rpn_cls'))
def _init_layers(self) -> None:
"""Init layers of a CascadeRPN stage."""
adapt_cfg = copy.deepcopy(self.adapt_cfg)
adapt_cfg['adapt_type'] = adapt_cfg.pop('type')
self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels,
**adapt_cfg)
if self.with_cls:
self.rpn_cls = nn.Conv2d(self.feat_channels,
self.num_anchors * self.cls_out_channels,
1)
self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
self.relu = nn.ReLU(inplace=True)
def forward_single(self, x: Tensor, offset: Tensor) -> Tuple[Tensor]:
"""Forward function of single scale."""
bridged_x = x
x = self.relu(self.rpn_conv(x, offset))
if self.bridged_feature:
bridged_x = x # update feature
cls_score = self.rpn_cls(x) if self.with_cls else None
bbox_pred = self.rpn_reg(x)
return bridged_x, cls_score, bbox_pred
def forward(
self,
feats: List[Tensor],
offset_list: Optional[List[Tensor]] = None) -> Tuple[List[Tensor]]:
"""Forward function."""
if offset_list is None:
offset_list = [None for _ in range(len(feats))]
return multi_apply(self.forward_single, feats, offset_list)
def _region_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor,
gt_instances: InstanceData, img_meta: dict,
gt_instances_ignore: InstanceData,
featmap_sizes: List[Tuple[int, int]],
num_level_anchors: List[int]) -> tuple:
"""Get anchor targets based on region for single level.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors, ).
gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It should include ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for current image.
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
featmap_sizes (list[Tuple[int, int]]): Feature map size each level.
num_level_anchors (list[int]): The number of anchors in each level.
Returns:
tuple:
- labels (Tensor): Labels of each level.
- label_weights (Tensor): Label weights of each level.
- bbox_targets (Tensor): BBox targets of each level.
- bbox_weights (Tensor): BBox weights of each level.
- pos_inds (Tensor): positive samples indexes.
- neg_inds (Tensor): negative samples indexes.
- sampling_result (:obj:`SamplingResult`): Sampling results.
"""
pred_instances = InstanceData()
pred_instances.priors = flat_anchors
pred_instances.valid_flags = valid_flags
assign_result = self.assigner.assign(
pred_instances,
gt_instances,
img_meta,
featmap_sizes,
num_level_anchors,
self.anchor_scales[0],
self.anchor_strides,
gt_instances_ignore=gt_instances_ignore,
allowed_border=self.train_cfg['allowed_border'])
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
num_anchors = flat_anchors.shape[0]
bbox_targets = torch.zeros_like(flat_anchors)
bbox_weights = torch.zeros_like(flat_anchors)
labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long)
label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
else:
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
labels[pos_inds] = sampling_result.pos_gt_labels
if self.train_cfg['pos_weight'] <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg['pos_weight']
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds, sampling_result)
def region_targets(
self,
anchor_list: List[List[Tensor]],
valid_flag_list: List[List[Tensor]],
featmap_sizes: List[Tuple[int, int]],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
return_sampling_results: bool = False,
) -> tuple:
"""Compute regression and classification targets for anchors when using
RegionAssigner.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image.
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image.
featmap_sizes (list[Tuple[int, int]]): Feature map size each level.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each
level.
- bbox_targets_list (list[Tensor]): BBox targets of each level.
- bbox_weights_list (list[Tensor]): BBox weights of each level.
- avg_factor (int): Average factor that is used to average
the loss. When using sampling method, avg_factor is usually
the sum of positive and negative priors. When using
``PseudoSampler``, ``avg_factor`` is usually equal to the
number of positive priors.
"""
num_imgs = len(batch_img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None] * num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors to a single tensor
concat_anchor_list = []
concat_valid_flag_list = []
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
concat_anchor_list.append(torch.cat(anchor_list[i]))
concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
# compute targets for each image
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply(
self._region_targets_single,
concat_anchor_list,
concat_valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore,
featmap_sizes=featmap_sizes,
num_level_anchors=num_level_anchors)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
avg_factor = sum(
[results.avg_factor for results in sampling_results_list])
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
res = (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, avg_factor)
if return_sampling_results:
res = res + (sampling_results_list, )
return res
def get_targets(
self,
anchor_list: List[List[Tensor]],
valid_flag_list: List[List[Tensor]],
featmap_sizes: List[Tuple[int, int]],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
return_sampling_results: bool = False,
) -> tuple:
"""Compute regression and classification targets for anchors.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image.
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image.
featmap_sizes (list[Tuple[int, int]]): Feature map size each level.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
return_sampling_results (bool): Whether to return the sampling
results. Defaults to False.
Returns:
tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each
level.
- bbox_targets_list (list[Tensor]): BBox targets of each level.
- bbox_weights_list (list[Tensor]): BBox weights of each level.
- avg_factor (int): Average factor that is used to average
the loss. When using sampling method, avg_factor is usually
the sum of positive and negative priors. When using
``PseudoSampler``, ``avg_factor`` is usually equal to the
number of positive priors.
"""
if isinstance(self.assigner, RegionAssigner):
cls_reg_targets = self.region_targets(
anchor_list,
valid_flag_list,
featmap_sizes,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore,
return_sampling_results=return_sampling_results)
else:
cls_reg_targets = super().get_targets(
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore,
return_sampling_results=return_sampling_results)
return cls_reg_targets
def anchor_offset(self, anchor_list: List[List[Tensor]],
anchor_strides: List[int],
featmap_sizes: List[Tuple[int, int]]) -> List[Tensor]:
""" Get offset for deformable conv based on anchor shape
NOTE: currently support deformable kernel_size=3 and dilation=1
Args:
anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of
multi-level anchors
anchor_strides (list[int]): anchor stride of each level
Returns:
list[tensor]: offset of DeformConv kernel with shapes of
[NLVL, NA, 2, 18].
"""
def _shape_offset(anchors, stride, ks=3, dilation=1):
# currently support kernel_size=3 and dilation=1
assert ks == 3 and dilation == 1
pad = (ks - 1) // 2
idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)
yy, xx = torch.meshgrid(idx, idx) # return order matters
xx = xx.reshape(-1)
yy = yy.reshape(-1)
w = (anchors[:, 2] - anchors[:, 0]) / stride
h = (anchors[:, 3] - anchors[:, 1]) / stride
w = w / (ks - 1) - dilation
h = h / (ks - 1) - dilation
offset_x = w[:, None] * xx # (NA, ks**2)
offset_y = h[:, None] * yy # (NA, ks**2)
return offset_x, offset_y
def _ctr_offset(anchors, stride, featmap_size):
feat_h, feat_w = featmap_size
assert len(anchors) == feat_h * feat_w
x = (anchors[:, 0] + anchors[:, 2]) * 0.5
y = (anchors[:, 1] + anchors[:, 3]) * 0.5
# compute centers on feature map
x = x / stride
y = y / stride
# compute predefine centers
xx = torch.arange(0, feat_w, device=anchors.device)
yy = torch.arange(0, feat_h, device=anchors.device)
yy, xx = torch.meshgrid(yy, xx)
xx = xx.reshape(-1).type_as(x)
yy = yy.reshape(-1).type_as(y)
offset_x = x - xx # (NA, )
offset_y = y - yy # (NA, )
return offset_x, offset_y
num_imgs = len(anchor_list)
num_lvls = len(anchor_list[0])
dtype = anchor_list[0][0].dtype
device = anchor_list[0][0].device
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
offset_list = []
for i in range(num_imgs):
mlvl_offset = []
for lvl in range(num_lvls):
c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl],
anchor_strides[lvl],
featmap_sizes[lvl])
s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl],
anchor_strides[lvl])
# offset = ctr_offset + shape_offset
offset_x = s_offset_x + c_offset_x[:, None]
offset_y = s_offset_y + c_offset_y[:, None]
                # offset order (y0, x0, y1, x1, ..., y8, x8)
offset = torch.stack([offset_y, offset_x], dim=-1)
offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2]
mlvl_offset.append(offset)
offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2]
offset_list = images_to_levels(offset_list, num_level_anchors)
return offset_list
def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,
anchors: Tensor, labels: Tensor,
label_weights: Tensor, bbox_targets: Tensor,
bbox_weights: Tensor, avg_factor: int) -> tuple:
"""Loss function on single scale."""
# classification loss
if self.with_cls:
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=avg_factor)
# regression loss
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
# is applied directly on the decoded bounding boxes, it
# decodes the already encoded coordinates to absolute format.
anchors = anchors.reshape(-1, 4)
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
loss_reg = self.loss_bbox(
bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor)
if self.with_cls:
return loss_cls, loss_reg
return None, loss_reg
def loss_by_feat(
self,
anchor_list: List[List[Tensor]],
valid_flag_list: List[List[Tensor]],
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None
) -> Dict[str, Tensor]:
"""Compute losses of the head.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image.
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
featmap_sizes,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore,
return_sampling_results=True)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
avg_factor, sampling_results_list) = cls_reg_targets
if not sampling_results_list[0].avg_factor_with_neg:
# 200 is hard-coded average factor,
# which follows guided anchoring.
avg_factor = sum([label.numel() for label in labels_list]) / 200.0
# change per image, per level anchor_list to per_level, per_image
mlvl_anchor_list = list(zip(*anchor_list))
# concat mlvl_anchor_list
mlvl_anchor_list = [
torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list
]
losses = multi_apply(
self.loss_by_feat_single,
cls_scores,
bbox_preds,
mlvl_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
avg_factor=avg_factor)
if self.with_cls:
return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1])
return dict(loss_rpn_reg=losses[1])
def predict_by_feat(self,
anchor_list: List[List[Tensor]],
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_img_metas: List[dict],
cfg: Optional[ConfigDict] = None,
rescale: bool = False) -> InstanceList:
"""Get proposal predict. Overriding to enable input ``anchor_list``
from outside.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
batch_img_metas (list[dict], Optional): Image meta info.
cfg (:obj:`ConfigDict`, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Object detection results of each image
after the post process. Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
assert len(cls_scores) == len(bbox_preds)
result_list = []
for img_id in range(len(batch_img_metas)):
cls_score_list = select_single_mlvl(cls_scores, img_id)
bbox_pred_list = select_single_mlvl(bbox_preds, img_id)
proposals = self._predict_by_feat_single(
cls_scores=cls_score_list,
bbox_preds=bbox_pred_list,
mlvl_anchors=anchor_list[img_id],
img_meta=batch_img_metas[img_id],
cfg=cfg,
rescale=rescale)
result_list.append(proposals)
return result_list
def _predict_by_feat_single(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
mlvl_anchors: List[Tensor],
img_meta: dict,
cfg: ConfigDict,
rescale: bool = False) -> InstanceData:
"""Transform outputs of a single image into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has
shape (num_anchors * 4, H, W).
mlvl_anchors (list[Tensor]): Box reference from all scale
levels of a single image, each item has shape
(num_total_anchors, 4).
            img_meta (dict): Meta information of the image, e.g., image
                size, scaling factor, etc.
cfg (:obj:`ConfigDict`): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
# bboxes from different level should be independent during NMS,
# level_ids are used as labels for batched NMS to separate them
level_ids = []
mlvl_scores = []
mlvl_bbox_preds = []
mlvl_valid_anchors = []
nms_pre = cfg.get('nms_pre', -1)
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
# We set FG labels to [0, num_class-1] and BG label to
# num_class in RPN head since mmdet v2.5, which is unified to
# be consistent with other head since mmdet v2.0. In mmdet v2.0
# to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
scores = rpn_cls_score.softmax(dim=1)[:, 0]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
anchors = mlvl_anchors[idx]
if 0 < nms_pre < scores.shape[0]:
# sort is faster than topk
# _, topk_inds = scores.topk(cfg.nms_pre)
ranked_scores, rank_inds = scores.sort(descending=True)
topk_inds = rank_inds[:nms_pre]
scores = ranked_scores[:nms_pre]
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
mlvl_scores.append(scores)
mlvl_bbox_preds.append(rpn_bbox_pred)
mlvl_valid_anchors.append(anchors)
level_ids.append(
scores.new_full((scores.size(0), ), idx, dtype=torch.long))
anchors = torch.cat(mlvl_valid_anchors)
rpn_bbox_pred = torch.cat(mlvl_bbox_preds)
bboxes = self.bbox_coder.decode(
anchors, rpn_bbox_pred, max_shape=img_meta['img_shape'])
proposals = InstanceData()
proposals.bboxes = bboxes
proposals.scores = torch.cat(mlvl_scores)
proposals.level_ids = torch.cat(level_ids)
return self._bbox_post_process(
results=proposals, cfg=cfg, rescale=rescale, img_meta=img_meta)
def refine_bboxes(self, anchor_list: List[List[Tensor]],
bbox_preds: List[Tensor],
img_metas: List[dict]) -> List[List[Tensor]]:
"""Refine bboxes through stages."""
num_levels = len(bbox_preds)
new_anchor_list = []
for img_id in range(len(img_metas)):
mlvl_anchors = []
for i in range(num_levels):
bbox_pred = bbox_preds[i][img_id].detach()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
img_shape = img_metas[img_id]['img_shape']
bboxes = self.bbox_coder.decode(anchor_list[img_id][i],
bbox_pred, img_shape)
mlvl_anchors.append(bboxes)
new_anchor_list.append(mlvl_anchors)
return new_anchor_list
def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, _, batch_img_metas = outputs
featmap_sizes = [featmap.size()[-2:] for featmap in x]
device = x[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
if self.adapt_cfg['type'] == 'offset':
offset_list = self.anchor_offset(anchor_list, self.anchor_strides,
featmap_sizes)
else:
offset_list = None
x, cls_score, bbox_pred = self(x, offset_list)
rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred,
batch_gt_instances, batch_img_metas)
losses = self.loss_by_feat(*rpn_loss_inputs)
return losses
def loss_and_predict(
self,
x: Tuple[Tensor],
batch_data_samples: SampleList,
proposal_cfg: Optional[ConfigDict] = None,
) -> Tuple[dict, InstanceList]:
"""Perform forward propagation of the head, then calculate loss and
predictions from the features and data samples.
Args:
x (tuple[Tensor]): Features from FPN.
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
proposal_cfg (:obj`ConfigDict`, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
Returns:
tuple: the return value is a tuple contains:
- losses: (dict[str, Tensor]): A dictionary of loss components.
- predictions (list[:obj:`InstanceData`]): Detection
results of each image after the post process.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, _, batch_img_metas = outputs
featmap_sizes = [featmap.size()[-2:] for featmap in x]
device = x[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
if self.adapt_cfg['type'] == 'offset':
offset_list = self.anchor_offset(anchor_list, self.anchor_strides,
featmap_sizes)
else:
offset_list = None
x, cls_score, bbox_pred = self(x, offset_list)
rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, bbox_pred,
batch_gt_instances, batch_img_metas)
losses = self.loss_by_feat(*rpn_loss_inputs)
predictions = self.predict_by_feat(
anchor_list,
cls_score,
bbox_pred,
batch_img_metas=batch_img_metas,
cfg=proposal_cfg)
return losses, predictions
def predict(self,
x: Tuple[Tensor],
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the detection head and predict
detection results on the features of the upstream network.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
            list[:obj:`InstanceData`]: Detection results of each image
after the post process.
"""
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
featmap_sizes = [featmap.size()[-2:] for featmap in x]
device = x[0].device
anchor_list, _ = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
if self.adapt_cfg['type'] == 'offset':
offset_list = self.anchor_offset(anchor_list, self.anchor_strides,
featmap_sizes)
else:
offset_list = None
x, cls_score, bbox_pred = self(x, offset_list)
        predictions = self.predict_by_feat(
anchor_list,
cls_score,
bbox_pred,
batch_img_metas=batch_img_metas,
rescale=rescale)
return predictions
@MODELS.register_module()
class CascadeRPNHead(BaseDenseHead):
"""The CascadeRPNHead will predict more accurate region proposals, which is
required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN
    consists of a sequence of RPN stages to progressively improve the accuracy
    of the detected proposals.
More details can be found in ``https://arxiv.org/abs/1909.06720``.
Args:
        num_classes (int): Number of categories excluding the background
            category. Only 1 class is supported for RPN.
        num_stages (int): number of CascadeRPN stages.
stages (list[:obj:`ConfigDict` or dict]): list of configs to build
the stages.
train_cfg (list[:obj:`ConfigDict` or dict]): list of configs at
training time each stage.
test_cfg (:obj:`ConfigDict` or dict): config at testing time.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \
list[dict]): Initialization config dict.
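    Example:
        >>> # Config-style sketch (values are assumptions; see the official
        >>> # Cascade RPN config files for a complete, tested setup).
        >>> stages = [
        ...     dict(
        ...         type='StageCascadeRPNHead',
        ...         in_channels=256,
        ...         adapt_cfg=dict(type='dilation', dilation=3),
        ...         bridged_feature=True,
        ...         with_cls=False),
        ...     dict(
        ...         type='StageCascadeRPNHead',
        ...         in_channels=256,
        ...         adapt_cfg=dict(type='offset'),
        ...         bridged_feature=False,
        ...         with_cls=True),
        ... ]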
"""
def __init__(self,
num_classes: int,
num_stages: int,
stages: List[ConfigType],
train_cfg: List[ConfigType],
test_cfg: ConfigType,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert num_classes == 1, 'Only support num_classes == 1'
assert num_stages == len(stages)
self.num_stages = num_stages
        # Be careful! Pretrained weights cannot be loaded when using
# nn.ModuleList
self.stages = ModuleList()
for i in range(len(stages)):
train_cfg_i = train_cfg[i] if train_cfg is not None else None
stages[i].update(train_cfg=train_cfg_i)
stages[i].update(test_cfg=test_cfg)
self.stages.append(MODELS.build(stages[i]))
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def loss_by_feat(self):
"""loss_by_feat() is implemented in StageCascadeRPNHead."""
pass
def predict_by_feat(self):
"""predict_by_feat() is implemented in StageCascadeRPNHead."""
pass
def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, _, batch_img_metas = outputs
featmap_sizes = [featmap.size()[-2:] for featmap in x]
device = x[0].device
anchor_list, valid_flag_list = self.stages[0].get_anchors(
featmap_sizes, batch_img_metas, device=device)
losses = dict()
for i in range(self.num_stages):
stage = self.stages[i]
if stage.adapt_cfg['type'] == 'offset':
offset_list = stage.anchor_offset(anchor_list,
stage.anchor_strides,
featmap_sizes)
else:
offset_list = None
x, cls_score, bbox_pred = stage(x, offset_list)
rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,
bbox_pred, batch_gt_instances, batch_img_metas)
stage_loss = stage.loss_by_feat(*rpn_loss_inputs)
for name, value in stage_loss.items():
losses['s{}.{}'.format(i, name)] = value
# refine boxes
if i < self.num_stages - 1:
anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
batch_img_metas)
return losses
def loss_and_predict(
self,
x: Tuple[Tensor],
batch_data_samples: SampleList,
proposal_cfg: Optional[ConfigDict] = None,
) -> Tuple[dict, InstanceList]:
"""Perform forward propagation of the head, then calculate loss and
predictions from the features and data samples.
Args:
x (tuple[Tensor]): Features from FPN.
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
proposal_cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
Returns:
tuple: the return value is a tuple contains:
- losses: (dict[str, Tensor]): A dictionary of loss components.
- predictions (list[:obj:`InstanceData`]): Detection
results of each image after the post process.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, _, batch_img_metas = outputs
featmap_sizes = [featmap.size()[-2:] for featmap in x]
device = x[0].device
anchor_list, valid_flag_list = self.stages[0].get_anchors(
featmap_sizes, batch_img_metas, device=device)
losses = dict()
for i in range(self.num_stages):
stage = self.stages[i]
if stage.adapt_cfg['type'] == 'offset':
offset_list = stage.anchor_offset(anchor_list,
stage.anchor_strides,
featmap_sizes)
else:
offset_list = None
x, cls_score, bbox_pred = stage(x, offset_list)
rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,
bbox_pred, batch_gt_instances, batch_img_metas)
stage_loss = stage.loss_by_feat(*rpn_loss_inputs)
for name, value in stage_loss.items():
losses['s{}.{}'.format(i, name)] = value
# refine boxes
if i < self.num_stages - 1:
anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
batch_img_metas)
predictions = self.stages[-1].predict_by_feat(
anchor_list,
cls_score,
bbox_pred,
batch_img_metas=batch_img_metas,
cfg=proposal_cfg)
return losses, predictions
def predict(self,
x: Tuple[Tensor],
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the detection head and predict
detection results on the features of the upstream network.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[obj:`InstanceData`]: Detection results of each image
after the post process.
"""
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
featmap_sizes = [featmap.size()[-2:] for featmap in x]
device = x[0].device
anchor_list, _ = self.stages[0].get_anchors(
featmap_sizes, batch_img_metas, device=device)
for i in range(self.num_stages):
stage = self.stages[i]
if stage.adapt_cfg['type'] == 'offset':
offset_list = stage.anchor_offset(anchor_list,
stage.anchor_strides,
featmap_sizes)
else:
offset_list = None
x, cls_score, bbox_pred = stage(x, offset_list)
if i < self.num_stages - 1:
anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
batch_img_metas)
predictions = self.stages[-1].predict_by_feat(
anchor_list,
cls_score,
bbox_pred,
batch_img_metas=batch_img_metas,
rescale=rescale)
return predictions
| 48,358 | 42.527453 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/vfnet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmcv.ops import DeformConv2d
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps
from mmdet.utils import (ConfigType, InstanceList, MultiConfig,
OptInstanceList, RangeType, reduce_mean)
from ..task_modules.prior_generators import MlvlPointGenerator
from ..task_modules.samplers import PseudoSampler
from ..utils import multi_apply
from .atss_head import ATSSHead
from .fcos_head import FCOSHead
INF = 1e8
@MODELS.register_module()
class VFNetHead(ATSSHead, FCOSHead):
"""Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
Detector.<https://arxiv.org/abs/2008.13367>`_.
The VFNet predicts IoU-aware classification scores which mix the
object presence confidence and object localization accuracy as the
detection score. It is built on the FCOS architecture and uses ATSS
for defining positive/negative training examples. The VFNet is trained
    with Varifocal Loss and employs star-shaped deformable convolution to
extract features for a bbox.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling. Defaults to False.
center_sample_radius (float): Radius of center sampling. Defaults to 1.5.
sync_num_pos (bool): If true, synchronize the number of positive
            examples across GPUs. Defaults to True.
gradient_mul (float): The multiplier to gradients from bbox refinement
and recognition. Defaults to 0.1.
bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
            'stride'. Defaults to 'reg_denom'.
loss_cls_fl (:obj:`ConfigDict` or dict): Config of focal loss.
use_vfl (bool): If true, use varifocal loss for training.
Defaults to True.
loss_cls (:obj:`ConfigDict` or dict): Config of varifocal loss.
loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss,
GIoU Loss.
        loss_bbox_refine (:obj:`ConfigDict` or dict): Config of localization
refinement loss, GIoU Loss.
norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
config norm layer. Defaults to norm_cfg=dict(type='GN',
num_groups=32, requires_grad=True).
use_atss (bool): If true, use ATSS to define positive/negative
examples. Defaults to True.
anchor_generator (:obj:`ConfigDict` or dict): Config of anchor
generator for ATSS.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict.
Example:
>>> self = VFNetHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
""" # noqa: E501
def __init__(self,
num_classes: int,
in_channels: int,
regress_ranges: RangeType = ((-1, 64), (64, 128), (128, 256),
(256, 512), (512, INF)),
center_sampling: bool = False,
center_sample_radius: float = 1.5,
sync_num_pos: bool = True,
gradient_mul: float = 0.1,
bbox_norm_type: str = 'reg_denom',
loss_cls_fl: ConfigType = dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
use_vfl: bool = True,
loss_cls: ConfigType = dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox: ConfigType = dict(
type='GIoULoss', loss_weight=1.5),
loss_bbox_refine: ConfigType = dict(
type='GIoULoss', loss_weight=2.0),
norm_cfg: ConfigType = dict(
type='GN', num_groups=32, requires_grad=True),
use_atss: bool = True,
reg_decoded_bbox: bool = True,
anchor_generator: ConfigType = dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
center_offset=0.0,
strides=[8, 16, 32, 64, 128]),
init_cfg: MultiConfig = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='vfnet_cls',
std=0.01,
bias_prob=0.01)),
**kwargs) -> None:
# dcn base offsets, adapted from reppoints_head.py
self.num_dconv_points = 9
self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
self.dcn_pad = int((self.dcn_kernel - 1) / 2)
dcn_base = np.arange(-self.dcn_pad,
self.dcn_pad + 1).astype(np.float64)
dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
(-1))
self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
super(FCOSHead, self).__init__(
num_classes=num_classes,
in_channels=in_channels,
norm_cfg=norm_cfg,
init_cfg=init_cfg,
**kwargs)
self.regress_ranges = regress_ranges
self.reg_denoms = [
regress_range[-1] for regress_range in regress_ranges
]
self.reg_denoms[-1] = self.reg_denoms[-2] * 2
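        # the last regress range ends at INF and cannot serve as a
        # normalizer, so (following the official VFNet) the top level
        # reuses twice the previous denominator, e.g. 512 * 2 = 1024 for
        # the default regress_ranges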
self.center_sampling = center_sampling
self.center_sample_radius = center_sample_radius
self.sync_num_pos = sync_num_pos
self.bbox_norm_type = bbox_norm_type
self.gradient_mul = gradient_mul
self.use_vfl = use_vfl
if self.use_vfl:
self.loss_cls = MODELS.build(loss_cls)
else:
self.loss_cls = MODELS.build(loss_cls_fl)
self.loss_bbox = MODELS.build(loss_bbox)
self.loss_bbox_refine = MODELS.build(loss_bbox_refine)
# for getting ATSS targets
self.use_atss = use_atss
self.reg_decoded_bbox = reg_decoded_bbox
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.anchor_center_offset = anchor_generator['center_offset']
self.num_base_priors = self.prior_generator.num_base_priors[0]
if self.train_cfg:
self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
if self.train_cfg.get('sampler', None) is not None:
self.sampler = TASK_UTILS.build(
self.train_cfg['sampler'], default_args=dict(context=self))
else:
self.sampler = PseudoSampler()
        # only used in `get_atss_targets` when `use_atss` is True
self.atss_prior_generator = TASK_UTILS.build(anchor_generator)
self.fcos_prior_generator = MlvlPointGenerator(
anchor_generator['strides'],
self.anchor_center_offset if self.use_atss else 0.5)
        # In order to reuse the `get_bboxes` in `BaseDenseHead`,
        # this is only used in the testing phase.
self.prior_generator = self.fcos_prior_generator
def _init_layers(self) -> None:
"""Initialize layers of the head."""
super(FCOSHead, self)._init_cls_convs()
super(FCOSHead, self)._init_reg_convs()
self.relu = nn.ReLU()
self.vfnet_reg_conv = ConvModule(
self.feat_channels,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.conv_bias)
self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
self.vfnet_reg_refine_dconv = DeformConv2d(
self.feat_channels,
self.feat_channels,
self.dcn_kernel,
1,
padding=self.dcn_pad)
self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])
self.vfnet_cls_dconv = DeformConv2d(
self.feat_channels,
self.feat_channels,
self.dcn_kernel,
1,
padding=self.dcn_pad)
self.vfnet_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple:
- cls_scores (list[Tensor]): Box iou-aware scores for each scale
level, each is a 4D-tensor, the channel number is
num_points * num_classes.
- bbox_preds (list[Tensor]): Box offsets for each
scale level, each is a 4D-tensor, the channel number is
num_points * 4.
- bbox_preds_refine (list[Tensor]): Refined Box offsets for
each scale level, each is a 4D-tensor, the channel
number is num_points * 4.
"""
return multi_apply(self.forward_single, x, self.scales,
self.scales_refine, self.strides, self.reg_denoms)
def forward_single(self, x: Tensor, scale: Scale, scale_refine: Scale,
stride: int, reg_denom: int) -> tuple:
"""Forward features of a single scale level.
Args:
x (Tensor): FPN feature maps of the specified stride.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to
resize the refined bbox prediction.
stride (int): The corresponding stride for feature maps,
used to normalize the bbox prediction when
bbox_norm_type = 'stride'.
reg_denom (int): The corresponding regression range for feature
maps, only used to normalize the bbox prediction when
bbox_norm_type = 'reg_denom'.
Returns:
tuple: iou-aware cls scores for each box, bbox predictions and
refined bbox predictions of input feature maps.
"""
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# predict the bbox_pred of different level
reg_feat_init = self.vfnet_reg_conv(reg_feat)
if self.bbox_norm_type == 'reg_denom':
bbox_pred = scale(
self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
elif self.bbox_norm_type == 'stride':
bbox_pred = scale(
self.vfnet_reg(reg_feat_init)).float().exp() * stride
else:
raise NotImplementedError
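        # with either normalization, a raw head output of 0 decodes to
        # exactly reg_denom (or stride) pixels per side, since the Scale
        # layer is initialized to 1.0 and exp(0) == 1; the exp also keeps
        # the predicted distances positive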
# compute star deformable convolution offsets
        # convert dcn_offset to reg_feat.dtype so that VFNet can be
        # trained with FP16
dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
stride).to(reg_feat.dtype)
# refine the bbox_pred
reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
bbox_pred_refine = scale_refine(
self.vfnet_reg_refine(reg_feat)).float().exp()
bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()
# predict the iou-aware cls score
cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
cls_score = self.vfnet_cls(cls_feat)
if self.training:
return cls_score, bbox_pred, bbox_pred_refine
else:
return cls_score, bbox_pred_refine
def star_dcn_offset(self, bbox_pred: Tensor, gradient_mul: float,
stride: int) -> Tensor:
"""Compute the star deformable conv offsets.
Args:
bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b).
gradient_mul (float): Gradient multiplier.
stride (int): The corresponding stride for feature maps,
used to project the bbox onto the feature map.
Returns:
Tensor: The offsets for deformable convolution.
"""
dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
gradient_mul * bbox_pred
# map to the feature map scale
bbox_pred_grad_mul = bbox_pred_grad_mul / stride
N, C, H, W = bbox_pred.size()
x1 = bbox_pred_grad_mul[:, 0, :, :]
y1 = bbox_pred_grad_mul[:, 1, :, :]
x2 = bbox_pred_grad_mul[:, 2, :, :]
y2 = bbox_pred_grad_mul[:, 3, :, :]
bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
N, 2 * self.num_dconv_points, H, W)
bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1
bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1
bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1
bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1
bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2
bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1
bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2
bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2
bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1
bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2
bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2
bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2
dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset
return dcn_offset
def loss_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
bbox_preds_refine: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box iou-aware scores for each scale
level, each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box offsets for each
scale level, each is a 4D-tensor, the channel number is
num_points * 4.
bbox_preds_refine (list[Tensor]): Refined Box offsets for
each scale level, each is a 4D-tensor, the channel
number is num_points * 4.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.fcos_prior_generator.grid_priors(
featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device)
labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
cls_scores,
all_level_points,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and bbox_preds_refine
flatten_cls_scores = [
cls_score.permute(0, 2, 3,
1).reshape(-1,
self.cls_out_channels).contiguous()
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
for bbox_pred in bbox_preds
]
flatten_bbox_preds_refine = [
bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
for bbox_pred_refine in bbox_preds_refine
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
# FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = torch.where(
((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
num_pos = len(pos_inds)
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
pos_labels = flatten_labels[pos_inds]
# sync num_pos across all gpus
if self.sync_num_pos:
num_pos_avg_per_gpu = reduce_mean(
pos_inds.new_tensor(num_pos).float()).item()
num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
else:
num_pos_avg_per_gpu = num_pos
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = self.bbox_coder.decode(
pos_points, pos_bbox_preds)
pos_decoded_target_preds = self.bbox_coder.decode(
pos_points, pos_bbox_targets)
iou_targets_ini = bbox_overlaps(
pos_decoded_bbox_preds,
pos_decoded_target_preds.detach(),
is_aligned=True).clamp(min=1e-6)
bbox_weights_ini = iou_targets_ini.clone().detach()
bbox_avg_factor_ini = reduce_mean(
bbox_weights_ini.sum()).clamp_(min=1).item()
pos_decoded_bbox_preds_refine = \
self.bbox_coder.decode(pos_points, pos_bbox_preds_refine)
iou_targets_rf = bbox_overlaps(
pos_decoded_bbox_preds_refine,
pos_decoded_target_preds.detach(),
is_aligned=True).clamp(min=1e-6)
bbox_weights_rf = iou_targets_rf.clone().detach()
bbox_avg_factor_rf = reduce_mean(
bbox_weights_rf.sum()).clamp_(min=1).item()
if num_pos > 0:
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds.detach(),
weight=bbox_weights_ini,
avg_factor=bbox_avg_factor_ini)
loss_bbox_refine = self.loss_bbox_refine(
pos_decoded_bbox_preds_refine,
pos_decoded_target_preds.detach(),
weight=bbox_weights_rf,
avg_factor=bbox_avg_factor_rf)
# build IoU-aware cls_score targets
if self.use_vfl:
pos_ious = iou_targets_rf.clone().detach()
cls_iou_targets = torch.zeros_like(flatten_cls_scores)
cls_iou_targets[pos_inds, pos_labels] = pos_ious
else:
loss_bbox = pos_bbox_preds.sum() * 0
loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
if self.use_vfl:
cls_iou_targets = torch.zeros_like(flatten_cls_scores)
if self.use_vfl:
loss_cls = self.loss_cls(
flatten_cls_scores,
cls_iou_targets,
avg_factor=num_pos_avg_per_gpu)
else:
loss_cls = self.loss_cls(
flatten_cls_scores,
flatten_labels,
weight=label_weights,
avg_factor=num_pos_avg_per_gpu)
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_bbox_rf=loss_bbox_refine)
def get_targets(
self,
cls_scores: List[Tensor],
mlvl_points: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> tuple:
"""A wrapper for computing ATSS and FCOS targets for points in multiple
images.
Args:
cls_scores (list[Tensor]): Box iou-aware scores for each scale
level with shape (N, num_points * num_classes, H, W).
mlvl_points (list[Tensor]): Points of each fpn level, each has
shape (num_points, 2).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights (Tensor/None): Label weights of all levels.
- bbox_targets_list (list[Tensor]): Regression targets of each
level, (l, t, r, b).
- bbox_weights (Tensor/None): Bbox weights of all levels.
"""
if self.use_atss:
return self.get_atss_targets(cls_scores, mlvl_points,
batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
else:
self.norm_on_bbox = False
return self.get_fcos_targets(mlvl_points, batch_gt_instances)
def _get_targets_single(self, *args, **kwargs):
"""Avoid ambiguity in multiple inheritance."""
if self.use_atss:
return ATSSHead._get_targets_single(self, *args, **kwargs)
else:
return FCOSHead._get_targets_single(self, *args, **kwargs)
def get_fcos_targets(self, points: List[Tensor],
batch_gt_instances: InstanceList) -> tuple:
"""Compute FCOS regression and classification targets for points in
multiple images.
Args:
points (list[Tensor]): Points of each fpn level, each has shape
(num_points, 2).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
Returns:
tuple:
- labels (list[Tensor]): Labels of each level.
- label_weights: None, to be compatible with ATSS targets.
- bbox_targets (list[Tensor]): BBox targets of each level.
- bbox_weights: None, to be compatible with ATSS targets.
"""
labels, bbox_targets = FCOSHead.get_targets(self, points,
batch_gt_instances)
label_weights = None
bbox_weights = None
return labels, label_weights, bbox_targets, bbox_weights
def get_anchors(self,
featmap_sizes: List[Tuple],
batch_img_metas: List[dict],
device: str = 'cuda') -> tuple:
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
batch_img_metas (list[dict]): Image meta info.
device (str): Device for returned tensors
Returns:
tuple:
- anchor_list (list[Tensor]): Anchors of each image.
- valid_flag_list (list[Tensor]): Valid flags of each image.
"""
num_imgs = len(batch_img_metas)
        # since feature map sizes of all images are the same, we only
        # compute anchors once
multi_level_anchors = self.atss_prior_generator.grid_priors(
featmap_sizes, device=device)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(batch_img_metas):
multi_level_flags = self.atss_prior_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'], device=device)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
def get_atss_targets(
self,
cls_scores: List[Tensor],
mlvl_points: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> tuple:
"""A wrapper for computing ATSS targets for points in multiple images.
Args:
cls_scores (list[Tensor]): Box iou-aware scores for each scale
level with shape (N, num_points * num_classes, H, W).
mlvl_points (list[Tensor]): Points of each fpn level, each has
shape (num_points, 2).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights (Tensor): Label weights of all levels.
- bbox_targets_list (list[Tensor]): Regression targets of each
level, (l, t, r, b).
- bbox_weights (Tensor): Bbox weights of all levels.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(
featmap_sizes
) == self.atss_prior_generator.num_levels == \
self.fcos_prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
cls_reg_targets = ATSSHead.get_targets(
self,
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore,
unmap_outputs=True)
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, avg_factor) = cls_reg_targets
bbox_targets_list = [
bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
]
num_imgs = len(batch_img_metas)
# transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
bbox_targets_list = self.transform_bbox_targets(
bbox_targets_list, mlvl_points, num_imgs)
labels_list = [labels.reshape(-1) for labels in labels_list]
label_weights_list = [
label_weights.reshape(-1) for label_weights in label_weights_list
]
bbox_weights_list = [
bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
]
label_weights = torch.cat(label_weights_list)
bbox_weights = torch.cat(bbox_weights_list)
return labels_list, label_weights, bbox_targets_list, bbox_weights
def transform_bbox_targets(self, decoded_bboxes: List[Tensor],
mlvl_points: List[Tensor],
num_imgs: int) -> List[Tensor]:
"""Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.
Args:
decoded_bboxes (list[Tensor]): Regression targets of each level,
in the form of (x1, y1, x2, y2).
mlvl_points (list[Tensor]): Points of each fpn level, each has
shape (num_points, 2).
num_imgs (int): the number of images in a batch.
Returns:
bbox_targets (list[Tensor]): Regression targets of each level in
the form of (l, t, r, b).
"""
        # TODO: re-implement in a PointCoder class
assert len(decoded_bboxes) == len(mlvl_points)
num_levels = len(decoded_bboxes)
mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
bbox_targets = []
for i in range(num_levels):
bbox_target = self.bbox_coder.encode(mlvl_points[i],
decoded_bboxes[i])
bbox_targets.append(bbox_target)
return bbox_targets
def _load_from_state_dict(self, state_dict: dict, prefix: str,
local_metadata: dict, strict: bool,
missing_keys: Union[List[str], str],
unexpected_keys: Union[List[str], str],
error_msgs: Union[List[str], str]) -> None:
"""Override the method in the parent class to avoid changing para's
name."""
pass
| 30,913 | 41.757953 | 81 | py |
ERD | ERD-main/mmdet/models/dense_heads/centernet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from mmcv.ops import batched_nms
from mmengine.config import ConfigDict
from mmengine.model import bias_init_with_prob, normal_init
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import (ConfigType, InstanceList, OptConfigType,
OptInstanceList, OptMultiConfig)
from ..utils import (gaussian_radius, gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, multi_apply,
transpose_and_gather_feat)
from .base_dense_head import BaseDenseHead
@MODELS.register_module()
class CenterNetHead(BaseDenseHead):
"""Objects as Points Head. CenterHead use center_point to indicate object's
position. Paper link <https://arxiv.org/abs/1904.07850>
Args:
in_channels (int): Number of channel in the input feature map.
feat_channels (int): Number of channel in the intermediate feature map.
num_classes (int): Number of categories excluding the background
category.
loss_center_heatmap (:obj:`ConfigDict` or dict): Config of center
heatmap loss. Defaults to
dict(type='GaussianFocalLoss', loss_weight=1.0)
loss_wh (:obj:`ConfigDict` or dict): Config of wh loss. Defaults to
dict(type='L1Loss', loss_weight=0.1).
loss_offset (:obj:`ConfigDict` or dict): Config of offset loss.
Defaults to dict(type='L1Loss', loss_weight=1.0).
train_cfg (:obj:`ConfigDict` or dict, optional): Training config.
Useless in CenterNet, but we keep this variable for
SingleStageDetector.
test_cfg (:obj:`ConfigDict` or dict, optional): Testing config
of CenterNet.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization
config dict.
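    Example (a minimal shape sketch; the sizes below are illustrative):
        >>> import torch
        >>> self = CenterNetHead(in_channels=16, feat_channels=16,
        ...                      num_classes=4)
        >>> feats = [torch.rand(1, 16, 32, 32)]
        >>> center_heatmap_preds, wh_preds, offset_preds = self.forward(feats)
        >>> assert center_heatmap_preds[0].shape == torch.Size([1, 4, 32, 32])
        >>> assert wh_preds[0].shape == torch.Size([1, 2, 32, 32])
        >>> assert offset_preds[0].shape == torch.Size([1, 2, 32, 32])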
"""
def __init__(self,
in_channels: int,
feat_channels: int,
num_classes: int,
loss_center_heatmap: ConfigType = dict(
type='GaussianFocalLoss', loss_weight=1.0),
loss_wh: ConfigType = dict(type='L1Loss', loss_weight=0.1),
loss_offset: ConfigType = dict(
type='L1Loss', loss_weight=1.0),
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.num_classes = num_classes
self.heatmap_head = self._build_head(in_channels, feat_channels,
num_classes)
self.wh_head = self._build_head(in_channels, feat_channels, 2)
self.offset_head = self._build_head(in_channels, feat_channels, 2)
self.loss_center_heatmap = MODELS.build(loss_center_heatmap)
self.loss_wh = MODELS.build(loss_wh)
self.loss_offset = MODELS.build(loss_offset)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.fp16_enabled = False
def _build_head(self, in_channels: int, feat_channels: int,
out_channels: int) -> nn.Sequential:
"""Build head for each branch."""
layer = nn.Sequential(
nn.Conv2d(in_channels, feat_channels, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(feat_channels, out_channels, kernel_size=1))
return layer
def init_weights(self) -> None:
"""Initialize weights of the head."""
bias_init = bias_init_with_prob(0.1)
self.heatmap_head[-1].bias.data.fill_(bias_init)
for head in [self.wh_head, self.offset_head]:
for m in head.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001)
def forward(self, x: Tuple[Tensor, ...]) -> Tuple[List[Tensor]]:
"""Forward features. Notice CenterNet head does not use FPN.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
            center_heatmap_preds (list[Tensor]): Predicted center heatmaps for
                all levels, the channels number is num_classes.
            wh_preds (list[Tensor]): Predicted wh maps for all levels, the
                channels number is 2.
            offset_preds (list[Tensor]): Predicted offset maps for all levels,
                the channels number is 2.
"""
return multi_apply(self.forward_single, x)
def forward_single(self, x: Tensor) -> Tuple[Tensor, ...]:
"""Forward feature of a single level.
Args:
x (Tensor): Feature of a single level.
Returns:
            center_heatmap_pred (Tensor): Predicted center heatmap, the
                channels number is num_classes.
            wh_pred (Tensor): Predicted wh map, the channels number is 2.
            offset_pred (Tensor): Predicted offset map, the channels number
                is 2.
"""
center_heatmap_pred = self.heatmap_head(x).sigmoid()
wh_pred = self.wh_head(x)
offset_pred = self.offset_head(x)
return center_heatmap_pred, wh_pred, offset_pred
def loss_by_feat(
self,
center_heatmap_preds: List[Tensor],
wh_preds: List[Tensor],
offset_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Compute losses of the head.
Args:
center_heatmap_preds (list[Tensor]): center predict heatmaps for
all levels with shape (B, num_classes, H, W).
wh_preds (list[Tensor]): wh predicts for all levels with
shape (B, 2, H, W).
offset_preds (list[Tensor]): offset predicts for all levels
with shape (B, 2, H, W).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: which has components below:
- loss_center_heatmap (Tensor): loss of center heatmap.
                - loss_wh (Tensor): loss of wh heatmap.
- loss_offset (Tensor): loss of offset heatmap.
"""
assert len(center_heatmap_preds) == len(wh_preds) == len(
offset_preds) == 1
center_heatmap_pred = center_heatmap_preds[0]
wh_pred = wh_preds[0]
offset_pred = offset_preds[0]
gt_bboxes = [
gt_instances.bboxes for gt_instances in batch_gt_instances
]
gt_labels = [
gt_instances.labels for gt_instances in batch_gt_instances
]
img_shape = batch_img_metas[0]['batch_input_shape']
target_result, avg_factor = self.get_targets(gt_bboxes, gt_labels,
center_heatmap_pred.shape,
img_shape)
center_heatmap_target = target_result['center_heatmap_target']
wh_target = target_result['wh_target']
offset_target = target_result['offset_target']
wh_offset_target_weight = target_result['wh_offset_target_weight']
        # Since wh_target and offset_target have 2 channels, the avg_factor
        # used for loss_wh and loss_offset is twice the one used for
        # loss_center_heatmap.
loss_center_heatmap = self.loss_center_heatmap(
center_heatmap_pred, center_heatmap_target, avg_factor=avg_factor)
loss_wh = self.loss_wh(
wh_pred,
wh_target,
wh_offset_target_weight,
avg_factor=avg_factor * 2)
loss_offset = self.loss_offset(
offset_pred,
offset_target,
wh_offset_target_weight,
avg_factor=avg_factor * 2)
return dict(
loss_center_heatmap=loss_center_heatmap,
loss_wh=loss_wh,
loss_offset=loss_offset)
def get_targets(self, gt_bboxes: List[Tensor], gt_labels: List[Tensor],
feat_shape: tuple, img_shape: tuple) -> Tuple[dict, int]:
"""Compute regression and classification targets in multiple images.
Args:
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box.
feat_shape (tuple): feature map shape with value [B, _, H, W]
img_shape (tuple): image shape.
Returns:
tuple[dict, float]: The float value is mean avg_factor, the dict
has components below:
- center_heatmap_target (Tensor): targets of center heatmap, \
shape (B, num_classes, H, W).
- wh_target (Tensor): targets of wh predict, shape \
(B, 2, H, W).
- offset_target (Tensor): targets of offset predict, shape \
(B, 2, H, W).
- wh_offset_target_weight (Tensor): weights of wh and offset \
predict, shape (B, 2, H, W).
"""
img_h, img_w = img_shape[:2]
bs, _, feat_h, feat_w = feat_shape
width_ratio = float(feat_w / img_w)
height_ratio = float(feat_h / img_h)
center_heatmap_target = gt_bboxes[-1].new_zeros(
[bs, self.num_classes, feat_h, feat_w])
wh_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w])
offset_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w])
wh_offset_target_weight = gt_bboxes[-1].new_zeros(
[bs, 2, feat_h, feat_w])
for batch_id in range(bs):
gt_bbox = gt_bboxes[batch_id]
gt_label = gt_labels[batch_id]
center_x = (gt_bbox[:, [0]] + gt_bbox[:, [2]]) * width_ratio / 2
center_y = (gt_bbox[:, [1]] + gt_bbox[:, [3]]) * height_ratio / 2
gt_centers = torch.cat((center_x, center_y), dim=1)
for j, ct in enumerate(gt_centers):
ctx_int, cty_int = ct.int()
ctx, cty = ct
scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio
scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio
radius = gaussian_radius([scale_box_h, scale_box_w],
min_overlap=0.3)
radius = max(0, int(radius))
ind = gt_label[j]
gen_gaussian_target(center_heatmap_target[batch_id, ind],
[ctx_int, cty_int], radius)
wh_target[batch_id, 0, cty_int, ctx_int] = scale_box_w
wh_target[batch_id, 1, cty_int, ctx_int] = scale_box_h
offset_target[batch_id, 0, cty_int, ctx_int] = ctx - ctx_int
offset_target[batch_id, 1, cty_int, ctx_int] = cty - cty_int
wh_offset_target_weight[batch_id, :, cty_int, ctx_int] = 1
avg_factor = max(1, center_heatmap_target.eq(1).sum())
target_result = dict(
center_heatmap_target=center_heatmap_target,
wh_target=wh_target,
offset_target=offset_target,
wh_offset_target_weight=wh_offset_target_weight)
return target_result, avg_factor
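    # Worked sketch (illustrative numbers): with a 512x512 input and a
    # 128x128 feature map, width_ratio = height_ratio = 0.25. A gt box
    # (x1, y1, x2, y2) = (100, 100, 300, 300) maps to
    # center = ((100 + 300) * 0.25 / 2, (100 + 300) * 0.25 / 2) = (50., 50.),
    # scale_box_w = scale_box_h = (300 - 100) * 0.25 = 50., and a gaussian of
    # radius ``gaussian_radius([50., 50.], min_overlap=0.3)`` is splatted at
    # the integer center (50, 50) on the channel of the gt label.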
def predict_by_feat(self,
center_heatmap_preds: List[Tensor],
wh_preds: List[Tensor],
offset_preds: List[Tensor],
batch_img_metas: Optional[List[dict]] = None,
rescale: bool = True,
with_nms: bool = False) -> InstanceList:
"""Transform network output for a batch into bbox predictions.
Args:
center_heatmap_preds (list[Tensor]): Center predict heatmaps for
all levels with shape (B, num_classes, H, W).
wh_preds (list[Tensor]): WH predicts for all levels with
shape (B, 2, H, W).
offset_preds (list[Tensor]): Offset predicts for all levels
with shape (B, 2, H, W).
batch_img_metas (list[dict], optional): Batch image meta info.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to True.
with_nms (bool): If True, do nms before return boxes.
Defaults to False.
Returns:
            list[:obj:`InstanceData`]: Detection results of each image
            after the post process.
            Each item usually contains the following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
assert len(center_heatmap_preds) == len(wh_preds) == len(
offset_preds) == 1
result_list = []
for img_id in range(len(batch_img_metas)):
result_list.append(
self._predict_by_feat_single(
center_heatmap_preds[0][img_id:img_id + 1, ...],
wh_preds[0][img_id:img_id + 1, ...],
offset_preds[0][img_id:img_id + 1, ...],
batch_img_metas[img_id],
rescale=rescale,
with_nms=with_nms))
return result_list
def _predict_by_feat_single(self,
center_heatmap_pred: Tensor,
wh_pred: Tensor,
offset_pred: Tensor,
img_meta: dict,
rescale: bool = True,
with_nms: bool = False) -> InstanceData:
"""Transform outputs of a single image into bbox results.
Args:
center_heatmap_pred (Tensor): Center heatmap for current level with
shape (1, num_classes, H, W).
            wh_pred (Tensor): WH heatmap for current level with shape
                (1, 2, H, W).
            offset_pred (Tensor): Offset for current level with shape
                (1, 2, H, W).
img_meta (dict): Meta information of current image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Defaults to True.
with_nms (bool): If True, do nms before return boxes.
Defaults to False.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
            Each item usually contains the following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
batch_det_bboxes, batch_labels = self._decode_heatmap(
center_heatmap_pred,
wh_pred,
offset_pred,
img_meta['batch_input_shape'],
k=self.test_cfg.topk,
kernel=self.test_cfg.local_maximum_kernel)
det_bboxes = batch_det_bboxes.view([-1, 5])
det_labels = batch_labels.view(-1)
batch_border = det_bboxes.new_tensor(img_meta['border'])[...,
[2, 0, 2, 0]]
det_bboxes[..., :4] -= batch_border
if rescale and 'scale_factor' in img_meta:
det_bboxes[..., :4] /= det_bboxes.new_tensor(
img_meta['scale_factor']).repeat((1, 2))
if with_nms:
det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels,
self.test_cfg)
results = InstanceData()
results.bboxes = det_bboxes[..., :4]
results.scores = det_bboxes[..., 4]
results.labels = det_labels
return results
def _decode_heatmap(self,
center_heatmap_pred: Tensor,
wh_pred: Tensor,
offset_pred: Tensor,
img_shape: tuple,
k: int = 100,
kernel: int = 3) -> Tuple[Tensor, Tensor]:
"""Transform outputs into detections raw bbox prediction.
Args:
center_heatmap_pred (Tensor): center predict heatmap,
shape (B, num_classes, H, W).
wh_pred (Tensor): wh predict, shape (B, 2, H, W).
offset_pred (Tensor): offset predict, shape (B, 2, H, W).
img_shape (tuple): image shape in hw format.
k (int): Get top k center keypoints from heatmap. Defaults to 100.
kernel (int): Max pooling kernel for extract local maximum pixels.
Defaults to 3.
Returns:
tuple[Tensor]: Decoded output of CenterNetHead, containing
the following Tensors:
- batch_bboxes (Tensor): Coords of each box with shape (B, k, 5)
- batch_topk_labels (Tensor): Categories of each box with \
shape (B, k)
"""
height, width = center_heatmap_pred.shape[2:]
inp_h, inp_w = img_shape
center_heatmap_pred = get_local_maximum(
center_heatmap_pred, kernel=kernel)
*batch_dets, topk_ys, topk_xs = get_topk_from_heatmap(
center_heatmap_pred, k=k)
batch_scores, batch_index, batch_topk_labels = batch_dets
wh = transpose_and_gather_feat(wh_pred, batch_index)
offset = transpose_and_gather_feat(offset_pred, batch_index)
topk_xs = topk_xs + offset[..., 0]
topk_ys = topk_ys + offset[..., 1]
tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width)
tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height)
br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width)
br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height)
batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2)
batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]),
dim=-1)
return batch_bboxes, batch_topk_labels
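    # Decoding sketch (illustrative): for a peak at integer location
    # (topk_x, topk_y) = (50, 50) with offset (0.3, 0.7) and wh (40., 20.)
    # on a 128x128 map from a 512x512 input (so inp_w / width = 4):
    #   cx, cy = 50.3, 50.7
    #   tl_x = (50.3 - 20.) * 4 = 121.2, tl_y = (50.7 - 10.) * 4 = 162.8
    #   br_x = (50.3 + 20.) * 4 = 281.2, br_y = (50.7 + 10.) * 4 = 242.8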
def _bboxes_nms(self, bboxes: Tensor, labels: Tensor,
cfg: ConfigDict) -> Tuple[Tensor, Tensor]:
"""bboxes nms."""
if labels.numel() > 0:
max_num = cfg.max_per_img
            bboxes, keep = batched_nms(bboxes[:, :4],
                                       bboxes[:, -1].contiguous(), labels,
                                       cfg.nms)
if max_num > 0:
bboxes = bboxes[:max_num]
labels = labels[keep][:max_num]
return bboxes, labels
| 19,870 | 43.354911 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/rtmdet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule, Scale, is_norm
from mmengine.model import bias_init_with_prob, constant_init, normal_init
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import distance2bbox
from mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean
from ..layers.transformer import inverse_sigmoid
from ..task_modules import anchor_inside_flags
from ..utils import (images_to_levels, multi_apply, sigmoid_geometric_mean,
unmap)
from .atss_head import ATSSHead
@MODELS.register_module()
class RTMDetHead(ATSSHead):
"""Detection Head of RTMDet.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
with_objectness (bool): Whether to add an objectness branch.
Defaults to True.
act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Defaults to dict(type='ReLU').
"""
def __init__(self,
num_classes: int,
in_channels: int,
with_objectness: bool = True,
act_cfg: ConfigType = dict(type='ReLU'),
**kwargs) -> None:
self.act_cfg = act_cfg
self.with_objectness = with_objectness
super().__init__(num_classes, in_channels, **kwargs)
if self.train_cfg:
self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
def _init_layers(self):
"""Initialize layers of the head."""
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
pred_pad_size = self.pred_kernel_size // 2
self.rtm_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
self.pred_kernel_size,
padding=pred_pad_size)
self.rtm_reg = nn.Conv2d(
self.feat_channels,
self.num_base_priors * 4,
self.pred_kernel_size,
padding=pred_pad_size)
if self.with_objectness:
self.rtm_obj = nn.Conv2d(
self.feat_channels,
1,
self.pred_kernel_size,
padding=pred_pad_size)
self.scales = nn.ModuleList(
[Scale(1.0) for _ in self.prior_generator.strides])
def init_weights(self) -> None:
"""Initialize weights of the head."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.rtm_cls, std=0.01, bias=bias_cls)
normal_init(self.rtm_reg, std=0.01)
if self.with_objectness:
normal_init(self.rtm_obj, std=0.01, bias=bias_cls)
def forward(self, feats: Tuple[Tensor, ...]) -> tuple:
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_base_priors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_base_priors * 4.
"""
cls_scores = []
bbox_preds = []
for idx, (x, scale, stride) in enumerate(
zip(feats, self.scales, self.prior_generator.strides)):
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.rtm_cls(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
if self.with_objectness:
objectness = self.rtm_obj(reg_feat)
cls_score = inverse_sigmoid(
sigmoid_geometric_mean(cls_score, objectness))
reg_dist = scale(self.rtm_reg(reg_feat).exp()).float() * stride[0]
cls_scores.append(cls_score)
bbox_preds.append(reg_dist)
return tuple(cls_scores), tuple(bbox_preds)
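    # Fusion sketch (assuming ``sigmoid_geometric_mean(a, b)`` computes
    # sqrt(sigmoid(a) * sigmoid(b))): with a cls logit of 2.0 and an
    # objectness logit of 0.0, sigmoid(2.0) ~= 0.881 and sigmoid(0.0) = 0.5,
    # so the fused score is sqrt(0.881 * 0.5) ~= 0.664; ``inverse_sigmoid``
    # then maps it back to a logit (~0.68) so downstream code keeps working
    # in logit space.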
def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,
labels: Tensor, label_weights: Tensor,
bbox_targets: Tensor, assign_metrics: Tensor,
stride: List[int]):
"""Compute loss of a single scale level.
Args:
cls_score (Tensor): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W).
bbox_pred (Tensor): Decoded bboxes for each scale
level with shape (N, num_anchors * 4, H, W).
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors).
bbox_targets (Tensor): BBox regression targets of each anchor with
shape (N, num_total_anchors, 4).
assign_metrics (Tensor): Assign metrics with shape
(N, num_total_anchors).
stride (List[int]): Downsample stride of the feature map.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert stride[0] == stride[1], 'h stride is not equal to w stride!'
cls_score = cls_score.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels).contiguous()
bbox_pred = bbox_pred.reshape(-1, 4)
bbox_targets = bbox_targets.reshape(-1, 4)
labels = labels.reshape(-1)
assign_metrics = assign_metrics.reshape(-1)
label_weights = label_weights.reshape(-1)
targets = (labels, assign_metrics)
loss_cls = self.loss_cls(
cls_score, targets, label_weights, avg_factor=1.0)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
if len(pos_inds) > 0:
pos_bbox_targets = bbox_targets[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_decode_bbox_pred = pos_bbox_pred
pos_decode_bbox_targets = pos_bbox_targets
# regression loss
pos_bbox_weight = assign_metrics[pos_inds]
loss_bbox = self.loss_bbox(
pos_decode_bbox_pred,
pos_decode_bbox_targets,
weight=pos_bbox_weight,
avg_factor=1.0)
else:
loss_bbox = bbox_pred.sum() * 0
pos_bbox_weight = bbox_targets.new_tensor(0.)
return loss_cls, loss_bbox, assign_metrics.sum(), pos_bbox_weight.sum()
def loss_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Decoded box for each scale
level with shape (N, num_anchors * 4, H, W) in
[tl_x, tl_y, br_x, br_y] format.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
num_imgs = len(batch_img_metas)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
flatten_cls_scores = torch.cat([
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.cls_out_channels)
for cls_score in cls_scores
], 1)
decoded_bboxes = []
for anchor, bbox_pred in zip(anchor_list[0], bbox_preds):
anchor = anchor.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
bbox_pred = distance2bbox(anchor, bbox_pred)
decoded_bboxes.append(bbox_pred)
flatten_bboxes = torch.cat(decoded_bboxes, 1)
cls_reg_targets = self.get_targets(
flatten_cls_scores,
flatten_bboxes,
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
assign_metrics_list, sampling_results_list) = cls_reg_targets
        (losses_cls, losses_bbox, cls_avg_factors,
         bbox_avg_factors) = multi_apply(
self.loss_by_feat_single,
cls_scores,
decoded_bboxes,
labels_list,
label_weights_list,
bbox_targets_list,
assign_metrics_list,
self.prior_generator.strides)
cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item()
losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls))
bbox_avg_factor = reduce_mean(
sum(bbox_avg_factors)).clamp_(min=1).item()
losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
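    # Decode sketch (assuming the mmdet ``distance2bbox`` convention):
    # per-prior distances (l, t, r, b) become a box around the prior center
    # (px, py) via x1 = px - l, y1 = py - t, x2 = px + r, y2 = py + b;
    # e.g. center (100., 100.) with distances (10., 20., 30., 40.) gives
    # (90., 80., 130., 140.). Only the first two prior columns (x, y) are
    # consumed, so the stride columns of ``with_stride=True`` priors are
    # ignored.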
def get_targets(self,
cls_scores: Tensor,
bbox_preds: Tensor,
anchor_list: List[List[Tensor]],
valid_flag_list: List[List[Tensor]],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in
multiple images.
Args:
cls_scores (Tensor): Classification predictions of images,
a 3D-Tensor with shape [num_imgs, num_priors, num_classes].
bbox_preds (Tensor): Decoded bboxes predictions of one image,
a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x,
tl_y, br_x, br_y] format.
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors. Defaults to True.
Returns:
tuple: a tuple containing learning targets.
- anchors_list (list[list[Tensor]]): Anchors of each level.
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each
level.
- bbox_targets_list (list[Tensor]): BBox targets of each level.
- assign_metrics_list (list[Tensor]): alignment metrics of each
level.
"""
num_imgs = len(batch_img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None] * num_imgs
# anchor_list: list(b * [-1, 4])
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_assign_metrics, sampling_results_list) = multi_apply(
self._get_targets_single,
cls_scores.detach(),
bbox_preds.detach(),
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# split targets to a list w.r.t. multiple levels
anchors_list = images_to_levels(all_anchors, num_level_anchors)
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
assign_metrics_list = images_to_levels(all_assign_metrics,
num_level_anchors)
return (anchors_list, labels_list, label_weights_list,
bbox_targets_list, assign_metrics_list, sampling_results_list)
def _get_targets_single(self,
cls_scores: Tensor,
bbox_preds: Tensor,
flat_anchors: Tensor,
valid_flags: Tensor,
gt_instances: InstanceData,
img_meta: dict,
gt_instances_ignore: Optional[InstanceData] = None,
unmap_outputs=True):
"""Compute regression, classification targets for anchors in a single
image.
Args:
cls_scores (list(Tensor)): Box scores for each image.
bbox_preds (list(Tensor)): Box energies / deltas for each image.
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors ,4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for current image.
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors. Defaults to True.
Returns:
            tuple: A tuple of targets, where N is the total number of
            anchors in the image.
- anchors (Tensor): All anchors in the image with shape (N, 4).
- labels (Tensor): Labels of all anchors in the image with shape
(N,).
- label_weights (Tensor): Label weights of all anchor in the
image with shape (N,).
- bbox_targets (Tensor): BBox targets of all anchors in the
image with shape (N, 4).
- norm_alignment_metrics (Tensor): Normalized alignment metrics
of all priors in the image with shape (N,).
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg['allowed_border'])
if not inside_flags.any():
            return (None, ) * 6
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
pred_instances = InstanceData(
scores=cls_scores[inside_flags, :],
bboxes=bbox_preds[inside_flags, :],
priors=anchors)
assign_result = self.assigner.assign(pred_instances, gt_instances,
gt_instances_ignore)
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
assign_metrics = anchors.new_zeros(
num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
# point-based
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
labels[pos_inds] = sampling_result.pos_gt_labels
if self.train_cfg['pos_weight'] <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg['pos_weight']
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
class_assigned_gt_inds = torch.unique(
sampling_result.pos_assigned_gt_inds)
for gt_inds in class_assigned_gt_inds:
gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds ==
gt_inds]
assign_metrics[gt_class_inds] = assign_result.max_overlaps[
gt_class_inds]
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
anchors = unmap(anchors, num_total_anchors, inside_flags)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
assign_metrics = unmap(assign_metrics, num_total_anchors,
inside_flags)
return (anchors, labels, label_weights, bbox_targets, assign_metrics,
sampling_result)
def get_anchors(self,
featmap_sizes: List[tuple],
batch_img_metas: List[dict],
device: Union[torch.device, str] = 'cuda') \
-> Tuple[List[List[Tensor]], List[List[Tensor]]]:
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
batch_img_metas (list[dict]): Image meta info.
device (torch.device or str): Device for returned tensors.
Defaults to cuda.
Returns:
tuple:
- anchor_list (list[list[Tensor]]): Anchors of each image.
- valid_flag_list (list[list[Tensor]]): Valid flags of each
image.
"""
num_imgs = len(batch_img_metas)
# since feature map sizes of all images are the same, we only compute
# anchors for one time
multi_level_anchors = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(batch_img_metas):
multi_level_flags = self.prior_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'], device)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
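    # Prior sketch (assuming an ``MlvlPointGenerator`` with offset 0, as in
    # the RTMDet configs): with ``with_stride=True`` each prior row is
    # (x, y, stride_w, stride_h) rather than a box, e.g. grid cell
    # (col=1, row=2) on the stride-8 level yields (8., 16., 8., 8.).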
@MODELS.register_module()
class RTMDetSepBNHead(RTMDetHead):
"""RTMDetHead with separated BN layers and shared conv layers.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
share_conv (bool): Whether to share conv layers between stages.
Defaults to True.
use_depthwise (bool): Whether to use depthwise separable convolution in
head. Defaults to False.
norm_cfg (:obj:`ConfigDict` or dict)): Config dict for normalization
layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001).
act_cfg (:obj:`ConfigDict` or dict)): Config dict for activation layer.
Defaults to dict(type='SiLU').
pred_kernel_size (int): Kernel size of prediction layer. Defaults to 1.
"""
def __init__(self,
num_classes: int,
in_channels: int,
share_conv: bool = True,
use_depthwise: bool = False,
norm_cfg: ConfigType = dict(
type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='SiLU'),
pred_kernel_size: int = 1,
exp_on_reg=False,
**kwargs) -> None:
self.share_conv = share_conv
self.exp_on_reg = exp_on_reg
self.use_depthwise = use_depthwise
super().__init__(
num_classes,
in_channels,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
pred_kernel_size=pred_kernel_size,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
conv = DepthwiseSeparableConvModule \
if self.use_depthwise else ConvModule
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.rtm_cls = nn.ModuleList()
self.rtm_reg = nn.ModuleList()
if self.with_objectness:
self.rtm_obj = nn.ModuleList()
for n in range(len(self.prior_generator.strides)):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
conv(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
reg_convs.append(
conv(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
self.rtm_cls.append(
nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
self.pred_kernel_size,
padding=self.pred_kernel_size // 2))
self.rtm_reg.append(
nn.Conv2d(
self.feat_channels,
self.num_base_priors * 4,
self.pred_kernel_size,
padding=self.pred_kernel_size // 2))
if self.with_objectness:
self.rtm_obj.append(
nn.Conv2d(
self.feat_channels,
1,
self.pred_kernel_size,
padding=self.pred_kernel_size // 2))
if self.share_conv:
for n in range(len(self.prior_generator.strides)):
for i in range(self.stacked_convs):
self.cls_convs[n][i].conv = self.cls_convs[0][i].conv
self.reg_convs[n][i].conv = self.reg_convs[0][i].conv
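        # Sharing sketch: after this loop the conv weights are literally the
        # same ``nn.Conv2d`` object across levels, e.g.
        # ``self.cls_convs[1][0].conv is self.cls_convs[0][0].conv`` holds,
        # while the per-level norm layers (``.norm``) remain distinct
        # modules, which is what "separated BN" refers to.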
def init_weights(self) -> None:
"""Initialize weights of the head."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
for rtm_cls, rtm_reg in zip(self.rtm_cls, self.rtm_reg):
normal_init(rtm_cls, std=0.01, bias=bias_cls)
normal_init(rtm_reg, std=0.01)
if self.with_objectness:
for rtm_obj in self.rtm_obj:
normal_init(rtm_obj, std=0.01, bias=bias_cls)
def forward(self, feats: Tuple[Tensor, ...]) -> tuple:
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (tuple[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
- bbox_preds (tuple[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for idx, (x, stride) in enumerate(
zip(feats, self.prior_generator.strides)):
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs[idx]:
cls_feat = cls_layer(cls_feat)
cls_score = self.rtm_cls[idx](cls_feat)
for reg_layer in self.reg_convs[idx]:
reg_feat = reg_layer(reg_feat)
if self.with_objectness:
objectness = self.rtm_obj[idx](reg_feat)
cls_score = inverse_sigmoid(
sigmoid_geometric_mean(cls_score, objectness))
if self.exp_on_reg:
reg_dist = self.rtm_reg[idx](reg_feat).exp() * stride[0]
else:
reg_dist = self.rtm_reg[idx](reg_feat) * stride[0]
cls_scores.append(cls_score)
bbox_preds.append(reg_dist)
return tuple(cls_scores), tuple(bbox_preds)
| 29,566 | 41.665224 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/fsaf_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import InstanceList, OptInstanceList, OptMultiConfig
from ..losses.accuracy import accuracy
from ..losses.utils import weight_reduce_loss
from ..task_modules.prior_generators import anchor_inside_flags
from ..utils import images_to_levels, multi_apply, unmap
from .retina_head import RetinaHead
@MODELS.register_module()
class FSAFHead(RetinaHead):
"""Anchor-free head used in `FSAF <https://arxiv.org/abs/1903.00621>`_.
The head contains two subnetworks. The first classifies anchor boxes and
    the second regresses deltas for the anchors (num_anchors is 1 for
    anchor-free methods).
Args:
*args: Same as its base class in :class:`RetinaHead`
score_threshold (float, optional): The score_threshold to calculate
positive recall. If given, prediction scores lower than this value
is counted as incorrect prediction. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict]): Initialization config dict.
**kwargs: Same as its base class in :class:`RetinaHead`
Example:
>>> import torch
>>> self = FSAFHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == self.num_classes
>>> assert box_per_anchor == 4
"""
def __init__(self,
*args,
score_threshold: Optional[float] = None,
init_cfg: OptMultiConfig = None,
**kwargs) -> None:
        # The positive bias in the self.retina_reg conv prevents the
        # predicted bbox from having zero area
if init_cfg is None:
init_cfg = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=[
dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01),
dict(
type='Normal', name='retina_reg', std=0.01, bias=0.25)
])
super().__init__(*args, init_cfg=init_cfg, **kwargs)
self.score_threshold = score_threshold
def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:
"""Forward feature map of a single scale level.
Args:
x (Tensor): Feature map of a single scale level.
Returns:
tuple[Tensor, Tensor]:
- cls_score (Tensor): Box scores for each scale level Has \
shape (N, num_points * num_classes, H, W).
- bbox_pred (Tensor): Box energies / deltas for each scale \
level with shape (N, num_points * 4, H, W).
"""
cls_score, bbox_pred = super().forward_single(x)
# relu: TBLR encoder only accepts positive bbox_pred
return cls_score, self.relu(bbox_pred)
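    # Clamping sketch: a raw regression output such as
    # (-0.3, 1.2, 0.8, 2.0) becomes (0.0, 1.2, 0.8, 2.0) after the relu,
    # so every TBLR distance fed to the encoder stays non-negative and the
    # decoded box never has negative extent.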
def _get_targets_single(self,
flat_anchors: Tensor,
valid_flags: Tensor,
gt_instances: InstanceData,
img_meta: dict,
gt_instances_ignore: Optional[InstanceData] = None,
unmap_outputs: bool = True) -> tuple:
"""Compute regression and classification targets for anchors in a
single image.
Most of the codes are the same with the base class :obj: `AnchorHead`,
except that it also collects and returns the matched gt index in the
image (from 0 to num_gt-1). If the anchor bbox is not matched to any
gt, the corresponding value in pos_gt_inds is -1.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors, ).
gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It should include ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for current image.
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors. Defaults to True.
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg['allowed_border'])
if not inside_flags.any():
raise ValueError(
'There is no valid anchor inside the image boundary. Please '
'check the image size and anchor sizes, or set '
'``allowed_border`` to -1 to skip the condition.')
# Assign gt and sample anchors
anchors = flat_anchors[inside_flags.type(torch.bool), :]
pred_instances = InstanceData(priors=anchors)
assign_result = self.assigner.assign(pred_instances, gt_instances,
gt_instances_ignore)
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(
(num_valid_anchors, self.cls_out_channels), dtype=torch.float)
pos_gt_inds = anchors.new_full((num_valid_anchors, ),
-1,
dtype=torch.long)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
else:
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
# is applied directly on the decoded bounding boxes, both
# the predicted boxes and regression targets should be with
# absolute coordinate format.
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
# The assigned gt_index for each anchor. (0-based)
pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds
labels[pos_inds] = sampling_result.pos_gt_labels
if self.train_cfg['pos_weight'] <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg['pos_weight']
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# shadowed_labels is a tensor composed of tuples
# (anchor_inds, class_label) that indicate those anchors lying in the
# outer region of a gt or overlapped by another gt with a smaller
# area.
#
# Therefore, only the shadowed labels are ignored for loss calculation.
# the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner`
shadowed_labels = assign_result.get_extra_property('shadowed_labels')
if shadowed_labels is not None and shadowed_labels.numel():
if len(shadowed_labels.shape) == 2:
idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1]
assert (labels[idx_] != label_).all(), \
'One label cannot be both positive and ignored'
label_weights[idx_, label_] = 0
else:
label_weights[shadowed_labels] = 0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
labels = unmap(
labels, num_total_anchors, inside_flags,
fill=self.num_classes) # fill bg label
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
pos_gt_inds = unmap(
pos_gt_inds, num_total_anchors, inside_flags, fill=-1)
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds, sampling_result, pos_gt_inds)
def loss_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None
) -> Dict[str, Tensor]:
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_points * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_points * 4, H, W).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
for i in range(len(bbox_preds)): # loop over fpn level
# avoid 0 area of the predicted bbox
bbox_preds[i] = bbox_preds[i].clamp(min=1e-4)
# TODO: It may directly use the base-class loss function.
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
batch_size = len(batch_img_metas)
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore,
return_sampling_results=True)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
avg_factor, sampling_results_list,
pos_assigned_gt_inds_list) = cls_reg_targets
num_gts = np.array(list(map(len, batch_gt_instances)))
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
concat_anchor_list = []
for i in range(len(anchor_list)):
concat_anchor_list.append(torch.cat(anchor_list[i]))
all_anchor_list = images_to_levels(concat_anchor_list,
num_level_anchors)
losses_cls, losses_bbox = multi_apply(
self.loss_by_feat_single,
cls_scores,
bbox_preds,
all_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
avg_factor=avg_factor)
# `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned
# gt index of each anchor bbox in each fpn level.
cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size
for i, assign in enumerate(pos_assigned_gt_inds_list):
# loop over fpn levels
for j in range(1, batch_size):
# loop over batch size
# Convert gt indices in each img to those in the batch
assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1])
pos_assigned_gt_inds_list[i] = assign.flatten()
labels_list[i] = labels_list[i].flatten()
num_gts = num_gts.sum() # total number of gt in the batch
# The unique label index of each gt in the batch
label_sequence = torch.arange(num_gts, device=device)
# Collect the average loss of each gt in each level
with torch.no_grad():
loss_levels, = multi_apply(
self.collect_loss_level_single,
losses_cls,
losses_bbox,
pos_assigned_gt_inds_list,
labels_seq=label_sequence)
# Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level
loss_levels = torch.stack(loss_levels, dim=0)
# Locate the best fpn level for loss back-propagation
if loss_levels.numel() == 0: # zero gt
argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long)
else:
_, argmin = loss_levels.min(dim=0)
# Reweight the loss of each (anchor, label) pair, so that only those
# at the best gt level are back-propagated.
losses_cls, losses_bbox, pos_inds = multi_apply(
self.reweight_loss_single,
losses_cls,
losses_bbox,
pos_assigned_gt_inds_list,
labels_list,
list(range(len(losses_cls))),
min_levels=argmin)
num_pos = torch.cat(pos_inds, 0).sum().float()
pos_recall = self.calculate_pos_recall(cls_scores, labels_list,
pos_inds)
if num_pos == 0: # No gt
num_total_neg = sum(
[results.num_neg for results in sampling_results_list])
avg_factor = num_pos + num_total_neg
else:
avg_factor = num_pos
for i in range(len(losses_cls)):
losses_cls[i] /= avg_factor
losses_bbox[i] /= avg_factor
return dict(
loss_cls=losses_cls,
loss_bbox=losses_bbox,
num_pos=num_pos / batch_size,
pos_recall=pos_recall)
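    # Selection sketch (illustrative): stacking per-level average losses for
    # 2 gts over 3 fpn levels might give
    #   loss_levels = [[0.9, 1e6],
    #                  [0.4, 0.7],
    #                  [0.6, 0.3]]
    # and ``loss_levels.min(dim=0)`` then selects level 1 for gt 0 and
    # level 2 for gt 1; only anchors matched to a gt at its selected level
    # keep their loss weights during reweighting.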
def calculate_pos_recall(self, cls_scores: List[Tensor],
labels_list: List[Tensor],
pos_inds: List[Tensor]) -> Tensor:
"""Calculate positive recall with score threshold.
Args:
cls_scores (list[Tensor]): Classification scores at all fpn levels.
Each tensor is in shape (N, num_classes * num_anchors, H, W)
labels_list (list[Tensor]): The label that each anchor is assigned
to. Shape (N * H * W * num_anchors, )
pos_inds (list[Tensor]): List of bool tensors indicating whether
the anchor is assigned to a positive label.
Shape (N * H * W * num_anchors, )
Returns:
Tensor: A single float number indicating the positive recall.
"""
with torch.no_grad():
num_class = self.num_classes
scores = [
cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos]
for cls, pos in zip(cls_scores, pos_inds)
]
labels = [
label.reshape(-1)[pos]
for label, pos in zip(labels_list, pos_inds)
]
scores = torch.cat(scores, dim=0)
labels = torch.cat(labels, dim=0)
if self.use_sigmoid_cls:
scores = scores.sigmoid()
else:
scores = scores.softmax(dim=1)
return accuracy(scores, labels, thresh=self.score_threshold)
def collect_loss_level_single(self, cls_loss: Tensor, reg_loss: Tensor,
assigned_gt_inds: Tensor,
labels_seq: Tensor) -> Tensor:
"""Get the average loss in each FPN level w.r.t. each gt label.
Args:
cls_loss (Tensor): Classification loss of each feature map pixel,
shape (num_anchor, num_class)
reg_loss (Tensor): Regression loss of each feature map pixel,
shape (num_anchor, 4)
            assigned_gt_inds (Tensor): It indicates which gt the prior is
                assigned to (0-based; -1 means no assignment).
                Shape (num_anchor, ).
            labels_seq (Tensor): The sequence of gt indices in the batch
                (``torch.arange(num_gts)``). Shape (num_gt, ).
Returns:
Tensor: shape (num_gt), average loss of each gt in this level
"""
if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4)
reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims
if len(cls_loss.shape) == 2:
cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims
loss = cls_loss + reg_loss
assert loss.size(0) == assigned_gt_inds.size(0)
# Default loss value is 1e6 for a layer where no anchor is positive
# to ensure it will not be chosen to back-propagate gradient
losses_ = loss.new_full(labels_seq.shape, 1e6)
for i, l in enumerate(labels_seq):
match = assigned_gt_inds == l
if match.any():
losses_[i] = loss[match].mean()
return losses_,
def reweight_loss_single(self, cls_loss: Tensor, reg_loss: Tensor,
assigned_gt_inds: Tensor, labels: Tensor,
level: int, min_levels: Tensor) -> tuple:
"""Reweight loss values at each level.
Reassign loss values at each level by masking those where the
pre-calculated loss is too large. Then return the reduced losses.
Args:
cls_loss (Tensor): Element-wise classification loss.
Shape: (num_anchors, num_classes)
reg_loss (Tensor): Element-wise regression loss.
Shape: (num_anchors, 4)
            assigned_gt_inds (Tensor): The gt indices that each anchor bbox
                is assigned to. -1 denotes a negative anchor; otherwise it is
                the gt index (0-based). Shape: (num_anchors, ).
labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ).
level (int): The current level index in the pyramid
(0-4 for RetinaNet)
min_levels (Tensor): The best-matching level for each gt.
                Shape: (num_gts, ).
Returns:
tuple:
- cls_loss: Reduced corrected classification loss. Scalar.
- reg_loss: Reduced corrected regression loss. Scalar.
- pos_flags (Tensor): Corrected bool tensor indicating the \
final positive anchors. Shape: (num_anchors, ).
"""
loc_weight = torch.ones_like(reg_loss)
cls_weight = torch.ones_like(cls_loss)
pos_flags = assigned_gt_inds >= 0 # positive pixel flag
pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten()
if pos_flags.any(): # pos pixels exist
pos_assigned_gt_inds = assigned_gt_inds[pos_flags]
zeroing_indices = (min_levels[pos_assigned_gt_inds] != level)
neg_indices = pos_indices[zeroing_indices]
if neg_indices.numel():
pos_flags[neg_indices] = 0
loc_weight[neg_indices] = 0
# Only the weight corresponding to the label is
# zeroed out if not selected
zeroing_labels = labels[neg_indices]
assert (zeroing_labels >= 0).all()
cls_weight[neg_indices, zeroing_labels] = 0
# Weighted loss for both cls and reg loss
cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum')
reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum')
return cls_loss, reg_loss, pos_flags
| 20,957 | 44.660131 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/atss_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, reduce_mean)
from ..task_modules.prior_generators import anchor_inside_flags
from ..utils import images_to_levels, multi_apply, unmap
from .anchor_head import AnchorHead
@MODELS.register_module()
class ATSSHead(AnchorHead):
"""Detection Head of `ATSS <https://arxiv.org/abs/1912.02424>`_.
    The ATSS head structure is similar to FCOS; however, ATSS uses anchor
    boxes and assigns labels by Adaptive Training Sample Selection instead
    of max-IoU.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
pred_kernel_size (int): Kernel size of ``nn.Conv2d``
stacked_convs (int): Number of stacking convs of the head.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to ``dict(type='GN', num_groups=32,
requires_grad=True)``.
reg_decoded_bbox (bool): If true, the regression loss would be
applied directly on decoded bounding boxes, converting both
the predicted boxes and regression targets to absolute
coordinates format. Defaults to False. It should be `True` when
using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss.
Defaults to ``dict(type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=1.0)``.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict.
"""
def __init__(self,
num_classes: int,
in_channels: int,
pred_kernel_size: int = 3,
stacked_convs: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='GN', num_groups=32, requires_grad=True),
reg_decoded_bbox: bool = True,
loss_centerness: ConfigType = dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
init_cfg: MultiConfig = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='atss_cls',
std=0.01,
bias_prob=0.01)),
**kwargs) -> None:
self.pred_kernel_size = pred_kernel_size
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
reg_decoded_bbox=reg_decoded_bbox,
init_cfg=init_cfg,
**kwargs)
self.sampling = False
self.loss_centerness = MODELS.build(loss_centerness)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
pred_pad_size = self.pred_kernel_size // 2
self.atss_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
self.pred_kernel_size,
padding=pred_pad_size)
self.atss_reg = nn.Conv2d(
self.feat_channels,
self.num_base_priors * 4,
self.pred_kernel_size,
padding=pred_pad_size)
self.atss_centerness = nn.Conv2d(
self.feat_channels,
self.num_base_priors * 1,
self.pred_kernel_size,
padding=pred_pad_size)
self.scales = nn.ModuleList(
[Scale(1.0) for _ in self.prior_generator.strides])
def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
return multi_apply(self.forward_single, x, self.scales)
def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level
the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale
level, the channels number is num_anchors * 4.
                centerness (Tensor): Centerness for a single scale level,
                    with shape (N, num_anchors * 1, H, W).
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.atss_cls(cls_feat)
# we just follow atss, not apply exp in bbox_pred
bbox_pred = scale(self.atss_reg(reg_feat)).float()
centerness = self.atss_centerness(reg_feat)
return cls_score, bbox_pred, centerness
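    # Illustrative sketch, not part of the original class; the constructor
    # arguments and tensor sizes below are assumptions for the example only.
    #   head = ATSSHead(num_classes=80, in_channels=256)   # hypothetical
    #   x = torch.rand(2, 256, 32, 32)                     # one FPN level
    #   cls_score, bbox_pred, centerness = head.forward_single(
    #       x, head.scales[0])
    #   # cls_score:  (2, num_anchors * 80, 32, 32)
    #   # bbox_pred:  (2, num_anchors * 4,  32, 32)
    #   # centerness: (2, num_anchors * 1,  32, 32)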
def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,
bbox_pred: Tensor, centerness: Tensor,
labels: Tensor, label_weights: Tensor,
bbox_targets: Tensor, avg_factor: float) -> dict:
"""Calculate the loss of a single scale level based on the features
extracted by the detection head.
        Args:
            anchors (Tensor): Box reference for each scale level with shape
                (N, num_total_anchors, 4).
            cls_score (Tensor): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W).
            bbox_pred (Tensor): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            centerness (Tensor): Centerness for each scale level with shape
                (N, num_anchors * 1, H, W).
            labels (Tensor): Labels of each anchor with shape
                (N, num_total_anchors).
            label_weights (Tensor): Label weights of each anchor with shape
                (N, num_total_anchors).
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (N, num_total_anchors, 4).
            avg_factor (float): Average factor that is used to average
                the loss. When using sampling method, avg_factor is usually
                the sum of positive and negative priors. When using
                `PseudoSampler`, `avg_factor` is usually equal to the number
                of positive priors.
        Returns:
            tuple[Tensor]: `loss_cls`, `loss_bbox`, `loss_centerness` and
                the sum of centerness targets, which is used as the average
                factor of the bbox loss in `loss_by_feat`.
        """
anchors = anchors.reshape(-1, 4)
cls_score = cls_score.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels).contiguous()
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
centerness = centerness.permute(0, 2, 3, 1).reshape(-1)
bbox_targets = bbox_targets.reshape(-1, 4)
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
# classification loss
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=avg_factor)
        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
if len(pos_inds) > 0:
pos_bbox_targets = bbox_targets[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_anchors = anchors[pos_inds]
pos_centerness = centerness[pos_inds]
centerness_targets = self.centerness_target(
pos_anchors, pos_bbox_targets)
pos_decode_bbox_pred = self.bbox_coder.decode(
pos_anchors, pos_bbox_pred)
# regression loss
loss_bbox = self.loss_bbox(
pos_decode_bbox_pred,
pos_bbox_targets,
weight=centerness_targets,
avg_factor=1.0)
# centerness loss
loss_centerness = self.loss_centerness(
pos_centerness, centerness_targets, avg_factor=avg_factor)
else:
loss_bbox = bbox_pred.sum() * 0
loss_centerness = centerness.sum() * 0
centerness_targets = bbox_targets.new_tensor(0.)
return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()
def loss_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
centernesses: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
centernesses (list[Tensor]): Centerness for each scale
level with shape (N, num_anchors * 1, H, W)
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, avg_factor) = cls_reg_targets
avg_factor = reduce_mean(
torch.tensor(avg_factor, dtype=torch.float, device=device)).item()
losses_cls, losses_bbox, loss_centerness, \
bbox_avg_factor = multi_apply(
self.loss_by_feat_single,
anchor_list,
cls_scores,
bbox_preds,
centernesses,
labels_list,
label_weights_list,
bbox_targets_list,
avg_factor=avg_factor)
bbox_avg_factor = sum(bbox_avg_factor)
bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()
losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
return dict(
loss_cls=losses_cls,
loss_bbox=losses_bbox,
loss_centerness=loss_centerness)
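    # Illustrative note on the normalization in `loss_by_feat` above
    # (single-GPU sketch with made-up values): `reduce_mean` averages a
    # scalar across all ranks, so `avg_factor` (number of positive anchors)
    # and `bbox_avg_factor` (sum of centerness targets over levels) stay
    # consistent between GPUs before the division.
    #   losses_bbox = [torch.tensor(4.0), torch.tensor(2.0)]
    #   bbox_avg_factor = max(sum([3.5, 2.5]), 1)          # -> 6.0
    #   losses_bbox = [loss / bbox_avg_factor for loss in losses_bbox]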
def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor:
"""Calculate the centerness between anchors and gts.
        Only calculate the centerness targets of positive anchors, otherwise
        there may be NaN.
Args:
anchors (Tensor): Anchors with shape (N, 4), "xyxy" format.
gts (Tensor): Ground truth bboxes with shape (N, 4), "xyxy" format.
Returns:
Tensor: Centerness between anchors and gts.
"""
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
l_ = anchors_cx - gts[:, 0]
t_ = anchors_cy - gts[:, 1]
r_ = gts[:, 2] - anchors_cx
b_ = gts[:, 3] - anchors_cy
left_right = torch.stack([l_, r_], dim=1)
top_bottom = torch.stack([t_, b_], dim=1)
centerness = torch.sqrt(
(left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
(top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
assert not torch.isnan(centerness).any()
return centerness
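    # Worked example (illustrative, made-up coordinates): for an anchor
    # (4, 4, 8, 8), i.e. center (6, 6), matched to a gt box (2, 2, 10, 14):
    #   l_ = 6 - 2 = 4,  r_ = 10 - 6 = 4  -> min/max of (l_, r_) = 1.0
    #   t_ = 6 - 2 = 4,  b_ = 14 - 6 = 8  -> min/max of (t_, b_) = 0.5
    #   centerness = sqrt(1.0 * 0.5) ≈ 0.707
    # The target is 1.0 only when the anchor center coincides with the gt
    # center and decays towards 0 as it approaches the gt border.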
def get_targets(self,
anchor_list: List[List[Tensor]],
valid_flag_list: List[List[Tensor]],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
unmap_outputs: bool = True) -> tuple:
"""Get targets for ATSS head.
This method is almost the same as `AnchorHead.get_targets()`. Besides
returning the targets as the parent method does, it also returns the
anchors as the first element of the returned tuple.
"""
num_imgs = len(batch_img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
num_level_anchors_list = [num_level_anchors] * num_imgs
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None] * num_imgs
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_bbox_weights, pos_inds_list, neg_inds_list,
sampling_results_list) = multi_apply(
self._get_targets_single,
anchor_list,
valid_flag_list,
num_level_anchors_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore,
unmap_outputs=unmap_outputs)
# Get `avg_factor` of all images, which calculate in `SamplingResult`.
# When using sampling method, avg_factor is usually the sum of
# positive and negative priors. When using `PseudoSampler`,
# `avg_factor` is usually equal to the number of positive priors.
avg_factor = sum(
[results.avg_factor for results in sampling_results_list])
# split targets to a list w.r.t. multiple levels
anchors_list = images_to_levels(all_anchors, num_level_anchors)
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
return (anchors_list, labels_list, label_weights_list,
bbox_targets_list, bbox_weights_list, avg_factor)
def _get_targets_single(self,
flat_anchors: Tensor,
valid_flags: Tensor,
num_level_anchors: List[int],
gt_instances: InstanceData,
img_meta: dict,
gt_instances_ignore: Optional[InstanceData] = None,
unmap_outputs: bool = True) -> tuple:
"""Compute regression, classification targets for anchors in a single
image.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
                concatenated into a single tensor of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
num_level_anchors (List[int]): Number of anchors of each scale
level.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for current image.
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: N is the number of total anchors in the image.
labels (Tensor): Labels of all anchors in the image with shape
(N,).
label_weights (Tensor): Label weights of all anchor in the
image with shape (N,).
bbox_targets (Tensor): BBox targets of all anchors in the
image with shape (N, 4).
bbox_weights (Tensor): BBox weights of all anchors in the
image with shape (N, 4)
pos_inds (Tensor): Indices of positive anchor with shape
(num_pos,).
neg_inds (Tensor): Indices of negative anchor with shape
(num_neg,).
sampling_result (:obj:`SamplingResult`): Sampling results.
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg['allowed_border'])
if not inside_flags.any():
raise ValueError(
'There is no valid anchor inside the image boundary. Please '
'check the image size and anchor sizes, or set '
'``allowed_border`` to -1 to skip the condition.')
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
num_level_anchors_inside = self.get_num_level_anchors_inside(
num_level_anchors, inside_flags)
pred_instances = InstanceData(priors=anchors)
assign_result = self.assigner.assign(pred_instances,
num_level_anchors_inside,
gt_instances, gt_instances_ignore)
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
if self.reg_decoded_bbox:
pos_bbox_targets = sampling_result.pos_gt_bboxes
else:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_priors, sampling_result.pos_gt_bboxes)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
labels[pos_inds] = sampling_result.pos_gt_labels
if self.train_cfg['pos_weight'] <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg['pos_weight']
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
anchors = unmap(anchors, num_total_anchors, inside_flags)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (anchors, labels, label_weights, bbox_targets, bbox_weights,
pos_inds, neg_inds, sampling_result)
def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
"""Get the number of valid anchors in every level."""
split_inside_flags = torch.split(inside_flags, num_level_anchors)
num_level_anchors_inside = [
int(flags.sum()) for flags in split_inside_flags
]
return num_level_anchors_inside
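    # Illustrative sketch (level sizes are made up): with num_level_anchors =
    # [16384, 4096, 1024, 256, 64], `torch.split` cuts the flat boolean
    # `inside_flags` back into the five pyramid levels, and each level's
    # valid-anchor count is simply the sum of its flags:
    #   split_inside_flags = torch.split(inside_flags, num_level_anchors)
    #   [int(f.sum()) for f in split_inside_flags]  # e.g. [15000, 4096, ...]
    # The ATSS assigner consumes these counts to select its top-k candidate
    # anchors per level.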
| 23,314 | 43.409524 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/detr_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Linear
from mmcv.cnn.bricks.transformer import FFN
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
from mmdet.utils import (ConfigType, InstanceList, OptInstanceList,
OptMultiConfig, reduce_mean)
from ..utils import multi_apply
@MODELS.register_module()
class DETRHead(BaseModule):
r"""Head of DETR. DETR:End-to-End Object Detection with Transformers.
More details can be found in the `paper
<https://arxiv.org/pdf/2005.12872>`_ .
Args:
num_classes (int): Number of categories excluding the background.
embed_dims (int): The dims of Transformer embedding.
num_reg_fcs (int): Number of fully-connected layers used in `FFN`,
which is then used for the regression head. Defaults to 2.
sync_cls_avg_factor (bool): Whether to sync the `avg_factor` of
            all ranks. Defaults to `False`.
loss_cls (:obj:`ConfigDict` or dict): Config of the classification
loss. Defaults to `CrossEntropyLoss`.
loss_bbox (:obj:`ConfigDict` or dict): Config of the regression bbox
loss. Defaults to `L1Loss`.
loss_iou (:obj:`ConfigDict` or dict): Config of the regression iou
loss. Defaults to `GIoULoss`.
train_cfg (:obj:`ConfigDict` or dict): Training config of transformer
head.
test_cfg (:obj:`ConfigDict` or dict): Testing config of transformer
head.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
_version = 2
def __init__(
self,
num_classes: int,
embed_dims: int = 256,
num_reg_fcs: int = 2,
sync_cls_avg_factor: bool = False,
loss_cls: ConfigType = dict(
type='CrossEntropyLoss',
bg_cls_weight=0.1,
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
loss_bbox: ConfigType = dict(type='L1Loss', loss_weight=5.0),
loss_iou: ConfigType = dict(type='GIoULoss', loss_weight=2.0),
train_cfg: ConfigType = dict(
assigner=dict(
type='HungarianAssigner',
match_costs=[
dict(type='ClassificationCost', weight=1.),
dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
])),
test_cfg: ConfigType = dict(max_per_img=100),
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.bg_cls_weight = 0
self.sync_cls_avg_factor = sync_cls_avg_factor
class_weight = loss_cls.get('class_weight', None)
if class_weight is not None and (self.__class__ is DETRHead):
assert isinstance(class_weight, float), 'Expected ' \
'class_weight to have type float. Found ' \
f'{type(class_weight)}.'
# NOTE following the official DETR repo, bg_cls_weight means
# relative classification weight of the no-object class.
bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight)
assert isinstance(bg_cls_weight, float), 'Expected ' \
'bg_cls_weight to have type float. Found ' \
f'{type(bg_cls_weight)}.'
class_weight = torch.ones(num_classes + 1) * class_weight
            # set background class as the last index
class_weight[num_classes] = bg_cls_weight
loss_cls.update({'class_weight': class_weight})
if 'bg_cls_weight' in loss_cls:
loss_cls.pop('bg_cls_weight')
self.bg_cls_weight = bg_cls_weight
if train_cfg:
assert 'assigner' in train_cfg, 'assigner should be provided ' \
'when train_cfg is set.'
assigner = train_cfg['assigner']
self.assigner = TASK_UTILS.build(assigner)
if train_cfg.get('sampler', None) is not None:
                raise RuntimeError('DETR does not build a sampler.')
self.num_classes = num_classes
self.embed_dims = embed_dims
self.num_reg_fcs = num_reg_fcs
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.loss_cls = MODELS.build(loss_cls)
self.loss_bbox = MODELS.build(loss_bbox)
self.loss_iou = MODELS.build(loss_iou)
if self.loss_cls.use_sigmoid:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
self._init_layers()
def _init_layers(self) -> None:
"""Initialize layers of the transformer head."""
# cls branch
self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)
# reg branch
self.activate = nn.ReLU()
self.reg_ffn = FFN(
self.embed_dims,
self.embed_dims,
self.num_reg_fcs,
dict(type='ReLU', inplace=True),
dropout=0.0,
add_residual=False)
        # NOTE the activations of reg_branch here are the same as
# those in transformer, but they are actually different
# in DAB-DETR (prelu in transformer and relu in reg_branch)
self.fc_reg = Linear(self.embed_dims, 4)
def forward(self, hidden_states: Tensor) -> Tuple[Tensor]:
""""Forward function.
Args:
hidden_states (Tensor): Features from transformer decoder. If
                `return_intermediate_dec` in detr.py is True, the output has
                shape
(num_decoder_layers, bs, num_queries, dim), else has shape
(1, bs, num_queries, dim) which only contains the last layer
outputs.
Returns:
tuple[Tensor]: results of head containing the following tensor.
- layers_cls_scores (Tensor): Outputs from the classification head,
shape (num_decoder_layers, bs, num_queries, cls_out_channels).
Note cls_out_channels should include background.
- layers_bbox_preds (Tensor): Sigmoid outputs from the regression
head with normalized coordinate format (cx, cy, w, h), has shape
(num_decoder_layers, bs, num_queries, 4).
"""
layers_cls_scores = self.fc_cls(hidden_states)
layers_bbox_preds = self.fc_reg(
self.activate(self.reg_ffn(hidden_states))).sigmoid()
return layers_cls_scores, layers_bbox_preds
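    # Illustrative sketch (sizes are assumptions for the example): with 6
    # decoder layers, batch size 2, 100 queries and embed_dims=256,
    #   hidden_states = torch.rand(6, 2, 100, 256)
    #   layers_cls_scores, layers_bbox_preds = head(hidden_states)
    #   # layers_cls_scores: (6, 2, 100, cls_out_channels)
    #   # layers_bbox_preds: (6, 2, 100, 4), sigmoid-normalized (cx, cy, w, h)
    # `fc_cls` and `fc_reg` act on the last dimension only, so all decoder
    # layers and queries are scored in one pass.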
def loss(self, hidden_states: Tensor,
batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the features of the upstream network.
Args:
hidden_states (Tensor): Feature from the transformer decoder, has
shape (num_decoder_layers, bs, num_queries, cls_out_channels)
or (num_decoder_layers, num_queries, bs, cls_out_channels).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
batch_gt_instances = []
batch_img_metas = []
for data_sample in batch_data_samples:
batch_img_metas.append(data_sample.metainfo)
batch_gt_instances.append(data_sample.gt_instances)
outs = self(hidden_states)
loss_inputs = outs + (batch_gt_instances, batch_img_metas)
losses = self.loss_by_feat(*loss_inputs)
return losses
def loss_by_feat(
self,
all_layers_cls_scores: Tensor,
all_layers_bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None
) -> Dict[str, Tensor]:
""""Loss function.
Only outputs from the last feature level are used for computing
losses by default.
Args:
all_layers_cls_scores (Tensor): Classification outputs
of each decoder layers. Each is a 4D-tensor, has shape
(num_decoder_layers, bs, num_queries, cls_out_channels).
all_layers_bbox_preds (Tensor): Sigmoid regression
outputs of each decoder layers. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and shape
(num_decoder_layers, bs, num_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
        assert batch_gt_instances_ignore is None, \
            f'{self.__class__.__name__} only supports ' \
            'batch_gt_instances_ignore being set to None.'
losses_cls, losses_bbox, losses_iou = multi_apply(
self.loss_by_feat_single,
all_layers_cls_scores,
all_layers_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas)
loss_dict = dict()
# loss from the last decoder layer
loss_dict['loss_cls'] = losses_cls[-1]
loss_dict['loss_bbox'] = losses_bbox[-1]
loss_dict['loss_iou'] = losses_iou[-1]
# loss from other decoder layers
num_dec_layer = 0
for loss_cls_i, loss_bbox_i, loss_iou_i in \
zip(losses_cls[:-1], losses_bbox[:-1], losses_iou[:-1]):
loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i
num_dec_layer += 1
return loss_dict
def loss_by_feat_single(self, cls_scores: Tensor, bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict]) -> Tuple[Tensor]:
"""Loss function for outputs from a single decoder layer of a single
feature level.
Args:
cls_scores (Tensor): Box score logits from a single decoder layer
for all images, has shape (bs, num_queries, cls_out_channels).
bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
for all images, with normalized coordinate (cx, cy, w, h) and
shape (bs, num_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
Returns:
Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and
`loss_iou`.
"""
num_imgs = cls_scores.size(0)
cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
batch_gt_instances, batch_img_metas)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
labels = torch.cat(labels_list, 0)
label_weights = torch.cat(label_weights_list, 0)
bbox_targets = torch.cat(bbox_targets_list, 0)
bbox_weights = torch.cat(bbox_weights_list, 0)
# classification loss
cls_scores = cls_scores.reshape(-1, self.cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = num_total_pos * 1.0 + \
num_total_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
loss_cls = self.loss_cls(
cls_scores, labels, label_weights, avg_factor=cls_avg_factor)
# Compute the average number of gt boxes across all gpus, for
# normalization purposes
num_total_pos = loss_cls.new_tensor([num_total_pos])
num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()
# construct factors used for rescale bboxes
factors = []
for img_meta, bbox_pred in zip(batch_img_metas, bbox_preds):
            img_h, img_w = img_meta['img_shape']
factor = bbox_pred.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0).repeat(
bbox_pred.size(0), 1)
factors.append(factor)
factors = torch.cat(factors, 0)
        # DETR regresses the relative position of boxes (cxcywh) in the image,
        # thus the learning target is normalized by the image size. So here
        # we need to re-scale them for calculating the IoU loss
bbox_preds = bbox_preds.reshape(-1, 4)
bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors
        # regression IoU loss, GIoU loss by default
loss_iou = self.loss_iou(
bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos)
# regression L1 loss
loss_bbox = self.loss_bbox(
bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)
return loss_cls, loss_bbox, loss_iou
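    # Illustrative sketch of the rescaling above (made-up numbers): for an
    # image of shape (480, 640) and a normalized prediction
    # (cx, cy, w, h) = (0.5, 0.5, 0.25, 0.5):
    #   factor = [640, 480, 640, 480]
    #   bbox_cxcywh_to_xyxy(pred)           # -> (0.375, 0.25, 0.625, 0.75)
    #   bbox_cxcywh_to_xyxy(pred) * factor  # -> (240., 120., 400., 360.)
    # The GIoU loss is computed in this absolute xyxy space, while the L1
    # loss stays in the normalized (cx, cy, w, h) space.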
def get_targets(self, cls_scores_list: List[Tensor],
bbox_preds_list: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict]) -> tuple:
"""Compute regression and classification targets for a batch image.
Outputs from a single decoder layer of a single feature level are used.
Args:
cls_scores_list (list[Tensor]): Box score logits from a single
decoder layer for each image, has shape [num_queries,
cls_out_channels].
bbox_preds_list (list[Tensor]): Sigmoid outputs from a single
decoder layer for each image, with normalized coordinate
(cx, cy, w, h) and shape [num_queries, 4].
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
Returns:
tuple: a tuple containing the following targets.
- labels_list (list[Tensor]): Labels for all images.
- label_weights_list (list[Tensor]): Label weights for all images.
- bbox_targets_list (list[Tensor]): BBox targets for all images.
- bbox_weights_list (list[Tensor]): BBox weights for all images.
- num_total_pos (int): Number of positive samples in all images.
- num_total_neg (int): Number of negative samples in all images.
"""
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
pos_inds_list,
neg_inds_list) = multi_apply(self._get_targets_single,
cls_scores_list, bbox_preds_list,
batch_gt_instances, batch_img_metas)
num_total_pos = sum((inds.numel() for inds in pos_inds_list))
num_total_neg = sum((inds.numel() for inds in neg_inds_list))
return (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg)
def _get_targets_single(self, cls_score: Tensor, bbox_pred: Tensor,
gt_instances: InstanceData,
img_meta: dict) -> tuple:
"""Compute regression and classification targets for one image.
Outputs from a single decoder layer of a single feature level are used.
Args:
cls_score (Tensor): Box score logits from a single decoder layer
for one image. Shape [num_queries, cls_out_channels].
bbox_pred (Tensor): Sigmoid outputs from a single decoder layer
for one image, with normalized coordinate (cx, cy, w, h) and
shape [num_queries, 4].
gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It should include ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for one image.
Returns:
tuple[Tensor]: a tuple containing the following for one image.
- labels (Tensor): Labels of each image.
            - label_weights (Tensor): Label weights of each image.
- bbox_targets (Tensor): BBox targets of each image.
- bbox_weights (Tensor): BBox weights of each image.
- pos_inds (Tensor): Sampled positive indices for each image.
- neg_inds (Tensor): Sampled negative indices for each image.
"""
img_h, img_w = img_meta['img_shape']
factor = bbox_pred.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0)
num_bboxes = bbox_pred.size(0)
        # convert bbox_pred from normalized (cx, cy, w, h) to
        # unnormalized (x1, y1, x2, y2)
bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
bbox_pred = bbox_pred * factor
pred_instances = InstanceData(scores=cls_score, bboxes=bbox_pred)
# assigner and sampler
assign_result = self.assigner.assign(
pred_instances=pred_instances,
gt_instances=gt_instances,
img_meta=img_meta)
gt_bboxes = gt_instances.bboxes
gt_labels = gt_instances.labels
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds.long(), :]
# label targets
labels = gt_bboxes.new_full((num_bboxes, ),
self.num_classes,
dtype=torch.long)
labels[pos_inds] = gt_labels[pos_assigned_gt_inds]
label_weights = gt_bboxes.new_ones(num_bboxes)
# bbox targets
bbox_targets = torch.zeros_like(bbox_pred)
bbox_weights = torch.zeros_like(bbox_pred)
bbox_weights[pos_inds] = 1.0
        # DETR regresses the relative position of boxes (cxcywh) in the image.
        # Thus the learning target should be normalized by the image size,
        # and the box format should be converted from the default x1y1x2y2
        # to cxcywh.
pos_gt_bboxes_normalized = pos_gt_bboxes / factor
pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)
bbox_targets[pos_inds] = pos_gt_bboxes_targets
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds)
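    # Illustrative sketch (made-up assignment): with 4 queries and 2 gt
    # boxes, assign_result.gt_inds = [0, 2, 0, 1] means queries 1 and 3
    # were matched, so
    #   pos_inds = [1, 3], neg_inds = [0, 2]
    #   pos_assigned_gt_inds = [1, 0]   # after the `- 1` offset, because
    #                                   # index 0 in gt_inds means unmatched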
def loss_and_predict(
self, hidden_states: Tuple[Tensor],
batch_data_samples: SampleList) -> Tuple[dict, InstanceList]:
"""Perform forward propagation of the head, then calculate loss and
predictions from the features and data samples. Over-write because
img_metas are needed as inputs for bbox_head.
Args:
hidden_states (tuple[Tensor]): Feature from the transformer
decoder, has shape (num_decoder_layers, bs, num_queries, dim).
batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
the meta information of each image and corresponding
annotations.
Returns:
tuple: the return value is a tuple contains:
- losses: (dict[str, Tensor]): A dictionary of loss components.
- predictions (list[:obj:`InstanceData`]): Detection
results of each image after the post process.
"""
batch_gt_instances = []
batch_img_metas = []
for data_sample in batch_data_samples:
batch_img_metas.append(data_sample.metainfo)
batch_gt_instances.append(data_sample.gt_instances)
outs = self(hidden_states)
loss_inputs = outs + (batch_gt_instances, batch_img_metas)
losses = self.loss_by_feat(*loss_inputs)
predictions = self.predict_by_feat(
*outs, batch_img_metas=batch_img_metas)
return losses, predictions
def predict(self,
hidden_states: Tuple[Tensor],
batch_data_samples: SampleList,
rescale: bool = True) -> InstanceList:
"""Perform forward propagation of the detection head and predict
detection results on the features of the upstream network. Over-write
because img_metas are needed as inputs for bbox_head.
Args:
hidden_states (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool, optional): Whether to rescale the results.
Defaults to True.
Returns:
list[obj:`InstanceData`]: Detection results of each image
after the post process.
"""
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
last_layer_hidden_state = hidden_states[-1].unsqueeze(0)
outs = self(last_layer_hidden_state)
predictions = self.predict_by_feat(
*outs, batch_img_metas=batch_img_metas, rescale=rescale)
return predictions
def predict_by_feat(self,
layer_cls_scores: Tensor,
layer_bbox_preds: Tensor,
batch_img_metas: List[dict],
rescale: bool = True) -> InstanceList:
"""Transform network outputs for a batch into bbox predictions.
Args:
layer_cls_scores (Tensor): Classification outputs of the last or
all decoder layer. Each is a 4D-tensor, has shape
(num_decoder_layers, bs, num_queries, cls_out_channels).
layer_bbox_preds (Tensor): Sigmoid regression outputs of the last
or all decoder layer. Each is a 4D-tensor with normalized
coordinate format (cx, cy, w, h) and shape
(num_decoder_layers, bs, num_queries, 4).
batch_img_metas (list[dict]): Meta information of each image.
rescale (bool, optional): If `True`, return boxes in original
image space. Defaults to `True`.
Returns:
list[:obj:`InstanceData`]: Object detection results of each image
after the post process. Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
        # NOTE only outputs from the last feature level
        # and the last decoder layer are used.
cls_scores = layer_cls_scores[-1]
bbox_preds = layer_bbox_preds[-1]
result_list = []
for img_id in range(len(batch_img_metas)):
cls_score = cls_scores[img_id]
bbox_pred = bbox_preds[img_id]
img_meta = batch_img_metas[img_id]
results = self._predict_by_feat_single(cls_score, bbox_pred,
img_meta, rescale)
result_list.append(results)
return result_list
def _predict_by_feat_single(self,
cls_score: Tensor,
bbox_pred: Tensor,
img_meta: dict,
rescale: bool = True) -> InstanceData:
"""Transform outputs from the last decoder layer into bbox predictions
for each image.
Args:
cls_score (Tensor): Box score logits from the last decoder layer
for each image. Shape [num_queries, cls_out_channels].
bbox_pred (Tensor): Sigmoid outputs from the last decoder layer
for each image, with coordinate format (cx, cy, w, h) and
shape [num_queries, 4].
img_meta (dict): Image meta info.
rescale (bool): If True, return boxes in original image
space. Default True.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
assert len(cls_score) == len(bbox_pred) # num_queries
max_per_img = self.test_cfg.get('max_per_img', len(cls_score))
img_shape = img_meta['img_shape']
# exclude background
if self.loss_cls.use_sigmoid:
cls_score = cls_score.sigmoid()
scores, indexes = cls_score.view(-1).topk(max_per_img)
det_labels = indexes % self.num_classes
bbox_index = indexes // self.num_classes
bbox_pred = bbox_pred[bbox_index]
else:
scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1)
scores, bbox_index = scores.topk(max_per_img)
bbox_pred = bbox_pred[bbox_index]
det_labels = det_labels[bbox_index]
det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred)
det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1]
det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0]
det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
if rescale:
assert img_meta.get('scale_factor') is not None
det_bboxes /= det_bboxes.new_tensor(
img_meta['scale_factor']).repeat((1, 2))
results = InstanceData()
results.bboxes = det_bboxes
results.scores = scores
results.labels = det_labels
return results
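    # Illustrative sketch of the flattened top-k above (assumed sizes): with
    # num_classes = 80, `cls_score.view(-1)` enumerates (query, class) pairs
    # row-major, i.e. flat index i = query * 80 + class, so
    #   det_label  = i % 80    # class within the query row
    #   bbox_index = i // 80   # which query produced the score
    # One query can therefore contribute several detections with different
    # labels, a common post-processing for sigmoid classification heads.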
| 28,137 | 44.752846 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/rpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.ops import batched_nms
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures.bbox import (cat_boxes, empty_box_as, get_box_tensor,
get_box_wh, scale_boxes)
from mmdet.utils import InstanceList, MultiConfig, OptInstanceList
from .anchor_head import AnchorHead
@MODELS.register_module()
class RPNHead(AnchorHead):
"""Implementation of RPN head.
Args:
in_channels (int): Number of channels in the input feature map.
num_classes (int): Number of categories excluding the background
category. Defaults to 1.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or \
list[dict]): Initialization config dict.
num_convs (int): Number of convolution layers in the head.
Defaults to 1.
""" # noqa: W605
def __init__(self,
in_channels: int,
num_classes: int = 1,
init_cfg: MultiConfig = dict(
type='Normal', layer='Conv2d', std=0.01),
num_convs: int = 1,
**kwargs) -> None:
self.num_convs = num_convs
assert num_classes == 1
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
if self.num_convs > 1:
rpn_convs = []
for i in range(self.num_convs):
if i == 0:
in_channels = self.in_channels
else:
in_channels = self.feat_channels
# use ``inplace=False`` to avoid error: one of the variables
# needed for gradient computation has been modified by an
# inplace operation.
rpn_convs.append(
ConvModule(
in_channels,
self.feat_channels,
3,
padding=1,
inplace=False))
self.rpn_conv = nn.Sequential(*rpn_convs)
else:
self.rpn_conv = nn.Conv2d(
self.in_channels, self.feat_channels, 3, padding=1)
self.rpn_cls = nn.Conv2d(self.feat_channels,
self.num_base_priors * self.cls_out_channels,
1)
reg_dim = self.bbox_coder.encode_size
self.rpn_reg = nn.Conv2d(self.feat_channels,
self.num_base_priors * reg_dim, 1)
def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level \
the channels number is num_base_priors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale \
level, the channels number is num_base_priors * 4.
"""
x = self.rpn_conv(x)
x = F.relu(x)
rpn_cls_score = self.rpn_cls(x)
rpn_bbox_pred = self.rpn_reg(x)
return rpn_cls_score, rpn_bbox_pred
def loss_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) \
-> dict:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level,
has shape (N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
losses = super().loss_by_feat(
cls_scores,
bbox_preds,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
return dict(
loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])
def _predict_by_feat_single(self,
cls_score_list: List[Tensor],
bbox_pred_list: List[Tensor],
score_factor_list: List[Tensor],
mlvl_priors: List[Tensor],
img_meta: dict,
cfg: ConfigDict,
rescale: bool = False,
with_nms: bool = True) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
cls_score_list (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_priors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has shape
(num_priors * 4, H, W).
            score_factor_list (list[Tensor]): Kept to be compatible with
                BaseDenseHead. Not used in RPNHead.
mlvl_priors (list[Tensor]): Each element in the list is
the priors of a single level in feature pyramid. In all
anchor-based methods, it has shape (num_priors, 4). In
all anchor-free methods, it has shape (num_priors, 2)
when `with_stride=True`, otherwise it still has shape
(num_priors, 4).
img_meta (dict): Image meta info.
cfg (ConfigDict, optional): Test / postprocessing configuration,
if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
            with_nms (bool): If True, do nms before return boxes.
                Defaults to True.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
img_shape = img_meta['img_shape']
nms_pre = cfg.get('nms_pre', -1)
mlvl_bbox_preds = []
mlvl_valid_priors = []
mlvl_scores = []
level_ids = []
for level_idx, (cls_score, bbox_pred, priors) in \
enumerate(zip(cls_score_list, bbox_pred_list,
mlvl_priors)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
reg_dim = self.bbox_coder.encode_size
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, reg_dim)
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
                # note that we set FG labels to [0] since mmdet v2.0
# BG cat_id: 1
scores = cls_score.softmax(-1)[:, :-1]
scores = torch.squeeze(scores)
if 0 < nms_pre < scores.shape[0]:
# sort is faster than topk
# _, topk_inds = scores.topk(cfg.nms_pre)
ranked_scores, rank_inds = scores.sort(descending=True)
topk_inds = rank_inds[:nms_pre]
scores = ranked_scores[:nms_pre]
bbox_pred = bbox_pred[topk_inds, :]
priors = priors[topk_inds]
mlvl_bbox_preds.append(bbox_pred)
mlvl_valid_priors.append(priors)
mlvl_scores.append(scores)
# use level id to implement the separate level nms
level_ids.append(
scores.new_full((scores.size(0), ),
level_idx,
dtype=torch.long))
bbox_pred = torch.cat(mlvl_bbox_preds)
priors = cat_boxes(mlvl_valid_priors)
bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape)
results = InstanceData()
results.bboxes = bboxes
results.scores = torch.cat(mlvl_scores)
results.level_ids = torch.cat(level_ids)
return self._bbox_post_process(
results=results, cfg=cfg, rescale=rescale, img_meta=img_meta)
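    # Illustrative sketch (assumed numbers): with nms_pre = 1000 and 16384
    # proposals on a level, the pre-NMS selection above keeps only the 1000
    # highest-scoring ones:
    #   ranked_scores, rank_inds = scores.sort(descending=True)
    #   topk_inds = rank_inds[:1000]
    # The full sort replaces `scores.topk(1000)` purely as the speed
    # optimization noted in the code; the selected set is the same up to
    # tie ordering.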
def _bbox_post_process(self,
results: InstanceData,
cfg: ConfigDict,
rescale: bool = False,
with_nms: bool = True,
img_meta: Optional[dict] = None) -> InstanceData:
"""bbox post-processing method.
The boxes would be rescaled to the original image scale and do
the nms operation.
Args:
            results (:obj:`InstanceData`): Detection instance results,
each item has shape (num_bboxes, ).
cfg (ConfigDict): Test / postprocessing configuration.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Default to True.
img_meta (dict, optional): Image meta info. Defaults to None.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
assert with_nms, '`with_nms` must be True in RPNHead'
if rescale:
assert img_meta.get('scale_factor') is not None
scale_factor = [1 / s for s in img_meta['scale_factor']]
results.bboxes = scale_boxes(results.bboxes, scale_factor)
# filter small size bboxes
if cfg.get('min_bbox_size', -1) >= 0:
w, h = get_box_wh(results.bboxes)
valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
if not valid_mask.all():
results = results[valid_mask]
if results.bboxes.numel() > 0:
bboxes = get_box_tensor(results.bboxes)
det_bboxes, keep_idxs = batched_nms(bboxes, results.scores,
results.level_ids, cfg.nms)
results = results[keep_idxs]
# some nms would reweight the score, such as softnms
results.scores = det_bboxes[:, -1]
results = results[:cfg.max_per_img]
# TODO: This would unreasonably show the 0th class label
# in visualization
results.labels = results.scores.new_zeros(
len(results), dtype=torch.long)
del results.level_ids
else:
# To avoid some potential error
results_ = InstanceData()
results_.bboxes = empty_box_as(results.bboxes)
results_.scores = results.scores.new_zeros(0)
results_.labels = results.scores.new_zeros(0)
results = results_
return results
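    # Illustrative sketch of the level-wise NMS above (boxes are made up):
    # `batched_nms` only suppresses boxes that share the same index in its
    # third argument, so passing `level_ids` runs NMS independently per
    # FPN level:
    #   bboxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]])
    #   scores = torch.tensor([0.9, 0.8])
    #   idxs = torch.tensor([0, 1])  # different levels -> both boxes survive
    #   dets, keep = batched_nms(bboxes, scores, idxs,
    #                            dict(type='nms', iou_threshold=0.7))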
| 12,882 | 41.518152 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/anchor_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import BaseBoxes, cat_boxes, get_box_tensor
from mmdet.utils import (ConfigType, InstanceList, OptConfigType,
OptInstanceList, OptMultiConfig)
from ..task_modules.prior_generators import (AnchorGenerator,
anchor_inside_flags)
from ..task_modules.samplers import PseudoSampler
from ..utils import images_to_levels, multi_apply, unmap
from .base_dense_head import BaseDenseHead
@MODELS.register_module()
class AnchorHead(BaseDenseHead):
"""Anchor-based head (RPN, RetinaNet, SSD, etc.).
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels. Used in child classes.
anchor_generator (dict): Config dict for anchor generator
bbox_coder (dict): Config of bounding box coder.
reg_decoded_bbox (bool): If true, the regression loss would be
applied directly on decoded bounding boxes, converting both
the predicted boxes and regression targets to absolute
                coordinate format. Defaults to False. It should be `True` when
using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
init_cfg (dict or list[dict], optional): Initialization config dict.
""" # noqa: W605
def __init__(
self,
num_classes: int,
in_channels: int,
feat_channels: int = 256,
anchor_generator: ConfigType = dict(
type='AnchorGenerator',
scales=[8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder: ConfigType = dict(
type='DeltaXYWHBBoxCoder',
clip_border=True,
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0)),
reg_decoded_bbox: bool = False,
loss_cls: ConfigType = dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox: ConfigType = dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = dict(
type='Normal', layer='Conv2d', std=0.01)
) -> None:
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
if self.cls_out_channels <= 0:
raise ValueError(f'num_classes={num_classes} is too small')
self.reg_decoded_bbox = reg_decoded_bbox
self.bbox_coder = TASK_UTILS.build(bbox_coder)
self.loss_cls = MODELS.build(loss_cls)
self.loss_bbox = MODELS.build(loss_bbox)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if self.train_cfg:
self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
if train_cfg.get('sampler', None) is not None:
self.sampler = TASK_UTILS.build(
self.train_cfg['sampler'], default_args=dict(context=self))
else:
self.sampler = PseudoSampler(context=self)
self.fp16_enabled = False
self.prior_generator = TASK_UTILS.build(anchor_generator)
# Usually the numbers of anchors for each level are the same
# except SSD detectors. So it is an int in the most dense
# heads but a list of int in SSDHead
self.num_base_priors = self.prior_generator.num_base_priors[0]
self._init_layers()
@property
def num_anchors(self) -> int:
        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, '
                      'please use `num_base_priors` instead')
return self.prior_generator.num_base_priors[0]
@property
def anchor_generator(self) -> AnchorGenerator:
warnings.warn('DeprecationWarning: anchor_generator is deprecated, '
'please use "prior_generator" instead')
return self.prior_generator
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.conv_cls = nn.Conv2d(self.in_channels,
self.num_base_priors * self.cls_out_channels,
1)
reg_dim = self.bbox_coder.encode_size
self.conv_reg = nn.Conv2d(self.in_channels,
self.num_base_priors * reg_dim, 1)
def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level \
the channels number is num_base_priors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale \
level, the channels number is num_base_priors * 4.
"""
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
return cls_score, bbox_pred
def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: A tuple of classification scores and bbox prediction.
- cls_scores (list[Tensor]): Classification scores for all \
scale levels, each is a 4D-tensor, the channels number \
is num_base_priors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all \
scale levels, each is a 4D-tensor, the channels number \
is num_base_priors * 4.
"""
return multi_apply(self.forward_single, x)
def get_anchors(self,
featmap_sizes: List[tuple],
batch_img_metas: List[dict],
device: Union[torch.device, str] = 'cuda') \
-> Tuple[List[List[Tensor]], List[List[Tensor]]]:
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
batch_img_metas (list[dict]): Image meta info.
device (torch.device | str): Device for returned tensors.
Defaults to cuda.
Returns:
tuple:
- anchor_list (list[list[Tensor]]): Anchors of each image.
- valid_flag_list (list[list[Tensor]]): Valid flags of each
image.
"""
num_imgs = len(batch_img_metas)
# since feature map sizes of all images are the same, we only compute
# anchors for one time
multi_level_anchors = self.prior_generator.grid_priors(
featmap_sizes, device=device)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(batch_img_metas):
multi_level_flags = self.prior_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'], device)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
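    # Illustrative note (sizes are assumptions): `grid_priors` depends only
    # on the feature map sizes, so one set of multi-level anchors is shared
    # by every image in the batch; only the valid flags are computed per
    # image, because they depend on each image's `pad_shape`. For example,
    # a 32x32 level with 3 base anchors contributes a single tensor
    #   multi_level_anchors[lvl]   # shape (32 * 32 * 3, 4)
    # that is referenced num_imgs times in `anchor_list`.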
def _get_targets_single(self,
flat_anchors: Union[Tensor, BaseBoxes],
valid_flags: Tensor,
gt_instances: InstanceData,
img_meta: dict,
gt_instances_ignore: Optional[InstanceData] = None,
unmap_outputs: bool = True) -> tuple:
"""Compute regression and classification targets for anchors in a
single image.
Args:
flat_anchors (Tensor or :obj:`BaseBoxes`): Multi-level anchors
of the image, which are concatenated into a single tensor
or box type of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors, ).
gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It should include ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for current image.
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors. Defaults to True.
Returns:
tuple:
- labels (Tensor): Labels of each level.
- label_weights (Tensor): Label weights of each level.
- bbox_targets (Tensor): BBox targets of each level.
- bbox_weights (Tensor): BBox weights of each level.
- pos_inds (Tensor): positive samples indexes.
- neg_inds (Tensor): negative samples indexes.
- sampling_result (:obj:`SamplingResult`): Sampling results.
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg['allowed_border'])
if not inside_flags.any():
raise ValueError(
'There is no valid anchor inside the image boundary. Please '
'check the image size and anchor sizes, or set '
'``allowed_border`` to -1 to skip the condition.')
# assign gt and sample anchors
anchors = flat_anchors[inside_flags]
pred_instances = InstanceData(priors=anchors)
assign_result = self.assigner.assign(pred_instances, gt_instances,
gt_instances_ignore)
# No sampling is required except for RPN and
# Guided Anchoring algorithms
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
num_valid_anchors = anchors.shape[0]
target_dim = gt_instances.bboxes.size(-1) if self.reg_decoded_bbox \
else self.bbox_coder.encode_size
bbox_targets = anchors.new_zeros(num_valid_anchors, target_dim)
bbox_weights = anchors.new_zeros(num_valid_anchors, target_dim)
# TODO: Considering saving memory, is it necessary to be long?
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
# `bbox_coder.encode` accepts tensor or box type inputs and generates
# tensor targets. If regressing decoded boxes, the code will convert
# box type `pos_bbox_targets` to tensor.
if len(pos_inds) > 0:
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_priors, sampling_result.pos_gt_bboxes)
else:
pos_bbox_targets = sampling_result.pos_gt_bboxes
pos_bbox_targets = get_box_tensor(pos_bbox_targets)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
labels[pos_inds] = sampling_result.pos_gt_labels
if self.train_cfg['pos_weight'] <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg['pos_weight']
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
labels = unmap(
labels, num_total_anchors, inside_flags,
fill=self.num_classes) # fill bg label
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds, sampling_result)
def get_targets(self,
anchor_list: List[List[Tensor]],
valid_flag_list: List[List[Tensor]],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
unmap_outputs: bool = True,
return_sampling_results: bool = False) -> tuple:
"""Compute regression and classification targets for anchors in
multiple images.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors. Defaults to True.
return_sampling_results (bool): Whether to return the sampling
results. Defaults to False.
Returns:
tuple: Usually returns a tuple containing learning targets.
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each
level.
- bbox_targets_list (list[Tensor]): BBox targets of each level.
- bbox_weights_list (list[Tensor]): BBox weights of each level.
- avg_factor (int): Average factor that is used to average
the loss. When using sampling method, avg_factor is usually
the sum of positive and negative priors. When using
`PseudoSampler`, `avg_factor` is usually equal to the number
of positive priors.
additional_returns: This function enables user-defined returns from
`self._get_targets_single`. These returns are currently refined
to properties at each feature map (i.e. having HxW dimension).
                The results will be concatenated to the end of the returned
                tuple.
"""
num_imgs = len(batch_img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None] * num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors to a single tensor
concat_anchor_list = []
concat_valid_flag_list = []
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
concat_anchor_list.append(cat_boxes(anchor_list[i]))
concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
# compute targets for each image
results = multi_apply(
self._get_targets_single,
concat_anchor_list,
concat_valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore,
unmap_outputs=unmap_outputs)
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]
rest_results = list(results[7:]) # user-added return values
        # Get `avg_factor` of all images, which is calculated in
        # `SamplingResult`.
# When using sampling method, avg_factor is usually the sum of
# positive and negative priors. When using `PseudoSampler`,
# `avg_factor` is usually equal to the number of positive priors.
avg_factor = sum(
[results.avg_factor for results in sampling_results_list])
# update `_raw_positive_infos`, which will be used when calling
# `get_positive_infos`.
self._raw_positive_infos.update(sampling_results=sampling_results_list)
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
res = (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, avg_factor)
if return_sampling_results:
res = res + (sampling_results_list, )
for i, r in enumerate(rest_results): # user-added return values
rest_results[i] = images_to_levels(r, num_level_anchors)
return res + tuple(rest_results)
def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,
anchors: Tensor, labels: Tensor,
label_weights: Tensor, bbox_targets: Tensor,
bbox_weights: Tensor, avg_factor: int) -> tuple:
"""Calculate the loss of a single scale level based on the features
extracted by the detection head.
Args:
cls_score (Tensor): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W).
bbox_pred (Tensor): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors)
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (N, num_total_anchors, 4).
bbox_weights (Tensor): BBox regression loss weights of each anchor
with shape (N, num_total_anchors, 4).
avg_factor (int): Average factor that is used to average the loss.
Returns:
tuple: loss components.
"""
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=avg_factor)
# regression loss
target_dim = bbox_targets.size(-1)
bbox_targets = bbox_targets.reshape(-1, target_dim)
bbox_weights = bbox_weights.reshape(-1, target_dim)
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(-1,
self.bbox_coder.encode_size)
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
# is applied directly on the decoded bounding boxes, it
# decodes the already encoded coordinates to absolute format.
anchors = anchors.reshape(-1, anchors.size(-1))
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
bbox_pred = get_box_tensor(bbox_pred)
loss_bbox = self.loss_bbox(
bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor)
return loss_cls, loss_bbox
def loss_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
has shape (N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
avg_factor) = cls_reg_targets
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
concat_anchor_list = []
for i in range(len(anchor_list)):
concat_anchor_list.append(cat_boxes(anchor_list[i]))
all_anchor_list = images_to_levels(concat_anchor_list,
num_level_anchors)
losses_cls, losses_bbox = multi_apply(
self.loss_by_feat_single,
cls_scores,
bbox_preds,
all_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
avg_factor=avg_factor)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
| 24,816 | 45.736347 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/yolox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.ops.nms import batched_nms
from mmengine.config import ConfigDict
from mmengine.model import bias_init_with_prob
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import bbox_xyxy_to_cxcywh
from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList,
OptMultiConfig, reduce_mean)
from ..task_modules.prior_generators import MlvlPointGenerator
from ..task_modules.samplers import PseudoSampler
from ..utils import multi_apply
from .base_dense_head import BaseDenseHead
@MODELS.register_module()
class YOLOXHead(BaseDenseHead):
"""YOLOXHead head used in `YOLOX <https://arxiv.org/abs/2107.08430>`_.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels in stacking convs.
            Defaults to 256.
        stacked_convs (int): Number of stacking convs of the head.
            Defaults to 2.
        strides (Sequence[int]): Downsample factor of each feature map.
            Defaults to (8, 16, 32).
        use_depthwise (bool): Whether to use depthwise separable convolutions
            in blocks. Defaults to False.
dcn_on_last_conv (bool): If true, use dcn in the last layer of
towers. Defaults to False.
conv_bias (bool or str): If specified as `auto`, it will be decided by
the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
None, otherwise False. Defaults to "auto".
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001).
act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
Defaults to None.
loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
loss_obj (:obj:`ConfigDict` or dict): Config of objectness loss.
loss_l1 (:obj:`ConfigDict` or dict): Config of L1 loss.
train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
anchor head. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
anchor head. Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
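    Example:
        >>> # A minimal usage sketch with hypothetical sizes; the inputs
        >>> # only need to be 4D tensors, one per stride in ``strides``.
        >>> import torch
        >>> self = YOLOXHead(num_classes=4, in_channels=64)
        >>> feats = [torch.rand(1, 64, s, s) for s in [16, 8, 4]]
        >>> cls_scores, bbox_preds, objectnesses = self.forward(feats)
        >>> assert len(cls_scores) == len(self.strides)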
"""
def __init__(
self,
num_classes: int,
in_channels: int,
feat_channels: int = 256,
stacked_convs: int = 2,
strides: Sequence[int] = (8, 16, 32),
use_depthwise: bool = False,
dcn_on_last_conv: bool = False,
conv_bias: Union[bool, str] = 'auto',
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
act_cfg: ConfigType = dict(type='Swish'),
loss_cls: ConfigType = dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='sum',
loss_weight=1.0),
loss_bbox: ConfigType = dict(
type='IoULoss',
mode='square',
eps=1e-16,
reduction='sum',
loss_weight=5.0),
loss_obj: ConfigType = dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='sum',
loss_weight=1.0),
loss_l1: ConfigType = dict(
type='L1Loss', reduction='sum', loss_weight=1.0),
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_classes = num_classes
self.cls_out_channels = num_classes
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.use_depthwise = use_depthwise
self.dcn_on_last_conv = dcn_on_last_conv
assert conv_bias == 'auto' or isinstance(conv_bias, bool)
self.conv_bias = conv_bias
self.use_sigmoid_cls = True
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.loss_cls: nn.Module = MODELS.build(loss_cls)
self.loss_bbox: nn.Module = MODELS.build(loss_bbox)
self.loss_obj: nn.Module = MODELS.build(loss_obj)
self.use_l1 = False # This flag will be modified by hooks.
self.loss_l1: nn.Module = MODELS.build(loss_l1)
self.prior_generator = MlvlPointGenerator(strides, offset=0)
self.test_cfg = test_cfg
self.train_cfg = train_cfg
if self.train_cfg:
self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
# YOLOX does not support sampling
self.sampler = PseudoSampler()
self._init_layers()
def _init_layers(self) -> None:
"""Initialize heads for all level feature maps."""
self.multi_level_cls_convs = nn.ModuleList()
self.multi_level_reg_convs = nn.ModuleList()
self.multi_level_conv_cls = nn.ModuleList()
self.multi_level_conv_reg = nn.ModuleList()
self.multi_level_conv_obj = nn.ModuleList()
for _ in self.strides:
self.multi_level_cls_convs.append(self._build_stacked_convs())
self.multi_level_reg_convs.append(self._build_stacked_convs())
conv_cls, conv_reg, conv_obj = self._build_predictor()
self.multi_level_conv_cls.append(conv_cls)
self.multi_level_conv_reg.append(conv_reg)
self.multi_level_conv_obj.append(conv_obj)
def _build_stacked_convs(self) -> nn.Sequential:
"""Initialize conv layers of a single level head."""
conv = DepthwiseSeparableConvModule \
if self.use_depthwise else ConvModule
stacked_convs = []
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
if self.dcn_on_last_conv and i == self.stacked_convs - 1:
conv_cfg = dict(type='DCNv2')
else:
conv_cfg = self.conv_cfg
stacked_convs.append(
conv(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
bias=self.conv_bias))
return nn.Sequential(*stacked_convs)
def _build_predictor(self) -> Tuple[nn.Module, nn.Module, nn.Module]:
"""Initialize predictor layers of a single level head."""
conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)
conv_reg = nn.Conv2d(self.feat_channels, 4, 1)
conv_obj = nn.Conv2d(self.feat_channels, 1, 1)
return conv_cls, conv_reg, conv_obj
def init_weights(self) -> None:
"""Initialize weights of the head."""
super(YOLOXHead, self).init_weights()
# Use prior in model initialization to improve stability
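        # bias_init_with_prob(p) evaluates to -log((1 - p) / p); for p=0.01
        # this is about -4.595, so initial sigmoid scores start near the
        # 0.01 foreground prior.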
bias_init = bias_init_with_prob(0.01)
for conv_cls, conv_obj in zip(self.multi_level_conv_cls,
self.multi_level_conv_obj):
conv_cls.bias.data.fill_(bias_init)
conv_obj.bias.data.fill_(bias_init)
def forward_single(self, x: Tensor, cls_convs: nn.Module,
reg_convs: nn.Module, conv_cls: nn.Module,
conv_reg: nn.Module,
conv_obj: nn.Module) -> Tuple[Tensor, Tensor, Tensor]:
"""Forward feature of a single scale level."""
cls_feat = cls_convs(x)
reg_feat = reg_convs(x)
cls_score = conv_cls(cls_feat)
bbox_pred = conv_reg(reg_feat)
objectness = conv_obj(reg_feat)
return cls_score, bbox_pred, objectness
def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
"""Forward features from the upstream network.
Args:
x (Tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
Tuple[List]: A tuple of multi-level classification scores, bbox
predictions, and objectnesses.
"""
return multi_apply(self.forward_single, x, self.multi_level_cls_convs,
self.multi_level_reg_convs,
self.multi_level_conv_cls,
self.multi_level_conv_reg,
self.multi_level_conv_obj)
def predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
objectnesses: Optional[List[Tensor]],
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True) -> List[InstanceData]:
"""Transform a batch of output features extracted by the head into
bbox results.
Args:
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
objectnesses (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W).
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
list[:obj:`InstanceData`]: Object detection results of each image
            after the post process. Each item usually contains the following
            keys.
            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arranged as (x1, y1, x2, y2).
"""
assert len(cls_scores) == len(bbox_preds) == len(objectnesses)
cfg = self.test_cfg if cfg is None else cfg
num_imgs = len(batch_img_metas)
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes,
dtype=cls_scores[0].dtype,
device=cls_scores[0].device,
with_stride=True)
# flatten cls_scores, bbox_preds and objectness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
]
flatten_objectness = [
objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
for objectness in objectnesses
]
flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()
flatten_priors = torch.cat(mlvl_priors)
flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)
result_list = []
for img_id, img_meta in enumerate(batch_img_metas):
max_scores, labels = torch.max(flatten_cls_scores[img_id], 1)
valid_mask = flatten_objectness[
img_id] * max_scores >= cfg.score_thr
results = InstanceData(
bboxes=flatten_bboxes[img_id][valid_mask],
scores=max_scores[valid_mask] *
flatten_objectness[img_id][valid_mask],
labels=labels[valid_mask])
result_list.append(
self._bbox_post_process(
results=results,
cfg=cfg,
rescale=rescale,
with_nms=with_nms,
img_meta=img_meta))
return result_list
def _bbox_decode(self, priors: Tensor, bbox_preds: Tensor) -> Tensor:
"""Decode regression results (delta_x, delta_x, w, h) to bboxes (tl_x,
tl_y, br_x, br_y).
Args:
            priors (Tensor): Center priors of an image, has shape
(num_instances, 2).
bbox_preds (Tensor): Box energies / deltas for all instances,
has shape (batch_size, num_instances, 4).
Returns:
Tensor: Decoded bboxes in (tl_x, tl_y, br_x, br_y) format. Has
shape (batch_size, num_instances, 4).
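        Example:
            >>> # A hedged sketch with hand-picked numbers (hypothetical,
            >>> # not from a real prior generator): one prior centred at
            >>> # (4, 4) with stride 8; zero offsets and exp(0) = 1 decode
            >>> # to a stride-sized box centred on the prior.
            >>> import torch
            >>> priors = torch.tensor([[4., 4., 8., 8.]])
            >>> preds = torch.zeros(1, 1, 4)
            >>> self._bbox_decode(priors, preds)  # ``self``: a built head
            tensor([[[0., 0., 8., 8.]]])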
"""
xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2]
whs = bbox_preds[..., 2:].exp() * priors[:, 2:]
tl_x = (xys[..., 0] - whs[..., 0] / 2)
tl_y = (xys[..., 1] - whs[..., 1] / 2)
br_x = (xys[..., 0] + whs[..., 0] / 2)
br_y = (xys[..., 1] + whs[..., 1] / 2)
decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)
return decoded_bboxes
def _bbox_post_process(self,
results: InstanceData,
cfg: ConfigDict,
rescale: bool = False,
with_nms: bool = True,
img_meta: Optional[dict] = None) -> InstanceData:
"""bbox post-processing method.
        The boxes are rescaled to the original image scale and nms is
        applied. Usually `with_nms` is False when used for aug test.
Args:
            results (:obj:`InstanceData`): Detection instance results,
each item has shape (num_bboxes, ).
cfg (mmengine.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default to False.
with_nms (bool): If True, do nms before return boxes.
Default to True.
img_meta (dict, optional): Image meta info. Defaults to None.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arranged as (x1, y1, x2, y2).
"""
if rescale:
assert img_meta.get('scale_factor') is not None
results.bboxes /= results.bboxes.new_tensor(
img_meta['scale_factor']).repeat((1, 2))
if with_nms and results.bboxes.numel() > 0:
det_bboxes, keep_idxs = batched_nms(results.bboxes, results.scores,
results.labels, cfg.nms)
results = results[keep_idxs]
# some nms would reweight the score, such as softnms
results.scores = det_bboxes[:, -1]
return results
def loss_by_feat(
self,
cls_scores: Sequence[Tensor],
bbox_preds: Sequence[Tensor],
objectnesses: Sequence[Tensor],
batch_gt_instances: Sequence[InstanceData],
batch_img_metas: Sequence[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (Sequence[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_priors * num_classes.
bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_priors * 4.
objectnesses (Sequence[Tensor]): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of losses.
"""
num_imgs = len(batch_img_metas)
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None] * num_imgs
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes,
dtype=cls_scores[0].dtype,
device=cls_scores[0].device,
with_stride=True)
flatten_cls_preds = [
cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.cls_out_channels)
for cls_pred in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
]
flatten_objectness = [
objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
for objectness in objectnesses
]
flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_objectness = torch.cat(flatten_objectness, dim=1)
flatten_priors = torch.cat(mlvl_priors)
flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)
(pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets,
num_fg_imgs) = multi_apply(
self._get_targets_single,
flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1),
flatten_cls_preds.detach(), flatten_bboxes.detach(),
flatten_objectness.detach(), batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
# The experimental results show that 'reduce_mean' can improve
# performance on the COCO dataset.
num_pos = torch.tensor(
sum(num_fg_imgs),
dtype=torch.float,
device=flatten_cls_preds.device)
num_total_samples = max(reduce_mean(num_pos), 1.0)
pos_masks = torch.cat(pos_masks, 0)
cls_targets = torch.cat(cls_targets, 0)
obj_targets = torch.cat(obj_targets, 0)
bbox_targets = torch.cat(bbox_targets, 0)
if self.use_l1:
l1_targets = torch.cat(l1_targets, 0)
loss_obj = self.loss_obj(flatten_objectness.view(-1, 1),
obj_targets) / num_total_samples
if num_pos > 0:
loss_cls = self.loss_cls(
flatten_cls_preds.view(-1, self.num_classes)[pos_masks],
cls_targets) / num_total_samples
loss_bbox = self.loss_bbox(
flatten_bboxes.view(-1, 4)[pos_masks],
bbox_targets) / num_total_samples
else:
# Avoid cls and reg branch not participating in the gradient
# propagation when there is no ground-truth in the images.
# For more details, please refer to
# https://github.com/open-mmlab/mmdetection/issues/7298
loss_cls = flatten_cls_preds.sum() * 0
loss_bbox = flatten_bboxes.sum() * 0
loss_dict = dict(
loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj)
if self.use_l1:
if num_pos > 0:
loss_l1 = self.loss_l1(
flatten_bbox_preds.view(-1, 4)[pos_masks],
l1_targets) / num_total_samples
else:
# Avoid cls and reg branch not participating in the gradient
# propagation when there is no ground-truth in the images.
# For more details, please refer to
# https://github.com/open-mmlab/mmdetection/issues/7298
loss_l1 = flatten_bbox_preds.sum() * 0
loss_dict.update(loss_l1=loss_l1)
return loss_dict
@torch.no_grad()
def _get_targets_single(
self,
priors: Tensor,
cls_preds: Tensor,
decoded_bboxes: Tensor,
objectness: Tensor,
gt_instances: InstanceData,
img_meta: dict,
gt_instances_ignore: Optional[InstanceData] = None) -> tuple:
"""Compute classification, regression, and objectness targets for
priors in a single image.
Args:
priors (Tensor): All priors of one image, a 2D-Tensor with shape
                [num_priors, 4] in [cx, cy, stride_w, stride_h] format.
cls_preds (Tensor): Classification predictions of one image,
a 2D-Tensor with shape [num_priors, num_classes]
decoded_bboxes (Tensor): Decoded bboxes predictions of one image,
a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y,
br_x, br_y] format.
objectness (Tensor): Objectness predictions of one image,
a 1D-Tensor with shape [num_priors]
gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It should include ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for current image.
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
tuple:
foreground_mask (list[Tensor]): Binary mask of foreground
targets.
cls_target (list[Tensor]): Classification targets of an image.
obj_target (list[Tensor]): Objectness targets of an image.
bbox_target (list[Tensor]): BBox targets of an image.
l1_target (int): BBox L1 targets of an image.
num_pos_per_img (int): Number of positive samples in an image.
"""
num_priors = priors.size(0)
num_gts = len(gt_instances)
# No target
if num_gts == 0:
cls_target = cls_preds.new_zeros((0, self.num_classes))
bbox_target = cls_preds.new_zeros((0, 4))
l1_target = cls_preds.new_zeros((0, 4))
obj_target = cls_preds.new_zeros((num_priors, 1))
foreground_mask = cls_preds.new_zeros(num_priors).bool()
return (foreground_mask, cls_target, obj_target, bbox_target,
l1_target, 0)
# YOLOX uses center priors with 0.5 offset to assign targets,
# but use center priors without offset to regress bboxes.
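        # e.g. a prior (x, y, s, s) becomes (x + 0.5 * s, y + 0.5 * s, s, s),
        # i.e. the grid-cell centre, for assignment only.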
offset_priors = torch.cat(
[priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1)
scores = cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid()
pred_instances = InstanceData(
bboxes=decoded_bboxes, scores=scores.sqrt_(), priors=offset_priors)
assign_result = self.assigner.assign(
pred_instances=pred_instances,
gt_instances=gt_instances,
gt_instances_ignore=gt_instances_ignore)
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
pos_inds = sampling_result.pos_inds
num_pos_per_img = pos_inds.size(0)
pos_ious = assign_result.max_overlaps[pos_inds]
# IOU aware classification score
cls_target = F.one_hot(sampling_result.pos_gt_labels,
self.num_classes) * pos_ious.unsqueeze(-1)
obj_target = torch.zeros_like(objectness).unsqueeze(-1)
obj_target[pos_inds] = 1
bbox_target = sampling_result.pos_gt_bboxes
l1_target = cls_preds.new_zeros((num_pos_per_img, 4))
if self.use_l1:
l1_target = self._get_l1_target(l1_target, bbox_target,
priors[pos_inds])
foreground_mask = torch.zeros_like(objectness).to(torch.bool)
foreground_mask[pos_inds] = 1
return (foreground_mask, cls_target, obj_target, bbox_target,
l1_target, num_pos_per_img)
def _get_l1_target(self,
l1_target: Tensor,
gt_bboxes: Tensor,
priors: Tensor,
eps: float = 1e-8) -> Tensor:
"""Convert gt bboxes to center offset and log width height."""
gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes)
l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:]
l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps)
return l1_target
| 26,925 | 42.499192 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/retina_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.registry import MODELS
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
assert stacked_convs >= 0, \
            '`stacked_convs` must be a non-negative integer, ' \
f'but got {stacked_convs} instead.'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
in_channels = self.in_channels
for i in range(self.stacked_convs):
self.cls_convs.append(
ConvModule(
in_channels,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
in_channels,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
in_channels = self.feat_channels
self.retina_cls = nn.Conv2d(
in_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
reg_dim = self.bbox_coder.encode_size
self.retina_reg = nn.Conv2d(
in_channels, self.num_base_priors * reg_dim, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level
the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale
level, the channels number is num_anchors * 4.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
| 4,284 | 34.413223 | 76 | py |
ERD | ERD-main/mmdet/models/dense_heads/rtmdet_ins_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import math
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, is_norm
from mmcv.ops import batched_nms
from mmengine.model import (BaseModule, bias_init_with_prob, constant_init,
normal_init)
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.layers.transformer import inverse_sigmoid
from mmdet.models.utils import (filter_scores_and_topk, multi_apply,
select_single_mlvl, sigmoid_geometric_mean)
from mmdet.registry import MODELS
from mmdet.structures.bbox import (cat_boxes, distance2bbox, get_box_tensor,
get_box_wh, scale_boxes)
from mmdet.utils import ConfigType, InstanceList, OptInstanceList, reduce_mean
from .rtmdet_head import RTMDetHead
@MODELS.register_module()
class RTMDetInsHead(RTMDetHead):
"""Detection Head of RTMDet-Ins.
Args:
num_prototypes (int): Number of mask prototype features extracted
from the mask head. Defaults to 8.
dyconv_channels (int): Channel of the dynamic conv layers.
Defaults to 8.
num_dyconvs (int): Number of the dynamic convolution layers.
Defaults to 3.
mask_loss_stride (int): Down sample stride of the masks for loss
computation. Defaults to 4.
loss_mask (:obj:`ConfigDict` or dict): Config dict for mask loss.
"""
def __init__(self,
*args,
num_prototypes: int = 8,
dyconv_channels: int = 8,
num_dyconvs: int = 3,
mask_loss_stride: int = 4,
loss_mask=dict(
type='DiceLoss',
loss_weight=2.0,
eps=5e-6,
reduction='mean'),
**kwargs) -> None:
self.num_prototypes = num_prototypes
self.num_dyconvs = num_dyconvs
self.dyconv_channels = dyconv_channels
self.mask_loss_stride = mask_loss_stride
super().__init__(*args, **kwargs)
self.loss_mask = MODELS.build(loss_mask)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
super()._init_layers()
# a branch to predict kernels of dynamic convs
self.kernel_convs = nn.ModuleList()
# calculate num dynamic parameters
weight_nums, bias_nums = [], []
for i in range(self.num_dyconvs):
if i == 0:
weight_nums.append(
# mask prototype and coordinate features
(self.num_prototypes + 2) * self.dyconv_channels)
bias_nums.append(self.dyconv_channels * 1)
elif i == self.num_dyconvs - 1:
weight_nums.append(self.dyconv_channels * 1)
bias_nums.append(1)
else:
weight_nums.append(self.dyconv_channels * self.dyconv_channels)
bias_nums.append(self.dyconv_channels * 1)
self.weight_nums = weight_nums
self.bias_nums = bias_nums
self.num_gen_params = sum(weight_nums) + sum(bias_nums)
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.kernel_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
pred_pad_size = self.pred_kernel_size // 2
self.rtm_kernel = nn.Conv2d(
self.feat_channels,
self.num_gen_params,
self.pred_kernel_size,
padding=pred_pad_size)
self.mask_head = MaskFeatModule(
in_channels=self.in_channels,
feat_channels=self.feat_channels,
stacked_convs=4,
num_levels=len(self.prior_generator.strides),
num_prototypes=self.num_prototypes,
act_cfg=self.act_cfg,
norm_cfg=self.norm_cfg)
def forward(self, feats: Tuple[Tensor, ...]) -> tuple:
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_base_priors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_base_priors * 4.
- kernel_preds (list[Tensor]): Dynamic conv kernels for all scale
levels, each is a 4D-tensor, the channels number is
num_gen_params.
        - mask_feat (Tensor): Output feature of the mask head, a
          4D-tensor whose channel number is num_prototypes.
"""
mask_feat = self.mask_head(feats)
cls_scores = []
bbox_preds = []
kernel_preds = []
for idx, (x, scale, stride) in enumerate(
zip(feats, self.scales, self.prior_generator.strides)):
cls_feat = x
reg_feat = x
kernel_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.rtm_cls(cls_feat)
for kernel_layer in self.kernel_convs:
kernel_feat = kernel_layer(kernel_feat)
kernel_pred = self.rtm_kernel(kernel_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
if self.with_objectness:
objectness = self.rtm_obj(reg_feat)
cls_score = inverse_sigmoid(
sigmoid_geometric_mean(cls_score, objectness))
reg_dist = scale(self.rtm_reg(reg_feat)) * stride[0]
cls_scores.append(cls_score)
bbox_preds.append(reg_dist)
kernel_preds.append(kernel_pred)
return tuple(cls_scores), tuple(bbox_preds), tuple(
kernel_preds), mask_feat
def predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
kernel_preds: List[Tensor],
mask_feat: Tensor,
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigType] = None,
rescale: bool = False,
with_nms: bool = True) -> InstanceList:
"""Transform a batch of output features extracted from the head into
bbox results.
        Note: When score_factors is not None, the cls_scores are
        usually multiplied by it to obtain the real scores used in NMS,
        such as CenterNess in FCOS, IoU branch in ATSS.
Args:
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
kernel_preds (list[Tensor]): Kernel predictions of dynamic
convs for all scale levels, each is a 4D-tensor, has shape
(batch_size, num_params, H, W).
mask_feat (Tensor): Mask prototype features extracted from the
mask head, has shape (batch_size, num_prototypes, H, W).
score_factors (list[Tensor], optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Defaults to None.
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
list[:obj:`InstanceData`]: Object detection results of each image
            after the post process. Each item usually contains the following
            keys.
            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, h, w).
"""
assert len(cls_scores) == len(bbox_preds)
if score_factors is None:
# e.g. Retina, FreeAnchor, Foveabox, etc.
with_score_factors = False
else:
# e.g. FCOS, PAA, ATSS, AutoAssign, etc.
with_score_factors = True
assert len(cls_scores) == len(score_factors)
num_levels = len(cls_scores)
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes,
dtype=cls_scores[0].dtype,
device=cls_scores[0].device,
with_stride=True)
result_list = []
for img_id in range(len(batch_img_metas)):
img_meta = batch_img_metas[img_id]
cls_score_list = select_single_mlvl(
cls_scores, img_id, detach=True)
bbox_pred_list = select_single_mlvl(
bbox_preds, img_id, detach=True)
kernel_pred_list = select_single_mlvl(
kernel_preds, img_id, detach=True)
if with_score_factors:
score_factor_list = select_single_mlvl(
score_factors, img_id, detach=True)
else:
score_factor_list = [None for _ in range(num_levels)]
results = self._predict_by_feat_single(
cls_score_list=cls_score_list,
bbox_pred_list=bbox_pred_list,
kernel_pred_list=kernel_pred_list,
mask_feat=mask_feat[img_id],
score_factor_list=score_factor_list,
mlvl_priors=mlvl_priors,
img_meta=img_meta,
cfg=cfg,
rescale=rescale,
with_nms=with_nms)
result_list.append(results)
return result_list
def _predict_by_feat_single(self,
cls_score_list: List[Tensor],
bbox_pred_list: List[Tensor],
kernel_pred_list: List[Tensor],
mask_feat: Tensor,
score_factor_list: List[Tensor],
mlvl_priors: List[Tensor],
img_meta: dict,
cfg: ConfigType,
rescale: bool = False,
with_nms: bool = True) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox and mask results.
Args:
cls_score_list (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_priors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has shape
(num_priors * 4, H, W).
            kernel_pred_list (list[Tensor]): Kernel predictions of dynamic
                convs for all scale levels of a single image, each is a
                4D-tensor, has shape (num_params, H, W).
mask_feat (Tensor): Mask prototype features of a single image
extracted from the mask head, has shape (num_prototypes, H, W).
score_factor_list (list[Tensor]): Score factor from all scale
levels of a single image, each item has shape
(num_priors * 1, H, W).
mlvl_priors (list[Tensor]): Each element in the list is
the priors of a single level in feature pyramid. In all
anchor-based methods, it has shape (num_priors, 4). In
all anchor-free methods, it has shape (num_priors, 2)
when `with_stride=True`, otherwise it still has shape
(num_priors, 4).
img_meta (dict): Image meta info.
cfg (mmengine.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, h, w).
"""
if score_factor_list[0] is None:
# e.g. Retina, FreeAnchor, etc.
with_score_factors = False
else:
# e.g. FCOS, PAA, ATSS, etc.
with_score_factors = True
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
img_shape = img_meta['img_shape']
nms_pre = cfg.get('nms_pre', -1)
mlvl_bbox_preds = []
mlvl_kernels = []
mlvl_valid_priors = []
mlvl_scores = []
mlvl_labels = []
if with_score_factors:
mlvl_score_factors = []
else:
mlvl_score_factors = None
for level_idx, (cls_score, bbox_pred, kernel_pred,
score_factor, priors) in \
enumerate(zip(cls_score_list, bbox_pred_list, kernel_pred_list,
score_factor_list, mlvl_priors)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
dim = self.bbox_coder.encode_size
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim)
if with_score_factors:
score_factor = score_factor.permute(1, 2,
0).reshape(-1).sigmoid()
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
kernel_pred = kernel_pred.permute(1, 2, 0).reshape(
-1, self.num_gen_params)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
                # note that FG labels are set to [0, num_class-1]
                # since mmdet v2.0, while the
                # BG cat_id is num_class
scores = cls_score.softmax(-1)[:, :-1]
# After https://github.com/open-mmlab/mmdetection/pull/6268/,
# this operation keeps fewer bboxes under the same `nms_pre`.
# There is no difference in performance for most models. If you
# find a slight drop in performance, you can set a larger
# `nms_pre` than before.
score_thr = cfg.get('score_thr', 0)
results = filter_scores_and_topk(
scores, score_thr, nms_pre,
dict(
bbox_pred=bbox_pred,
priors=priors,
kernel_pred=kernel_pred))
scores, labels, keep_idxs, filtered_results = results
bbox_pred = filtered_results['bbox_pred']
priors = filtered_results['priors']
kernel_pred = filtered_results['kernel_pred']
if with_score_factors:
score_factor = score_factor[keep_idxs]
mlvl_bbox_preds.append(bbox_pred)
mlvl_valid_priors.append(priors)
mlvl_scores.append(scores)
mlvl_labels.append(labels)
mlvl_kernels.append(kernel_pred)
if with_score_factors:
mlvl_score_factors.append(score_factor)
bbox_pred = torch.cat(mlvl_bbox_preds)
priors = cat_boxes(mlvl_valid_priors)
bboxes = self.bbox_coder.decode(
priors[..., :2], bbox_pred, max_shape=img_shape)
results = InstanceData()
results.bboxes = bboxes
results.priors = priors
results.scores = torch.cat(mlvl_scores)
results.labels = torch.cat(mlvl_labels)
results.kernels = torch.cat(mlvl_kernels)
if with_score_factors:
results.score_factors = torch.cat(mlvl_score_factors)
return self._bbox_mask_post_process(
results=results,
mask_feat=mask_feat,
cfg=cfg,
rescale=rescale,
with_nms=with_nms,
img_meta=img_meta)
def _bbox_mask_post_process(
self,
results: InstanceData,
mask_feat,
cfg: ConfigType,
rescale: bool = False,
with_nms: bool = True,
img_meta: Optional[dict] = None) -> InstanceData:
"""bbox and mask post-processing method.
        The boxes are rescaled to the original image scale and nms is
        applied. Usually `with_nms` is False when used for aug test.
Args:
            results (:obj:`InstanceData`): Detection instance results,
each item has shape (num_bboxes, ).
cfg (ConfigDict): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default to False.
with_nms (bool): If True, do nms before return boxes.
Default to True.
img_meta (dict, optional): Image meta info. Defaults to None.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arranged as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, h, w).
"""
stride = self.prior_generator.strides[0][0]
if rescale:
assert img_meta.get('scale_factor') is not None
scale_factor = [1 / s for s in img_meta['scale_factor']]
results.bboxes = scale_boxes(results.bboxes, scale_factor)
if hasattr(results, 'score_factors'):
# TODO: Add sqrt operation in order to be consistent with
# the paper.
score_factors = results.pop('score_factors')
results.scores = results.scores * score_factors
# filter small size bboxes
if cfg.get('min_bbox_size', -1) >= 0:
w, h = get_box_wh(results.bboxes)
valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
if not valid_mask.all():
results = results[valid_mask]
# TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg
assert with_nms, 'with_nms must be True for RTMDet-Ins'
if results.bboxes.numel() > 0:
bboxes = get_box_tensor(results.bboxes)
det_bboxes, keep_idxs = batched_nms(bboxes, results.scores,
results.labels, cfg.nms)
results = results[keep_idxs]
# some nms would reweight the score, such as softnms
results.scores = det_bboxes[:, -1]
results = results[:cfg.max_per_img]
# process masks
mask_logits = self._mask_predict_by_feat_single(
mask_feat, results.kernels, results.priors)
mask_logits = F.interpolate(
mask_logits.unsqueeze(0), scale_factor=stride, mode='bilinear')
if rescale:
ori_h, ori_w = img_meta['ori_shape'][:2]
mask_logits = F.interpolate(
mask_logits,
size=[
math.ceil(mask_logits.shape[-2] * scale_factor[0]),
math.ceil(mask_logits.shape[-1] * scale_factor[1])
],
mode='bilinear',
align_corners=False)[..., :ori_h, :ori_w]
masks = mask_logits.sigmoid().squeeze(0)
masks = masks > cfg.mask_thr_binary
results.masks = masks
else:
h, w = img_meta['ori_shape'][:2] if rescale else img_meta[
'img_shape'][:2]
results.masks = torch.zeros(
size=(results.bboxes.shape[0], h, w),
dtype=torch.bool,
device=results.bboxes.device)
return results
def parse_dynamic_params(self, flatten_kernels: Tensor) -> tuple:
"""split kernel head prediction to conv weight and bias."""
n_inst = flatten_kernels.size(0)
n_layers = len(self.weight_nums)
params_splits = list(
torch.split_with_sizes(
flatten_kernels, self.weight_nums + self.bias_nums, dim=1))
weight_splits = params_splits[:n_layers]
bias_splits = params_splits[n_layers:]
for i in range(n_layers):
if i < n_layers - 1:
weight_splits[i] = weight_splits[i].reshape(
n_inst * self.dyconv_channels, -1, 1, 1)
bias_splits[i] = bias_splits[i].reshape(n_inst *
self.dyconv_channels)
else:
weight_splits[i] = weight_splits[i].reshape(n_inst, -1, 1, 1)
bias_splits[i] = bias_splits[i].reshape(n_inst)
return weight_splits, bias_splits
def _mask_predict_by_feat_single(self, mask_feat: Tensor, kernels: Tensor,
priors: Tensor) -> Tensor:
"""Generate mask logits from mask features with dynamic convs.
Args:
mask_feat (Tensor): Mask prototype features.
Has shape (num_prototypes, H, W).
kernels (Tensor): Kernel parameters for each instance.
Has shape (num_instance, num_params)
priors (Tensor): Center priors for each instance.
Has shape (num_instance, 4).
Returns:
Tensor: Instance segmentation masks for each instance.
Has shape (num_instance, H, W).
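        Example:
            >>> # A hedged sketch assuming ``self`` is an initialized
            >>> # RTMDetInsHead with the default dynamic-conv config, for
            >>> # which num_gen_params == 169; sizes here are hypothetical.
            >>> import torch
            >>> mask_feat = torch.rand(8, 20, 20)
            >>> kernels = torch.rand(2, 169)
            >>> priors = torch.tensor([[4., 4., 8., 8.], [12., 12., 8., 8.]])
            >>> masks = self._mask_predict_by_feat_single(
            ...     mask_feat, kernels, priors)
            >>> assert masks.shape == (2, 20, 20)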
"""
num_inst = priors.shape[0]
h, w = mask_feat.size()[-2:]
if num_inst < 1:
return torch.empty(
size=(num_inst, h, w),
dtype=mask_feat.dtype,
device=mask_feat.device)
if len(mask_feat.shape) < 4:
            mask_feat = mask_feat.unsqueeze(0)
coord = self.prior_generator.single_level_grid_priors(
(h, w), level_idx=0, device=mask_feat.device).reshape(1, -1, 2)
points = priors[:, :2].reshape(-1, 1, 2)
strides = priors[:, 2:].reshape(-1, 1, 2)
relative_coord = (points - coord).permute(0, 2, 1) / (
strides[..., 0].reshape(-1, 1, 1) * 8)
relative_coord = relative_coord.reshape(num_inst, 2, h, w)
mask_feat = torch.cat(
[relative_coord,
mask_feat.repeat(num_inst, 1, 1, 1)], dim=1)
weights, biases = self.parse_dynamic_params(kernels)
n_layers = len(weights)
x = mask_feat.reshape(1, -1, h, w)
for i, (weight, bias) in enumerate(zip(weights, biases)):
x = F.conv2d(
x, weight, bias=bias, stride=1, padding=0, groups=num_inst)
if i < n_layers - 1:
x = F.relu(x)
x = x.reshape(num_inst, h, w)
return x
def loss_mask_by_feat(self, mask_feats: Tensor, flatten_kernels: Tensor,
sampling_results_list: list,
batch_gt_instances: InstanceList) -> Tensor:
"""Compute instance segmentation loss.
Args:
mask_feats (list[Tensor]): Mask prototype features extracted from
the mask head. Has shape (N, num_prototypes, H, W)
flatten_kernels (list[Tensor]): Kernels of the dynamic conv layers.
Has shape (N, num_instances, num_params)
sampling_results_list (list[:obj:`SamplingResults`]) Batch of
assignment results.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
Returns:
Tensor: The mask loss tensor.
"""
batch_pos_mask_logits = []
pos_gt_masks = []
for idx, (mask_feat, kernels, sampling_results,
gt_instances) in enumerate(
zip(mask_feats, flatten_kernels, sampling_results_list,
batch_gt_instances)):
pos_priors = sampling_results.pos_priors
pos_inds = sampling_results.pos_inds
pos_kernels = kernels[pos_inds] # n_pos, num_gen_params
pos_mask_logits = self._mask_predict_by_feat_single(
mask_feat, pos_kernels, pos_priors)
if gt_instances.masks.numel() == 0:
gt_masks = torch.empty_like(gt_instances.masks)
else:
gt_masks = gt_instances.masks[
sampling_results.pos_assigned_gt_inds, :]
batch_pos_mask_logits.append(pos_mask_logits)
pos_gt_masks.append(gt_masks)
pos_gt_masks = torch.cat(pos_gt_masks, 0)
batch_pos_mask_logits = torch.cat(batch_pos_mask_logits, 0)
# avg_factor
num_pos = batch_pos_mask_logits.shape[0]
num_pos = reduce_mean(mask_feats.new_tensor([num_pos
])).clamp_(min=1).item()
if batch_pos_mask_logits.shape[0] == 0:
return mask_feats.sum() * 0
scale = self.prior_generator.strides[0][0] // self.mask_loss_stride
# upsample pred masks
batch_pos_mask_logits = F.interpolate(
batch_pos_mask_logits.unsqueeze(0),
scale_factor=scale,
mode='bilinear',
align_corners=False).squeeze(0)
# downsample gt masks
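        # (sample every mask_loss_stride-th pixel, offset to the pixel
        # centre, so the grid aligns with the upsampled predictions)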
pos_gt_masks = pos_gt_masks[:, self.mask_loss_stride //
2::self.mask_loss_stride,
self.mask_loss_stride //
2::self.mask_loss_stride]
loss_mask = self.loss_mask(
batch_pos_mask_logits,
pos_gt_masks,
weight=None,
avg_factor=num_pos)
return loss_mask
def loss_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
kernel_preds: List[Tensor],
mask_feat: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Decoded box for each scale
                level with shape (N, num_anchors * 4, H, W) in
                [tl_x, tl_y, br_x, br_y] format.
            kernel_preds (list[Tensor]): Kernel predictions of dynamic
                convs for all scale levels, each is a 4D-tensor, has shape
                (N, num_params, H, W).
            mask_feat (Tensor): Mask prototype features extracted from the
                mask head, has shape (N, num_prototypes, H, W).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
num_imgs = len(batch_img_metas)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
flatten_cls_scores = torch.cat([
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.cls_out_channels)
for cls_score in cls_scores
], 1)
flatten_kernels = torch.cat([
kernel_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.num_gen_params)
for kernel_pred in kernel_preds
], 1)
decoded_bboxes = []
for anchor, bbox_pred in zip(anchor_list[0], bbox_preds):
anchor = anchor.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
bbox_pred = distance2bbox(anchor, bbox_pred)
decoded_bboxes.append(bbox_pred)
flatten_bboxes = torch.cat(decoded_bboxes, 1)
for gt_instances in batch_gt_instances:
gt_instances.masks = gt_instances.masks.to_tensor(
dtype=torch.bool, device=device)
cls_reg_targets = self.get_targets(
flatten_cls_scores,
flatten_bboxes,
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
assign_metrics_list, sampling_results_list) = cls_reg_targets
losses_cls, losses_bbox,\
cls_avg_factors, bbox_avg_factors = multi_apply(
self.loss_by_feat_single,
cls_scores,
decoded_bboxes,
labels_list,
label_weights_list,
bbox_targets_list,
assign_metrics_list,
self.prior_generator.strides)
cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item()
losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls))
bbox_avg_factor = reduce_mean(
sum(bbox_avg_factors)).clamp_(min=1).item()
losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
loss_mask = self.loss_mask_by_feat(mask_feat, flatten_kernels,
sampling_results_list,
batch_gt_instances)
loss = dict(
loss_cls=losses_cls, loss_bbox=losses_bbox, loss_mask=loss_mask)
return loss
class MaskFeatModule(BaseModule):
"""Mask feature head used in RTMDet-Ins.
Args:
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels of the mask feature
map branch.
        num_levels (int): Number of input feature map levels that are
            fused to predict the mask feature map.
        num_prototypes (int): Number of output channels of the mask feature
            map branch. This is the channel count of the mask feature map
            that will be dynamically convolved with the predicted kernels.
stacked_convs (int): Number of convs in mask feature branch.
act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
Default: dict(type='ReLU', inplace=True)
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
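    Example:
        >>> # A hedged sketch with hypothetical channel counts and sizes.
        >>> import torch
        >>> m = MaskFeatModule(in_channels=16, feat_channels=32,
        ...                    stacked_convs=2, num_levels=3, num_prototypes=8)
        >>> feats = [torch.rand(1, 16, 40 // 2**i, 40 // 2**i)
        ...          for i in range(3)]
        >>> m(feats).shape  # prototypes at the first level's resolution
        torch.Size([1, 8, 40, 40])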
"""
def __init__(
self,
in_channels: int,
feat_channels: int = 256,
stacked_convs: int = 4,
num_levels: int = 3,
num_prototypes: int = 8,
act_cfg: ConfigType = dict(type='ReLU', inplace=True),
norm_cfg: ConfigType = dict(type='BN')
) -> None:
super().__init__(init_cfg=None)
self.num_levels = num_levels
self.fusion_conv = nn.Conv2d(num_levels * in_channels, in_channels, 1)
convs = []
for i in range(stacked_convs):
in_c = in_channels if i == 0 else feat_channels
convs.append(
ConvModule(
in_c,
feat_channels,
3,
padding=1,
act_cfg=act_cfg,
norm_cfg=norm_cfg))
self.stacked_convs = nn.Sequential(*convs)
self.projection = nn.Conv2d(
feat_channels, num_prototypes, kernel_size=1)
def forward(self, features: Tuple[Tensor, ...]) -> Tensor:
# multi-level feature fusion
fusion_feats = [features[0]]
size = features[0].shape[-2:]
for i in range(1, self.num_levels):
f = F.interpolate(features[i], size=size, mode='bilinear')
fusion_feats.append(f)
fusion_feats = torch.cat(fusion_feats, dim=1)
fusion_feats = self.fusion_conv(fusion_feats)
# pred mask feats
mask_features = self.stacked_convs(fusion_feats)
mask_features = self.projection(mask_features)
return mask_features
@MODELS.register_module()
class RTMDetInsSepBNHead(RTMDetInsHead):
"""Detection Head of RTMDet-Ins with sep-bn layers.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
share_conv (bool): Whether to share conv layers between stages.
Defaults to True.
norm_cfg (:obj:`ConfigDict` or dict)): Config dict for normalization
layer. Defaults to dict(type='BN').
act_cfg (:obj:`ConfigDict` or dict)): Config dict for activation layer.
Defaults to dict(type='SiLU', inplace=True).
pred_kernel_size (int): Kernel size of prediction layer. Defaults to 1.
"""
def __init__(self,
num_classes: int,
in_channels: int,
share_conv: bool = True,
with_objectness: bool = False,
norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
act_cfg: ConfigType = dict(type='SiLU', inplace=True),
pred_kernel_size: int = 1,
**kwargs) -> None:
self.share_conv = share_conv
super().__init__(
num_classes,
in_channels,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
pred_kernel_size=pred_kernel_size,
with_objectness=with_objectness,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.kernel_convs = nn.ModuleList()
self.rtm_cls = nn.ModuleList()
self.rtm_reg = nn.ModuleList()
self.rtm_kernel = nn.ModuleList()
self.rtm_obj = nn.ModuleList()
# calculate num dynamic parameters
weight_nums, bias_nums = [], []
for i in range(self.num_dyconvs):
if i == 0:
weight_nums.append(
(self.num_prototypes + 2) * self.dyconv_channels)
bias_nums.append(self.dyconv_channels)
elif i == self.num_dyconvs - 1:
weight_nums.append(self.dyconv_channels)
bias_nums.append(1)
else:
weight_nums.append(self.dyconv_channels * self.dyconv_channels)
bias_nums.append(self.dyconv_channels)
self.weight_nums = weight_nums
self.bias_nums = bias_nums
self.num_gen_params = sum(weight_nums) + sum(bias_nums)
pred_pad_size = self.pred_kernel_size // 2
for n in range(len(self.prior_generator.strides)):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
kernel_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
kernel_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
self.cls_convs.append(cls_convs)
            self.reg_convs.append(reg_convs)
self.kernel_convs.append(kernel_convs)
self.rtm_cls.append(
nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
self.pred_kernel_size,
padding=pred_pad_size))
self.rtm_reg.append(
nn.Conv2d(
self.feat_channels,
self.num_base_priors * 4,
self.pred_kernel_size,
padding=pred_pad_size))
self.rtm_kernel.append(
nn.Conv2d(
self.feat_channels,
self.num_gen_params,
self.pred_kernel_size,
padding=pred_pad_size))
if self.with_objectness:
self.rtm_obj.append(
nn.Conv2d(
self.feat_channels,
1,
self.pred_kernel_size,
padding=pred_pad_size))
if self.share_conv:
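            # reuse the same conv weights across FPN levels while each level
            # keeps its own BN statistics (the ``SepBN`` in the class name)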
for n in range(len(self.prior_generator.strides)):
for i in range(self.stacked_convs):
self.cls_convs[n][i].conv = self.cls_convs[0][i].conv
self.reg_convs[n][i].conv = self.reg_convs[0][i].conv
self.mask_head = MaskFeatModule(
in_channels=self.in_channels,
feat_channels=self.feat_channels,
stacked_convs=4,
num_levels=len(self.prior_generator.strides),
num_prototypes=self.num_prototypes,
act_cfg=self.act_cfg,
norm_cfg=self.norm_cfg)
def init_weights(self) -> None:
"""Initialize weights of the head."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
bias_cls = bias_init_with_prob(0.01)
for rtm_cls, rtm_reg, rtm_kernel in zip(self.rtm_cls, self.rtm_reg,
self.rtm_kernel):
normal_init(rtm_cls, std=0.01, bias=bias_cls)
normal_init(rtm_reg, std=0.01, bias=1)
if self.with_objectness:
for rtm_obj in self.rtm_obj:
normal_init(rtm_obj, std=0.01, bias=bias_cls)
def forward(self, feats: Tuple[Tensor, ...]) -> tuple:
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
            tuple: A tuple of classification scores, bbox predictions,
            kernel predictions and mask features.
- cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_base_priors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_base_priors * 4.
- kernel_preds (list[Tensor]): Dynamic conv kernels for all scale
levels, each is a 4D-tensor, the channels number is
num_gen_params.
            - mask_feat (Tensor): Mask prototype features from the mask
              head, a 4D-tensor whose channel number is num_prototypes.
"""
mask_feat = self.mask_head(feats)
cls_scores = []
bbox_preds = []
kernel_preds = []
for idx, (x, stride) in enumerate(
zip(feats, self.prior_generator.strides)):
cls_feat = x
reg_feat = x
kernel_feat = x
for cls_layer in self.cls_convs[idx]:
cls_feat = cls_layer(cls_feat)
cls_score = self.rtm_cls[idx](cls_feat)
for kernel_layer in self.kernel_convs[idx]:
kernel_feat = kernel_layer(kernel_feat)
kernel_pred = self.rtm_kernel[idx](kernel_feat)
for reg_layer in self.reg_convs[idx]:
reg_feat = reg_layer(reg_feat)
if self.with_objectness:
objectness = self.rtm_obj[idx](reg_feat)
cls_score = inverse_sigmoid(
sigmoid_geometric_mean(cls_score, objectness))
reg_dist = F.relu(self.rtm_reg[idx](reg_feat)) * stride[0]
cls_scores.append(cls_score)
bbox_preds.append(reg_dist)
kernel_preds.append(kernel_pred)
return tuple(cls_scores), tuple(bbox_preds), tuple(
kernel_preds), mask_feat
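# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): how the flat dynamic
# parameters predicted by ``rtm_kernel`` can be split back into per-layer
# 1x1 conv weights/biases, mirroring ``weight_nums``/``bias_nums`` in
# ``_init_layers`` above, and applied to one instance's prototype features.
# The helper name and shapes below are assumptions for demonstration only.
def _demo_apply_dynamic_convs(flat_params: Tensor, feat: Tensor,
                              weight_nums: list, bias_nums: list,
                              dyconv_channels: int) -> Tensor:
    """Run dynamically generated 1x1 convs on a single instance's feature.
    ``feat`` has shape (num_prototypes + 2, H, W): the prototypes plus the
    two relative-coordinate channels consumed by the first dynamic layer.
    """
    num_layers = len(weight_nums)
    params = list(torch.split(flat_params, weight_nums + bias_nums))
    weights, biases = params[:num_layers], params[num_layers:]
    x = feat[None]  # (1, C, H, W)
    for i, (w, b) in enumerate(zip(weights, biases)):
        out_channels = 1 if i == num_layers - 1 else dyconv_channels
        x = F.conv2d(x, w.reshape(out_channels, -1, 1, 1), bias=b)
        if i < num_layers - 1:
            x = F.relu(x)
    return x[0]  # (1, H, W) mask logits for this instance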
| 43,632 | 41.157488 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/ga_rpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import nms
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptInstanceList
from .guided_anchor_head import GuidedAnchorHead
@MODELS.register_module()
class GARPNHead(GuidedAnchorHead):
"""Guided-Anchor-based RPN head."""
def __init__(self,
in_channels: int,
num_classes: int = 1,
init_cfg: MultiConfig = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='conv_loc',
std=0.01,
bias_prob=0.01)),
**kwargs) -> None:
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.rpn_conv = nn.Conv2d(
self.in_channels, self.feat_channels, 3, padding=1)
super(GARPNHead, self)._init_layers()
def forward_single(self, x: Tensor) -> Tuple[Tensor]:
"""Forward feature of a single scale level."""
x = self.rpn_conv(x)
x = F.relu(x, inplace=True)
(cls_score, bbox_pred, shape_pred,
loc_pred) = super().forward_single(x)
return cls_score, bbox_pred, shape_pred, loc_pred
def loss_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
shape_preds: List[Tensor],
loc_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
has shape (N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
shape_preds (list[Tensor]): shape predictions for each scale
level with shape (N, 1, H, W).
loc_preds (list[Tensor]): location predictions for each scale
level with shape (N, num_anchors * 2, H, W).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict: A dictionary of loss components.
"""
losses = super().loss_by_feat(
cls_scores,
bbox_preds,
shape_preds,
loc_preds,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
return dict(
loss_rpn_cls=losses['loss_cls'],
loss_rpn_bbox=losses['loss_bbox'],
loss_anchor_shape=losses['loss_shape'],
loss_anchor_loc=losses['loss_loc'])
def _predict_by_feat_single(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
mlvl_anchors: List[Tensor],
mlvl_masks: List[Tensor],
img_meta: dict,
cfg: ConfigType,
rescale: bool = False) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
cls_scores (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has shape
(num_priors * 4, H, W).
mlvl_anchors (list[Tensor]): Each element in the list is
the anchors of a single level in feature pyramid. it has
shape (num_priors, 4).
mlvl_masks (list[Tensor]): Each element in the list is location
masks of a single level.
img_meta (dict): Image meta info.
cfg (:obj:`ConfigDict` or dict): Test / postprocessing
configuration, if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4), the last
dimension 4 arrange as (x1, y1, x2, y2).
"""
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \
'naive nms.'
mlvl_proposals = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
anchors = mlvl_anchors[idx]
mask = mlvl_masks[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
            # if no location is kept, skip the current level.
if mask.sum() == 0:
continue
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
                # note: since mmdet v2.0, FG labels are in [0, num_class - 1]
                # and the BG cat_id is num_class
scores = rpn_cls_score.softmax(dim=1)[:, :-1]
# filter scores, bbox_pred w.r.t. mask.
# anchors are filtered in get_anchors() beforehand.
scores = scores[mask]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
4)[mask, :]
if scores.dim() == 0:
rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
anchors = anchors.unsqueeze(0)
scores = scores.unsqueeze(0)
# filter anchors, bbox_pred, scores w.r.t. scores
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
_, topk_inds = scores.topk(cfg.nms_pre)
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
scores = scores[topk_inds]
# get proposals w.r.t. anchors and rpn_bbox_pred
proposals = self.bbox_coder.decode(
anchors, rpn_bbox_pred, max_shape=img_meta['img_shape'])
# filter out too small bboxes
if cfg.min_bbox_size >= 0:
w = proposals[:, 2] - proposals[:, 0]
h = proposals[:, 3] - proposals[:, 1]
valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
if not valid_mask.all():
proposals = proposals[valid_mask]
scores = scores[valid_mask]
# NMS in current level
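            # ``nms`` returns (dets, inds); ``dets`` has shape (n, 5) with the
            # score appended as the last column, which later ranking relies on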
proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold)
proposals = proposals[:cfg.nms_post, :]
mlvl_proposals.append(proposals)
proposals = torch.cat(mlvl_proposals, 0)
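        # each row of ``proposals`` is (x1, y1, x2, y2, score)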
if cfg.get('nms_across_levels', False):
# NMS across multi levels
proposals, _ = nms(proposals[:, :4], proposals[:, -1],
cfg.nms.iou_threshold)
proposals = proposals[:cfg.max_per_img, :]
else:
scores = proposals[:, 4]
num = min(cfg.max_per_img, proposals.shape[0])
_, topk_inds = scores.topk(num)
proposals = proposals[topk_inds, :]
bboxes = proposals[:, :-1]
scores = proposals[:, -1]
if rescale:
assert img_meta.get('scale_factor') is not None
bboxes /= bboxes.new_tensor(img_meta['scale_factor']).repeat(
(1, 2))
results = InstanceData()
results.bboxes = bboxes
results.scores = scores
results.labels = scores.new_zeros(scores.size(0), dtype=torch.long)
return results
| 9,455 | 41.403587 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/tood_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, Scale
from mmcv.ops import deform_conv2d
from mmengine import MessageHub
from mmengine.config import ConfigDict
from mmengine.model import bias_init_with_prob, normal_init
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import distance2bbox
from mmdet.utils import (ConfigType, InstanceList, OptConfigType,
OptInstanceList, reduce_mean)
from ..task_modules.prior_generators import anchor_inside_flags
from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply,
sigmoid_geometric_mean, unmap)
from .atss_head import ATSSHead
class TaskDecomposition(nn.Module):
"""Task decomposition module in task-aligned predictor of TOOD.
Args:
feat_channels (int): Number of feature channels in TOOD head.
stacked_convs (int): Number of conv layers in TOOD head.
la_down_rate (int): Downsample rate of layer attention.
Defaults to 8.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Defaults to None.
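    Example:
        A minimal illustrative sketch (the channel numbers below are
        arbitrary, not taken from any config):
        >>> import torch
        >>> task_decomp = TaskDecomposition(feat_channels=8, stacked_convs=3)
        >>> inter_feats = torch.rand(2, 8 * 3, 16, 16)
        >>> task_decomp(inter_feats).shape
        torch.Size([2, 8, 16, 16])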
"""
def __init__(self,
feat_channels: int,
stacked_convs: int,
la_down_rate: int = 8,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None) -> None:
super().__init__()
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.in_channels = self.feat_channels * self.stacked_convs
self.norm_cfg = norm_cfg
self.layer_attention = nn.Sequential(
nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1),
nn.ReLU(inplace=True),
nn.Conv2d(
self.in_channels // la_down_rate,
self.stacked_convs,
1,
padding=0), nn.Sigmoid())
self.reduction_conv = ConvModule(
self.in_channels,
self.feat_channels,
1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
bias=norm_cfg is None)
def init_weights(self) -> None:
"""Initialize the parameters."""
for m in self.layer_attention.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001)
normal_init(self.reduction_conv.conv, std=0.01)
def forward(self,
feat: Tensor,
avg_feat: Optional[Tensor] = None) -> Tensor:
"""Forward function of task decomposition module."""
b, c, h, w = feat.shape
if avg_feat is None:
avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
weight = self.layer_attention(avg_feat)
# here we first compute the product between layer attention weight and
# conv weight, and then compute the convolution between new conv weight
# and feature map, in order to save memory and FLOPs.
conv_weight = weight.reshape(
b, 1, self.stacked_convs,
1) * self.reduction_conv.conv.weight.reshape(
1, self.feat_channels, self.stacked_convs, self.feat_channels)
conv_weight = conv_weight.reshape(b, self.feat_channels,
self.in_channels)
feat = feat.reshape(b, self.in_channels, h * w)
feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h,
w)
if self.norm_cfg is not None:
feat = self.reduction_conv.norm(feat)
feat = self.reduction_conv.activate(feat)
return feat
@MODELS.register_module()
class TOODHead(ATSSHead):
"""TOODHead used in `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_.
TOOD uses Task-aligned head (T-head) and is optimized by Task Alignment
Learning (TAL).
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
num_dcn (int): Number of deformable convolution in the head.
Defaults to 0.
anchor_type (str): If set to ``anchor_free``, the head will use centers
to regress bboxes. If set to ``anchor_based``, the head will
regress bboxes based on anchors. Defaults to ``anchor_free``.
initial_loss_cls (:obj:`ConfigDict` or dict): Config of initial loss.
Example:
>>> self = TOODHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_score, bbox_pred = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
"""
def __init__(self,
num_classes: int,
in_channels: int,
num_dcn: int = 0,
anchor_type: str = 'anchor_free',
initial_loss_cls: ConfigType = dict(
type='FocalLoss',
use_sigmoid=True,
activated=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
**kwargs) -> None:
assert anchor_type in ['anchor_free', 'anchor_based']
self.num_dcn = num_dcn
self.anchor_type = anchor_type
super().__init__(
num_classes=num_classes, in_channels=in_channels, **kwargs)
if self.train_cfg:
self.initial_epoch = self.train_cfg['initial_epoch']
self.initial_assigner = TASK_UTILS.build(
self.train_cfg['initial_assigner'])
self.initial_loss_cls = MODELS.build(initial_loss_cls)
self.assigner = self.initial_assigner
self.alignment_assigner = TASK_UTILS.build(
self.train_cfg['assigner'])
self.alpha = self.train_cfg['alpha']
self.beta = self.train_cfg['beta']
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.inter_convs = nn.ModuleList()
for i in range(self.stacked_convs):
if i < self.num_dcn:
conv_cfg = dict(type='DCNv2', deform_groups=4)
else:
conv_cfg = self.conv_cfg
chn = self.in_channels if i == 0 else self.feat_channels
self.inter_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_decomp = TaskDecomposition(self.feat_channels,
self.stacked_convs,
self.stacked_convs * 8,
self.conv_cfg, self.norm_cfg)
self.reg_decomp = TaskDecomposition(self.feat_channels,
self.stacked_convs,
self.stacked_convs * 8,
self.conv_cfg, self.norm_cfg)
self.tood_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.tood_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
self.cls_prob_module = nn.Sequential(
nn.Conv2d(self.feat_channels * self.stacked_convs,
self.feat_channels // 4, 1), nn.ReLU(inplace=True),
nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1))
self.reg_offset_module = nn.Sequential(
nn.Conv2d(self.feat_channels * self.stacked_convs,
self.feat_channels // 4, 1), nn.ReLU(inplace=True),
nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1))
self.scales = nn.ModuleList(
[Scale(1.0) for _ in self.prior_generator.strides])
def init_weights(self) -> None:
"""Initialize weights of the head."""
bias_cls = bias_init_with_prob(0.01)
for m in self.inter_convs:
normal_init(m.conv, std=0.01)
for m in self.cls_prob_module:
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.01)
for m in self.reg_offset_module:
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001)
normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls)
self.cls_decomp.init_weights()
self.reg_decomp.init_weights()
normal_init(self.tood_cls, std=0.01, bias=bias_cls)
normal_init(self.tood_reg, std=0.01)
def forward(self, feats: Tuple[Tensor]) -> Tuple[List[Tensor]]:
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Decoded box for all scale levels,
each is a 4D-tensor, the channels number is
num_anchors * 4. In [tl_x, tl_y, br_x, br_y] format.
"""
cls_scores = []
bbox_preds = []
for idx, (x, scale, stride) in enumerate(
zip(feats, self.scales, self.prior_generator.strides)):
b, c, h, w = x.shape
anchor = self.prior_generator.single_level_grid_priors(
(h, w), idx, device=x.device)
anchor = torch.cat([anchor for _ in range(b)])
# extract task interactive features
inter_feats = []
for inter_conv in self.inter_convs:
x = inter_conv(x)
inter_feats.append(x)
feat = torch.cat(inter_feats, 1)
# task decomposition
avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
cls_feat = self.cls_decomp(feat, avg_feat)
reg_feat = self.reg_decomp(feat, avg_feat)
# cls prediction and alignment
cls_logits = self.tood_cls(cls_feat)
cls_prob = self.cls_prob_module(feat)
cls_score = sigmoid_geometric_mean(cls_logits, cls_prob)
# reg prediction and alignment
if self.anchor_type == 'anchor_free':
reg_dist = scale(self.tood_reg(reg_feat).exp()).float()
reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4)
reg_bbox = distance2bbox(
self.anchor_center(anchor) / stride[0],
reg_dist).reshape(b, h, w, 4).permute(0, 3, 1,
2) # (b, c, h, w)
elif self.anchor_type == 'anchor_based':
reg_dist = scale(self.tood_reg(reg_feat)).float()
reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4)
reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape(
b, h, w, 4).permute(0, 3, 1, 2) / stride[0]
else:
raise NotImplementedError(
f'Unknown anchor type: {self.anchor_type}.'
f'Please use `anchor_free` or `anchor_based`.')
reg_offset = self.reg_offset_module(feat)
bbox_pred = self.deform_sampling(reg_bbox.contiguous(),
reg_offset.contiguous())
# After deform_sampling, some boxes will become invalid (The
# left-top point is at the right or bottom of the right-bottom
# point), which will make the GIoULoss negative.
invalid_bbox_idx = (bbox_pred[:, [0]] > bbox_pred[:, [2]]) | \
(bbox_pred[:, [1]] > bbox_pred[:, [3]])
invalid_bbox_idx = invalid_bbox_idx.expand_as(bbox_pred)
bbox_pred = torch.where(invalid_bbox_idx, reg_bbox, bbox_pred)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return tuple(cls_scores), tuple(bbox_preds)
def deform_sampling(self, feat: Tensor, offset: Tensor) -> Tensor:
"""Sampling the feature x according to offset.
Args:
feat (Tensor): Feature
offset (Tensor): Spatial offset for feature sampling
"""
# it is an equivalent implementation of bilinear interpolation
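        # a 1x1 depthwise conv (groups == c) with an all-ones weight keeps
        # the values unchanged, so the learned offsets alone resample each
        # channel bilinearly at the offset locations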
b, c, h, w = feat.shape
weight = feat.new_ones(c, 1, 1, 1)
y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c)
return y
def anchor_center(self, anchors: Tensor) -> Tensor:
"""Get anchor centers from anchors.
Args:
anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.
Returns:
Tensor: Anchor centers with shape (N, 2), "xy" format.
"""
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
return torch.stack([anchors_cx, anchors_cy], dim=-1)
def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,
bbox_pred: Tensor, labels: Tensor,
label_weights: Tensor, bbox_targets: Tensor,
alignment_metrics: Tensor,
stride: Tuple[int, int]) -> dict:
"""Calculate the loss of a single scale level based on the features
extracted by the detection head.
Args:
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
cls_score (Tensor): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W).
bbox_pred (Tensor): Decoded bboxes for each scale
level with shape (N, num_anchors * 4, H, W).
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors).
bbox_targets (Tensor): BBox regression targets of each anchor with
shape (N, num_total_anchors, 4).
alignment_metrics (Tensor): Alignment metrics with shape
(N, num_total_anchors).
stride (Tuple[int, int]): Downsample stride of the feature map.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert stride[0] == stride[1], 'h stride is not equal to w stride!'
anchors = anchors.reshape(-1, 4)
cls_score = cls_score.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels).contiguous()
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
bbox_targets = bbox_targets.reshape(-1, 4)
labels = labels.reshape(-1)
alignment_metrics = alignment_metrics.reshape(-1)
label_weights = label_weights.reshape(-1)
targets = labels if self.epoch < self.initial_epoch else (
labels, alignment_metrics)
cls_loss_func = self.initial_loss_cls \
if self.epoch < self.initial_epoch else self.loss_cls
loss_cls = cls_loss_func(
cls_score, targets, label_weights, avg_factor=1.0)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
if len(pos_inds) > 0:
pos_bbox_targets = bbox_targets[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_anchors = anchors[pos_inds]
pos_decode_bbox_pred = pos_bbox_pred
pos_decode_bbox_targets = pos_bbox_targets / stride[0]
# regression loss
pos_bbox_weight = self.centerness_target(
pos_anchors, pos_bbox_targets
) if self.epoch < self.initial_epoch else alignment_metrics[
pos_inds]
loss_bbox = self.loss_bbox(
pos_decode_bbox_pred,
pos_decode_bbox_targets,
weight=pos_bbox_weight,
avg_factor=1.0)
else:
loss_bbox = bbox_pred.sum() * 0
pos_bbox_weight = bbox_targets.new_tensor(0.)
return loss_cls, loss_bbox, alignment_metrics.sum(
), pos_bbox_weight.sum()
def loss_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Decoded box for each scale
level with shape (N, num_anchors * 4, H, W) in
[tl_x, tl_y, br_x, br_y] format.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
num_imgs = len(batch_img_metas)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
flatten_cls_scores = torch.cat([
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.cls_out_channels)
for cls_score in cls_scores
], 1)
flatten_bbox_preds = torch.cat([
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) * stride[0]
for bbox_pred, stride in zip(bbox_preds,
self.prior_generator.strides)
], 1)
cls_reg_targets = self.get_targets(
flatten_cls_scores,
flatten_bbox_preds,
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
alignment_metrics_list) = cls_reg_targets
losses_cls, losses_bbox, \
cls_avg_factors, bbox_avg_factors = multi_apply(
self.loss_by_feat_single,
anchor_list,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_targets_list,
alignment_metrics_list,
self.prior_generator.strides)
cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item()
losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls))
bbox_avg_factor = reduce_mean(
sum(bbox_avg_factors)).clamp_(min=1).item()
losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
def _predict_by_feat_single(self,
cls_score_list: List[Tensor],
bbox_pred_list: List[Tensor],
score_factor_list: List[Tensor],
mlvl_priors: List[Tensor],
img_meta: dict,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
cls_score_list (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_priors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has shape
(num_priors * 4, H, W).
score_factor_list (list[Tensor]): Score factor from all scale
levels of a single image, each item has shape
(num_priors * 1, H, W).
mlvl_priors (list[Tensor]): Each element in the list is
the priors of a single level in feature pyramid. In all
anchor-based methods, it has shape (num_priors, 4). In
all anchor-free methods, it has shape (num_priors, 2)
when `with_stride=True`, otherwise it still has shape
(num_priors, 4).
img_meta (dict): Image meta info.
cfg (:obj:`ConfigDict`, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
            :obj:`InstanceData`: Detection results of the image
            after the post process, usually containing following keys.
            - scores (Tensor): Classification scores, has a shape
              (num_instances, ).
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4), the last
              dimension 4 arrange as (x1, y1, x2, y2).
"""
cfg = self.test_cfg if cfg is None else cfg
nms_pre = cfg.get('nms_pre', -1)
mlvl_bboxes = []
mlvl_scores = []
mlvl_labels = []
for cls_score, bbox_pred, priors, stride in zip(
cls_score_list, bbox_pred_list, mlvl_priors,
self.prior_generator.strides):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) * stride[0]
scores = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
# After https://github.com/open-mmlab/mmdetection/pull/6268/,
# this operation keeps fewer bboxes under the same `nms_pre`.
# There is no difference in performance for most models. If you
# find a slight drop in performance, you can set a larger
# `nms_pre` than before.
results = filter_scores_and_topk(
scores, cfg.score_thr, nms_pre,
dict(bbox_pred=bbox_pred, priors=priors))
scores, labels, keep_idxs, filtered_results = results
bboxes = filtered_results['bbox_pred']
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_labels.append(labels)
results = InstanceData()
results.bboxes = torch.cat(mlvl_bboxes)
results.scores = torch.cat(mlvl_scores)
results.labels = torch.cat(mlvl_labels)
return self._bbox_post_process(
results=results,
cfg=cfg,
rescale=rescale,
with_nms=with_nms,
img_meta=img_meta)
def get_targets(self,
cls_scores: List[List[Tensor]],
bbox_preds: List[List[Tensor]],
anchor_list: List[List[Tensor]],
valid_flag_list: List[List[Tensor]],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
unmap_outputs: bool = True) -> tuple:
"""Compute regression and classification targets for anchors in
multiple images.
Args:
cls_scores (list[list[Tensor]]): Classification predictions of
images, a 3D-Tensor with shape [num_imgs, num_priors,
num_classes].
bbox_preds (list[list[Tensor]]): Decoded bboxes predictions of one
image, a 3D-Tensor with shape [num_imgs, num_priors, 4] in
[tl_x, tl_y, br_x, br_y] format.
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: a tuple containing learning targets.
- anchors_list (list[list[Tensor]]): Anchors of each level.
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each
level.
- bbox_targets_list (list[Tensor]): BBox targets of each level.
- norm_alignment_metrics_list (list[Tensor]): Normalized
alignment metrics of each level.
"""
num_imgs = len(batch_img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
num_level_anchors_list = [num_level_anchors] * num_imgs
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None] * num_imgs
# anchor_list: list(b * [-1, 4])
# get epoch information from message hub
message_hub = MessageHub.get_current_instance()
self.epoch = message_hub.get_info('epoch')
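        # before ``initial_epoch`` the ATSS-style assigner and targets are
        # used; afterwards training switches to the task-aligned assigner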
if self.epoch < self.initial_epoch:
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_bbox_weights, pos_inds_list, neg_inds_list,
sampling_result) = multi_apply(
super()._get_targets_single,
anchor_list,
valid_flag_list,
num_level_anchors_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore,
unmap_outputs=unmap_outputs)
all_assign_metrics = [
weight[..., 0] for weight in all_bbox_weights
]
else:
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_assign_metrics) = multi_apply(
self._get_targets_single,
cls_scores,
bbox_preds,
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore,
unmap_outputs=unmap_outputs)
# split targets to a list w.r.t. multiple levels
anchors_list = images_to_levels(all_anchors, num_level_anchors)
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
norm_alignment_metrics_list = images_to_levels(all_assign_metrics,
num_level_anchors)
return (anchors_list, labels_list, label_weights_list,
bbox_targets_list, norm_alignment_metrics_list)
def _get_targets_single(self,
cls_scores: Tensor,
bbox_preds: Tensor,
flat_anchors: Tensor,
valid_flags: Tensor,
gt_instances: InstanceData,
img_meta: dict,
gt_instances_ignore: Optional[InstanceData] = None,
unmap_outputs: bool = True) -> tuple:
"""Compute regression, classification targets for anchors in a single
image.
Args:
cls_scores (Tensor): Box scores for each image.
bbox_preds (Tensor): Box energies / deltas for each image.
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors ,4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for current image.
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: N is the number of total anchors in the image.
anchors (Tensor): All anchors in the image with shape (N, 4).
labels (Tensor): Labels of all anchors in the image with shape
(N,).
label_weights (Tensor): Label weights of all anchor in the
image with shape (N,).
bbox_targets (Tensor): BBox targets of all anchors in the
image with shape (N, 4).
norm_alignment_metrics (Tensor): Normalized alignment metrics
of all priors in the image with shape (N,).
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg['allowed_border'])
if not inside_flags.any():
raise ValueError(
'There is no valid anchor inside the image boundary. Please '
'check the image size and anchor sizes, or set '
'``allowed_border`` to -1 to skip the condition.')
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
pred_instances = InstanceData(
priors=anchors,
scores=cls_scores[inside_flags, :],
bboxes=bbox_preds[inside_flags, :])
assign_result = self.alignment_assigner.assign(pred_instances,
gt_instances,
gt_instances_ignore,
self.alpha, self.beta)
assign_ious = assign_result.max_overlaps
assign_metrics = assign_result.assign_metrics
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
norm_alignment_metrics = anchors.new_zeros(
num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
# point-based
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
labels[pos_inds] = sampling_result.pos_gt_labels
if self.train_cfg['pos_weight'] <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg['pos_weight']
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
class_assigned_gt_inds = torch.unique(
sampling_result.pos_assigned_gt_inds)
for gt_inds in class_assigned_gt_inds:
gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds ==
gt_inds]
pos_alignment_metrics = assign_metrics[gt_class_inds]
pos_ious = assign_ious[gt_class_inds]
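            # rescale the metrics of each GT so that its best-aligned anchor
            # receives a weight equal to the best IoU achieved for that GT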
pos_norm_alignment_metrics = pos_alignment_metrics / (
pos_alignment_metrics.max() + 10e-8) * pos_ious.max()
norm_alignment_metrics[gt_class_inds] = pos_norm_alignment_metrics
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
anchors = unmap(anchors, num_total_anchors, inside_flags)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
norm_alignment_metrics = unmap(norm_alignment_metrics,
num_total_anchors, inside_flags)
return (anchors, labels, label_weights, bbox_targets,
norm_alignment_metrics)
| 36,487 | 44.270471 | 79 | py |
ERD | ERD-main/mmdet/models/dense_heads/deformable_detr_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import Dict, List, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import Linear
from mmengine.model import bias_init_with_prob, constant_init
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import InstanceList, OptInstanceList
from ..layers import inverse_sigmoid
from .detr_head import DETRHead
@MODELS.register_module()
class DeformableDETRHead(DETRHead):
r"""Head of DeformDETR: Deformable DETR: Deformable Transformers for
End-to-End Object Detection.
Code is modified from the `official github repo
<https://github.com/fundamentalvision/Deformable-DETR>`_.
More details can be found in the `paper
<https://arxiv.org/abs/2010.04159>`_ .
Args:
share_pred_layer (bool): Whether to share parameters for all the
prediction layers. Defaults to `False`.
num_pred_layer (int): The number of the prediction layers.
Defaults to 6.
as_two_stage (bool, optional): Whether to generate the proposal
from the outputs of encoder. Defaults to `False`.
"""
def __init__(self,
*args,
share_pred_layer: bool = False,
num_pred_layer: int = 6,
as_two_stage: bool = False,
**kwargs) -> None:
self.share_pred_layer = share_pred_layer
self.num_pred_layer = num_pred_layer
self.as_two_stage = as_two_stage
super().__init__(*args, **kwargs)
def _init_layers(self) -> None:
"""Initialize classification branch and regression branch of head."""
fc_cls = Linear(self.embed_dims, self.cls_out_channels)
reg_branch = []
for _ in range(self.num_reg_fcs):
reg_branch.append(Linear(self.embed_dims, self.embed_dims))
reg_branch.append(nn.ReLU())
reg_branch.append(Linear(self.embed_dims, 4))
reg_branch = nn.Sequential(*reg_branch)
if self.share_pred_layer:
self.cls_branches = nn.ModuleList(
[fc_cls for _ in range(self.num_pred_layer)])
self.reg_branches = nn.ModuleList(
[reg_branch for _ in range(self.num_pred_layer)])
else:
self.cls_branches = nn.ModuleList(
[copy.deepcopy(fc_cls) for _ in range(self.num_pred_layer)])
self.reg_branches = nn.ModuleList([
copy.deepcopy(reg_branch) for _ in range(self.num_pred_layer)
])
def init_weights(self) -> None:
"""Initialize weights of the Deformable DETR head."""
if self.loss_cls.use_sigmoid:
bias_init = bias_init_with_prob(0.01)
for m in self.cls_branches:
nn.init.constant_(m.bias, bias_init)
for m in self.reg_branches:
constant_init(m[-1], 0, bias=0)
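        # bias the initial w/h logits of the first reg layer towards small
        # boxes (sigmoid(-2.0) is roughly 0.12 in normalized coordinates)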
nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0)
if self.as_two_stage:
for m in self.reg_branches:
nn.init.constant_(m[-1].bias.data[2:], 0.0)
def forward(self, hidden_states: Tensor,
references: List[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
hidden_states (Tensor): Hidden states output from each decoder
layer, has shape (num_decoder_layers, bs, num_queries, dim).
references (list[Tensor]): List of the reference from the decoder.
The first reference is the `init_reference` (initial) and the
other num_decoder_layers(6) references are `inter_references`
(intermediate). The `init_reference` has shape (bs,
num_queries, 4) when `as_two_stage` of the detector is `True`,
otherwise (bs, num_queries, 2). Each `inter_reference` has
shape (bs, num_queries, 4) when `with_box_refine` of the
detector is `True`, otherwise (bs, num_queries, 2). The
coordinates are arranged as (cx, cy) when the last dimension is
2, and (cx, cy, w, h) when it is 4.
Returns:
tuple[Tensor]: results of head containing the following tensor.
- all_layers_outputs_classes (Tensor): Outputs from the
classification head, has shape (num_decoder_layers, bs,
num_queries, cls_out_channels).
- all_layers_outputs_coords (Tensor): Sigmoid outputs from the
regression head with normalized coordinate format (cx, cy, w,
h), has shape (num_decoder_layers, bs, num_queries, 4) with the
last dimension arranged as (cx, cy, w, h).
"""
all_layers_outputs_classes = []
all_layers_outputs_coords = []
for layer_id in range(hidden_states.shape[0]):
reference = inverse_sigmoid(references[layer_id])
# NOTE The last reference will not be used.
hidden_state = hidden_states[layer_id]
outputs_class = self.cls_branches[layer_id](hidden_state)
tmp_reg_preds = self.reg_branches[layer_id](hidden_state)
if reference.shape[-1] == 4:
# When `layer` is 0 and `as_two_stage` of the detector
# is `True`, or when `layer` is greater than 0 and
# `with_box_refine` of the detector is `True`.
tmp_reg_preds += reference
else:
# When `layer` is 0 and `as_two_stage` of the detector
# is `False`, or when `layer` is greater than 0 and
# `with_box_refine` of the detector is `False`.
assert reference.shape[-1] == 2
tmp_reg_preds[..., :2] += reference
outputs_coord = tmp_reg_preds.sigmoid()
all_layers_outputs_classes.append(outputs_class)
all_layers_outputs_coords.append(outputs_coord)
all_layers_outputs_classes = torch.stack(all_layers_outputs_classes)
all_layers_outputs_coords = torch.stack(all_layers_outputs_coords)
return all_layers_outputs_classes, all_layers_outputs_coords
def loss(self, hidden_states: Tensor, references: List[Tensor],
enc_outputs_class: Tensor, enc_outputs_coord: Tensor,
batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the queries of the upstream network.
Args:
hidden_states (Tensor): Hidden states output from each decoder
layer, has shape (num_decoder_layers, num_queries, bs, dim).
references (list[Tensor]): List of the reference from the decoder.
The first reference is the `init_reference` (initial) and the
other num_decoder_layers(6) references are `inter_references`
(intermediate). The `init_reference` has shape (bs,
num_queries, 4) when `as_two_stage` of the detector is `True`,
otherwise (bs, num_queries, 2). Each `inter_reference` has
shape (bs, num_queries, 4) when `with_box_refine` of the
detector is `True`, otherwise (bs, num_queries, 2). The
coordinates are arranged as (cx, cy) when the last dimension is
2, and (cx, cy, w, h) when it is 4.
enc_outputs_class (Tensor): The score of each point on encode
feature map, has shape (bs, num_feat_points, cls_out_channels).
Only when `as_two_stage` is `True` it would be passed in,
otherwise it would be `None`.
enc_outputs_coord (Tensor): The proposal generate from the encode
feature map, has shape (bs, num_feat_points, 4) with the last
dimension arranged as (cx, cy, w, h). Only when `as_two_stage`
is `True` it would be passed in, otherwise it would be `None`.
batch_data_samples (list[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
batch_gt_instances = []
batch_img_metas = []
for data_sample in batch_data_samples:
batch_img_metas.append(data_sample.metainfo)
batch_gt_instances.append(data_sample.gt_instances)
outs = self(hidden_states, references)
loss_inputs = outs + (enc_outputs_class, enc_outputs_coord,
batch_gt_instances, batch_img_metas)
losses = self.loss_by_feat(*loss_inputs)
return losses
def loss_by_feat(
self,
all_layers_cls_scores: Tensor,
all_layers_bbox_preds: Tensor,
enc_cls_scores: Tensor,
enc_bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None
) -> Dict[str, Tensor]:
"""Loss function.
Args:
all_layers_cls_scores (Tensor): Classification scores of all
decoder layers, has shape (num_decoder_layers, bs, num_queries,
cls_out_channels).
all_layers_bbox_preds (Tensor): Regression outputs of all decoder
layers. Each is a 4D-tensor with normalized coordinate format
(cx, cy, w, h) and has shape (num_decoder_layers, bs,
num_queries, 4) with the last dimension arranged as
(cx, cy, w, h).
enc_cls_scores (Tensor): The score of each point on encode
feature map, has shape (bs, num_feat_points, cls_out_channels).
                Only when `as_two_stage` is `True` it would be passed in,
                otherwise it would be `None`.
enc_bbox_preds (Tensor): The proposal generate from the encode
feature map, has shape (bs, num_feat_points, 4) with the last
dimension arranged as (cx, cy, w, h). Only when `as_two_stage`
is `True` it would be passed in, otherwise it would be `None`.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
loss_dict = super().loss_by_feat(all_layers_cls_scores,
all_layers_bbox_preds,
batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
# loss of proposal generated from encode feature map.
if enc_cls_scores is not None:
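            # encoder proposals are class-agnostic: map every gt label to the
            # single foreground class 0 before computing the auxiliary loss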
proposal_gt_instances = copy.deepcopy(batch_gt_instances)
for i in range(len(proposal_gt_instances)):
proposal_gt_instances[i].labels = torch.zeros_like(
proposal_gt_instances[i].labels)
enc_loss_cls, enc_losses_bbox, enc_losses_iou = \
self.loss_by_feat_single(
enc_cls_scores, enc_bbox_preds,
batch_gt_instances=proposal_gt_instances,
batch_img_metas=batch_img_metas)
loss_dict['enc_loss_cls'] = enc_loss_cls
loss_dict['enc_loss_bbox'] = enc_losses_bbox
loss_dict['enc_loss_iou'] = enc_losses_iou
return loss_dict
def predict(self,
hidden_states: Tensor,
references: List[Tensor],
batch_data_samples: SampleList,
rescale: bool = True) -> InstanceList:
"""Perform forward propagation and loss calculation of the detection
head on the queries of the upstream network.
Args:
hidden_states (Tensor): Hidden states output from each decoder
layer, has shape (num_decoder_layers, num_queries, bs, dim).
references (list[Tensor]): List of the reference from the decoder.
The first reference is the `init_reference` (initial) and the
other num_decoder_layers(6) references are `inter_references`
(intermediate). The `init_reference` has shape (bs,
num_queries, 4) when `as_two_stage` of the detector is `True`,
otherwise (bs, num_queries, 2). Each `inter_reference` has
shape (bs, num_queries, 4) when `with_box_refine` of the
detector is `True`, otherwise (bs, num_queries, 2). The
coordinates are arranged as (cx, cy) when the last dimension is
2, and (cx, cy, w, h) when it is 4.
batch_data_samples (list[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool, optional): If `True`, return boxes in original
image space. Defaults to `True`.
Returns:
list[obj:`InstanceData`]: Detection results of each image
after the post process.
"""
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
outs = self(hidden_states, references)
predictions = self.predict_by_feat(
*outs, batch_img_metas=batch_img_metas, rescale=rescale)
return predictions
def predict_by_feat(self,
all_layers_cls_scores: Tensor,
all_layers_bbox_preds: Tensor,
batch_img_metas: List[Dict],
rescale: bool = False) -> InstanceList:
"""Transform a batch of output features extracted from the head into
bbox results.
Args:
all_layers_cls_scores (Tensor): Classification scores of all
decoder layers, has shape (num_decoder_layers, bs, num_queries,
cls_out_channels).
all_layers_bbox_preds (Tensor): Regression outputs of all decoder
layers. Each is a 4D-tensor with normalized coordinate format
(cx, cy, w, h) and shape (num_decoder_layers, bs, num_queries,
4) with the last dimension arranged as (cx, cy, w, h).
batch_img_metas (list[dict]): Meta information of each image.
rescale (bool, optional): If `True`, return boxes in original
image space. Default `False`.
Returns:
list[obj:`InstanceData`]: Detection results of each image
after the post process.
"""
cls_scores = all_layers_cls_scores[-1]
bbox_preds = all_layers_bbox_preds[-1]
result_list = []
for img_id in range(len(batch_img_metas)):
cls_score = cls_scores[img_id]
bbox_pred = bbox_preds[img_id]
img_meta = batch_img_metas[img_id]
results = self._predict_by_feat_single(cls_score, bbox_pred,
img_meta, rescale)
result_list.append(results)
return result_list
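# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the per-layer box
# update performed in ``forward`` above, in isolation. ``reference`` is in
# normalized coordinates and ``reg_delta`` is the raw output of one of the
# ``reg_branches``. The helper name is an assumption for demonstration only.
def _demo_box_refine(reference: Tensor, reg_delta: Tensor) -> Tensor:
    """One refinement step in normalized (cx, cy, w, h) space."""
    ref_logit = inverse_sigmoid(reference)
    if reference.shape[-1] == 4:
        reg_delta = reg_delta + ref_logit
    else:  # 2-d references only carry the box center (cx, cy)
        reg_delta = reg_delta.clone()
        reg_delta[..., :2] += ref_logit
    return reg_delta.sigmoid()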
| 15,707 | 46.744681 | 79 | py |