repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
DSLA-DSLA | DSLA-DSLA/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py | _base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
| 572 | 27.65 | 79 | py |
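A minimal sketch of how the `_base_` inheritance above resolves, assuming mmcv is installed and this script runs from the repo root so the config paths exist. Keys set in this file, such as the depth-50 backbone, override what is inherited from the ResNet-18 base config; untouched keys pass through unchanged.
# Hedged sketch: inspect the merged config produced by `_base_` inheritance.
from mmcv import Config

cfg = Config.fromfile('configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py')
print(cfg.model.backbone.depth)                # 50: this file's override of the r18 base
print(cfg.model.backbone.init_cfg.checkpoint)  # torchvision://resnet50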
DSLA-DSLA | DSLA-DSLA/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py | _base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),  # 'tempearture' (sic) matches the misspelled kwarg in mmdet's NormedLinear
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
| 3,783 | 37.222222 | 79 | py |
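The `p` and `q` hyper-parameters of `SeesawLoss` above control two re-weighting factors on the negative-class gradients: mitigation down-weights the punishment a frequent ground-truth class inflicts on rarer classes, and compensation re-amplifies it for confidently mis-predicted classes. A simplified NumPy sketch of that re-weighting, illustrative only and not mmdet's implementation:
import numpy as np

def seesaw_weights(cum_counts, probs, label, p=0.8, q=2.0):
    """Per-class weights on the negative logits for one sample.

    cum_counts: cumulative instance counts per class seen so far.
    probs: softmax probabilities predicted for this sample.
    label: index of the ground-truth class.
    """
    num_classes = len(cum_counts)
    weights = np.ones(num_classes)
    for j in range(num_classes):
        if j == label:
            continue
        # Mitigation: soften gradients onto rarer classes (N_j < N_label).
        if cum_counts[j] < cum_counts[label]:
            weights[j] *= (cum_counts[j] / cum_counts[label]) ** p
        # Compensation: re-amplify confidently mis-predicted classes.
        if probs[j] > probs[label]:
            weights[j] *= (probs[j] / probs[label]) ** q
    return weights

print(seesaw_weights(np.array([1000., 10., 500.]),
                     np.array([0.6, 0.3, 0.1]), label=0))
# -> [1.0, ~0.025, ~0.574]: the rarer classes receive smaller negative weights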
DSLA-DSLA | DSLA-DSLA/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py | _base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 231 | 32.142857 | 75 | py |
DSLA-DSLA | DSLA-DSLA/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py | _base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 227 | 31.571429 | 71 | py |
DSLA-DSLA | DSLA-DSLA/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py | _base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 257 | 35.857143 | 101 | py |
DSLA-DSLA | DSLA-DSLA/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py | _base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 253 | 35.285714 | 97 | py |
DSLA-DSLA | DSLA-DSLA/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py | _base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),  # 'tempearture' (sic) matches the misspelled kwarg in mmdet's NormedLinear
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_train.json',
img_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root,
pipeline=test_pipeline))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
| 4,807 | 35.150376 | 79 | py |
DSLA-DSLA | DSLA-DSLA/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py | _base_ = './tood_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 447 | 25.352941 | 76 | py |
DSLA-DSLA | DSLA-DSLA/configs/tood/tood_r50_fpn_1x_coco.py | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='TOOD',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='TOODHead',
num_classes=80,
in_channels=256,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
train_cfg=dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# custom hooks
custom_hooks = [dict(type='SetEpochInfoHook')]
| 2,306 | 29.76 | 79 | py |
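TOOD switches from `ATSSAssigner` to `TaskAlignedAssigner` once training passes `initial_epoch=4`, which is why the config registers `SetEpochInfoHook`. A minimal sketch of what such a hook does with the mmcv runner API, mirroring mmdet's hook in spirit; the `set_epoch` call names the mechanism, not a verbatim copy of mmdet's code:
from mmcv.runner import HOOKS, Hook

@HOOKS.register_module(name='ToySetEpochInfoHook')
class ToySetEpochInfoHook(Hook):
    """Push the current epoch into the model before each training epoch."""

    def before_train_epoch(self, runner):
        model = runner.model
        if hasattr(model, 'module'):  # unwrap (MM)DataParallel wrappers
            model = model.module
        # The head compares this against train_cfg.initial_epoch to choose
        # between the initial assigner and the task-aligned assigner.
        model.set_epoch(runner.epoch)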
DSLA-DSLA | DSLA-DSLA/configs/tood/tood_r101_fpn_mstrain_2x_coco.py | _base_ = './tood_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 201 | 24.25 | 61 | py |
DSLA-DSLA | DSLA-DSLA/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=50,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
| 544 | 27.684211 | 66 | py |
DSLA-DSLA | DSLA-DSLA/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
| 561 | 27.1 | 67 | py |
DSLA-DSLA | DSLA-DSLA/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py | _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws')))
| 546 | 27.789474 | 67 | py |
DSLA-DSLA | DSLA-DSLA/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py | _base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
type='ResNeXt',
depth=50,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws')))
| 559 | 27 | 66 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py | _base_ = './ga_faster_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 419 | 27 | 76 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py | _base_ = './ga_faster_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 419 | 27 | 76 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py | _base_ = './ga_retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 422 | 27.2 | 76 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py | _base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| 416 | 26.8 | 76 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py | _base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| 236 | 25.333333 | 67 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py | _base_ = './ga_retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 422 | 27.2 | 76 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py | _base_ = './ga_faster_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| 222 | 26.875 | 67 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py | _base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
| 416 | 26.8 | 76 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py | _base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
train=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| 2,407 | 35.484848 | 78 | py |
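GA-Fast R-CNN trains on precomputed GA-RPN proposals loaded by `LoadProposals`. A hedged sketch of the `.pkl` format the `proposal_file` entries above are expected to hold: an mmcv pickle dump containing one (N, 4) or (N, 5) float array of [x1, y1, x2, y2(, score)] boxes per image, in dataset order. The dummy file below is only for illustration; real files come from a trained GA-RPN.
import mmcv
import numpy as np

proposals = [np.array([[10., 20., 200., 240., 0.99]], dtype=np.float32)
             for _ in range(5)]  # one entry per image in the dataset
mmcv.dump(proposals, 'dummy_proposals.pkl')
print(len(mmcv.load('dummy_proposals.pkl')))  # 5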
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py | _base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5),
rpn_proposal=dict(nms_post=1000, max_per_img=300),
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| 2,408 | 35.5 | 77 | py |
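Guided anchoring pairs two anchor generators: the `approx` generator tiles the usual 9 anchors per location (3 octave scales x 3 ratios) as shape targets, while the `square` generator tiles a single base anchor whose width and height the GA branch then predicts. A short sketch of the two generators configured above, assuming mmdet 2.x is installed:
from mmdet.core import build_anchor_generator

approx = build_anchor_generator(
    dict(type='AnchorGenerator', octave_base_scale=8, scales_per_octave=3,
         ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]))
square = build_anchor_generator(
    dict(type='AnchorGenerator', ratios=[1.0], scales=[8],
         strides=[4, 8, 16, 32, 64]))
print(approx.base_anchors[0].shape)  # torch.Size([9, 4]): 9 anchors per cell
print(square.base_anchors[0].shape)  # torch.Size([1, 4]): one square anchor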
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py | _base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
| 225 | 27.25 | 67 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py | _base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| 2,055 | 31.634921 | 74 | py |
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py | _base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5),
bbox_head=dict(
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
center_ratio=0.2,
ignore_ratio=0.5,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 960)],
keep_ratio=True,
multiscale_mode='range'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
| 5,095 | 28.976471 | 74 | py |
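The `lr_config` above means: linear warmup from `lr * warmup_ratio` over the first 500 iterations, then the base rate with x0.1 decays after epochs 16 and 22 of the 24-epoch run. A plain-Python sketch of that schedule; the epoch-0 guard is an approximation of mmcv's purely per-iteration warmup logic:
def lr_at(epoch, it, base_lr=0.01, warmup_iters=500, warmup_ratio=1.0 / 3,
          steps=(16, 22), gamma=0.1):
    # Step decay: multiply by gamma for each milestone already passed.
    lr = base_lr * gamma ** sum(epoch >= s for s in steps)
    # Linear warmup over the first iterations of training.
    if epoch == 0 and it < warmup_iters:
        k = (1 - it / warmup_iters) * (1 - warmup_ratio)
        lr = lr * (1 - k)
    return lr

print(lr_at(0, 0))   # ~0.0033: warmup start at lr/3
print(lr_at(8, 0))   # 0.01: base learning rate
print(lr_at(17, 0))  # 0.001: after the epoch-16 decay
print(lr_at(23, 0))  # 0.0001: after the epoch-22 decay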
DSLA-DSLA | DSLA-DSLA/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py | _base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| 2,028 | 33.389831 | 74 | py |
DSLA-DSLA | DSLA-DSLA/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
| 3,719 | 29.743802 | 139 | py |
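`fp16 = dict(loss_scale=512.)` asks mmcv's `Fp16OptimizerHook` for static loss scaling: the loss is multiplied by 512 before the backward pass so small half-precision gradients do not underflow, and gradients are unscaled before the optimizer step. A minimal pure-PyTorch sketch of the idea, not mmcv's actual hook:
import torch

w = torch.nn.Parameter(torch.ones(4))
opt = torch.optim.SGD([w], lr=0.01)
scale = 512.0

loss = (w * 1e-5).sum()    # gradients this small would underflow in fp16
(loss * scale).backward()  # scaled backward pass keeps them representable
w.grad /= scale            # unscale before the optimizer step
opt.step()
print(w.grad)              # tensor([1e-05, 1e-05, 1e-05, 1e-05])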
DSLA-DSLA | DSLA-DSLA/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
| 3,741 | 29.672131 | 138 | py |
DSLA-DSLA | DSLA-DSLA/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py | _base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 216 | 26.125 | 61 | py |
DSLA-DSLA | DSLA-DSLA/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py | _base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
| 235 | 28.5 | 78 | py |
DSLA-DSLA | DSLA-DSLA/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 100
model = dict(
type='SparseRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=4),
rpn_head=dict(
type='EmbeddingRPNHead',
num_proposals=num_proposals,
proposal_feature_channel=256),
roi_head=dict(
type='SparseRoIHead',
num_stages=num_stages,
stage_loss_weights=[1] * num_stages,
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='DIIHead',
num_classes=80,
num_ffn_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
in_channels=256,
dropout=0.0,
ffn_act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=False,
target_means=[0., 0., 0., 0.],
target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
]),
# training and testing settings
train_cfg=dict(
rpn=None,
rcnn=[
dict(
assigner=dict(
type='HungarianAssigner',
cls_cost=dict(type='FocalLossCost', weight=2.0),
reg_cost=dict(type='BBoxL1Cost', weight=5.0),
iou_cost=dict(type='IoUCost', iou_mode='giou',
weight=2.0)),
sampler=dict(type='PseudoSampler'),
pos_weight=1) for _ in range(num_stages)
]),
test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals)))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001)
optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
| 3,469 | 35.145833 | 79 | py |
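Sparse R-CNN's `HungarianAssigner` solves a one-to-one bipartite matching between the learned proposals and the ground-truth boxes; mmdet builds the cost matrix from the weighted `FocalLossCost`, `BBoxL1Cost`, and `IoUCost` terms above. A toy sketch with SciPy on a made-up cost matrix:
import numpy as np
from scipy.optimize import linear_sum_assignment

# Rows: 3 learned proposals; columns: 3 ground-truth boxes. The entries stand
# in for cls_cost * 2.0 + reg_cost * 5.0 + iou_cost * 2.0.
cost = np.array([[0.2, 1.5, 0.9],
                 [1.1, 0.3, 1.4],
                 [0.8, 0.7, 0.1]])
rows, cols = linear_sum_assignment(cost)
print(list(zip(rows.tolist(), cols.tolist())))  # [(0, 0), (1, 1), (2, 2)]
print(cost[rows, cols].sum())                   # 0.6, the minimal total cost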
DSLA-DSLA | DSLA-DSLA/docs/en/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'en'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
| 3,373 | 28.596491 | 79 | py |
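`get_version()` above reads `__version__` by exec-ing the version file's source, which avoids importing mmdet (and its heavy dependencies) just to read a string. A standalone sketch of the same trick, with an inline stand-in for `mmdet/version.py`:
# Hedged sketch: the version source below stands in for mmdet/version.py.
version_src = "__version__ = '2.19.0'\n"

namespace = {}
exec(compile(version_src, 'version.py', 'exec'), namespace)
print(namespace['__version__'])  # 2.19.0, obtained without importing mmdet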
DSLA-DSLA | DSLA-DSLA/docs/zh_cn/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
# Specify the language of shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
| 3,395 | 28.275862 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/apis/inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import numpy as np
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): The device where the model is put. Default: 'cuda:0'.
        cfg_options (dict, optional): Options to override some settings in the
            used config.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if cfg_options is not None:
config.merge_from_dict(cfg_options)
config.model.pretrained = None
config.model.train_cfg = None
model = build_detector(config.model, test_cfg=config.get('test_cfg'))
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage:
"""Deprecated.
A simple pipeline to load image.
"""
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
warnings.simplefilter('once')
warnings.warn('`LoadImage` is deprecated and will be removed in '
'future releases. You may use `LoadImageFromWebcam` '
'from `mmdet.datasets.pipelines.` instead.')
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, imgs):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
Either image files or loaded images.
Returns:
If imgs is a list or tuple, the same length list type results
will be returned, otherwise return the detection results directly.
"""
if isinstance(imgs, (list, tuple)):
is_batch = True
else:
imgs = [imgs]
is_batch = False
cfg = model.cfg
device = next(model.parameters()).device # model device
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
# just get the actual data from DataContainer
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
# forward the model
with torch.no_grad():
results = model(return_loss=False, rescale=True, **data)
if not is_batch:
return results[0]
else:
return results
async def async_inference_detector(model, imgs):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray]): Either image files or
            loaded images.
Returns:
Awaitable detection results.
"""
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
cfg = model.cfg
device = next(model.parameters()).device # model device
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
# just get the actual data from DataContainer
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
results = await model.aforward_test(rescale=True, **data)
return results
def show_result_pyplot(model,
img,
result,
score_thr=0.3,
title='result',
wait_time=0):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
        score_thr (float): Minimum score of bboxes and masks to be visualized.
title (str): Title of the pyplot figure.
wait_time (float): Value of waitKey param.
Default: 0.
"""
if hasattr(model, 'module'):
model = model.module
model.show_result(
img,
result,
score_thr=score_thr,
show=True,
wait_time=wait_time,
win_name=title,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241))
| 7,980 | 32.116183 | 79 | py |
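A hedged usage sketch for the API above, assuming mmdetection is installed and a config/checkpoint pair exists locally; both paths below are placeholders:
from mmdet.apis import inference_detector, init_detector, show_result_pyplot

config_file = 'configs/tood/tood_r50_fpn_1x_coco.py'  # any config in this repo
checkpoint = 'checkpoints/tood_r50_fpn_1x_coco.pth'   # hypothetical local path

model = init_detector(config_file, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # single image -> single result
show_result_pyplot(model, 'demo.jpg', result, score_thr=0.3)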
DSLA-DSLA | DSLA-DSLA/mmdet/apis/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    show_score_thr=0.3):
    """Test model with a single gpu.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        show (bool): Whether to show the detection results. Default: False.
        out_dir (str, optional): Directory where painted images are saved.
        show_score_thr (float): Minimum score of bboxes to be shown.
            Default: 0.3.

    Returns:
        list: The prediction results.
    """
    model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
batch_size = len(result)
if show or out_dir:
if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
img_tensor = data['img'][0]
else:
img_tensor = data['img'][0].data[0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result[i],
show=show,
out_file=out_file,
score_thr=show_score_thr)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
    This method tests the model with multiple GPUs and collects the results
    under two different modes: GPU and CPU. By setting ``gpu_collect=True``,
    it encodes results to GPU tensors and uses GPU communication to collect
    them. In CPU mode, it saves the results from each GPU to ``tmpdir`` and
    lets the rank-0 worker collect them.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| 6,874 | 34.807292 | 79 | py |
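`encode_mask_results` (imported at the top of this file) run-length encodes binary masks with pycocotools so that collecting results across ranks moves compact bytes instead of full HxW arrays. A small round-trip sketch, assuming pycocotools is installed:
import numpy as np
import pycocotools.mask as mask_util

mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1
rle = mask_util.encode(np.asfortranarray(mask))  # compact {'size', 'counts'}
print(rle['size'])                  # [4, 4]
print(mask_util.decode(rle).sum())  # 4: the encoding round-trips losslessly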
DSLA-DSLA | DSLA-DSLA/mmdet/apis/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner,
Fp16OptimizerHook, OptimizerHook, build_optimizer,
build_runner, get_dist_info)
from mmdet.core import DistEvalHook, EvalHook
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.utils import find_latest_checkpoint, get_root_logger
def init_random_seed(seed=None, device='cuda'):
"""Initialize random seed.
If the seed is not set, the seed will be automatically randomized,
and then broadcast to all processes to prevent some potential bugs.
Args:
seed (int, Optional): The seed. Default to None.
device (str): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is not None:
return seed
# Make sure all ranks share the same random seed to prevent
# some potential bugs. Please refer to
# https://github.com/open-mmlab/mmdetection/issues/6339
rank, world_size = get_dist_info()
seed = np.random.randint(2**31)
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def train_detector(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
logger = get_root_logger(log_level=cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
if 'imgs_per_gpu' in cfg.data:
logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
'Please use "samples_per_gpu" instead')
if 'samples_per_gpu' in cfg.data:
logger.warning(
f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f'={cfg.data.imgs_per_gpu} is used in this experiment')
else:
logger.warning(
'Automatically set "samples_per_gpu"="imgs_per_gpu"='
                f'{cfg.data.imgs_per_gpu} in this experiment')
cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[
'type']
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# `num_gpus` will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
runner_type=runner_type,
persistent_workers=cfg.data.get('persistent_workers', False))
for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
if 'runner' not in cfg:
cfg.runner = {
'type': 'EpochBasedRunner',
'max_epochs': cfg.total_epochs
}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
else:
if 'total_epochs' in cfg:
assert cfg.total_epochs == cfg.runner.max_epochs
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed and 'type' not in cfg.optimizer_config:
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(
cfg.lr_config,
optimizer_config,
cfg.checkpoint_config,
cfg.log_config,
cfg.get('momentum_config', None),
custom_hooks_config=cfg.get('custom_hooks', None))
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
# Support batch_size > 1 in validation
val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
if val_samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.val.pipeline = replace_ImageToTensor(
cfg.data.val.pipeline)
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=val_samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
eval_hook = DistEvalHook if distributed else EvalHook
# In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the
# priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.
runner.register_hook(
eval_hook(val_dataloader, **eval_cfg), priority='LOW')
resume_from = None
if cfg.resume_from is None and cfg.get('auto_resume'):
resume_from = find_latest_checkpoint(cfg.work_dir)
if resume_from is not None:
cfg.resume_from = resume_from
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
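# Call sketch (hypothetical ``cfg`` object; a minimal sketch of how a
# training entry point usually invokes this function):
# >>> from mmdet.models import build_detector
# >>> model = build_detector(cfg.model)
# >>> datasets = [build_dataset(cfg.data.train)]
# >>> train_detector(model, datasets, cfg, distributed=False, validate=True)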
| 7,445 | 34.457143 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/evaluation/eval_hooks.py | # Copyright (c) OpenMMLab. All rights reserved.
import bisect
import os.path as osp
import mmcv
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
assert mmcv.is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend(
[dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend(
[dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return dynamic_milestones, dynamic_intervals
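# Worked example (illustrative): with a base interval of 5 and
# dynamic_interval_list=[(8, 2), (11, 1)], evaluation runs every 5 epochs at
# first, every 2 epochs once progress reaches 8, and every epoch from 11:
# >>> _calc_dynamic_intervals(5, [(8, 2), (11, 1)])
# ([0, 8, 11], [5, 2, 1])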
class EvalHook(BaseEvalHook):
def __init__(self, *args, dynamic_intervals=None, **kwargs):
super(EvalHook, self).__init__(*args, **kwargs)
self.use_dynamic_intervals = dynamic_intervals is not None
if self.use_dynamic_intervals:
self.dynamic_milestones, self.dynamic_intervals = \
_calc_dynamic_intervals(self.interval, dynamic_intervals)
def _decide_interval(self, runner):
if self.use_dynamic_intervals:
progress = runner.epoch if self.by_epoch else runner.iter
step = bisect.bisect(self.dynamic_milestones, (progress + 1))
# Dynamically modify the evaluation interval
self.interval = self.dynamic_intervals[step - 1]
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training by epoch."""
self._decide_interval(runner)
super().before_train_epoch(runner)
def before_train_iter(self, runner):
self._decide_interval(runner)
super().before_train_iter(runner)
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self._should_evaluate(runner):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
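# Config sketch (hypothetical values): ``dynamic_intervals`` is forwarded
# from the ``evaluation`` field of a config, e.g. evaluating every 10 epochs
# and then every epoch once epoch 285 is reached:
# evaluation = dict(interval=10, dynamic_intervals=[(285, 1)])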
# Note: Considering that MMCV's EvalHook updated its interface in V1.3.16,
# in order to avoid strong version dependency, we did not directly
# inherit EvalHook but BaseDistEvalHook.
class DistEvalHook(BaseDistEvalHook):
def __init__(self, *args, dynamic_intervals=None, **kwargs):
super(DistEvalHook, self).__init__(*args, **kwargs)
self.use_dynamic_intervals = dynamic_intervals is not None
if self.use_dynamic_intervals:
self.dynamic_milestones, self.dynamic_intervals = \
_calc_dynamic_intervals(self.interval, dynamic_intervals)
def _decide_interval(self, runner):
if self.use_dynamic_intervals:
progress = runner.epoch if self.by_epoch else runner.iter
step = bisect.bisect(self.dynamic_milestones, (progress + 1))
# Dynamically modify the evaluation interval
self.interval = self.dynamic_intervals[step - 1]
def before_train_epoch(self, runner):
"""Evaluate the model only at the start of training by epoch."""
self._decide_interval(runner)
super().before_train_epoch(runner)
def before_train_iter(self, runner):
self._decide_interval(runner)
super().before_train_iter(runner)
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffers (running_mean
        # and running_var) is not supported in PyTorch's DDP,
        # which may cause inconsistent performance of the model
        # across ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to the other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self._should_evaluate(runner):
return
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
from mmdet.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
| 4,942 | 37.92126 | 76 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/post_processing/merge_augs.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
import numpy as np
import torch
from mmcv import ConfigDict
from mmcv.ops import nms
from ..bbox import bbox_mapping_back
def merge_aug_proposals(aug_proposals, img_metas, cfg):
"""Merge augmented proposals (multiscale, flip, etc.)
Args:
aug_proposals (list[Tensor]): proposals from different testing
schemes, shape (n, 5). Note that they are not rescaled to the
original image size.
img_metas (list[dict]): list of image info dict where each dict has:
'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
cfg (dict): rpn test config.
Returns:
        Tensor: shape (n, 5), proposals with scores, corresponding to the
            original image scale.
"""
cfg = copy.deepcopy(cfg)
# deprecate arguments warning
if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
warnings.warn(
'In rpn_proposal or test_cfg, '
'nms_thr has been moved to a dict named nms as '
'iou_threshold, max_num has been renamed as max_per_img, '
'name of original arguments and the way to specify '
'iou_threshold of NMS will be deprecated.')
if 'nms' not in cfg:
cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
if 'max_num' in cfg:
if 'max_per_img' in cfg:
            assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \
                f'max_per_img at the same time, but got {cfg.max_num} ' \
                f'and {cfg.max_per_img} respectively. ' \
                f'Please delete max_num, which will be deprecated.'
else:
cfg.max_per_img = cfg.max_num
if 'nms_thr' in cfg:
        assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
            f'iou_threshold in nms and ' \
            f'nms_thr at the same time, but got ' \
            f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
            f' respectively. Please delete nms_thr, ' \
            f'which will be deprecated.'
recovered_proposals = []
for proposals, img_info in zip(aug_proposals, img_metas):
img_shape = img_info['img_shape']
scale_factor = img_info['scale_factor']
flip = img_info['flip']
flip_direction = img_info['flip_direction']
_proposals = proposals.clone()
_proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,
scale_factor, flip,
flip_direction)
recovered_proposals.append(_proposals)
aug_proposals = torch.cat(recovered_proposals, dim=0)
merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),
aug_proposals[:, -1].contiguous(),
cfg.nms.iou_threshold)
scores = merged_proposals[:, 4]
_, order = scores.sort(0, descending=True)
num = min(cfg.max_per_img, merged_proposals.shape[0])
order = order[:num]
merged_proposals = merged_proposals[order, :]
return merged_proposals
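# Config migration sketch, matching the compatibility code above: the
# deprecated form
#     rpn=dict(nms_thr=0.7, max_num=1000)
# is rewritten on the fly into the current form
#     rpn=dict(nms=dict(type='nms', iou_threshold=0.7), max_per_img=1000)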
def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
"""Merge augmented detection bboxes and scores.
Args:
aug_bboxes (list[Tensor]): shape (n, 4*#class)
aug_scores (list[Tensor] or None): shape (n, #class)
        img_metas (list[list[dict]]): meta info of each augmented view,
            containing 'img_shape', 'scale_factor', 'flip' and
            'flip_direction'.
rcnn_test_cfg (dict): rcnn test config.
Returns:
tuple: (bboxes, scores)
"""
recovered_bboxes = []
for bboxes, img_info in zip(aug_bboxes, img_metas):
img_shape = img_info[0]['img_shape']
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
flip_direction = img_info[0]['flip_direction']
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
flip_direction)
recovered_bboxes.append(bboxes)
bboxes = torch.stack(recovered_bboxes).mean(dim=0)
if aug_scores is None:
return bboxes
else:
scores = torch.stack(aug_scores).mean(dim=0)
return bboxes, scores
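# Shape sketch (illustrative values): each entry of ``img_metas`` is the meta
# list of one augmented view, e.g.
# img_metas = [[dict(img_shape=(800, 1333, 3), scale_factor=1.0, flip=True,
#                    flip_direction='horizontal')], ...]
# so boxes from flipped/rescaled views are mapped back before averaging.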
def merge_aug_scores(aug_scores):
"""Merge augmented bbox scores."""
if isinstance(aug_scores[0], torch.Tensor):
return torch.mean(torch.stack(aug_scores), dim=0)
else:
return np.mean(aug_scores, axis=0)
def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
"""Merge augmented mask prediction.
Args:
aug_masks (list[ndarray]): shape (n, #class, h, w)
        img_metas (list[list[dict]]): meta info of each augmented view,
            containing 'flip' and 'flip_direction'.
rcnn_test_cfg (dict): rcnn test config.
Returns:
        ndarray: the merged masks, averaged (optionally weighted) over
            augmentations.
"""
recovered_masks = []
for mask, img_info in zip(aug_masks, img_metas):
flip = img_info[0]['flip']
if flip:
flip_direction = img_info[0]['flip_direction']
if flip_direction == 'horizontal':
mask = mask[:, :, :, ::-1]
elif flip_direction == 'vertical':
mask = mask[:, :, ::-1, :]
elif flip_direction == 'diagonal':
mask = mask[:, :, :, ::-1]
mask = mask[:, :, ::-1, :]
else:
raise ValueError(
f"Invalid flipping direction '{flip_direction}'")
recovered_masks.append(mask)
if weights is None:
merged_masks = np.mean(recovered_masks, axis=0)
else:
merged_masks = np.average(
np.array(recovered_masks), axis=0, weights=np.array(weights))
return merged_masks
| 5,790 | 36.36129 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/post_processing/bbox_nms.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops.nms import batched_nms
from mmdet.core.bbox.iou_calculators import bbox_overlaps
def multiclass_nms(multi_bboxes,
multi_scores,
score_thr,
nms_cfg,
max_num=-1,
score_factors=None,
return_inds=False):
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class+1), where the last column
contains scores of the background class, but this will be ignored.
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
        nms_cfg (dict): NMS config dict,
            e.g. ``dict(type='nms', iou_threshold=0.5)``.
max_num (int, optional): if there are more than max_num bboxes after
NMS, only top max_num will be kept. Default to -1.
score_factors (Tensor, optional): The factors multiplied to scores
before applying NMS. Default to None.
return_inds (bool, optional): Whether return the indices of kept
bboxes. Default to False.
Returns:
tuple: (dets, labels, indices (optional)), tensors of shape (k, 5),
(k), and (k). Dets are boxes with scores. Labels are 0-based.
"""
num_classes = multi_scores.size(1) - 1
# exclude background category
if multi_bboxes.shape[1] > 4:
bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
else:
bboxes = multi_bboxes[:, None].expand(
multi_scores.size(0), num_classes, 4)
scores = multi_scores[:, :-1]
labels = torch.arange(num_classes, dtype=torch.long, device=scores.device)
labels = labels.view(1, -1).expand_as(scores)
bboxes = bboxes.reshape(-1, 4)
scores = scores.reshape(-1)
labels = labels.reshape(-1)
if not torch.onnx.is_in_onnx_export():
# NonZero not supported in TensorRT
# remove low scoring boxes
valid_mask = scores > score_thr
    # multiply score_factor after thresholding to preserve more bboxes;
    # this improves mAP by ~1% for YOLOv3
if score_factors is not None:
# expand the shape to match original shape of score
score_factors = score_factors.view(-1, 1).expand(
multi_scores.size(0), num_classes)
score_factors = score_factors.reshape(-1)
scores = scores * score_factors
if not torch.onnx.is_in_onnx_export():
# NonZero not supported in TensorRT
inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]
else:
# TensorRT NMS plugin has invalid output filled with -1
# add dummy data to make detection output correct.
bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0)
scores = torch.cat([scores, scores.new_zeros(1)], dim=0)
labels = torch.cat([labels, labels.new_zeros(1)], dim=0)
if bboxes.numel() == 0:
if torch.onnx.is_in_onnx_export():
raise RuntimeError('[ONNX Error] Can not record NMS '
'as it has not been executed this time')
dets = torch.cat([bboxes, scores[:, None]], -1)
if return_inds:
return dets, labels, inds
else:
return dets, labels
dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
if max_num > 0:
dets = dets[:max_num]
keep = keep[:max_num]
if return_inds:
return dets, labels[keep], inds[keep]
else:
return dets, labels[keep]
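# Minimal usage sketch (synthetic inputs; assumes an mmcv build with the NMS
# ops available):
# >>> xy = torch.rand(100, 2) * 50
# >>> multi_bboxes = torch.cat([xy, xy + torch.rand(100, 2) * 50], dim=1)
# >>> multi_scores = torch.rand(100, 81)  # 80 classes + background column
# >>> dets, labels = multiclass_nms(
# ...     multi_bboxes, multi_scores, score_thr=0.05,
# ...     nms_cfg=dict(type='nms', iou_threshold=0.5), max_num=100)
# >>> dets.shape[-1]  # (x1, y1, x2, y2, score)
# 5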
def fast_nms(multi_bboxes,
multi_scores,
multi_coeffs,
score_thr,
iou_thr,
top_k,
max_num=-1):
"""Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.
Fast NMS allows already-removed detections to suppress other detections so
that every instance can be decided to be kept or discarded in parallel,
which is not possible in traditional NMS. This relaxation allows us to
implement Fast NMS entirely in standard GPU-accelerated matrix operations.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
multi_scores (Tensor): shape (n, #class+1), where the last column
contains scores of the background class, but this will be ignored.
multi_coeffs (Tensor): shape (n, #class*coeffs_dim).
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
iou_thr (float): IoU threshold to be considered as conflicted.
top_k (int): if there are more than top_k bboxes before NMS,
only top top_k will be kept.
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept. If -1, keep all the bboxes.
Default: -1.
Returns:
        tuple: (dets, labels, coefficients), tensors of shape (k, 5), (k,),
            and (k, coeffs_dim). Dets are boxes with scores.
            Labels are 0-based.
"""
scores = multi_scores[:, :-1].t() # [#class, n]
scores, idx = scores.sort(1, descending=True)
idx = idx[:, :top_k].contiguous()
scores = scores[:, :top_k] # [#class, topk]
num_classes, num_dets = idx.size()
boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)
iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk]
iou.triu_(diagonal=1)
iou_max, _ = iou.max(dim=1)
# Now just filter out the ones higher than the threshold
keep = iou_max <= iou_thr
# Second thresholding introduces 0.2 mAP gain at negligible time cost
keep *= scores > score_thr
# Assign each kept detection to its corresponding class
classes = torch.arange(
num_classes, device=boxes.device)[:, None].expand_as(keep)
classes = classes[keep]
boxes = boxes[keep]
coeffs = coeffs[keep]
scores = scores[keep]
# Only keep the top max_num highest scores across all classes
scores, idx = scores.sort(0, descending=True)
if max_num > 0:
idx = idx[:max_num]
scores = scores[:max_num]
classes = classes[idx]
boxes = boxes[idx]
coeffs = coeffs[idx]
cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
return cls_dets, classes, coeffs
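# Usage sketch (synthetic shapes, as in a YOLACT-style head; coeffs_dim here
# is an assumed prototype count):
# >>> n, num_classes, coeffs_dim = 200, 80, 32
# >>> xy = torch.rand(n, 2) * 50
# >>> multi_bboxes = torch.cat([xy, xy + torch.rand(n, 2) * 50], dim=1)
# >>> multi_scores = torch.rand(n, num_classes + 1)
# >>> multi_coeffs = torch.rand(n, coeffs_dim)
# >>> dets, labels, coeffs = fast_nms(
# ...     multi_bboxes, multi_scores, multi_coeffs,
# ...     score_thr=0.05, iou_thr=0.5, top_k=200, max_num=100)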
| 6,461 | 36.569767 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/post_processing/matrix_nms.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
def mask_matrix_nms(masks,
labels,
scores,
filter_thr=-1,
nms_pre=-1,
max_num=-1,
kernel='gaussian',
sigma=2.0,
mask_area=None):
"""Matrix NMS for multi-class masks.
Args:
masks (Tensor): Has shape (num_instances, h, w)
labels (Tensor): Labels of corresponding masks,
has shape (num_instances,).
scores (Tensor): Mask scores of corresponding masks,
has shape (num_instances).
filter_thr (float): Score threshold to filter the masks
after matrix nms. Default: -1, which means do not
use filter_thr.
nms_pre (int): The max number of instances to do the matrix nms.
Default: -1, which means do not use nms_pre.
max_num (int, optional): If there are more than max_num masks after
matrix, only top max_num will be kept. Default: -1, which means
do not use max_num.
kernel (str): 'linear' or 'gaussian'.
sigma (float): std in gaussian method.
mask_area (Tensor): The sum of seg_masks.
Returns:
tuple(Tensor): Processed mask results.
- scores (Tensor): Updated scores, has shape (n,).
- labels (Tensor): Remained labels, has shape (n,).
            - masks (Tensor): Remained masks, has shape (n, h, w).
            - keep_inds (Tensor): The indices of the remaining masks in the
                input masks, has shape (n,).
"""
assert len(labels) == len(masks) == len(scores)
if len(labels) == 0:
return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
0, *masks.shape[-2:]), labels.new_zeros(0)
if mask_area is None:
mask_area = masks.sum((1, 2)).float()
else:
assert len(masks) == len(mask_area)
# sort and keep top nms_pre
scores, sort_inds = torch.sort(scores, descending=True)
keep_inds = sort_inds
if nms_pre > 0 and len(sort_inds) > nms_pre:
sort_inds = sort_inds[:nms_pre]
keep_inds = keep_inds[:nms_pre]
scores = scores[:nms_pre]
masks = masks[sort_inds]
mask_area = mask_area[sort_inds]
labels = labels[sort_inds]
num_masks = len(labels)
flatten_masks = masks.reshape(num_masks, -1).float()
# inter.
inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))
expanded_mask_area = mask_area.expand(num_masks, num_masks)
# Upper triangle iou matrix.
iou_matrix = (inter_matrix /
(expanded_mask_area + expanded_mask_area.transpose(1, 0) -
inter_matrix)).triu(diagonal=1)
# label_specific matrix.
expanded_labels = labels.expand(num_masks, num_masks)
# Upper triangle label matrix.
label_matrix = (expanded_labels == expanded_labels.transpose(
1, 0)).triu(diagonal=1)
# IoU compensation
compensate_iou, _ = (iou_matrix * label_matrix).max(0)
compensate_iou = compensate_iou.expand(num_masks,
num_masks).transpose(1, 0)
# IoU decay
decay_iou = iou_matrix * label_matrix
# Calculate the decay_coefficient
if kernel == 'gaussian':
decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))
compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2))
decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
elif kernel == 'linear':
decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
decay_coefficient, _ = decay_matrix.min(0)
else:
raise NotImplementedError(
f'{kernel} kernel is not supported in matrix nms!')
# update the score.
scores = scores * decay_coefficient
if filter_thr > 0:
keep = scores >= filter_thr
keep_inds = keep_inds[keep]
if not keep.any():
return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
0, *masks.shape[-2:]), labels.new_zeros(0)
masks = masks[keep]
scores = scores[keep]
labels = labels[keep]
# sort and keep top max_num
scores, sort_inds = torch.sort(scores, descending=True)
keep_inds = keep_inds[sort_inds]
if max_num > 0 and len(sort_inds) > max_num:
sort_inds = sort_inds[:max_num]
keep_inds = keep_inds[:max_num]
scores = scores[:max_num]
masks = masks[sort_inds]
labels = labels[sort_inds]
return scores, labels, masks, keep_inds
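# Usage sketch (synthetic masks):
# >>> masks = torch.rand(10, 32, 32) > 0.5
# >>> labels = torch.randint(0, 3, (10,))
# >>> cate_scores = torch.rand(10)
# >>> scores, labels, masks, keep_inds = mask_matrix_nms(
# ...     masks, labels, cate_scores, kernel='gaussian', sigma=2.0,
# ...     filter_thr=0.05)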
| 4,622 | 36.893443 | 77 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/mask/structures.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import cv2
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.ops.roi_align import roi_align
class BaseInstanceMasks(metaclass=ABCMeta):
"""Base class for instance masks."""
@abstractmethod
def rescale(self, scale, interpolation='nearest'):
"""Rescale masks as large as possible while keeping the aspect ratio.
        For details, refer to :func:`mmcv.imrescale`.
Args:
scale (tuple[int]): The maximum size (h, w) of rescaled mask.
interpolation (str): Same as :func:`mmcv.imrescale`.
Returns:
BaseInstanceMasks: The rescaled masks.
"""
@abstractmethod
def resize(self, out_shape, interpolation='nearest'):
"""Resize masks to the given out_shape.
Args:
out_shape: Target (h, w) of resized mask.
interpolation (str): See :func:`mmcv.imresize`.
Returns:
BaseInstanceMasks: The resized masks.
"""
@abstractmethod
def flip(self, flip_direction='horizontal'):
"""Flip masks alone the given direction.
Args:
flip_direction (str): Either 'horizontal' or 'vertical'.
Returns:
BaseInstanceMasks: The flipped masks.
"""
@abstractmethod
def pad(self, out_shape, pad_val):
"""Pad masks to the given size of (h, w).
Args:
out_shape (tuple[int]): Target (h, w) of padded mask.
pad_val (int): The padded value.
Returns:
BaseInstanceMasks: The padded masks.
"""
@abstractmethod
def crop(self, bbox):
"""Crop each mask by the given bbox.
Args:
bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).
Return:
BaseInstanceMasks: The cropped masks.
"""
@abstractmethod
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device,
interpolation='bilinear',
binarize=True):
"""Crop and resize masks by the given bboxes.
This function is mainly used in mask targets computation.
        It first aligns the masks to the bboxes by assigned_inds, then crops
        each mask by its assigned bbox and resizes it to (mask_h, mask_w).
Args:
bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
out_shape (tuple[int]): Target (h, w) of resized mask
inds (ndarray): Indexes to assign masks to each bbox,
shape (N,) and values should be between [0, num_masks - 1].
device (str): Device of bboxes
interpolation (str): See `mmcv.imresize`
            binarize (bool): If True, fractional values are rounded to 0 or 1
                after the resize operation. If False and the mask type does
                not support it, an error is raised. Defaults to True.
Return:
BaseInstanceMasks: the cropped and resized masks.
"""
@abstractmethod
def expand(self, expanded_h, expanded_w, top, left):
"""see :class:`Expand`."""
@property
@abstractmethod
def areas(self):
"""ndarray: areas of each instance."""
@abstractmethod
def to_ndarray(self):
"""Convert masks to the format of ndarray.
Return:
ndarray: Converted masks in the format of ndarray.
"""
@abstractmethod
def to_tensor(self, dtype, device):
"""Convert masks to the format of Tensor.
Args:
dtype (str): Dtype of converted mask.
device (torch.device): Device of converted masks.
Returns:
Tensor: Converted masks in the format of Tensor.
"""
@abstractmethod
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=0,
interpolation='bilinear'):
"""Translate the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
fill_val (int | float): Border value. Default 0.
interpolation (str): Same as :func:`mmcv.imtranslate`.
Returns:
Translated masks.
"""
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Shear the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
magnitude (int | float): The magnitude used for shear.
direction (str): The shear direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border. Default 0.
interpolation (str): Same as in :func:`mmcv.imshear`.
Returns:
ndarray: Sheared masks.
"""
@abstractmethod
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""Rotate the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
angle (int | float): Rotation angle in degrees. Positive values
mean counter-clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the
rotation in source image. If not specified, the center of
the image will be used.
scale (int | float): Isotropic scale factor.
fill_val (int | float): Border value. Default 0 for masks.
Returns:
Rotated masks.
"""
class BitmapMasks(BaseInstanceMasks):
"""This class represents masks in the form of bitmaps.
Args:
masks (ndarray): ndarray of masks in shape (N, H, W), where N is
the number of objects.
height (int): height of masks
width (int): width of masks
Example:
>>> from mmdet.core.mask.structures import * # NOQA
>>> num_masks, H, W = 3, 32, 32
>>> rng = np.random.RandomState(0)
        >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.uint8)
>>> self = BitmapMasks(masks, height=H, width=W)
>>> # demo crop_and_resize
>>> num_boxes = 5
>>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
>>> out_shape = (14, 14)
>>> inds = torch.randint(0, len(self), size=(num_boxes,))
>>> device = 'cpu'
>>> interpolation = 'bilinear'
>>> new = self.crop_and_resize(
... bboxes, out_shape, inds, device, interpolation)
>>> assert len(new) == num_boxes
>>> assert new.height, new.width == out_shape
"""
def __init__(self, masks, height, width):
self.height = height
self.width = width
if len(masks) == 0:
self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
else:
assert isinstance(masks, (list, np.ndarray))
if isinstance(masks, list):
assert isinstance(masks[0], np.ndarray)
assert masks[0].ndim == 2 # (H, W)
else:
assert masks.ndim == 3 # (N, H, W)
self.masks = np.stack(masks).reshape(-1, height, width)
assert self.masks.shape[1] == self.height
assert self.masks.shape[2] == self.width
def __getitem__(self, index):
"""Index the BitmapMask.
Args:
index (int | ndarray): Indices in the format of integer or ndarray.
Returns:
:obj:`BitmapMasks`: Indexed bitmap masks.
"""
masks = self.masks[index].reshape(-1, self.height, self.width)
return BitmapMasks(masks, self.height, self.width)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'num_masks={len(self.masks)}, '
s += f'height={self.height}, '
s += f'width={self.width})'
return s
def __len__(self):
"""Number of masks."""
return len(self.masks)
def rescale(self, scale, interpolation='nearest'):
"""See :func:`BaseInstanceMasks.rescale`."""
if len(self.masks) == 0:
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
else:
rescaled_masks = np.stack([
mmcv.imrescale(mask, scale, interpolation=interpolation)
for mask in self.masks
])
height, width = rescaled_masks.shape[1:]
return BitmapMasks(rescaled_masks, height, width)
def resize(self, out_shape, interpolation='nearest'):
"""See :func:`BaseInstanceMasks.resize`."""
if len(self.masks) == 0:
resized_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
resized_masks = np.stack([
mmcv.imresize(
mask, out_shape[::-1], interpolation=interpolation)
for mask in self.masks
])
return BitmapMasks(resized_masks, *out_shape)
def flip(self, flip_direction='horizontal'):
"""See :func:`BaseInstanceMasks.flip`."""
assert flip_direction in ('horizontal', 'vertical', 'diagonal')
if len(self.masks) == 0:
flipped_masks = self.masks
else:
flipped_masks = np.stack([
mmcv.imflip(mask, direction=flip_direction)
for mask in self.masks
])
return BitmapMasks(flipped_masks, self.height, self.width)
def pad(self, out_shape, pad_val=0):
"""See :func:`BaseInstanceMasks.pad`."""
if len(self.masks) == 0:
padded_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
padded_masks = np.stack([
mmcv.impad(mask, shape=out_shape, pad_val=pad_val)
for mask in self.masks
])
return BitmapMasks(padded_masks, *out_shape)
def crop(self, bbox):
"""See :func:`BaseInstanceMasks.crop`."""
assert isinstance(bbox, np.ndarray)
assert bbox.ndim == 1
# clip the boundary
bbox = bbox.copy()
bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
if len(self.masks) == 0:
cropped_masks = np.empty((0, h, w), dtype=np.uint8)
else:
cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
return BitmapMasks(cropped_masks, h, w)
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device='cpu',
interpolation='bilinear',
binarize=True):
"""See :func:`BaseInstanceMasks.crop_and_resize`."""
if len(self.masks) == 0:
empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
return BitmapMasks(empty_masks, *out_shape)
# convert bboxes to tensor
if isinstance(bboxes, np.ndarray):
bboxes = torch.from_numpy(bboxes).to(device=device)
if isinstance(inds, np.ndarray):
inds = torch.from_numpy(inds).to(device=device)
num_bbox = bboxes.shape[0]
fake_inds = torch.arange(
num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5
rois = rois.to(device=device)
if num_bbox > 0:
gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
0, inds).to(dtype=rois.dtype)
targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
1.0, 0, 'avg', True).squeeze(1)
if binarize:
resized_masks = (targets >= 0.5).cpu().numpy()
else:
resized_masks = targets.cpu().numpy()
else:
resized_masks = []
return BitmapMasks(resized_masks, *out_shape)
def expand(self, expanded_h, expanded_w, top, left):
"""See :func:`BaseInstanceMasks.expand`."""
if len(self.masks) == 0:
expanded_mask = np.empty((0, expanded_h, expanded_w),
dtype=np.uint8)
else:
expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
dtype=np.uint8)
expanded_mask[:, top:top + self.height,
left:left + self.width] = self.masks
return BitmapMasks(expanded_mask, expanded_h, expanded_w)
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=0,
interpolation='bilinear'):
"""Translate the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
fill_val (int | float): Border value. Default 0 for masks.
interpolation (str): Same as :func:`mmcv.imtranslate`.
Returns:
BitmapMasks: Translated BitmapMasks.
Example:
>>> from mmdet.core.mask.structures import BitmapMasks
>>> self = BitmapMasks.random(dtype=np.uint8)
>>> out_shape = (32, 32)
>>> offset = 4
>>> direction = 'horizontal'
>>> fill_val = 0
>>> interpolation = 'bilinear'
>>> # Note, There seem to be issues when:
>>> # * out_shape is different than self's shape
>>> # * the mask dtype is not supported by cv2.AffineWarp
>>> new = self.translate(out_shape, offset, direction, fill_val,
>>> interpolation)
>>> assert len(new) == len(self)
>>> assert new.height, new.width == out_shape
"""
if len(self.masks) == 0:
translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
translated_masks = mmcv.imtranslate(
self.masks.transpose((1, 2, 0)),
offset,
direction,
border_value=fill_val,
interpolation=interpolation)
if translated_masks.ndim == 2:
translated_masks = translated_masks[:, :, None]
translated_masks = translated_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(translated_masks, *out_shape)
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Shear the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
magnitude (int | float): The magnitude used for shear.
direction (str): The shear direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border.
interpolation (str): Same as in :func:`mmcv.imshear`.
Returns:
BitmapMasks: The sheared masks.
"""
if len(self.masks) == 0:
sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
sheared_masks = mmcv.imshear(
self.masks.transpose((1, 2, 0)),
magnitude,
direction,
border_value=border_value,
interpolation=interpolation)
if sheared_masks.ndim == 2:
sheared_masks = sheared_masks[:, :, None]
sheared_masks = sheared_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(sheared_masks, *out_shape)
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""Rotate the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
angle (int | float): Rotation angle in degrees. Positive values
mean counter-clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the
rotation in source image. If not specified, the center of
the image will be used.
scale (int | float): Isotropic scale factor.
fill_val (int | float): Border value. Default 0 for masks.
Returns:
BitmapMasks: Rotated BitmapMasks.
"""
if len(self.masks) == 0:
rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)
else:
rotated_masks = mmcv.imrotate(
self.masks.transpose((1, 2, 0)),
angle,
center=center,
scale=scale,
border_value=fill_val)
if rotated_masks.ndim == 2:
# case when only one mask, (h, w)
rotated_masks = rotated_masks[:, :, None] # (h, w, 1)
rotated_masks = rotated_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(rotated_masks, *out_shape)
@property
def areas(self):
"""See :py:attr:`BaseInstanceMasks.areas`."""
return self.masks.sum((1, 2))
def to_ndarray(self):
"""See :func:`BaseInstanceMasks.to_ndarray`."""
return self.masks
def to_tensor(self, dtype, device):
"""See :func:`BaseInstanceMasks.to_tensor`."""
return torch.tensor(self.masks, dtype=dtype, device=device)
@classmethod
def random(cls,
num_masks=3,
height=32,
width=32,
dtype=np.uint8,
rng=None):
"""Generate random bitmap masks for demo / testing purposes.
Example:
>>> from mmdet.core.mask.structures import BitmapMasks
>>> self = BitmapMasks.random()
>>> print('self = {}'.format(self))
self = BitmapMasks(num_masks=3, height=32, width=32)
"""
from mmdet.utils.util_random import ensure_rng
rng = ensure_rng(rng)
masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)
self = cls(masks, height=height, width=width)
return self
def get_bboxes(self):
num_masks = len(self)
boxes = np.zeros((num_masks, 4), dtype=np.float32)
x_any = self.masks.any(axis=1)
y_any = self.masks.any(axis=2)
for idx in range(num_masks):
x = np.where(x_any[idx, :])[0]
y = np.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
# use +1 for x_max and y_max so that the right and bottom
# boundary of instance masks are fully included by the box
boxes[idx, :] = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1],
dtype=np.float32)
return boxes
class PolygonMasks(BaseInstanceMasks):
"""This class represents masks in the form of polygons.
    Polygons are represented as a list with three levels: the first level
    corresponds to objects, the second to the polygons that compose each
    object, and the third to the polygon coordinates.
Args:
masks (list[list[ndarray]]): The first level of the list
corresponds to objects, the second level to the polys that
compose the object, the third level to the poly coordinates
height (int): height of masks
width (int): width of masks
Example:
>>> from mmdet.core.mask.structures import * # NOQA
>>> masks = [
>>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]
>>> ]
>>> height, width = 16, 16
>>> self = PolygonMasks(masks, height, width)
>>> # demo translate
>>> new = self.translate((16, 16), 4., direction='horizontal')
>>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])
>>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)
>>> # demo crop_and_resize
>>> num_boxes = 3
>>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
>>> out_shape = (16, 16)
>>> inds = torch.randint(0, len(self), size=(num_boxes,))
>>> device = 'cpu'
>>> interpolation = 'bilinear'
>>> new = self.crop_and_resize(
... bboxes, out_shape, inds, device, interpolation)
>>> assert len(new) == num_boxes
>>> assert new.height, new.width == out_shape
"""
def __init__(self, masks, height, width):
assert isinstance(masks, list)
if len(masks) > 0:
assert isinstance(masks[0], list)
assert isinstance(masks[0][0], np.ndarray)
self.height = height
self.width = width
self.masks = masks
def __getitem__(self, index):
"""Index the polygon masks.
Args:
index (ndarray | List): The indices.
Returns:
:obj:`PolygonMasks`: The indexed polygon masks.
"""
if isinstance(index, np.ndarray):
index = index.tolist()
if isinstance(index, list):
masks = [self.masks[i] for i in index]
else:
try:
masks = self.masks[index]
except Exception:
raise ValueError(
f'Unsupported input of type {type(index)} for indexing!')
if len(masks) and isinstance(masks[0], np.ndarray):
masks = [masks] # ensure a list of three levels
return PolygonMasks(masks, self.height, self.width)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'num_masks={len(self.masks)}, '
s += f'height={self.height}, '
s += f'width={self.width})'
return s
def __len__(self):
"""Number of masks."""
return len(self.masks)
def rescale(self, scale, interpolation=None):
"""see :func:`BaseInstanceMasks.rescale`"""
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
if len(self.masks) == 0:
rescaled_masks = PolygonMasks([], new_h, new_w)
else:
rescaled_masks = self.resize((new_h, new_w))
return rescaled_masks
def resize(self, out_shape, interpolation=None):
"""see :func:`BaseInstanceMasks.resize`"""
if len(self.masks) == 0:
resized_masks = PolygonMasks([], *out_shape)
else:
h_scale = out_shape[0] / self.height
w_scale = out_shape[1] / self.width
resized_masks = []
for poly_per_obj in self.masks:
resized_poly = []
for p in poly_per_obj:
p = p.copy()
p[0::2] = p[0::2] * w_scale
p[1::2] = p[1::2] * h_scale
resized_poly.append(p)
resized_masks.append(resized_poly)
resized_masks = PolygonMasks(resized_masks, *out_shape)
return resized_masks
def flip(self, flip_direction='horizontal'):
"""see :func:`BaseInstanceMasks.flip`"""
assert flip_direction in ('horizontal', 'vertical', 'diagonal')
if len(self.masks) == 0:
flipped_masks = PolygonMasks([], self.height, self.width)
else:
flipped_masks = []
for poly_per_obj in self.masks:
flipped_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if flip_direction == 'horizontal':
p[0::2] = self.width - p[0::2]
elif flip_direction == 'vertical':
p[1::2] = self.height - p[1::2]
else:
p[0::2] = self.width - p[0::2]
p[1::2] = self.height - p[1::2]
flipped_poly_per_obj.append(p)
flipped_masks.append(flipped_poly_per_obj)
flipped_masks = PolygonMasks(flipped_masks, self.height,
self.width)
return flipped_masks
def crop(self, bbox):
"""see :func:`BaseInstanceMasks.crop`"""
assert isinstance(bbox, np.ndarray)
assert bbox.ndim == 1
# clip the boundary
bbox = bbox.copy()
bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
if len(self.masks) == 0:
cropped_masks = PolygonMasks([], h, w)
else:
cropped_masks = []
for poly_per_obj in self.masks:
cropped_poly_per_obj = []
for p in poly_per_obj:
# pycocotools will clip the boundary
p = p.copy()
p[0::2] = p[0::2] - bbox[0]
p[1::2] = p[1::2] - bbox[1]
cropped_poly_per_obj.append(p)
cropped_masks.append(cropped_poly_per_obj)
cropped_masks = PolygonMasks(cropped_masks, h, w)
return cropped_masks
def pad(self, out_shape, pad_val=0):
"""padding has no effect on polygons`"""
return PolygonMasks(self.masks, *out_shape)
def expand(self, *args, **kwargs):
"""TODO: Add expand for polygon"""
raise NotImplementedError
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device='cpu',
interpolation='bilinear',
binarize=True):
"""see :func:`BaseInstanceMasks.crop_and_resize`"""
out_h, out_w = out_shape
if len(self.masks) == 0:
return PolygonMasks([], out_h, out_w)
if not binarize:
raise ValueError('Polygons are always binary, '
'setting binarize=False is unsupported')
resized_masks = []
for i in range(len(bboxes)):
mask = self.masks[inds[i]]
bbox = bboxes[i, :]
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
h_scale = out_h / max(h, 0.1) # avoid too large scale
w_scale = out_w / max(w, 0.1)
resized_mask = []
for p in mask:
p = p.copy()
# crop
# pycocotools will clip the boundary
p[0::2] = p[0::2] - bbox[0]
p[1::2] = p[1::2] - bbox[1]
# resize
p[0::2] = p[0::2] * w_scale
p[1::2] = p[1::2] * h_scale
resized_mask.append(p)
resized_masks.append(resized_mask)
return PolygonMasks(resized_masks, *out_shape)
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=None,
interpolation=None):
"""Translate the PolygonMasks.
Example:
            >>> self = PolygonMasks.random(dtype=np.int32)
>>> out_shape = (self.height, self.width)
>>> new = self.translate(out_shape, 4., direction='horizontal')
>>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])
>>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501
"""
assert fill_val is None or fill_val == 0, 'Here fill_val is not '\
            f'used, and by default should be None or 0. Got {fill_val}.'
if len(self.masks) == 0:
translated_masks = PolygonMasks([], *out_shape)
else:
translated_masks = []
for poly_per_obj in self.masks:
translated_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if direction == 'horizontal':
p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
elif direction == 'vertical':
p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
translated_poly_per_obj.append(p)
translated_masks.append(translated_poly_per_obj)
translated_masks = PolygonMasks(translated_masks, *out_shape)
return translated_masks
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""See :func:`BaseInstanceMasks.shear`."""
if len(self.masks) == 0:
sheared_masks = PolygonMasks([], *out_shape)
else:
sheared_masks = []
if direction == 'horizontal':
shear_matrix = np.stack([[1, magnitude],
[0, 1]]).astype(np.float32)
elif direction == 'vertical':
shear_matrix = np.stack([[1, 0], [magnitude,
1]]).astype(np.float32)
for poly_per_obj in self.masks:
sheared_poly = []
for p in poly_per_obj:
p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]
new_coords = np.matmul(shear_matrix, p) # [2, n]
new_coords[0, :] = np.clip(new_coords[0, :], 0,
out_shape[1])
new_coords[1, :] = np.clip(new_coords[1, :], 0,
out_shape[0])
sheared_poly.append(
new_coords.transpose((1, 0)).reshape(-1))
sheared_masks.append(sheared_poly)
sheared_masks = PolygonMasks(sheared_masks, *out_shape)
return sheared_masks
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""See :func:`BaseInstanceMasks.rotate`."""
if len(self.masks) == 0:
rotated_masks = PolygonMasks([], *out_shape)
else:
rotated_masks = []
rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
for poly_per_obj in self.masks:
rotated_poly = []
for p in poly_per_obj:
p = p.copy()
coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]
# pad 1 to convert from format [x, y] to homogeneous
# coordinates format [x, y, 1]
coords = np.concatenate(
(coords, np.ones((coords.shape[0], 1), coords.dtype)),
axis=1) # [n, 3]
rotated_coords = np.matmul(
rotate_matrix[None, :, :],
coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]
rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
out_shape[1])
rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
out_shape[0])
rotated_poly.append(rotated_coords.reshape(-1))
rotated_masks.append(rotated_poly)
rotated_masks = PolygonMasks(rotated_masks, *out_shape)
return rotated_masks
def to_bitmap(self):
"""convert polygon masks to bitmap masks."""
bitmap_masks = self.to_ndarray()
return BitmapMasks(bitmap_masks, self.height, self.width)
@property
def areas(self):
"""Compute areas of masks.
This func is modified from `detectron2
<https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.
        It only works with polygons, computing areas via the shoelace formula.
Return:
ndarray: areas of each instance
""" # noqa: W501
area = []
for polygons_per_obj in self.masks:
area_per_obj = 0
for p in polygons_per_obj:
area_per_obj += self._polygon_area(p[0::2], p[1::2])
area.append(area_per_obj)
return np.asarray(area)
def _polygon_area(self, x, y):
"""Compute the area of a component of a polygon.
Using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Args:
x (ndarray): x coordinates of the component
y (ndarray): y coordinates of the component
Return:
            float: the area of the component
""" # noqa: 501
return 0.5 * np.abs(
np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
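    # Worked example of the shoelace formula above: for the unit square with
    # CCW vertices (0, 0), (1, 0), (1, 1), (0, 1):
    # >>> x = np.array([0, 1, 1, 0]); y = np.array([0, 0, 1, 1])
    # >>> 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
    # 1.0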
def to_ndarray(self):
"""Convert masks to the format of ndarray."""
if len(self.masks) == 0:
return np.empty((0, self.height, self.width), dtype=np.uint8)
bitmap_masks = []
for poly_per_obj in self.masks:
bitmap_masks.append(
polygon_to_bitmap(poly_per_obj, self.height, self.width))
return np.stack(bitmap_masks)
def to_tensor(self, dtype, device):
"""See :func:`BaseInstanceMasks.to_tensor`."""
if len(self.masks) == 0:
return torch.empty((0, self.height, self.width),
dtype=dtype,
device=device)
ndarray_masks = self.to_ndarray()
return torch.tensor(ndarray_masks, dtype=dtype, device=device)
@classmethod
def random(cls,
num_masks=3,
height=32,
width=32,
n_verts=5,
dtype=np.float32,
rng=None):
"""Generate random polygon masks for demo / testing purposes.
Adapted from [1]_
References:
.. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501
Example:
>>> from mmdet.core.mask.structures import PolygonMasks
>>> self = PolygonMasks.random()
>>> print('self = {}'.format(self))
"""
from mmdet.utils.util_random import ensure_rng
rng = ensure_rng(rng)
def _gen_polygon(n, irregularity, spikeyness):
"""Creates the polygon by sampling points on a circle around the
centre. Random noise is added by varying the angular spacing
between sequential points, and by varying the radial distance of
each point from the centre.
Based on original code by Mike Ounsworth
Args:
n (int): number of vertices
irregularity (float): [0,1] indicating how much variance there
is in the angular spacing of vertices. [0,1] will map to
[0, 2pi/numberOfVerts]
spikeyness (float): [0,1] indicating how much variance there is
in each vertex from the circle of radius aveRadius. [0,1]
will map to [0, aveRadius]
Returns:
a list of vertices, in CCW order.
"""
from scipy.stats import truncnorm
# Generate around the unit circle
cx, cy = (0.0, 0.0)
radius = 1
tau = np.pi * 2
irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n
spikeyness = np.clip(spikeyness, 1e-9, 1)
# generate n angle steps
lower = (tau / n) - irregularity
upper = (tau / n) + irregularity
angle_steps = rng.uniform(lower, upper, n)
# normalize the steps so that point 0 and point n+1 are the same
k = angle_steps.sum() / (2 * np.pi)
angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)
# Convert high and low values to be wrt the standard normal range
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html
low = 0
high = 2 * radius
mean = radius
std = spikeyness
a = (low - mean) / std
b = (high - mean) / std
tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)
# now generate the points
radii = tnorm.rvs(n, random_state=rng)
x_pts = cx + radii * np.cos(angles)
y_pts = cy + radii * np.sin(angles)
points = np.hstack([x_pts[:, None], y_pts[:, None]])
# Scale to 0-1 space
points = points - points.min(axis=0)
points = points / points.max(axis=0)
# Randomly place within 0-1 space
points = points * (rng.rand() * .8 + .2)
min_pt = points.min(axis=0)
max_pt = points.max(axis=0)
high = (1 - max_pt)
low = (0 - min_pt)
offset = (rng.rand(2) * (high - low)) + low
points = points + offset
return points
def _order_vertices(verts):
"""
References:
https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise
"""
mlat = verts.T[0].sum() / len(verts)
mlng = verts.T[1].sum() / len(verts)
tau = np.pi * 2
angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +
tau) % tau
sortx = angle.argsort()
verts = verts.take(sortx, axis=0)
return verts
# Generate a random exterior for each requested mask
masks = []
for _ in range(num_masks):
exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))
exterior = (exterior * [(width, height)]).astype(dtype)
masks.append([exterior.ravel()])
self = cls(masks, height, width)
return self
def get_bboxes(self):
num_masks = len(self)
boxes = np.zeros((num_masks, 4), dtype=np.float32)
for idx, poly_per_obj in enumerate(self.masks):
# simply use a number that is big enough for comparison with
# coordinates
xy_min = np.array([self.width * 2, self.height * 2],
dtype=np.float32)
xy_max = np.zeros(2, dtype=np.float32)
for p in poly_per_obj:
xy = np.array(p).reshape(-1, 2).astype(np.float32)
xy_min = np.minimum(xy_min, np.min(xy, axis=0))
xy_max = np.maximum(xy_max, np.max(xy, axis=0))
boxes[idx, :2] = xy_min
boxes[idx, 2:] = xy_max
return boxes
def polygon_to_bitmap(polygons, height, width):
"""Convert masks from the form of polygons to bitmaps.
Args:
polygons (list[ndarray]): masks in polygon representation
height (int): mask height
width (int): mask width
Return:
ndarray: the converted masks in bitmap representation
"""
rles = maskUtils.frPyObjects(polygons, height, width)
rle = maskUtils.merge(rles)
    bitmap_mask = maskUtils.decode(rle).astype(bool)
return bitmap_mask
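# Usage sketch: rasterize a single triangle into a 16x16 boolean mask.
# >>> tri = [np.array([2., 2., 12., 2., 7., 12.])]
# >>> bitmap = polygon_to_bitmap(tri, 16, 16)
# >>> bitmap.shape, bitmap.dtype
# ((16, 16), dtype('bool'))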
| 40,241 | 36.539179 | 141 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/mask/mask_target.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.nn.modules.utils import _pair
def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
cfg):
"""Compute mask target for positive proposals in multiple images.
Args:
pos_proposals_list (list[Tensor]): Positive proposals in multiple
images.
pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
positive proposals.
gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
each image.
cfg (dict): Config dict that specifies the mask size.
Returns:
list[Tensor]: Mask target of each image.
Example:
>>> import mmcv
>>> import mmdet
>>> from mmdet.core.mask import BitmapMasks
>>> from mmdet.core.mask.mask_target import *
>>> H, W = 17, 18
>>> cfg = mmcv.Config({'mask_size': (13, 14)})
>>> rng = np.random.RandomState(0)
>>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image
>>> pos_proposals_list = [
>>> torch.Tensor([
>>> [ 7.2425, 5.5929, 13.9414, 14.9541],
>>> [ 7.3241, 3.6170, 16.3850, 15.3102],
>>> ]),
>>> torch.Tensor([
>>> [ 4.8448, 6.4010, 7.0314, 9.7681],
>>> [ 5.9790, 2.6989, 7.4416, 4.8580],
>>> [ 0.0000, 0.0000, 0.1398, 9.8232],
>>> ]),
>>> ]
>>> # Corresponding class index for each proposal for each image
>>> pos_assigned_gt_inds_list = [
>>> torch.LongTensor([7, 0]),
>>> torch.LongTensor([5, 4, 1]),
>>> ]
>>> # Ground truth mask for each true object for each image
>>> gt_masks_list = [
>>> BitmapMasks(rng.rand(8, H, W), height=H, width=W),
>>> BitmapMasks(rng.rand(6, H, W), height=H, width=W),
>>> ]
>>> mask_targets = mask_target(
>>> pos_proposals_list, pos_assigned_gt_inds_list,
>>> gt_masks_list, cfg)
>>> assert mask_targets.shape == (5,) + cfg['mask_size']
"""
cfg_list = [cfg for _ in range(len(pos_proposals_list))]
mask_targets = map(mask_target_single, pos_proposals_list,
pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
mask_targets = list(mask_targets)
if len(mask_targets) > 0:
mask_targets = torch.cat(mask_targets)
return mask_targets
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
"""Compute mask target for each positive proposal in the image.
Args:
pos_proposals (Tensor): Positive proposals.
pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
or Polygon.
cfg (dict): Config dict that indicate the mask size.
Returns:
Tensor: Mask target of each positive proposals in the image.
Example:
>>> import mmcv
>>> import mmdet
>>> from mmdet.core.mask import BitmapMasks
>>> from mmdet.core.mask.mask_target import * # NOQA
>>> H, W = 32, 32
>>> cfg = mmcv.Config({'mask_size': (7, 11)})
>>> rng = np.random.RandomState(0)
>>> # Masks for each ground truth box (relative to the image)
>>> gt_masks_data = rng.rand(3, H, W)
>>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
>>> # Predicted positive boxes in one image
>>> pos_proposals = torch.FloatTensor([
>>> [ 16.2, 5.5, 19.9, 20.9],
>>> [ 17.3, 13.6, 19.3, 19.3],
>>> [ 14.8, 16.4, 17.0, 23.7],
>>> [ 0.0, 0.0, 16.0, 16.0],
>>> [ 4.0, 0.0, 20.0, 16.0],
>>> ])
>>> # For each predicted proposal, its assignment to a gt mask
>>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
>>> mask_targets = mask_target_single(
>>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
>>> assert mask_targets.shape == (5,) + cfg['mask_size']
"""
device = pos_proposals.device
mask_size = _pair(cfg.mask_size)
binarize = not cfg.get('soft_mask_target', False)
num_pos = pos_proposals.size(0)
if num_pos > 0:
proposals_np = pos_proposals.cpu().numpy()
maxh, maxw = gt_masks.height, gt_masks.width
proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
mask_targets = gt_masks.crop_and_resize(
proposals_np,
mask_size,
device=device,
inds=pos_assigned_gt_inds,
binarize=binarize).to_ndarray()
mask_targets = torch.from_numpy(mask_targets).float().to(device)
else:
mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
return mask_targets
| 5,115 | 38.96875 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/hook/sync_random_size_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks.
    SyncRandomSizeHook is deprecated; please use the Resize pipeline to
    achieve similar behavior, e.g. `dict(type='Resize', img_scale=[(448, 448),
    (832, 832)], multiscale_mode='range', keep_ratio=True)`.
    Note: Due to the multi-process dataloader, this hook behaves differently
    from YOLOX's official implementation: the official code changes the size
    at a fixed iteration interval, whereas this hook changes it at a fixed
    epoch interval.
Args:
ratio_range (tuple[int]): Random ratio range. It will be multiplied
by 32, and then change the dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The epoch interval of change image size. Default: 1.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=1,
device='cuda'):
        warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '
                      'Please use the Resize pipeline to achieve similar '
                      'functionality. Due to the multi-process dataloader, '
                      'its behavior differs from YOLOX\'s official '
                      'implementation: the official code changes the size '
                      'at a fixed iteration interval, whereas this hook '
                      'changes it at a fixed epoch interval.')
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_epoch(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.epoch +
1) % self.interval == 0:
# Due to DDP and DP get the device behavior inconsistent,
# so we did not get the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
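# A minimal usage sketch (assuming an MMDetection-style config file) of
# enabling this deprecated hook via `custom_hooks`:
#   custom_hooks = [
#       dict(type='SyncRandomSizeHook', ratio_range=(14, 26),
#            img_scale=(640, 640), interval=1)
#   ]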
| 3,061 | 40.945205 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/hook/checkloss_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
self.interval = interval
    def after_train_iter(self, runner):
        if self.every_n_iters(runner, self.interval):
            if not torch.isfinite(runner.outputs['loss']):
                runner.logger.info('loss became infinite or NaN!')
            assert torch.isfinite(runner.outputs['loss']), \
                'loss became infinite or NaN!'
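# A minimal usage sketch (assuming an MMDetection-style config file):
#   custom_hooks = [dict(type='CheckInvalidLossHook', interval=50)]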
| 681 | 26.28 | 66 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/hook/sync_norm_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import nn
from ..utils.dist_utils import all_reduce_dict
def get_norm_states(module):
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for k, v in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_states
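# A small sketch of what `get_norm_states` collects (toy module; assumes
# standard torch.nn usage, not tied to any particular detector):
#   >>> from torch import nn
#   >>> m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
#   >>> states = get_norm_states(m)
#   >>> assert all(key.startswith('1.') for key in states)  # only BN buffers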
@HOOKS.register_module()
class SyncNormHook(Hook):
"""Synchronize Norm states after training epoch, currently used in YOLOX.
Args:
num_last_epochs (int): The number of latter epochs in the end of the
training to switch to synchronizing norm interval. Default: 15.
interval (int): Synchronizing norm interval. Default: 1.
"""
def __init__(self, num_last_epochs=15, interval=1):
self.interval = interval
self.num_last_epochs = num_last_epochs
def before_train_epoch(self, runner):
epoch = runner.epoch
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
# Synchronize norm every epoch.
self.interval = 1
def after_train_epoch(self, runner):
"""Synchronizing norm."""
epoch = runner.epoch
module = runner.model
if (epoch + 1) % self.interval == 0:
_, world_size = get_dist_info()
if world_size == 1:
return
norm_states = get_norm_states(module)
if len(norm_states) == 0:
return
norm_states = all_reduce_dict(norm_states, op='mean')
module.load_state_dict(norm_states, strict=False)
| 1,789 | 32.773585 | 77 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/export/model_wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
import numpy as np
import torch
from mmdet.core import bbox2result
from mmdet.models import BaseDetector
class DeployBaseDetector(BaseDetector):
"""DeployBaseDetector."""
def __init__(self, class_names, device_id):
super(DeployBaseDetector, self).__init__()
self.CLASSES = class_names
self.device_id = device_id
def simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def extract_feat(self, imgs):
raise NotImplementedError('This method is not implemented.')
def forward_train(self, imgs, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def val_step(self, data, optimizer):
raise NotImplementedError('This method is not implemented.')
def train_step(self, data, optimizer):
raise NotImplementedError('This method is not implemented.')
def forward_test(self, *, img, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def async_simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def forward(self, img, img_metas, return_loss=True, **kwargs):
outputs = self.forward_test(img, img_metas, **kwargs)
batch_dets, batch_labels = outputs[:2]
batch_masks = outputs[2] if len(outputs) == 3 else None
batch_size = img[0].shape[0]
img_metas = img_metas[0]
results = []
rescale = kwargs.get('rescale', True)
for i in range(batch_size):
dets, labels = batch_dets[i], batch_labels[i]
if rescale:
scale_factor = img_metas[i]['scale_factor']
if isinstance(scale_factor, (list, tuple, np.ndarray)):
assert len(scale_factor) == 4
scale_factor = np.array(scale_factor)[None, :] # [1,4]
dets[:, :4] /= scale_factor
if 'border' in img_metas[i]:
# offset pixel of the top-left corners between original image
# and padded/enlarged image, 'border' is used when exporting
# CornerNet and CentripetalNet to onnx
x_off = img_metas[i]['border'][2]
y_off = img_metas[i]['border'][0]
dets[:, [0, 2]] -= x_off
dets[:, [1, 3]] -= y_off
dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype)
dets_results = bbox2result(dets, labels, len(self.CLASSES))
if batch_masks is not None:
masks = batch_masks[i]
img_h, img_w = img_metas[i]['img_shape'][:2]
ori_h, ori_w = img_metas[i]['ori_shape'][:2]
masks = masks[:, :img_h, :img_w]
if rescale:
masks = masks.astype(np.float32)
masks = torch.from_numpy(masks)
masks = torch.nn.functional.interpolate(
masks.unsqueeze(0), size=(ori_h, ori_w))
masks = masks.squeeze(0).detach().numpy()
                # `np.bool` was removed in NumPy 1.24; use `np.bool_` instead
                if masks.dtype != np.bool_:
masks = masks >= 0.5
segms_results = [[] for _ in range(len(self.CLASSES))]
for j in range(len(dets)):
segms_results[labels[j]].append(masks[j])
results.append((dets_results, segms_results))
else:
results.append(dets_results)
return results
class ONNXRuntimeDetector(DeployBaseDetector):
"""Wrapper for detector's inference with ONNXRuntime."""
def __init__(self, onnx_file, class_names, device_id):
super(ONNXRuntimeDetector, self).__init__(class_names, device_id)
import onnxruntime as ort
# get the custom op path
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with ONNXRuntime from source.')
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
providers = ['CPUExecutionProvider']
options = [{}]
is_cuda_available = ort.get_device() == 'GPU'
if is_cuda_available:
providers.insert(0, 'CUDAExecutionProvider')
options.insert(0, {'device_id': device_id})
sess.set_providers(providers, options)
self.sess = sess
self.io_binding = sess.io_binding()
self.output_names = [_.name for _ in sess.get_outputs()]
self.is_cuda_available = is_cuda_available
def forward_test(self, imgs, img_metas, **kwargs):
input_data = imgs[0]
# set io binding for inputs/outputs
device_type = 'cuda' if self.is_cuda_available else 'cpu'
if not self.is_cuda_available:
input_data = input_data.cpu()
self.io_binding.bind_input(
name='input',
device_type=device_type,
device_id=self.device_id,
element_type=np.float32,
shape=input_data.shape,
buffer_ptr=input_data.data_ptr())
for name in self.output_names:
self.io_binding.bind_output(name)
# run session to get outputs
self.sess.run_with_iobinding(self.io_binding)
ort_outputs = self.io_binding.copy_outputs_to_cpu()
return ort_outputs
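# A rough usage sketch (hypothetical file name and classes; assumes a model
# exported with a single 'input' and dets/labels outputs):
#   detector = ONNXRuntimeDetector('retinanet.onnx',
#                                  class_names=('person', ), device_id=0)
#   results = detector(img=[img_tensor], img_metas=[[img_meta]],
#                      return_loss=False)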
class TensorRTDetector(DeployBaseDetector):
"""Wrapper for detector's inference with TensorRT."""
def __init__(self, engine_file, class_names, device_id, output_names=None):
super(TensorRTDetector, self).__init__(class_names, device_id)
warnings.warn('`output_names` is deprecated and will be removed in '
'future releases.')
from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin
try:
load_tensorrt_plugin()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with TensorRT from source.')
output_names = ['dets', 'labels']
model = TRTWraper(engine_file, ['input'], output_names)
with_masks = False
# if TensorRT has totally 4 inputs/outputs, then
# the detector should have `mask` output.
if len(model.engine) == 4:
model.output_names = output_names + ['masks']
with_masks = True
self.model = model
self.with_masks = with_masks
def forward_test(self, imgs, img_metas, **kwargs):
input_data = imgs[0].contiguous()
with torch.cuda.device(self.device_id), torch.no_grad():
outputs = self.model({'input': input_data})
outputs = [outputs[name] for name in self.model.output_names]
outputs = [out.detach().cpu().numpy() for out in outputs]
return outputs
| 7,472 | 39.61413 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/export/pytorch2onnx.py | # Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
def generate_inputs_and_wrap_model(config_path,
checkpoint_path,
input_config,
cfg_options=None):
"""Prepare sample input and wrap model for ONNX export.
The ONNX export API only accept args, and all inputs should be
torch.Tensor or corresponding types (such as tuple of tensor).
So we should call this function before exporting. This function will:
1. generate corresponding inputs which are used to execute the model.
2. Wrap the model's forward function.
    For example, the MMDet models' forward function has a parameter
    ``return_loss: bool``. We want to set it to False, but the export API
    supports neither bool-typed inputs nor kwargs, so we have to replace the
    forward method like ``model.forward = partial(model.forward,
    return_loss=False)``.
Args:
config_path (str): the OpenMMLab config for the model we want to
export to ONNX
checkpoint_path (str): Path to the corresponding checkpoint
        input_config (dict): the exact data in this dict depends on the
framework. For MMSeg, we can just declare the input shape,
and generate the dummy data accordingly. However, for MMDet,
we may pass the real img path, or the NMS will return None
as there is no legal bbox.
Returns:
tuple: (model, tensor_data) wrapped model which can be called by
``model(*tensor_data)`` and a list of inputs which are used to
execute the model while exporting.
"""
model = build_model_from_cfg(
config_path, checkpoint_path, cfg_options=cfg_options)
one_img, one_meta = preprocess_example_input(input_config)
tensor_data = [one_img]
model.forward = partial(
model.forward, img_metas=[[one_meta]], return_loss=False)
    # PyTorch 1.3 has some bugs in ONNX export, so we work around them
    # by replacing the affected ops with extra symbolics
opset_version = 11
# put the import within the function thus it will not cause import error
# when not using this function
try:
from mmcv.onnx.symbolic import register_extra_symbolics
except ModuleNotFoundError:
raise NotImplementedError('please update mmcv to version>=v1.0.4')
register_extra_symbolics(opset_version)
return model, tensor_data
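# A rough end-to-end sketch (hypothetical paths) of pairing this helper with
# `torch.onnx.export`:
#   input_config = {
#       'input_shape': (1, 3, 800, 1216),
#       'input_path': 'demo/demo.jpg',
#       'normalize_cfg': {'mean': (123.675, 116.28, 103.53),
#                         'std': (58.395, 57.12, 57.375)}
#   }
#   model, tensor_data = generate_inputs_and_wrap_model(
#       'cfg.py', 'ckpt.pth', input_config)
#   torch.onnx.export(model, tuple(tensor_data), 'tmp.onnx', opset_version=11)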
def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
"""Build a model from config and load the given checkpoint.
Args:
config_path (str): the OpenMMLab config for the model we want to
export to ONNX
checkpoint_path (str): Path to the corresponding checkpoint
Returns:
torch.nn.Module: the built model
"""
from mmdet.models import build_detector
cfg = mmcv.Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# build the model
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu')
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
from mmdet.datasets import DATASETS
dataset = DATASETS.get(cfg.data.test['type'])
assert (dataset is not None)
model.CLASSES = dataset.CLASSES
model.cpu().eval()
return model
def preprocess_example_input(input_config):
"""Prepare an example input image for ``generate_inputs_and_wrap_model``.
Args:
input_config (dict): customized config describing the example input.
Returns:
tuple: (one_img, one_meta), tensor of the example input image and \
meta information for the example input image.
Examples:
>>> from mmdet.core.export import preprocess_example_input
>>> input_config = {
>>> 'input_shape': (1,3,224,224),
>>> 'input_path': 'demo/demo.jpg',
>>> 'normalize_cfg': {
>>> 'mean': (123.675, 116.28, 103.53),
>>> 'std': (58.395, 57.12, 57.375)
>>> }
>>> }
>>> one_img, one_meta = preprocess_example_input(input_config)
>>> print(one_img.shape)
torch.Size([1, 3, 224, 224])
>>> print(one_meta)
{'img_shape': (224, 224, 3),
'ori_shape': (224, 224, 3),
'pad_shape': (224, 224, 3),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False}
"""
input_path = input_config['input_path']
input_shape = input_config['input_shape']
one_img = mmcv.imread(input_path)
one_img = mmcv.imresize(one_img, input_shape[2:][::-1])
show_img = one_img.copy()
if 'normalize_cfg' in input_config.keys():
normalize_cfg = input_config['normalize_cfg']
mean = np.array(normalize_cfg['mean'], dtype=np.float32)
std = np.array(normalize_cfg['std'], dtype=np.float32)
to_rgb = normalize_cfg.get('to_rgb', True)
one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb)
one_img = one_img.transpose(2, 0, 1)
one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(
True)
(_, C, H, W) = input_shape
one_meta = {
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': np.ones(4, dtype=np.float32),
'flip': False,
'show_img': show_img,
'flip_direction': None
}
return one_img, one_meta
| 5,995 | 36.475 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/export/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx,
get_k_for_topk)
from .pytorch2onnx import (build_model_from_cfg,
generate_inputs_and_wrap_model,
preprocess_example_input)
__all__ = [
'build_model_from_cfg', 'generate_inputs_and_wrap_model',
'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx',
'dynamic_clip_for_onnx'
]
| 505 | 37.923077 | 75 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/export/onnx_helper.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import torch
def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape):
"""Clip boxes dynamically for onnx.
Since torch.clamp cannot have dynamic `min` and `max`, we scale the
boxes by 1/max_shape and clamp in the range [0, 1].
Args:
x1 (Tensor): The x1 for bounding boxes.
y1 (Tensor): The y1 for bounding boxes.
x2 (Tensor): The x2 for bounding boxes.
y2 (Tensor): The y2 for bounding boxes.
max_shape (Tensor or torch.Size): The (H,W) of original image.
Returns:
tuple(Tensor): The clipped x1, y1, x2, y2.
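    Example:
        >>> # A small sketch with made-up boxes, clamped to a 100x80 (H, W)
        >>> # image; shapes are illustrative, not real model outputs.
        >>> import torch
        >>> from mmdet.core.export import dynamic_clip_for_onnx
        >>> max_shape = torch.tensor([100., 80.])
        >>> x1, y1 = torch.tensor([-5., 10.]), torch.tensor([-2., 0.])
        >>> x2, y2 = torch.tensor([90., 60.]), torch.tensor([120., 50.])
        >>> x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
        >>> assert x1.min() >= 0 and x2.max() <= 80
        >>> assert y1.min() >= 0 and y2.max() <= 100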
"""
assert isinstance(
max_shape,
torch.Tensor), '`max_shape` should be tensor of (h,w) for onnx'
# scale by 1/max_shape
x1 = x1 / max_shape[1]
y1 = y1 / max_shape[0]
x2 = x2 / max_shape[1]
y2 = y2 / max_shape[0]
# clamp [0, 1]
x1 = torch.clamp(x1, 0, 1)
y1 = torch.clamp(y1, 0, 1)
x2 = torch.clamp(x2, 0, 1)
y2 = torch.clamp(y2, 0, 1)
# scale back
x1 = x1 * max_shape[1]
y1 = y1 * max_shape[0]
x2 = x2 * max_shape[1]
y2 = y2 * max_shape[0]
return x1, y1, x2, y2
def get_k_for_topk(k, size):
"""Get k of TopK for onnx exporting.
    The K of TopK in TensorRT should not be a Tensor, while in ONNX Runtime
    it could be a Tensor. Due to the dynamic shape feature, we have to decide
    whether to do TopK and what K it should be while exporting to ONNX.
    If the returned K is less than zero, it means we do not have to do
    the TopK operation.
Args:
k (int or Tensor): The set k value for nms from config file.
size (Tensor or torch.Size): The number of elements of \
TopK's input tensor
Returns:
        int or Tensor: The final K for TopK.
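    Example:
        >>> # A plain-Python sketch outside of ONNX export.
        >>> from mmdet.core.export import get_k_for_topk
        >>> get_k_for_topk(1000, 500)  # k >= size, no TopK needed
        -1
        >>> get_k_for_topk(100, 500)   # k < size, do TopK with k
        100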
"""
ret_k = -1
if k <= 0 or size <= 0:
return ret_k
if torch.onnx.is_in_onnx_export():
is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'
if is_trt_backend:
# TensorRT does not support dynamic K with TopK op
if 0 < k < size:
ret_k = k
else:
# Always keep topk op for dynamic input in onnx for ONNX Runtime
ret_k = torch.where(k < size, k, size)
elif k < size:
ret_k = k
else:
# ret_k is -1
pass
return ret_k
def add_dummy_nms_for_onnx(boxes,
scores,
max_output_boxes_per_class=1000,
iou_threshold=0.5,
score_threshold=0.05,
pre_top_k=-1,
after_top_k=-1,
labels=None):
"""Create a dummy onnx::NonMaxSuppression op while exporting to ONNX.
This function helps exporting to onnx with batch and multiclass NMS op.
It only supports class-agnostic detection results. That is, the scores
is of shape (N, num_bboxes, num_classes) and the boxes is of shape
(N, num_boxes, 4).
Args:
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes]
max_output_boxes_per_class (int): Maximum number of output
boxes per class of nms. Defaults to 1000.
iou_threshold (float): IOU threshold of nms. Defaults to 0.5
score_threshold (float): score threshold of nms.
Defaults to 0.05.
        pre_top_k (int): Number of top K boxes to keep before nms.
Defaults to -1.
after_top_k (int): Number of top K boxes to keep after nms.
Defaults to -1.
        labels (Tensor, optional): If not None, explicit labels would be used.
            Otherwise, labels would be automatically generated using
            num_classes. Defaults to None.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
max_output_boxes_per_class = torch.LongTensor([max_output_boxes_per_class])
iou_threshold = torch.tensor([iou_threshold], dtype=torch.float32)
score_threshold = torch.tensor([score_threshold], dtype=torch.float32)
batch_size = scores.shape[0]
num_class = scores.shape[2]
nms_pre = torch.tensor(pre_top_k, device=scores.device, dtype=torch.long)
nms_pre = get_k_for_topk(nms_pre, boxes.shape[1])
if nms_pre > 0:
max_scores, _ = scores.max(-1)
_, topk_inds = max_scores.topk(nms_pre)
batch_inds = torch.arange(batch_size).view(
-1, 1).expand_as(topk_inds).long()
# Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501
transformed_inds = boxes.shape[1] * batch_inds + topk_inds
boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape(
batch_size, -1, 4)
scores = scores.reshape(-1, num_class)[transformed_inds, :].reshape(
batch_size, -1, num_class)
if labels is not None:
labels = labels.reshape(-1, 1)[transformed_inds].reshape(
batch_size, -1)
scores = scores.permute(0, 2, 1)
num_box = boxes.shape[1]
# turn off tracing to create a dummy output of nms
state = torch._C._get_tracing_state()
# dummy indices of nms's output
num_fake_det = 2
batch_inds = torch.randint(batch_size, (num_fake_det, 1))
cls_inds = torch.randint(num_class, (num_fake_det, 1))
box_inds = torch.randint(num_box, (num_fake_det, 1))
indices = torch.cat([batch_inds, cls_inds, box_inds], dim=1)
output = indices
setattr(DummyONNXNMSop, 'output', output)
# open tracing
torch._C._set_tracing_state(state)
selected_indices = DummyONNXNMSop.apply(boxes, scores,
max_output_boxes_per_class,
iou_threshold, score_threshold)
batch_inds, cls_inds = selected_indices[:, 0], selected_indices[:, 1]
box_inds = selected_indices[:, 2]
if labels is None:
labels = torch.arange(num_class, dtype=torch.long).to(scores.device)
labels = labels.view(1, num_class, 1).expand_as(scores)
scores = scores.reshape(-1, 1)
boxes = boxes.reshape(batch_size, -1).repeat(1, num_class).reshape(-1, 4)
pos_inds = (num_class * batch_inds + cls_inds) * num_box + box_inds
mask = scores.new_zeros(scores.shape)
# Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501
# PyTorch style code: mask[batch_inds, box_inds] += 1
mask[pos_inds, :] += 1
scores = scores * mask
boxes = boxes * mask
scores = scores.reshape(batch_size, -1)
boxes = boxes.reshape(batch_size, -1, 4)
labels = labels.reshape(batch_size, -1)
nms_after = torch.tensor(
after_top_k, device=scores.device, dtype=torch.long)
nms_after = get_k_for_topk(nms_after, num_box * num_class)
if nms_after > 0:
_, topk_inds = scores.topk(nms_after)
batch_inds = torch.arange(batch_size).view(-1, 1).expand_as(topk_inds)
# Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501
transformed_inds = scores.shape[1] * batch_inds + topk_inds
scores = scores.reshape(-1, 1)[transformed_inds, :].reshape(
batch_size, -1)
boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape(
batch_size, -1, 4)
labels = labels.reshape(-1, 1)[transformed_inds, :].reshape(
batch_size, -1)
scores = scores.unsqueeze(2)
dets = torch.cat([boxes, scores], dim=2)
return dets, labels
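# A rough usage sketch (hypothetical shapes; the dummy NMS op is only
# meaningful while tracing inside `torch.onnx.export`):
#   boxes = torch.rand(1, 100, 4)    # [N, num_boxes, 4]
#   scores = torch.rand(1, 100, 80)  # [N, num_boxes, num_classes]
#   dets, labels = add_dummy_nms_for_onnx(
#       boxes, scores, pre_top_k=400, after_top_k=100)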
class DummyONNXNMSop(torch.autograd.Function):
"""DummyONNXNMSop.
This class is only for creating onnx::NonMaxSuppression.
"""
@staticmethod
def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_threshold,
score_threshold):
return DummyONNXNMSop.output
@staticmethod
def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold,
score_threshold):
return g.op(
'NonMaxSuppression',
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
outputs=1)
| 8,367 | 36.357143 | 98 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/demodata.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.utils.util_random import ensure_rng
def random_boxes(num=1, scale=1, rng=None):
"""Simple version of ``kwimage.Boxes.random``
Returns:
Tensor: shape (n, 4) in x1, y1, x2, y2 format.
References:
https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
Example:
>>> num = 3
>>> scale = 512
>>> rng = 0
>>> boxes = random_boxes(num, scale, rng)
>>> print(boxes)
tensor([[280.9925, 278.9802, 308.6148, 366.1769],
[216.9113, 330.6978, 224.0446, 456.5878],
[405.3632, 196.3221, 493.3953, 270.7942]])
"""
rng = ensure_rng(rng)
tlbr = rng.rand(num, 4).astype(np.float32)
tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
tlbr[:, 0] = tl_x * scale
tlbr[:, 1] = tl_y * scale
tlbr[:, 2] = br_x * scale
tlbr[:, 3] = br_y * scale
boxes = torch.from_numpy(tlbr)
return boxes
| 1,181 | 26.488372 | 101 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/transforms.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
def find_inside_bboxes(bboxes, img_h, img_w):
"""Find bboxes as long as a part of bboxes is inside the image.
Args:
bboxes (Tensor): Shape (N, 4).
img_h (int): Image height.
img_w (int): Image width.
Returns:
        Tensor: Bool mask indicating which bboxes are kept.
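    Example:
        >>> # A small sanity-check sketch with made-up boxes.
        >>> import torch
        >>> bboxes = torch.Tensor([[10, 10, 20, 20], [-20, -20, -10, -10]])
        >>> assert find_inside_bboxes(bboxes, 32, 32).tolist() == [True, False]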
"""
inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \
& (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0)
return inside_inds
def bbox_flip(bboxes, img_shape, direction='horizontal'):
"""Flip bboxes horizontally or vertically.
Args:
bboxes (Tensor): Shape (..., 4*k)
img_shape (tuple): Image shape.
direction (str): Flip direction, options are "horizontal", "vertical",
"diagonal". Default: "horizontal"
Returns:
Tensor: Flipped bboxes.
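    Example:
        >>> # A small sanity-check sketch: horizontal flip in a 32x32 image.
        >>> import torch
        >>> bboxes = torch.Tensor([[0., 0., 10., 10.]])
        >>> flipped = bbox_flip(bboxes, img_shape=(32, 32))
        >>> assert (flipped == torch.Tensor([[22., 0., 32., 10.]])).all()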
"""
assert bboxes.shape[-1] % 4 == 0
assert direction in ['horizontal', 'vertical', 'diagonal']
flipped = bboxes.clone()
if direction == 'horizontal':
flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
elif direction == 'vertical':
flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
else:
flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
return flipped
def bbox_mapping(bboxes,
img_shape,
scale_factor,
flip,
flip_direction='horizontal'):
"""Map bboxes from the original image scale to testing scale."""
new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
if flip:
new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
return new_bboxes
def bbox_mapping_back(bboxes,
img_shape,
scale_factor,
flip,
flip_direction='horizontal'):
"""Map bboxes from testing scale to original image scale."""
new_bboxes = bbox_flip(bboxes, img_shape,
flip_direction) if flip else bboxes
new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)
return new_bboxes.view(bboxes.shape)
def bbox2roi(bbox_list):
"""Convert a list of bboxes to roi format.
Args:
bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
of images.
Returns:
Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
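    Example:
        >>> # A small sketch with one box per image for a 2-image batch.
        >>> import torch
        >>> bbox_list = [
        >>>     torch.Tensor([[0., 0., 10., 10.]]),
        >>>     torch.Tensor([[5., 5., 15., 15.]]),
        >>> ]
        >>> rois = bbox2roi(bbox_list)
        >>> assert rois.shape == (2, 5)
        >>> assert rois[1, 0] == 1  # batch index of the second image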
"""
rois_list = []
for img_id, bboxes in enumerate(bbox_list):
if bboxes.size(0) > 0:
img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
else:
rois = bboxes.new_zeros((0, 5))
rois_list.append(rois)
rois = torch.cat(rois_list, 0)
return rois
def roi2bbox(rois):
"""Convert rois to bounding box format.
Args:
rois (torch.Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
list[torch.Tensor]: Converted boxes of corresponding rois.
"""
bbox_list = []
img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
for img_id in img_ids:
inds = (rois[:, 0] == img_id.item())
bbox = rois[inds, 1:]
bbox_list.append(bbox)
return bbox_list
def bbox2result(bboxes, labels, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (torch.Tensor | np.ndarray): shape (n, 5)
labels (torch.Tensor | np.ndarray): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): bbox results of each class
"""
if bboxes.shape[0] == 0:
return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]
else:
if isinstance(bboxes, torch.Tensor):
bboxes = bboxes.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes)]
def distance2bbox(points, distance, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
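    Example:
        >>> # A small sanity-check sketch with a single point.
        >>> import torch
        >>> points = torch.Tensor([[10., 10.]])
        >>> distance = torch.Tensor([[2., 3., 4., 5.]])
        >>> decoded = distance2bbox(points, distance, max_shape=(16, 16))
        >>> assert (decoded == torch.Tensor([[8., 7., 14., 15.]])).all()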
"""
x1 = points[..., 0] - distance[..., 0]
y1 = points[..., 1] - distance[..., 1]
x2 = points[..., 0] + distance[..., 2]
y2 = points[..., 1] + distance[..., 3]
bboxes = torch.stack([x1, y1, x2, y2], -1)
if max_shape is not None:
if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export():
# speed up
bboxes[:, 0::2].clamp_(min=0, max=max_shape[1])
bboxes[:, 1::2].clamp_(min=0, max=max_shape[0])
return bboxes
# clip bboxes with dynamic `min` and `max` for onnx
if torch.onnx.is_in_onnx_export():
from mmdet.core.export import dynamic_clip_for_onnx
x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return bboxes
if not isinstance(max_shape, torch.Tensor):
max_shape = x1.new_tensor(max_shape)
max_shape = max_shape[..., :2].type_as(x1)
if max_shape.ndim == 2:
assert bboxes.ndim == 3
assert max_shape.size(0) == bboxes.size(0)
min_xy = x1.new_tensor(0)
max_xy = torch.cat([max_shape, max_shape],
dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
return bboxes
def bbox2distance(points, bbox, max_dis=None, eps=0.1):
"""Decode bounding box based on distances.
Args:
points (Tensor): Shape (n, 2), [x, y].
bbox (Tensor): Shape (n, 4), "xyxy" format
max_dis (float): Upper bound of the distance.
        eps (float): a small value to ensure target < max_dis, instead of <=
    Returns:
        Tensor: Distances (left, top, right, bottom) from each point to the
            four box boundaries, shape (n, 4).
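    Example:
        >>> # A small sketch: the inverse of the `distance2bbox` example.
        >>> import torch
        >>> points = torch.Tensor([[10., 10.]])
        >>> bbox = torch.Tensor([[8., 7., 14., 15.]])
        >>> dist = bbox2distance(points, bbox)
        >>> assert (dist == torch.Tensor([[2., 3., 4., 5.]])).all()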
"""
left = points[:, 0] - bbox[:, 0]
top = points[:, 1] - bbox[:, 1]
right = bbox[:, 2] - points[:, 0]
bottom = bbox[:, 3] - points[:, 1]
if max_dis is not None:
left = left.clamp(min=0, max=max_dis - eps)
top = top.clamp(min=0, max=max_dis - eps)
right = right.clamp(min=0, max=max_dis - eps)
bottom = bottom.clamp(min=0, max=max_dis - eps)
return torch.stack([left, top, right, bottom], -1)
def bbox_rescale(bboxes, scale_factor=1.0):
"""Rescale bounding box w.r.t. scale_factor.
Args:
bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois
scale_factor (float): rescale factor
Returns:
Tensor: Rescaled bboxes.
"""
if bboxes.size(1) == 5:
bboxes_ = bboxes[:, 1:]
inds_ = bboxes[:, 0]
else:
bboxes_ = bboxes
cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5
cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5
w = bboxes_[:, 2] - bboxes_[:, 0]
h = bboxes_[:, 3] - bboxes_[:, 1]
w = w * scale_factor
h = h * scale_factor
x1 = cx - 0.5 * w
x2 = cx + 0.5 * w
y1 = cy - 0.5 * h
y2 = cy + 0.5 * h
if bboxes.size(1) == 5:
rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
else:
rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return rescaled_bboxes
def bbox_cxcywh_to_xyxy(bbox):
"""Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).
Args:
bbox (Tensor): Shape (n, 4) for bboxes.
Returns:
Tensor: Converted bboxes.
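    Example:
        >>> # A small sanity-check sketch.
        >>> import torch
        >>> bbox = torch.Tensor([[10., 10., 4., 6.]])
        >>> converted = bbox_cxcywh_to_xyxy(bbox)
        >>> assert (converted == torch.Tensor([[8., 7., 12., 13.]])).all()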
"""
cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]
return torch.cat(bbox_new, dim=-1)
def bbox_xyxy_to_cxcywh(bbox):
"""Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
Args:
bbox (Tensor): Shape (n, 4) for bboxes.
Returns:
Tensor: Converted bboxes.
"""
x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)
bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]
return torch.cat(bbox_new, dim=-1)
| 8,962 | 32.073801 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/assign_result.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.utils import util_mixins
class AssignResult(util_mixins.NiceRepr):
"""Stores assignments between predicted and truth boxes.
Attributes:
num_gts (int): the number of truth boxes considered when computing this
assignment
gt_inds (LongTensor): for each predicted box indicates the 1-based
index of the assigned truth box. 0 means unassigned and -1 means
ignore.
max_overlaps (FloatTensor): the iou between the predicted box and its
assigned truth box.
labels (None | LongTensor): If specified, for each predicted box
indicates the category label of the assigned truth box.
Example:
>>> # An assign result between 4 predicted boxes and 9 true boxes
>>> # where only two boxes were assigned.
>>> num_gts = 9
        >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
>>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
>>> labels = torch.LongTensor([0, 3, 4, 0])
>>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
>>> print(str(self)) # xdoctest: +IGNORE_WANT
<AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
labels.shape=(4,))>
>>> # Force addition of gt labels (when adding gt as proposals)
>>> new_labels = torch.LongTensor([3, 4, 5])
>>> self.add_gt_(new_labels)
>>> print(str(self)) # xdoctest: +IGNORE_WANT
<AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
labels.shape=(7,))>
"""
def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
self.num_gts = num_gts
self.gt_inds = gt_inds
self.max_overlaps = max_overlaps
self.labels = labels
# Interface for possible user-defined properties
self._extra_properties = {}
@property
def num_preds(self):
"""int: the number of predictions in this assignment"""
return len(self.gt_inds)
def set_extra_property(self, key, value):
"""Set user-defined new property."""
assert key not in self.info
self._extra_properties[key] = value
def get_extra_property(self, key):
"""Get user-defined property."""
return self._extra_properties.get(key, None)
@property
def info(self):
"""dict: a dictionary of info about the object"""
basic_info = {
'num_gts': self.num_gts,
'num_preds': self.num_preds,
'gt_inds': self.gt_inds,
'max_overlaps': self.max_overlaps,
'labels': self.labels,
}
basic_info.update(self._extra_properties)
return basic_info
def __nice__(self):
"""str: a "nice" summary string describing this assign result"""
parts = []
parts.append(f'num_gts={self.num_gts!r}')
if self.gt_inds is None:
parts.append(f'gt_inds={self.gt_inds!r}')
else:
parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
if self.max_overlaps is None:
parts.append(f'max_overlaps={self.max_overlaps!r}')
else:
parts.append('max_overlaps.shape='
f'{tuple(self.max_overlaps.shape)!r}')
if self.labels is None:
parts.append(f'labels={self.labels!r}')
else:
parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
return ', '.join(parts)
@classmethod
def random(cls, **kwargs):
"""Create random AssignResult for tests or debugging.
Args:
num_preds: number of predicted boxes
num_gts: number of true boxes
p_ignore (float): probability of a predicted box assigned to an
ignored truth
p_assigned (float): probability of a predicted box not being
assigned
p_use_label (float | bool): with labels or not
rng (None | int | numpy.random.RandomState): seed or state
Returns:
:obj:`AssignResult`: Randomly generated assign results.
Example:
>>> from mmdet.core.bbox.assigners.assign_result import * # NOQA
>>> self = AssignResult.random()
>>> print(self.info)
"""
from mmdet.core.bbox import demodata
rng = demodata.ensure_rng(kwargs.get('rng', None))
num_gts = kwargs.get('num_gts', None)
num_preds = kwargs.get('num_preds', None)
p_ignore = kwargs.get('p_ignore', 0.3)
p_assigned = kwargs.get('p_assigned', 0.7)
p_use_label = kwargs.get('p_use_label', 0.5)
        num_classes = kwargs.get('num_classes', 3)
if num_gts is None:
num_gts = rng.randint(0, 8)
if num_preds is None:
num_preds = rng.randint(0, 16)
if num_gts == 0:
max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
if p_use_label is True or p_use_label < rng.rand():
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = None
else:
import numpy as np
# Create an overlap for each predicted box
max_overlaps = torch.from_numpy(rng.rand(num_preds))
# Construct gt_inds for each predicted box
is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
# maximum number of assignments constraints
n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
assigned_idxs = np.where(is_assigned)[0]
rng.shuffle(assigned_idxs)
assigned_idxs = assigned_idxs[0:n_assigned]
assigned_idxs.sort()
is_assigned[:] = 0
is_assigned[assigned_idxs] = True
is_ignore = torch.from_numpy(
rng.rand(num_preds) < p_ignore) & is_assigned
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
true_idxs = np.arange(num_gts)
rng.shuffle(true_idxs)
true_idxs = torch.from_numpy(true_idxs)
gt_inds[is_assigned] = true_idxs[:n_assigned]
gt_inds = torch.from_numpy(
rng.randint(1, num_gts + 1, size=num_preds))
gt_inds[is_ignore] = -1
gt_inds[~is_assigned] = 0
max_overlaps[~is_assigned] = 0
if p_use_label is True or p_use_label < rng.rand():
if num_classes == 0:
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = torch.from_numpy(
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
rng.randint(0, num_classes, size=num_preds))
labels[~is_assigned] = 0
else:
labels = None
self = cls(num_gts, gt_inds, max_overlaps, labels)
return self
def add_gt_(self, gt_labels):
"""Add ground truth as assigned results.
Args:
gt_labels (torch.Tensor): Labels of gt boxes
"""
self_inds = torch.arange(
1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
self.gt_inds = torch.cat([self_inds, self.gt_inds])
self.max_overlaps = torch.cat(
[self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
if self.labels is not None:
self.labels = torch.cat([gt_labels, self.labels])
| 7,753 | 36.640777 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/sim_ota_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
import torch.nn.functional as F
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import bbox_overlaps
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class SimOTAAssigner(BaseAssigner):
"""Computes matching between predictions and ground truth.
Args:
center_radius (int | float, optional): Ground truth center size
to judge whether a prior is in center. Default 2.5.
candidate_topk (int, optional): The candidate top-k which used to
get top-k ious to calculate dynamic-k. Default 10.
iou_weight (int | float, optional): The scale factor for regression
iou cost. Default 3.0.
cls_weight (int | float, optional): The scale factor for classification
cost. Default 1.0.
"""
def __init__(self,
center_radius=2.5,
candidate_topk=10,
iou_weight=3.0,
cls_weight=1.0):
self.center_radius = center_radius
self.candidate_topk = candidate_topk
self.iou_weight = iou_weight
self.cls_weight = cls_weight
def assign(self,
pred_scores,
priors,
decoded_bboxes,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
eps=1e-7):
"""Assign gt to priors using SimOTA. It will switch to CPU mode when
GPU is out of memory.
Args:
pred_scores (Tensor): Classification scores of one image,
a 2D-Tensor with shape [num_priors, num_classes]
priors (Tensor): All priors of one image, a 2D-Tensor with shape
                [num_priors, 4] in [cx, cy, stride_w, stride_h] format.
decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape
[num_priors, 4] in [tl_x, tl_y, br_x, br_y] format.
gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor
with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.
gt_labels (Tensor): Ground truth labels of one image, a Tensor
with shape [num_gts].
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
eps (float): A value added to the denominator for numerical
stability. Default 1e-7.
Returns:
            :obj:`AssignResult`: The assigned result.
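        Example:
            >>> # A tiny sketch: two priors (cx, cy, stride_w, stride_h) and
            >>> # one gt; values are illustrative, not from a real model.
            >>> import torch
            >>> self = SimOTAAssigner(center_radius=2.5)
            >>> pred_scores = torch.Tensor([[0.9, 0.1], [0.1, 0.9]])
            >>> priors = torch.Tensor([[5, 5, 8, 8], [25, 25, 8, 8]])
            >>> decoded_bboxes = torch.Tensor([
            >>>     [0, 0, 10, 10],
            >>>     [20, 20, 30, 30],
            >>> ])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
            >>> gt_labels = torch.LongTensor([0])
            >>> assign_result = self.assign(
            >>>     pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels)
            >>> assert assign_result.gt_inds.tolist() == [1, 0]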
"""
try:
assign_result = self._assign(pred_scores, priors, decoded_bboxes,
gt_bboxes, gt_labels,
gt_bboxes_ignore, eps)
return assign_result
except RuntimeError:
origin_device = pred_scores.device
warnings.warn('OOM RuntimeError is raised due to the huge memory '
'cost during label assignment. CPU mode is applied '
'in this batch. If you want to avoid this issue, '
'try to reduce the batch size or image size.')
torch.cuda.empty_cache()
pred_scores = pred_scores.cpu()
priors = priors.cpu()
decoded_bboxes = decoded_bboxes.cpu()
gt_bboxes = gt_bboxes.cpu().float()
gt_labels = gt_labels.cpu()
assign_result = self._assign(pred_scores, priors, decoded_bboxes,
gt_bboxes, gt_labels,
gt_bboxes_ignore, eps)
assign_result.gt_inds = assign_result.gt_inds.to(origin_device)
assign_result.max_overlaps = assign_result.max_overlaps.to(
origin_device)
assign_result.labels = assign_result.labels.to(origin_device)
return assign_result
def _assign(self,
pred_scores,
priors,
decoded_bboxes,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
eps=1e-7):
"""Assign gt to priors using SimOTA.
Args:
pred_scores (Tensor): Classification scores of one image,
a 2D-Tensor with shape [num_priors, num_classes]
priors (Tensor): All priors of one image, a 2D-Tensor with shape
                [num_priors, 4] in [cx, cy, stride_w, stride_h] format.
decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape
[num_priors, 4] in [tl_x, tl_y, br_x, br_y] format.
gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor
with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.
gt_labels (Tensor): Ground truth labels of one image, a Tensor
with shape [num_gts].
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
eps (float): A value added to the denominator for numerical
stability. Default 1e-7.
Returns:
:obj:`AssignResult`: The assigned result.
"""
INF = 100000000
num_gt = gt_bboxes.size(0)
num_bboxes = decoded_bboxes.size(0)
# assign 0 by default
assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ),
0,
dtype=torch.long)
valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info(
priors, gt_bboxes)
valid_decoded_bbox = decoded_bboxes[valid_mask]
valid_pred_scores = pred_scores[valid_mask]
num_valid = valid_decoded_bbox.size(0)
if num_gt == 0 or num_bboxes == 0 or num_valid == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = decoded_bboxes.new_zeros((num_bboxes, ))
if num_gt == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = decoded_bboxes.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
pairwise_ious = bbox_overlaps(valid_decoded_bbox, gt_bboxes)
iou_cost = -torch.log(pairwise_ious + eps)
gt_onehot_label = (
F.one_hot(gt_labels.to(torch.int64),
pred_scores.shape[-1]).float().unsqueeze(0).repeat(
num_valid, 1, 1))
valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1)
cls_cost = F.binary_cross_entropy(
valid_pred_scores.sqrt_(), gt_onehot_label,
reduction='none').sum(-1)
cost_matrix = (
cls_cost * self.cls_weight + iou_cost * self.iou_weight +
(~is_in_boxes_and_center) * INF)
matched_pred_ious, matched_gt_inds = \
self.dynamic_k_matching(
cost_matrix, pairwise_ious, num_gt, valid_mask)
# convert to AssignResult format
assigned_gt_inds[valid_mask] = matched_gt_inds + 1
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long()
max_overlaps = assigned_gt_inds.new_full((num_bboxes, ),
-INF,
dtype=torch.float32)
max_overlaps[valid_mask] = matched_pred_ious
return AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
def get_in_gt_and_in_center_info(self, priors, gt_bboxes):
num_gt = gt_bboxes.size(0)
repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt)
repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt)
repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt)
repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt)
# is prior centers in gt bboxes, shape: [n_prior, n_gt]
l_ = repeated_x - gt_bboxes[:, 0]
t_ = repeated_y - gt_bboxes[:, 1]
r_ = gt_bboxes[:, 2] - repeated_x
b_ = gt_bboxes[:, 3] - repeated_y
deltas = torch.stack([l_, t_, r_, b_], dim=1)
is_in_gts = deltas.min(dim=1).values > 0
is_in_gts_all = is_in_gts.sum(dim=1) > 0
# is prior centers in gt centers
gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
ct_box_l = gt_cxs - self.center_radius * repeated_stride_x
ct_box_t = gt_cys - self.center_radius * repeated_stride_y
ct_box_r = gt_cxs + self.center_radius * repeated_stride_x
ct_box_b = gt_cys + self.center_radius * repeated_stride_y
cl_ = repeated_x - ct_box_l
ct_ = repeated_y - ct_box_t
cr_ = ct_box_r - repeated_x
cb_ = ct_box_b - repeated_y
ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1)
is_in_cts = ct_deltas.min(dim=1).values > 0
is_in_cts_all = is_in_cts.sum(dim=1) > 0
# in boxes or in centers, shape: [num_priors]
is_in_gts_or_centers = is_in_gts_all | is_in_cts_all
# both in boxes and centers, shape: [num_fg, num_gt]
is_in_boxes_and_centers = (
is_in_gts[is_in_gts_or_centers, :]
& is_in_cts[is_in_gts_or_centers, :])
return is_in_gts_or_centers, is_in_boxes_and_centers
def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask):
matching_matrix = torch.zeros_like(cost)
# select candidate topk ious for dynamic-k calculation
candidate_topk = min(self.candidate_topk, pairwise_ious.size(0))
topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
# calculate dynamic k for each gt
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
for gt_idx in range(num_gt):
_, pos_idx = torch.topk(
cost[:, gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)
matching_matrix[:, gt_idx][pos_idx] = 1.0
del topk_ious, dynamic_ks, pos_idx
prior_match_gt_mask = matching_matrix.sum(1) > 1
if prior_match_gt_mask.sum() > 0:
cost_min, cost_argmin = torch.min(
cost[prior_match_gt_mask, :], dim=1)
matching_matrix[prior_match_gt_mask, :] *= 0.0
matching_matrix[prior_match_gt_mask, cost_argmin] = 1.0
# get foreground mask inside box and center prior
fg_mask_inboxes = matching_matrix.sum(1) > 0.0
valid_mask[valid_mask.clone()] = fg_mask_inboxes
matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)
matched_pred_ious = (matching_matrix *
pairwise_ious).sum(1)[fg_mask_inboxes]
return matched_pred_ious, matched_gt_inds
| 11,381 | 43.635294 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/atss_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class ATSSAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposals will be assigned with `0` or a positive integer
indicating the ground truth index.
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
        topk (int): number of bboxes selected on each level
"""
def __init__(self,
topk,
iou_calculator=dict(type='BboxOverlaps2D'),
ignore_iof_thr=-1):
self.topk = topk
self.iou_calculator = build_iou_calculator(iou_calculator)
self.ignore_iof_thr = ignore_iof_thr
# https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
def assign(self,
bboxes,
num_level_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
"""Assign gt to bboxes.
The assignment is done in following steps
1. compute iou between all bbox (bbox of all pyramid levels) and gt
2. compute center distance between all bbox and gt
        3. on each pyramid level, for each gt, select k bboxes whose centers
           are closest to the gt center, so we select k*l bboxes in total as
           candidates for each gt
        4. get the corresponding iou for these candidates, and compute the
           mean and std, set mean + std as the iou threshold
5. select these candidates whose iou are greater than or equal to
the threshold as positive
6. limit the positive sample's center in gt
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
num_level_bboxes (List): num of bboxes in each level
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
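        Example:
            >>> # A tiny sketch with a single feature level of 4 anchors
            >>> # (illustrative values only).
            >>> import torch
            >>> self = ATSSAssigner(topk=2)
            >>> bboxes = torch.Tensor([
            >>>     [0, 0, 10, 10],
            >>>     [10, 10, 20, 20],
            >>>     [5, 5, 15, 15],
            >>>     [30, 30, 40, 40],
            >>> ])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
            >>> assign_result = self.assign(bboxes, [4], gt_bboxes)
            >>> assert assign_result.gt_inds.shape == (4,)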
"""
INF = 100000000
bboxes = bboxes[:, :4]
num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
# compute iou between all bbox and gt
overlaps = self.iou_calculator(bboxes, gt_bboxes)
# assign 0 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
0,
dtype=torch.long)
if num_gt == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gt == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
# compute center distance between all bbox and gt
gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
gt_points = torch.stack((gt_cx, gt_cy), dim=1)
bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)
distances = (bboxes_points[:, None, :] -
gt_points[None, :, :]).pow(2).sum(-1).sqrt()
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
ignore_overlaps = self.iou_calculator(
bboxes, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
distances[ignore_idxs, :] = INF
assigned_gt_inds[ignore_idxs] = -1
# Selecting candidates based on the center distance
candidate_idxs = []
start_idx = 0
for level, bboxes_per_level in enumerate(num_level_bboxes):
# on each pyramid level, for each gt,
# select k bbox whose center are closest to the gt center
end_idx = start_idx + bboxes_per_level
distances_per_level = distances[start_idx:end_idx, :]
selectable_k = min(self.topk, bboxes_per_level)
_, topk_idxs_per_level = distances_per_level.topk(
selectable_k, dim=0, largest=False)
candidate_idxs.append(topk_idxs_per_level + start_idx)
start_idx = end_idx
candidate_idxs = torch.cat(candidate_idxs, dim=0)
        # get the corresponding iou for these candidates, and compute the
        # mean and std, set mean + std as the iou threshold
candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
overlaps_mean_per_gt = candidate_overlaps.mean(0)
overlaps_std_per_gt = candidate_overlaps.std(0)
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
# limit the positive sample's center in gt
for gt_idx in range(num_gt):
candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
ep_bboxes_cx = bboxes_cx.view(1, -1).expand(
num_gt, num_bboxes).contiguous().view(-1)
ep_bboxes_cy = bboxes_cy.view(1, -1).expand(
num_gt, num_bboxes).contiguous().view(-1)
candidate_idxs = candidate_idxs.view(-1)
# calculate the left, top, right, bottom distance between positive
# bbox center and gt side
l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
is_pos = is_pos & is_in_gts
# if an anchor box is assigned to multiple gts,
# the one with the highest IoU will be selected.
overlaps_inf = torch.full_like(overlaps,
-INF).t().contiguous().view(-1)
index = candidate_idxs.view(-1)[is_pos.view(-1)]
overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
overlaps_inf = overlaps_inf.view(num_gt, -1).t()
max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
assigned_gt_inds[
max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
| 7,809 | 42.388889 | 87 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/center_region_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
def scale_boxes(bboxes, scale):
"""Expand an array of boxes by a given scale.
Args:
bboxes (Tensor): Shape (m, 4)
scale (float): The scale factor of bboxes
Returns:
(Tensor): Shape (m, 4). Scaled bboxes
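    Example:
        >>> # A small sanity-check sketch: shrink a box around its center.
        >>> import torch
        >>> bboxes = torch.Tensor([[0., 0., 10., 10.]])
        >>> scaled = scale_boxes(bboxes, 0.5)
        >>> assert (scaled == torch.Tensor([[2.5, 2.5, 7.5, 7.5]])).all()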
"""
assert bboxes.size(1) == 4
w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5
h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5
x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5
y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_scaled = torch.zeros_like(bboxes)
boxes_scaled[:, 0] = x_c - w_half
boxes_scaled[:, 2] = x_c + w_half
boxes_scaled[:, 1] = y_c - h_half
boxes_scaled[:, 3] = y_c + h_half
return boxes_scaled
def is_located_in(points, bboxes):
"""Are points located in bboxes.
Args:
points (Tensor): Points, shape: (m, 2).
bboxes (Tensor): Bounding boxes, shape: (n, 4).
Return:
Tensor: Flags indicating if points are located in bboxes, shape: (m, n).
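    Example:
        >>> # A small sanity-check sketch.
        >>> import torch
        >>> points = torch.Tensor([[5., 5.], [20., 20.]])
        >>> bboxes = torch.Tensor([[0., 0., 10., 10.]])
        >>> assert is_located_in(points, bboxes).tolist() == [[True], [False]]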
"""
assert points.size(1) == 2
assert bboxes.size(1) == 4
return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \
(points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \
(points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \
(points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0))
def bboxes_area(bboxes):
"""Compute the area of an array of bboxes.
Args:
        bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4)
Returns:
Tensor: Area of the bboxes. Shape: (m, )
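    Example:
        >>> # A small sanity-check sketch.
        >>> import torch
        >>> boxes = torch.Tensor([[0., 0., 4., 5.], [2., 2., 3., 3.]])
        >>> assert bboxes_area(boxes).tolist() == [20., 1.]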
"""
assert bboxes.size(1) == 4
w = (bboxes[:, 2] - bboxes[:, 0])
h = (bboxes[:, 3] - bboxes[:, 1])
areas = w * h
return areas
@BBOX_ASSIGNERS.register_module()
class CenterRegionAssigner(BaseAssigner):
"""Assign pixels at the center region of a bbox as positive.
    Each proposal will be assigned with `-1` or a semi-positive integer
    indicating the (0-based) index of the assigned ground truth.
- -1: negative samples
- semi-positive numbers: positive sample, index (0-based) of assigned gt
Args:
pos_scale (float): Threshold within which pixels are
labelled as positive.
        neg_scale (float): Threshold above which pixels are
            labelled as negative.
min_pos_iof (float): Minimum iof of a pixel with a gt to be
labelled as positive. Default: 1e-2
ignore_gt_scale (float): Threshold within which the pixels
are ignored when the gt is labelled as shadowed. Default: 0.5
foreground_dominate (bool): If True, the bbox will be assigned as
positive when a gt's kernel region overlaps with another's shadowed
(ignored) region, otherwise it is set as ignored. Default to False.
"""
def __init__(self,
pos_scale,
neg_scale,
min_pos_iof=1e-2,
ignore_gt_scale=0.5,
foreground_dominate=False,
iou_calculator=dict(type='BboxOverlaps2D')):
self.pos_scale = pos_scale
self.neg_scale = neg_scale
self.min_pos_iof = min_pos_iof
self.ignore_gt_scale = ignore_gt_scale
self.foreground_dominate = foreground_dominate
self.iou_calculator = build_iou_calculator(iou_calculator)
def get_gt_priorities(self, gt_bboxes):
"""Get gt priorities according to their areas.
        Smaller gts have higher priority.
Args:
gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).
Returns:
            Tensor: The priority of gts so that gts with larger priority are \
                more likely to be assigned. Shape (k, )
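        Example:
            >>> # A small sketch: the smaller gt gets the larger priority.
            >>> import torch
            >>> self = CenterRegionAssigner(pos_scale=0.2, neg_scale=0.2)
            >>> gt_bboxes = torch.Tensor([
            >>>     [0., 0., 4., 4.],
            >>>     [0., 0., 10., 10.],
            >>> ])
            >>> assert self.get_gt_priorities(gt_bboxes).tolist() == [1, 0]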
"""
gt_areas = bboxes_area(gt_bboxes)
        # Rank all gt bbox areas. Smaller objects have larger priority
_, sort_idx = gt_areas.sort(descending=True)
sort_idx = sort_idx.argsort()
return sort_idx
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign gt to bboxes.
This method assigns gts to every bbox (proposal/anchor), each bbox \
will be assigned with -1, or a semi-positive number. -1 means \
negative sample, semi-positive number is the index (0-based) of \
assigned gt.
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,).
Returns:
:obj:`AssignResult`: The assigned result. Note that \
shadowed_labels of shape (N, 2) is also added as an \
`assign_result` attribute. `shadowed_labels` is a tensor \
composed of N pairs of (anchor_ind, class_label), where N \
is the number of anchors that lie in the outer region of a \
gt, anchor_ind is the shadowed anchor index and class_label \
is the shadowed class label.
Example:
>>> self = CenterRegionAssigner(0.2, 0.2)
>>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
>>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
>>> assign_result = self.assign(bboxes, gt_bboxes)
>>> expected_gt_inds = torch.LongTensor([1, 0])
>>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
"""
# There are in total 5 steps in the pixel assignment
# 1. Find core (the center region, say inner 0.2)
# and shadow (the relatively outer part, say inner 0.2-0.5)
# regions of every gt.
# 2. Find all prior bboxes that lie in gt_core and gt_shadow regions
# 3. Assign prior bboxes in gt_core with a one-hot id of the gt in
# the image.
# 3.1. For overlapping objects, the prior bboxes in gt_core are
# assigned to the object with the smallest area
# 4. Assign prior bboxes with class label according to its gt id.
# 4.1. Assign -1 to prior bboxes lying in shadowed gts
# 4.2. Assign positive prior boxes with the corresponding label
# 5. Find pixels lying in the shadow of an object and assign them with
# background label, but set the loss weight of its corresponding
# gt to zero.
assert bboxes.size(1) == 4, 'bboxes must have size of 4'
# 1. Find core positive and shadow region of every gt
gt_core = scale_boxes(gt_bboxes, self.pos_scale)
gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)
# 2. Find prior bboxes that lie in gt_core and gt_shadow regions
bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2
# The center points lie within the gt boxes
is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes)
# Only calculate bbox and gt_core IoF. This enables small prior bboxes
# to match large gts
bbox_and_gt_core_overlaps = self.iou_calculator(
bboxes, gt_core, mode='iof')
# The center point of effective priors should be within the gt box
is_bbox_in_gt_core = is_bbox_in_gt & (
bbox_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k)
is_bbox_in_gt_shadow = (
self.iou_calculator(bboxes, gt_shadow, mode='iof') >
self.min_pos_iof)
# Rule out center effective positive pixels
is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core)
num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
if num_gts == 0 or num_bboxes == 0:
# If no gts exist, assign all pixels to negative
assigned_gt_ids = \
is_bbox_in_gt_core.new_zeros((num_bboxes,),
dtype=torch.long)
pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))
else:
# Step 3: assign a one-hot gt id to each pixel; smaller objects
# have higher priority when claiming a pixel.
sort_idx = self.get_gt_priorities(gt_bboxes)
assigned_gt_ids, pixels_in_gt_shadow = \
self.assign_one_hot_gt_indices(is_bbox_in_gt_core,
is_bbox_in_gt_shadow,
gt_priority=sort_idx)
if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0:
# Scale the ignored gt bboxes and set bboxes whose centers fall
# inside them to -1 (ignored)
gt_bboxes_ignore = scale_boxes(
gt_bboxes_ignore, scale=self.ignore_gt_scale)
is_bbox_in_ignored_gts = is_located_in(bbox_centers,
gt_bboxes_ignore)
is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1)
assigned_gt_ids[is_bbox_in_ignored_gts] = -1
# 4. Assign prior bboxes with class label according to its gt id.
assigned_labels = None
shadowed_pixel_labels = None
if gt_labels is not None:
# Default assigned label is the background (-1)
assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_ids > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds]
- 1]
# 5. Find pixels lying in the shadow of an object
shadowed_pixel_labels = pixels_in_gt_shadow.clone()
if pixels_in_gt_shadow.numel() > 0:
pixel_idx, gt_idx =\
pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]
assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \
'Some pixels are dually assigned to ignore and gt!'
shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]
override = (
assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])
if self.foreground_dominate:
# When a pixel is both positive and shadowed, set it as pos
shadowed_pixel_labels = shadowed_pixel_labels[~override]
else:
# When a pixel is both pos and shadowed, set it as shadowed
assigned_labels[pixel_idx[override]] = -1
assigned_gt_ids[pixel_idx[override]] = 0
assign_result = AssignResult(
num_gts, assigned_gt_ids, None, labels=assigned_labels)
# Add shadowed_labels as assign_result property. Shape: (num_shadow, 2)
assign_result.set_extra_property('shadowed_labels',
shadowed_pixel_labels)
return assign_result
def assign_one_hot_gt_indices(self,
is_bbox_in_gt_core,
is_bbox_in_gt_shadow,
gt_priority=None):
"""Assign only one gt index to each prior box.
Gts with large gt_priority are more likely to be assigned.
Args:
is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center
is in the core area of a gt (e.g. 0-0.2).
Shape: (num_prior, num_gt).
is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox
center is in the shadowed area of a gt (e.g. 0.2-0.5).
Shape: (num_prior, num_gt).
gt_priority (Tensor): Priorities of gts. The gt with a higher
priority is more likely to be assigned to the bbox when the bbox
match with multiple gts. Shape: (num_gt, ).
Returns:
tuple: Returns (assigned_gt_inds, shadowed_gt_inds).
- assigned_gt_inds: The assigned gt index of each prior bbox \
(i.e. index from 1 to num_gts). Shape: (num_prior, ).
- shadowed_gt_inds: shadowed gt indices. It is a tensor of \
shape (num_ignore, 2) with first column being the \
shadowed prior bbox indices and the second column the \
shadowed gt indices (1-based).
"""
num_bboxes, num_gts = is_bbox_in_gt_core.shape
if gt_priority is None:
gt_priority = torch.arange(
num_gts, device=is_bbox_in_gt_core.device)
assert gt_priority.size(0) == num_gts
# The bigger gt_priority, the more preferable to be assigned
# The assigned inds are by default 0 (background)
assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ),
dtype=torch.long)
# Shadowed bboxes are assigned to be background. But the corresponding
# label is ignored during loss calculation, which is done through
# shadowed_gt_inds
shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False)
if is_bbox_in_gt_core.sum() == 0: # No gt match
shadowed_gt_inds[:, 1] += 1  # 1-based, for consistency
return assigned_gt_inds, shadowed_gt_inds
# The priority of each prior box and gt pair. If one prior box is
# matched to multiple gts, only the pair with the highest priority
# is saved
pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts),
-1,
dtype=torch.long)
# Each bbox could match with multiple gts.
# The following code deals with this situation
# Matched bboxes (to any gt). Shape: (num_pos_anchor, )
inds_of_match = torch.any(is_bbox_in_gt_core, dim=1)
# The matched gt index of each positive bbox. Length >= num_pos_anchor,
# since one bbox could match multiple gts
matched_bbox_gt_inds = torch.nonzero(
is_bbox_in_gt_core, as_tuple=False)[:, 1]
# Assign priority to each bbox-gt pair.
pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds]
_, argmax_priority = pair_priority[inds_of_match].max(dim=1)
assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based
# Zero-out the assigned anchor box to filter the shadowed gt indices
is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0
# Concat the shadowed indices due to overlapping with those outside of
# the effective scale. shape: (total_num_ignore, 2)
shadowed_gt_inds = torch.cat(
(shadowed_gt_inds, torch.nonzero(
is_bbox_in_gt_core, as_tuple=False)),
dim=0)
# `is_bbox_in_gt_core` should be changed back to keep arguments intact.
is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1
# 1-based shadowed gt indices, to be consistent with `assigned_gt_inds`
if shadowed_gt_inds.numel() > 0:
shadowed_gt_inds[:, 1] += 1
return assigned_gt_inds, shadowed_gt_inds
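# --- Illustrative sketch (hypothetical, not part of the original file):
# when one prior sits in the core of two gts, the gt with the larger
# priority wins and the losing gt becomes a shadowed pair for that prior.
def _demo_one_hot_assignment():
    assigner = CenterRegionAssigner(pos_scale=0.2, neg_scale=0.5)
    is_in_core = torch.tensor([[True, True]])  # 1 prior, 2 gts
    is_in_shadow = torch.zeros_like(is_in_core)
    gt_priority = torch.tensor([0, 1])  # gt 1 has the higher priority
    gt_inds, shadowed = assigner.assign_one_hot_gt_indices(
        is_in_core, is_in_shadow, gt_priority=gt_priority)
    assert gt_inds.tolist() == [2]  # 1-based index of gt 1
    # the losing gt (gt 0) is recorded as a shadowed pair (1-based gt index)
    assert shadowed.tolist() == [[0, 1]]
    return gt_inds, shadowed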
| 15,477 | 44.928783 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/region_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import anchor_inside_flags
from ..builder import BBOX_ASSIGNERS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
def calc_region(bbox, ratio, stride, featmap_size=None):
"""Calculate region of the box defined by the ratio, the ratio is from the
center of the box to every edge."""
# project bbox on the feature
f_bbox = bbox / stride
x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1])
y1 = y1.clamp(min=0, max=featmap_size[0])
x2 = x2.clamp(min=0, max=featmap_size[1])
y2 = y2.clamp(min=0, max=featmap_size[0])
return (x1, y1, x2, y2)
def anchor_ctr_inside_region_flags(anchors, stride, region):
"""Get the flag indicate whether anchor centers are inside regions."""
x1, y1, x2, y2 = region
f_anchors = anchors / stride
x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5
y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5
flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)
return flags
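# --- Illustrative sketch (hypothetical, not part of the original file):
# an 80x80 gt projected onto a stride-8 feature map with ratio 0.3 yields
# the inner cell band (3, 3, 7, 7); only anchors whose centers fall in
# that band are flagged.
def _demo_calc_region():
    gt_bbox = torch.Tensor([0., 0., 80., 80.])
    region = calc_region(gt_bbox, ratio=0.3, stride=8)  # (3., 3., 7., 7.)
    anchors = torch.Tensor([[32., 32., 48., 48.], [0., 0., 8., 8.]])
    flags = anchor_ctr_inside_region_flags(anchors, 8, region)
    assert flags.tolist() == [True, False]
    return region, flags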
@BBOX_ASSIGNERS.register_module()
class RegionAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposal will be assigned with `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
center_ratio: ratio of the region in the center of the bbox to
define positive sample.
ignore_ratio: ratio of the region to define ignore samples.
"""
def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
self.center_ratio = center_ratio
self.ignore_ratio = ignore_ratio
def assign(self,
mlvl_anchors,
mlvl_valid_flags,
gt_bboxes,
img_meta,
featmap_sizes,
anchor_scale,
anchor_strides,
gt_bboxes_ignore=None,
gt_labels=None,
allowed_border=0):
"""Assign gt to anchors.
This method assigns a gt bbox to every bbox (proposal/anchor); each bbox
will be assigned with -1, 0, or a positive number. -1 means don't care,
0 means negative sample, and a positive number is the index (1-based) of
the assigned gt.
The assignment is done in following steps, and the order matters.
1. Assign every anchor to 0 (negative)
2. (For each gt_bboxes) Compute ignore flags based on ignore_region
then assign -1 to anchors w.r.t. ignore flags
3. (For each gt_bboxes) Compute pos flags based on center_region then
assign gt_bboxes to anchors w.r.t. pos flags
4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor
level then assign -1 to anchors w.r.t. ignore flags
5. Assign anchor outside of image to -1
Args:
mlvl_anchors (list[Tensor]): Multi level anchors.
mlvl_valid_flags (list[Tensor]): Multi level valid flags.
gt_bboxes (Tensor): Ground truth bboxes of the image, shape (k, 4).
img_meta (dict): Meta info of image.
featmap_sizes (list[Tensor]): Feature map size of each level.
anchor_scale (int): Scale of the anchor.
anchor_strides (list[int]): Stride of the anchor.
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
allowed_border (int, optional): The border to allow the valid
anchor. Defaults to 0.
Returns:
:obj:`AssignResult`: The assign result.
"""
if gt_bboxes_ignore is not None:
raise NotImplementedError
num_gts = gt_bboxes.shape[0]
num_bboxes = sum(x.shape[0] for x in mlvl_anchors)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = gt_bboxes.new_zeros((num_bboxes, ))
assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ),
dtype=torch.long)
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = gt_bboxes.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
num_lvls = len(mlvl_anchors)
r1 = (1 - self.center_ratio) / 2
r2 = (1 - self.ignore_ratio) / 2
scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
(gt_bboxes[:, 3] - gt_bboxes[:, 1]))
min_anchor_size = scale.new_full(
(1, ), float(anchor_scale * anchor_strides[0]))
target_lvls = torch.floor(
torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
# 1. assign 0 (negative) by default
mlvl_assigned_gt_inds = []
mlvl_ignore_flags = []
for lvl in range(num_lvls):
h, w = featmap_sizes[lvl]
assert h * w == mlvl_anchors[lvl].shape[0]
assigned_gt_inds = gt_bboxes.new_full((h * w, ),
0,
dtype=torch.long)
# use bool flags so they can serve as a mask when assigning -1 later
ignore_flags = torch.zeros_like(assigned_gt_inds, dtype=torch.bool)
mlvl_assigned_gt_inds.append(assigned_gt_inds)
mlvl_ignore_flags.append(ignore_flags)
for gt_id in range(num_gts):
lvl = target_lvls[gt_id].item()
featmap_size = featmap_sizes[lvl]
stride = anchor_strides[lvl]
anchors = mlvl_anchors[lvl]
gt_bbox = gt_bboxes[gt_id, :4]
# Compute regions
ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)
ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)
# 2. Assign -1 to ignore flags
ignore_flags = anchor_ctr_inside_region_flags(
anchors, stride, ignore_region)
mlvl_assigned_gt_inds[lvl][ignore_flags] = -1
# 3. Assign gt_bboxes to pos flags
pos_flags = anchor_ctr_inside_region_flags(anchors, stride,
ctr_region)
mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1
# 4. Assign -1 to ignore adjacent lvl
if lvl > 0:
d_lvl = lvl - 1
d_anchors = mlvl_anchors[d_lvl]
d_featmap_size = featmap_sizes[d_lvl]
d_stride = anchor_strides[d_lvl]
d_ignore_region = calc_region(gt_bbox, r2, d_stride,
d_featmap_size)
ignore_flags = anchor_ctr_inside_region_flags(
d_anchors, d_stride, d_ignore_region)
mlvl_ignore_flags[d_lvl][ignore_flags] = 1
if lvl < num_lvls - 1:
u_lvl = lvl + 1
u_anchors = mlvl_anchors[u_lvl]
u_featmap_size = featmap_sizes[u_lvl]
u_stride = anchor_strides[u_lvl]
u_ignore_region = calc_region(gt_bbox, r2, u_stride,
u_featmap_size)
ignore_flags = anchor_ctr_inside_region_flags(
u_anchors, u_stride, u_ignore_region)
mlvl_ignore_flags[u_lvl][ignore_flags] = 1
# 4. (cont.) Assign -1 to ignore adjacent lvl
for lvl in range(num_lvls):
ignore_flags = mlvl_ignore_flags[lvl]
mlvl_assigned_gt_inds[lvl][ignore_flags] = -1
# 5. Assign -1 to anchor outside of image
flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)
flat_anchors = torch.cat(mlvl_anchors)
flat_valid_flags = torch.cat(mlvl_valid_flags)
assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==
flat_valid_flags.shape[0])
inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,
img_meta['img_shape'],
allowed_border)
outside_flags = ~inside_flags
flat_assigned_gt_inds[outside_flags] = -1
if gt_labels is not None:
assigned_labels = torch.zeros_like(flat_assigned_gt_inds)
pos_flags = flat_assigned_gt_inds > 0
assigned_labels[pos_flags] = gt_labels[
flat_assigned_gt_inds[pos_flags] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)
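# --- Illustrative sketch (hypothetical, not part of the original file):
# the scale-to-level mapping used above. With anchor_scale=8 and a
# stride-8 base level, a gt whose sqrt-area is ~64 px lands on level 0,
# and every doubling of scale moves it up one level.
def _demo_target_lvls(anchor_scale=8, base_stride=8, num_lvls=5):
    scale = torch.Tensor([64., 128., 1000.])
    min_anchor_size = scale.new_full((1, ), float(anchor_scale * base_stride))
    target_lvls = torch.floor(
        torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
    target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
    assert target_lvls.tolist() == [0, 1, 4]
    return target_lvls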
| 9,473 | 41.484305 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/grid_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class GridAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposal will be assigned with `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
"""
def __init__(self,
pos_iou_thr,
neg_iou_thr,
min_pos_iou=.0,
gt_max_assign_all=True,
iou_calculator=dict(type='BboxOverlaps2D')):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.iou_calculator = build_iou_calculator(iou_calculator)
def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None):
"""Assign gt to bboxes. The process is very much like the max iou
assigner, except that positive samples are constrained within the cell
that the gt boxes fell in.
This method assign a gt bbox to every bbox (proposal/anchor), each bbox
will be assigned with -1, 0, or a positive number. -1 means don't care,
0 means negative sample, positive number is the index (1-based) of
assigned gt.
The assignment is done in following steps, the order matters.
1. assign every bbox to -1
2. assign proposals whose iou with all gts <= neg_iou_thr to 0
3. for each bbox within a cell, if the iou with its nearest gt >
pos_iou_thr and the center of that gt falls inside the cell,
assign that gt to the bbox
4. for each gt bbox, assign its nearest proposals within the cell the
gt bbox falls in to itself.
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
box_responsible_flags (Tensor): flag to indicate whether box is
responsible for prediction, shape(n, )
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
# compute iou between all gt and bboxes
overlaps = self.iou_calculator(gt_bboxes, bboxes)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gts == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
# 2. assign negative: below
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
# shape of max_overlaps == argmax_overlaps == num_bboxes
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps <= self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, (tuple, list)):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])
& (max_overlaps <= self.neg_iou_thr[1])] = 0
# 3. assign positive: anchors that fall into the responsible cell
# and are above the positive IoU threshold, the order matters.
# The precondition of the comparison is to filter out all
# unrelated anchors, i.e. those not in box_responsible_flags
overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.
# calculate max_overlaps again, but this time we only consider IOUs
# for anchors responsible for prediction
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
# shape of gt_max_overlaps == gt_argmax_overlaps == num_gts
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
pos_inds = (max_overlaps >
self.pos_iou_thr) & box_responsible_flags.type(torch.bool)
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
# 4. assign positive to max overlapped anchors within responsible cell
for i in range(num_gts):
if gt_max_overlaps[i] > self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \
box_responsible_flags.type(torch.bool)
assigned_gt_inds[max_iou_inds] = i + 1
elif box_responsible_flags[gt_argmax_overlaps[i]]:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
# assign labels of positive anchors
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
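# --- Illustrative sketch (hypothetical, not part of the original file):
# an anchor that overlaps a gt perfectly is still not made positive when
# its cell is not responsible for that gt; it stays -1 (don't care).
def _demo_grid_assigner():
    assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
    bboxes = torch.Tensor([[0, 0, 10, 10], [0, 0, 10, 10]])
    responsible = torch.tensor([True, False])
    gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
    result = assigner.assign(bboxes, responsible, gt_bboxes)
    assert result.gt_inds.tolist() == [1, -1]
    return result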
| 6,863 | 42.719745 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/hungarian_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..match_costs import build_match_cost
from ..transforms import bbox_cxcywh_to_xyxy
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
try:
from scipy.optimize import linear_sum_assignment
except ImportError:
linear_sum_assignment = None
@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
"""Computes one-to-one matching between predictions and ground truth.
This class computes an assignment between the targets and the predictions
based on the costs. The cost is a weighted sum of three components:
classification cost, regression L1 cost and regression iou cost. The
targets don't include the no_object, so generally there are more
predictions than targets. After the one-to-one matching, the un-matched
are treated as backgrounds. Thus each query prediction will be assigned
with `0` or a positive integer indicating the ground truth index:
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
cls_cost (dict, optional): Config of the classification cost.
Default dict(type='ClassificationCost', weight=1.).
reg_cost (dict, optional): Config of the regression L1 cost.
Default dict(type='BBoxL1Cost', weight=1.).
iou_cost (dict, optional): Config of the regression iou cost.
Default dict(type='IoUCost', iou_mode='giou', weight=1.).
"""
def __init__(self,
cls_cost=dict(type='ClassificationCost', weight=1.),
reg_cost=dict(type='BBoxL1Cost', weight=1.0),
iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
self.cls_cost = build_match_cost(cls_cost)
self.reg_cost = build_match_cost(reg_cost)
self.iou_cost = build_match_cost(iou_cost)
def assign(self,
bbox_pred,
cls_pred,
gt_bboxes,
gt_labels,
img_meta,
gt_bboxes_ignore=None,
eps=1e-7):
"""Computes one-to-one matching based on the weighted costs.
This method assigns each query prediction to a ground truth or
background. The `assigned_gt_inds` with -1 means don't care,
0 means negative sample, and a positive number is the index (1-based)
of the assigned gt.
The assignment is done in the following steps, the order matters.
1. assign every prediction to -1
2. compute the weighted costs
3. do Hungarian matching on CPU based on the costs
4. assign all to 0 (background) first, then for each matched pair
between predictions and gts, treat this prediction as foreground
and assign the corresponding gt index (plus 1) to it.
Args:
bbox_pred (Tensor): Predicted boxes with normalized coordinates
(cx, cy, w, h), which are all in range [0, 1]. Shape
[num_query, 4].
cls_pred (Tensor): Predicted classification logits, shape
[num_query, num_class].
gt_bboxes (Tensor): Ground truth boxes with unnormalized
coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
img_meta (dict): Meta information for current image.
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`. Default None.
eps (int | float, optional): A value added to the denominator for
numerical stability. Default 1e-7.
Returns:
:obj:`AssignResult`: The assigned result.
"""
assert gt_bboxes_ignore is None, \
'Only case when gt_bboxes_ignore is None is supported.'
num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)
# 1. assign -1 by default
assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
-1,
dtype=torch.long)
assigned_labels = bbox_pred.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
if num_gts == 0:
# No ground truth, assign all to background
assigned_gt_inds[:] = 0
return AssignResult(
num_gts, assigned_gt_inds, None, labels=assigned_labels)
img_h, img_w, _ = img_meta['img_shape']
factor = gt_bboxes.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0)
# 2. compute the weighted costs
# classification and bboxcost.
cls_cost = self.cls_cost(cls_pred, gt_labels)
# regression L1 cost
normalize_gt_bboxes = gt_bboxes / factor
reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
# regression iou cost, by default giou is used in the official DETR.
bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
iou_cost = self.iou_cost(bboxes, gt_bboxes)
# weighted sum of above three costs
cost = cls_cost + reg_cost + iou_cost
# 3. do Hungarian matching on CPU using linear_sum_assignment
cost = cost.detach().cpu()
if linear_sum_assignment is None:
raise ImportError('Please run "pip install scipy" '
'to install scipy first.')
matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
matched_row_inds = torch.from_numpy(matched_row_inds).to(
bbox_pred.device)
matched_col_inds = torch.from_numpy(matched_col_inds).to(
bbox_pred.device)
# 4. assign backgrounds and foregrounds
# assign all indices to backgrounds first
assigned_gt_inds[:] = 0
# assign foregrounds based on matching results
assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
return AssignResult(
num_gts, assigned_gt_inds, None, labels=assigned_labels)
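# --- Illustrative sketch (hypothetical, not part of the original file):
# step 3 above boils down to scipy's linear_sum_assignment on a
# (num_query, num_gt) cost matrix; each gt gets exactly one query and
# the total cost is minimized. Requires scipy to be installed.
def _demo_hungarian_matching():
    cost = torch.tensor([[0.1, 0.9],
                         [0.8, 0.2],
                         [0.5, 0.5]])
    rows, cols = linear_sum_assignment(cost.numpy())
    # query 0 -> gt 0 (0.1) and query 1 -> gt 1 (0.2); query 2 stays
    # unmatched and would be assigned to background by step 4.
    assert rows.tolist() == [0, 1] and cols.tolist() == [0, 1]
    return rows, cols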
| 6,665 | 44.346939 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/task_aligned_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
INF = 100000000
@BBOX_ASSIGNERS.register_module()
class TaskAlignedAssigner(BaseAssigner):
"""Task aligned assigner used in the paper:
`TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_.
Assign a corresponding gt bbox or background to each predicted bbox.
Each bbox will be assigned with `0` or a positive integer
indicating the ground truth index.
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
topk (int): number of bbox selected in each level
iou_calculator (dict): Config dict for iou calculator.
Default: dict(type='BboxOverlaps2D')
"""
def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')):
assert topk >= 1
self.topk = topk
self.iou_calculator = build_iou_calculator(iou_calculator)
def assign(self,
pred_scores,
decode_bboxes,
anchors,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None,
alpha=1,
beta=6):
"""Assign gt to bboxes.
The assignment is done in following steps
1. compute alignment metric between all bbox (bbox of all pyramid
levels) and gt
2. select top-k bbox as candidates for each gt
3. limit the positive sample's center in gt (because the anchor-free
detector can only predict positive distances)
Args:
pred_scores (Tensor): predicted class probability,
shape(n, num_classes)
decode_bboxes (Tensor): predicted bounding boxes, shape(n, 4)
anchors (Tensor): pre-defined anchors, shape(n, 4).
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`TaskAlignedAssignResult`: The assign result.
"""
anchors = anchors[:, :4]
num_gt, num_bboxes = gt_bboxes.size(0), anchors.size(0)
# compute alignment metric between all bbox and gt
overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach()
bbox_scores = pred_scores[:, gt_labels].detach()
# assign 0 by default
assigned_gt_inds = anchors.new_full((num_bboxes, ),
0,
dtype=torch.long)
assign_metrics = anchors.new_zeros((num_bboxes, ))
if num_gt == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = anchors.new_zeros((num_bboxes, ))
if num_gt == 0:
# No gt boxes, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = anchors.new_full((num_bboxes, ),
-1,
dtype=torch.long)
assign_result = AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
assign_result.assign_metrics = assign_metrics
return assign_result
# select top-k bboxes as candidates for each gt
alignment_metrics = bbox_scores**alpha * overlaps**beta
topk = min(self.topk, alignment_metrics.size(0))
_, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True)
candidate_metrics = alignment_metrics[candidate_idxs,
torch.arange(num_gt)]
is_pos = candidate_metrics > 0
# limit the positive sample's center in gt
anchors_cx = (anchors[:, 0] + anchors[:, 2]) / 2.0
anchors_cy = (anchors[:, 1] + anchors[:, 3]) / 2.0
for gt_idx in range(num_gt):
candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
ep_anchors_cx = anchors_cx.view(1, -1).expand(
num_gt, num_bboxes).contiguous().view(-1)
ep_anchors_cy = anchors_cy.view(1, -1).expand(
num_gt, num_bboxes).contiguous().view(-1)
candidate_idxs = candidate_idxs.view(-1)
# calculate the left, top, right, bottom distance between positive
# bbox center and gt side
l_ = ep_anchors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
t_ = ep_anchors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
r_ = gt_bboxes[:, 2] - ep_anchors_cx[candidate_idxs].view(-1, num_gt)
b_ = gt_bboxes[:, 3] - ep_anchors_cy[candidate_idxs].view(-1, num_gt)
is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
is_pos = is_pos & is_in_gts
# if an anchor box is assigned to multiple gts,
# the one with the highest iou will be selected.
overlaps_inf = torch.full_like(overlaps,
-INF).t().contiguous().view(-1)
index = candidate_idxs.view(-1)[is_pos.view(-1)]
overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
overlaps_inf = overlaps_inf.view(num_gt, -1).t()
max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
assigned_gt_inds[
max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1
assign_metrics[max_overlaps != -INF] = alignment_metrics[
max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]]
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
assign_result = AssignResult(
num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
assign_result.assign_metrics = assign_metrics
return assign_result
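# --- Illustrative sketch (hypothetical, not part of the original file):
# the alignment metric t = s**alpha * u**beta used above combines the
# classification score s and the IoU u, so a box must be good at both
# tasks to rank high (alpha=1, beta=6 are the defaults of `assign`).
def _demo_alignment_metric(alpha=1, beta=6):
    score = torch.tensor([0.9, 0.2])  # score of the gt class
    iou = torch.tensor([0.5, 0.95])   # IoU of the decoded box with the gt
    metric = score**alpha * iou**beta
    # the well-localized box wins despite its much lower score
    assert metric[1] > metric[0]
    return metric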
| 6,554 | 42.125 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/uniform_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from ..transforms import bbox_xyxy_to_cxcywh
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
"""Uniform Matching between the anchors and gt boxes, which can achieve
balance in positive anchors, and gt_bboxes_ignore was not considered for
now.
Args:
pos_ignore_thr (float): positive candidates whose IoU with the gt
is below this threshold are ignored
neg_ignore_thr (float): negative anchors whose max IoU with any gt
is above this threshold are ignored
match_times (int): Number of positive anchors for each gt box.
Default 4.
iou_calculator (dict): iou_calculator config
"""
def __init__(self,
pos_ignore_thr,
neg_ignore_thr,
match_times=4,
iou_calculator=dict(type='BboxOverlaps2D')):
self.match_times = match_times
self.pos_ignore_thr = pos_ignore_thr
self.neg_ignore_thr = neg_ignore_thr
self.iou_calculator = build_iou_calculator(iou_calculator)
def assign(self,
bbox_pred,
anchor,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)
# 1. assign 0 (background) by default
assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
0,
dtype=torch.long)
assigned_labels = bbox_pred.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
if num_gts == 0:
# No ground truth, assign all to background
assigned_gt_inds[:] = 0
assign_result = AssignResult(
num_gts, assigned_gt_inds, None, labels=assigned_labels)
assign_result.set_extra_property(
'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
assign_result.set_extra_property('pos_predicted_boxes',
bbox_pred.new_empty((0, 4)))
assign_result.set_extra_property('target_boxes',
bbox_pred.new_empty((0, 4)))
return assign_result
# 2. Compute the L1 cost between boxes
# Note that we use both anchors and predicted boxes
cost_bbox = torch.cdist(
bbox_xyxy_to_cxcywh(bbox_pred),
bbox_xyxy_to_cxcywh(gt_bboxes),
p=1)
cost_bbox_anchors = torch.cdist(
bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
# We found that the topk function gives different results on cpu
# and cuda. In order to ensure consistency with the source code,
# we also use cpu mode.
# TODO: Check whether the performance of cpu and cuda are the same.
C = cost_bbox.cpu()
C1 = cost_bbox_anchors.cpu()
# self.match_times x n
index = torch.topk(
C,  # cost matrix of shape (num_bboxes, num_gts)
k=self.match_times,
dim=0,
largest=False)[1]
# self.match_times x n
index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
# (self.match_times*2) x n
indexes = torch.cat((index, index1),
dim=1).reshape(-1).to(bbox_pred.device)
pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)
pred_max_overlaps, _ = pred_overlaps.max(dim=1)
anchor_max_overlaps, _ = anchor_overlaps.max(dim=0)
# 3. Compute the ignore indexes using gt_bboxes and predicted boxes
ignore_idx = pred_max_overlaps > self.neg_ignore_thr
assigned_gt_inds[ignore_idx] = -1
# 4. Compute the ignore indexes of positive samples using anchors
# and predicted boxes
pos_gt_index = torch.arange(
0, C1.size(1),
device=bbox_pred.device).repeat(self.match_times * 2)
pos_ious = anchor_overlaps[indexes, pos_gt_index]
pos_ignore_idx = pos_ious < self.pos_ignore_thr
pos_gt_index_with_ignore = pos_gt_index + 1
pos_gt_index_with_ignore[pos_ignore_idx] = -1
assigned_gt_inds[indexes] = pos_gt_index_with_ignore
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
assign_result = AssignResult(
num_gts,
assigned_gt_inds,
anchor_max_overlaps,
labels=assigned_labels)
assign_result.set_extra_property('pos_idx', ~pos_ignore_idx)
assign_result.set_extra_property('pos_predicted_boxes',
bbox_pred[indexes])
assign_result.set_extra_property('target_boxes',
gt_bboxes[pos_gt_index])
return assign_result
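# --- Illustrative sketch (hypothetical, not part of the original file):
# uniform matching is the topk call above: take the `match_times`
# smallest L1 costs per gt column, so every gt receives the same number
# of candidate positives regardless of its size.
def _demo_uniform_topk(match_times=2):
    # rows: predictions, columns: gts (L1 distances in cxcywh space)
    cost = torch.tensor([[0.1, 0.7],
                         [0.3, 0.9],
                         [0.6, 0.2],
                         [0.8, 0.4]])
    index = torch.topk(cost, k=match_times, dim=0, largest=False)[1]
    # two positives per gt: predictions {0, 1} for gt 0, {2, 3} for gt 1
    assert index.t().tolist() == [[0, 1], [2, 3]]
    return index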
| 5,556 | 39.860294 | 77 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/point_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class PointAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each point.
Each proposal will be assigned with `0`, or a positive integer
indicating the ground truth index.
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
"""
def __init__(self, scale=4, pos_num=3):
self.scale = scale
self.pos_num = pos_num
def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign gt to points.
This method assigns a gt bbox to every point set; each point set
will be assigned with 0, or a positive number. 0 means negative
sample (background), and a positive number is the index (1-based)
of the assigned gt.
The assignment is done in following steps, the order matters.
1. assign every point to 0 (background)
2. a point is assigned to some gt bbox if
(i) the point is among the k closest points to the gt center
(ii) the distance between this point and the gt is smaller than
that to other gt bboxes
Args:
points (Tensor): points to be assigned, shape(n, 3) while last
dimension stands for (x, y, stride).
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
NOTE: currently unused.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_points = points.shape[0]
num_gts = gt_bboxes.shape[0]
if num_gts == 0 or num_points == 0:
# If no truth assign everything to the background
assigned_gt_inds = points.new_full((num_points, ),
0,
dtype=torch.long)
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = points.new_full((num_points, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts, assigned_gt_inds, None, labels=assigned_labels)
points_xy = points[:, :2]
points_stride = points[:, 2]
points_lvl = torch.log2(
points_stride).int() # [3...,4...,5...,6...,7...]
lvl_min, lvl_max = points_lvl.min(), points_lvl.max()
# assign gt box
gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
scale = self.scale
gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)
# stores the assigned gt index of each point
assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)
# stores the assigned gt dist (to this point) of each point
assigned_gt_dist = points.new_full((num_points, ), float('inf'))
points_range = torch.arange(points.shape[0])
for idx in range(num_gts):
gt_lvl = gt_bboxes_lvl[idx]
# get the index of points in this level
lvl_idx = gt_lvl == points_lvl
points_index = points_range[lvl_idx]
# get the points in this level
lvl_points = points_xy[lvl_idx, :]
# get the center point of gt
gt_point = gt_bboxes_xy[[idx], :]
# get width and height of gt
gt_wh = gt_bboxes_wh[[idx], :]
# compute the distance between gt center and
# all points in this level
points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)
# find the nearest k points to gt center in this level
min_dist, min_dist_index = torch.topk(
points_gt_dist, self.pos_num, largest=False)
# the index of nearest k points to gt center in this level
min_dist_points_index = points_index[min_dist_index]
# The less_than_recorded_index stores the index
# of min_dist that is less than the assigned_gt_dist, where
# assigned_gt_dist stores the dist from the previously assigned gt
# (if it exists) to each point.
less_than_recorded_index = min_dist < assigned_gt_dist[
min_dist_points_index]
# The min_dist_points_index stores the index of points that satisfy:
# (1) it is among the k nearest to the current gt center in this level.
# (2) it is closer to the current gt center than to other gt centers.
min_dist_points_index = min_dist_points_index[
less_than_recorded_index]
# assign the result
assigned_gt_inds[min_dist_points_index] = idx + 1
assigned_gt_dist[min_dist_points_index] = min_dist[
less_than_recorded_index]
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, assigned_gt_inds, None, labels=assigned_labels)
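# --- Illustrative sketch (hypothetical, not part of the original file):
# the log2 mapping above picks the feature level whose stride matches
# the gt size (scale=4 is the default): a 32x32 gt maps to level 3
# (stride 8), a 128x64 gt to level 4.
def _demo_point_level(scale=4):
    wh = torch.tensor([[32., 32.], [128., 64.]])
    lvl = ((torch.log2(wh[:, 0] / scale) +
            torch.log2(wh[:, 1] / scale)) / 2).int()
    assert lvl.tolist() == [3, 4]
    return lvl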
| 5,995 | 43.414815 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/approx_max_iou_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .max_iou_assigner import MaxIoUAssigner
@BBOX_ASSIGNERS.register_module()
class ApproxMaxIoUAssigner(MaxIoUAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposal will be assigned with an integer indicating the ground truth
index. (semi-positive index: gt label (0-based), -1: background)
- -1: negative sample, no assigned gt
- semi-positive integer: positive sample, index (0-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
`gt_bboxes_ignore` is specified). Negative values mean not
ignoring any bboxes.
ignore_wrt_candidates (bool): Whether to compute the iof between
`bboxes` and `gt_bboxes_ignore`, or the contrary.
match_low_quality (bool): Whether to allow low quality matches. This is
usually allowed for RPN and single stage detectors, but not allowed
in the second stage.
gpu_assign_thr (int): The upper bound of the number of GT for GPU
assign. When the number of gt is above this threshold, will assign
on CPU device. Negative values mean not assign on CPU.
"""
def __init__(self,
pos_iou_thr,
neg_iou_thr,
min_pos_iou=.0,
gt_max_assign_all=True,
ignore_iof_thr=-1,
ignore_wrt_candidates=True,
match_low_quality=True,
gpu_assign_thr=-1,
iou_calculator=dict(type='BboxOverlaps2D')):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.ignore_iof_thr = ignore_iof_thr
self.ignore_wrt_candidates = ignore_wrt_candidates
self.gpu_assign_thr = gpu_assign_thr
self.match_low_quality = match_low_quality
self.iou_calculator = build_iou_calculator(iou_calculator)
def assign(self,
approxs,
squares,
approxs_per_octave,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
"""Assign gt to approxs.
This method assigns a gt bbox to each group of approxs (bboxes);
each group of approxs is represented by a base approx (bbox) and
will be assigned with -1, or a semi-positive number.
background_label (-1) means negative sample,
a semi-positive number is the index (0-based) of the assigned gt.
The assignment is done in following steps, the order matters.
1. assign every bbox to background_label (-1)
2. use the max IoU of each group of approxs as the overlap of its square
3. assign proposals whose iou with all gts < neg_iou_thr to background
4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
assign it to that bbox
5. for each gt bbox, assign its nearest proposals (may be more than
one) to itself
Args:
approxs (Tensor): Bounding boxes to be assigned,
shape(approxs_per_octave*n, 4).
squares (Tensor): Base Bounding boxes to be assigned,
shape(n, 4).
approxs_per_octave (int): number of approxs per octave
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_squares = squares.size(0)
num_gts = gt_bboxes.size(0)
if num_squares == 0 or num_gts == 0:
# No predictions and/or truth, return empty assignment
overlaps = approxs.new(num_gts, num_squares)
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
return assign_result
# re-organize anchors by approxs_per_octave x num_squares
approxs = torch.transpose(
approxs.view(num_squares, approxs_per_octave, 4), 0,
1).contiguous().view(-1, 4)
assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
num_gts > self.gpu_assign_thr) else False
# compute overlap and assign gt on CPU when number of GT is large
if assign_on_cpu:
device = approxs.device
approxs = approxs.cpu()
gt_bboxes = gt_bboxes.cpu()
if gt_bboxes_ignore is not None:
gt_bboxes_ignore = gt_bboxes_ignore.cpu()
if gt_labels is not None:
gt_labels = gt_labels.cpu()
all_overlaps = self.iou_calculator(approxs, gt_bboxes)
overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
num_gts).max(dim=0)
overlaps = torch.transpose(overlaps, 0, 1)
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = self.iou_calculator(
squares, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
else:
ignore_overlaps = self.iou_calculator(
gt_bboxes_ignore, squares, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
if assign_on_cpu:
assign_result.gt_inds = assign_result.gt_inds.to(device)
assign_result.max_overlaps = assign_result.max_overlaps.to(device)
if assign_result.labels is not None:
assign_result.labels = assign_result.labels.to(device)
return assign_result
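# --- Illustrative sketch (hypothetical, not part of the original file):
# the view/max trick above keeps, for every square, only the best overlap
# among its `approxs_per_octave` approxs (flat order is octave-major).
def _demo_group_max():
    approxs_per_octave, num_squares, num_gts = 2, 2, 1
    # rows: [sq0_oct0, sq1_oct0, sq0_oct1, sq1_oct1] x 1 gt
    all_overlaps = torch.tensor([[0.1], [0.7], [0.4], [0.3]])
    overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
                                    num_gts).max(dim=0)
    # square 0 keeps max(0.1, 0.4) = 0.4, square 1 keeps max(0.7, 0.3) = 0.7
    assert torch.allclose(overlaps.squeeze(-1), torch.tensor([0.4, 0.7]))
    return overlaps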
| 6,697 | 44.564626 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/assigners/max_iou_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class MaxIoUAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposal will be assigned with `-1`, or a semi-positive integer
indicating the ground truth index.
- -1: negative sample, no assigned gt
- semi-positive integer: positive sample, index (0-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
`gt_bboxes_ignore` is specified). Negative values mean not
ignoring any bboxes.
ignore_wrt_candidates (bool): Whether to compute the iof between
`bboxes` and `gt_bboxes_ignore`, or the contrary.
match_low_quality (bool): Whether to allow low quality matches. This is
usually allowed for RPN and single stage detectors, but not allowed
in the second stage. Details are demonstrated in Step 4.
gpu_assign_thr (int): The upper bound of the number of GT for GPU
assign. When the number of gt is above this threshold, will assign
on CPU device. Negative values mean not assign on CPU.
"""
def __init__(self,
pos_iou_thr,
neg_iou_thr,
min_pos_iou=.0,
gt_max_assign_all=True,
ignore_iof_thr=-1,
ignore_wrt_candidates=True,
match_low_quality=True,
gpu_assign_thr=-1,
iou_calculator=dict(type='BboxOverlaps2D')):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.ignore_iof_thr = ignore_iof_thr
self.ignore_wrt_candidates = ignore_wrt_candidates
self.gpu_assign_thr = gpu_assign_thr
self.match_low_quality = match_low_quality
self.iou_calculator = build_iou_calculator(iou_calculator)
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
"""Assign gt to bboxes.
This method assigns a gt bbox to every bbox (proposal/anchor); each bbox
will be assigned with -1, or a semi-positive number. -1 means negative
sample, a semi-positive number is the index (0-based) of the assigned gt.
The assignment is done in following steps, the order matters.
1. assign every bbox to the background
2. assign proposals whose iou with all gts < neg_iou_thr to 0
3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
assign it to that bbox
4. for each gt bbox, assign its nearest proposals (may be more than
one) to itself
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
Example:
>>> self = MaxIoUAssigner(0.5, 0.5)
>>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
>>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
>>> assign_result = self.assign(bboxes, gt_bboxes)
>>> expected_gt_inds = torch.LongTensor([1, 0])
>>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
"""
assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
gt_bboxes.shape[0] > self.gpu_assign_thr) else False
# compute overlap and assign gt on CPU when number of GT is large
if assign_on_cpu:
device = bboxes.device
bboxes = bboxes.cpu()
gt_bboxes = gt_bboxes.cpu()
if gt_bboxes_ignore is not None:
gt_bboxes_ignore = gt_bboxes_ignore.cpu()
if gt_labels is not None:
gt_labels = gt_labels.cpu()
overlaps = self.iou_calculator(gt_bboxes, bboxes)
if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = self.iou_calculator(
bboxes, gt_bboxes_ignore, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
else:
ignore_overlaps = self.iou_calculator(
gt_bboxes_ignore, bboxes, mode='iof')
ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
if assign_on_cpu:
assign_result.gt_inds = assign_result.gt_inds.to(device)
assign_result.max_overlaps = assign_result.max_overlaps.to(device)
if assign_result.labels is not None:
assign_result.labels = assign_result.labels.to(device)
return assign_result
def assign_wrt_overlaps(self, overlaps, gt_labels=None):
"""Assign w.r.t. the overlaps of bboxes with gts.
Args:
overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
shape(k, n).
gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gts == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
# 2. assign negative: below
# the negative inds are set to be 0
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps < self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, tuple):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
& (max_overlaps < self.neg_iou_thr[1])] = 0
# 3. assign positive: above positive IoU threshold
pos_inds = max_overlaps >= self.pos_iou_thr
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
if self.match_low_quality:
# Low-quality matching will overwrite the assigned_gt_inds assigned
# in Step 3. Thus, the assigned gt might not be the best one for
# prediction.
# For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
# GT bbox 1 will be assigned as the best target for bbox A in step 3.
# However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's
# assigned_gt_inds will be overwritten to be GT bbox 2.
# This might be the reason that it is not used in ROI Heads.
for i in range(num_gts):
if gt_max_overlaps[i] >= self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
assigned_gt_inds[max_iou_inds] = i + 1
else:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
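# --- Illustrative sketch (hypothetical, not part of the original file):
# how `match_low_quality` rescues a gt whose best anchor is below
# pos_iou_thr (step 4 of `assign_wrt_overlaps`).
def _demo_low_quality_match():
    assigner = MaxIoUAssigner(pos_iou_thr=0.7, neg_iou_thr=0.3)
    # one gt, one anchor, IoU 0.4: neither positive (>= 0.7) nor
    # negative (< 0.3), but above min_pos_iou (0.0)
    overlaps = torch.tensor([[0.4]])
    result = assigner.assign_wrt_overlaps(overlaps)
    assert result.gt_inds.tolist() == [1]  # step 4 assigns it anyway
    return result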
| 9,798 | 44.78972 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/match_costs/match_cost.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
from .builder import MATCH_COST
@MATCH_COST.register_module()
class BBoxL1Cost:
"""BBoxL1Cost.
Args:
weight (int | float, optional): loss_weight
box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN
Examples:
>>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost
>>> import torch
>>> self = BBoxL1Cost()
>>> bbox_pred = torch.rand(1, 4)
>>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
>>> factor = torch.tensor([10, 8, 10, 8])
>>> self(bbox_pred, gt_bboxes, factor)
tensor([[1.6172, 1.6422]])
"""
def __init__(self, weight=1., box_format='xyxy'):
self.weight = weight
assert box_format in ['xyxy', 'xywh']
self.box_format = box_format
def __call__(self, bbox_pred, gt_bboxes):
"""
Args:
bbox_pred (Tensor): Predicted boxes with normalized coordinates
(cx, cy, w, h), which are all in range [0, 1]. Shape
[num_query, 4].
gt_bboxes (Tensor): Ground truth boxes with normalized
coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
Returns:
torch.Tensor: bbox_cost value with weight
"""
if self.box_format == 'xywh':
gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
elif self.box_format == 'xyxy':
bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
return bbox_cost * self.weight
@MATCH_COST.register_module()
class FocalLossCost:
"""FocalLossCost.
Args:
weight (int | float, optional): loss_weight
alpha (int | float, optional): focal_loss alpha
gamma (int | float, optional): focal_loss gamma
eps (float, optional): default 1e-12
Examples:
>>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost
>>> import torch
>>> self = FocalLossCost()
>>> cls_pred = torch.rand(4, 3)
>>> gt_labels = torch.tensor([0, 1, 2])
>>> factor = torch.tensor([10, 8, 10, 8])
>>> self(cls_pred, gt_labels)
tensor([[-0.3236, -0.3364, -0.2699],
[-0.3439, -0.3209, -0.4807],
[-0.4099, -0.3795, -0.2929],
[-0.1950, -0.1207, -0.2626]])
"""
def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12):
self.weight = weight
self.alpha = alpha
self.gamma = gamma
self.eps = eps
def __call__(self, cls_pred, gt_labels):
"""
Args:
cls_pred (Tensor): Predicted classification logits, shape
[num_query, num_class].
gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
Returns:
torch.Tensor: cls_cost value with weight
"""
cls_pred = cls_pred.sigmoid()
neg_cost = -(1 - cls_pred + self.eps).log() * (
1 - self.alpha) * cls_pred.pow(self.gamma)
pos_cost = -(cls_pred + self.eps).log() * self.alpha * (
1 - cls_pred).pow(self.gamma)
cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]
return cls_cost * self.weight
@MATCH_COST.register_module()
class ClassificationCost:
"""ClsSoftmaxCost.
Args:
weight (int | float, optional): loss_weight
Examples:
>>> from mmdet.core.bbox.match_costs.match_cost import \
... ClassificationCost
>>> import torch
>>> self = ClassificationCost()
>>> cls_pred = torch.rand(4, 3)
>>> gt_labels = torch.tensor([0, 1, 2])
>>> factor = torch.tensor([10, 8, 10, 8])
>>> self(cls_pred, gt_labels)
tensor([[-0.3430, -0.3525, -0.3045],
[-0.3077, -0.2931, -0.3992],
[-0.3664, -0.3455, -0.2881],
[-0.3343, -0.2701, -0.3956]])
"""
def __init__(self, weight=1.):
self.weight = weight
def __call__(self, cls_pred, gt_labels):
"""
Args:
cls_pred (Tensor): Predicted classification logits, shape
[num_query, num_class].
gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
Returns:
torch.Tensor: cls_cost value with weight
"""
        # Following the official DETR repo, and in contrast to the loss,
        # where NLL is used, here we approximate it by 1 - cls_score[gt_label].
# The 1 is a constant that doesn't change the matching,
# so it can be omitted.
cls_score = cls_pred.softmax(-1)
cls_cost = -cls_score[:, gt_labels]
return cls_cost * self.weight
@MATCH_COST.register_module()
class IoUCost:
"""IoUCost.
Args:
iou_mode (str, optional): iou mode such as 'iou' | 'giou'
weight (int | float, optional): loss weight
Examples:
>>> from mmdet.core.bbox.match_costs.match_cost import IoUCost
>>> import torch
>>> self = IoUCost()
>>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]])
>>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
>>> self(bboxes, gt_bboxes)
tensor([[-0.1250, 0.1667],
[ 0.1667, -0.5000]])
"""
def __init__(self, iou_mode='giou', weight=1.):
self.weight = weight
self.iou_mode = iou_mode
def __call__(self, bboxes, gt_bboxes):
"""
Args:
bboxes (Tensor): Predicted boxes with unnormalized coordinates
(x1, y1, x2, y2). Shape [num_query, 4].
gt_bboxes (Tensor): Ground truth boxes with unnormalized
coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
Returns:
torch.Tensor: iou_cost value with weight
"""
# overlaps: [num_bboxes, num_gt]
overlaps = bbox_overlaps(
bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
# The 1 is a constant that doesn't change the matching, so omitted.
iou_cost = -overlaps
return iou_cost * self.weight
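# --- Usage sketch (editorial addition, not part of the original module) -----
# A minimal, hypothetical illustration of how these cost modules combine into
# a single assignment cost matrix for Hungarian matching, as in DETR-style
# assigners. The weights, shapes and tensor values below are made up;
# scipy is assumed to be available (HungarianAssigner also depends on it).
if __name__ == '__main__':
    from scipy.optimize import linear_sum_assignment
    num_query, num_class = 5, 3
    cls_pred = torch.rand(num_query, num_class)  # raw classification logits
    bbox_pred = torch.rand(num_query, 4)  # normalized (cx, cy, w, h)
    gt_labels = torch.tensor([0, 2])
    gt_bboxes = torch.tensor([[0.1, 0.1, 0.4, 0.5],
                              [0.3, 0.2, 0.9, 0.8]])  # normalized xyxy
    cost = (FocalLossCost(weight=2.0)(cls_pred, gt_labels) +
            BBoxL1Cost(weight=5.0)(bbox_pred, gt_bboxes) +
            IoUCost(iou_mode='giou', weight=2.0)(
                bbox_cxcywh_to_xyxy(bbox_pred), gt_bboxes))
    row_ind, col_ind = linear_sum_assignment(cost.numpy())
    print(row_ind, col_ind)  # matched (query, gt) index pairs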
| 6,342 | 33.102151 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/coder/yolo_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
"""YOLO BBox coder.
    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides
    the image into grids, and encodes bbox (x1, y1, x2, y2) into
    (cx, cy, dw, dh). cx, cy in [0., 1.] denote the relative center position
    w.r.t. the center of bboxes. dw, dh are the same as
    :obj:`DeltaXYWHBBoxCoder`.
Args:
eps (float): Min value of cx, cy when encoding.
"""
def __init__(self, eps=1e-6):
super(BaseBBoxCoder, self).__init__()
self.eps = eps
@mmcv.jit(coderize=True)
def encode(self, bboxes, gt_bboxes, stride):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., anchors.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
stride (torch.Tensor | int): Stride of bboxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
w_target = torch.log((w_gt / w).clamp(min=self.eps))
h_target = torch.log((h_gt / h).clamp(min=self.eps))
x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
self.eps, 1 - self.eps)
encoded_bboxes = torch.stack(
[x_center_target, y_center_target, w_target, h_target], dim=-1)
return encoded_bboxes
@mmcv.jit(coderize=True)
def decode(self, bboxes, pred_bboxes, stride):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
            bboxes (torch.Tensor): Basic boxes, e.g. anchors.
            pred_bboxes (torch.Tensor): Encoded boxes with shape (..., 4).
            stride (torch.Tensor | int): Strides of bboxes.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
pred_bboxes[..., :2] - 0.5) * stride
whs = (bboxes[..., 2:] -
bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
decoded_bboxes = torch.stack(
(xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
whs[..., 1], xy_centers[..., 0] + whs[..., 0],
xy_centers[..., 1] + whs[..., 1]),
dim=-1)
return decoded_bboxes
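# --- Round-trip sketch (editorial addition) ---------------------------------
# A hypothetical check that decode(encode(...)) recovers the ground-truth box
# when the anchor and gt centers are less than half a stride apart. The anchor
# and box values below are made up.
if __name__ == '__main__':
    coder = YOLOBBoxCoder()
    anchors = torch.tensor([[10., 10., 42., 42.]])  # one 32x32 anchor
    gts = torch.tensor([[12., 8., 44., 46.]])
    stride = 32
    deltas = coder.encode(anchors, gts, stride)
    restored = coder.decode(anchors, deltas, stride)
    assert torch.allclose(restored, gts, atol=1e-4)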
| 3,252 | 37.72619 | 77 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/coder/distance_point_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import BBOX_CODERS
from ..transforms import bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
    This coder encodes gt bboxes (x1, y1, x2, y2) into (left, top, right,
    bottom) distances from a point and decodes them back to the original
    boxes.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True):
super(BaseBBoxCoder, self).__init__()
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): a small value to ensure target < max_dis instead
                of <=. Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
return distance2bbox(points, pred_bboxes, max_shape)
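# --- Round-trip sketch (editorial addition) ---------------------------------
# A hypothetical example: points lying inside their gt boxes encode to
# non-negative (left, top, right, bottom) distances and decode back exactly.
# The point and box values below are made up.
if __name__ == '__main__':
    import torch
    coder = DistancePointBBoxCoder()
    points = torch.tensor([[15., 20.], [60., 60.]])
    gts = torch.tensor([[10., 10., 30., 40.], [50., 40., 80., 90.]])
    dists = coder.encode(points, gts)  # (N, 4): left, top, right, bottom
    restored = coder.decode(points, dists)
    assert torch.equal(restored, gts)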
| 2,481 | 37.78125 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/coder/bucketing_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
from ..builder import BBOX_CODERS
from ..transforms import bbox_rescale
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
"""Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented here.
Please refer to https://arxiv.org/abs/1912.04260 for more details.
Args:
num_buckets (int): Number of buckets.
scale_factor (int): Scale factor of proposals to generate buckets.
offset_topk (int): Topk buckets are used to generate
bucket fine regression targets. Defaults to 2.
offset_upperbound (float): Offset upperbound to generate
bucket fine regression targets.
To avoid too large offset displacements. Defaults to 1.0.
cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
Defaults to True.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self,
num_buckets,
scale_factor,
offset_topk=2,
offset_upperbound=1.0,
cls_ignore_neighbor=True,
clip_border=True):
super(BucketingBBoxCoder, self).__init__()
self.num_buckets = num_buckets
self.scale_factor = scale_factor
self.offset_topk = offset_topk
self.offset_upperbound = offset_upperbound
self.cls_ignore_neighbor = cls_ignore_neighbor
self.clip_border = clip_border
def encode(self, bboxes, gt_bboxes):
"""Get bucketing estimation and fine regression targets during
training.
Args:
bboxes (torch.Tensor): source boxes, e.g., object proposals.
gt_bboxes (torch.Tensor): target of the transformation, e.g.,
ground truth boxes.
Returns:
            encoded_bboxes (tuple[Tensor]): bucketing estimation
and fine regression targets and weights
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
self.scale_factor, self.offset_topk,
self.offset_upperbound,
self.cls_ignore_neighbor)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, max_shape=None):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
            bboxes (torch.Tensor): Basic boxes.
pred_bboxes (torch.Tensor): Predictions for bucketing estimation
and fine regression
max_shape (tuple[int], optional): Maximum shape of boxes.
Defaults to None.
Returns:
torch.Tensor: Decoded boxes.
"""
assert len(pred_bboxes) == 2
cls_preds, offset_preds = pred_bboxes
assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
0) == bboxes.size(0)
decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
self.num_buckets, self.scale_factor,
max_shape, self.clip_border)
return decoded_bboxes
@mmcv.jit(coderize=True)
def generate_buckets(proposals, num_buckets, scale_factor=1.0):
"""Generate buckets w.r.t bucket number and scale factor of proposals.
Args:
proposals (Tensor): Shape (n, 4)
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
Returns:
tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
t_buckets, d_buckets)
- bucket_w: Width of buckets on x-axis. Shape (n, ).
- bucket_h: Height of buckets on y-axis. Shape (n, ).
- l_buckets: Left buckets. Shape (n, ceil(side_num/2)).
- r_buckets: Right buckets. Shape (n, ceil(side_num/2)).
- t_buckets: Top buckets. Shape (n, ceil(side_num/2)).
- d_buckets: Down buckets. Shape (n, ceil(side_num/2)).
"""
proposals = bbox_rescale(proposals, scale_factor)
# number of buckets in each side
side_num = int(np.ceil(num_buckets / 2.0))
pw = proposals[..., 2] - proposals[..., 0]
ph = proposals[..., 3] - proposals[..., 1]
px1 = proposals[..., 0]
py1 = proposals[..., 1]
px2 = proposals[..., 2]
py2 = proposals[..., 3]
bucket_w = pw / num_buckets
bucket_h = ph / num_buckets
# left buckets
l_buckets = px1[:, None] + (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
# right buckets
r_buckets = px2[:, None] - (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
# top buckets
t_buckets = py1[:, None] + (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
# down buckets
d_buckets = py2[:, None] - (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets
@mmcv.jit(coderize=True)
def bbox2bucket(proposals,
gt,
num_buckets,
scale_factor,
offset_topk=2,
offset_upperbound=1.0,
cls_ignore_neighbor=True):
"""Generate buckets estimation and fine regression targets.
Args:
proposals (Tensor): Shape (n, 4)
gt (Tensor): Shape (n, 4)
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
offset_topk (int): Topk buckets are used to generate
bucket fine regression targets. Defaults to 2.
offset_upperbound (float): Offset allowance to generate
bucket fine regression targets.
To avoid too large offset displacements. Defaults to 1.0.
cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
Defaults to True.
Returns:
tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).
- offsets: Fine regression targets. \
Shape (n, num_buckets*2).
- offsets_weights: Fine regression weights. \
Shape (n, num_buckets*2).
- bucket_labels: Bucketing estimation labels. \
Shape (n, num_buckets*2).
- cls_weights: Bucketing estimation weights. \
Shape (n, num_buckets*2).
"""
assert proposals.size() == gt.size()
# generate buckets
proposals = proposals.float()
gt = gt.float()
(bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
     d_buckets) = generate_buckets(proposals, num_buckets, scale_factor)
gx1 = gt[..., 0]
gy1 = gt[..., 1]
gx2 = gt[..., 2]
gy2 = gt[..., 3]
# generate offset targets and weights
# offsets from buckets to gts
l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]
r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]
t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]
d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]
# select top-k nearest buckets
l_topk, l_label = l_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
r_topk, r_label = r_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
t_topk, t_label = t_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
d_topk, d_label = d_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
offset_l_weights = l_offsets.new_zeros(l_offsets.size())
offset_r_weights = r_offsets.new_zeros(r_offsets.size())
offset_t_weights = t_offsets.new_zeros(t_offsets.size())
offset_d_weights = d_offsets.new_zeros(d_offsets.size())
inds = torch.arange(0, proposals.size(0)).to(proposals).long()
# generate offset weights of top-k nearest buckets
for k in range(offset_topk):
if k >= 1:
offset_l_weights[inds, l_label[:,
k]] = (l_topk[:, k] <
offset_upperbound).float()
offset_r_weights[inds, r_label[:,
k]] = (r_topk[:, k] <
offset_upperbound).float()
offset_t_weights[inds, t_label[:,
k]] = (t_topk[:, k] <
offset_upperbound).float()
offset_d_weights[inds, d_label[:,
k]] = (d_topk[:, k] <
offset_upperbound).float()
else:
offset_l_weights[inds, l_label[:, k]] = 1.0
offset_r_weights[inds, r_label[:, k]] = 1.0
offset_t_weights[inds, t_label[:, k]] = 1.0
offset_d_weights[inds, d_label[:, k]] = 1.0
offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)
offsets_weights = torch.cat([
offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights
],
dim=-1)
# generate bucket labels and weight
side_num = int(np.ceil(num_buckets / 2.0))
labels = torch.stack(
[l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)
batch_size = labels.size(0)
bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,
-1).float()
bucket_cls_l_weights = (l_offsets.abs() < 1).float()
bucket_cls_r_weights = (r_offsets.abs() < 1).float()
bucket_cls_t_weights = (t_offsets.abs() < 1).float()
bucket_cls_d_weights = (d_offsets.abs() < 1).float()
bucket_cls_weights = torch.cat([
bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,
bucket_cls_d_weights
],
dim=-1)
# ignore second nearest buckets for cls if necessary
if cls_ignore_neighbor:
bucket_cls_weights = (~((bucket_cls_weights == 1) &
(bucket_labels == 0))).float()
else:
bucket_cls_weights[:] = 1.0
return offsets, offsets_weights, bucket_labels, bucket_cls_weights
@mmcv.jit(coderize=True)
def bucket2bbox(proposals,
cls_preds,
offset_preds,
num_buckets,
scale_factor=1.0,
max_shape=None,
clip_border=True):
"""Apply bucketing estimation (cls preds) and fine regression (offset
preds) to generate det bboxes.
Args:
proposals (Tensor): Boxes to be transformed. Shape (n, 4)
cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).
offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
Returns:
tuple[Tensor]: (bboxes, loc_confidence).
- bboxes: predicted bboxes. Shape (n, 4)
- loc_confidence: localization confidence of predicted bboxes.
Shape (n,).
"""
side_num = int(np.ceil(num_buckets / 2.0))
cls_preds = cls_preds.view(-1, side_num)
offset_preds = offset_preds.view(-1, side_num)
scores = F.softmax(cls_preds, dim=1)
score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)
rescaled_proposals = bbox_rescale(proposals, scale_factor)
pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
px1 = rescaled_proposals[..., 0]
py1 = rescaled_proposals[..., 1]
px2 = rescaled_proposals[..., 2]
py2 = rescaled_proposals[..., 3]
bucket_w = pw / num_buckets
bucket_h = ph / num_buckets
score_inds_l = score_label[0::4, 0]
score_inds_r = score_label[1::4, 0]
score_inds_t = score_label[2::4, 0]
score_inds_d = score_label[3::4, 0]
l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h
offsets = offset_preds.view(-1, 4, side_num)
inds = torch.arange(proposals.size(0)).to(proposals).long()
l_offsets = offsets[:, 0, :][inds, score_inds_l]
r_offsets = offsets[:, 1, :][inds, score_inds_r]
t_offsets = offsets[:, 2, :][inds, score_inds_t]
d_offsets = offsets[:, 3, :][inds, score_inds_d]
x1 = l_buckets - l_offsets * bucket_w
x2 = r_buckets - r_offsets * bucket_w
y1 = t_buckets - t_offsets * bucket_h
y2 = d_buckets - d_offsets * bucket_h
if clip_border and max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
dim=-1)
# bucketing guided rescoring
loc_confidence = score_topk[:, 0]
top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)
return bboxes, loc_confidence
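# --- Round-trip sketch (editorial addition) ---------------------------------
# A hypothetical example: feeding the encoding targets (one-hot bucket labels
# and per-bucket offsets) back through decode recovers the ground-truth box,
# because decode reads the offset at the top-scoring bucket. Values made up.
if __name__ == '__main__':
    coder = BucketingBBoxCoder(num_buckets=14, scale_factor=1.7)
    proposals = torch.tensor([[20., 20., 120., 100.]])
    gts = torch.tensor([[25., 18., 110., 105.]])
    offsets, offset_weights, bucket_labels, cls_weights = coder.encode(
        proposals, gts)
    bboxes, loc_conf = coder.decode(proposals, (bucket_labels, offsets))
    assert torch.allclose(bboxes, gts, atol=1e-3)
    print(loc_conf)  # bucketing-guided localization confidence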
| 14,119 | 39.113636 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/coder/pseudo_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super(BaseBBoxCoder, self).__init__(**kwargs)
def encode(self, bboxes, gt_bboxes):
"""torch.Tensor: return the given ``bboxes``"""
return gt_bboxes
def decode(self, bboxes, pred_bboxes):
"""torch.Tensor: return the given ``pred_bboxes``"""
return pred_bboxes
| 577 | 27.9 | 60 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/coder/tblr_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class TBLRBBoxCoder(BaseBBoxCoder):
"""TBLR BBox coder.
Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_,
    this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
    right) and decodes them back to the original.
Args:
        normalizer (list | float): Normalization factor by which the
            coordinates are divided when coding. If it is a list, it should
            have a length of 4, indicating the normalization factors in the
            tblr dims. Otherwise it is a unified float factor for all dims.
            Default: 4.0
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, normalizer=4.0, clip_border=True):
super(BaseBBoxCoder, self).__init__()
self.normalizer = normalizer
self.clip_border = clip_border
def encode(self, bboxes, gt_bboxes):
"""Get box regression transformation deltas that can be used to
        transform the ``bboxes`` into the ``gt_bboxes`` in the (top, bottom,
        left, right) order.
Args:
bboxes (torch.Tensor): source boxes, e.g., object proposals.
gt_bboxes (torch.Tensor): target of the transformation, e.g.,
ground truth boxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = bboxes2tblr(
bboxes, gt_bboxes, normalizer=self.normalizer)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, max_shape=None):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
bboxes (torch.Tensor): Basic boxes.Shape (B, N, 4) or (N, 4)
pred_bboxes (torch.Tensor): Encoded boxes with shape
(B, N, 4) or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
Returns:
torch.Tensor: Decoded boxes.
"""
decoded_bboxes = tblr2bboxes(
bboxes,
pred_bboxes,
normalizer=self.normalizer,
max_shape=max_shape,
clip_border=self.clip_border)
return decoded_bboxes
@mmcv.jit(coderize=True)
def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
"""Encode ground truth boxes to tblr coordinate.
    It first converts the gt coordinates to the tblr format,
    (top, bottom, left, right), relative to prior box centers.
The tblr coordinate may be normalized by the side length of prior bboxes
if `normalize_by_wh` is specified as True, and it is then normalized by
the `normalizer` factor.
Args:
priors (Tensor): Prior boxes in point form
Shape: (num_proposals,4).
gts (Tensor): Coords of ground truth for each prior in point-form
Shape: (num_proposals, 4).
normalizer (Sequence[float] | float): normalization parameter of
encoded boxes. If it is a list, it has to have length = 4.
Default: 4.0
normalize_by_wh (bool): Whether to normalize tblr coordinate by the
side length (wh) of prior bboxes.
    Returns:
        Tensor: Encoded boxes with shape (num_proposals, 4)
"""
# dist b/t match center and prior's center
if not isinstance(normalizer, float):
normalizer = torch.tensor(normalizer, device=priors.device)
assert len(normalizer) == 4, 'Normalizer must have length = 4'
assert priors.size(0) == gts.size(0)
prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2
xmin, ymin, xmax, ymax = gts.split(1, dim=1)
top = prior_centers[:, 1].unsqueeze(1) - ymin
bottom = ymax - prior_centers[:, 1].unsqueeze(1)
left = prior_centers[:, 0].unsqueeze(1) - xmin
right = xmax - prior_centers[:, 0].unsqueeze(1)
loc = torch.cat((top, bottom, left, right), dim=1)
if normalize_by_wh:
# Normalize tblr by anchor width and height
wh = priors[:, 2:4] - priors[:, 0:2]
w, h = torch.split(wh, 1, dim=1)
loc[:, :2] /= h # tb is normalized by h
loc[:, 2:] /= w # lr is normalized by w
# Normalize tblr by the given normalization factor
return loc / normalizer
@mmcv.jit(coderize=True)
def tblr2bboxes(priors,
tblr,
normalizer=4.0,
normalize_by_wh=True,
max_shape=None,
clip_border=True):
"""Decode tblr outputs to prediction boxes.
The process includes 3 steps: 1) De-normalize tblr coordinates by
multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the
prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert
tblr (top, bottom, left, right) pair relative to the center of priors back
to (xmin, ymin, xmax, ymax) coordinate.
Args:
priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)
Shape: (N,4) or (B, N, 4).
tblr (Tensor): Coords of network output in tblr form
Shape: (N, 4) or (B, N, 4).
normalizer (Sequence[float] | float): Normalization parameter of
encoded boxes. By list, it represents the normalization factors at
tblr dims. By float, it is the unified normalization factor at all
dims. Default: 4.0
normalize_by_wh (bool): Whether the tblr coordinates have been
normalized by the side length (wh) of prior bboxes.
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
    Returns:
        Tensor: Decoded boxes with shape (N, 4) or (B, N, 4)
"""
if not isinstance(normalizer, float):
normalizer = torch.tensor(normalizer, device=priors.device)
assert len(normalizer) == 4, 'Normalizer must have length = 4'
assert priors.size(0) == tblr.size(0)
if priors.ndim == 3:
assert priors.size(1) == tblr.size(1)
loc_decode = tblr * normalizer
prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2
if normalize_by_wh:
wh = priors[..., 2:4] - priors[..., 0:2]
w, h = torch.split(wh, 1, dim=-1)
        # In-place operation with slice would fail when exporting to ONNX
th = h * loc_decode[..., :2] # tb
tw = w * loc_decode[..., 2:] # lr
loc_decode = torch.cat([th, tw], dim=-1)
    # `loc_decode.split(1, dim=-1)` cannot be exported to ONNX,
    # so split with explicit section sizes instead
top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)
xmin = prior_centers[..., 0].unsqueeze(-1) - left
xmax = prior_centers[..., 0].unsqueeze(-1) + right
ymin = prior_centers[..., 1].unsqueeze(-1) - top
ymax = prior_centers[..., 1].unsqueeze(-1) + bottom
bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
if clip_border and max_shape is not None:
# clip bboxes with dynamic `min` and `max` for onnx
if torch.onnx.is_in_onnx_export():
from mmdet.core.export import dynamic_clip_for_onnx
xmin, ymin, xmax, ymax = dynamic_clip_for_onnx(
xmin, ymin, xmax, ymax, max_shape)
bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1)
return bboxes
if not isinstance(max_shape, torch.Tensor):
max_shape = priors.new_tensor(max_shape)
max_shape = max_shape[..., :2].type_as(priors)
if max_shape.ndim == 2:
assert bboxes.ndim == 3
assert max_shape.size(0) == bboxes.size(0)
min_xy = priors.new_tensor(0)
max_xy = torch.cat([max_shape, max_shape],
dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
return bboxes
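# --- Round-trip sketch (editorial addition) ---------------------------------
# A hypothetical check that tblr2bboxes inverts bboxes2tblr under the same
# normalizer and wh-normalization settings; the prior/gt values are made up.
if __name__ == '__main__':
    priors = torch.tensor([[10., 10., 50., 30.]])
    gts = torch.tensor([[12., 8., 48., 34.]])
    loc = bboxes2tblr(priors, gts, normalizer=4.0)
    restored = tblr2bboxes(priors, loc, normalizer=4.0)
    assert torch.allclose(restored, gts, atol=1e-4)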
| 8,625 | 40.671498 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):
"""Legacy Delta XYWH BBox coder used in MMDet V1.x.
Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2,
y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh)
back to original bbox (x1, y1, x2, y2).
Note:
        The main difference between :class:`LegacyDeltaXYWHBBoxCoder` and
        :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and
        height calculation. We suggest using this coder only when testing with
        MMDet V1.x models.
References:
.. [1] https://arxiv.org/abs/1311.2524
Args:
target_means (Sequence[float]): denormalizing means of target for
delta coordinates
target_stds (Sequence[float]): denormalizing standard deviation of
target for delta coordinates
"""
def __init__(self,
target_means=(0., 0., 0., 0.),
target_stds=(1., 1., 1., 1.)):
super(BaseBBoxCoder, self).__init__()
self.means = target_means
self.stds = target_stds
def encode(self, bboxes, gt_bboxes):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): source boxes, e.g., object proposals.
gt_bboxes (torch.Tensor): target of the transformation, e.g.,
ground-truth boxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means,
self.stds)
return encoded_bboxes
def decode(self,
bboxes,
pred_bboxes,
max_shape=None,
wh_ratio_clip=16 / 1000):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
            bboxes (torch.Tensor): Basic boxes.
            pred_bboxes (torch.Tensor): Encoded boxes with shape
                (N, num_classes * 4) or (N, 4).
max_shape (tuple[int], optional): Maximum shape of boxes.
Defaults to None.
wh_ratio_clip (float, optional): The allowed ratio between
width and height.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(0) == bboxes.size(0)
decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape, wh_ratio_clip)
return decoded_bboxes
@mmcv.jit(coderize=True)
def legacy_bbox2delta(proposals,
gt,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.)):
"""Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner.
We usually compute the deltas of x, y, w, h of proposals w.r.t ground
truth bboxes to get regression target.
This is the inverse function of `delta2bbox()`
Args:
proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
Returns:
Tensor: deltas with shape (N, 4), where columns represent dx, dy,
dw, dh.
"""
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0] + 1.0
ph = proposals[..., 3] - proposals[..., 1] + 1.0
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0] + 1.0
gh = gt[..., 3] - gt[..., 1] + 1.0
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
@mmcv.jit(coderize=True)
def legacy_delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
wh_ratio_clip=16 / 1000):
"""Apply deltas to shift/scale base boxes in the MMDet V1.x manner.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of `bbox2delta()`
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4)
deltas (Tensor): Encoded offsets with respect to each roi.
Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when
rois is a grid of anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)
wh_ratio_clip (float): Maximum aspect ratio for boxes.
Returns:
Tensor: Boxes with shape (N, 4), where columns represent
tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
>>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32))
tensor([[0.0000, 0.0000, 1.5000, 1.5000],
[0.0000, 0.0000, 5.2183, 5.2183],
[0.0000, 0.1321, 7.8891, 0.8679],
[5.3967, 2.4251, 6.0033, 3.7749]])
"""
means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[:, 0::4]
dy = denorm_deltas[:, 1::4]
dw = denorm_deltas[:, 2::4]
dh = denorm_deltas[:, 3::4]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
# Compute center of each roi
px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
# Compute width/height of each roi
pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
# Use exp(network energy) to enlarge/shrink each roi
gw = pw * dw.exp()
gh = ph * dh.exp()
# Use network energy to shift the center of each roi
gx = px + pw * dx
gy = py + ph * dy
# Convert center-xy/width/height to top-left, bottom-right
# The true legacy box coder should +- 0.5 here.
    # However, the current implementation improves the performance when
    # testing models trained in MMDetection 1.x (~0.5 bbox AP, 0.2 mask AP)
x1 = gx - gw * 0.5
y1 = gy - gh * 0.5
x2 = gx + gw * 0.5
y2 = gy + gh * 0.5
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
return bboxes
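# --- Behaviour sketch (editorial addition) ----------------------------------
# A hypothetical illustration of the quirk documented in the comment above:
# because decode intentionally omits the -/+ 0.5 correction, an identity
# round trip widens each box by 0.5 px per side. The roi values are made up.
if __name__ == '__main__':
    rois = torch.tensor([[0., 0., 10., 10.]])
    deltas = legacy_bbox2delta(rois, rois)  # all-zero deltas
    print(legacy_delta2bbox(rois, deltas))
    # tensor([[-0.5000, -0.5000, 10.5000, 10.5000]])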
| 8,257 | 37.0553 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import numpy as np
import torch
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class DeltaXYWHBBoxCoder(BaseBBoxCoder):
"""Delta XYWH BBox coder.
Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,
this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and
decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).
Args:
target_means (Sequence[float]): Denormalizing means of target for
delta coordinates
target_stds (Sequence[float]): Denormalizing standard deviation of
target for delta coordinates
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
        add_ctr_clamp (bool): Whether to add center clamp. When added, the
            predicted box is clamped if its center is too far away from
            the original anchor's center. Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
"""
def __init__(self,
target_means=(0., 0., 0., 0.),
target_stds=(1., 1., 1., 1.),
clip_border=True,
add_ctr_clamp=False,
ctr_clamp=32):
super(BaseBBoxCoder, self).__init__()
self.means = target_means
self.stds = target_stds
self.clip_border = clip_border
self.add_ctr_clamp = add_ctr_clamp
self.ctr_clamp = ctr_clamp
def encode(self, bboxes, gt_bboxes):
"""Get box regression transformation deltas that can be used to
transform the ``bboxes`` into the ``gt_bboxes``.
Args:
bboxes (torch.Tensor): Source boxes, e.g., object proposals.
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
ground-truth boxes.
Returns:
torch.Tensor: Box transformation deltas
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)
return encoded_bboxes
def decode(self,
bboxes,
pred_bboxes,
max_shape=None,
wh_ratio_clip=16 / 1000):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
pred_bboxes (Tensor): Encoded offsets with respect to each roi.
Has shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
                when rois is a grid of anchors. Offset encoding follows [1]_.
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
wh_ratio_clip (float, optional): The allowed ratio between
width and height.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(0) == bboxes.size(0)
if pred_bboxes.ndim == 3:
assert pred_bboxes.size(1) == bboxes.size(1)
if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export():
# single image decode
decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape, wh_ratio_clip,
self.clip_border, self.add_ctr_clamp,
self.ctr_clamp)
else:
if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export():
warnings.warn(
'DeprecationWarning: onnx_delta2bbox is deprecated '
'in the case of batch decoding and non-ONNX, '
                    'please use "delta2bbox" instead. In order to improve '
'the decoding speed, the batch function will no '
'longer be supported. ')
decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means,
self.stds, max_shape,
wh_ratio_clip, self.clip_border,
self.add_ctr_clamp,
self.ctr_clamp)
return decoded_bboxes
@mmcv.jit(coderize=True)
def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
"""Compute deltas of proposals w.r.t. gt.
We usually compute the deltas of x, y, w, h of proposals w.r.t ground
truth bboxes to get regression target.
This is the inverse function of :func:`delta2bbox`.
Args:
proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
Returns:
Tensor: deltas with shape (N, 4), where columns represent dx, dy,
dw, dh.
"""
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0]
ph = proposals[..., 3] - proposals[..., 1]
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0]
gh = gt[..., 3] - gt[..., 1]
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
@mmcv.jit(coderize=True)
def delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
wh_ratio_clip=16 / 1000,
clip_border=True,
add_ctr_clamp=False,
ctr_clamp=32):
"""Apply deltas to shift/scale base boxes.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of :func:`bbox2delta`.
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4).
deltas (Tensor): Encoded offsets relative to each roi.
Has shape (N, num_classes * 4) or (N, 4). Note
N = num_base_anchors * W * H, when rois is a grid of
anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates.
Default (0., 0., 0., 0.).
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates. Default (1., 1., 1., 1.).
max_shape (tuple[int, int]): Maximum bounds for boxes, specifies
(H, W). Default None.
wh_ratio_clip (float): Maximum aspect ratio for boxes. Default
16 / 1000.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Default True.
add_ctr_clamp (bool): Whether to add center clamp. When set to True,
the center of the prediction bounding box will be clamped to
avoid being too far away from the center of the anchor.
Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Returns:
Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4
represent tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
>>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
"""
num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4
if num_bboxes == 0:
return deltas
deltas = deltas.reshape(-1, 4)
means = deltas.new_tensor(means).view(1, -1)
stds = deltas.new_tensor(stds).view(1, -1)
denorm_deltas = deltas * stds + means
dxy = denorm_deltas[:, :2]
dwh = denorm_deltas[:, 2:]
# Compute width/height of each roi
rois_ = rois.repeat(1, num_classes).reshape(-1, 4)
pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5)
pwh = (rois_[:, 2:] - rois_[:, :2])
dxy_wh = pwh * dxy
max_ratio = np.abs(np.log(wh_ratio_clip))
if add_ctr_clamp:
dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp)
dwh = torch.clamp(dwh, max=max_ratio)
else:
dwh = dwh.clamp(min=-max_ratio, max=max_ratio)
gxy = pxy + dxy_wh
gwh = pwh * dwh.exp()
x1y1 = gxy - (gwh * 0.5)
x2y2 = gxy + (gwh * 0.5)
bboxes = torch.cat([x1y1, x2y2], dim=-1)
if clip_border and max_shape is not None:
bboxes[..., 0::2].clamp_(min=0, max=max_shape[1])
bboxes[..., 1::2].clamp_(min=0, max=max_shape[0])
bboxes = bboxes.reshape(num_bboxes, -1)
return bboxes
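# --- Round-trip sketch (editorial addition) ---------------------------------
# A hypothetical check that delta2bbox inverts bbox2delta when the deltas stay
# within the aspect-ratio clip; the roi/gt values below are made up.
if __name__ == '__main__':
    rois = torch.tensor([[0., 0., 20., 20.], [10., 10., 30., 50.]])
    gts = torch.tensor([[2., 1., 21., 18.], [12., 8., 33., 52.]])
    deltas = bbox2delta(rois, gts)
    restored = delta2bbox(rois, deltas)
    assert torch.allclose(restored, gts, atol=1e-4)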
def onnx_delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
wh_ratio_clip=16 / 1000,
clip_border=True,
add_ctr_clamp=False,
ctr_clamp=32):
"""Apply deltas to shift/scale base boxes.
Typically the rois are anchor or proposed bounding boxes and the deltas are
network outputs used to shift/scale those boxes.
This is the inverse function of :func:`bbox2delta`.
Args:
rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
deltas (Tensor): Encoded offsets with respect to each roi.
Has shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
            when rois is a grid of anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates.
Default (0., 0., 0., 0.).
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates. Default (1., 1., 1., 1.).
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If rois shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B. Default None.
wh_ratio_clip (float): Maximum aspect ratio for boxes.
Default 16 / 1000.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Default True.
        add_ctr_clamp (bool): Whether to add center clamp. When added, the
            predicted box is clamped if its center is too far away from
            the original anchor's center. Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Returns:
Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4), where 4 represent
tl_x, tl_y, br_x, br_y.
References:
.. [1] https://arxiv.org/abs/1311.2524
Example:
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 0., 0., 1., 1.],
>>> [ 5., 5., 5., 5.]])
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
>>> [ 1., 1., 1., 1.],
>>> [ 0., 0., 2., -1.],
>>> [ 0.7, -1.9, -0.5, 0.3]])
        >>> onnx_delta2bbox(rois, deltas, max_shape=(32, 32, 3))
tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
"""
means = deltas.new_tensor(means).view(1,
-1).repeat(1,
deltas.size(-1) // 4)
stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[..., 0::4]
dy = denorm_deltas[..., 1::4]
dw = denorm_deltas[..., 2::4]
dh = denorm_deltas[..., 3::4]
x1, y1 = rois[..., 0], rois[..., 1]
x2, y2 = rois[..., 2], rois[..., 3]
# Compute center of each roi
px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
# Compute width/height of each roi
pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
dx_width = pw * dx
dy_height = ph * dy
max_ratio = np.abs(np.log(wh_ratio_clip))
if add_ctr_clamp:
dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp)
dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp)
dw = torch.clamp(dw, max=max_ratio)
dh = torch.clamp(dh, max=max_ratio)
else:
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
# Use exp(network energy) to enlarge/shrink each roi
gw = pw * dw.exp()
gh = ph * dh.exp()
# Use network energy to shift the center of each roi
gx = px + dx_width
gy = py + dy_height
# Convert center-xy/width/height to top-left, bottom-right
x1 = gx - gw * 0.5
y1 = gy - gh * 0.5
x2 = gx + gw * 0.5
y2 = gy + gh * 0.5
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
if clip_border and max_shape is not None:
# clip bboxes with dynamic `min` and `max` for onnx
if torch.onnx.is_in_onnx_export():
from mmdet.core.export import dynamic_clip_for_onnx
x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
return bboxes
if not isinstance(max_shape, torch.Tensor):
max_shape = x1.new_tensor(max_shape)
max_shape = max_shape[..., :2].type_as(x1)
if max_shape.ndim == 2:
assert bboxes.ndim == 3
assert max_shape.size(0) == bboxes.size(0)
min_xy = x1.new_tensor(0)
max_xy = torch.cat(
[max_shape] * (deltas.size(-1) // 2),
dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
return bboxes
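# --- Usage sketch (editorial addition) --------------------------------------
# A hypothetical batched decode with per-image clipping bounds, the case this
# batch-capable variant still covers. The shapes and bounds below are made up.
if __name__ == '__main__':
    rois = torch.rand(2, 5, 4) * 50
    rois[..., 2:] += rois[..., :2]  # ensure x2 >= x1 and y2 >= y1
    deltas = torch.randn(2, 5, 4) * 0.1
    out = onnx_delta2bbox(rois, deltas, max_shape=[(64, 64), (128, 96)])
    print(out.shape)  # torch.Size([2, 5, 4])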
| 15,978 | 39.659033 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/iou_calculators/iou2d_calculator.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from .builder import IOU_CALCULATORS
def cast_tensor_type(x, scale=1., dtype=None):
if dtype == 'fp16':
# scale is for preventing overflows
x = (x / scale).half()
return x
def fp16_clamp(x, min=None, max=None):
if not x.is_cuda and x.dtype == torch.float16:
        # clamp for CPU float16; fp16 tensors have no CPU clamp implementation
return x.float().clamp(min, max).half()
return x.clamp(min, max)
@IOU_CALCULATORS.register_module()
class BboxOverlaps2D:
"""2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
def __init__(self, scale=1., dtype=None):
self.scale = scale
self.dtype = dtype
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes.
Args:
bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
            bboxes2 (Tensor): bboxes have shape (n, 4) in <x1, y1, x2, y2>
                format, shape (n, 5) in <x1, y1, x2, y2, score> format, or be
                empty. If ``is_aligned`` is ``True``, then m and n must be
                equal.
mode (str): "iou" (intersection over union), "iof" (intersection
over foreground), or "giou" (generalized intersection over
union).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., :4]
if bboxes1.size(-1) == 5:
bboxes1 = bboxes1[..., :4]
if self.dtype == 'fp16':
# change tensor type to save cpu and cuda memory and keep speed
bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
if not overlaps.is_cuda and overlaps.dtype == torch.float16:
# resume cpu float32
overlaps = overlaps.float()
return overlaps
return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
def __repr__(self):
"""str: a string describing the module"""
        repr_str = self.__class__.__name__ + \
            f'(scale={self.scale}, dtype={self.dtype})'
return repr_str
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
"""Calculate overlap between two set of bboxes.
FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
Note:
Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',
        there are some newly generated variables when calculating IOU
using bbox_overlaps function:
1) is_aligned is False
area1: M x 1
area2: N x 1
lt: M x N x 2
rb: M x N x 2
wh: M x N x 2
overlap: M x N x 1
union: M x N x 1
ious: M x N x 1
Total memory:
S = (9 x N x M + N + M) * 4 Byte,
When using FP16, we can reduce:
R = (9 x N x M + N + M) * 4 / 2 Byte
            R is larger than (N + M) * 4 * 2 whenever N, M >= 1, since
            N + M <= N * M < 3 * N * M when N >= 2 and M >= 2, and
            N + 1 < 3 * N when N or M is 1.
            Given M = 40 (ground truths) and N = 400000 (three anchor boxes
            per grid cell, FPN, R-CNNs),
                R = 275 MB (one pass)
A special case (dense detection), M = 512 (ground truth),
R = 3516 MB = 3.43 GB
When the batch size is B, reduce:
B x R
Therefore, CUDA memory runs out frequently.
Experiments on GeForce RTX 2080Ti (11019 MiB):
| dtype | M | N | Use | Real | Ideal |
|:----:|:----:|:----:|:----:|:----:|:----:|
| FP32 | 512 | 400000 | 8020 MiB | -- | -- |
| FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
| FP32 | 40 | 400000 | 1540 MiB | -- | -- |
| FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |
2) is_aligned is True
area1: N x 1
area2: N x 1
lt: N x 2
rb: N x 2
wh: N x 2
overlap: N x 1
union: N x 1
ious: N x 1
Total memory:
S = 11 x N * 4 Byte
When using FP16, we can reduce:
R = 11 x N * 4 / 2 Byte
            The same holds for 'giou' (which uses more memory than 'iou').
Time-wise, FP16 is generally faster than FP32.
        When gpu_assign_thr is not -1, computing on CPU takes more time
        but does not reduce memory. Thus, with FP16 we can halve the
        memory while keeping the speed.
If ``is_aligned`` is ``False``, then calculate the overlaps between each
bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
# Either the boxes are empty or the length of boxes' last dimension is 4
assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
# Batch dim must be the same
# Batch dim: (B1, B2, ... Bn)
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows, ))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
bboxes1[..., 3] - bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
bboxes2[..., 3] - bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
rb = torch.min(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
wh = fp16_clamp(rb - lt, min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
# calculate gious
enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
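# --- Usage sketch (editorial addition) --------------------------------------
# A hypothetical use of the fp16 memory-saving path described above. Since IoU
# and GIoU are scale-invariant, pre-dividing the boxes by `scale` does not
# change the result; on CPU the fp16 output is cast back to float32. The box
# values below are made up.
if __name__ == '__main__':
    calc = BboxOverlaps2D(scale=512., dtype='fp16')
    b1 = torch.rand(8, 4) * 100
    b1[:, 2:] += b1[:, :2]  # ensure x2 >= x1 and y2 >= y1
    b2 = torch.rand(6, 4) * 100
    b2[:, 2:] += b2[:, :2]
    print(calc(b1, b2, mode='iou').shape)  # torch.Size([8, 6])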
| 9,678 | 35.942748 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
"""Instance balanced sampler that samples equal number of positive samples
for each instance."""
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): The assigned results of boxes.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = torch.nonzero(
assign_result.gt_inds == i.item(), as_tuple=False)
if inds.numel() != 0:
inds = inds.squeeze(1)
else:
continue
if len(inds) > num_per_gt:
inds = self.random_choice(inds, num_per_gt)
sampled_inds.append(inds)
sampled_inds = torch.cat(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
extra_inds = torch.from_numpy(extra_inds).to(
assign_result.gt_inds.device).long()
sampled_inds = torch.cat([sampled_inds, extra_inds])
elif len(sampled_inds) > num_expected:
sampled_inds = self.random_choice(sampled_inds, num_expected)
return sampled_inds
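# --- Usage sketch (editorial addition) --------------------------------------
# A hypothetical assignment with an imbalanced split (6 positives on gt 1 and
# 2 on gt 2): with num_expected=4, roughly num_expected/num_gts + 1 = 3
# positives are drawn per instance before the final trim. The AssignResult
# values below are made up.
if __name__ == '__main__':
    from mmdet.core.bbox import AssignResult
    gt_inds = torch.tensor([1, 1, 1, 1, 1, 1, 2, 2, 0, 0])
    assign_result = AssignResult(
        num_gts=2, gt_inds=gt_inds, max_overlaps=torch.rand(10), labels=None)
    sampler = InstanceBalancedPosSampler(
        num=8, pos_fraction=0.5, add_gt_as_proposals=False)
    print(sampler._sample_pos(assign_result, num_expected=4))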
| 2,319 | 39.701754 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/samplers/base_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
from .sampling_result import SamplingResult
class BaseSampler(metaclass=ABCMeta):
"""Base class of samplers."""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
self.num = num
self.pos_fraction = pos_fraction
self.neg_pos_ub = neg_pos_ub
self.add_gt_as_proposals = add_gt_as_proposals
self.pos_sampler = self
self.neg_sampler = self
@abstractmethod
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive samples."""
pass
@abstractmethod
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Sample negative samples."""
pass
def sample(self,
assign_result,
bboxes,
gt_bboxes,
gt_labels=None,
**kwargs):
"""Sample positive and negative bboxes.
This is a simple implementation of bbox sampling given candidates,
assigning results and ground truth bboxes.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
bboxes (Tensor): Boxes to be sampled from.
gt_bboxes (Tensor): Ground truth bboxes.
gt_labels (Tensor, optional): Class labels of ground truth bboxes.
Returns:
:obj:`SamplingResult`: Sampling result.
Example:
>>> from mmdet.core.bbox import RandomSampler
>>> from mmdet.core.bbox import AssignResult
>>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes
>>> rng = ensure_rng(None)
>>> assign_result = AssignResult.random(rng=rng)
>>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
>>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
>>> gt_labels = None
>>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
>>> add_gt_as_proposals=False)
>>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)
"""
if len(bboxes.shape) < 2:
bboxes = bboxes[None, :]
bboxes = bboxes[:, :4]
gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
if self.add_gt_as_proposals and len(gt_bboxes) > 0:
if gt_labels is None:
raise ValueError(
'gt_labels must be given when add_gt_as_proposals is True')
bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
# We found that sampled indices have duplicated items occasionally.
        # (this may be a PyTorch bug)
pos_inds = pos_inds.unique()
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds = self.neg_sampler._sample_neg(
assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
neg_inds = neg_inds.unique()
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
| 3,920 | 37.067961 | 79 | py |
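A hedged arithmetic sketch of the budgeting in `BaseSampler.sample` above: the positive quota is `num * pos_fraction`, negatives fill the remainder, and `neg_pos_ub` caps the negative count relative to the sampled positives. The concrete numbers are invented for illustration.

# Budget arithmetic mirroring BaseSampler.sample(); values are illustrative.
num, pos_fraction, neg_pos_ub = 512, 0.25, 3

num_expected_pos = int(num * pos_fraction)                   # 128
num_sampled_pos = 20                    # suppose only 20 positives exist
num_expected_neg = num - num_sampled_pos                     # 492
neg_upper_bound = int(neg_pos_ub * max(1, num_sampled_pos))  # 60
num_expected_neg = min(num_expected_neg, neg_upper_bound)    # 60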
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/samplers/random_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound of the ratio of negative
            to positive samples; -1 means no upper bound. Defaults to -1.
add_gt_as_proposals (bool, optional): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery, num):
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
| 3,071 | 36.012048 | 78 | py |
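A usage sketch for `RandomSampler`, condensed from the doctest in `BaseSampler.sample` above; it assumes the `demodata` helpers and `AssignResult.random` shown in this repository are importable as in that doctest.

from mmdet.core.bbox import AssignResult, RandomSampler
from mmdet.core.bbox.demodata import ensure_rng, random_boxes

rng = ensure_rng(0)
assign_result = AssignResult.random(rng=rng)
bboxes = random_boxes(assign_result.num_preds, rng=rng)
gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)

sampler = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
                        add_gt_as_proposals=False, rng=rng)
result = sampler.sample(assign_result, bboxes, gt_bboxes)
print(result.pos_inds.numel(), result.neg_inds.numel())  # at most 16 positives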
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/samplers/ohem_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region-based
Object Detectors with Online Hard Example Mining
<https://arxiv.org/abs/1604.03540>`_.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
loss_key='loss_cls',
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.context = context
if not hasattr(self.context, 'num_stages'):
self.bbox_head = self.context.bbox_head
else:
self.bbox_head = self.context.bbox_head[self.context.current_stage]
self.loss_key = loss_key
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
if not hasattr(self.context, 'num_stages'):
bbox_results = self.context._bbox_forward(feats, rois)
else:
bbox_results = self.context._bbox_forward(
self.context.current_stage, feats, rois)
cls_score = bbox_results['cls_score']
loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=rois,
labels=labels,
label_weights=cls_score.new_ones(cls_score.size(0)),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')[self.loss_key]
_, topk_loss_inds = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected positive samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of positive samples
"""
# Sample some hard positive samples
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
assign_result.labels[pos_inds], feats)
def _sample_neg(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected negative samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of negative samples
"""
# Sample some hard negative samples
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
neg_labels = assign_result.labels.new_empty(
neg_inds.size(0)).fill_(self.bbox_head.num_classes)
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
neg_labels, feats)
| 4,221 | 36.696429 | 79 | py |
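A standalone sketch of the hard-mining core in `OHEMSampler.hard_mining` above: keep the `num_expected` candidates with the largest per-sample loss. The toy loss values are invented; the real class obtains them from a bbox forward pass.

import torch

inds = torch.arange(6)                                # candidate indices
loss = torch.tensor([0.1, 2.3, 0.4, 1.7, 0.05, 0.9])  # per-candidate loss
num_expected = 3

_, topk_loss_inds = loss.topk(num_expected)
hard_inds = inds[topk_loss_inds]  # tensor([1, 3, 5]), the hardest examples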
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler
@BBOX_SAMPLERS.register_module()
class IoUBalancedNegSampler(RandomSampler):
"""IoU Balanced Sampling.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
    Sampling proposals according to their IoU. `floor_fraction` of the needed
    RoIs are sampled randomly from proposals whose IoU is lower than
    `floor_thr`. The rest are sampled from proposals whose IoU is higher than
    `floor_thr`; these are drawn evenly from `num_bins` bins that split the
    IoU range uniformly.
Args:
num (int): number of proposals.
pos_fraction (float): fraction of positive proposals.
floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
set to -1 if all using IoU balanced sampling.
floor_fraction (float): sampling fraction of proposals under floor_thr.
num_bins (int): number of bins in IoU balanced sampling.
"""
def __init__(self,
num,
pos_fraction,
floor_thr=-1,
floor_fraction=0,
num_bins=3,
**kwargs):
super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
**kwargs)
assert floor_thr >= 0 or floor_thr == -1
assert 0 <= floor_fraction <= 1
assert num_bins >= 1
self.floor_thr = floor_thr
self.floor_fraction = floor_fraction
self.num_bins = num_bins
def sample_via_interval(self, max_overlaps, full_set, num_expected):
"""Sample according to the iou interval.
Args:
max_overlaps (torch.Tensor): IoU between bounding boxes and ground
truth boxes.
            full_set (set(int)): A full set of indices of boxes.
            num_expected (int): Number of expected samples.
Returns:
np.ndarray: Indices of samples
"""
max_iou = max_overlaps.max()
iou_interval = (max_iou - self.floor_thr) / self.num_bins
per_num_expected = int(num_expected / self.num_bins)
sampled_inds = []
for i in range(self.num_bins):
start_iou = self.floor_thr + i * iou_interval
end_iou = self.floor_thr + (i + 1) * iou_interval
tmp_set = set(
np.where(
np.logical_and(max_overlaps >= start_iou,
max_overlaps < end_iou))[0])
tmp_inds = list(tmp_set & full_set)
if len(tmp_inds) > per_num_expected:
tmp_sampled_set = self.random_choice(tmp_inds,
per_num_expected)
else:
                tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(full_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): The assigned results of boxes.
num_expected (int): The number of expected negative samples
Returns:
Tensor or ndarray: sampled indices.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
max_overlaps = assign_result.max_overlaps.cpu().numpy()
# balance sampling for negative samples
neg_set = set(neg_inds.cpu().numpy())
if self.floor_thr > 0:
floor_set = set(
np.where(
np.logical_and(max_overlaps >= 0,
max_overlaps < self.floor_thr))[0])
iou_sampling_set = set(
np.where(max_overlaps >= self.floor_thr)[0])
elif self.floor_thr == 0:
floor_set = set(np.where(max_overlaps == 0)[0])
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
else:
floor_set = set()
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
# for sampling interval calculation
self.floor_thr = 0
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
num_expected_iou_sampling = int(num_expected *
(1 - self.floor_fraction))
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
if self.num_bins >= 2:
iou_sampled_inds = self.sample_via_interval(
max_overlaps, set(iou_sampling_neg_inds),
num_expected_iou_sampling)
else:
iou_sampled_inds = self.random_choice(
iou_sampling_neg_inds, num_expected_iou_sampling)
else:
iou_sampled_inds = np.array(
                    iou_sampling_neg_inds, dtype=np.int64)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = self.random_choice(
floor_neg_inds, num_expected_floor)
else:
                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64)
sampled_inds = np.concatenate(
(sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(neg_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
sampled_inds = torch.from_numpy(sampled_inds).long().to(
assign_result.gt_inds.device)
return sampled_inds
| 6,740 | 41.396226 | 79 | py |
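A sketch of the IoU binning performed by `sample_via_interval` above; the overlaps, `floor_thr` and `num_bins` are invented. Note the candidate sitting exactly at the maximum IoU falls on the open upper edge of the last bin, as in the original, and would later be topped up from `extra_inds`.

import numpy as np

max_overlaps = np.array([0.02, 0.11, 0.23, 0.35, 0.48, 0.07])
floor_thr, num_bins, num_expected = 0.0, 3, 3

iou_interval = (max_overlaps.max() - floor_thr) / num_bins
per_num_expected = int(num_expected / num_bins)  # one sample per bin
for i in range(num_bins):
    lo = floor_thr + i * iou_interval
    hi = floor_thr + (i + 1) * iou_interval
    in_bin = np.where((max_overlaps >= lo) & (max_overlaps < hi))[0]
    print(f'bin {i}: [{lo:.2f}, {hi:.2f}) -> candidates {in_bin}')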
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/samplers/score_hlr_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import nms_match
from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@BBOX_SAMPLERS.register_module()
class ScoreHLRSampler(BaseSampler):
r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample
Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.
    Score hierarchical local rank (HLR) differs from RandomSampler in the
    negative part. It first computes Score-HLR in a two-step way, then
    linearly maps the Score-HLR to the loss weights.
Args:
num (int): Total number of sampled RoIs.
pos_fraction (float): Fraction of positive samples.
context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.
neg_pos_ub (int): Upper bound of the ratio of num negative to num
positive, -1 means no upper bound.
add_gt_as_proposals (bool): Whether to add ground truth as proposals.
k (float): Power of the non-linear mapping.
bias (float): Shift of the non-linear mapping.
score_thr (float): Minimum score that a negative sample is to be
considered as valid bbox.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0,
score_thr=0.05,
iou_thr=0.5,
**kwargs):
super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
self.k = k
self.bias = bias
self.score_thr = score_thr
self.iou_thr = iou_thr
self.context = context
# context of cascade detectors is a list, so distinguish them here.
if not hasattr(context, 'num_stages'):
self.bbox_roi_extractor = context.bbox_roi_extractor
self.bbox_head = context.bbox_head
self.with_shared_head = context.with_shared_head
if self.with_shared_head:
self.shared_head = context.shared_head
else:
self.bbox_roi_extractor = context.bbox_roi_extractor[
context.current_stage]
self.bbox_head = context.bbox_head[context.current_stage]
@staticmethod
def random_choice(gallery, num):
"""Randomly select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self,
assign_result,
num_expected,
bboxes,
feats=None,
img_meta=None,
**kwargs):
"""Sample negative samples.
Score-HLR sampler is done in the following steps:
        1. Take the maximum positive-class score prediction of each negative
           sample as s_i.
        2. Filter out negative samples whose s_i <= score_thr; the remaining
           samples are called valid samples.
        3. Use NMS-Match to divide valid samples into different groups;
           samples in the same group greatly overlap with each other.
        4. Rank the matched samples in two steps to get the Score-HLR:
           (1) within the same group, rank samples by their scores;
           (2) across groups, rank samples with the same in-group rank by
           their scores again.
        5. Linearly map the Score-HLR to the final label weights.
Args:
assign_result (:obj:`AssignResult`): result of assigner.
num_expected (int): Expected number of samples.
bboxes (Tensor): bbox to be sampled.
feats (Tensor): Features come from FPN.
img_meta (dict): Meta information dictionary.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
num_neg = neg_inds.size(0)
if num_neg == 0:
return neg_inds, None
with torch.no_grad():
neg_bboxes = bboxes[neg_inds]
neg_rois = bbox2roi([neg_bboxes])
bbox_result = self.context._bbox_forward(feats, neg_rois)
cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
'bbox_pred']
ori_loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=None,
labels=neg_inds.new_full((num_neg, ),
self.bbox_head.num_classes),
label_weights=cls_score.new_ones(num_neg),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')['loss_cls']
# filter out samples with the max score lower than score_thr
max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
valid_inds = (max_score > self.score_thr).nonzero().view(-1)
invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
num_valid = valid_inds.size(0)
num_invalid = invalid_inds.size(0)
num_expected = min(num_neg, num_expected)
num_hlr = min(num_valid, num_expected)
num_rand = num_expected - num_hlr
if num_valid > 0:
valid_rois = neg_rois[valid_inds]
valid_max_score = max_score[valid_inds]
valid_argmax_score = argmax_score[valid_inds]
valid_bbox_pred = bbox_pred[valid_inds]
# valid_bbox_pred shape: [num_valid, #num_classes, 4]
valid_bbox_pred = valid_bbox_pred.view(
valid_bbox_pred.size(0), -1, 4)
selected_bbox_pred = valid_bbox_pred[range(num_valid),
valid_argmax_score]
pred_bboxes = self.bbox_head.bbox_coder.decode(
valid_rois[:, 1:], selected_bbox_pred)
pred_bboxes_with_score = torch.cat(
[pred_bboxes, valid_max_score[:, None]], -1)
group = nms_match(pred_bboxes_with_score, self.iou_thr)
# imp: importance
imp = cls_score.new_zeros(num_valid)
for g in group:
g_score = valid_max_score[g]
                    # g_score is already sorted
rank = g_score.new_tensor(range(g_score.size(0)))
imp[g] = num_valid - rank + g_score
_, imp_rank_inds = imp.sort(descending=True)
_, imp_rank = imp_rank_inds.sort()
hlr_inds = imp_rank_inds[:num_expected]
if num_rand > 0:
rand_inds = torch.randperm(num_invalid)[:num_rand]
select_inds = torch.cat(
[valid_inds[hlr_inds], invalid_inds[rand_inds]])
else:
select_inds = valid_inds[hlr_inds]
neg_label_weights = cls_score.new_ones(num_expected)
up_bound = max(num_expected, num_valid)
imp_weights = (up_bound -
imp_rank[hlr_inds].float()) / up_bound
neg_label_weights[:num_hlr] = imp_weights
neg_label_weights[num_hlr:] = imp_weights.min()
neg_label_weights = (self.bias +
(1 - self.bias) * neg_label_weights).pow(
self.k)
ori_selected_loss = ori_loss[select_inds]
new_loss = ori_selected_loss * neg_label_weights
norm_ratio = ori_selected_loss.sum() / new_loss.sum()
neg_label_weights *= norm_ratio
else:
neg_label_weights = cls_score.new_ones(num_expected)
select_inds = torch.randperm(num_neg)[:num_expected]
return neg_inds[select_inds], neg_label_weights
def sample(self,
assign_result,
bboxes,
gt_bboxes,
gt_labels=None,
img_meta=None,
**kwargs):
"""Sample positive and negative bboxes.
        This is a simple implementation of bbox sampling given candidates,
        assignment results, and ground truth bboxes.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
bboxes (Tensor): Boxes to be sampled from.
gt_bboxes (Tensor): Ground truth bboxes.
gt_labels (Tensor, optional): Class labels of ground truth bboxes.
Returns:
tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative
label weights.
"""
bboxes = bboxes[:, :4]
gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
if self.add_gt_as_proposals:
bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
assign_result,
num_expected_neg,
bboxes,
img_meta=img_meta,
**kwargs)
return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags), neg_label_weights
| 11,235 | 41.240602 | 79 | py |
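A sketch of the rank-to-weight mapping at the end of `_sample_neg` above: the hierarchical local rank is linearly mapped into (0, 1] and then reshaped by the `bias`/`k` transform. The ranks are invented; `k` and `bias` mirror the constructor defaults.

import torch

imp_rank = torch.tensor([0, 3, 1, 2])         # Score-HLR ranks (0 = hardest)
up_bound, k, bias = 4, 0.5, 0.0

weights = (up_bound - imp_rank.float()) / up_bound  # linear map into (0, 1]
weights = (bias + (1 - bias) * weights).pow(k)      # non-linear reshaping
print(weights)  # tensor([1.0000, 0.5000, 0.8660, 0.7071])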
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/samplers/sampling_result.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.utils import util_mixins
class SamplingResult(util_mixins.NiceRepr):
"""Bbox sampling result.
Example:
>>> # xdoctest: +IGNORE_WANT
>>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
>>> self = SamplingResult.random(rng=10)
>>> print(f'self = {self}')
self = <SamplingResult({
'neg_bboxes': torch.Size([12, 4]),
'neg_inds': tensor([ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
'num_gts': 4,
'pos_assigned_gt_inds': tensor([], dtype=torch.int64),
'pos_bboxes': torch.Size([0, 4]),
'pos_inds': tensor([], dtype=torch.int64),
'pos_is_gt': tensor([], dtype=torch.uint8)
})>
"""
def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.pos_bboxes = bboxes[pos_inds]
self.neg_bboxes = bboxes[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
if gt_bboxes.numel() == 0:
# hack for index error case
assert self.pos_assigned_gt_inds.numel() == 0
self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
else:
if len(gt_bboxes.shape) < 2:
gt_bboxes = gt_bboxes.view(-1, 4)
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def bboxes(self):
"""torch.Tensor: concatenated positive and negative boxes"""
return torch.cat([self.pos_bboxes, self.neg_bboxes])
def to(self, device):
"""Change the device of the data inplace.
Example:
>>> self = SamplingResult.random()
>>> print(f'self = {self.to(None)}')
>>> # xdoctest: +REQUIRES(--gpu)
>>> print(f'self = {self.to(0)}')
"""
_dict = self.__dict__
for key, value in _dict.items():
if isinstance(value, torch.Tensor):
_dict[key] = value.to(device)
return self
def __nice__(self):
data = self.info.copy()
data['pos_bboxes'] = data.pop('pos_bboxes').shape
data['neg_bboxes'] = data.pop('neg_bboxes').shape
parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
body = ' ' + ',\n '.join(parts)
return '{\n' + body + '\n}'
@property
def info(self):
"""Returns a dictionary of info about the object."""
return {
'pos_inds': self.pos_inds,
'neg_inds': self.neg_inds,
'pos_bboxes': self.pos_bboxes,
'neg_bboxes': self.neg_bboxes,
'pos_is_gt': self.pos_is_gt,
'num_gts': self.num_gts,
'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
}
@classmethod
def random(cls, rng=None, **kwargs):
"""
Args:
rng (None | int | numpy.random.RandomState): seed or state.
kwargs (keyword arguments):
- num_preds: number of predicted boxes
- num_gts: number of true boxes
- p_ignore (float): probability of a predicted box assigned to \
an ignored truth.
- p_assigned (float): probability of a predicted box not being \
assigned.
- p_use_label (float | bool): with labels or not.
Returns:
:obj:`SamplingResult`: Randomly generated sampling result.
Example:
>>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
>>> self = SamplingResult.random()
>>> print(self.__dict__)
"""
from mmdet.core.bbox.samplers.random_sampler import RandomSampler
from mmdet.core.bbox.assigners.assign_result import AssignResult
from mmdet.core.bbox import demodata
rng = demodata.ensure_rng(rng)
        # make probabilistic?
num = 32
pos_fraction = 0.5
neg_pos_ub = -1
assign_result = AssignResult.random(rng=rng, **kwargs)
# Note we could just compute an assignment
bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)
gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)
if rng.rand() > 0.2:
# sometimes algorithms squeeze their data, be robust to that
gt_bboxes = gt_bboxes.squeeze()
bboxes = bboxes.squeeze()
if assign_result.labels is None:
gt_labels = None
else:
gt_labels = None # todo
if gt_labels is None:
add_gt_as_proposals = False
else:
            add_gt_as_proposals = True  # make probabilistic?
sampler = RandomSampler(
num,
pos_fraction,
neg_pos_ub=neg_pos_ub,
add_gt_as_proposals=add_gt_as_proposals,
rng=rng)
self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
return self
| 5,382 | 33.954545 | 81 | py |
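A quick self-contained check of `SamplingResult`, following its own doctest above; handy for unit tests that need a random but well-formed sampling result.

from mmdet.core.bbox.samplers.sampling_result import SamplingResult

self = SamplingResult.random(rng=10)
print(self.num_gts, self.pos_inds.shape, self.neg_inds.shape)
print(self.bboxes.shape)  # concatenated positive and negative boxes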
DSLA-DSLA | DSLA-DSLA/mmdet/core/bbox/samplers/pseudo_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@BBOX_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Assigned results
bboxes (torch.Tensor): Bounding boxes
gt_bboxes (torch.Tensor): Ground truth boxes
Returns:
:obj:`SamplingResult`: sampler results
"""
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
| 1,470 | 33.209302 | 79 | py |
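A minimal sketch showing that `PseudoSampler` keeps every assigned box, so `pos_inds` and `neg_inds` together cover all candidates. The hand-built `AssignResult` is invented for illustration and assumes its `(num_gts, gt_inds, max_overlaps, labels)` constructor.

import torch
from mmdet.core.bbox import AssignResult
from mmdet.core.bbox.samplers import PseudoSampler

gt_inds = torch.tensor([0, 2, 0, 1])  # 0 = negative, >0 = matched GT index
assign_result = AssignResult(2, gt_inds, torch.rand(4), labels=None)
bboxes, gt_bboxes = torch.rand(4, 4), torch.rand(2, 4)

result = PseudoSampler().sample(assign_result, bboxes, gt_bboxes)
print(result.pos_inds)  # tensor([1, 3])
print(result.neg_inds)  # tensor([0, 2])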
DSLA-DSLA | DSLA-DSLA/mmdet/core/data_structures/general_data.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import torch
from mmdet.utils.util_mixins import NiceRepr
class GeneralData(NiceRepr):
"""A general data structure of OpenMMlab.
A data structure that stores the meta information,
the annotations of the images or the model predictions,
which can be used in communication between components.
The attributes in `GeneralData` are divided into two parts,
the `meta_info_fields` and the `data_fields` respectively.
- `meta_info_fields`: Usually contains the
information about the image such as filename,
image_shape, pad_shape, etc. All attributes in
it are immutable once set,
but the user can add new meta information with
`set_meta_info` function, all information can be accessed
with methods `meta_info_keys`, `meta_info_values`,
`meta_info_items`.
- `data_fields`: Annotations or model predictions are
stored. The attributes can be accessed or modified by
dict-like or object-like operations, such as
      `.`, `[]`, `in`, `del`, `pop(str)`, `get(str)`, `keys()`,
`values()`, `items()`. Users can also apply tensor-like methods
      to all obj:`torch.Tensor` in the `data_fields`,
      such as `.cuda()`, `.cpu()`, `.numpy()`, `device`, `.to()`,
      `.detach()`.
Args:
meta_info (dict, optional): A dict contains the meta information
of single image. such as `img_shape`, `scale_factor`, etc.
Default: None.
data (dict, optional): A dict contains annotations of single image or
model predictions. Default: None.
Examples:
>>> from mmdet.core import GeneralData
>>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3))
>>> instance_data = GeneralData(meta_info=img_meta)
        >>> 'img_shape' in instance_data
True
>>> instance_data.det_labels = torch.LongTensor([0, 1, 2, 3])
>>> instance_data["det_scores"] = torch.Tensor([0.01, 0.1, 0.2, 0.3])
        >>> print(instance_data)
<GeneralData(
META INFORMATION
img_shape: (800, 1196, 3)
pad_shape: (800, 1216, 3)
DATA FIELDS
shape of det_labels: torch.Size([4])
shape of det_scores: torch.Size([4])
) at 0x7f84acd10f90>
>>> instance_data.det_scores
tensor([0.0100, 0.1000, 0.2000, 0.3000])
>>> instance_data.det_labels
tensor([0, 1, 2, 3])
>>> instance_data['det_labels']
tensor([0, 1, 2, 3])
>>> 'det_labels' in instance_data
True
>>> instance_data.img_shape
(800, 1196, 3)
>>> 'det_scores' in instance_data
True
>>> del instance_data.det_scores
>>> 'det_scores' in instance_data
False
>>> det_labels = instance_data.pop('det_labels', None)
>>> det_labels
tensor([0, 1, 2, 3])
>>> 'det_labels' in instance_data
        False
"""
def __init__(self, meta_info=None, data=None):
self._meta_info_fields = set()
self._data_fields = set()
if meta_info is not None:
self.set_meta_info(meta_info=meta_info)
if data is not None:
self.set_data(data)
def set_meta_info(self, meta_info):
"""Add meta information.
Args:
meta_info (dict): A dict contains the meta information
of image. such as `img_shape`, `scale_factor`, etc.
Default: None.
"""
assert isinstance(meta_info,
                          dict), f'meta should be a `dict` but got {meta_info}'
meta = copy.deepcopy(meta_info)
for k, v in meta.items():
# should be consistent with original meta_info
if k in self._meta_info_fields:
ori_value = getattr(self, k)
if isinstance(ori_value, (torch.Tensor, np.ndarray)):
if (ori_value == v).all():
continue
else:
raise KeyError(
f'img_meta_info {k} has been set as '
f'{getattr(self, k)} before, which is immutable ')
elif ori_value == v:
continue
else:
raise KeyError(
f'img_meta_info {k} has been set as '
f'{getattr(self, k)} before, which is immutable ')
else:
self._meta_info_fields.add(k)
self.__dict__[k] = v
def set_data(self, data):
"""Update a dict to `data_fields`.
Args:
data (dict): A dict contains annotations of image or
model predictions. Default: None.
"""
assert isinstance(data,
                          dict), f'data should be a `dict` but got {data}'
for k, v in data.items():
self.__setattr__(k, v)
def new(self, meta_info=None, data=None):
"""Return a new results with same image meta information.
Args:
meta_info (dict, optional): A dict contains the meta information
of image. such as `img_shape`, `scale_factor`, etc.
Default: None.
data (dict, optional): A dict contains annotations of image or
model predictions. Default: None.
"""
new_data = self.__class__()
new_data.set_meta_info(dict(self.meta_info_items()))
if meta_info is not None:
new_data.set_meta_info(meta_info)
if data is not None:
new_data.set_data(data)
return new_data
def keys(self):
"""
Returns:
list: Contains all keys in data_fields.
"""
return [key for key in self._data_fields]
def meta_info_keys(self):
"""
Returns:
list: Contains all keys in meta_info_fields.
"""
return [key for key in self._meta_info_fields]
def values(self):
"""
Returns:
list: Contains all values in data_fields.
"""
return [getattr(self, k) for k in self.keys()]
def meta_info_values(self):
"""
Returns:
list: Contains all values in meta_info_fields.
"""
return [getattr(self, k) for k in self.meta_info_keys()]
def items(self):
for k in self.keys():
yield (k, getattr(self, k))
def meta_info_items(self):
for k in self.meta_info_keys():
yield (k, getattr(self, k))
def __setattr__(self, name, val):
if name in ('_meta_info_fields', '_data_fields'):
if not hasattr(self, name):
super().__setattr__(name, val)
else:
raise AttributeError(
f'{name} has been used as a '
f'private attribute, which is immutable. ')
else:
if name in self._meta_info_fields:
raise AttributeError(f'`{name}` is used in meta information,'
f'which is immutable')
self._data_fields.add(name)
super().__setattr__(name, val)
def __delattr__(self, item):
if item in ('_meta_info_fields', '_data_fields'):
raise AttributeError(f'{item} has been used as a '
f'private attribute, which is immutable. ')
if item in self._meta_info_fields:
raise KeyError(f'{item} is used in meta information, '
f'which is immutable.')
super().__delattr__(item)
if item in self._data_fields:
self._data_fields.remove(item)
# dict-like methods
__setitem__ = __setattr__
__delitem__ = __delattr__
def __getitem__(self, name):
return getattr(self, name)
def get(self, *args):
        assert len(args) < 3, '`get` takes at most 2 arguments'
return self.__dict__.get(*args)
def pop(self, *args):
        assert len(args) < 3, '`pop` takes at most 2 arguments'
name = args[0]
if name in self._meta_info_fields:
raise KeyError(f'{name} is a key in meta information, '
f'which is immutable')
if args[0] in self._data_fields:
self._data_fields.remove(args[0])
return self.__dict__.pop(*args)
# with default value
elif len(args) == 2:
return args[1]
else:
raise KeyError(f'{args[0]}')
def __contains__(self, item):
return item in self._data_fields or \
item in self._meta_info_fields
# Tensor-like methods
def to(self, *args, **kwargs):
"""Apply same name function to all tensors in data_fields."""
new_data = self.new()
for k, v in self.items():
if hasattr(v, 'to'):
v = v.to(*args, **kwargs)
new_data[k] = v
return new_data
# Tensor-like methods
def cpu(self):
"""Apply same name function to all tensors in data_fields."""
new_data = self.new()
for k, v in self.items():
if isinstance(v, torch.Tensor):
v = v.cpu()
new_data[k] = v
return new_data
# Tensor-like methods
def cuda(self):
"""Apply same name function to all tensors in data_fields."""
new_data = self.new()
for k, v in self.items():
if isinstance(v, torch.Tensor):
v = v.cuda()
new_data[k] = v
return new_data
# Tensor-like methods
def detach(self):
"""Apply same name function to all tensors in data_fields."""
new_data = self.new()
for k, v in self.items():
if isinstance(v, torch.Tensor):
v = v.detach()
new_data[k] = v
return new_data
# Tensor-like methods
def numpy(self):
"""Apply same name function to all tensors in data_fields."""
new_data = self.new()
for k, v in self.items():
if isinstance(v, torch.Tensor):
v = v.detach().cpu().numpy()
new_data[k] = v
return new_data
def __nice__(self):
repr = '\n \n META INFORMATION \n'
for k, v in self.meta_info_items():
repr += f'{k}: {v} \n'
repr += '\n DATA FIELDS \n'
for k, v in self.items():
if isinstance(v, (torch.Tensor, np.ndarray)):
repr += f'shape of {k}: {v.shape} \n'
else:
repr += f'{k}: {v} \n'
return repr + '\n'
| 10,806 | 33.091483 | 79 | py |
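A usage sketch for `GeneralData`, condensed from its docstring above: data fields are mutable and dict-like, while meta info is write-once.

import torch
from mmdet.core import GeneralData

img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3))
data = GeneralData(meta_info=img_meta)
data.det_labels = torch.LongTensor([0, 1, 2, 3])  # goes into data_fields
assert 'det_labels' in data and 'img_shape' in data
data.pop('det_labels', None)                      # data fields are mutable
# Re-setting img_shape to a different value would raise KeyError, because
# everything in meta_info_fields is immutable once set.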
DSLA-DSLA | DSLA-DSLA/mmdet/core/data_structures/instance_data.py | # Copyright (c) OpenMMLab. All rights reserved.
import itertools
import numpy as np
import torch
from .general_data import GeneralData
class InstanceData(GeneralData):
"""Data structure for instance-level annnotations or predictions.
Subclass of :class:`GeneralData`. All value in `data_fields`
should have the same length. This design refer to
https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501
Examples:
>>> from mmdet.core import InstanceData
>>> import numpy as np
>>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3))
>>> results = InstanceData(img_meta)
        >>> 'img_shape' in results
True
>>> results.det_labels = torch.LongTensor([0, 1, 2, 3])
>>> results["det_scores"] = torch.Tensor([0.01, 0.7, 0.6, 0.3])
        >>> results["det_masks"] = np.zeros((4, 2, 2))
>>> len(results)
4
        >>> print(results)
<InstanceData(
META INFORMATION
pad_shape: (800, 1216, 3)
img_shape: (800, 1196, 3)
PREDICTIONS
shape of det_labels: torch.Size([4])
shape of det_masks: (4, 2, 2)
shape of det_scores: torch.Size([4])
) at 0x7fe26b5ca990>
>>> sorted_results = results[results.det_scores.sort().indices]
>>> sorted_results.det_scores
tensor([0.0100, 0.3000, 0.6000, 0.7000])
>>> sorted_results.det_labels
tensor([0, 3, 2, 1])
        >>> print(results[results.det_scores > 0.5])
<InstanceData(
META INFORMATION
pad_shape: (800, 1216, 3)
img_shape: (800, 1196, 3)
PREDICTIONS
shape of det_labels: torch.Size([2])
shape of det_masks: (2, 2, 2)
shape of det_scores: torch.Size([2])
) at 0x7fe26b6d7790>
>>> results[results.det_scores > 0.5].det_labels
tensor([1, 2])
>>> results[results.det_scores > 0.5].det_scores
tensor([0.7000, 0.6000])
"""
def __setattr__(self, name, value):
if name in ('_meta_info_fields', '_data_fields'):
if not hasattr(self, name):
super().__setattr__(name, value)
else:
raise AttributeError(
f'{name} has been used as a '
f'private attribute, which is immutable. ')
else:
assert isinstance(value, (torch.Tensor, np.ndarray, list)), \
f'Can set {type(value)}, only support' \
f' {(torch.Tensor, np.ndarray, list)}'
if self._data_fields:
assert len(value) == len(self), f'the length of ' \
f'values {len(value)} is ' \
f'not consistent with' \
f' the length ' \
f'of this :obj:`InstanceData` ' \
f'{len(self)} '
super().__setattr__(name, value)
def __getitem__(self, item):
"""
Args:
item (str, obj:`slice`,
obj`torch.LongTensor`, obj:`torch.BoolTensor`):
get the corresponding values according to item.
Returns:
obj:`InstanceData`: Corresponding values.
"""
        assert len(self), 'This is an empty instance'
assert isinstance(
item, (str, slice, int, torch.LongTensor, torch.BoolTensor))
if isinstance(item, str):
return getattr(self, item)
if type(item) == int:
if item >= len(self) or item < -len(self):
raise IndexError(f'Index {item} out of range!')
else:
# keep the dimension
item = slice(item, None, len(self))
new_data = self.new()
if isinstance(item, (torch.Tensor)):
assert item.dim() == 1, 'Only support to get the' \
' values along the first dimension.'
if isinstance(item, torch.BoolTensor):
assert len(item) == len(self), f'The shape of the' \
f' input(BoolTensor)) ' \
f'{len(item)} ' \
f' does not match the shape ' \
f'of the indexed tensor ' \
f'in results_filed ' \
f'{len(self)} at ' \
f'first dimension. '
for k, v in self.items():
if isinstance(v, torch.Tensor):
new_data[k] = v[item]
elif isinstance(v, np.ndarray):
new_data[k] = v[item.cpu().numpy()]
elif isinstance(v, list):
r_list = []
                    # convert BoolTensor to indexes
if isinstance(item, torch.BoolTensor):
indexes = torch.nonzero(item).view(-1)
else:
indexes = item
for index in indexes:
r_list.append(v[index])
new_data[k] = r_list
else:
# item is a slice
for k, v in self.items():
new_data[k] = v[item]
return new_data
@staticmethod
def cat(instances_list):
"""Concat the predictions of all :obj:`InstanceData` in the list.
Args:
instances_list (list[:obj:`InstanceData`]): A list
of :obj:`InstanceData`.
Returns:
obj:`InstanceData`
"""
assert all(
isinstance(results, InstanceData) for results in instances_list)
assert len(instances_list) > 0
if len(instances_list) == 1:
return instances_list[0]
new_data = instances_list[0].new()
for k in instances_list[0]._data_fields:
values = [results[k] for results in instances_list]
v0 = values[0]
if isinstance(v0, torch.Tensor):
values = torch.cat(values, dim=0)
elif isinstance(v0, np.ndarray):
values = np.concatenate(values, axis=0)
elif isinstance(v0, list):
values = list(itertools.chain(*values))
else:
raise ValueError(
f'Can not concat the {k} which is a {type(v0)}')
new_data[k] = values
return new_data
def __len__(self):
if len(self._data_fields):
for v in self.values():
return len(v)
else:
raise AssertionError('This is an empty `InstanceData`.')
| 6,926 | 35.650794 | 109 | py |
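A sketch of boolean-mask indexing on `InstanceData`, following its docstring above: indexing with a mask filters every data field consistently.

import torch
from mmdet.core import InstanceData

results = InstanceData(dict(img_shape=(800, 1196, 3)))
results.det_scores = torch.tensor([0.01, 0.7, 0.6, 0.3])
results.det_labels = torch.tensor([0, 1, 2, 3])

kept = results[results.det_scores > 0.5]  # one mask filters all fields
print(kept.det_labels)  # tensor([1, 2])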
DSLA-DSLA | DSLA-DSLA/mmdet/core/utils/dist_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import functools
import pickle
import warnings
from collections import OrderedDict
import torch
import torch.distributed as dist
from mmcv.runner import OptimizerHook, get_dist_info
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
"""Allreduce gradients.
Args:
params (list[torch.Parameters]): List of parameters of a model
coalesce (bool, optional): Whether allreduce parameters as a whole.
Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook):
"""Deprecated optimizer hook for distributed training."""
def __init__(self, *args, **kwargs):
warnings.warn('"DistOptimizerHook" is deprecated, please switch to'
'"mmcv.runner.OptimizerHook".')
super().__init__(*args, **kwargs)
def reduce_mean(tensor):
""""Obtain the mean of tensor on different GPUs."""
if not (dist.is_available() and dist.is_initialized()):
return tensor
tensor = tensor.clone()
dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
return tensor
def obj2tensor(pyobj, device='cuda'):
"""Serialize picklable python object to tensor."""
storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))
return torch.ByteTensor(storage).to(device=device)
def tensor2obj(tensor):
"""Deserialize tensor to picklable python object."""
return pickle.loads(tensor.cpu().numpy().tobytes())
@functools.lru_cache()
def _get_global_gloo_group():
"""Return a process group based on gloo backend, containing all the ranks
The result is cached."""
if dist.get_backend() == 'nccl':
return dist.new_group(backend='gloo')
else:
return dist.group.WORLD
def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
"""Apply all reduce function for python dict object.
The code is modified from https://github.com/Megvii-
BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.
NOTE: make sure that py_dict in different ranks has the same keys and
the values should be in the same shape. Currently only supports
nccl backend.
Args:
py_dict (dict): Dict to be applied all reduce op.
op (str): Operator, could be 'sum' or 'mean'. Default: 'sum'
group (:obj:`torch.distributed.group`, optional): Distributed group,
Default: None.
to_float (bool): Whether to convert all values of dict to float.
Default: True.
Returns:
OrderedDict: reduced python dict object.
"""
warnings.warn(
        '`group` is deprecated. Currently only supports NCCL backend.')
_, world_size = get_dist_info()
if world_size == 1:
return py_dict
# all reduce logic across different devices.
py_key = list(py_dict.keys())
if not isinstance(py_dict, OrderedDict):
py_key_tensor = obj2tensor(py_key)
dist.broadcast(py_key_tensor, src=0)
py_key = tensor2obj(py_key_tensor)
tensor_shapes = [py_dict[k].shape for k in py_key]
tensor_numels = [py_dict[k].numel() for k in py_key]
if to_float:
warnings.warn('Note: the "to_float" is True, you need to '
'ensure that the behavior is reasonable.')
flatten_tensor = torch.cat(
[py_dict[k].flatten().float() for k in py_key])
else:
flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])
dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)
if op == 'mean':
flatten_tensor /= world_size
split_tensors = [
x.reshape(shape) for x, shape in zip(
torch.split(flatten_tensor, tensor_numels), tensor_shapes)
]
out_dict = {k: v for k, v in zip(py_key, split_tensors)}
if isinstance(py_dict, OrderedDict):
out_dict = OrderedDict(out_dict)
return out_dict
| 5,201 | 32.779221 | 77 | py |
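A single-process sketch of the flatten/reduce/split pattern used by `all_reduce_dict` above; the actual `dist.all_reduce` call is replaced by a comment since no process group is assumed here.

import torch

py_dict = {'a': torch.ones(2, 2), 'b': torch.arange(3.)}
keys = list(py_dict)
shapes = [py_dict[k].shape for k in keys]
numels = [py_dict[k].numel() for k in keys]

flat = torch.cat([py_dict[k].flatten().float() for k in keys])
# dist.all_reduce(flat, op=dist.ReduceOp.SUM) would run here across ranks.
split = [x.reshape(s) for x, s in zip(torch.split(flat, numels), shapes)]
out = dict(zip(keys, split))  # same keys, same shapes as the input dict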
DSLA-DSLA | DSLA-DSLA/mmdet/core/utils/misc.py | # Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
import numpy as np
import torch
from six.moves import map, zip
from ..mask.structures import BitmapMasks, PolygonMasks
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments.
Note:
This function applies the ``func`` to multiple inputs and
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
tuple(list): A tuple containing multiple list, each list contains \
a kind of returned results by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of size
count)"""
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds.type(torch.bool), :] = data
return ret
def mask2ndarray(mask):
"""Convert Mask to ndarray..
Args:
mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
torch.Tensor or np.ndarray): The mask to be converted.
Returns:
np.ndarray: Ndarray mask of shape (n, h, w) that has been converted
"""
if isinstance(mask, (BitmapMasks, PolygonMasks)):
mask = mask.to_ndarray()
elif isinstance(mask, torch.Tensor):
mask = mask.detach().cpu().numpy()
elif not isinstance(mask, np.ndarray):
raise TypeError(f'Unsupported {type(mask)} data type')
return mask
def flip_tensor(src_tensor, flip_direction):
"""flip tensor base on flip_direction.
Args:
src_tensor (Tensor): input feature map, shape (B, C, H, W).
flip_direction (str): The flipping direction. Options are
'horizontal', 'vertical', 'diagonal'.
Returns:
out_tensor (Tensor): Flipped tensor.
"""
assert src_tensor.ndim == 4
valid_directions = ['horizontal', 'vertical', 'diagonal']
assert flip_direction in valid_directions
if flip_direction == 'horizontal':
out_tensor = torch.flip(src_tensor, [3])
elif flip_direction == 'vertical':
out_tensor = torch.flip(src_tensor, [2])
else:
out_tensor = torch.flip(src_tensor, [2, 3])
return out_tensor
def select_single_mlvl(mlvl_tensors, batch_id, detach=True):
"""Extract a multi-scale single image tensor from a multi-scale batch
tensor based on batch index.
Note: The default value of detach is True, because the proposal gradient
needs to be detached during the training of the two-stage model. E.g
Cascade Mask R-CNN.
Args:
mlvl_tensors (list[Tensor]): Batch tensor for all scale levels,
each is a 4D-tensor.
batch_id (int): Batch index.
detach (bool): Whether detach gradient. Default True.
Returns:
list[Tensor]: Multi-scale single image tensor.
"""
assert isinstance(mlvl_tensors, (list, tuple))
num_levels = len(mlvl_tensors)
if detach:
mlvl_tensor_list = [
mlvl_tensors[i][batch_id].detach() for i in range(num_levels)
]
else:
mlvl_tensor_list = [
mlvl_tensors[i][batch_id] for i in range(num_levels)
]
return mlvl_tensor_list
def filter_scores_and_topk(scores, score_thr, topk, results=None):
"""Filter results using score threshold and topk candidates.
Args:
scores (Tensor): The scores, shape (num_bboxes, K).
score_thr (float): The score filter threshold.
topk (int): The number of topk candidates.
results (dict or list or Tensor, Optional): The results to
which the filtering rule is to be applied. The shape
of each item is (num_bboxes, N).
Returns:
tuple: Filtered results
- scores (Tensor): The scores after being filtered, \
shape (num_bboxes_filtered, ).
- labels (Tensor): The class labels, shape \
(num_bboxes_filtered, ).
- anchor_idxs (Tensor): The anchor indexes, shape \
(num_bboxes_filtered, ).
- filtered_results (dict or list or Tensor, Optional): \
The filtered results. The shape of each item is \
(num_bboxes_filtered, N).
"""
valid_mask = scores > score_thr
scores = scores[valid_mask]
valid_idxs = torch.nonzero(valid_mask)
num_topk = min(topk, valid_idxs.size(0))
# torch.sort is actually faster than .topk (at least on GPUs)
scores, idxs = scores.sort(descending=True)
scores = scores[:num_topk]
topk_idxs = valid_idxs[idxs[:num_topk]]
keep_idxs, labels = topk_idxs.unbind(dim=1)
filtered_results = None
if results is not None:
if isinstance(results, dict):
filtered_results = {k: v[keep_idxs] for k, v in results.items()}
elif isinstance(results, list):
filtered_results = [result[keep_idxs] for result in results]
elif isinstance(results, torch.Tensor):
filtered_results = results[keep_idxs]
else:
raise NotImplementedError(f'Only supports dict or list or Tensor, '
f'but get {type(results)}.')
return scores, labels, keep_idxs, filtered_results
def center_of_mass(mask, eps=1e-6):
"""Calculate the centroid coordinates of the mask.
Args:
mask (Tensor): The mask to be calculated, shape (h, w).
        eps (float): Avoid dividing by zero. Default: 1e-6.
Returns:
tuple[Tensor]: the coordinates of the center point of the mask.
- center_h (Tensor): the center point of the height.
- center_w (Tensor): the center point of the width.
"""
h, w = mask.shape
grid_h = torch.arange(h, device=mask.device)[:, None]
grid_w = torch.arange(w, device=mask.device)
    normalizer = mask.sum().float().clamp(min=eps)
center_h = (mask * grid_h).sum() / normalizer
center_w = (mask * grid_w).sum() / normalizer
return center_h, center_w
def generate_coordinate(featmap_sizes, device='cuda'):
"""Generate the coordinate.
Args:
        featmap_sizes (tuple): The size of the feature map to generate
            coordinates for, arranged as (N, C, H, W).
device (str): The device where the feature will be put on.
Returns:
        coord_feat (Tensor): The coordinate feature, of shape (N, 2, H, W).
"""
x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device)
y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device)
y, x = torch.meshgrid(y_range, x_range)
y = y.expand([featmap_sizes[0], 1, -1, -1])
x = x.expand([featmap_sizes[0], 1, -1, -1])
coord_feat = torch.cat([x, y], 1)
return coord_feat
| 7,147 | 33.200957 | 79 | py |
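A toy call to `filter_scores_and_topk` above; the score matrix is invented. Each kept entry corresponds to a (box, class) pair that passed the threshold and survived the top-k cut.

import torch
from mmdet.core.utils.misc import filter_scores_and_topk

scores = torch.tensor([[0.9, 0.05],
                       [0.2, 0.6],
                       [0.01, 0.02]])  # (num_bboxes, num_classes)
kept_scores, labels, keep_idxs, _ = filter_scores_and_topk(
    scores, score_thr=0.1, topk=2)
print(kept_scores)  # tensor([0.9000, 0.6000])
print(labels)       # tensor([0, 1]), class column of each kept pair
print(keep_idxs)    # tensor([0, 1]), box row of each kept pair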
DSLA-DSLA | DSLA-DSLA/mmdet/core/anchor/point_generator.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.nn.modules.utils import _pair
from .builder import PRIOR_GENERATORS
@PRIOR_GENERATORS.register_module()
class PointGenerator:
def _meshgrid(self, x, y, row_major=True):
xx = x.repeat(len(y))
yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def grid_points(self, featmap_size, stride=16, device='cuda'):
feat_h, feat_w = featmap_size
shift_x = torch.arange(0., feat_w, device=device) * stride
shift_y = torch.arange(0., feat_h, device=device) * stride
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
stride = shift_x.new_full((shift_xx.shape[0], ), stride)
shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
all_points = shifts.to(device)
return all_points
def valid_flags(self, featmap_size, valid_size, device='cuda'):
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
return valid
@PRIOR_GENERATORS.register_module()
class MlvlPointGenerator:
"""Standard points generator for multi-level (Mlvl) feature maps in 2D
points-based detectors.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels in order (w, h).
offset (float): The offset of points, the value is normalized with
corresponding stride. Defaults to 0.5.
"""
def __init__(self, strides, offset=0.5):
self.strides = [_pair(stride) for stride in strides]
self.offset = offset
@property
def num_levels(self):
"""int: number of feature levels that the generator will be applied"""
return len(self.strides)
@property
def num_base_priors(self):
"""list[int]: The number of priors (points) at a point
on the feature grid"""
return [1 for _ in range(len(self.strides))]
def _meshgrid(self, x, y, row_major=True):
yy, xx = torch.meshgrid(y, x)
if row_major:
# warning .flatten() would cause error in ONNX exporting
# have to use reshape here
return xx.reshape(-1), yy.reshape(-1)
else:
return yy.reshape(-1), xx.reshape(-1)
def grid_priors(self,
featmap_sizes,
dtype=torch.float32,
device='cuda',
with_stride=False):
"""Generate grid points of multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
multiple feature levels, each size arrange as
as (h, w).
dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
device (str): The device where the anchors will be put on.
with_stride (bool): Whether to concatenate the stride to
the last dimension of points.
Return:
list[torch.Tensor]: Points of multiple feature levels.
The sizes of each tensor should be (N, 2) when with stride is
``False``, where N = width * height, width and height
are the sizes of the corresponding feature level,
and the last dimension 2 represent (coord_x, coord_y),
otherwise the shape should be (N, 4),
and the last dimension 4 represent
(coord_x, coord_y, stride_w, stride_h).
"""
assert self.num_levels == len(featmap_sizes)
multi_level_priors = []
for i in range(self.num_levels):
priors = self.single_level_grid_priors(
featmap_sizes[i],
level_idx=i,
dtype=dtype,
device=device,
with_stride=with_stride)
multi_level_priors.append(priors)
return multi_level_priors
def single_level_grid_priors(self,
featmap_size,
level_idx,
dtype=torch.float32,
device='cuda',
with_stride=False):
"""Generate grid Points of a single level.
Note:
This function is usually called by method ``self.grid_priors``.
Args:
featmap_size (tuple[int]): Size of the feature maps, arrange as
(h, w).
level_idx (int): The index of corresponding feature map level.
dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
device (str, optional): The device the tensor will be put on.
Defaults to 'cuda'.
with_stride (bool): Concatenate the stride to the last dimension
of points.
Return:
Tensor: Points of single feature levels.
The shape of tensor should be (N, 2) when with stride is
``False``, where N = width * height, width and height
are the sizes of the corresponding feature level,
and the last dimension 2 represent (coord_x, coord_y),
otherwise the shape should be (N, 4),
and the last dimension 4 represent
(coord_x, coord_y, stride_w, stride_h).
"""
feat_h, feat_w = featmap_size
stride_w, stride_h = self.strides[level_idx]
shift_x = (torch.arange(0, feat_w, device=device) +
self.offset) * stride_w
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
shift_x = shift_x.to(dtype)
shift_y = (torch.arange(0, feat_h, device=device) +
self.offset) * stride_h
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
shift_y = shift_y.to(dtype)
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
if not with_stride:
shifts = torch.stack([shift_xx, shift_yy], dim=-1)
else:
# use `shape[0]` instead of `len(shift_xx)` for ONNX export
stride_w = shift_xx.new_full((shift_xx.shape[0], ),
stride_w).to(dtype)
stride_h = shift_xx.new_full((shift_yy.shape[0], ),
stride_h).to(dtype)
shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h],
dim=-1)
all_points = shifts.to(device)
return all_points
def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
"""Generate valid flags of points of multiple feature levels.
Args:
featmap_sizes (list(tuple)): List of feature map sizes in
multiple feature levels, each size arrange as
as (h, w).
pad_shape (tuple(int)): The padded shape of the image,
arrange as (h, w).
device (str): The device where the anchors will be put on.
Return:
list(torch.Tensor): Valid flags of points of multiple levels.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_flags = []
for i in range(self.num_levels):
point_stride = self.strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w = pad_shape[:2]
valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h)
valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w)
flags = self.single_level_valid_flags((feat_h, feat_w),
(valid_feat_h, valid_feat_w),
device=device)
multi_level_flags.append(flags)
return multi_level_flags
def single_level_valid_flags(self,
featmap_size,
valid_size,
device='cuda'):
"""Generate the valid flags of points of a single feature map.
Args:
featmap_size (tuple[int]): The size of feature maps, arrange as
as (h, w).
valid_size (tuple[int]): The valid size of the feature maps.
The size arrange as as (h, w).
device (str, optional): The device where the flags will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: The valid flags of each points in a single level \
feature map.
"""
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
return valid
def sparse_priors(self,
prior_idxs,
featmap_size,
level_idx,
dtype=torch.float32,
device='cuda'):
"""Generate sparse points according to the ``prior_idxs``.
Args:
prior_idxs (Tensor): The index of corresponding anchors
in the feature map.
            featmap_size (tuple[int]): feature map size arranged as (h, w).
level_idx (int): The level index of corresponding feature
map.
            dtype (:obj:`torch.dtype`): Data type of points. Defaults to
                ``torch.float32``.
device (obj:`torch.device`): The device where the points is
located.
Returns:
Tensor: Anchor with shape (N, 2), N should be equal to
the length of ``prior_idxs``. And last dimension
2 represent (coord_x, coord_y).
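        Examples:
            A hedged sketch (not part of the original docstring), again
            assuming mmdet's ``MlvlPointGenerator`` with ``strides=[16]``
            and ``offset=0.5``; flat index 3 on a 2x2 grid maps to the
            cell at (x=1, y=1):
            >>> import torch
            >>> from mmdet.core import MlvlPointGenerator
            >>> self = MlvlPointGenerator(strides=[16])
            >>> self.sparse_priors(torch.tensor([3]), (2, 2),
            ...                    level_idx=0, device='cpu').tolist()
            [[24.0, 24.0]]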
"""
height, width = featmap_size
x = (prior_idxs % width + self.offset) * self.strides[level_idx][0]
y = ((prior_idxs // width) % height +
self.offset) * self.strides[level_idx][1]
        priors = torch.stack([x, y], 1).to(dtype)
        priors = priors.to(device)
        return priors
| 10,739 | 39.681818 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/anchor/anchor_generator.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import numpy as np
import torch
from torch.nn.modules.utils import _pair
from .builder import PRIOR_GENERATORS
@PRIOR_GENERATORS.register_module()
class AnchorGenerator:
"""Standard anchor generator for 2D anchor-based detectors.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels in order (w, h).
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
scales (list[int] | None): Anchor scales for anchors in a single level.
It cannot be set at the same time if `octave_base_scale` and
`scales_per_octave` are set.
base_sizes (list[int] | None): The basic sizes
of anchors in multiple levels.
If None is given, strides will be used as base_sizes.
            (If strides are non-square, the shortest stride is taken.)
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. By default it is True in V2.0
octave_base_scale (int): The base scale of octave.
scales_per_octave (int): Number of scales for each octave.
`octave_base_scale` and `scales_per_octave` are usually used in
retinanet and the `scales` should be None when they are set.
centers (list[tuple[float, float]] | None): The centers of the anchor
relative to the feature grid center in multiple feature levels.
By default it is set to be None and not used. If a list of tuple of
float is given, they will be used to shift the centers of anchors.
center_offset (float): The offset of center in proportion to anchors'
width and height. By default it is 0 in V2.0.
Examples:
>>> from mmdet.core import AnchorGenerator
>>> self = AnchorGenerator([16], [1.], [1.], [9])
>>> all_anchors = self.grid_priors([(2, 2)], device='cpu')
>>> print(all_anchors)
[tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
[11.5000, -4.5000, 20.5000, 4.5000],
[-4.5000, 11.5000, 4.5000, 20.5000],
[11.5000, 11.5000, 20.5000, 20.5000]])]
>>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
>>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu')
>>> print(all_anchors)
[tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
[11.5000, -4.5000, 20.5000, 4.5000],
[-4.5000, 11.5000, 4.5000, 20.5000],
[11.5000, 11.5000, 20.5000, 20.5000]]), \
tensor([[-9., -9., 9., 9.]])]
"""
def __init__(self,
strides,
ratios,
scales=None,
base_sizes=None,
scale_major=True,
octave_base_scale=None,
scales_per_octave=None,
centers=None,
center_offset=0.):
# check center and center_offset
if center_offset != 0:
assert centers is None, 'center cannot be set when center_offset' \
f'!=0, {centers} is given.'
if not (0 <= center_offset <= 1):
raise ValueError('center_offset should be in range [0, 1], '
f'{center_offset} is given.')
if centers is not None:
assert len(centers) == len(strides), \
'The number of strides should be the same as centers, got ' \
f'{strides} and {centers}'
# calculate base sizes of anchors
self.strides = [_pair(stride) for stride in strides]
self.base_sizes = [min(stride) for stride in self.strides
] if base_sizes is None else base_sizes
assert len(self.base_sizes) == len(self.strides), \
'The number of strides should be the same as base sizes, got ' \
f'{self.strides} and {self.base_sizes}'
# calculate scales of anchors
assert ((octave_base_scale is not None
and scales_per_octave is not None) ^ (scales is not None)), \
'scales and octave_base_scale with scales_per_octave cannot' \
' be set at the same time'
if scales is not None:
self.scales = torch.Tensor(scales)
elif octave_base_scale is not None and scales_per_octave is not None:
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
scales = octave_scales * octave_base_scale
self.scales = torch.Tensor(scales)
else:
raise ValueError('Either scales or octave_base_scale with '
'scales_per_octave should be set')
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.ratios = torch.Tensor(ratios)
self.scale_major = scale_major
self.centers = centers
self.center_offset = center_offset
self.base_anchors = self.gen_base_anchors()
@property
def num_base_anchors(self):
"""list[int]: total number of base anchors in a feature grid"""
return self.num_base_priors
@property
def num_base_priors(self):
"""list[int]: The number of priors (anchors) at a point
on the feature grid"""
return [base_anchors.size(0) for base_anchors in self.base_anchors]
@property
def num_levels(self):
"""int: number of feature levels that the generator will be applied"""
return len(self.strides)
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
center = None
if self.centers is not None:
center = self.centers[i]
multi_level_base_anchors.append(
self.gen_single_level_base_anchors(
base_size,
scales=self.scales,
ratios=self.ratios,
center=center))
return multi_level_base_anchors
def gen_single_level_base_anchors(self,
base_size,
scales,
ratios,
center=None):
"""Generate base anchors of a single level.
Args:
base_size (int | float): Basic size of an anchor.
scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height and width
                of anchors in a single level.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
            torch.Tensor: Anchors in a single-level feature map.
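        Examples:
            A minimal sketch for illustration (not from upstream),
            mirroring the class-level example:
            >>> import torch
            >>> self = AnchorGenerator([16], [1.], [1.], [9])
            >>> self.gen_single_level_base_anchors(
            ...     9, torch.Tensor([1.]), torch.Tensor([1.])).tolist()
            [[-4.5, -4.5, 4.5, 4.5]]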
"""
w = base_size
h = base_size
if center is None:
x_center = self.center_offset * w
y_center = self.center_offset * h
else:
x_center, y_center = center
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
else:
ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchors = [
x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
y_center + 0.5 * hs
]
base_anchors = torch.stack(base_anchors, dim=-1)
return base_anchors
def _meshgrid(self, x, y, row_major=True):
"""Generate mesh grid of x and y.
Args:
x (torch.Tensor): Grids of x dimension.
y (torch.Tensor): Grids of y dimension.
row_major (bool, optional): Whether to return y grids first.
Defaults to True.
Returns:
tuple[torch.Tensor]: The mesh grids of x and y.
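        Examples:
            A hedged sketch (not from upstream) of the flattening order:
            ``xx`` tiles x along the rows while ``yy`` repeats each y entry:
            >>> import torch
            >>> self = AnchorGenerator([16], [1.], [1.], [9])
            >>> xx, yy = self._meshgrid(torch.tensor([0, 1]),
            ...                         torch.tensor([10, 20]))
            >>> xx.tolist(), yy.tolist()
            ([0, 1, 0, 1], [10, 10, 20, 20])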
"""
# use shape instead of len to keep tracing while exporting to onnx
xx = x.repeat(y.shape[0])
yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'):
"""Generate grid anchors in multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
multiple feature levels.
dtype (:obj:`torch.dtype`): Dtype of priors.
Default: torch.float32.
device (str): The device where the anchors will be put on.
Return:
list[torch.Tensor]: Anchors in multiple feature levels. \
The sizes of each tensor should be [N, 4], where \
N = width * height * num_base_anchors, width and height \
are the sizes of the corresponding feature level, \
num_base_anchors is the number of anchors for that level.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_anchors = []
for i in range(self.num_levels):
anchors = self.single_level_grid_priors(
featmap_sizes[i], level_idx=i, dtype=dtype, device=device)
multi_level_anchors.append(anchors)
return multi_level_anchors
def single_level_grid_priors(self,
featmap_size,
level_idx,
dtype=torch.float32,
device='cuda'):
"""Generate grid anchors of a single level.
Note:
This function is usually called by method ``self.grid_priors``.
Args:
featmap_size (tuple[int]): Size of the feature maps.
level_idx (int): The index of corresponding feature map level.
            dtype (:obj:`torch.dtype`): Data type of points. Defaults to
                ``torch.float32``.
device (str, optional): The device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature maps.
"""
base_anchors = self.base_anchors[level_idx].to(device).to(dtype)
feat_h, feat_w = featmap_size
stride_w, stride_h = self.strides[level_idx]
        # First create the range with the default dtype, then convert to
        # the target `dtype` for ONNX exporting.
shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w
shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
return all_anchors
def sparse_priors(self,
prior_idxs,
featmap_size,
level_idx,
dtype=torch.float32,
device='cuda'):
"""Generate sparse anchors according to the ``prior_idxs``.
Args:
prior_idxs (Tensor): The index of corresponding anchors
in the feature map.
            featmap_size (tuple[int]): feature map size arranged as (h, w).
level_idx (int): The level index of corresponding feature
map.
            dtype (:obj:`torch.dtype`): Data type of points. Defaults to
                ``torch.float32``.
device (obj:`torch.device`): The device where the points is
located.
Returns:
Tensor: Anchor with shape (N, 4), N should be equal to
the length of ``prior_idxs``.
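        Examples:
            A hedged sketch (not from upstream): with one base anchor per
            location, flat index 3 on a 2x2 grid is the cell at (x=1, y=1),
            matching the last anchor of the class-level example:
            >>> import torch
            >>> self = AnchorGenerator([16], [1.], [1.], [9])
            >>> self.sparse_priors(torch.tensor([3]), (2, 2),
            ...                    level_idx=0, device='cpu').tolist()
            [[11.5, 11.5, 20.5, 20.5]]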
"""
height, width = featmap_size
num_base_anchors = self.num_base_anchors[level_idx]
base_anchor_id = prior_idxs % num_base_anchors
x = (prior_idxs //
num_base_anchors) % width * self.strides[level_idx][0]
y = (prior_idxs // width //
num_base_anchors) % height * self.strides[level_idx][1]
priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \
self.base_anchors[level_idx][base_anchor_id, :].to(device)
return priors
def grid_anchors(self, featmap_sizes, device='cuda'):
"""Generate grid anchors in multiple feature levels.
Args:
featmap_sizes (list[tuple]): List of feature map sizes in
multiple feature levels.
device (str): Device where the anchors will be put on.
Return:
list[torch.Tensor]: Anchors in multiple feature levels. \
The sizes of each tensor should be [N, 4], where \
N = width * height * num_base_anchors, width and height \
are the sizes of the corresponding feature level, \
num_base_anchors is the number of anchors for that level.
"""
warnings.warn('``grid_anchors`` would be deprecated soon. '
'Please use ``grid_priors`` ')
assert self.num_levels == len(featmap_sizes)
multi_level_anchors = []
for i in range(self.num_levels):
anchors = self.single_level_grid_anchors(
self.base_anchors[i].to(device),
featmap_sizes[i],
self.strides[i],
device=device)
multi_level_anchors.append(anchors)
return multi_level_anchors
def single_level_grid_anchors(self,
base_anchors,
featmap_size,
stride=(16, 16),
device='cuda'):
"""Generate grid anchors of a single level.
Note:
This function is usually called by method ``self.grid_anchors``.
Args:
base_anchors (torch.Tensor): The base anchors of a feature grid.
featmap_size (tuple[int]): Size of the feature maps.
stride (tuple[int], optional): Stride of the feature map in order
(w, h). Defaults to (16, 16).
device (str, optional): Device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature maps.
"""
warnings.warn(
'``single_level_grid_anchors`` would be deprecated soon. '
'Please use ``single_level_grid_priors`` ')
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
feat_h, feat_w = featmap_size
shift_x = torch.arange(0, feat_w, device=device) * stride[0]
shift_y = torch.arange(0, feat_h, device=device) * stride[1]
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
shifts = shifts.type_as(base_anchors)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
return all_anchors
def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
"""Generate valid flags of anchors in multiple feature levels.
Args:
featmap_sizes (list(tuple)): List of feature map sizes in
multiple feature levels.
pad_shape (tuple): The padded shape of the image.
device (str): Device where the anchors will be put on.
Return:
list(torch.Tensor): Valid flags of anchors in multiple levels.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_flags = []
for i in range(self.num_levels):
anchor_stride = self.strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w = pad_shape[:2]
valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)
flags = self.single_level_valid_flags((feat_h, feat_w),
(valid_feat_h, valid_feat_w),
self.num_base_anchors[i],
device=device)
multi_level_flags.append(flags)
return multi_level_flags
def single_level_valid_flags(self,
featmap_size,
valid_size,
num_base_anchors,
device='cuda'):
"""Generate the valid flags of anchor in a single feature map.
Args:
            featmap_size (tuple[int]): The size of feature maps, arranged
                as (h, w).
valid_size (tuple[int]): The valid size of the feature maps.
num_base_anchors (int): The number of base anchors.
device (str, optional): Device where the flags will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: The valid flags of each anchor in a single level \
feature map.
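        Examples:
            A hedged sketch (not from upstream): with a valid size of
            (1, 1) on a 2x2 map, only the top-left cell is flagged:
            >>> self = AnchorGenerator([16], [1.], [1.], [9])
            >>> self.single_level_valid_flags(
            ...     (2, 2), (1, 1), 1, device='cpu').tolist()
            [True, False, False, False]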
"""
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
valid = valid[:, None].expand(valid.size(0),
num_base_anchors).contiguous().view(-1)
return valid
def __repr__(self):
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}octave_base_scale='
repr_str += f'{self.octave_base_scale},\n'
repr_str += f'{indent_str}scales_per_octave='
repr_str += f'{self.scales_per_octave},\n'
repr_str += f'{indent_str}num_levels={self.num_levels}\n'
repr_str += f'{indent_str}centers={self.centers},\n'
repr_str += f'{indent_str}center_offset={self.center_offset})'
return repr_str
@PRIOR_GENERATORS.register_module()
class SSDAnchorGenerator(AnchorGenerator):
"""Anchor generator for SSD.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
min_sizes (list[float]): The list of minimum anchor sizes on each
level.
max_sizes (list[float]): The list of maximum anchor sizes on each
level.
        basesize_ratio_range (tuple(float)): Ratio range of anchors. Used
            when min_sizes and max_sizes are not set.
        input_size (int): Size of the input image, 300 for SSD300, 512 for
            SSD512. Used when min_sizes and max_sizes are not set.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. It is always set to be False in SSD.
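    Examples:
        A hedged sketch (not from upstream) using the classic SSD300-COCO
        settings; each level ends up with 4 or 6 priors per location:
        >>> self = SSDAnchorGenerator(
        ...     strides=[8, 16, 32, 64, 100, 300],
        ...     ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]],
        ...     basesize_ratio_range=(0.15, 0.9))
        >>> self.num_base_priors
        [4, 6, 6, 6, 4, 4]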
"""
def __init__(self,
strides,
ratios,
min_sizes=None,
max_sizes=None,
basesize_ratio_range=(0.15, 0.9),
input_size=300,
scale_major=True):
assert len(strides) == len(ratios)
assert not (min_sizes is None) ^ (max_sizes is None)
self.strides = [_pair(stride) for stride in strides]
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
if min_sizes is None and max_sizes is None:
# use hard code to generate SSD anchors
self.input_size = input_size
assert mmcv.is_tuple_of(basesize_ratio_range, float)
self.basesize_ratio_range = basesize_ratio_range
# calculate anchor ratios and sizes
min_ratio, max_ratio = basesize_ratio_range
min_ratio = int(min_ratio * 100)
max_ratio = int(max_ratio * 100)
step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
min_sizes = []
max_sizes = []
for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
min_sizes.append(int(self.input_size * ratio / 100))
max_sizes.append(int(self.input_size * (ratio + step) / 100))
if self.input_size == 300:
if basesize_ratio_range[0] == 0.15: # SSD300 COCO
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
min_sizes.insert(0, int(self.input_size * 10 / 100))
max_sizes.insert(0, int(self.input_size * 20 / 100))
else:
raise ValueError(
'basesize_ratio_range[0] should be either 0.15'
'or 0.2 when input_size is 300, got '
f'{basesize_ratio_range[0]}.')
elif self.input_size == 512:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
else:
raise ValueError(
'When not setting min_sizes and max_sizes,'
'basesize_ratio_range[0] should be either 0.1'
'or 0.15 when input_size is 512, got'
f' {basesize_ratio_range[0]}.')
else:
raise ValueError(
'Only support 300 or 512 in SSDAnchorGenerator when '
'not setting min_sizes and max_sizes, '
f'got {self.input_size}.')
assert len(min_sizes) == len(max_sizes) == len(strides)
anchor_ratios = []
anchor_scales = []
for k in range(len(self.strides)):
scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
anchor_ratio = [1.]
for r in ratios[k]:
anchor_ratio += [1 / r, r] # 4 or 6 ratio
anchor_ratios.append(torch.Tensor(anchor_ratio))
anchor_scales.append(torch.Tensor(scales))
self.base_sizes = min_sizes
self.scales = anchor_scales
self.ratios = anchor_ratios
self.scale_major = scale_major
self.center_offset = 0
self.base_anchors = self.gen_base_anchors()
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
base_anchors = self.gen_single_level_base_anchors(
base_size,
scales=self.scales[i],
ratios=self.ratios[i],
center=self.centers[i])
indices = list(range(len(self.ratios[i])))
indices.insert(1, len(indices))
base_anchors = torch.index_select(base_anchors, 0,
torch.LongTensor(indices))
multi_level_base_anchors.append(base_anchors)
return multi_level_base_anchors
def __repr__(self):
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}input_size={self.input_size},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}num_levels={self.num_levels},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}basesize_ratio_range='
repr_str += f'{self.basesize_ratio_range})'
return repr_str
@PRIOR_GENERATORS.register_module()
class LegacyAnchorGenerator(AnchorGenerator):
"""Legacy anchor generator used in MMDetection V1.x.
Note:
Difference to the V2.0 anchor generator:
        1. The center offset of V1.x anchors is set to be 0.5 rather than 0.
        2. The width/height are reduced by 1 when calculating the anchors' \
            centers and corners to meet the V1.x coordinate system.
3. The anchors' corners are quantized.
Args:
strides (list[int] | list[tuple[int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
scales (list[int] | None): Anchor scales for anchors in a single level.
It cannot be set at the same time if `octave_base_scale` and
`scales_per_octave` are set.
base_sizes (list[int]): The basic sizes of anchors in multiple levels.
If None is given, strides will be used to generate base_sizes.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. By default it is True in V2.0
octave_base_scale (int): The base scale of octave.
scales_per_octave (int): Number of scales for each octave.
`octave_base_scale` and `scales_per_octave` are usually used in
retinanet and the `scales` should be None when they are set.
centers (list[tuple[float, float]] | None): The centers of the anchor
relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of floats
            is given, this list will be used to shift the centers of anchors.
        center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0 but it should be 0.5
            in v1.x models.
Examples:
>>> from mmdet.core import LegacyAnchorGenerator
>>> self = LegacyAnchorGenerator(
>>> [16], [1.], [1.], [9], center_offset=0.5)
>>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
>>> print(all_anchors)
[tensor([[ 0., 0., 8., 8.],
[16., 0., 24., 8.],
[ 0., 16., 8., 24.],
[16., 16., 24., 24.]])]
"""
def gen_single_level_base_anchors(self,
base_size,
scales,
ratios,
center=None):
"""Generate base anchors of a single level.
Note:
            The width/height of anchors are reduced by 1 when calculating \
                the centers and corners to meet the V1.x coordinate system.
Args:
base_size (int | float): Basic size of an anchor.
scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height and width
                of anchors in a single level.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
torch.Tensor: Anchors in a single-level feature map.
"""
w = base_size
h = base_size
if center is None:
x_center = self.center_offset * (w - 1)
y_center = self.center_offset * (h - 1)
else:
x_center, y_center = center
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
else:
ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchors = [
x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),
x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)
]
base_anchors = torch.stack(base_anchors, dim=-1).round()
return base_anchors
@PRIOR_GENERATORS.register_module()
class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
"""Legacy anchor generator used in MMDetection V1.x.
The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
can be found in `LegacyAnchorGenerator`.
"""
def __init__(self,
strides,
ratios,
basesize_ratio_range,
input_size=300,
scale_major=True):
super(LegacySSDAnchorGenerator, self).__init__(
strides=strides,
ratios=ratios,
basesize_ratio_range=basesize_ratio_range,
input_size=input_size,
scale_major=scale_major)
self.centers = [((stride - 1) / 2., (stride - 1) / 2.)
for stride in strides]
self.base_anchors = self.gen_base_anchors()
@PRIOR_GENERATORS.register_module()
class YOLOAnchorGenerator(AnchorGenerator):
"""Anchor generator for YOLO.
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
base_sizes (list[list[tuple[int, int]]]): The basic sizes
of anchors in multiple levels.
"""
def __init__(self, strides, base_sizes):
self.strides = [_pair(stride) for stride in strides]
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
self.base_sizes = []
num_anchor_per_level = len(base_sizes[0])
for base_sizes_per_level in base_sizes:
assert num_anchor_per_level == len(base_sizes_per_level)
self.base_sizes.append(
[_pair(base_size) for base_size in base_sizes_per_level])
self.base_anchors = self.gen_base_anchors()
@property
def num_levels(self):
"""int: number of feature levels that the generator will be applied"""
return len(self.base_sizes)
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_sizes_per_level in enumerate(self.base_sizes):
center = None
if self.centers is not None:
center = self.centers[i]
multi_level_base_anchors.append(
self.gen_single_level_base_anchors(base_sizes_per_level,
center))
return multi_level_base_anchors
def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
"""Generate base anchors of a single level.
Args:
base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
anchors.
center (tuple[float], optional): The center of the base anchor
related to a single feature grid. Defaults to None.
Returns:
            torch.Tensor: Anchors in a single-level feature map.
"""
x_center, y_center = center
base_anchors = []
for base_size in base_sizes_per_level:
w, h = base_size
# use float anchor and the anchor's center is aligned with the
# pixel center
base_anchor = torch.Tensor([
x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
y_center + 0.5 * h
])
base_anchors.append(base_anchor)
base_anchors = torch.stack(base_anchors, dim=0)
return base_anchors
def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
"""Generate responsible anchor flags of grid cells in multiple scales.
Args:
featmap_sizes (list(tuple)): List of feature map sizes in multiple
feature levels.
gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
device (str): Device where the anchors will be put on.
Return:
            list(torch.Tensor): Responsible flags of anchors in multiple
                levels.
"""
assert self.num_levels == len(featmap_sizes)
multi_level_responsible_flags = []
for i in range(self.num_levels):
anchor_stride = self.strides[i]
flags = self.single_level_responsible_flags(
featmap_sizes[i],
gt_bboxes,
anchor_stride,
self.num_base_anchors[i],
device=device)
multi_level_responsible_flags.append(flags)
return multi_level_responsible_flags
def single_level_responsible_flags(self,
featmap_size,
gt_bboxes,
stride,
num_base_anchors,
device='cuda'):
"""Generate the responsible flags of anchor in a single feature map.
Args:
featmap_size (tuple[int]): The size of feature maps.
gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
stride (tuple(int)): stride of current level
num_base_anchors (int): The number of base anchors.
device (str, optional): Device where the flags will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: The valid flags of each anchor in a single level \
feature map.
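        Examples:
            A hedged sketch (not from upstream): a gt box centered at
            (24, 8) on a stride-16 level lands in grid cell (1, 0) of a
            2x2 map:
            >>> import torch
            >>> self = YOLOAnchorGenerator([16], [[(10, 10)]])
            >>> gt = torch.tensor([[16., 0., 32., 16.]])
            >>> self.single_level_responsible_flags(
            ...     (2, 2), gt, (16, 16), 1, device='cpu').tolist()
            [0, 1, 0, 0]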
"""
feat_h, feat_w = featmap_size
gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)
gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)
gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long()
gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long()
# row major indexing
gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x
responsible_grid = torch.zeros(
feat_h * feat_w, dtype=torch.uint8, device=device)
responsible_grid[gt_bboxes_grid_idx] = 1
responsible_grid = responsible_grid[:, None].expand(
responsible_grid.size(0), num_base_anchors).contiguous().view(-1)
return responsible_grid
| 37,205 | 41.913495 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/core/anchor/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
def images_to_levels(target, num_levels):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
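    Example:
        A hedged sketch (not from upstream): two images whose 4 per-anchor
        targets are split into levels holding 3 and 1 anchors:
        >>> import torch
        >>> targets = [torch.zeros(4), torch.ones(4)]
        >>> [t.shape for t in images_to_levels(targets, [3, 1])]
        [torch.Size([2, 3]), torch.Size([2, 1])]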
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_levels:
end = start + n
# level_targets.append(target[:, start:end].squeeze(0))
level_targets.append(target[:, start:end])
start = end
return level_targets
def anchor_inside_flags(flat_anchors,
valid_flags,
img_shape,
allowed_border=0):
"""Check whether the anchors are inside the border.
Args:
flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
valid_flags (torch.Tensor): An existing valid flags of anchors.
img_shape (tuple(int)): Shape of current image.
allowed_border (int, optional): The border to allow the valid anchor.
Defaults to 0.
Returns:
torch.Tensor: Flags indicating whether the anchors are inside a \
valid range.
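    Example:
        A hedged sketch (not from upstream): the second anchor crosses
        the border of a 32x32 image, so its flag is cleared:
        >>> import torch
        >>> anchors = torch.tensor([[0., 0., 8., 8.],
        ...                         [28., 28., 36., 36.]])
        >>> valid = torch.tensor([True, True])
        >>> anchor_inside_flags(anchors, valid, (32, 32)).tolist()
        [True, False]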
"""
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
return inside_flags
def calc_region(bbox, ratio, featmap_size=None):
"""Calculate a proportional bbox region.
The bbox center are fixed and the new h' and w' is h * ratio and w * ratio.
Args:
bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
ratio (float): Ratio of the output region.
featmap_size (tuple): Feature map size used for clipping the boundary.
Returns:
tuple: x1, y1, x2, y2
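    Example:
        A hedged numeric sketch (not from upstream): shrinking a 100x100
        box with ratio 0.25 keeps the center fixed:
        >>> import torch
        >>> calc_region(torch.tensor([0., 0., 100., 100.]), 0.25)
        (tensor(25), tensor(25), tensor(75), tensor(75))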
"""
x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1])
y1 = y1.clamp(min=0, max=featmap_size[0])
x2 = x2.clamp(min=0, max=featmap_size[1])
y2 = y2.clamp(min=0, max=featmap_size[0])
return (x1, y1, x2, y2)
| 2,545 | 33.876712 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/yolox.py | # Copyright (c) OpenMMLab. All rights reserved.
import random
import torch
import torch.distributed as dist
import torch.nn.functional as F
from mmcv.runner import get_dist_info
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOX(SingleStageDetector):
r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021
<https://arxiv.org/abs/2107.08430>`_
    Note: Considering the trade-off between training speed and accuracy,
    multi-scale training is temporarily kept. A more elegant implementation
    will be adopted in the future.
Args:
backbone (nn.Module): The backbone module.
neck (nn.Module): The neck module.
bbox_head (nn.Module): The bbox head module.
train_cfg (obj:`ConfigDict`, optional): The training config
of YOLOX. Default: None.
test_cfg (obj:`ConfigDict`, optional): The testing config
of YOLOX. Default: None.
pretrained (str, optional): model pretrained path.
Default: None.
input_size (tuple): The model default input image size.
Default: (640, 640).
size_multiplier (int): Image size multiplication factor.
Default: 32.
        random_size_range (tuple): The multi-scale random range during
            multi-scale training. The sampled value is multiplied by
            size_multiplier to obtain the real training image size.
            Default: (15, 25).
random_size_interval (int): The iter interval of change
image size. Default: 10.
init_cfg (dict, optional): Initialization config dict.
Default: None.
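    Examples:
        A hedged illustration of the size sampling rule (not upstream
        code): every sampled integer is scaled by ``size_multiplier``, so
        with the defaults the square side lengths span:
        >>> size_multiplier, random_size_range = 32, (15, 25)
        >>> [size_multiplier * s for s in random_size_range]
        [480, 800]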
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
input_size=(640, 640),
size_multiplier=32,
random_size_range=(15, 25),
random_size_interval=10,
init_cfg=None):
super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.rank, self.world_size = get_dist_info()
self._default_input_size = input_size
self._input_size = input_size
self._random_size_range = random_size_range
self._random_size_interval = random_size_interval
self._size_multiplier = size_multiplier
self._progress_in_iter = 0
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# Multi-scale training
img, gt_bboxes = self._preprocess(img, gt_bboxes)
losses = super(YOLOX, self).forward_train(img, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore)
# random resizing
if (self._progress_in_iter + 1) % self._random_size_interval == 0:
self._input_size = self._random_resize()
self._progress_in_iter += 1
return losses
def _preprocess(self, img, gt_bboxes):
scale_y = self._input_size[0] / self._default_input_size[0]
scale_x = self._input_size[1] / self._default_input_size[1]
if scale_x != 1 or scale_y != 1:
img = F.interpolate(
img,
size=self._input_size,
mode='bilinear',
align_corners=False)
for gt_bbox in gt_bboxes:
gt_bbox[..., 0::2] = gt_bbox[..., 0::2] * scale_x
gt_bbox[..., 1::2] = gt_bbox[..., 1::2] * scale_y
return img, gt_bboxes
def _random_resize(self):
tensor = torch.LongTensor(2).cuda()
if self.rank == 0:
size = random.randint(*self._random_size_range)
aspect_ratio = float(
self._default_input_size[1]) / self._default_input_size[0]
size = (self._size_multiplier * size,
self._size_multiplier * int(aspect_ratio * size))
tensor[0] = size[0]
tensor[1] = size[1]
if self.world_size > 1:
dist.barrier()
dist.broadcast(tensor, 0)
input_size = (tensor[0].item(), tensor[1].item())
return input_size
| 5,360 | 38.711111 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/two_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
"""Base class for two-stage detectors.
    Two-stage detectors typically consist of a region proposal network and a
    task-specific regression head.
"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(TwoStageDetector, self).__init__(init_cfg)
if pretrained:
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
backbone.pretrained = pretrained
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
roi_head.pretrained = pretrained
self.roi_head = build_head(roi_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
@property
def with_rpn(self):
"""bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self):
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
outs = ()
# backbone
x = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(x)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).to(img.device)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposals)
outs = outs + (roi_outs, )
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
proposals : override rpn proposals with custom proposals. Use when
`with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_losses, proposal_list = self.rpn_head.forward_train(
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=gt_bboxes_ignore,
proposal_cfg=proposal_cfg,
**kwargs)
losses.update(rpn_losses)
else:
proposal_list = proposals
roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
gt_bboxes, gt_labels,
gt_bboxes_ignore, gt_masks,
**kwargs)
losses.update(roi_losses)
return losses
async def async_simple_test(self,
img,
img_meta,
proposals=None,
rescale=False):
"""Async test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = await self.rpn_head.async_simple_test_rpn(
x, img_meta)
else:
proposal_list = proposals
return await self.roi_head.async_simple_test(
x, proposal_list, img_meta, rescale=rescale)
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
return self.roi_head.simple_test(
x, proposal_list, img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def onnx_export(self, img, img_metas):
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
x = self.extract_feat(img)
proposals = self.rpn_head.onnx_export(x, img_metas)
if hasattr(self.roi_head, 'onnx_export'):
return self.roi_head.onnx_export(x, proposals, img_metas)
else:
raise NotImplementedError(
f'{self.__class__.__name__} can not '
f'be exported to ONNX. Please refer to the '
f'list of supported models,'
f'https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx' # noqa E501
)
| 7,703 | 35.339623 | 148 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/base.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16
from mmdet.core.visualization import imshow_det_bboxes
class BaseDetector(BaseModule, metaclass=ABCMeta):
"""Base class for detectors."""
def __init__(self, init_cfg=None):
super(BaseDetector, self).__init__(init_cfg)
self.fp16_enabled = False
@property
def with_neck(self):
"""bool: whether the detector has a neck"""
return hasattr(self, 'neck') and self.neck is not None
# TODO: these properties need to be carefully handled
# for both single stage & two stage detectors
@property
def with_shared_head(self):
"""bool: whether the detector has a shared head in the RoI Head"""
return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
@property
def with_bbox(self):
"""bool: whether the detector has a bbox head"""
return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
@property
def with_mask(self):
"""bool: whether the detector has a mask head"""
return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
or (hasattr(self, 'mask_head') and self.mask_head is not None))
@abstractmethod
def extract_feat(self, imgs):
"""Extract features from images."""
pass
def extract_feats(self, imgs):
"""Extract features from multiple images.
Args:
imgs (list[torch.Tensor]): A list of images. The images are
augmented from the same image but in different ways.
Returns:
list[torch.Tensor]: Features of different images
"""
assert isinstance(imgs, list)
return [self.extract_feat(img) for img in imgs]
def forward_train(self, imgs, img_metas, **kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys, see
:class:`mmdet.datasets.pipelines.Collect`.
kwargs (keyword arguments): Specific to concrete implementation.
"""
# NOTE the batched image size information may be useful, e.g.
# in DETR, this is needed for the construction of masks, which is
# then used for the transformer_head.
batch_input_shape = tuple(imgs[0].size()[-2:])
for img_meta in img_metas:
img_meta['batch_input_shape'] = batch_input_shape
async def async_simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError
@abstractmethod
def simple_test(self, img, img_metas, **kwargs):
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
"""Test function with test time augmentation."""
pass
async def aforward_test(self, *, img, img_metas, **kwargs):
for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(img)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(img)}) '
f'!= num of image metas ({len(img_metas)})')
# TODO: remove the restriction of samples_per_gpu == 1 when prepared
samples_per_gpu = img[0].size(0)
assert samples_per_gpu == 1
if num_augs == 1:
return await self.async_simple_test(img[0], img_metas[0], **kwargs)
else:
raise NotImplementedError
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
# NOTE the batched image size information may be useful, e.g.
# in DETR, this is needed for the construction of masks, which is
# then used for the transformer_head.
for img, img_meta in zip(imgs, img_metas):
batch_size = len(img_meta)
for img_id in range(batch_size):
img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
if num_augs == 1:
# proposals (List[List[Tensor]]): the outer list indicates
# test-time augs (multiscale, flip, etc.) and the inner list
# indicates images in a batch.
# The Tensor should have a shape Px4, where P is the number of
# proposals.
if 'proposals' in kwargs:
kwargs['proposals'] = kwargs['proposals'][0]
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
assert imgs[0].size(0) == 1, 'aug test does not support ' \
'inference with batch size ' \
f'{imgs[0].size(0)}'
# TODO: support test augmentation for predefined proposals
assert 'proposals' not in kwargs
return self.aug_test(imgs, img_metas, **kwargs)
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if torch.onnx.is_in_onnx_export():
assert len(img_metas) == 1
return self.onnx_export(img[0], img_metas[0])
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
def _parse_losses(self, losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
which may be a weighted sum of all losses, log_vars contains \
all the variables to be sent to the logger.
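        Example:
            A hedged sketch (not from upstream): only keys whose name
            contains ``'loss'`` are summed into the total; other entries
            are merely logged. With ``losses = dict(loss_cls=tensor(1.),
            loss_bbox=tensor(0.5), acc=tensor(0.9))``, the returned
            ``loss`` equals ``1.5`` and ``log_vars`` holds all three
            scalars plus the total under the key ``'loss'``.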
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
        # If log_vars has different lengths across GPUs, they will hang
        # forever waiting for each other
if dist.is_available() and dist.is_initialized():
log_var_length = torch.tensor(len(log_vars), device=loss.device)
dist.all_reduce(log_var_length)
message = (f'rank {dist.get_rank()}' +
f' len(log_vars): {len(log_vars)}' + ' keys: ' +
','.join(log_vars.keys()))
assert log_var_length == len(log_vars) * dist.get_world_size(), \
'loss log variables are different across GPUs!\n' + message
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def train_step(self, data, optimizer):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \
``num_samples``.
- ``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
- ``log_vars`` contains all the variables to be sent to the
logger.
- ``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
return outputs
def val_step(self, data, optimizer=None):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
return outputs
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
            bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox
                lines. The tuple of color should be in BGR order.
                Default: (72, 101, 241)
            text_color (str or tuple(int) or :obj:`Color`): Color of texts.
                The tuple of color should be in BGR order.
                Default: (72, 101, 241)
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None
thickness (int): Thickness of lines. Default: 2
font_size (int): Font size of texts. Default: 13
win_name (str): The window name. Default: ''
            show (bool): Whether to show the image.
                Default: False.
            wait_time (float): Value of waitKey param.
                Default: 0.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
            img (ndarray): The image with results drawn on it. Returned only
                when `show` is False and `out_file` is None.
"""
img = mmcv.imread(img)
img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# draw segmentation masks
segms = None
if segm_result is not None and len(labels) > 0: # non empty
segms = mmcv.concat_list(segm_result)
if isinstance(segms[0], torch.Tensor):
segms = torch.stack(segms, dim=0).detach().cpu().numpy()
else:
segms = np.stack(segms, axis=0)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = imshow_det_bboxes(
img,
bboxes,
labels,
segms,
class_names=self.CLASSES,
score_thr=score_thr,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
def onnx_export(self, img, img_metas):
raise NotImplementedError(f'{self.__class__.__name__} does '
f'not support ONNX EXPORT')
| 14,772 | 39.922438 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/single_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(SingleStageDetector, self).__init__(init_cfg)
if pretrained:
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
backbone.pretrained = pretrained
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation.
Args:
img (torch.Tensor): Images with shape (N, C, H, W).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
feat = self.extract_feat(img)
results_list = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in results_list
]
return bbox_results
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
assert hasattr(self.bbox_head, 'aug_test'), \
f'{self.bbox_head.__class__.__name__}' \
' does not support test-time augmentation'
feats = self.extract_feats(imgs)
results_list = self.bbox_head.aug_test(
feats, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in results_list
]
return bbox_results
def onnx_export(self, img, img_metas, with_nms=True):
"""Test function without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
# get origin input shape to support onnx dynamic shape
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
# get pad input shape to support onnx dynamic shape for exporting
        # `CornerNet` and `CentripetalNet`, whose 'pad_shape' is used
# for inference
img_metas[0]['pad_shape_for_onnx'] = img_shape
if len(outs) == 2:
# add dummy score_factor
outs = (*outs, None)
        # TODO: can we change to `get_bboxes` when `onnx_export` fails?
det_bboxes, det_labels = self.bbox_head.onnx_export(
*outs, img_metas, with_nms=with_nms)
return det_bboxes, det_labels
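# --- Usage sketch (illustrative addition, not part of mmdet) ----------------
# `simple_test` above converts (det_bboxes, det_labels) pairs into per-class
# lists via `bbox2result`. The guarded snippet below is a minimal torch/numpy
# re-implementation of that conversion, written only to show the output
# layout; the helper name `to_per_class_results` is hypothetical.
if __name__ == '__main__':
    import numpy as np
    import torch

    def to_per_class_results(det_bboxes, det_labels, num_classes):
        """Split (n, 5) detections into a list of per-class (k, 5) arrays."""
        if det_bboxes.shape[0] == 0:
            return [np.zeros((0, 5), dtype=np.float32)] * num_classes
        bboxes = det_bboxes.cpu().numpy()
        labels = det_labels.cpu().numpy()
        return [bboxes[labels == i, :] for i in range(num_classes)]

    dets = torch.tensor([[0., 0., 10., 10., 0.9], [5., 5., 20., 20., 0.7]])
    labels = torch.tensor([0, 2])
    results = to_per_class_results(dets, labels, num_classes=3)
    assert results[0].shape == (1, 5) and results[1].shape == (0, 5)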
| 6,645 | 37.639535 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/detr.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
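# --- Usage sketch (illustrative addition, not part of mmdet) ----------------
# `forward_dummy` above fabricates minimal `img_metas` because the DETR head
# builds its padding masks from image shapes. The guarded snippet below shows
# the same construction on a random batch; it assumes only
# `batch_input_shape` and `img_shape` are consumed downstream.
if __name__ == '__main__':
    img = torch.randn(2, 3, 224, 224)
    batch_size, _, height, width = img.shape
    dummy_img_metas = [
        dict(
            batch_input_shape=(height, width),
            img_shape=(height, width, 3)) for _ in range(batch_size)
    ]
    assert len(dummy_img_metas) == batch_size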
| 2,483 | 33.985915 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/yolo.py | # Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
outs = self.bbox_head.forward(x)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
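# --- Usage sketch (illustrative addition, not part of mmdet) ----------------
# `onnx_export` above records the input spatial shape as a tensor so the
# exported graph stays dynamic-shape friendly. A minimal demonstration of the
# trick; note that `torch._shape_as_tensor` is a private PyTorch helper, so
# relying on it outside this context is an assumption, not a stable API.
if __name__ == '__main__':
    img = torch.randn(1, 3, 416, 608)
    img_shape = torch._shape_as_tensor(img)[2:]
    assert img_shape.tolist() == [416, 608]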
| 1,382 | 31.162791 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/kd_one_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.runner import load_checkpoint
from .. import build_detector
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (str | dict): Config file path
or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_config,
teacher_ckpt=None,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, str):
teacher_config = mmcv.Config.fromfile(teacher_config)
self.teacher_model = build_detector(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item contains the ground-truth
                boxes of one image, in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(img)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
def cuda(self, device=None):
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling cuda function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def train(self, mode=True):
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name, value):
"""Set attribute, i.e. self.name = value
        This override prevents the teacher model from being registered as an
        nn.Module. The teacher model is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
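# --- Usage sketch (illustrative addition, not part of mmdet) ----------------
# The `__setattr__` override above keeps the frozen teacher out of
# `self.parameters()`, so an optimizer built from the detector only ever sees
# student weights. The guarded torch-only sketch below reproduces the
# mechanism with toy modules; `ToyStudent` is a hypothetical name.
if __name__ == '__main__':
    import torch.nn as nn

    class ToyStudent(nn.Module):

        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 2)
            # Bypass nn.Module registration: the teacher becomes a plain
            # attribute, invisible to `parameters()` and `state_dict()`.
            object.__setattr__(self, 'teacher', nn.Linear(4, 2))

    model = ToyStudent()
    num_params = sum(p.numel() for p in model.parameters())
    assert num_params == 4 * 2 + 2  # only the student fc layer is counted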
| 4,150 | 39.696078 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/yolact.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_head
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = build_head(segm_head)
self.mask_head = build_head(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
feat = self.extract_feat(img)
bbox_outs = self.bbox_head(feat)
prototypes = self.mask_head.forward_dummy(feat[0])
return (bbox_outs, prototypes)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor): true segmentation masks for each box,
                used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
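# --- Usage sketch (illustrative addition, not part of mmdet) ----------------
# `forward_train` above guards every loss term with an isfinite assertion
# before returning. The guarded torch-only sketch below shows the same check
# on a hypothetical loss dict whose values are lists of per-level tensors,
# which is the layout the assertion expects.
if __name__ == '__main__':
    losses = {
        'loss_cls': [torch.tensor(0.3), torch.tensor(0.1)],
        'loss_bbox': [torch.tensor(0.5)],
    }
    for loss_name in losses.keys():
        assert torch.isfinite(torch.stack(losses[loss_name]))\
            .all().item(), '{} becomes infinite or NaN!'.format(loss_name)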
| 4,728 | 38.082645 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/rpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import torch
from mmcv.image import tensor2imgs
from mmdet.core import bbox_mapping
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class RPN(BaseDetector):
"""Implementation of Region Proposal Network."""
def __init__(self,
backbone,
neck,
rpn_head,
train_cfg,
test_cfg,
pretrained=None,
init_cfg=None):
super(RPN, self).__init__(init_cfg)
if pretrained:
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
backbone.pretrained = pretrained
self.backbone = build_backbone(backbone)
self.neck = build_neck(neck) if neck is not None else None
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def extract_feat(self, img):
"""Extract features.
Args:
            img (torch.Tensor): Image tensor with shape (n, c, h, w).
Returns:
list[torch.Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Dummy forward function."""
x = self.extract_feat(img)
rpn_outs = self.rpn_head(x)
return rpn_outs
def forward_train(self,
img,
img_metas,
gt_bboxes=None,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item contains the ground-truth
                boxes of one image, in [tl_x, tl_y, br_x, br_y] format.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if (isinstance(self.train_cfg.rpn, dict)
and self.train_cfg.rpn.get('debug', False)):
self.rpn_head.debug_imgs = tensor2imgs(img)
x = self.extract_feat(img)
losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
            img (torch.Tensor): Input image tensor with shape (N, C, H, W).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[np.ndarray]: proposals
"""
x = self.extract_feat(img)
        # get origin input shape to support onnx dynamic input shape
if torch.onnx.is_in_onnx_export():
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
if rescale:
for proposals, meta in zip(proposal_list, img_metas):
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
if torch.onnx.is_in_onnx_export():
return proposal_list
return [proposal.cpu().numpy() for proposal in proposal_list]
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[np.ndarray]: proposals
"""
proposal_list = self.rpn_head.aug_test_rpn(
self.extract_feats(imgs), img_metas)
if not rescale:
for proposals, img_meta in zip(proposal_list, img_metas[0]):
img_shape = img_meta['img_shape']
scale_factor = img_meta['scale_factor']
flip = img_meta['flip']
flip_direction = img_meta['flip_direction']
proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
scale_factor, flip,
flip_direction)
return [proposal.cpu().numpy() for proposal in proposal_list]
def show_result(self, data, result, top_k=20, **kwargs):
"""Show RPN proposals on the image.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
top_k (int): Plot the first k bboxes only
if set positive. Default: 20
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if kwargs is not None:
kwargs.pop('score_thr', None)
kwargs.pop('text_color', None)
kwargs['colors'] = kwargs.pop('bbox_color', 'green')
mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)
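# --- Usage sketch (illustrative addition, not part of mmdet) ----------------
# In `simple_test` above, proposals predicted on the resized image are mapped
# back to the original image by dividing box coordinates by the per-axis
# `scale_factor`. The guarded snippet below demonstrates the rescaling,
# assuming the usual (w_scale, h_scale, w_scale, h_scale) layout.
if __name__ == '__main__':
    # one proposal in [x1, y1, x2, y2, score] format
    proposals = torch.tensor([[100., 50., 300., 250., 0.98]])
    scale_factor = [2.0, 2.0, 2.0, 2.0]  # image was upscaled 2x for testing
    proposals[:, :4] /= proposals.new_tensor(scale_factor)
    assert proposals[0, :4].tolist() == [50., 25., 150., 125.]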
| 6,136 | 37.35625 | 78 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/panoptic_two_stage_segmentor.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2roi, multiclass_nms
from ..builder import DETECTORS, build_head
from ..roi_heads.mask_heads.fcn_mask_head import _do_paste_mask
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class TwoStagePanopticSegmentor(TwoStageDetector):
"""Base class of Two-stage Panoptic Segmentor.
    In addition to the components of TwoStageDetector, a panoptic segmentor
    has an extra semantic_head and a panoptic_fusion_head.
"""
def __init__(
self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
# for panoptic segmentation
semantic_head=None,
panoptic_fusion_head=None):
super(TwoStagePanopticSegmentor,
self).__init__(backbone, neck, rpn_head, roi_head, train_cfg,
test_cfg, pretrained, init_cfg)
if semantic_head is not None:
self.semantic_head = build_head(semantic_head)
if panoptic_fusion_head is not None:
panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None
panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()
panoptic_fusion_head_.update(test_cfg=panoptic_cfg)
self.panoptic_fusion_head = build_head(panoptic_fusion_head_)
self.num_things_classes = self.panoptic_fusion_head.\
num_things_classes
self.num_stuff_classes = self.panoptic_fusion_head.\
num_stuff_classes
self.num_classes = self.panoptic_fusion_head.num_classes
@property
def with_semantic_head(self):
return hasattr(self,
'semantic_head') and self.semantic_head is not None
@property
def with_panoptic_fusion_head(self):
        return hasattr(self, 'panoptic_fusion_head') and \
self.panoptic_fusion_head is not None
def forward_dummy(self, img):
"""Used for computing network flops.
        See `mmdetection/tools/analysis_tools/get_flops.py`
"""
raise NotImplementedError(
f'`forward_dummy` is not implemented in {self.__class__.__name__}')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_semantic_seg=None,
proposals=None,
**kwargs):
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_losses, proposal_list = self.rpn_head.forward_train(
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=gt_bboxes_ignore,
proposal_cfg=proposal_cfg)
losses.update(rpn_losses)
else:
proposal_list = proposals
roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
gt_bboxes, gt_labels,
gt_bboxes_ignore, gt_masks,
**kwargs)
losses.update(roi_losses)
semantic_loss = self.semantic_head.forward_train(x, gt_semantic_seg)
losses.update(semantic_loss)
return losses
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Simple test for mask head without augmentation."""
img_shapes = tuple(meta['ori_shape']
for meta in img_metas) if rescale else tuple(
meta['pad_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
masks = []
for img_shape in img_shapes:
out_shape = (0, self.roi_head.bbox_head.num_classes) \
+ img_shape[:2]
masks.append(det_bboxes[0].new_zeros(out_shape))
            # the class count (80) and mask size (28) here are placeholders;
            # the first dimension is 0, so the values are never consumed
            mask_pred = det_bboxes[0].new_zeros((0, 80, 28, 28))
mask_results = dict(
masks=masks, mask_pred=mask_pred, mask_feats=None)
return mask_results
_bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))]
if rescale:
if not isinstance(scale_factors[0], float):
scale_factors = [
det_bboxes[0].new_tensor(scale_factor)
for scale_factor in scale_factors
]
_bboxes = [
_bboxes[i] * scale_factors[i] for i in range(len(_bboxes))
]
mask_rois = bbox2roi(_bboxes)
mask_results = self.roi_head._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
# split batch mask prediction back to each image
num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
# resize the mask_preds to (K, H, W)
masks = []
for i in range(len(_bboxes)):
det_bbox = det_bboxes[i][:, :4]
det_label = det_labels[i]
mask_pred = mask_preds[i].sigmoid()
box_inds = torch.arange(mask_pred.shape[0])
mask_pred = mask_pred[box_inds, det_label][:, None]
img_h, img_w, _ = img_shapes[i]
mask_pred, _ = _do_paste_mask(
mask_pred, det_bbox, img_h, img_w, skip_empty=False)
masks.append(mask_pred)
mask_results['masks'] = masks
return mask_results
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without Augmentation."""
x = self.extract_feat(img)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
bboxes, scores = self.roi_head.simple_test_bboxes(
x, img_metas, proposal_list, None, rescale=rescale)
pan_cfg = self.test_cfg.panoptic
# class-wise predictions
det_bboxes = []
det_labels = []
        for bbox, score in zip(bboxes, scores):
            det_bbox, det_label = multiclass_nms(bbox, score,
pan_cfg.score_thr,
pan_cfg.nms,
pan_cfg.max_per_img)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
mask_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
masks = mask_results['masks']
seg_preds = self.semantic_head.simple_test(x, img_metas, rescale)
results = []
for i in range(len(det_bboxes)):
pan_results = self.panoptic_fusion_head.simple_test(
det_bboxes[i], det_labels[i], masks[i], seg_preds[i])
pan_results = pan_results.int().detach().cpu().numpy()
result = dict(pan_results=pan_results)
results.append(result)
return results
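# --- Usage sketch (illustrative addition, not part of mmdet) ----------------
# `simple_test_mask` above runs one batched mask forward and then splits the
# prediction back into per-image chunks with `Tensor.split`. The guarded
# sketch below shows that split on a fake batch of 5 RoIs belonging to two
# images (3 and 2 detections); the tensor shapes are hypothetical.
if __name__ == '__main__':
    mask_pred = torch.randn(5, 80, 28, 28)  # (num_rois, classes, h, w)
    num_mask_roi_per_img = [3, 2]
    mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
    assert mask_preds[0].shape[0] == 3 and mask_preds[1].shape[0] == 2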
| 7,725 | 36.872549 | 79 | py |
DSLA-DSLA | DSLA-DSLA/mmdet/models/detectors/centernet.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from mmdet.models.builder import DETECTORS
from ...core.utils import flip_tensor
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CenterNet(SingleStageDetector):
"""Implementation of CenterNet(Objects as Points)
<https://arxiv.org/abs/1904.07850>.
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def merge_aug_results(self, aug_results, with_nms):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
with_nms (bool): If True, do nms before return boxes.
Returns:
tuple: (out_bboxes, out_labels)
"""
recovered_bboxes, aug_labels = [], []
for single_result in aug_results:
recovered_bboxes.append(single_result[0][0])
aug_labels.append(single_result[0][1])
bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()
labels = torch.cat(aug_labels).contiguous()
if with_nms:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=True):
"""Augment testing of CenterNet. Aug test must have flipped image pair,
and unlike CornerNet, it will perform an averaging operation on the
feature map instead of detecting bbox.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: True.
Note:
            ``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
flip_direction = img_metas[flip_ind][0]['flip_direction']
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x)
assert len(center_heatmap_preds) == len(wh_preds) == len(
offset_preds) == 1
# Feature map averaging
center_heatmap_preds[0] = (
center_heatmap_preds[0][0:1] +
flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2
wh_preds[0] = (wh_preds[0][0:1] +
flip_tensor(wh_preds[0][1:2], flip_direction)) / 2
bbox_list = self.bbox_head.get_bboxes(
center_heatmap_preds,
wh_preds, [offset_preds[0][0:1]],
img_metas[ind],
rescale=rescale,
with_nms=False)
aug_results.append(bbox_list)
nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)
if nms_cfg is None:
with_nms = False
else:
with_nms = True
bbox_list = [self.merge_aug_results(aug_results, with_nms)]
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results
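# --- Usage sketch (illustrative addition, not part of mmdet) ----------------
# `aug_test` above averages each heatmap with its horizontally flipped
# counterpart instead of merging boxes from the two views. The guarded sketch
# below mimics that with `torch.flip` standing in for mmdet's `flip_tensor`;
# the tensor shapes are hypothetical.
if __name__ == '__main__':
    heatmaps = torch.randn(2, 80, 128, 128)  # [original, flipped] pair
    averaged = (heatmaps[0:1] +
                torch.flip(heatmaps[1:2], dims=[3])) / 2  # un-flip then mean
    assert averaged.shape == (1, 80, 128, 128)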
| 4,206 | 36.5625 | 79 | py |