Dataset schema: one record per source file, with columns

  repo             string   (length 1 to 99)
  file             string   (length 13 to 215)
  code             string   (length 12 to 59.2M)
  file_length      int64    (12 to 59.2M)
  avg_line_length  float64  (3.82 to 1.48M)
  max_line_length  int64    (12 to 2.51M)
  extension_type   string   (1 distinct value)

Each record below is shown as a `repo | file | stats` header line followed by the file contents.

GFocalV2 | GFocalV2-master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py | file_length=390, avg_line_length=26.928571, max_line_length=63, extension=py

_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py | file_length=143, avg_line_length=47, max_line_length=76, extension=py

_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))

GFocalV2 | GFocalV2-master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py | file_length=392, avg_line_length=27.071429, max_line_length=65, extension=py

_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py | file_length=392, avg_line_length=27.071429, max_line_length=65, extension=py

_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py | file_length=390, avg_line_length=26.928571, max_line_length=63, extension=py

_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py | file_length=141, avg_line_length=46.333333, max_line_length=76, extension=py

_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))

GFocalV2 | GFocalV2-master/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py | file_length=379, avg_line_length=26.142857, max_line_length=53, extension=py

_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py | file_length=130, avg_line_length=42.666667, max_line_length=76, extension=py

_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))

GFocalV2 | GFocalV2-master/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py | file_length=139, avg_line_length=34, max_line_length=76, extension=py

_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))

GFocalV2 | GFocalV2-master/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py | file_length=387, avg_line_length=26.714286, max_line_length=60, extension=py

_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/atss/atss_r101_fpn_1x_coco.py | file_length=125, avg_line_length=20, max_line_length=41, extension=py

_base_ = './atss_r50_fpn_1x_coco.py'
model = dict(
    pretrained='torchvision://resnet101',
    backbone=dict(depth=101),
)

GFocalV2 | GFocalV2-master/configs/atss/atss_r50_fpn_1x_coco.py | file_length=1841, avg_line_length=28.238095, max_line_length=73, extension=py

_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='ATSS',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
    assigner=dict(type='ATSSAssigner', topk=9),
    allowed_border=-1,
    pos_weight=-1,
    debug=False)
test_cfg = dict(
    nms_pre=1000,
    min_bbox_size=0,
    score_thr=0.05,
    nms=dict(type='nms', iou_threshold=0.6),
    max_per_img=100)
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

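Most configs in these records only override a few keys on top of their `_base_` file, as the ATSS pair above illustrates. A minimal sketch of how mmcv resolves that inheritance (it assumes an mmdetection-style checkout so the relative `_base_` paths resolve):

# Sketch of mmcv config inheritance: the child config is merged over its base,
# key by key, so only the overridden fields change.
from mmcv import Config

# atss_r101_fpn_1x_coco.py sets _base_ = './atss_r50_fpn_1x_coco.py' and
# overrides only `pretrained` and `backbone.depth`.
cfg = Config.fromfile('configs/atss/atss_r101_fpn_1x_coco.py')
print(cfg.model.type)            # 'ATSS', inherited from the base config
print(cfg.model.backbone.depth)  # 101, overriding depth=50 from the base
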
GFocalV2 | GFocalV2-master/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py | file_length=481, avg_line_length=27.352941, max_line_length=61, extension=py

_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    pretrained='open-mmlab://jhu/resnext50_32x4d_gn_ws',
    backbone=dict(
        type='ResNeXt',
        depth=50,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg))

GFocalV2 | GFocalV2-master/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py | file_length=498, avg_line_length=26.722222, max_line_length=61, extension=py

_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg))

GFocalV2 | GFocalV2-master/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py | file_length=483, avg_line_length=27.470588, max_line_length=61, extension=py

_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    pretrained='open-mmlab://jhu/resnext101_32x4d_gn_ws',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg))

GFocalV2 | GFocalV2-master/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py | file_length=496, avg_line_length=26.611111, max_line_length=61, extension=py

_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
# model settings
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    pretrained='open-mmlab://jhu/resnext50_32x4d_gn_ws',
    backbone=dict(
        type='ResNeXt',
        depth=50,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch',
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py | file_length=368, avg_line_length=25.357143, max_line_length=53, extension=py

_base_ = './ga_faster_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py | file_length=368, avg_line_length=25.357143, max_line_length=53, extension=py

_base_ = './ga_faster_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py | file_length=371, avg_line_length=25.571429, max_line_length=53, extension=py

_base_ = './ga_retinanet_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py | file_length=365, avg_line_length=25.142857, max_line_length=53, extension=py

_base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py | file_length=164, avg_line_length=26.5, max_line_length=57, extension=py

_base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py'
# model settings
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py | file_length=371, avg_line_length=25.571429, max_line_length=53, extension=py

_base_ = './ga_retinanet_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py | file_length=150, avg_line_length=29.2, max_line_length=57, extension=py

_base_ = './ga_faster_r50_caffe_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py | file_length=365, avg_line_length=25.142857, max_line_length=53, extension=py

_base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py | file_length=2322, avg_line_length=35.296875, max_line_length=78, extension=py

_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe'),
    roi_head=dict(
        bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))))
# model training and testing settings
train_cfg = dict(
    rcnn=dict(
        assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
        sampler=dict(num=256)))
test_cfg = dict(rcnn=dict(score_thr=1e-3))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=300),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=None),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img', 'proposals']),
        ])
]
data = dict(
    train=dict(
        proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
        pipeline=train_pipeline),
    val=dict(
        proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
        pipeline=test_pipeline),
    test=dict(
        proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
        pipeline=test_pipeline))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py | file_length=2271, avg_line_length=33.953846, max_line_length=77, extension=py

_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    rpn_head=dict(
        _delete_=True,
        type='GARPNHead',
        in_channels=256,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=8,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.14, 0.14]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.11, 0.11]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    roi_head=dict(
        bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        ga_assigner=dict(
            type='ApproxMaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        ga_sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=-1,
        center_ratio=0.2,
        ignore_ratio=0.5),
    rpn_proposal=dict(max_num=300),
    rcnn=dict(
        assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
        sampler=dict(type='RandomSampler', num=256)))
test_cfg = dict(rpn=dict(max_num=300), rcnn=dict(score_thr=1e-3))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py | file_length=153, avg_line_length=29.8, max_line_length=57, extension=py

_base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py | file_length=1988, avg_line_length=30.571429, max_line_length=74, extension=py

_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
    bbox_head=dict(
        _delete_=True,
        type='GARetinaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
    ga_assigner=dict(
        type='ApproxMaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.4,
        min_pos_iou=0.4,
        ignore_iof_thr=-1),
    ga_sampler=dict(
        type='RandomSampler',
        num=256,
        pos_fraction=0.5,
        neg_pos_ub=-1,
        add_gt_as_proposals=False),
    assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
    center_ratio=0.2,
    ignore_ratio=0.5)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py | file_length=5155, avg_line_length=28.803468, max_line_length=74, extension=py

# model settings
model = dict(
    type='RetinaNet',
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5),
    bbox_head=dict(
        type='GARetinaHead',
        num_classes=81,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
    ga_assigner=dict(
        type='ApproxMaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.4,
        min_pos_iou=0.4,
        ignore_iof_thr=-1),
    ga_sampler=dict(
        type='RandomSampler',
        num=256,
        pos_fraction=0.5,
        neg_pos_ub=-1,
        add_gt_as_proposals=False),
    assigner=dict(
        type='MaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.0,
        ignore_iof_thr=-1),
    allowed_border=-1,
    pos_weight=-1,
    center_ratio=0.2,
    ignore_ratio=0.5,
    debug=False)
test_cfg = dict(
    nms_pre=1000,
    min_bbox_size=0,
    score_thr=0.05,
    nms=dict(type='nms', iou_threshold=0.5),
    max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 480), (1333, 960)],
        keep_ratio=True,
        multiscale_mode='range'),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ga_retinanet_r101_caffe_fpn_mstrain_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]

GFocalV2 | GFocalV2-master/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py | file_length=1913, avg_line_length=32, max_line_length=74, extension=py

_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
    rpn_head=dict(
        _delete_=True,
        type='GARPNHead',
        in_channels=256,
        feat_channels=256,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=8,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.14, 0.14]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.11, 0.11]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        ga_assigner=dict(
            type='ApproxMaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        ga_sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=-1,
        center_ratio=0.2,
        ignore_ratio=0.5))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))

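Several of the guided-anchoring configs above set `_delete_=True` (in `rpn_head`, `bbox_head`, or `optimizer_config`). A short sketch of what that flag does in mmcv's config merge, assuming the same checkout layout as above:

# _delete_=True tells mmcv to replace the base dict wholesale instead of
# merging key-by-key, so no stale RPNHead settings survive under rpn_head.
from mmcv import Config

cfg = Config.fromfile(
    'configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py')
print(cfg.model.rpn_head.type)  # 'GARPNHead'; the base rpn_head is discarded
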
GFocalV2 | GFocalV2-master/mmdet/apis/inference.py | file_length=5913, avg_line_length=32.988506, max_line_length=79, extension=py

import warnings

import matplotlib.pyplot as plt
import mmcv
import numpy as np
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint

from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector


def init_detector(config, checkpoint=None, device='cuda:0'):
    """Initialize a detector from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the
            model will not load any weights.

    Returns:
        nn.Module: The constructed detector.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    config.model.pretrained = None
    model = build_detector(config.model, test_cfg=config.test_cfg)
    if checkpoint is not None:
        map_loc = 'cpu' if device == 'cpu' else None
        checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)
        if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            warnings.simplefilter('once')
            warnings.warn('Class names are not saved in the checkpoint\'s '
                          'meta data, use COCO classes by default.')
            model.CLASSES = get_classes('coco')
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model


class LoadImage(object):
    """A simple pipeline to load image."""

    def __call__(self, results):
        """Call function to load images into results.

        Args:
            results (dict): A result dict contains the file name
                of the image to be read.

        Returns:
            dict: ``results`` will be returned containing loaded image.
        """
        if isinstance(results['img'], str):
            results['filename'] = results['img']
            results['ori_filename'] = results['img']
        else:
            results['filename'] = None
            results['ori_filename'] = None
        img = mmcv.imread(results['img'])
        results['img'] = img
        results['img_fields'] = ['img']
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results


def inference_detector(model, img):
    """Inference an image with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str | ndarray): Either an image file path or a loaded image.

    Returns:
        The detection results.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # prepare data
    if isinstance(img, np.ndarray):
        # directly add img
        data = dict(img=img)
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
    else:
        # add information into dict
        data = dict(img_info=dict(filename=img), img_prefix=None)
    # build the data pipeline
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        for m in model.modules():
            assert not isinstance(
                m, RoIPool
            ), 'CPU inference with RoIPool is not supported currently.'
        # just get the actual data from DataContainer
        data['img_metas'] = data['img_metas'][0].data
    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)[0]
    return result


async def async_inference_detector(model, img):
    """Async inference an image with the detector.

    Args:
        model (nn.Module): The loaded detector.
        img (str | ndarray): Either an image file path or a loaded image.

    Returns:
        Awaitable detection results.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # prepare data
    if isinstance(img, np.ndarray):
        # directly add img
        data = dict(img=img)
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
    else:
        # add information into dict
        data = dict(img_info=dict(filename=img), img_prefix=None)
    # build the data pipeline
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]

    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap
    torch.set_grad_enabled(False)
    result = await model.aforward_test(rescale=True, **data)
    return result


def show_result_pyplot(model, img, result, score_thr=0.3, fig_size=(15, 10)):
    """Visualize the detection results on the image.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        score_thr (float): The threshold to visualize the bboxes and masks.
        fig_size (tuple): Figure size of the pyplot figure.
    """
    if hasattr(model, 'module'):
        model = model.module
    img = model.show_result(img, result, score_thr=score_thr, show=False)
    plt.figure(figsize=fig_size)
    plt.imshow(mmcv.bgr2rgb(img))
    plt.show()

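A hypothetical usage sketch of the API defined above; the config and checkpoint paths are placeholders, not files shipped with the source:

# Load a detector from one of the configs in this repo and run it on an image.
from mmdet.apis import inference_detector, init_detector, show_result_pyplot

model = init_detector('configs/atss/atss_r50_fpn_1x_coco.py',
                      'checkpoints/atss_r50_fpn_1x_coco.pth',  # placeholder
                      device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # placeholder image
show_result_pyplot(model, 'demo/demo.jpg', result, score_thr=0.3)
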
GFocalV2 | GFocalV2-master/mmdet/apis/test.py | file_length=6826, avg_line_length=34.743455, max_line_length=79, extension=py

import os.path as osp
import pickle
import shutil
import tempfile
import time

import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info

from mmdet.core import encode_mask_results


def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    show_score_thr=0.3):
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        batch_size = len(result)

        if show or out_dir:
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(
                    img_show,
                    result[i],
                    show=show,
                    out_file=out_file,
                    score_thr=show_score_thr)

        # encode mask results
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        results.extend(result)

        for _ in range(batch_size):
            prog_bar.update()
    return results


def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test model with multiple gpus.

    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting
    'gpu_collect=True' it encodes results to gpu tensors and uses gpu
    communication for results collection. On cpu mode it saves the results
    on different gpus to 'tmpdir' and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect
            results.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # This line can prevent deadlock problem in some cases.
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
            # encode mask results
            if isinstance(result[0], tuple):
                result = [(bbox_results, encode_mask_results(mask_results))
                          for bbox_results, mask_results in result]
        results.extend(result)

        if rank == 0:
            batch_size = len(result)
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results


def collect_results_cpu(result_part, size, tmpdir=None):
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is whitespace
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_list.append(mmcv.load(part_file))
        # sort the results
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results


def collect_results_gpu(result_part, size):
    rank, world_size = get_dist_info()
    # dump result part to tensor with pickle
    part_tensor = torch.tensor(
        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # gather all result part tensor shape
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    # padding result part tensor to max length
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [
        part_tensor.new_zeros(shape_max) for _ in range(world_size)
    ]
    # gather all result part
    dist.all_gather(part_recv_list, part_send)

    if rank == 0:
        part_list = []
        for recv, shape in zip(part_recv_list, shape_list):
            part_list.append(
                pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
        # sort the results
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        return ordered_results

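A minimal single-GPU evaluation sketch wiring `single_gpu_test` together with the builders used elsewhere in this repo; the config and checkpoint paths are assumptions:

# Build the test set and model from a config, then run single_gpu_test.
import mmcv
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint

from mmdet.apis import single_gpu_test
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector

cfg = mmcv.Config.fromfile('configs/atss/atss_r50_fpn_1x_coco.py')
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
    dataset, samples_per_gpu=1, workers_per_gpu=2, dist=False, shuffle=False)
model = build_detector(cfg.model, test_cfg=cfg.test_cfg)
load_checkpoint(model, 'checkpoints/atss_r50.pth', map_location='cpu')  # placeholder
model = MMDataParallel(model, device_ids=[0])
results = single_gpu_test(model, data_loader, show=False)
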
GFocalV2 | GFocalV2-master/mmdet/apis/train.py | file_length=5700, avg_line_length=36.754967, max_line_length=79, extension=py

import random

import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
                         Fp16OptimizerHook, OptimizerHook, build_optimizer)
from mmcv.utils import build_from_cfg

from mmdet.core import DistEvalHook, EvalHook
from mmdet.datasets import (build_dataloader, build_dataset,
                            replace_ImageToTensor)
from mmdet.utils import get_root_logger


def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None,
                   meta=None):
    logger = get_root_logger(cfg.log_level)

    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    if 'imgs_per_gpu' in cfg.data:
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
                       'Please use "samples_per_gpu" instead')
        if 'samples_per_gpu' in cfg.data:
            logger.warning(
                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f'={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(
                'Automatically set "samples_per_gpu"="imgs_per_gpu"='
                f'{cfg.data.imgs_per_gpu} in this experiments')
            cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu

    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed) for ds in dataset
    ]

    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(
            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = EpochBasedRunner(
        model,
        optimizer=optimizer,
        work_dir=cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp

    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config

    # register hooks
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))
    if distributed:
        runner.register_hook(DistSamplerSeedHook())

    # register eval hooks
    if validate:
        # Support batch_size > 1 in validation
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        if val_samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.val.pipeline = replace_ImageToTensor(
                cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=val_samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_hook = DistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))

    # user-defined hooks
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), \
            f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), \
                'Each item in custom_hooks expects dict type, but got ' \
                f'{type(hook_cfg)}'
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)

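A hypothetical training entry point built on the helpers above; `work_dir`, `gpu_ids`, and `seed` are normally injected by tools/train.py, so setting them here is an assumption:

# Wire set_random_seed and train_detector together for a single-GPU run.
import mmcv

from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector

cfg = mmcv.Config.fromfile('configs/atss/atss_r50_fpn_1x_coco.py')
cfg.work_dir = './work_dirs/atss_r50'  # assumption: run-local output dir
cfg.gpu_ids = [0]
cfg.seed = 0
set_random_seed(0, deterministic=False)

model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
datasets = [build_dataset(cfg.data.train)]
train_detector(model, datasets, cfg, distributed=False, validate=True)
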
GFocalV2 | GFocalV2-master/mmdet/core/evaluation/eval_hooks.py | file_length=4896, avg_line_length=35.819549, max_line_length=79, extension=py

import os.path as osp
import warnings

from mmcv.runner import Hook
from torch.utils.data import DataLoader


class EvalHook(Hook):
    """Evaluation hook.

    Notes:
        If new arguments are added for EvalHook, tools/test.py may be
        affected.

    Attributes:
        dataloader (DataLoader): A PyTorch dataloader.
        start (int, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the
            resuming epoch. If None, whether to evaluate is merely decided
            by ``interval``. Default: None.
        interval (int): Evaluation interval (by epochs). Default: 1.
        **eval_kwargs: Evaluation arguments fed into the evaluate function
            of the dataset.
    """

    def __init__(self, dataloader, start=None, interval=1, **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a pytorch DataLoader, but got'
                            f' {type(dataloader)}')
        if not interval > 0:
            raise ValueError(f'interval must be positive, but got {interval}')
        if start is not None and start < 0:
            warnings.warn(
                f'The evaluation start epoch {start} is smaller than 0, '
                f'use 0 instead', UserWarning)
            start = 0
        self.dataloader = dataloader
        self.interval = interval
        self.start = start
        self.eval_kwargs = eval_kwargs
        self.initial_epoch_flag = True

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training."""
        if not self.initial_epoch_flag:
            return
        if self.start is not None and runner.epoch >= self.start:
            self.after_train_epoch(runner)
        self.initial_epoch_flag = False

    def evaluation_flag(self, runner):
        """Judge whether to perform evaluation after this epoch.

        Returns:
            bool: The flag indicating whether to perform evaluation.
        """
        if self.start is None:
            if not self.every_n_epochs(runner, self.interval):
                # No evaluation during the interval epochs.
                return False
        elif (runner.epoch + 1) < self.start:
            # No evaluation if start is larger than the current epoch.
            return False
        else:
            # Evaluation only at epochs 3, 5, 7... if start==3 and interval==2
            if (runner.epoch + 1 - self.start) % self.interval:
                return False
        return True

    def after_train_epoch(self, runner):
        if not self.evaluation_flag(runner):
            return
        from mmdet.apis import single_gpu_test
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        self.evaluate(runner, results)

    def evaluate(self, runner, results):
        eval_res = self.dataloader.dataset.evaluate(
            results, logger=runner.logger, **self.eval_kwargs)
        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True


class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    Notes:
        If new arguments are added, tools/test.py may be affected.

    Attributes:
        dataloader (DataLoader): A PyTorch dataloader.
        start (int, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the
            resuming epoch. If None, whether to evaluate is merely decided
            by ``interval``. Default: None.
        interval (int): Evaluation interval (by epochs). Default: 1.
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        **eval_kwargs: Evaluation arguments fed into the evaluate function
            of the dataset.
    """

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 tmpdir=None,
                 gpu_collect=False,
                 **eval_kwargs):
        super().__init__(
            dataloader, start=start, interval=interval, **eval_kwargs)
        self.tmpdir = tmpdir
        self.gpu_collect = gpu_collect

    def after_train_epoch(self, runner):
        if not self.evaluation_flag(runner):
            return
        from mmdet.apis import multi_gpu_test
        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        if runner.rank == 0:
            print('\n')
            self.evaluate(runner, results)

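A tiny runnable demo of the `evaluation_flag` schedule documented above (start=3, interval=2 evaluates at epochs 3, 5, 7, ...); the `SimpleNamespace` stand-in for a runner and the one-sample loader are illustration-only assumptions:

from types import SimpleNamespace

import torch
from torch.utils.data import DataLoader, TensorDataset

from mmdet.core.evaluation.eval_hooks import EvalHook

loader = DataLoader(TensorDataset(torch.zeros(1)))  # stub loader
hook = EvalHook(loader, start=3, interval=2)
for epoch in range(8):  # runner.epoch is 0-based
    fake_runner = SimpleNamespace(epoch=epoch)
    print(epoch + 1, hook.evaluation_flag(fake_runner))
# prints True only for epochs 3, 5, 7
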
GFocalV2 | GFocalV2-master/mmdet/core/post_processing/merge_augs.py | file_length=4286, avg_line_length=35.330508, max_line_length=78, extension=py

import numpy as np
import torch
from mmcv.ops import nms

from ..bbox import bbox_mapping_back


def merge_aug_proposals(aug_proposals, img_metas, rpn_test_cfg):
    """Merge augmented proposals (multiscale, flip, etc.)

    Args:
        aug_proposals (list[Tensor]): proposals from different testing
            schemes, shape (n, 5). Note that they are not rescaled to the
            original image size.
        img_metas (list[dict]): list of image info dict where each dict has:
            'img_shape', 'scale_factor', 'flip', and may also contain
            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For
            details on the values of these keys see
            `mmdet/datasets/pipelines/formatting.py:Collect`.
        rpn_test_cfg (dict): rpn test config.

    Returns:
        Tensor: shape (n, 4), proposals corresponding to original image scale.
    """
    recovered_proposals = []
    for proposals, img_info in zip(aug_proposals, img_metas):
        img_shape = img_info['img_shape']
        scale_factor = img_info['scale_factor']
        flip = img_info['flip']
        flip_direction = img_info['flip_direction']
        _proposals = proposals.clone()
        _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,
                                              scale_factor, flip,
                                              flip_direction)
        recovered_proposals.append(_proposals)
    aug_proposals = torch.cat(recovered_proposals, dim=0)
    merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),
                              aug_proposals[:, -1].contiguous(),
                              rpn_test_cfg.nms_thr)
    scores = merged_proposals[:, 4]
    _, order = scores.sort(0, descending=True)
    num = min(rpn_test_cfg.max_num, merged_proposals.shape[0])
    order = order[:num]
    merged_proposals = merged_proposals[order, :]
    return merged_proposals


def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
    """Merge augmented detection bboxes and scores.

    Args:
        aug_bboxes (list[Tensor]): shape (n, 4*#class)
        aug_scores (list[Tensor] or None): shape (n, #class)
        img_metas (list[list[dict]]): image info, one list of dicts per
            augmentation.
        rcnn_test_cfg (dict): rcnn test config.

    Returns:
        tuple: (bboxes, scores)
    """
    recovered_bboxes = []
    for bboxes, img_info in zip(aug_bboxes, img_metas):
        img_shape = img_info[0]['img_shape']
        scale_factor = img_info[0]['scale_factor']
        flip = img_info[0]['flip']
        flip_direction = img_info[0]['flip_direction']
        bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
                                   flip_direction)
        recovered_bboxes.append(bboxes)
    bboxes = torch.stack(recovered_bboxes).mean(dim=0)
    if aug_scores is None:
        return bboxes
    else:
        scores = torch.stack(aug_scores).mean(dim=0)
        return bboxes, scores


def merge_aug_scores(aug_scores):
    """Merge augmented bbox scores."""
    if isinstance(aug_scores[0], torch.Tensor):
        return torch.mean(torch.stack(aug_scores), dim=0)
    else:
        return np.mean(aug_scores, axis=0)


def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
    """Merge augmented mask prediction.

    Args:
        aug_masks (list[ndarray]): shape (n, #class, h, w)
        img_metas (list[list[dict]]): image info, one list of dicts per
            augmentation.
        rcnn_test_cfg (dict): rcnn test config.

    Returns:
        ndarray: the merged masks.
    """
    recovered_masks = []
    for mask, img_info in zip(aug_masks, img_metas):
        flip = img_info[0]['flip']
        flip_direction = img_info[0]['flip_direction']
        if flip:
            if flip_direction == 'horizontal':
                mask = mask[:, :, :, ::-1]
            elif flip_direction == 'vertical':
                mask = mask[:, :, ::-1, :]
            else:
                raise ValueError(
                    f"Invalid flipping direction '{flip_direction}'")
        recovered_masks.append(mask)
    if weights is None:
        merged_masks = np.mean(recovered_masks, axis=0)
    else:
        merged_masks = np.average(
            np.array(recovered_masks), axis=0, weights=np.array(weights))
    return merged_masks

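A quick check of `merge_aug_scores` above: per-augmentation score tensors are simply averaged elementwise.

import torch

from mmdet.core.post_processing.merge_augs import merge_aug_scores

aug_scores = [torch.tensor([0.9, 0.1]), torch.tensor([0.7, 0.3])]
print(merge_aug_scores(aug_scores))  # tensor([0.8000, 0.2000])
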
GFocalV2 | GFocalV2-master/mmdet/core/post_processing/bbox_nms.py | file_length=5306, avg_line_length=35.102041, max_line_length=79, extension=py

import torch
from mmcv.ops.nms import batched_nms

from mmdet.core.bbox.iou_calculators import bbox_overlaps


def multiclass_nms(multi_bboxes,
                   multi_scores,
                   score_thr,
                   nms_cfg,
                   max_num=-1,
                   score_factors=None):
    """NMS for multi-class bboxes.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class+1), where the last column
            contains scores of the background class, but this will be
            ignored.
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        nms_cfg (dict): NMS config, e.g. dict(type='nms', iou_threshold=0.5).
        max_num (int): if there are more than max_num bboxes after NMS,
            only top max_num will be kept.
        score_factors (Tensor): The factors multiplied to scores before
            applying NMS

    Returns:
        tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels
            are 0-based.
    """
    num_classes = multi_scores.size(1) - 1
    # exclude background category
    if multi_bboxes.shape[1] > 4:
        bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
    else:
        bboxes = multi_bboxes[:, None].expand(
            multi_scores.size(0), num_classes, 4)
    scores = multi_scores[:, :-1]

    # filter out boxes with low scores
    valid_mask = scores > score_thr

    # We use masked_select for ONNX exporting purpose,
    # which is equivalent to bboxes = bboxes[valid_mask]
    # (TODO): as ONNX does not support repeat now,
    # we have to use this ugly code
    bboxes = torch.masked_select(
        bboxes,
        torch.stack((valid_mask, valid_mask, valid_mask, valid_mask),
                    -1)).view(-1, 4)
    if score_factors is not None:
        scores = scores * score_factors[:, None]
    scores = torch.masked_select(scores, valid_mask)
    labels = valid_mask.nonzero(as_tuple=False)[:, 1]

    if bboxes.numel() == 0:
        bboxes = multi_bboxes.new_zeros((0, 5))
        labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)

        if torch.onnx.is_in_onnx_export():
            raise RuntimeError('[ONNX Error] Can not record NMS '
                               'as it has not been executed this time')
        return bboxes, labels

    dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)

    if max_num > 0:
        dets = dets[:max_num]
        keep = keep[:max_num]

    return dets, labels[keep]


def fast_nms(multi_bboxes,
             multi_scores,
             multi_coeffs,
             score_thr,
             iou_thr,
             top_k,
             max_num=-1):
    """Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.

    Fast NMS allows already-removed detections to suppress other detections
    so that every instance can be decided to be kept or discarded in
    parallel, which is not possible in traditional NMS. This relaxation
    allows us to implement Fast NMS entirely in standard GPU-accelerated
    matrix operations.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class+1), where the last column
            contains scores of the background class, but this will be
            ignored.
        multi_coeffs (Tensor): shape (n, #class*coeffs_dim).
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        iou_thr (float): IoU threshold to be considered as conflicted.
        top_k (int): if there are more than top_k bboxes before NMS,
            only top top_k will be kept.
        max_num (int): if there are more than max_num bboxes after NMS,
            only top max_num will be kept. If -1, keep all the bboxes.
            Default: -1.

    Returns:
        tuple: (bboxes, labels, coefficients), tensors of shape (k, 5),
            (k, 1), and (k, coeffs_dim). Labels are 0-based.
    """
    scores = multi_scores[:, :-1].t()  # [#class, n]
    scores, idx = scores.sort(1, descending=True)

    idx = idx[:, :top_k].contiguous()
    scores = scores[:, :top_k]  # [#class, topk]
    num_classes, num_dets = idx.size()

    boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
    coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)

    iou = bbox_overlaps(boxes, boxes)  # [#class, topk, topk]
    iou.triu_(diagonal=1)
    iou_max, _ = iou.max(dim=1)

    # Now just filter out the ones higher than the threshold
    keep = iou_max <= iou_thr

    # Second thresholding introduces 0.2 mAP gain at negligible time cost
    keep *= scores > score_thr

    # Assign each kept detection to its corresponding class
    classes = torch.arange(
        num_classes, device=boxes.device)[:, None].expand_as(keep)
    classes = classes[keep]

    boxes = boxes[keep]
    coeffs = coeffs[keep]
    scores = scores[keep]

    # Only keep the top max_num highest scores across all classes
    scores, idx = scores.sort(0, descending=True)
    if max_num > 0:
        idx = idx[:max_num]
        scores = scores[:max_num]

    classes = classes[idx]
    boxes = boxes[idx]
    coeffs = coeffs[idx]

    cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
    return cls_dets, classes, coeffs

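A tiny CPU demo of `multiclass_nms` above: two heavily overlapping boxes with one foreground class (the trailing score column is the ignored background class), so the lower-scoring box is suppressed. The tensor values are made up for illustration.

import torch

from mmdet.core.post_processing.bbox_nms import multiclass_nms

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]])
scores = torch.tensor([[0.9, 0.1], [0.8, 0.2]])  # IoU of the boxes ~0.68
dets, labels = multiclass_nms(
    boxes, scores, score_thr=0.05, nms_cfg=dict(type='nms', iou_threshold=0.5))
print(dets.shape, labels)  # one (x1, y1, x2, y2, score) row, label 0
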
GFocalV2 | GFocalV2-master/mmdet/core/mask/structures.py

from abc import ABCMeta, abstractmethod

import cv2
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.ops.roi_align import roi_align


class BaseInstanceMasks(metaclass=ABCMeta):
    """Base class for instance masks."""

    @abstractmethod
    def rescale(self, scale, interpolation='nearest'):
        """Rescale masks as large as possible while keeping the aspect ratio.
        For details can refer to `mmcv.imrescale`.

        Args:
            scale (tuple[int]): The maximum size (h, w) of rescaled mask.
            interpolation (str): Same as :func:`mmcv.imrescale`.

        Returns:
            BaseInstanceMasks: The rescaled masks.
        """
        pass

    @abstractmethod
    def resize(self, out_shape, interpolation='nearest'):
        """Resize masks to the given out_shape.

        Args:
            out_shape: Target (h, w) of resized mask.
            interpolation (str): See :func:`mmcv.imresize`.

        Returns:
            BaseInstanceMasks: The resized masks.
        """
        pass

    @abstractmethod
    def flip(self, flip_direction='horizontal'):
        """Flip masks along the given direction.

        Args:
            flip_direction (str): Either 'horizontal' or 'vertical'.

        Returns:
            BaseInstanceMasks: The flipped masks.
        """
        pass

    @abstractmethod
    def pad(self, out_shape, pad_val):
        """Pad masks to the given size of (h, w).

        Args:
            out_shape (tuple[int]): Target (h, w) of padded mask.
            pad_val (int): The padded value.

        Returns:
            BaseInstanceMasks: The padded masks.
        """
        pass

    @abstractmethod
    def crop(self, bbox):
        """Crop each mask by the given bbox.

        Args:
            bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).

        Return:
            BaseInstanceMasks: The cropped masks.
        """
        pass

    @abstractmethod
    def crop_and_resize(self,
                        bboxes,
                        out_shape,
                        inds,
                        device,
                        interpolation='bilinear'):
        """Crop and resize masks by the given bboxes.

        This function is mainly used in mask targets computation. It first
        aligns masks to bboxes by assigned_inds, then crops each mask by
        the assigned bbox and resizes it to the size of (mask_h, mask_w).

        Args:
            bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
            out_shape (tuple[int]): Target (h, w) of resized mask
            inds (ndarray): Indexes to assign masks to each bbox
            device (str): Device of bboxes
            interpolation (str): See `mmcv.imresize`

        Return:
            BaseInstanceMasks: the cropped and resized masks.
        """
        pass

    @abstractmethod
    def expand(self, expanded_h, expanded_w, top, left):
        """see :class:`Expand`."""
        pass

    @property
    @abstractmethod
    def areas(self):
        """ndarray: areas of each instance."""
        pass

    @abstractmethod
    def to_ndarray(self):
        """Convert masks to the format of ndarray.

        Return:
            ndarray: Converted masks in the format of ndarray.
        """
        pass

    @abstractmethod
    def to_tensor(self, dtype, device):
        """Convert masks to the format of Tensor.

        Args:
            dtype (str): Dtype of converted mask.
            device (torch.device): Device of converted masks.

        Returns:
            Tensor: Converted masks in the format of Tensor.
        """
        pass

    @abstractmethod
    def translate(self,
                  out_shape,
                  offset,
                  direction='horizontal',
                  fill_val=0,
                  interpolation='bilinear'):
        """Translate the masks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            offset (int | float): The offset for translate.
            direction (str): The translate direction, either "horizontal"
                or "vertical".
            fill_val (int | float): Border value. Default 0.
            interpolation (str): Same as :func:`mmcv.imtranslate`.

        Returns:
            Translated masks.
        """
        pass

    def shear(self,
              out_shape,
              magnitude,
              direction='horizontal',
              border_value=0,
              interpolation='bilinear'):
        """Shear the masks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            magnitude (int | float): The magnitude used for shear.
            direction (str): The shear direction, either "horizontal"
                or "vertical".
            border_value (int | tuple[int]): Value used in case of a
                constant border. Default 0.
            interpolation (str): Same as in :func:`mmcv.imshear`.

        Returns:
            ndarray: Sheared masks.
        """
        pass

    @abstractmethod
    def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
        """Rotate the masks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            angle (int | float): Rotation angle in degrees. Positive values
                mean counter-clockwise rotation.
            center (tuple[float], optional): Center point (w, h) of the
                rotation in source image. If not specified, the center of
                the image will be used.
            scale (int | float): Isotropic scale factor.
            fill_val (int | float): Border value. Default 0 for masks.

        Returns:
            Rotated masks.
        """
        pass


class BitmapMasks(BaseInstanceMasks):
    """This class represents masks in the form of bitmaps.

    Args:
        masks (ndarray): ndarray of masks in shape (N, H, W), where N is
            the number of objects.
        height (int): height of masks
        width (int): width of masks
    """

    def __init__(self, masks, height, width):
        self.height = height
        self.width = width
        if len(masks) == 0:
            self.masks = np.empty((0, self.height, self.width),
                                  dtype=np.uint8)
        else:
            assert isinstance(masks, (list, np.ndarray))
            if isinstance(masks, list):
                assert isinstance(masks[0], np.ndarray)
                assert masks[0].ndim == 2  # (H, W)
            else:
                assert masks.ndim == 3  # (N, H, W)

            self.masks = np.stack(masks).reshape(-1, height, width)
            assert self.masks.shape[1] == self.height
            assert self.masks.shape[2] == self.width

    def __getitem__(self, index):
        """Index the BitmapMask.

        Args:
            index (int | ndarray): Indices in the format of integer or
                ndarray.

        Returns:
            :obj:`BitmapMasks`: Indexed bitmap masks.
        """
        masks = self.masks[index].reshape(-1, self.height, self.width)
        return BitmapMasks(masks, self.height, self.width)

    def __iter__(self):
        return iter(self.masks)

    def __repr__(self):
        s = self.__class__.__name__ + '('
        s += f'num_masks={len(self.masks)}, '
        s += f'height={self.height}, '
        s += f'width={self.width})'
        return s

    def __len__(self):
        """Number of masks."""
        return len(self.masks)

    def rescale(self, scale, interpolation='nearest'):
        """See :func:`BaseInstanceMasks.rescale`."""
        if len(self.masks) == 0:
            new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
            rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
        else:
            rescaled_masks = np.stack([
                mmcv.imrescale(mask, scale, interpolation=interpolation)
                for mask in self.masks
            ])
        height, width = rescaled_masks.shape[1:]
        return BitmapMasks(rescaled_masks, height, width)

    def resize(self, out_shape, interpolation='nearest'):
        """See :func:`BaseInstanceMasks.resize`."""
        if len(self.masks) == 0:
            resized_masks = np.empty((0, *out_shape), dtype=np.uint8)
        else:
            resized_masks = np.stack([
                mmcv.imresize(mask, out_shape, interpolation=interpolation)
                for mask in self.masks
            ])
        return BitmapMasks(resized_masks, *out_shape)

    def flip(self, flip_direction='horizontal'):
        """See :func:`BaseInstanceMasks.flip`."""
        assert flip_direction in ('horizontal', 'vertical', 'diagonal')

        if len(self.masks) == 0:
            flipped_masks = self.masks
        else:
            flipped_masks = np.stack([
                mmcv.imflip(mask, direction=flip_direction)
                for mask in self.masks
            ])
        return BitmapMasks(flipped_masks, self.height, self.width)

    def pad(self, out_shape, pad_val=0):
        """See :func:`BaseInstanceMasks.pad`."""
        if len(self.masks) == 0:
            padded_masks = np.empty((0, *out_shape), dtype=np.uint8)
        else:
            padded_masks = np.stack([
                mmcv.impad(mask, shape=out_shape, pad_val=pad_val)
                for mask in self.masks
            ])
        return BitmapMasks(padded_masks, *out_shape)

    def crop(self, bbox):
        """See :func:`BaseInstanceMasks.crop`."""
        assert isinstance(bbox, np.ndarray)
        assert bbox.ndim == 1

        # clip the boundary
        bbox = bbox.copy()
        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
        x1, y1, x2, y2 = bbox
        w = np.maximum(x2 - x1, 1)
        h = np.maximum(y2 - y1, 1)

        if len(self.masks) == 0:
            cropped_masks = np.empty((0, h, w), dtype=np.uint8)
        else:
            cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
        return BitmapMasks(cropped_masks, h, w)

    def crop_and_resize(self,
                        bboxes,
                        out_shape,
                        inds,
                        device='cpu',
                        interpolation='bilinear'):
        """See :func:`BaseInstanceMasks.crop_and_resize`."""
        if len(self.masks) == 0:
            empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
            return BitmapMasks(empty_masks, *out_shape)

        # convert bboxes to tensor
        if isinstance(bboxes, np.ndarray):
            bboxes = torch.from_numpy(bboxes).to(device=device)
        if isinstance(inds, np.ndarray):
            inds = torch.from_numpy(inds).to(device=device)

        num_bbox = bboxes.shape[0]
        fake_inds = torch.arange(
            num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
        rois = torch.cat([fake_inds, bboxes], dim=1)  # Nx5
        rois = rois.to(device=device)
        if num_bbox > 0:
            gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
                0, inds).to(dtype=rois.dtype)
            targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
                                1.0, 0, 'avg', True).squeeze(1)
            resized_masks = (targets >= 0.5).cpu().numpy()
        else:
            resized_masks = []
        return BitmapMasks(resized_masks, *out_shape)

    def expand(self, expanded_h, expanded_w, top, left):
        """See :func:`BaseInstanceMasks.expand`."""
        if len(self.masks) == 0:
            expanded_mask = np.empty((0, expanded_h, expanded_w),
                                     dtype=np.uint8)
        else:
            expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
                                     dtype=np.uint8)
            expanded_mask[:, top:top + self.height,
                          left:left + self.width] = self.masks
        return BitmapMasks(expanded_mask, expanded_h, expanded_w)

    def translate(self,
                  out_shape,
                  offset,
                  direction='horizontal',
                  fill_val=0,
                  interpolation='bilinear'):
        """Translate the BitmapMasks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            offset (int | float): The offset for translate.
            direction (str): The translate direction, either "horizontal"
                or "vertical".
            fill_val (int | float): Border value. Default 0 for masks.
            interpolation (str): Same as :func:`mmcv.imtranslate`.

        Returns:
            BitmapMasks: Translated BitmapMasks.
        """
        if len(self.masks) == 0:
            translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
        else:
            translated_masks = mmcv.imtranslate(
                self.masks.transpose((1, 2, 0)),
                offset,
                direction,
                border_value=fill_val,
                interpolation=interpolation)
            if translated_masks.ndim == 2:
                translated_masks = translated_masks[:, :, None]
            translated_masks = translated_masks.transpose(
                (2, 0, 1)).astype(self.masks.dtype)
        return BitmapMasks(translated_masks, *out_shape)

    def shear(self,
              out_shape,
              magnitude,
              direction='horizontal',
              border_value=0,
              interpolation='bilinear'):
        """Shear the BitmapMasks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            magnitude (int | float): The magnitude used for shear.
            direction (str): The shear direction, either "horizontal"
                or "vertical".
            border_value (int | tuple[int]): Value used in case of a
                constant border.
            interpolation (str): Same as in :func:`mmcv.imshear`.

        Returns:
            BitmapMasks: The sheared masks.
        """
        if len(self.masks) == 0:
            sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
        else:
            sheared_masks = mmcv.imshear(
                self.masks.transpose((1, 2, 0)),
                magnitude,
                direction,
                border_value=border_value,
                interpolation=interpolation)
            if sheared_masks.ndim == 2:
                sheared_masks = sheared_masks[:, :, None]
            sheared_masks = sheared_masks.transpose(
                (2, 0, 1)).astype(self.masks.dtype)
        return BitmapMasks(sheared_masks, *out_shape)

    def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
        """Rotate the BitmapMasks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            angle (int | float): Rotation angle in degrees. Positive values
                mean counter-clockwise rotation.
            center (tuple[float], optional): Center point (w, h) of the
                rotation in source image. If not specified, the center of
                the image will be used.
            scale (int | float): Isotropic scale factor.
            fill_val (int | float): Border value. Default 0 for masks.

        Returns:
            BitmapMasks: Rotated BitmapMasks.
        """
        if len(self.masks) == 0:
            rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)
        else:
            rotated_masks = mmcv.imrotate(
                self.masks.transpose((1, 2, 0)),
                angle,
                center=center,
                scale=scale,
                border_value=fill_val)
            if rotated_masks.ndim == 2:
                # case when only one mask, (h, w)
                rotated_masks = rotated_masks[:, :, None]  # (h, w, 1)
            rotated_masks = rotated_masks.transpose(
                (2, 0, 1)).astype(self.masks.dtype)
        return BitmapMasks(rotated_masks, *out_shape)

    @property
    def areas(self):
        """See :py:attr:`BaseInstanceMasks.areas`."""
        return self.masks.sum((1, 2))

    def to_ndarray(self):
        """See :func:`BaseInstanceMasks.to_ndarray`."""
        return self.masks

    def to_tensor(self, dtype, device):
        """See :func:`BaseInstanceMasks.to_tensor`."""
        return torch.tensor(self.masks, dtype=dtype, device=device)


class PolygonMasks(BaseInstanceMasks):
    """This class represents masks in the form of polygons.

    Polygons is a list of three levels. The first level of the list
    corresponds to objects, the second level to the polys that compose the
    object, the third level to the poly coordinates

    Args:
        masks (list[list[ndarray]]): The first level of the list
            corresponds to objects, the second level to the polys that
            compose the object, the third level to the poly coordinates
        height (int): height of masks
        width (int): width of masks
    """

    def __init__(self, masks, height, width):
        assert isinstance(masks, list)
        if len(masks) > 0:
            assert isinstance(masks[0], list)
            assert isinstance(masks[0][0], np.ndarray)

        self.height = height
        self.width = width
        self.masks = masks

    def __getitem__(self, index):
        """Index the polygon masks.

        Args:
            index (ndarray | List): The indices.

        Returns:
            :obj:`PolygonMasks`: The indexed polygon masks.
        """
        if isinstance(index, np.ndarray):
            index = index.tolist()
        if isinstance(index, list):
            masks = [self.masks[i] for i in index]
        else:
            try:
                masks = self.masks[index]
            except Exception:
                raise ValueError(
                    f'Unsupported input of type {type(index)} for indexing!')
        if len(masks) and isinstance(masks[0], np.ndarray):
            masks = [masks]  # ensure a list of three levels
        return PolygonMasks(masks, self.height, self.width)

    def __iter__(self):
        return iter(self.masks)

    def __repr__(self):
        s = self.__class__.__name__ + '('
        s += f'num_masks={len(self.masks)}, '
        s += f'height={self.height}, '
        s += f'width={self.width})'
        return s

    def __len__(self):
        """Number of masks."""
        return len(self.masks)

    def rescale(self, scale, interpolation=None):
        """see :func:`BaseInstanceMasks.rescale`"""
        new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
        if len(self.masks) == 0:
            rescaled_masks = PolygonMasks([], new_h, new_w)
        else:
            rescaled_masks = self.resize((new_h, new_w))
        return rescaled_masks

    def resize(self, out_shape, interpolation=None):
        """see :func:`BaseInstanceMasks.resize`"""
        if len(self.masks) == 0:
            resized_masks = PolygonMasks([], *out_shape)
        else:
            h_scale = out_shape[0] / self.height
            w_scale = out_shape[1] / self.width
            resized_masks = []
            for poly_per_obj in self.masks:
                resized_poly = []
                for p in poly_per_obj:
                    p = p.copy()
                    p[0::2] *= w_scale
                    p[1::2] *= h_scale
                    resized_poly.append(p)
                resized_masks.append(resized_poly)
            resized_masks = PolygonMasks(resized_masks, *out_shape)
        return resized_masks

    def flip(self, flip_direction='horizontal'):
        """see :func:`BaseInstanceMasks.flip`"""
        assert flip_direction in ('horizontal', 'vertical', 'diagonal')
        if len(self.masks) == 0:
            flipped_masks = PolygonMasks([], self.height, self.width)
        else:
            flipped_masks = []
            for poly_per_obj in self.masks:
                flipped_poly_per_obj = []
                for p in poly_per_obj:
                    p = p.copy()
                    if flip_direction == 'horizontal':
                        p[0::2] = self.width - p[0::2]
                    elif flip_direction == 'vertical':
                        p[1::2] = self.height - p[1::2]
                    else:
                        p[0::2] = self.width - p[0::2]
                        p[1::2] = self.height - p[1::2]
                    flipped_poly_per_obj.append(p)
                flipped_masks.append(flipped_poly_per_obj)
            flipped_masks = PolygonMasks(flipped_masks, self.height,
                                         self.width)
        return flipped_masks

    def crop(self, bbox):
        """see :func:`BaseInstanceMasks.crop`"""
        assert isinstance(bbox, np.ndarray)
        assert bbox.ndim == 1

        # clip the boundary
        bbox = bbox.copy()
        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
        x1, y1, x2, y2 = bbox
        w = np.maximum(x2 - x1, 1)
        h = np.maximum(y2 - y1, 1)

        if len(self.masks) == 0:
            cropped_masks = PolygonMasks([], h, w)
        else:
            cropped_masks = []
            for poly_per_obj in self.masks:
                cropped_poly_per_obj = []
                for p in poly_per_obj:
                    # pycocotools will clip the boundary
                    p = p.copy()
                    p[0::2] -= bbox[0]
                    p[1::2] -= bbox[1]
                    cropped_poly_per_obj.append(p)
                cropped_masks.append(cropped_poly_per_obj)
            cropped_masks = PolygonMasks(cropped_masks, h, w)
        return cropped_masks

    def pad(self, out_shape, pad_val=0):
        """padding has no effect on polygons"""
        return PolygonMasks(self.masks, *out_shape)

    def expand(self, *args, **kwargs):
        """TODO: Add expand for polygon"""
        raise NotImplementedError

    def crop_and_resize(self,
                        bboxes,
                        out_shape,
                        inds,
                        device='cpu',
                        interpolation='bilinear'):
        """see :func:`BaseInstanceMasks.crop_and_resize`"""
        out_h, out_w = out_shape
        if len(self.masks) == 0:
            return PolygonMasks([], out_h, out_w)

        resized_masks = []
        for i in range(len(bboxes)):
            mask = self.masks[inds[i]]
            bbox = bboxes[i, :]
            x1, y1, x2, y2 = bbox
            w = np.maximum(x2 - x1, 1)
            h = np.maximum(y2 - y1, 1)
            # (the source record is truncated at this point)
            h = np.maximum(y2 - y1, 1)
            h_scale = out_h / max(h, 0.1)  # avoid too large scale
            w_scale = out_w / max(w, 0.1)

            resized_mask = []
            for p in mask:
                p = p.copy()
                # crop
                # pycocotools will clip the boundary
                p[0::2] -= bbox[0]
                p[1::2] -= bbox[1]

                # resize
                p[0::2] *= w_scale
                p[1::2] *= h_scale
                resized_mask.append(p)
            resized_masks.append(resized_mask)
        return PolygonMasks(resized_masks, *out_shape)

    def translate(self,
                  out_shape,
                  offset,
                  direction='horizontal',
                  fill_val=None,
                  interpolation=None):
        """Translate the PolygonMasks."""
        assert fill_val is None or fill_val == 0, 'Here fill_val is not '\
            f'used, and by default should be None or 0. got {fill_val}.'
        if len(self.masks) == 0:
            translated_masks = PolygonMasks([], *out_shape)
        else:
            translated_masks = []
            for poly_per_obj in self.masks:
                translated_poly_per_obj = []
                for p in poly_per_obj:
                    p = p.copy()
                    if direction == 'horizontal':
                        p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
                    elif direction == 'vertical':
                        p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
                    translated_poly_per_obj.append(p)
                translated_masks.append(translated_poly_per_obj)
            translated_masks = PolygonMasks(translated_masks, *out_shape)
        return translated_masks

    def shear(self,
              out_shape,
              magnitude,
              direction='horizontal',
              border_value=0,
              interpolation='bilinear'):
        """See :func:`BaseInstanceMasks.shear`."""
        if len(self.masks) == 0:
            sheared_masks = PolygonMasks([], *out_shape)
        else:
            sheared_masks = []
            if direction == 'horizontal':
                shear_matrix = np.stack([[1, magnitude],
                                         [0, 1]]).astype(np.float32)
            elif direction == 'vertical':
                shear_matrix = np.stack([[1, 0], [magnitude,
                                                  1]]).astype(np.float32)
            for poly_per_obj in self.masks:
                sheared_poly = []
                for p in poly_per_obj:
                    p = np.stack([p[0::2], p[1::2]], axis=0)  # [2, n]
                    new_coords = np.matmul(shear_matrix, p)  # [2, n]
                    new_coords[0, :] = np.clip(new_coords[0, :], 0,
                                               out_shape[1])
                    new_coords[1, :] = np.clip(new_coords[1, :], 0,
                                               out_shape[0])
                    sheared_poly.append(
                        new_coords.transpose((1, 0)).reshape(-1))
                sheared_masks.append(sheared_poly)
            sheared_masks = PolygonMasks(sheared_masks, *out_shape)
        return sheared_masks

    def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
        """See :func:`BaseInstanceMasks.rotate`."""
        if len(self.masks) == 0:
            rotated_masks = PolygonMasks([], *out_shape)
        else:
            rotated_masks = []
            rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
            for poly_per_obj in self.masks:
                rotated_poly = []
                for p in poly_per_obj:
                    p = p.copy()
                    coords = np.stack([p[0::2], p[1::2]], axis=1)  # [n, 2]
                    # pad 1 to convert from format [x, y] to homogeneous
                    # coordinates format [x, y, 1]
                    coords = np.concatenate(
                        (coords, np.ones((coords.shape[0], 1), coords.dtype)),
                        axis=1)  # [n, 3]
                    rotated_coords = np.matmul(
                        rotate_matrix[None, :, :],
                        coords[:, :, None])[..., 0]  # [n, 2, 1] -> [n, 2]
                    rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
                                                   out_shape[1])
                    rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
                                                   out_shape[0])
                    rotated_poly.append(rotated_coords.reshape(-1))
                rotated_masks.append(rotated_poly)
            rotated_masks = PolygonMasks(rotated_masks, *out_shape)
        return rotated_masks

    def to_bitmap(self):
        """Convert polygon masks to bitmap masks."""
        bitmap_masks = self.to_ndarray()
        return BitmapMasks(bitmap_masks, self.height, self.width)

    @property
    def areas(self):
        """Compute areas of masks.

        This func is modified from `detectron2
        <https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.
        The function only works with Polygons using the shoelace formula.
        Return:
            ndarray: areas of each instance
        """  # noqa: W501
        area = []
        for polygons_per_obj in self.masks:
            area_per_obj = 0
            for p in polygons_per_obj:
                area_per_obj += self._polygon_area(p[0::2], p[1::2])
            area.append(area_per_obj)
        return np.asarray(area)

    def _polygon_area(self, x, y):
        """Compute the area of a component of a polygon.

        Using the shoelace formula:
        https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates

        Args:
            x (ndarray): x coordinates of the component
            y (ndarray): y coordinates of the component

        Return:
            float: the area of the component
        """  # noqa: 501
        return 0.5 * np.abs(
            np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

    def to_ndarray(self):
        """Convert masks to the format of ndarray."""
        if len(self.masks) == 0:
            return np.empty((0, self.height, self.width), dtype=np.uint8)
        bitmap_masks = []
        for poly_per_obj in self.masks:
            bitmap_masks.append(
                polygon_to_bitmap(poly_per_obj, self.height, self.width))
        return np.stack(bitmap_masks)

    def to_tensor(self, dtype, device):
        """See :func:`BaseInstanceMasks.to_tensor`."""
        if len(self.masks) == 0:
            return torch.empty((0, self.height, self.width),
                               dtype=dtype,
                               device=device)
        ndarray_masks = self.to_ndarray()
        return torch.tensor(ndarray_masks, dtype=dtype, device=device)


def polygon_to_bitmap(polygons, height, width):
    """Convert masks from the form of polygons to bitmaps.

    Args:
        polygons (list[ndarray]): masks in polygon representation
        height (int): mask height
        width (int): mask width

    Return:
        ndarray: the converted masks in bitmap representation
    """
    rles = maskUtils.frPyObjects(polygons, height, width)
    rle = maskUtils.merge(rles)
    bitmap_mask = maskUtils.decode(rle).astype(np.bool)
    return bitmap_mask
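
# A minimal usage sketch (illustrative only; the triangle polygon below is a
# made-up value): rasterize one polygon into an (H, W) boolean bitmap.
#
#   poly = [np.array([0., 0., 8., 0., 8., 8.])]
#   bitmap = polygon_to_bitmap(poly, height=16, width=16)
#   # bitmap.shape == (16, 16), dtype bool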
30,134
35.394928
141
py
GFocalV2
GFocalV2-master/mmdet/core/mask/mask_target.py
import numpy as np
import torch
from torch.nn.modules.utils import _pair


def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
                cfg):
    """Compute mask targets for positive proposals in multiple images.

    Args:
        pos_proposals_list (list[Tensor]): Positive proposals in multiple
            images.
        pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for
            each positive proposal.
        gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
            each image.
        cfg (dict): Config dict that specifies the mask size.

    Returns:
        list[Tensor]: Mask target of each image.
    """
    cfg_list = [cfg for _ in range(len(pos_proposals_list))]
    mask_targets = map(mask_target_single, pos_proposals_list,
                       pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
    mask_targets = list(mask_targets)
    if len(mask_targets) > 0:
        mask_targets = torch.cat(mask_targets)
    return mask_targets


def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
    """Compute mask target for each positive proposal in the image.

    Args:
        pos_proposals (Tensor): Positive proposals.
        pos_assigned_gt_inds (Tensor): Assigned GT indices of positive
            proposals.
        gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
            or Polygon.
        cfg (dict): Config dict that indicates the mask size.

    Returns:
        Tensor: Mask target of each positive proposal in the image.
    """
    device = pos_proposals.device
    mask_size = _pair(cfg.mask_size)
    num_pos = pos_proposals.size(0)
    if num_pos > 0:
        proposals_np = pos_proposals.cpu().numpy()
        maxh, maxw = gt_masks.height, gt_masks.width
        proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
        proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
        mask_targets = gt_masks.crop_and_resize(
            proposals_np, mask_size, device=device,
            inds=pos_assigned_gt_inds).to_ndarray()

        mask_targets = torch.from_numpy(mask_targets).float().to(device)
    else:
        mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
    return mask_targets
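
# A minimal usage sketch (illustrative only; `SimpleCfg`, the toy proposal
# and the toy polygon gt mask below are assumptions, not part of mmdet):
#
#   from mmdet.core.mask.structures import PolygonMasks
#
#   class SimpleCfg:
#       mask_size = 28
#
#   gt_masks = PolygonMasks([[np.array([0., 0., 20., 0., 20., 20.])]], 32, 32)
#   pos_proposals = torch.tensor([[0., 0., 20., 20.]])
#   pos_assigned_gt_inds = torch.tensor([0])
#   targets = mask_target_single(pos_proposals, pos_assigned_gt_inds,
#                                gt_masks, SimpleCfg())
#   # targets.shape == (1, 28, 28)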
2,354
36.380952
78
py
GFocalV2
GFocalV2-master/mmdet/core/export/pytorch2onnx.py
from functools import partial

import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint

try:
    from mmcv.onnx.symbolic import register_extra_symbolics
except ModuleNotFoundError:
    raise NotImplementedError('please update mmcv to version>=v1.0.4')


def generate_inputs_and_wrap_model(config_path, checkpoint_path,
                                   input_config):
    """Prepare sample input and wrap model for ONNX export.

    The ONNX export API only accepts args, so all inputs should be
    torch.Tensor or corresponding types (such as tuple of tensor).
    So we should call this function before exporting. This function will:

    1. generate corresponding inputs which are used to execute the model.
    2. Wrap the model's forward function.

    For example, the MMDet models' forward function has a parameter
    ``return_loss:bool``. As we want to set it as False while the export API
    supports neither bool type nor kwargs, we have to replace the forward
    like: ``model.forward = partial(model.forward, return_loss=False)``

    Args:
        config_path (str): the OpenMMLab config for the model we want to
            export to ONNX
        checkpoint_path (str): Path to the corresponding checkpoint
        input_config (dict): the exact data in this dict depends on the
            framework. For MMSeg, we can just declare the input shape,
            and generate the dummy data accordingly. However, for MMDet,
            we may pass the real img path, or the NMS will return None
            as there is no legal bbox.

    Returns:
        tuple: (model, tensor_data) wrapped model which can be called by \
        model(*tensor_data) and a list of inputs which are used to execute \
            the model while exporting.
    """
    model = build_model_from_cfg(config_path, checkpoint_path)
    one_img, one_meta = preprocess_example_input(input_config)
    tensor_data = [one_img]
    model.forward = partial(
        model.forward, img_metas=[[one_meta]], return_loss=False)

    # PyTorch 1.3 has some bugs in its ONNX symbolics,
    # which we work around by replacing the existing ops
    opset_version = 11
    register_extra_symbolics(opset_version)

    return model, tensor_data


def build_model_from_cfg(config_path, checkpoint_path):
    """Build a model from config and load the given checkpoint.

    Args:
        config_path (str): the OpenMMLab config for the model we want to
            export to ONNX
        checkpoint_path (str): Path to the corresponding checkpoint

    Returns:
        torch.nn.Module: the built model
    """
    from mmdet.models import build_detector

    cfg = mmcv.Config.fromfile(config_path)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the model
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, checkpoint_path, map_location='cpu')
    model.cpu().eval()
    return model


def preprocess_example_input(input_config):
    """Prepare an example input image for ``generate_inputs_and_wrap_model``.

    Args:
        input_config (dict): customized config describing the example input.

    Returns:
        tuple: (one_img, one_meta), tensor of the example input image and \
            meta information for the example input image.
Examples: >>> from mmdet.core.export import preprocess_example_input >>> input_config = { >>> 'input_shape': (1,3,224,224), >>> 'input_path': 'demo/demo.jpg', >>> 'normalize_cfg': { >>> 'mean': (123.675, 116.28, 103.53), >>> 'std': (58.395, 57.12, 57.375) >>> } >>> } >>> one_img, one_meta = preprocess_example_input(input_config) >>> print(one_img.shape) torch.Size([1, 3, 224, 224]) >>> print(one_meta) {'img_shape': (224, 224, 3), 'ori_shape': (224, 224, 3), 'pad_shape': (224, 224, 3), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False} """ input_path = input_config['input_path'] input_shape = input_config['input_shape'] one_img = mmcv.imread(input_path) if 'normalize_cfg' in input_config.keys(): normalize_cfg = input_config['normalize_cfg'] mean = np.array(normalize_cfg['mean'], dtype=np.float32) std = np.array(normalize_cfg['std'], dtype=np.float32) one_img = mmcv.imnormalize(one_img, mean, std) one_img = mmcv.imresize(one_img, input_shape[2:][::-1]).transpose(2, 0, 1) one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_( True) (_, C, H, W) = input_shape one_meta = { 'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False } return one_img, one_meta
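
# A minimal export sketch (illustrative only; the config/checkpoint paths are
# placeholders, not real files): wrap the model, then hand the generated
# inputs to ``torch.onnx.export``.
#
#   input_config = {
#       'input_shape': (1, 3, 224, 224),
#       'input_path': 'demo/demo.jpg',
#       'normalize_cfg': {
#           'mean': (123.675, 116.28, 103.53),
#           'std': (58.395, 57.12, 57.375)
#       }
#   }
#   model, tensor_data = generate_inputs_and_wrap_model(
#       'configs/some_config.py', 'checkpoints/some_ckpt.pth', input_config)
#   torch.onnx.export(model, tuple(tensor_data), 'model.onnx',
#                     opset_version=11)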
5,119
35.571429
79
py
GFocalV2
GFocalV2-master/mmdet/core/export/__init__.py
from .pytorch2onnx import (build_model_from_cfg, generate_inputs_and_wrap_model, preprocess_example_input) __all__ = [ 'build_model_from_cfg', 'generate_inputs_and_wrap_model', 'preprocess_example_input' ]
269
29
61
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/demodata.py
import numpy as np import torch def ensure_rng(rng=None): """Simple version of the ``kwarray.ensure_rng`` Args: rng (int | numpy.random.RandomState | None): if None, then defaults to the global rng. Otherwise this can be an integer or a RandomState class Returns: (numpy.random.RandomState) : rng - a numpy random number generator References: https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 """ if rng is None: rng = np.random.mtrand._rand elif isinstance(rng, int): rng = np.random.RandomState(rng) else: rng = rng return rng def random_boxes(num=1, scale=1, rng=None): """Simple version of ``kwimage.Boxes.random`` Returns: Tensor: shape (n, 4) in x1, y1, x2, y2 format. References: https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 Example: >>> num = 3 >>> scale = 512 >>> rng = 0 >>> boxes = random_boxes(num, scale, rng) >>> print(boxes) tensor([[280.9925, 278.9802, 308.6148, 366.1769], [216.9113, 330.6978, 224.0446, 456.5878], [405.3632, 196.3221, 493.3953, 270.7942]]) """ rng = ensure_rng(rng) tlbr = rng.rand(num, 4).astype(np.float32) tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) tlbr[:, 0] = tl_x * scale tlbr[:, 1] = tl_y * scale tlbr[:, 2] = br_x * scale tlbr[:, 3] = br_y * scale boxes = torch.from_numpy(tlbr) return boxes
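
# A small determinism sketch (illustrative only): passing the same integer
# seed builds a fresh RandomState on each call, so the boxes repeat exactly.
#
#   boxes_a = random_boxes(3, scale=512, rng=0)
#   boxes_b = random_boxes(3, scale=512, rng=0)
#   assert torch.equal(boxes_a, boxes_b)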
1,748
26.328125
101
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/transforms.py
import numpy as np import torch def bbox_flip(bboxes, img_shape, direction='horizontal'): """Flip bboxes horizontally or vertically. Args: bboxes (Tensor): Shape (..., 4*k) img_shape (tuple): Image shape. direction (str): Flip direction, options are "horizontal", "vertical", "diagonal". Default: "horizontal" Returns: Tensor: Flipped bboxes. """ assert bboxes.shape[-1] % 4 == 0 assert direction in ['horizontal', 'vertical', 'diagonal'] flipped = bboxes.clone() if direction == 'horizontal': flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] elif direction == 'vertical': flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] else: flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] return flipped def bbox_mapping(bboxes, img_shape, scale_factor, flip, flip_direction='horizontal'): """Map bboxes from the original image scale to testing scale.""" new_bboxes = bboxes * bboxes.new_tensor(scale_factor) if flip: new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) return new_bboxes def bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction='horizontal'): """Map bboxes from testing scale to original image scale.""" new_bboxes = bbox_flip(bboxes, img_shape, flip_direction) if flip else bboxes new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) return new_bboxes.view(bboxes.shape) def bbox2roi(bbox_list): """Convert a list of bboxes to roi format. Args: bbox_list (list[Tensor]): a list of bboxes corresponding to a batch of images. Returns: Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] """ rois_list = [] for img_id, bboxes in enumerate(bbox_list): if bboxes.size(0) > 0: img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) else: rois = bboxes.new_zeros((0, 5)) rois_list.append(rois) rois = torch.cat(rois_list, 0) return rois def roi2bbox(rois): """Convert rois to bounding box format. Args: rois (torch.Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: list[torch.Tensor]: Converted boxes of corresponding rois. """ bbox_list = [] img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) for img_id in img_ids: inds = (rois[:, 0] == img_id.item()) bbox = rois[inds, 1:] bbox_list.append(bbox) return bbox_list def bbox2result(bboxes, labels, num_classes): """Convert detection results to a list of numpy arrays. Args: bboxes (torch.Tensor | np.ndarray): shape (n, 5) labels (torch.Tensor | np.ndarray): shape (n, ) num_classes (int): class number, including background class Returns: list(ndarray): bbox results of each class """ if bboxes.shape[0] == 0: return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] else: if isinstance(bboxes, torch.Tensor): bboxes = bboxes.cpu().numpy() labels = labels.cpu().numpy() return [bboxes[labels == i, :] for i in range(num_classes)] def distance2bbox(points, distance, max_shape=None): """Decode distance prediction to bounding box. Args: points (Tensor): Shape (n, 2), [x, y]. distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). max_shape (tuple): Shape of the image. Returns: Tensor: Decoded bboxes. 
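
    Example (an illustrative sketch; the numbers are made up):
        >>> points = torch.Tensor([[10., 10.]])
        >>> distance = torch.Tensor([[2., 3., 4., 5.]])
        >>> distance2bbox(points, distance)
        tensor([[ 8.,  7., 14., 15.]])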
""" x1 = points[:, 0] - distance[:, 0] y1 = points[:, 1] - distance[:, 1] x2 = points[:, 0] + distance[:, 2] y2 = points[:, 1] + distance[:, 3] if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1]) y1 = y1.clamp(min=0, max=max_shape[0]) x2 = x2.clamp(min=0, max=max_shape[1]) y2 = y2.clamp(min=0, max=max_shape[0]) return torch.stack([x1, y1, x2, y2], -1) def bbox2distance(points, bbox, max_dis=None, eps=0.1): """Decode bounding box based on distances. Args: points (Tensor): Shape (n, 2), [x, y]. bbox (Tensor): Shape (n, 4), "xyxy" format max_dis (float): Upper bound of the distance. eps (float): a small value to ensure target < max_dis, instead <= Returns: Tensor: Decoded distances. """ left = points[:, 0] - bbox[:, 0] top = points[:, 1] - bbox[:, 1] right = bbox[:, 2] - points[:, 0] bottom = bbox[:, 3] - points[:, 1] if max_dis is not None: left = left.clamp(min=0, max=max_dis - eps) top = top.clamp(min=0, max=max_dis - eps) right = right.clamp(min=0, max=max_dis - eps) bottom = bottom.clamp(min=0, max=max_dis - eps) return torch.stack([left, top, right, bottom], -1) def bbox_rescale(bboxes, scale_factor=1.0): """Rescale bounding box w.r.t. scale_factor. Args: bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois scale_factor (float): rescale factor Returns: Tensor: Rescaled bboxes. """ if bboxes.size(1) == 5: bboxes_ = bboxes[:, 1:] inds_ = bboxes[:, 0] else: bboxes_ = bboxes cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5 cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5 w = bboxes_[:, 2] - bboxes_[:, 0] h = bboxes_[:, 3] - bboxes_[:, 1] w = w * scale_factor h = h * scale_factor x1 = cx - 0.5 * w x2 = cx + 0.5 * w y1 = cy - 0.5 * h y2 = cy + 0.5 * h if bboxes.size(1) == 5: rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1) else: rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return rescaled_bboxes
6,384
31.411168
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/assigners/assign_result.py
import torch

from mmdet.utils import util_mixins


class AssignResult(util_mixins.NiceRepr):
    """Stores assignments between predicted and truth boxes.

    Attributes:
        num_gts (int): the number of truth boxes considered when computing
            this assignment

        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1 means
            ignore.

        max_overlaps (FloatTensor): the iou between the predicted box and its
            assigned truth box.

        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.

    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
        >>> labels = torch.LongTensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,),
                      max_overlaps.shape=(4,), labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = torch.LongTensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(7,),
                      max_overlaps.shape=(7,), labels.shape=(7,))>
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels
        # Interface for possible user-defined properties
        self._extra_properties = {}

    @property
    def num_preds(self):
        """int: the number of predictions in this assignment"""
        return len(self.gt_inds)

    def set_extra_property(self, key, value):
        """Set user-defined new property."""
        assert key not in self.info
        self._extra_properties[key] = value

    def get_extra_property(self, key):
        """Get user-defined property."""
        return self._extra_properties.get(key, None)

    @property
    def info(self):
        """dict: a dictionary of info about the object"""
        basic_info = {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }
        basic_info.update(self._extra_properties)
        return basic_info

    def __nice__(self):
        """str: a "nice" summary string describing this assign result"""
        parts = []
        parts.append(f'num_gts={self.num_gts!r}')
        if self.gt_inds is None:
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if self.max_overlaps is None:
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append('max_overlaps.shape='
                         f'{tuple(self.max_overlaps.shape)!r}')
        if self.labels is None:
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.

        Args:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            rng (None | int | numpy.random.RandomState): seed or state

        Returns:
            :obj:`AssignResult`: Randomly generated assign results.
        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))

        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        num_classes = kwargs.get('num_classes', 3)

        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)

        if num_gts == 0:
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if p_use_label is True or rng.rand() < p_use_label:
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            import numpy as np
            # Create an overlap for each predicted box
            max_overlaps = torch.from_numpy(rng.rand(num_preds))

            # Construct gt_inds for each predicted box
            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
            # maximum number of assignments constraints
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))

            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()

            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True

            is_ignore = torch.from_numpy(
                rng.rand(num_preds) < p_ignore) & is_assigned

            gt_inds = torch.zeros(num_preds, dtype=torch.int64)

            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = torch.from_numpy(true_idxs)
            gt_inds[is_assigned] = true_idxs[:n_assigned]

            # NOTE: the draft indices above are 0-based; the randint below
            # overwrites them with the 1-based gt indices that are actually
            # used
            gt_inds = torch.from_numpy(
                rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0

            if p_use_label is True or rng.rand() < p_use_label:
                if num_classes == 0:
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(
                        # remind that we set FG labels to [0, num_class-1]
                        # since mmdet v2.0
                        # BG cat_id: num_class
                        rng.randint(0, num_classes, size=num_preds))
                    labels[~is_assigned] = 0
            else:
                labels = None

        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Add ground truth as assigned results.

        Args:
            gt_labels (torch.Tensor): Labels of gt boxes
        """
        self_inds = torch.arange(
            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])

        self.max_overlaps = torch.cat(
            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])

        if self.labels is not None:
            self.labels = torch.cat([gt_labels, self.labels])
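
# A small sketch of the extra-property interface (illustrative only; the key
# name 'flags' is arbitrary):
#
#   self = AssignResult.random(num_gts=2, num_preds=4)
#   self.set_extra_property('flags', torch.zeros(self.num_preds))
#   assert self.get_extra_property('flags') is not None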
7,705
36.590244
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/assigners/atss_assigner.py
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class ATSSAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with `0` or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        topk (int): number of bboxes selected on each level
    """

    def __init__(self,
                 topk,
                 iou_calculator=dict(type='BboxOverlaps2D'),
                 ignore_iof_thr=-1):
        self.topk = topk
        self.iou_calculator = build_iou_calculator(iou_calculator)
        self.ignore_iof_thr = ignore_iof_thr

    # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py

    def assign(self,
               bboxes,
               num_level_bboxes,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign gt to bboxes.

        The assignment is done in the following steps

        1. compute iou between all bboxes (bboxes of all pyramid levels)
           and gt
        2. compute center distance between all bboxes and gt
        3. on each pyramid level, for each gt, select k bboxes whose centers
           are closest to the gt center, so we select k*l bboxes in total
           as candidates for each gt
        4. get the corresponding iou for these candidates, and compute the
           mean and std, set mean + std as the iou threshold
        5. select candidates whose iou is greater than or equal to the
           threshold as positive
        6. limit the positive samples' centers to be inside the gt

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            num_level_bboxes (List): num of bboxes in each level
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
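
        Example (an illustrative sketch with a single level of toy boxes;
            all values are made up):
            >>> self = ATSSAssigner(topk=3)
            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [20, 0, 30, 10],
            ...                        [0, 20, 10, 30]])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
            >>> assign_result = self.assign(bboxes, [3], gt_bboxes)
            >>> assign_result.gt_inds
            tensor([1, 0, 0])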
""" INF = 100000000 bboxes = bboxes[:, :4] num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) # compute iou between all bbox and gt overlaps = self.iou_calculator(bboxes, gt_bboxes) # assign 0 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), 0, dtype=torch.long) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_bboxes, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) # compute center distance between all bbox and gt gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 gt_points = torch.stack((gt_cx, gt_cy), dim=1) bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) distances = (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt() if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): ignore_overlaps = self.iou_calculator( bboxes, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr distances[ignore_idxs, :] = INF assigned_gt_inds[ignore_idxs] = -1 # Selecting candidates based on the center distance candidate_idxs = [] start_idx = 0 for level, bboxes_per_level in enumerate(num_level_bboxes): # on each pyramid level, for each gt, # select k bbox whose center are closest to the gt center end_idx = start_idx + bboxes_per_level distances_per_level = distances[start_idx:end_idx, :] selectable_k = min(self.topk, bboxes_per_level) _, topk_idxs_per_level = distances_per_level.topk( selectable_k, dim=0, largest=False) candidate_idxs.append(topk_idxs_per_level + start_idx) start_idx = end_idx candidate_idxs = torch.cat(candidate_idxs, dim=0) # get corresponding iou for the these candidates, and compute the # mean and std, set mean + std as the iou threshold candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] overlaps_mean_per_gt = candidate_overlaps.mean(0) overlaps_std_per_gt = candidate_overlaps.std(0) overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] # limit the positive sample's center in gt for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_bboxes ep_bboxes_cx = bboxes_cx.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) ep_bboxes_cy = bboxes_cy.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # bbox center and gt side l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest IoU will be selected. 
overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
7,760
42.357542
87
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/assigners/center_region_assigner.py
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


def scale_boxes(bboxes, scale):
    """Expand an array of boxes by a given scale.

    Args:
        bboxes (Tensor): Shape (m, 4)
        scale (float): The scale factor of bboxes

    Returns:
        (Tensor): Shape (m, 4). Scaled bboxes
    """
    assert bboxes.size(1) == 4
    w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5
    h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5
    x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5
    y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5

    w_half *= scale
    h_half *= scale

    boxes_scaled = torch.zeros_like(bboxes)
    boxes_scaled[:, 0] = x_c - w_half
    boxes_scaled[:, 2] = x_c + w_half
    boxes_scaled[:, 1] = y_c - h_half
    boxes_scaled[:, 3] = y_c + h_half
    return boxes_scaled


def is_located_in(points, bboxes):
    """Check whether the points are located in the bboxes.

    Args:
        points (Tensor): Points, shape: (m, 2).
        bboxes (Tensor): Bounding boxes, shape: (n, 4).

    Return:
        Tensor: Flags indicating if points are located in bboxes,
            shape: (m, n).
    """
    assert points.size(1) == 2
    assert bboxes.size(1) == 4
    return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \
           (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \
           (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \
           (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0))


def bboxes_area(bboxes):
    """Compute the area of an array of bboxes.

    Args:
        bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4)

    Returns:
        Tensor: Area of the bboxes. Shape: (m, )
    """
    assert bboxes.size(1) == 4
    w = (bboxes[:, 2] - bboxes[:, 0])
    h = (bboxes[:, 3] - bboxes[:, 1])
    areas = w * h
    return areas


@BBOX_ASSIGNERS.register_module()
class CenterRegionAssigner(BaseAssigner):
    """Assign pixels at the center region of a bbox as positive.

    Each proposal will be assigned with `-1`, `0`, or a positive integer
    indicating the ground truth index.

    - -1: negative samples
    - semi-positive numbers: positive sample, index (0-based) of assigned gt

    Args:
        pos_scale (float): Threshold within which pixels are labelled as
            positive.
        neg_scale (float): Threshold above which pixels are labelled as
            negative.
        min_pos_iof (float): Minimum iof of a pixel with a gt to be labelled
            as positive. Default: 1e-2
        ignore_gt_scale (float): Threshold within which the pixels are
            ignored when the gt is labelled as shadowed. Default: 0.5
        foreground_dominate (bool): If True, the bbox will be assigned as
            positive when a gt's kernel region overlaps with another's
            shadowed (ignored) region, otherwise it is set as ignored.
            Default to False.
    """

    def __init__(self,
                 pos_scale,
                 neg_scale,
                 min_pos_iof=1e-2,
                 ignore_gt_scale=0.5,
                 foreground_dominate=False,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_scale = pos_scale
        self.neg_scale = neg_scale
        self.min_pos_iof = min_pos_iof
        self.ignore_gt_scale = ignore_gt_scale
        self.foreground_dominate = foreground_dominate
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def get_gt_priorities(self, gt_bboxes):
        """Get gt priorities according to their areas.

        Smaller gts have higher priority.

        Args:
            gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).

        Returns:
            Tensor: The priority of gts so that gts with larger priority \
                are more likely to be assigned. Shape (k, )
        """
        gt_areas = bboxes_area(gt_bboxes)
        # Rank all gt bbox areas. Smaller objects have higher priority
        _, sort_idx = gt_areas.sort(descending=True)
        sort_idx = sort_idx.argsort()
        return sort_idx

    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign gt to bboxes.
        This method assigns gts to every bbox (proposal/anchor); each bbox \
        will be assigned with -1, or a semi-positive number. -1 means \
        negative sample, semi-positive number is the index (0-based) of \
        assigned gt.

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (tensor, optional): Label of gt_bboxes, shape
                (num_gts,).

        Returns:
            :obj:`AssignResult`: The assigned result. Note that \
                shadowed_labels of shape (N, 2) is also added as an \
                `assign_result` attribute. `shadowed_labels` is a tensor \
                composed of N pairs of [anchor_ind, class_label], where N \
                is the number of anchors that lie in the outer region of a \
                gt, anchor_ind is the shadowed anchor index and class_label \
                is the shadowed class label.

        Example:
            >>> self = CenterRegionAssigner(0.2, 0.2)
            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
            >>> assign_result = self.assign(bboxes, gt_bboxes)
            >>> expected_gt_inds = torch.LongTensor([1, 0])
            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
        """
        # There are in total 5 steps in the pixel assignment
        # 1. Find core (the center region, say inner 0.2)
        #    and shadow (the relatively outer part, say inner 0.2-0.5)
        #    regions of every gt.
        # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions
        # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in
        #    the image.
        #    3.1. For overlapping objects, the prior bboxes in gt_core are
        #         assigned to the object with the smallest area
        # 4. Assign prior bboxes with class label according to its gt id.
        #    4.1. Assign -1 to prior bboxes lying in shadowed gts
        #    4.2. Assign positive prior boxes with the corresponding label
        # 5. Find pixels lying in the shadow of an object and assign them
        #    with background label, but set the loss weight of its
        #    corresponding gt to zero.
        assert bboxes.size(1) == 4, 'bboxes must have size of 4'
        # 1. Find core positive and shadow region of every gt
        gt_core = scale_boxes(gt_bboxes, self.pos_scale)
        gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)

        # 2. Find prior bboxes that lie in gt_core and gt_shadow regions
        bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2
        # The center points lie within the gt boxes
        is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes)
        # Only calculate bbox and gt_core IoF. This enables small prior
        # bboxes to match large gts
        bbox_and_gt_core_overlaps = self.iou_calculator(
            bboxes, gt_core, mode='iof')
        # The center point of effective priors should be within the gt box
        is_bbox_in_gt_core = is_bbox_in_gt & (
            bbox_and_gt_core_overlaps > self.min_pos_iof)  # shape (n, k)

        is_bbox_in_gt_shadow = (
            self.iou_calculator(bboxes, gt_shadow, mode='iof') >
            self.min_pos_iof)
        # Rule out center effective positive pixels
        is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core)

        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
        if num_gts == 0 or num_bboxes == 0:
            # If no gts exist, assign all pixels to negative
            assigned_gt_ids = \
                is_bbox_in_gt_core.new_zeros((num_bboxes,),
                                             dtype=torch.long)
            pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))
        else:
            # Step 3: assign a one-hot gt id to each pixel, and smaller
            # objects have high priority to assign the pixel.
            sort_idx = self.get_gt_priorities(gt_bboxes)
            assigned_gt_ids, pixels_in_gt_shadow = \
                self.assign_one_hot_gt_indices(is_bbox_in_gt_core,
                                               is_bbox_in_gt_shadow,
                                               gt_priority=sort_idx)

        if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0:
            # Assign -1 to anchors whose centers lie in a (scaled) ignored gt
            gt_bboxes_ignore = scale_boxes(
                gt_bboxes_ignore, scale=self.ignore_gt_scale)
            is_bbox_in_ignored_gts = is_located_in(bbox_centers,
                                                   gt_bboxes_ignore)
            is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1)
            assigned_gt_ids[is_bbox_in_ignored_gts] = -1

        # 4. Assign prior bboxes with class label according to its gt id.
        assigned_labels = None
        shadowed_pixel_labels = None
        if gt_labels is not None:
            # Default assigned label is the background (-1)
            assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_ids > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = \
                    gt_labels[assigned_gt_ids[pos_inds] - 1]
            # 5. Find pixels lying in the shadow of an object
            shadowed_pixel_labels = pixels_in_gt_shadow.clone()
            if pixels_in_gt_shadow.numel() > 0:
                pixel_idx, gt_idx = \
                    pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]
                assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \
                    'Some pixels are dually assigned to ignore and gt!'
                shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]
                override = (
                    assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])
                if self.foreground_dominate:
                    # When a pixel is both positive and shadowed, set it as
                    # pos
                    shadowed_pixel_labels = shadowed_pixel_labels[~override]
                else:
                    # When a pixel is both pos and shadowed, set it as
                    # shadowed
                    assigned_labels[pixel_idx[override]] = -1
                    assigned_gt_ids[pixel_idx[override]] = 0

        assign_result = AssignResult(
            num_gts, assigned_gt_ids, None, labels=assigned_labels)
        # Add shadowed_labels as assign_result property.
        # Shape: (num_shadow, 2)
        assign_result.set_extra_property('shadowed_labels',
                                         shadowed_pixel_labels)
        return assign_result

    def assign_one_hot_gt_indices(self,
                                  is_bbox_in_gt_core,
                                  is_bbox_in_gt_shadow,
                                  gt_priority=None):
        """Assign only one gt index to each prior box.

        Gts with large gt_priority are more likely to be assigned.

        Args:
            is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox
                center is in the core area of a gt (e.g. 0-0.2).
                Shape: (num_prior, num_gt).
            is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox
                center is in the shadowed area of a gt (e.g. 0.2-0.5).
                Shape: (num_prior, num_gt).
            gt_priority (Tensor): Priorities of gts. The gt with a higher
                priority is more likely to be assigned to the bbox when the
                bbox matches with multiple gts. Shape: (num_gt, ).

        Returns:
            tuple: Returns (assigned_gt_inds, shadowed_gt_inds).

                - assigned_gt_inds: The assigned gt index of each prior \
                    bbox (i.e. index from 1 to num_gts). Shape: \
                    (num_prior, ).
                - shadowed_gt_inds: shadowed gt indices. It is a tensor of \
                    shape (num_ignore, 2) with first column being the \
                    shadowed prior bbox indices and the second column the \
                    shadowed gt indices (1-based).
        """
        num_bboxes, num_gts = is_bbox_in_gt_core.shape

        if gt_priority is None:
            gt_priority = torch.arange(
                num_gts, device=is_bbox_in_gt_core.device)
        assert gt_priority.size(0) == num_gts
        # The bigger gt_priority, the more preferable to be assigned
        # The assigned inds are by default 0 (background)
        assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ),
                                                        dtype=torch.long)
        # Shadowed bboxes are assigned to be background.
        # But the corresponding label is ignored during loss calculation,
        # which is done through shadowed_gt_inds
        shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False)
        if is_bbox_in_gt_core.sum() == 0:  # No gt match
            shadowed_gt_inds[:, 1] += 1  # 1-based. For consistency issue
            return assigned_gt_inds, shadowed_gt_inds

        # The priority of each prior box and gt pair. If one prior box is
        # matched to multiple gts, only the pair with the highest priority
        # is saved
        pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts),
                                                    -1,
                                                    dtype=torch.long)

        # Each bbox could match with multiple gts.
        # The following code deals with this situation
        # Matched bboxes (to any gt). Shape: (num_pos_anchor, )
        inds_of_match = torch.any(is_bbox_in_gt_core, dim=1)
        # The matched gt index of each positive bbox. Length >=
        # num_pos_anchor, since one bbox could match multiple gts
        matched_bbox_gt_inds = torch.nonzero(
            is_bbox_in_gt_core, as_tuple=False)[:, 1]
        # Assign priority to each bbox-gt pair.
        pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds]
        _, argmax_priority = pair_priority[inds_of_match].max(dim=1)
        assigned_gt_inds[inds_of_match] = argmax_priority + 1  # 1-based
        # Zero-out the assigned anchor box to filter the shadowed gt indices
        is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0
        # Concat the shadowed indices due to overlapping with that out side
        # of effective scale. shape: (total_num_ignore, 2)
        shadowed_gt_inds = torch.cat(
            (shadowed_gt_inds, torch.nonzero(
                is_bbox_in_gt_core, as_tuple=False)),
            dim=0)
        # `is_bbox_in_gt_core` should be changed back to keep arguments
        # intact.
        is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1
        # 1-based shadowed gt indices, to be consistent with
        # `assigned_gt_inds`
        if shadowed_gt_inds.numel() > 0:
            shadowed_gt_inds[:, 1] += 1
        return assigned_gt_inds, shadowed_gt_inds
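
# A small sketch of the helper functions (illustrative values): scale_boxes
# shrinks/expands boxes around their centers, which is how the core
# (pos_scale) and shadow (neg_scale) regions are built.
#
#   boxes = torch.tensor([[0., 0., 10., 10.]])
#   core = scale_boxes(boxes, 0.2)    # -> tensor([[4.0, 4.0, 6.0, 6.0]])
#   shadow = scale_boxes(boxes, 0.5)  # -> tensor([[2.5, 2.5, 7.5, 7.5]])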
15,429
44.922619
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/assigners/grid_assigner.py
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class GridAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with `-1`, `0`, or a positive integer
    indicating the ground truth index.

    - -1: don't care
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt).
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self, bboxes, box_responsible_flags, gt_bboxes,
               gt_labels=None):
        """Assign gt to bboxes.

        The process is very much like the max iou assigner, except that
        positive samples are constrained within the cell that the gt boxes
        fell in.

        This method assigns a gt bbox to every bbox (proposal/anchor); each
        bbox will be assigned with -1, 0, or a positive number. -1 means
        don't care, 0 means negative sample, positive number is the index
        (1-based) of assigned gt.

        The assignment is done in the following steps; the order matters.

        1. assign every bbox to -1
        2. assign proposals whose iou with all gts <= neg_iou_thr to 0
        3. for each bbox within a cell, if the iou with its nearest gt >
           pos_iou_thr and the center of that gt falls inside the cell,
           assign it to that bbox
        4. for each gt bbox, assign its nearest proposals within the cell
           the gt bbox falls in to itself.

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            box_responsible_flags (Tensor): flag to indicate whether box is
                responsible for prediction, shape(n, )
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)

        # compute iou between all gt and bboxes
        overlaps = self.iou_calculator(gt_bboxes, bboxes)

        # 1. assign -1 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)

        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gts == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gts,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)

        # 2. assign negative: below
        # for each anchor, which gt best overlaps with it
        # for each anchor, the max iou of all gts
        # shape of max_overlaps == argmax_overlaps == num_bboxes
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)

        if isinstance(self.neg_iou_thr, float):
            assigned_gt_inds[(max_overlaps >= 0)
                             & (max_overlaps <= self.neg_iou_thr)] = 0
        elif isinstance(self.neg_iou_thr, (tuple, list)):
            assert len(self.neg_iou_thr) == 2
            assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])
                             & (max_overlaps <= self.neg_iou_thr[1])] = 0

        # 3. assign positive: falls into responsible cell and above
        # positive IOU threshold, the order matters.
        # the prior condition of comparison is to filter out all
        # unrelated anchors, i.e. not box_responsible_flags
        overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.

        # calculate max_overlaps again, but this time we only consider IOUs
        # for anchors responsible for prediction
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)

        # for each gt, which anchor best overlaps with it
        # for each gt, the max iou of all proposals
        # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts
        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)

        pos_inds = (max_overlaps > self.pos_iou_thr) & \
            box_responsible_flags.type(torch.bool)
        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1

        # 4. assign positive to max overlapped anchors within responsible
        # cell
        for i in range(num_gts):
            if gt_max_overlaps[i] > self.min_pos_iou:
                if self.gt_max_assign_all:
                    max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \
                        box_responsible_flags.type(torch.bool)
                    assigned_gt_inds[max_iou_inds] = i + 1
                elif box_responsible_flags[gt_argmax_overlaps[i]]:
                    assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1

        # assign labels of positive anchors
        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None

        return AssignResult(
            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
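
# A minimal call sketch (illustrative values). Only anchors whose responsible
# flag is set can become positive:
#
#   self = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
#   bboxes = torch.tensor([[0., 0., 10., 10.], [10., 10., 20., 20.]])
#   flags = torch.tensor([1, 0], dtype=torch.uint8)
#   gt_bboxes = torch.tensor([[0., 0., 10., 9.]])
#   assign_result = self.assign(bboxes, flags, gt_bboxes)
#   # assign_result.gt_inds -> tensor([1, 0])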
6,816
42.698718
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/assigners/point_assigner.py
import torch

from ..builder import BBOX_ASSIGNERS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class PointAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each point.

    Each proposal will be assigned with `0`, or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt
    """

    def __init__(self, scale=4, pos_num=3):
        self.scale = scale
        self.pos_num = pos_num

    def assign(self, points, gt_bboxes, gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign gt to points.

        This method assigns a gt bbox to every point set; each point set
        will be assigned with 0, or a positive number. 0 means negative
        sample (background), and a positive number is the index (1-based)
        of the assigned gt.

        The assignment is done in the following steps; the order matters.

        1. assign every point to the background (0)
        2. A point is assigned to some gt bbox if
            (i) the point is within the k closest points to the gt bbox
            (ii) the distance between this point and the gt is smaller than
                the distance to other gt bboxes

        Args:
            points (Tensor): points to be assigned, shape(n, 3) where the
                last dimension stands for (x, y, stride).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that
                are labelled as `ignored`, e.g., crowd boxes in COCO.
                NOTE: currently unused.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_points = points.shape[0]
        num_gts = gt_bboxes.shape[0]

        if num_gts == 0 or num_points == 0:
            # If no truth assign everything to the background
            assigned_gt_inds = points.new_full((num_points, ),
                                               0,
                                               dtype=torch.long)
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = points.new_full((num_points, ),
                                                  -1,
                                                  dtype=torch.long)
            return AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)

        points_xy = points[:, :2]
        points_stride = points[:, 2]
        points_lvl = torch.log2(
            points_stride).int()  # [3...,4...,5...,6...,7...]
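        # e.g. strides (8, 16, 32, 64, 128) give levels (3, 4, 5, 6, 7);
        # below, each gt is mapped to the level whose stride best matches
        # its scale-normalized size, so only points from that level compete
        # for the gt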
lvl_min, lvl_max = points_lvl.min(), points_lvl.max()

        # assign gt box
        gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
        gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
        scale = self.scale
        gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
                          torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
        gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)

        # stores the assigned gt index of each point
        assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)
        # stores the assigned gt dist (to this point) of each point
        assigned_gt_dist = points.new_full((num_points, ), float('inf'))
        points_range = torch.arange(points.shape[0])

        for idx in range(num_gts):
            gt_lvl = gt_bboxes_lvl[idx]
            # get the index of points in this level
            lvl_idx = gt_lvl == points_lvl
            points_index = points_range[lvl_idx]
            # get the points in this level
            lvl_points = points_xy[lvl_idx, :]
            # get the center point of gt
            gt_point = gt_bboxes_xy[[idx], :]
            # get width and height of gt
            gt_wh = gt_bboxes_wh[[idx], :]
            # compute the distance between gt center and
            #   all points in this level
            points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)
            # find the nearest k points to gt center in this level
            min_dist, min_dist_index = torch.topk(
                points_gt_dist, self.pos_num, largest=False)
            # the index of nearest k points to gt center in this level
            min_dist_points_index = points_index[min_dist_index]
            # The less_than_recorded_index stores the index
            #   of min_dist that is less than the assigned_gt_dist. Where
            #   assigned_gt_dist stores the dist from the previously assigned
            #   gt (if it exists) to each point.
            less_than_recorded_index = min_dist < assigned_gt_dist[
                min_dist_points_index]
            # The min_dist_points_index stores the index of points that
            #   satisfy:
            #   (1) it is k nearest to current gt center in this level.
            #   (2) it is closer to current gt center than other gt centers.
            min_dist_points_index = min_dist_points_index[
                less_than_recorded_index]
            # assign the result
            assigned_gt_inds[min_dist_points_index] = idx + 1
            assigned_gt_dist[min_dist_points_index] = min_dist[
                less_than_recorded_index]

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None

        return AssignResult(
            num_gts, assigned_gt_inds, None, labels=assigned_labels)
5,947
43.38806
79
py
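A quick way to sanity-check the level-mapping logic above is to recompute `gt_bboxes_lvl` by hand. This is a hedged, standalone sketch with made-up strides (8 to 128) and the default `scale=4`; it mirrors the formula in `assign` without importing the class.

import torch

scale = 4
points_lvl = torch.log2(torch.tensor([8., 16., 32., 64., 128.])).int()
lvl_min, lvl_max = points_lvl.min(), points_lvl.max()
gt = torch.tensor([[10., 10., 74., 42.]])  # one 64x32 box
wh = (gt[:, 2:] - gt[:, :2]).clamp(min=1e-6)
# geometric-mean level: (log2(64/4) + log2(32/4)) / 2 = 3.5 -> 3
gt_lvl = ((torch.log2(wh[:, 0] / scale) +
           torch.log2(wh[:, 1] / scale)) / 2).int()
gt_lvl = torch.clamp(gt_lvl, min=lvl_min, max=lvl_max)
print(gt_lvl)  # tensor([3], dtype=torch.int32) -> the stride-8 level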
GFocalV2
GFocalV2-master/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .max_iou_assigner import MaxIoUAssigner


@BBOX_ASSIGNERS.register_module()
class ApproxMaxIoUAssigner(MaxIoUAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with an integer indicating the ground
    truth index. (semi-positive index: gt label (0-based), -1: background)

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt).
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow low quality matches. This
            is usually allowed for RPN and single stage detectors, but not
            allowed in the second stage.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will assign
            on CPU device. Negative values mean not assign on CPU.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 ignore_iof_thr=-1,
                 ignore_wrt_candidates=True,
                 match_low_quality=True,
                 gpu_assign_thr=-1,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        self.gpu_assign_thr = gpu_assign_thr
        self.match_low_quality = match_low_quality
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               approxs,
               squares,
               approxs_per_octave,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign gt to approxs.

        This method assigns a gt bbox to each group of approxs (bboxes);
        each group of approxs is represented by a base approx (bbox) and
        will be assigned with -1, or a semi-positive number.
        background_label (-1) means negative sample,
        semi-positive number is the index (0-based) of assigned gt.

        The assignment is done in following steps, the order matters.

        1. assign every bbox to background_label (-1)
        2. use the max IoU of each group of approxs to assign
        3. assign proposals whose iou with all gts < neg_iou_thr to background
        4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that bbox
        5. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself

        Args:
            approxs (Tensor): Bounding boxes to be assigned,
                shape(approxs_per_octave*n, 4).
            squares (Tensor): Base Bounding boxes to be assigned,
                shape(n, 4).
            approxs_per_octave (int): number of approxs per octave
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
""" num_squares = squares.size(0) num_gts = gt_bboxes.size(0) if num_squares == 0 or num_gts == 0: # No predictions and/or truth, return empty assignment overlaps = approxs.new(num_gts, num_squares) assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) return assign_result # re-organize anchors by approxs_per_octave x num_squares approxs = torch.transpose( approxs.view(num_squares, approxs_per_octave, 4), 0, 1).contiguous().view(-1, 4) assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( num_gts > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = approxs.device approxs = approxs.cpu() gt_bboxes = gt_bboxes.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() if gt_labels is not None: gt_labels = gt_labels.cpu() all_overlaps = self.iou_calculator(approxs, gt_bboxes) overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, num_gts).max(dim=0) overlaps = torch.transpose(overlaps, 0, 1) if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0): if self.ignore_wrt_candidates: ignore_overlaps = self.iou_calculator( squares, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) else: ignore_overlaps = self.iou_calculator( gt_bboxes_ignore, squares, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result
6,649
44.547945
79
py
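The reshaping in `assign` above is the subtle part: the per-octave approx IoUs are folded into a single IoU per base square by taking the maximum over the octave dimension. A minimal standalone sketch of that reduction, with random values and illustrative sizes:

import torch

num_squares, approxs_per_octave, num_gts = 3, 2, 2
# rows are ordered octave-major, matching the transpose/view in assign()
all_overlaps = torch.rand(num_squares * approxs_per_octave, num_gts)
overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
                                num_gts).max(dim=0)
overlaps = torch.transpose(overlaps, 0, 1)  # (num_gts, num_squares)
print(overlaps.shape)  # torch.Size([2, 3])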
GFocalV2
GFocalV2-master/mmdet/core/bbox/assigners/max_iou_assigner.py
import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class MaxIoUAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, or a semi-positive integer indicating the ground truth index. - -1: negative sample, no assigned gt - semi-positive integer: positive sample, index (0-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. ignore_wrt_candidates (bool): Whether to compute the iof between `bboxes` and `gt_bboxes_ignore`, or the contrary. match_low_quality (bool): Whether to allow low quality matches. This is usually allowed for RPN and single stage detectors, but not allowed in the second stage. Details are demonstrated in Step 4. gpu_assign_thr (int): The upper bound of the number of GT for GPU assign. When the number of gt is above this threshold, will assign on CPU device. Negative values mean not assign on CPU. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, ignore_iof_thr=-1, ignore_wrt_candidates=True, match_low_quality=True, gpu_assign_thr=-1, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.ignore_iof_thr = ignore_iof_thr self.ignore_wrt_candidates = ignore_wrt_candidates self.gpu_assign_thr = gpu_assign_thr self.match_low_quality = match_low_quality self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to bboxes. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, or a semi-positive number. -1 means negative sample, semi-positive number is the index (0-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to the background 2. assign proposals whose iou with all gts < neg_iou_thr to 0 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, assign it to that bbox 4. for each gt bbox, assign its nearest proposals (may be more than one) to itself Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. 
Example:
            >>> self = MaxIoUAssigner(0.5, 0.5)
            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
            >>> assign_result = self.assign(bboxes, gt_bboxes)
            >>> expected_gt_inds = torch.LongTensor([1, 0])
            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
        """
        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
            gt_bboxes.shape[0] > self.gpu_assign_thr) else False
        # compute overlap and assign gt on CPU when number of GT is large
        if assign_on_cpu:
            device = bboxes.device
            bboxes = bboxes.cpu()
            gt_bboxes = gt_bboxes.cpu()
            if gt_bboxes_ignore is not None:
                gt_bboxes_ignore = gt_bboxes_ignore.cpu()
            if gt_labels is not None:
                gt_labels = gt_labels.cpu()

        overlaps = self.iou_calculator(gt_bboxes, bboxes)

        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
            if self.ignore_wrt_candidates:
                ignore_overlaps = self.iou_calculator(
                    bboxes, gt_bboxes_ignore, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            else:
                ignore_overlaps = self.iou_calculator(
                    gt_bboxes_ignore, bboxes, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1

        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
        if assign_on_cpu:
            assign_result.gt_inds = assign_result.gt_inds.to(device)
            assign_result.max_overlaps = assign_result.max_overlaps.to(device)
            if assign_result.labels is not None:
                assign_result.labels = assign_result.labels.to(device)
        return assign_result

    def assign_wrt_overlaps(self, overlaps, gt_labels=None):
        """Assign w.r.t. the overlaps of bboxes with gts.

        Args:
            overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
                shape(k, n).
            gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)

        # 1. assign -1 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)

        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gts == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gts,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)

        # for each anchor, which gt best overlaps with it
        # for each anchor, the max iou of all gts
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)
        # for each gt, which anchor best overlaps with it
        # for each gt, the max iou of all proposals
        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)

        # 2. assign negative: below
        # the negative inds are set to be 0
        if isinstance(self.neg_iou_thr, float):
            assigned_gt_inds[(max_overlaps >= 0)
                             & (max_overlaps < self.neg_iou_thr)] = 0
        elif isinstance(self.neg_iou_thr, tuple):
            assert len(self.neg_iou_thr) == 2
            assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
                             & (max_overlaps < self.neg_iou_thr[1])] = 0

        # 3. assign positive: above positive IoU threshold
        pos_inds = max_overlaps >= self.pos_iou_thr
        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1

        if self.match_low_quality:
            # Low-quality matching will overwrite the assigned_gt_inds
            # assigned in Step 3. Thus, the assigned gt might not be the
            # best one for prediction.
            # For example, if bbox A has 0.9 and 0.8 iou with GT bbox
            # 1 & 2, GT bbox 1 will be assigned as the best target for bbox
            # A in step 3. However, if GT bbox 2's gt_argmax_overlaps = A,
            # bbox A's assigned_gt_inds will be overwritten to be GT bbox 2.
            # This might be the reason that it is not used in ROI Heads.
            for i in range(num_gts):
                if gt_max_overlaps[i] >= self.min_pos_iou:
                    if self.gt_max_assign_all:
                        max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
                        assigned_gt_inds[max_iou_inds] = i + 1
                    else:
                        assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None

        return AssignResult(
            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
9,750
44.779343
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/coder/yolo_bbox_coder.py
import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class YOLOBBoxCoder(BaseBBoxCoder): """YOLO BBox coder. Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh). cx, cy in [0., 1.], denotes relative center position w.r.t the center of bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`. Args: eps (float): Min value of cx, cy when encoding. """ def __init__(self, eps=1e-6): super(BaseBBoxCoder, self).__init__() self.eps = eps def encode(self, bboxes, gt_bboxes, stride): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor): Source boxes, e.g., anchors. gt_bboxes (torch.Tensor): Target of the transformation, e.g., ground-truth boxes. stride (torch.Tensor | int): Stride of bboxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5 y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5 w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0] h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1] x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 w = bboxes[..., 2] - bboxes[..., 0] h = bboxes[..., 3] - bboxes[..., 1] w_target = torch.log((w_gt / w).clamp(min=self.eps)) h_target = torch.log((h_gt / h).clamp(min=self.eps)) x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp( self.eps, 1 - self.eps) y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp( self.eps, 1 - self.eps) encoded_bboxes = torch.stack( [x_center_target, y_center_target, w_target, h_target], dim=-1) return encoded_bboxes def decode(self, bboxes, pred_bboxes, stride): """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor): Basic boxes, e.g. anchors. pred_bboxes (torch.Tensor): Encoded boxes with shape stride (torch.Tensor | int): Strides of bboxes. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(0) == bboxes.size(0) assert pred_bboxes.size(-1) == bboxes.size(-1) == 4 x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 w = bboxes[..., 2] - bboxes[..., 0] h = bboxes[..., 3] - bboxes[..., 1] # Get outputs x, y x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center w_pred = torch.exp(pred_bboxes[..., 2]) * w h_pred = torch.exp(pred_bboxes[..., 3]) * h decoded_bboxes = torch.stack( (x_center_pred - w_pred / 2, y_center_pred - h_pred / 2, x_center_pred + w_pred / 2, y_center_pred + h_pred / 2), dim=-1) return decoded_bboxes
3,417
38.287356
77
py
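Since `decode` inverts `encode` exactly whenever no clamping kicks in, a round-trip makes a cheap regression test. A hedged sketch, assuming this repository is on `PYTHONPATH` so the module path from the record above resolves; the boxes and stride are illustrative.

import torch
from mmdet.core.bbox.coder.yolo_bbox_coder import YOLOBBoxCoder

coder = YOLOBBoxCoder()
anchors = torch.tensor([[0., 0., 32., 32.]])
gts = torch.tensor([[4., 8., 28., 24.]])
deltas = coder.encode(anchors, gts, stride=16)
decoded = coder.decode(anchors, deltas, stride=16)
assert torch.allclose(decoded, gts, atol=1e-4)  # exact inverse here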
GFocalV2
GFocalV2-master/mmdet/core/bbox/coder/bucketing_bbox_coder.py
import numpy as np
import torch
import torch.nn.functional as F

from ..builder import BBOX_CODERS
from ..transforms import bbox_rescale
from .base_bbox_coder import BaseBBoxCoder


@BBOX_CODERS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
    """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).

    Boundary Localization with Bucketing and Bucketing Guided Rescoring
    are implemented here.

    Please refer to https://arxiv.org/abs/1912.04260 for more details.

    Args:
        num_buckets (int): Number of buckets.
        scale_factor (int): Scale factor of proposals to generate buckets.
        offset_topk (int): Topk buckets are used to generate
            bucket fine regression targets. Defaults to 2.
        offset_upperbound (float): Offset upperbound to generate
            bucket fine regression targets.
            To avoid too large offset displacements. Defaults to 1.0.
        cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
            Defaults to True.
    """

    def __init__(self,
                 num_buckets,
                 scale_factor,
                 offset_topk=2,
                 offset_upperbound=1.0,
                 cls_ignore_neighbor=True):
        super(BucketingBBoxCoder, self).__init__()
        self.num_buckets = num_buckets
        self.scale_factor = scale_factor
        self.offset_topk = offset_topk
        self.offset_upperbound = offset_upperbound
        self.cls_ignore_neighbor = cls_ignore_neighbor

    def encode(self, bboxes, gt_bboxes):
        """Get bucketing estimation and fine regression targets during
        training.

        Args:
            bboxes (torch.Tensor): source boxes, e.g., object proposals.
            gt_bboxes (torch.Tensor): target of the transformation, e.g.,
                ground truth boxes.

        Returns:
            encoded_bboxes(tuple[Tensor]): bucketing estimation
                and fine regression targets and weights
        """
        assert bboxes.size(0) == gt_bboxes.size(0)
        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
        encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
                                     self.scale_factor, self.offset_topk,
                                     self.offset_upperbound,
                                     self.cls_ignore_neighbor)
        return encoded_bboxes

    def decode(self, bboxes, pred_bboxes, max_shape=None):
        """Apply transformation `pred_bboxes` to `boxes`.

        Args:
            boxes (torch.Tensor): Basic boxes.
            pred_bboxes (torch.Tensor): Predictions for bucketing estimation
                and fine regression
            max_shape (tuple[int], optional): Maximum shape of boxes.
                Defaults to None.

        Returns:
            torch.Tensor: Decoded boxes.
        """
        assert len(pred_bboxes) == 2
        cls_preds, offset_preds = pred_bboxes
        assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
            0) == bboxes.size(0)
        decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
                                     self.num_buckets, self.scale_factor,
                                     max_shape)

        return decoded_bboxes


def generat_buckets(proposals, num_buckets, scale_factor=1.0):
    """Generate buckets w.r.t bucket number and scale factor of proposals.

    Args:
        proposals (Tensor): Shape (n, 4)
        num_buckets (int): Number of buckets.
        scale_factor (float): Scale factor to rescale proposals.

    Returns:
        tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
         t_buckets, d_buckets)

            - bucket_w: Width of buckets on x-axis. Shape (n, ).
            - bucket_h: Height of buckets on y-axis. Shape (n, ).
            - l_buckets: Left buckets. Shape (n, ceil(side_num/2)).
            - r_buckets: Right buckets. Shape (n, ceil(side_num/2)).
            - t_buckets: Top buckets. Shape (n, ceil(side_num/2)).
            - d_buckets: Down buckets. Shape (n, ceil(side_num/2)).
""" proposals = bbox_rescale(proposals, scale_factor) # number of buckets in each side side_num = int(np.ceil(num_buckets / 2.0)) pw = proposals[..., 2] - proposals[..., 0] ph = proposals[..., 3] - proposals[..., 1] px1 = proposals[..., 0] py1 = proposals[..., 1] px2 = proposals[..., 2] py2 = proposals[..., 3] bucket_w = pw / num_buckets bucket_h = ph / num_buckets # left buckets l_buckets = px1[:, None] + (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] # right buckets r_buckets = px2[:, None] - (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] # top buckets t_buckets = py1[:, None] + (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] # down buckets d_buckets = py2[:, None] - (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets def bbox2bucket(proposals, gt, num_buckets, scale_factor, offset_topk=2, offset_upperbound=1.0, cls_ignore_neighbor=True): """Generate buckets estimation and fine regression targets. Args: proposals (Tensor): Shape (n, 4) gt (Tensor): Shape (n, 4) num_buckets (int): Number of buckets. scale_factor (float): Scale factor to rescale proposals. offset_topk (int): Topk buckets are used to generate bucket fine regression targets. Defaults to 2. offset_upperbound (float): Offset allowance to generate bucket fine regression targets. To avoid too large offset displacements. Defaults to 1.0. cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. Defaults to True. Returns: tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights). - offsets: Fine regression targets. \ Shape (n, num_buckets*2). - offsets_weights: Fine regression weights. \ Shape (n, num_buckets*2). - bucket_labels: Bucketing estimation labels. \ Shape (n, num_buckets*2). - cls_weights: Bucketing estimation weights. \ Shape (n, num_buckets*2). 
""" assert proposals.size() == gt.size() # generate buckets proposals = proposals.float() gt = gt.float() (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets) = generat_buckets(proposals, num_buckets, scale_factor) gx1 = gt[..., 0] gy1 = gt[..., 1] gx2 = gt[..., 2] gy2 = gt[..., 3] # generate offset targets and weights # offsets from buckets to gts l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None] r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None] t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None] d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None] # select top-k nearset buckets l_topk, l_label = l_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) r_topk, r_label = r_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) t_topk, t_label = t_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) d_topk, d_label = d_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) offset_l_weights = l_offsets.new_zeros(l_offsets.size()) offset_r_weights = r_offsets.new_zeros(r_offsets.size()) offset_t_weights = t_offsets.new_zeros(t_offsets.size()) offset_d_weights = d_offsets.new_zeros(d_offsets.size()) inds = torch.arange(0, proposals.size(0)).to(proposals).long() # generate offset weights of top-k nearset buckets for k in range(offset_topk): if k >= 1: offset_l_weights[inds, l_label[:, k]] = (l_topk[:, k] < offset_upperbound).float() offset_r_weights[inds, r_label[:, k]] = (r_topk[:, k] < offset_upperbound).float() offset_t_weights[inds, t_label[:, k]] = (t_topk[:, k] < offset_upperbound).float() offset_d_weights[inds, d_label[:, k]] = (d_topk[:, k] < offset_upperbound).float() else: offset_l_weights[inds, l_label[:, k]] = 1.0 offset_r_weights[inds, r_label[:, k]] = 1.0 offset_t_weights[inds, t_label[:, k]] = 1.0 offset_d_weights[inds, d_label[:, k]] = 1.0 offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1) offsets_weights = torch.cat([ offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights ], dim=-1) # generate bucket labels and weight side_num = int(np.ceil(num_buckets / 2.0)) labels = torch.stack( [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1) batch_size = labels.size(0) bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size, -1).float() bucket_cls_l_weights = (l_offsets.abs() < 1).float() bucket_cls_r_weights = (r_offsets.abs() < 1).float() bucket_cls_t_weights = (t_offsets.abs() < 1).float() bucket_cls_d_weights = (d_offsets.abs() < 1).float() bucket_cls_weights = torch.cat([ bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights, bucket_cls_d_weights ], dim=-1) # ignore second nearest buckets for cls if necessay if cls_ignore_neighbor: bucket_cls_weights = (~((bucket_cls_weights == 1) & (bucket_labels == 0))).float() else: bucket_cls_weights[:] = 1.0 return offsets, offsets_weights, bucket_labels, bucket_cls_weights def bucket2bbox(proposals, cls_preds, offset_preds, num_buckets, scale_factor=1.0, max_shape=None): """Apply bucketing estimation (cls preds) and fine regression (offset preds) to generate det bboxes. Args: proposals (Tensor): Boxes to be transformed. Shape (n, 4) cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2). offset_preds (Tensor): fine regression. Shape (n, num_buckets*2). num_buckets (int): Number of buckets. scale_factor (float): Scale factor to rescale proposals. max_shape (tuple[int, int]): Maximum bounds for boxes. 
specifies (H, W) Returns: tuple[Tensor]: (bboxes, loc_confidence). - bboxes: predicted bboxes. Shape (n, 4) - loc_confidence: localization confidence of predicted bboxes. Shape (n,). """ side_num = int(np.ceil(num_buckets / 2.0)) cls_preds = cls_preds.view(-1, side_num) offset_preds = offset_preds.view(-1, side_num) scores = F.softmax(cls_preds, dim=1) score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True) rescaled_proposals = bbox_rescale(proposals, scale_factor) pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0] ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1] px1 = rescaled_proposals[..., 0] py1 = rescaled_proposals[..., 1] px2 = rescaled_proposals[..., 2] py2 = rescaled_proposals[..., 3] bucket_w = pw / num_buckets bucket_h = ph / num_buckets score_inds_l = score_label[0::4, 0] score_inds_r = score_label[1::4, 0] score_inds_t = score_label[2::4, 0] score_inds_d = score_label[3::4, 0] l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h offsets = offset_preds.view(-1, 4, side_num) inds = torch.arange(proposals.size(0)).to(proposals).long() l_offsets = offsets[:, 0, :][inds, score_inds_l] r_offsets = offsets[:, 1, :][inds, score_inds_r] t_offsets = offsets[:, 2, :][inds, score_inds_t] d_offsets = offsets[:, 3, :][inds, score_inds_d] x1 = l_buckets - l_offsets * bucket_w x2 = r_buckets - r_offsets * bucket_w y1 = t_buckets - t_offsets * bucket_h y2 = d_buckets - d_offsets * bucket_h if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]], dim=-1) # bucketing guided rescoring loc_confidence = score_topk[:, 0] top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1 loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float() loc_confidence = loc_confidence.view(-1, 4).mean(dim=1) return bboxes, loc_confidence
13,588
38.967647
79
py
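The target shapes produced by `bbox2bucket` follow directly from `side_num = ceil(num_buckets / 2)`: four sides, each contributing `side_num` columns. A hedged shape check, assuming the repository is importable; the box coordinates and the SABL-style `num_buckets=14` / `scale_factor=1.7` values are illustrative.

import torch
from mmdet.core.bbox.coder.bucketing_bbox_coder import bbox2bucket

proposals = torch.tensor([[0., 0., 64., 64.]])
gt = torch.tensor([[4., 2., 60., 66.]])
offsets, offset_w, labels, cls_w = bbox2bucket(
    proposals, gt, num_buckets=14, scale_factor=1.7)
# each side contributes ceil(14/2) = 7 entries, 4 sides -> 28 columns
print(offsets.shape, labels.shape)  # torch.Size([1, 28]) torch.Size([1, 28])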
GFocalV2
GFocalV2-master/mmdet/core/bbox/coder/pseudo_bbox_coder.py
from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class PseudoBBoxCoder(BaseBBoxCoder): """Pseudo bounding box coder.""" def __init__(self, **kwargs): super(BaseBBoxCoder, self).__init__(**kwargs) def encode(self, bboxes, gt_bboxes): """torch.Tensor: return the given ``bboxes``""" return gt_bboxes def decode(self, bboxes, pred_bboxes): """torch.Tensor: return the given ``pred_bboxes``""" return pred_bboxes
529
26.894737
60
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/coder/tblr_bbox_coder.py
import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class TBLRBBoxCoder(BaseBBoxCoder): """TBLR BBox coder. Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_, this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, right) and decode it back to the original. Args: normalizer (list | float): Normalization factor to be divided with when coding the coordinates. If it is a list, it should have length of 4 indicating normalization factor in tblr dims. Otherwise it is a unified float factor for all dims. Default: 4.0 """ def __init__(self, normalizer=4.0): super(BaseBBoxCoder, self).__init__() self.normalizer = normalizer def encode(self, bboxes, gt_bboxes): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes`` in the (top, left, bottom, right) order. Args: bboxes (torch.Tensor): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): target of the transformation, e.g., ground truth boxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bboxes2tblr( bboxes, gt_bboxes, normalizer=self.normalizer) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None): """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor): Basic boxes. pred_bboxes (torch.Tensor): Encoded boxes with shape max_shape (tuple[int], optional): Maximum shape of boxes. Defaults to None. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(0) == bboxes.size(0) decoded_bboxes = tblr2bboxes( bboxes, pred_bboxes, normalizer=self.normalizer, max_shape=max_shape) return decoded_bboxes def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True): """Encode ground truth boxes to tblr coordinate. It first convert the gt coordinate to tblr format, (top, bottom, left, right), relative to prior box centers. The tblr coordinate may be normalized by the side length of prior bboxes if `normalize_by_wh` is specified as True, and it is then normalized by the `normalizer` factor. Args: priors (Tensor): Prior boxes in point form Shape: (num_proposals,4). gts (Tensor): Coords of ground truth for each prior in point-form Shape: (num_proposals, 4). normalizer (Sequence[float] | float): normalization parameter of encoded boxes. If it is a list, it has to have length = 4. Default: 4.0 normalize_by_wh (bool): Whether to normalize tblr coordinate by the side length (wh) of prior bboxes. 
Return: encoded boxes (Tensor), Shape: (num_proposals, 4) """ # dist b/t match center and prior's center if not isinstance(normalizer, float): normalizer = torch.tensor(normalizer, device=priors.device) assert len(normalizer) == 4, 'Normalizer must have length = 4' assert priors.size(0) == gts.size(0) prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2 xmin, ymin, xmax, ymax = gts.split(1, dim=1) top = prior_centers[:, 1].unsqueeze(1) - ymin bottom = ymax - prior_centers[:, 1].unsqueeze(1) left = prior_centers[:, 0].unsqueeze(1) - xmin right = xmax - prior_centers[:, 0].unsqueeze(1) loc = torch.cat((top, bottom, left, right), dim=1) if normalize_by_wh: # Normalize tblr by anchor width and height wh = priors[:, 2:4] - priors[:, 0:2] w, h = torch.split(wh, 1, dim=1) loc[:, :2] /= h # tb is normalized by h loc[:, 2:] /= w # lr is normalized by w # Normalize tblr by the given normalization factor return loc / normalizer def tblr2bboxes(priors, tblr, normalizer=4.0, normalize_by_wh=True, max_shape=None): """Decode tblr outputs to prediction boxes. The process includes 3 steps: 1) De-normalize tblr coordinates by multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert tblr (top, bottom, left, right) pair relative to the center of priors back to (xmin, ymin, xmax, ymax) coordinate. Args: priors (Tensor): Prior boxes in point form (x0, y0, x1, y1) Shape: (n,4). tblr (Tensor): Coords of network output in tblr form Shape: (n, 4). normalizer (Sequence[float] | float): Normalization parameter of encoded boxes. By list, it represents the normalization factors at tblr dims. By float, it is the unified normalization factor at all dims. Default: 4.0 normalize_by_wh (bool): Whether the tblr coordinates have been normalized by the side length (wh) of prior bboxes. max_shape (tuple, optional): Shape of the image. Decoded bboxes exceeding which will be clamped. Return: encoded boxes (Tensor), Shape: (n, 4) """ if not isinstance(normalizer, float): normalizer = torch.tensor(normalizer, device=priors.device) assert len(normalizer) == 4, 'Normalizer must have length = 4' assert priors.size(0) == tblr.size(0) loc_decode = tblr * normalizer prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2 if normalize_by_wh: wh = priors[:, 2:4] - priors[:, 0:2] w, h = torch.split(wh, 1, dim=1) loc_decode[:, :2] *= h # tb loc_decode[:, 2:] *= w # lr top, bottom, left, right = loc_decode.split(1, dim=1) xmin = prior_centers[:, 0].unsqueeze(1) - left xmax = prior_centers[:, 0].unsqueeze(1) + right ymin = prior_centers[:, 1].unsqueeze(1) - top ymax = prior_centers[:, 1].unsqueeze(1) + bottom boxes = torch.cat((xmin, ymin, xmax, ymax), dim=1) if max_shape is not None: boxes[:, 0].clamp_(min=0, max=max_shape[1]) boxes[:, 1].clamp_(min=0, max=max_shape[0]) boxes[:, 2].clamp_(min=0, max=max_shape[1]) boxes[:, 3].clamp_(min=0, max=max_shape[0]) return boxes
6,581
38.650602
78
py
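As with the other coders, `tblr2bboxes` is the exact inverse of `bboxes2tblr` whenever the `max_shape` clamp does not fire, which suggests a simple round-trip check. Hedged sketch; assumes the repository is importable and the boxes are illustrative.

import torch
from mmdet.core.bbox.coder.tblr_bbox_coder import bboxes2tblr, tblr2bboxes

priors = torch.tensor([[10., 10., 50., 30.]])
gts = torch.tensor([[12., 8., 44., 28.]])
loc = bboxes2tblr(priors, gts, normalizer=4.0)
restored = tblr2bboxes(priors, loc, normalizer=4.0)  # no max_shape clamp
assert torch.allclose(restored, gts, atol=1e-5)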
GFocalV2
GFocalV2-master/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
import numpy as np import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder): """Legacy Delta XYWH BBox coder used in MMDet V1.x. Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2). Note: The main difference between :class`LegacyDeltaXYWHBBoxCoder` and :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and height calculation. We suggest to only use this coder when testing with MMDet V1.x models. References: .. [1] https://arxiv.org/abs/1311.2524 Args: target_means (Sequence[float]): denormalizing means of target for delta coordinates target_stds (Sequence[float]): denormalizing standard deviation of target for delta coordinates """ def __init__(self, target_means=(0., 0., 0., 0.), target_stds=(1., 1., 1., 1.)): super(BaseBBoxCoder, self).__init__() self.means = target_means self.stds = target_stds def encode(self, bboxes, gt_bboxes): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): target of the transformation, e.g., ground-truth boxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means, self.stds) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None, wh_ratio_clip=16 / 1000): """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor): Basic boxes. pred_bboxes (torch.Tensor): Encoded boxes with shape max_shape (tuple[int], optional): Maximum shape of boxes. Defaults to None. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(0) == bboxes.size(0) decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip) return decoded_bboxes def legacy_bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)): """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner. We usually compute the deltas of x, y, w, h of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of `delta2bbox()` Args: proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates Returns: Tensor: deltas with shape (N, 4), where columns represent dx, dy, dw, dh. 
""" assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] + 1.0 ph = proposals[..., 3] - proposals[..., 1] + 1.0 gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] + 1.0 gh = gt[..., 3] - gt[..., 1] + 1.0 dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas def legacy_delta2bbox(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000): """Apply deltas to shift/scale base boxes in the MMDet V1.x manner. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of `bbox2delta()` Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) wh_ratio_clip (float): Maximum aspect ratio for boxes. Returns: Tensor: Boxes with shape (N, 4), where columns represent tl_x, tl_y, br_x, br_y. References: .. [1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32)) tensor([[0.0000, 0.0000, 1.5000, 1.5000], [0.0000, 0.0000, 5.2183, 5.2183], [0.0000, 0.1321, 7.8891, 0.8679], [5.3967, 2.4251, 6.0033, 3.7749]]) """ means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[:, 0::4] dy = denorm_deltas[:, 1::4] dw = denorm_deltas[:, 2::4] dh = denorm_deltas[:, 3::4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Compute center of each roi px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) # Compute width/height of each roi pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = px + pw * dx gy = py + ph * dy # Convert center-xy/width/height to top-left, bottom-right # The true legacy box coder should +- 0.5 here. 
# However, current implementation improves the performance when testing # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP) x1 = gx - gw * 0.5 y1 = gy - gh * 0.5 x2 = gx + gw * 0.5 y2 = gy + gh * 0.5 if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) return bboxes
8,147
37.253521
79
py
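The practical effect of the legacy `+ 1.0` width/height convention is easy to demonstrate: decoding an all-zero delta does not return the input roi, because `pw = ph = 11` for a 0..10 box. A hedged sketch, assuming the repository is importable.

import torch
from mmdet.core.bbox.coder.legacy_delta_xywh_bbox_coder import (
    legacy_delta2bbox)

rois = torch.tensor([[0., 0., 10., 10.]])
deltas = torch.zeros(1, 4)
print(legacy_delta2bbox(rois, deltas))
# tensor([[-0.5000, -0.5000, 10.5000, 10.5000]]) since pw = ph = 11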
GFocalV2
GFocalV2-master/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
import numpy as np import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class DeltaXYWHBBoxCoder(BaseBBoxCoder): """Delta XYWH BBox coder. Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_, this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2). Args: target_means (Sequence[float]): Denormalizing means of target for delta coordinates target_stds (Sequence[float]): Denormalizing standard deviation of target for delta coordinates """ def __init__(self, target_means=(0., 0., 0., 0.), target_stds=(1., 1., 1., 1.)): super(BaseBBoxCoder, self).__init__() self.means = target_means self.stds = target_stds def encode(self, bboxes, gt_bboxes): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor): Source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): Target of the transformation, e.g., ground-truth boxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None, wh_ratio_clip=16 / 1000): """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor): Basic boxes. pred_bboxes (torch.Tensor): Encoded boxes with shape max_shape (tuple[int], optional): Maximum shape of boxes. Defaults to None. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(0) == bboxes.size(0) decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip) return decoded_bboxes def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)): """Compute deltas of proposals w.r.t. gt. We usually compute the deltas of x, y, w, h of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of :func:`delta2bbox`. Args: proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates Returns: Tensor: deltas with shape (N, 4), where columns represent dx, dy, dw, dh. """ assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] ph = proposals[..., 3] - proposals[..., 1] gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] gh = gt[..., 3] - gt[..., 1] dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas def delta2bbox(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000): """Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. 
This is the inverse function of :func:`bbox2delta`. Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) wh_ratio_clip (float): Maximum aspect ratio for boxes. Returns: Tensor: Boxes with shape (N, 4), where columns represent tl_x, tl_y, br_x, br_y. References: .. [1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> delta2bbox(rois, deltas, max_shape=(32, 32)) tensor([[0.0000, 0.0000, 1.0000, 1.0000], [0.1409, 0.1409, 2.8591, 2.8591], [0.0000, 0.3161, 4.1945, 0.6839], [5.0000, 5.0000, 5.0000, 5.0000]]) """ means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(1) // 4) stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[:, 0::4] dy = denorm_deltas[:, 1::4] dw = denorm_deltas[:, 2::4] dh = denorm_deltas[:, 3::4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Compute center of each roi px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) # Compute width/height of each roi pw = (rois[:, 2] - rois[:, 0]).unsqueeze(1).expand_as(dw) ph = (rois[:, 3] - rois[:, 1]).unsqueeze(1).expand_as(dh) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = px + pw * dx gy = py + ph * dy # Convert center-xy/width/height to top-left, bottom-right x1 = gx - gw * 0.5 y1 = gy - gh * 0.5 x2 = gx + gw * 0.5 y2 = gy + gh * 0.5 if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1]) y1 = y1.clamp(min=0, max=max_shape[0]) x2 = x2.clamp(min=0, max=max_shape[1]) y2 = y2.clamp(min=0, max=max_shape[0]) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) return bboxes
7,363
36.191919
79
py
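By contrast, the standard coder above round-trips exactly (up to float error) with the identity `means`/`stds` defaults. A hedged check with illustrative boxes, again assuming the repository is importable.

import torch
from mmdet.core.bbox.coder.delta_xywh_bbox_coder import (
    bbox2delta, delta2bbox)

proposals = torch.tensor([[0., 0., 10., 10.]])
gt = torch.tensor([[1., 2., 9., 12.]])
deltas = bbox2delta(proposals, gt)               # (dx, dy, dw, dh)
assert torch.allclose(delta2bbox(proposals, deltas), gt, atol=1e-4)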
GFocalV2
GFocalV2-master/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
import torch

from .builder import IOU_CALCULATORS


@IOU_CALCULATORS.register_module()
class BboxOverlaps2D(object):
    """2D Overlaps (e.g. IoUs, GIoUs) Calculator."""

    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
        """Calculate IoU between 2D bboxes.

        Args:
            bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
                format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
            bboxes2 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
                format, shape (m, 5) in <x1, y1, x2, y2, score> format, or be
                empty. If ``is_aligned `` is ``True``, then m and n must be
                equal.
            mode (str): "iou" (intersection over union), "iof" (intersection
                over foreground), or "giou" (generalized intersection over
                union).
            is_aligned (bool, optional): If True, then m and n must be equal.
                Default False.

        Returns:
            Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
        """
        assert bboxes1.size(-1) in [0, 4, 5]
        assert bboxes2.size(-1) in [0, 4, 5]
        if bboxes2.size(-1) == 5:
            bboxes2 = bboxes2[..., :4]
        if bboxes1.size(-1) == 5:
            bboxes1 = bboxes1[..., :4]
        return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)

    def __repr__(self):
        """str: a string describing the module"""
        repr_str = self.__class__.__name__ + '()'
        return repr_str


def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
    """Calculate overlap between two set of bboxes.

    If ``is_aligned `` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.

    Args:
        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
            B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned `` is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union) or "iof" (intersection
            over foreground).
        is_aligned (bool, optional): If True, then m and n must be equal.
            Default False.
        eps (float, optional): A value added to the denominator for numerical
            stability. Default 1e-6.

    Returns:
        Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)

    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> overlaps = bbox_overlaps(bboxes1, bboxes2)
        >>> assert overlaps.shape == (3, 3)
        >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
        >>> assert overlaps.shape == (3, )

    Example:
        >>> empty = torch.empty(0, 4)
        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """

    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
    # Either the boxes are empty or the length of boxes' last dimension is 4
    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)

    # Batch dim must be the same
    # Batch dim: (B1, B2, ...
Bn) assert bboxes1.shape[:-2] == bboxes2.shape[:-2] batch_shape = bboxes1.shape[:-2] rows = bboxes1.size(-2) cols = bboxes2.size(-2) if is_aligned: assert rows == cols if rows * cols == 0: if is_aligned: return bboxes1.new(batch_shape + (rows, )) else: return bboxes1.new(batch_shape + (rows, cols)) area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( bboxes1[..., 3] - bboxes1[..., 1]) area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( bboxes2[..., 3] - bboxes2[..., 1]) if is_aligned: lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] wh = (rb - lt).clamp(min=0) # [B, rows, 2] overlap = wh[..., 0] * wh[..., 1] if mode in ['iou', 'giou']: union = area1 + area2 - overlap else: union = area1 if mode == 'giou': enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) else: lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) # [B, rows, cols, 2] rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] wh = (rb - lt).clamp(min=0) # [B, rows, cols, 2] overlap = wh[..., 0] * wh[..., 1] if mode in ['iou', 'giou']: union = area1[..., None] + area2[..., None, :] - overlap else: union = area1[..., None] if mode == 'giou': enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) eps = union.new_tensor([eps]) union = torch.max(union, eps) ious = overlap / union if mode in ['iou', 'iof']: return ious # calculate gious enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0) enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] enclose_area = torch.max(enclose_area, eps) gious = ious - (enclose_area - union) / enclose_area return gious
6,111
37.440252
79
py
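Two overlapping unit squares make the `iou` vs `giou` modes above concrete; the expected values below are hand-computed (intersection 25, union 175, enclosing box 225). Assumes the repository is importable.

import torch
from mmdet.core.bbox.iou_calculators.iou2d_calculator import bbox_overlaps

b1 = torch.tensor([[0., 0., 10., 10.]])
b2 = torch.tensor([[5., 5., 15., 15.]])
print(bbox_overlaps(b1, b2, mode='iou'))   # 25/175 ~= 0.1429
print(bbox_overlaps(b1, b2, mode='giou'))  # 0.1429 - 50/225 ~= -0.0794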
GFocalV2
GFocalV2-master/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
import numpy as np import torch from ..builder import BBOX_SAMPLERS from .random_sampler import RandomSampler @BBOX_SAMPLERS.register_module() class InstanceBalancedPosSampler(RandomSampler): """Instance balanced sampler that samples equal number of positive samples for each instance.""" def _sample_pos(self, assign_result, num_expected, **kwargs): """Sample positive boxes. Args: assign_result (:obj:`AssignResult`): The assigned results of boxes. num_expected (int): The number of expected positive samples Returns: Tensor or ndarray: sampled indices. """ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: unique_gt_inds = assign_result.gt_inds[pos_inds].unique() num_gts = len(unique_gt_inds) num_per_gt = int(round(num_expected / float(num_gts)) + 1) sampled_inds = [] for i in unique_gt_inds: inds = torch.nonzero( assign_result.gt_inds == i.item(), as_tuple=False) if inds.numel() != 0: inds = inds.squeeze(1) else: continue if len(inds) > num_per_gt: inds = self.random_choice(inds, num_per_gt) sampled_inds.append(inds) sampled_inds = torch.cat(sampled_inds) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array( list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) extra_inds = torch.from_numpy(extra_inds).to( assign_result.gt_inds.device).long() sampled_inds = torch.cat([sampled_inds, extra_inds]) elif len(sampled_inds) > num_expected: sampled_inds = self.random_choice(sampled_inds, num_expected) return sampled_inds
2,271
39.571429
79
py
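The per-instance quota in `_sample_pos` above is simply `round(num_expected / num_gts) + 1`, with a top-up pass afterwards if the per-gt draws fall short. A tiny illustration of the quota arithmetic (values made up):

num_expected, num_gts = 6, 2
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
print(num_per_gt)  # 4: each of the 2 instances may contribute up to 4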
GFocalV2
GFocalV2-master/mmdet/core/bbox/samplers/base_sampler.py
from abc import ABCMeta, abstractmethod import torch from .sampling_result import SamplingResult class BaseSampler(metaclass=ABCMeta): """Base class of samplers.""" def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): self.num = num self.pos_fraction = pos_fraction self.neg_pos_ub = neg_pos_ub self.add_gt_as_proposals = add_gt_as_proposals self.pos_sampler = self self.neg_sampler = self @abstractmethod def _sample_pos(self, assign_result, num_expected, **kwargs): """Sample positive samples.""" pass @abstractmethod def _sample_neg(self, assign_result, num_expected, **kwargs): """Sample negative samples.""" pass def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, **kwargs): """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. bboxes (Tensor): Boxes to be sampled from. gt_bboxes (Tensor): Ground truth bboxes. gt_labels (Tensor, optional): Class labels of ground truth bboxes. Returns: :obj:`SamplingResult`: Sampling result. Example: >>> from mmdet.core.bbox import RandomSampler >>> from mmdet.core.bbox import AssignResult >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes >>> rng = ensure_rng(None) >>> assign_result = AssignResult.random(rng=rng) >>> bboxes = random_boxes(assign_result.num_preds, rng=rng) >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) >>> gt_labels = None >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, >>> add_gt_as_proposals=False) >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels) """ if len(bboxes.shape) < 2: bboxes = bboxes[None, :] bboxes = bboxes[:, :4] gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals and len(gt_bboxes) > 0: if gt_labels is None: raise ValueError( 'gt_labels must be given when add_gt_as_proposals is True') bboxes = torch.cat([gt_bboxes, bboxes], dim=0) assign_result.add_gt_(gt_labels) gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=bboxes, **kwargs) # We found that sampled indices have duplicated items occasionally. # (may be a bug of PyTorch) pos_inds = pos_inds.unique() num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes=bboxes, **kwargs) neg_inds = neg_inds.unique() sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags) return sampling_result
3,872
36.970588
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/samplers/random_sampler.py
import torch

from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler


@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
    """Random sampler.

    Args:
        num (int): Number of samples
        pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
            positive samples. Defaults to -1.
        add_gt_as_proposals (bool, optional): Whether to add ground truth
            boxes as proposals. Defaults to True.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        from mmdet.core.bbox import demodata
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                            add_gt_as_proposals)
        self.rng = demodata.ensure_rng(kwargs.get('rng', None))

    def random_choice(self, gallery, num):
        """Randomly select some elements from the gallery.

        If `gallery` is a Tensor, the returned indices will be a Tensor;
        If `gallery` is a ndarray or list, the returned indices will be a
        ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert len(gallery) >= num

        is_tensor = isinstance(gallery, torch.Tensor)
        if not is_tensor:
            if torch.cuda.is_available():
                device = torch.cuda.current_device()
            else:
                device = 'cpu'
            gallery = torch.tensor(gallery, dtype=torch.long, device=device)
        perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
        rand_inds = gallery[perm]
        if not is_tensor:
            rand_inds = rand_inds.cpu().numpy()
        return rand_inds

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample some positive samples."""
        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            return pos_inds
        else:
            return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Randomly sample some negative samples."""
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            return self.random_choice(neg_inds, num_expected)
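# Illustrative sketch (not part of the original file): `random_choice`
# accepts a Tensor, ndarray or list and returns the same kind of container.
#
# >>> import torch
# >>> sampler = RandomSampler(num=8, pos_fraction=0.5)
# >>> gallery = torch.arange(10)
# >>> picked = sampler.random_choice(gallery, 3)
# >>> picked.shape
# torch.Size([3])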
2,817
34.670886
76
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/samplers/ohem_sampler.py
import torch from ..builder import BBOX_SAMPLERS from ..transforms import bbox2roi from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class OHEMSampler(BaseSampler): r"""Online Hard Example Mining Sampler described in `Training Region-based Object Detectors with Online Hard Example Mining <https://arxiv.org/abs/1604.03540>`_. """ def __init__(self, num, pos_fraction, context, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) self.context = context if not hasattr(self.context, 'num_stages'): self.bbox_head = self.context.bbox_head else: self.bbox_head = self.context.bbox_head[self.context.current_stage] def hard_mining(self, inds, num_expected, bboxes, labels, feats): with torch.no_grad(): rois = bbox2roi([bboxes]) if not hasattr(self.context, 'num_stages'): bbox_results = self.context._bbox_forward(feats, rois) else: bbox_results = self.context._bbox_forward( self.context.current_stage, feats, rois) cls_score = bbox_results['cls_score'] loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=rois, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls'] _, topk_loss_inds = loss.topk(num_expected) return inds[topk_loss_inds] def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): """Sample positive boxes. Args: assign_result (:obj:`AssignResult`): Assigned results num_expected (int): Number of expected positive samples bboxes (torch.Tensor, optional): Boxes. Defaults to None. feats (list[torch.Tensor], optional): Multi-level features. Defaults to None. Returns: torch.Tensor: Indices of positive samples """ # Sample some hard positive samples pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats) def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): """Sample negative boxes. Args: assign_result (:obj:`AssignResult`): Assigned results num_expected (int): Number of expected negative samples bboxes (torch.Tensor, optional): Boxes. Defaults to None. feats (list[torch.Tensor], optional): Multi-level features. Defaults to None. Returns: torch.Tensor: Indices of negative samples """ # Sample some hard negative samples neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: neg_labels = assign_result.labels.new_empty( neg_inds.size(0)).fill_(self.bbox_head.num_classes) return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], neg_labels, feats)
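# Illustrative sketch (not part of the original file): the core of the hard
# mining above is "keep the `num_expected` candidates with the highest
# loss". With plain torch:
#
# >>> import torch
# >>> loss = torch.tensor([0.1, 0.9, 0.3, 0.7])   # per-candidate cls loss
# >>> inds = torch.tensor([10, 11, 12, 13])       # candidate indices
# >>> _, topk_loss_inds = loss.topk(2)
# >>> inds[topk_loss_inds]                        # the two hardest examples
# tensor([11, 13])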
4,098
36.953704
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
import numpy as np
import torch

from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler


@BBOX_SAMPLERS.register_module()
class IoUBalancedNegSampler(RandomSampler):
    """IoU Balanced Sampling.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Sampling proposals according to their IoU. `floor_fraction` of needed
    RoIs are sampled randomly from proposals whose IoU is lower than
    `floor_thr`. The others are sampled from proposals whose IoU is higher
    than `floor_thr`; these proposals are sampled evenly from `num_bins`
    bins that split the IoU range above `floor_thr`.

    Args:
        num (int): number of proposals.
        pos_fraction (float): fraction of positive proposals.
        floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
            set to -1 if all using IoU balanced sampling.
        floor_fraction (float): sampling fraction of proposals under
            floor_thr.
        num_bins (int): number of bins in IoU balanced sampling.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 floor_thr=-1,
                 floor_fraction=0,
                 num_bins=3,
                 **kwargs):
        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
                                                    **kwargs)
        assert floor_thr >= 0 or floor_thr == -1
        assert 0 <= floor_fraction <= 1
        assert num_bins >= 1

        self.floor_thr = floor_thr
        self.floor_fraction = floor_fraction
        self.num_bins = num_bins

    def sample_via_interval(self, max_overlaps, full_set, num_expected):
        """Sample according to the iou interval.

        Args:
            max_overlaps (torch.Tensor): IoU between bounding boxes and
                ground truth boxes.
            full_set (set(int)): A full set of indices of boxes.
            num_expected (int): Number of expected samples.

        Returns:
            np.ndarray: Indices of samples
        """
        max_iou = max_overlaps.max()
        iou_interval = (max_iou - self.floor_thr) / self.num_bins
        per_num_expected = int(num_expected / self.num_bins)

        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = self.floor_thr + i * iou_interval
            end_iou = self.floor_thr + (i + 1) * iou_interval
            tmp_set = set(
                np.where(
                    np.logical_and(max_overlaps >= start_iou,
                                   max_overlaps < end_iou))[0])
            tmp_inds = list(tmp_set & full_set)
            if len(tmp_inds) > per_num_expected:
                tmp_sampled_set = self.random_choice(tmp_inds,
                                                     per_num_expected)
            else:
                # np.int was removed in recent NumPy; use np.int64 instead
                tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)
            sampled_inds.append(tmp_sampled_set)

        sampled_inds = np.concatenate(sampled_inds)
        if len(sampled_inds) < num_expected:
            num_extra = num_expected - len(sampled_inds)
            extra_inds = np.array(list(full_set - set(sampled_inds)))
            if len(extra_inds) > num_extra:
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])

        return sampled_inds

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Sample negative boxes.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of
                boxes.
            num_expected (int): The number of expected negative samples

        Returns:
            Tensor or ndarray: sampled indices.
        """
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            max_overlaps = assign_result.max_overlaps.cpu().numpy()
            # balance sampling for negative samples
            neg_set = set(neg_inds.cpu().numpy())

            if self.floor_thr > 0:
                floor_set = set(
                    np.where(
                        np.logical_and(max_overlaps >= 0,
                                       max_overlaps < self.floor_thr))[0])
                iou_sampling_set = set(
                    np.where(max_overlaps >= self.floor_thr)[0])
            elif self.floor_thr == 0:
                floor_set = set(np.where(max_overlaps == 0)[0])
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
            else:
                floor_set = set()
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
                # for sampling interval calculation
                self.floor_thr = 0

            floor_neg_inds = list(floor_set & neg_set)
            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
            num_expected_iou_sampling = int(num_expected *
                                            (1 - self.floor_fraction))
            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
                if self.num_bins >= 2:
                    iou_sampled_inds = self.sample_via_interval(
                        max_overlaps, set(iou_sampling_neg_inds),
                        num_expected_iou_sampling)
                else:
                    iou_sampled_inds = self.random_choice(
                        iou_sampling_neg_inds, num_expected_iou_sampling)
            else:
                iou_sampled_inds = np.array(
                    iou_sampling_neg_inds, dtype=np.int64)
            num_expected_floor = num_expected - len(iou_sampled_inds)
            if len(floor_neg_inds) > num_expected_floor:
                sampled_floor_inds = self.random_choice(
                    floor_neg_inds, num_expected_floor)
            else:
                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64)
            sampled_inds = np.concatenate(
                (sampled_floor_inds, iou_sampled_inds))
            if len(sampled_inds) < num_expected:
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(list(neg_set - set(sampled_inds)))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                sampled_inds = np.concatenate((sampled_inds, extra_inds))
            sampled_inds = torch.from_numpy(sampled_inds).long().to(
                assign_result.gt_inds.device)
            return sampled_inds
6,692
41.360759
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/samplers/score_hlr_sampler.py
import torch
from mmcv.ops import nms_match

from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult


@BBOX_SAMPLERS.register_module()
class ScoreHLRSampler(BaseSampler):
    r"""Importance-based Sample Reweighting (ISR_N), described in `Prime
    Sample Attention in Object Detection
    <https://arxiv.org/abs/1904.04821>`_.

    Score hierarchical local rank (HLR) sampling differs from RandomSampler
    in the negative part. It first computes the Score-HLR in a two-step way,
    then linearly maps the Score-HLR to the loss weights.

    Args:
        num (int): Total number of sampled RoIs.
        pos_fraction (float): Fraction of positive samples.
        context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.
        neg_pos_ub (int): Upper bound of the ratio of num negative to num
            positive, -1 means no upper bound.
        add_gt_as_proposals (bool): Whether to add ground truth as proposals.
        k (float): Power of the non-linear mapping.
        bias (float): Shift of the non-linear mapping.
        score_thr (float): Minimum score that a negative sample is to be
            considered as valid bbox.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 context,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 k=0.5,
                 bias=0,
                 score_thr=0.05,
                 iou_thr=0.5,
                 **kwargs):
        super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
        self.k = k
        self.bias = bias
        self.score_thr = score_thr
        self.iou_thr = iou_thr
        self.context = context
        # context of cascade detectors is a list, so distinguish them here.
        if not hasattr(context, 'num_stages'):
            self.bbox_roi_extractor = context.bbox_roi_extractor
            self.bbox_head = context.bbox_head
            self.with_shared_head = context.with_shared_head
            if self.with_shared_head:
                self.shared_head = context.shared_head
        else:
            self.bbox_roi_extractor = context.bbox_roi_extractor[
                context.current_stage]
            self.bbox_head = context.bbox_head[context.current_stage]

    @staticmethod
    def random_choice(gallery, num):
        """Randomly select some elements from the gallery.

        If `gallery` is a Tensor, the returned indices will be a Tensor;
        If `gallery` is a ndarray or list, the returned indices will be a
        ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert len(gallery) >= num

        is_tensor = isinstance(gallery, torch.Tensor)
        if not is_tensor:
            if torch.cuda.is_available():
                device = torch.cuda.current_device()
            else:
                device = 'cpu'
            gallery = torch.tensor(gallery, dtype=torch.long, device=device)
        perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
        rand_inds = gallery[perm]
        if not is_tensor:
            rand_inds = rand_inds.cpu().numpy()
        return rand_inds

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample some positive samples."""
        pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
        if pos_inds.numel() <= num_expected:
            return pos_inds
        else:
            return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self,
                    assign_result,
                    num_expected,
                    bboxes,
                    feats=None,
                    img_meta=None,
                    **kwargs):
        """Sample negative samples.

        Score-HLR sampler is done in the following steps:

        1. Take the maximum positive score prediction of each negative
           sample as s_i.
        2. Filter out negative samples whose s_i <= score_thr; the remaining
           samples are called valid samples.
        3. Use NMS-Match to divide valid samples into different groups;
           samples in the same group greatly overlap with each other.
        4. Rank the matched samples in two steps to get Score-HLR.
           (1) In the same group, rank samples with their scores.
(2) In the same score rank across different groups, rank samples with their scores again. 5. Linearly map Score-HLR to the final label weights. Args: assign_result (:obj:`AssignResult`): result of assigner. num_expected (int): Expected number of samples. bboxes (Tensor): bbox to be sampled. feats (Tensor): Features come from FPN. img_meta (dict): Meta information dictionary. """ neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten() num_neg = neg_inds.size(0) if num_neg == 0: return neg_inds, None with torch.no_grad(): neg_bboxes = bboxes[neg_inds] neg_rois = bbox2roi([neg_bboxes]) bbox_result = self.context._bbox_forward(feats, neg_rois) cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[ 'bbox_pred'] ori_loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=None, labels=neg_inds.new_full((num_neg, ), self.bbox_head.num_classes), label_weights=cls_score.new_ones(num_neg), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls'] # filter out samples with the max score lower than score_thr max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1) valid_inds = (max_score > self.score_thr).nonzero().view(-1) invalid_inds = (max_score <= self.score_thr).nonzero().view(-1) num_valid = valid_inds.size(0) num_invalid = invalid_inds.size(0) num_expected = min(num_neg, num_expected) num_hlr = min(num_valid, num_expected) num_rand = num_expected - num_hlr if num_valid > 0: valid_rois = neg_rois[valid_inds] valid_max_score = max_score[valid_inds] valid_argmax_score = argmax_score[valid_inds] valid_bbox_pred = bbox_pred[valid_inds] # valid_bbox_pred shape: [num_valid, #num_classes, 4] valid_bbox_pred = valid_bbox_pred.view( valid_bbox_pred.size(0), -1, 4) selected_bbox_pred = valid_bbox_pred[range(num_valid), valid_argmax_score] pred_bboxes = self.bbox_head.bbox_coder.decode( valid_rois[:, 1:], selected_bbox_pred) pred_bboxes_with_score = torch.cat( [pred_bboxes, valid_max_score[:, None]], -1) group = nms_match(pred_bboxes_with_score, self.iou_thr) # imp: importance imp = cls_score.new_zeros(num_valid) for g in group: g_score = valid_max_score[g] # g_score has already sorted rank = g_score.new_tensor(range(g_score.size(0))) imp[g] = num_valid - rank + g_score _, imp_rank_inds = imp.sort(descending=True) _, imp_rank = imp_rank_inds.sort() hlr_inds = imp_rank_inds[:num_expected] if num_rand > 0: rand_inds = torch.randperm(num_invalid)[:num_rand] select_inds = torch.cat( [valid_inds[hlr_inds], invalid_inds[rand_inds]]) else: select_inds = valid_inds[hlr_inds] neg_label_weights = cls_score.new_ones(num_expected) up_bound = max(num_expected, num_valid) imp_weights = (up_bound - imp_rank[hlr_inds].float()) / up_bound neg_label_weights[:num_hlr] = imp_weights neg_label_weights[num_hlr:] = imp_weights.min() neg_label_weights = (self.bias + (1 - self.bias) * neg_label_weights).pow( self.k) ori_selected_loss = ori_loss[select_inds] new_loss = ori_selected_loss * neg_label_weights norm_ratio = ori_selected_loss.sum() / new_loss.sum() neg_label_weights *= norm_ratio else: neg_label_weights = cls_score.new_ones(num_expected) select_inds = torch.randperm(num_neg)[:num_expected] return neg_inds[select_inds], neg_label_weights def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, img_meta=None, **kwargs): """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. 
            bboxes (Tensor): Boxes to be sampled from.
            gt_bboxes (Tensor): Ground truth bboxes.
            gt_labels (Tensor, optional): Class labels of ground truth
                bboxes.

        Returns:
            tuple[:obj:`SamplingResult`, Tensor]: Sampling result and
                negative label weights.
        """
        bboxes = bboxes[:, :4]

        gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
        if self.add_gt_as_proposals:
            bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
            assign_result.add_gt_(gt_labels)
            gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
            gt_flags = torch.cat([gt_ones, gt_flags])

        num_expected_pos = int(self.num * self.pos_fraction)
        pos_inds = self.pos_sampler._sample_pos(
            assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
        num_sampled_pos = pos_inds.numel()
        num_expected_neg = self.num - num_sampled_pos
        if self.neg_pos_ub >= 0:
            _pos = max(1, num_sampled_pos)
            neg_upper_bound = int(self.neg_pos_ub * _pos)
            if num_expected_neg > neg_upper_bound:
                num_expected_neg = neg_upper_bound

        neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
            assign_result,
            num_expected_neg,
            bboxes,
            img_meta=img_meta,
            **kwargs)

        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                              assign_result, gt_flags), neg_label_weights
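# Illustrative sketch (not part of the original file): the final reweighting
# above maps a normalized rank weight w in [0, 1] through
# (bias + (1 - bias) * w) ** k, a shifted power curve. With the defaults
# k=0.5, bias=0:
#
# >>> import torch
# >>> w = torch.tensor([0.0, 0.25, 1.0])
# >>> (0 + (1 - 0) * w).pow(0.5)
# tensor([0.0000, 0.5000, 1.0000])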
11,187
41.218868
79
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/samplers/sampling_result.py
import torch

from mmdet.utils import util_mixins


class SamplingResult(util_mixins.NiceRepr):
    """Bbox sampling result.

    Example:
        >>> # xdoctest: +IGNORE_WANT
        >>> from mmdet.core.bbox.samplers.sampling_result import *  # NOQA
        >>> self = SamplingResult.random(rng=10)
        >>> print(f'self = {self}')
        self = <SamplingResult({
            'neg_bboxes': torch.Size([12, 4]),
            'neg_inds': tensor([ 0,  1,  2,  4,  5,  6,  7,  8,  9, 10, 11, 12]),
            'num_gts': 4,
            'pos_assigned_gt_inds': tensor([], dtype=torch.int64),
            'pos_bboxes': torch.Size([0, 4]),
            'pos_inds': tensor([], dtype=torch.int64),
            'pos_is_gt': tensor([], dtype=torch.uint8)
        })>
    """

    def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
                 gt_flags):
        self.pos_inds = pos_inds
        self.neg_inds = neg_inds
        self.pos_bboxes = bboxes[pos_inds]
        self.neg_bboxes = bboxes[neg_inds]
        self.pos_is_gt = gt_flags[pos_inds]

        self.num_gts = gt_bboxes.shape[0]
        self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1

        if gt_bboxes.numel() == 0:
            # hack for index error case
            assert self.pos_assigned_gt_inds.numel() == 0
            self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
        else:
            if len(gt_bboxes.shape) < 2:
                gt_bboxes = gt_bboxes.view(-1, 4)

            self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]

        if assign_result.labels is not None:
            self.pos_gt_labels = assign_result.labels[pos_inds]
        else:
            self.pos_gt_labels = None

    @property
    def bboxes(self):
        """torch.Tensor: concatenated positive and negative boxes"""
        return torch.cat([self.pos_bboxes, self.neg_bboxes])

    def to(self, device):
        """Change the device of the data inplace.

        Example:
            >>> self = SamplingResult.random()
            >>> print(f'self = {self.to(None)}')
            >>> # xdoctest: +REQUIRES(--gpu)
            >>> print(f'self = {self.to(0)}')
        """
        _dict = self.__dict__
        for key, value in _dict.items():
            if isinstance(value, torch.Tensor):
                _dict[key] = value.to(device)
        return self

    def __nice__(self):
        data = self.info.copy()
        data['pos_bboxes'] = data.pop('pos_bboxes').shape
        data['neg_bboxes'] = data.pop('neg_bboxes').shape
        parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
        body = '    ' + ',\n    '.join(parts)
        return '{\n' + body + '\n}'

    @property
    def info(self):
        """Returns a dictionary of info about the object."""
        return {
            'pos_inds': self.pos_inds,
            'neg_inds': self.neg_inds,
            'pos_bboxes': self.pos_bboxes,
            'neg_bboxes': self.neg_bboxes,
            'pos_is_gt': self.pos_is_gt,
            'num_gts': self.num_gts,
            'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
        }

    @classmethod
    def random(cls, rng=None, **kwargs):
        """
        Args:
            rng (None | int | numpy.random.RandomState): seed or state.
            kwargs (keyword arguments):
                - num_preds: number of predicted boxes
                - num_gts: number of true boxes
                - p_ignore (float): probability of a predicted box assigned \
                    to an ignored truth.
                - p_assigned (float): probability of a predicted box not \
                    being assigned.
                - p_use_label (float | bool): with labels or not.

        Returns:
            :obj:`SamplingResult`: Randomly generated sampling result.

        Example:
            >>> from mmdet.core.bbox.samplers.sampling_result import *  # NOQA
            >>> self = SamplingResult.random()
            >>> print(self.__dict__)
        """
        from mmdet.core.bbox.samplers.random_sampler import RandomSampler
        from mmdet.core.bbox.assigners.assign_result import AssignResult
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(rng)

        # make probabilistic?
        num = 32
        pos_fraction = 0.5
        neg_pos_ub = -1

        assign_result = AssignResult.random(rng=rng, **kwargs)

        # Note we could just compute an assignment
        bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)
        gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)

        if rng.rand() > 0.2:
            # sometimes algorithms squeeze their data, be robust to that
            gt_bboxes = gt_bboxes.squeeze()
            bboxes = bboxes.squeeze()

        if assign_result.labels is None:
            gt_labels = None
        else:
            gt_labels = None  # todo

        if gt_labels is None:
            add_gt_as_proposals = False
        else:
            add_gt_as_proposals = True  # make probabilistic?

        sampler = RandomSampler(
            num,
            pos_fraction,
            neg_pos_ub=neg_pos_ub,
            add_gt_as_proposals=add_gt_as_proposals,
            rng=rng)
        self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
        return self
5,334
33.869281
81
py
GFocalV2
GFocalV2-master/mmdet/core/bbox/samplers/pseudo_sampler.py
import torch from ..builder import BBOX_SAMPLERS from .base_sampler import BaseSampler from .sampling_result import SamplingResult @BBOX_SAMPLERS.register_module() class PseudoSampler(BaseSampler): """A pseudo sampler that does not do sampling actually.""" def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): """Sample positive samples.""" raise NotImplementedError def _sample_neg(self, **kwargs): """Sample negative samples.""" raise NotImplementedError def sample(self, assign_result, bboxes, gt_bboxes, **kwargs): """Directly returns the positive and negative indices of samples. Args: assign_result (:obj:`AssignResult`): Assigned results bboxes (torch.Tensor): Bounding boxes gt_bboxes (torch.Tensor): Ground truth boxes Returns: :obj:`SamplingResult`: sampler results """ pos_inds = torch.nonzero( assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags) return sampling_result
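# Illustrative usage sketch (not part of the original file): PseudoSampler
# keeps every assigned box as positive and every unassigned box as negative;
# it is typically used by dense heads that need no real sampling. Assumes
# the demo helpers used in base_sampler.py's doctest.
#
# >>> from mmdet.core.bbox import AssignResult
# >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes
# >>> rng = ensure_rng(0)
# >>> assign_result = AssignResult.random(rng=rng)
# >>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
# >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
# >>> res = PseudoSampler().sample(assign_result, bboxes, gt_bboxes)
# >>> # ignored boxes (gt_inds == -1) belong to neither index set
# >>> res.pos_inds.numel() + res.neg_inds.numel() <= assign_result.num_preds
# True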
1,415
32.714286
79
py
GFocalV2
GFocalV2-master/mmdet/core/utils/dist_utils.py
import warnings
from collections import OrderedDict

import torch.distributed as dist
from mmcv.runner import OptimizerHook
from torch._utils import (_flatten_dense_tensors, _take_tensors,
                          _unflatten_dense_tensors)


def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        flat_tensors.div_(world_size)
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            tensor.copy_(synced)


def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce gradients.

    Args:
        params (list[torch.nn.Parameter]): List of parameters of a model
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    grads = [
        param.grad.data for param in params
        if param.requires_grad and param.grad is not None
    ]
    world_size = dist.get_world_size()
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
    else:
        for tensor in grads:
            dist.all_reduce(tensor.div_(world_size))


class DistOptimizerHook(OptimizerHook):
    """Deprecated optimizer hook for distributed training."""

    def __init__(self, *args, **kwargs):
        warnings.warn('"DistOptimizerHook" is deprecated, please switch to'
                      '"mmcv.runner.OptimizerHook".')
        super().__init__(*args, **kwargs)


def reduce_mean(tensor):
    """Obtain the mean of tensor on different GPUs."""
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor
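# Illustrative sketch (not part of the original file): outside an initialized
# process group `reduce_mean` is a no-op, which makes single-GPU debugging
# safe; under distributed training it averages the tensor across ranks.
#
# >>> import torch
# >>> t = torch.tensor([2.0])
# >>> reduce_mean(t)   # no process group initialized -> returned unchanged
# tensor([2.])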
2,327
32.257143
77
py
GFocalV2
GFocalV2-master/mmdet/core/utils/misc.py
from functools import partial

import torch
from six.moves import map, zip


def multi_apply(func, *args, **kwargs):
    """Apply function to a list of arguments.

    Note:
        This function applies the ``func`` to multiple inputs and
        maps the multiple outputs of the ``func`` into different
        lists. Each list contains the same type of outputs corresponding
        to different inputs.

    Args:
        func (Function): A function that will be applied to a list of
            arguments

    Returns:
        tuple(list): A tuple containing multiple lists, each of which \
            contains one kind of the returned results of the function
    """
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))


def unmap(data, count, inds, fill=0):
    """Unmap a subset of items (data) back to the original set of items (of
    size count)."""
    if data.dim() == 1:
        ret = data.new_full((count, ), fill)
        ret[inds.type(torch.bool)] = data
    else:
        new_size = (count, ) + data.size()[1:]
        ret = data.new_full(new_size, fill)
        ret[inds.type(torch.bool), :] = data
    return ret
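# Illustrative sketch (not part of the original file): `multi_apply` turns
# one function over N inputs into per-output lists, which is how per-level
# head outputs are collected throughout this codebase.
#
# >>> def add_mul(x, y):
# ...     return x + y, x * y
# >>> sums, prods = multi_apply(add_mul, [1, 2, 3], [4, 5, 6])
# >>> sums, prods
# ([5, 7, 9], [4, 10, 18])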
1,206
29.175
79
py
GFocalV2
GFocalV2-master/mmdet/core/anchor/point_generator.py
import torch from .builder import ANCHOR_GENERATORS @ANCHOR_GENERATORS.register_module() class PointGenerator(object): def _meshgrid(self, x, y, row_major=True): xx = x.repeat(len(y)) yy = y.view(-1, 1).repeat(1, len(x)).view(-1) if row_major: return xx, yy else: return yy, xx def grid_points(self, featmap_size, stride=16, device='cuda'): feat_h, feat_w = featmap_size shift_x = torch.arange(0., feat_w, device=device) * stride shift_y = torch.arange(0., feat_h, device=device) * stride shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) stride = shift_x.new_full((shift_xx.shape[0], ), stride) shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) all_points = shifts.to(device) return all_points def valid_flags(self, featmap_size, valid_size, device='cuda'): feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy return valid
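# Illustrative sketch (not part of the original file): `grid_points` returns
# one (x, y, stride) row per feature-map location, in row-major order.
#
# >>> pg = PointGenerator()
# >>> pg.grid_points((2, 2), stride=16, device='cpu')
# tensor([[ 0.,  0., 16.],
#         [16.,  0., 16.],
#         [ 0., 16., 16.],
#         [16., 16., 16.]])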
1,362
34.868421
70
py
GFocalV2
GFocalV2-master/mmdet/core/anchor/anchor_generator.py
import mmcv import numpy as np import torch from torch.nn.modules.utils import _pair from .builder import ANCHOR_GENERATORS @ANCHOR_GENERATORS.register_module() class AnchorGenerator(object): """Standard anchor generator for 2D anchor-based detectors. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels in order (w, h). ratios (list[float]): The list of ratios between the height and width of anchors in a single level. scales (list[int] | None): Anchor scales for anchors in a single level. It cannot be set at the same time if `octave_base_scale` and `scales_per_octave` are set. base_sizes (list[int] | None): The basic sizes of anchors in multiple levels. If None is given, strides will be used as base_sizes. (If strides are non square, the shortest stride is taken.) scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. By default it is True in V2.0 octave_base_scale (int): The base scale of octave. scales_per_octave (int): Number of scales for each octave. `octave_base_scale` and `scales_per_octave` are usually used in retinanet and the `scales` should be None when they are set. centers (list[tuple[float, float]] | None): The centers of the anchor relative to the feature grid center in multiple feature levels. By default it is set to be None and not used. If a list of tuple of float is given, they will be used to shift the centers of anchors. center_offset (float): The offset of center in proportion to anchors' width and height. By default it is 0 in V2.0. Examples: >>> from mmdet.core import AnchorGenerator >>> self = AnchorGenerator([16], [1.], [1.], [9]) >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu') >>> print(all_anchors) [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]])] >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18]) >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu') >>> print(all_anchors) [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]]), \ tensor([[-9., -9., 9., 9.]])] """ def __init__(self, strides, ratios, scales=None, base_sizes=None, scale_major=True, octave_base_scale=None, scales_per_octave=None, centers=None, center_offset=0.): # check center and center_offset if center_offset != 0: assert centers is None, 'center cannot be set when center_offset' \ f'!=0, {centers} is given.' 
        if not (0 <= center_offset <= 1):
            raise ValueError('center_offset should be in range [0, 1], '
                             f'{center_offset} is given.')
        if centers is not None:
            assert len(centers) == len(strides), \
                'The number of strides should be the same as centers, got ' \
                f'{strides} and {centers}'

        # calculate base sizes of anchors
        self.strides = [_pair(stride) for stride in strides]
        self.base_sizes = [min(stride) for stride in self.strides
                           ] if base_sizes is None else base_sizes
        assert len(self.base_sizes) == len(self.strides), \
            'The number of strides should be the same as base sizes, got ' \
            f'{self.strides} and {self.base_sizes}'

        # calculate scales of anchors
        assert ((octave_base_scale is not None
                 and scales_per_octave is not None) ^ (scales is not None)), \
            'scales and octave_base_scale with scales_per_octave cannot' \
            ' be set at the same time'
        if scales is not None:
            self.scales = torch.Tensor(scales)
        elif octave_base_scale is not None and scales_per_octave is not None:
            octave_scales = np.array(
                [2**(i / scales_per_octave) for i in range(scales_per_octave)])
            scales = octave_scales * octave_base_scale
            self.scales = torch.Tensor(scales)
        else:
            raise ValueError('Either scales or octave_base_scale with '
                             'scales_per_octave should be set')

        self.octave_base_scale = octave_base_scale
        self.scales_per_octave = scales_per_octave
        self.ratios = torch.Tensor(ratios)
        self.scale_major = scale_major
        self.centers = centers
        self.center_offset = center_offset
        self.base_anchors = self.gen_base_anchors()

    @property
    def num_base_anchors(self):
        """list[int]: total number of base anchors in a feature grid"""
        return [base_anchors.size(0) for base_anchors in self.base_anchors]

    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        return len(self.strides)

    def gen_base_anchors(self):
        """Generate base anchors.

        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_size in enumerate(self.base_sizes):
            center = None
            if self.centers is not None:
                center = self.centers[i]
            multi_level_base_anchors.append(
                self.gen_single_level_base_anchors(
                    base_size,
                    scales=self.scales,
                    ratios=self.ratios,
                    center=center))
        return multi_level_base_anchors

    def gen_single_level_base_anchors(self,
                                      base_size,
                                      scales,
                                      ratios,
                                      center=None):
        """Generate base anchors of a single level.

        Args:
            base_size (int | float): Basic size of an anchor.
            scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height and width of
                anchors in a single level.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.

        Returns:
            torch.Tensor: Anchors in a single-level feature maps.
        """
        w = base_size
        h = base_size
        if center is None:
            x_center = self.center_offset * w
            y_center = self.center_offset * h
        else:
            x_center, y_center = center

        h_ratios = torch.sqrt(ratios)
        w_ratios = 1 / h_ratios
        if self.scale_major:
            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
        else:
            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)

        # use float anchor and the anchor's center is aligned with the
        # pixel center
        base_anchors = [
            x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
            y_center + 0.5 * hs
        ]
        base_anchors = torch.stack(base_anchors, dim=-1)

        return base_anchors

    def _meshgrid(self, x, y, row_major=True):
        """Generate mesh grid of x and y.
        Args:
            x (torch.Tensor): Grids of x dimension.
            y (torch.Tensor): Grids of y dimension.
            row_major (bool, optional): Whether to return y grids first.
                Defaults to True.

        Returns:
            tuple[torch.Tensor]: The mesh grids of x and y.
        """
        xx = x.repeat(len(y))
        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
        if row_major:
            return xx, yy
        else:
            return yy, xx

    def grid_anchors(self, featmap_sizes, device='cuda'):
        """Generate grid anchors in multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): List of feature map sizes in
                multiple feature levels.
            device (str): Device where the anchors will be put on.

        Return:
            list[torch.Tensor]: Anchors in multiple feature levels. \
                The sizes of each tensor should be [N, 4], where \
                N = width * height * num_base_anchors, width and height \
                are the sizes of the corresponding feature level, \
                num_base_anchors is the number of anchors for that level.
        """
        assert self.num_levels == len(featmap_sizes)
        multi_level_anchors = []
        for i in range(self.num_levels):
            anchors = self.single_level_grid_anchors(
                self.base_anchors[i].to(device),
                featmap_sizes[i],
                self.strides[i],
                device=device)
            multi_level_anchors.append(anchors)
        return multi_level_anchors

    def single_level_grid_anchors(self,
                                  base_anchors,
                                  featmap_size,
                                  stride=(16, 16),
                                  device='cuda'):
        """Generate grid anchors of a single level.

        Note:
            This function is usually called by method ``self.grid_anchors``.

        Args:
            base_anchors (torch.Tensor): The base anchors of a feature grid.
            featmap_size (tuple[int]): Size of the feature maps.
            stride (tuple[int], optional): Stride of the feature map in order
                (w, h). Defaults to (16, 16).
            device (str, optional): Device the tensor will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: Anchors in the overall feature maps.
        """
        feat_h, feat_w = featmap_size
        # convert Tensor to int, so that we can convert to ONNX correctly
        feat_h = int(feat_h)
        feat_w = int(feat_w)
        shift_x = torch.arange(0, feat_w, device=device) * stride[0]
        shift_y = torch.arange(0, feat_h, device=device) * stride[1]

        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
        shifts = shifts.type_as(base_anchors)
        # first feat_w elements correspond to the first row of shifts
        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
        # shifted anchors (K, A, 4), reshape to (K*A, 4)

        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
        all_anchors = all_anchors.view(-1, 4)
        # first A rows correspond to A anchors of (0, 0) in feature map,
        # then (0, 1), (0, 2), ...
        return all_anchors

    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
        """Generate valid flags of anchors in multiple feature levels.

        Args:
            featmap_sizes (list(tuple)): List of feature map sizes in
                multiple feature levels.
            pad_shape (tuple): The padded shape of the image.
            device (str): Device where the anchors will be put on.

        Return:
            list(torch.Tensor): Valid flags of anchors in multiple levels.
""" assert self.num_levels == len(featmap_sizes) multi_level_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] feat_h, feat_w = featmap_sizes[i] h, w = pad_shape[:2] valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w) flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), self.num_base_anchors[i], device=device) multi_level_flags.append(flags) return multi_level_flags def single_level_valid_flags(self, featmap_size, valid_size, num_base_anchors, device='cuda'): """Generate the valid flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps. valid_size (tuple[int]): The valid size of the feature maps. num_base_anchors (int): The number of base anchors. device (str, optional): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. """ feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy valid = valid[:, None].expand(valid.size(0), num_base_anchors).contiguous().view(-1) return valid def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}octave_base_scale=' repr_str += f'{self.octave_base_scale},\n' repr_str += f'{indent_str}scales_per_octave=' repr_str += f'{self.scales_per_octave},\n' repr_str += f'{indent_str}num_levels={self.num_levels}\n' repr_str += f'{indent_str}centers={self.centers},\n' repr_str += f'{indent_str}center_offset={self.center_offset})' return repr_str @ANCHOR_GENERATORS.register_module() class SSDAnchorGenerator(AnchorGenerator): """Anchor generator for SSD. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. basesize_ratio_range (tuple(float)): Ratio range of anchors. input_size (int): Size of feature map, 300 for SSD300, 512 for SSD512. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. It is always set to be False in SSD. """ def __init__(self, strides, ratios, basesize_ratio_range, input_size=300, scale_major=True): assert len(strides) == len(ratios) assert mmcv.is_tuple_of(basesize_ratio_range, float) self.strides = [_pair(stride) for stride in strides] self.input_size = input_size self.centers = [(stride[0] / 2., stride[1] / 2.) 
for stride in self.strides] self.basesize_ratio_range = basesize_ratio_range # calculate anchor ratios and sizes min_ratio, max_ratio = basesize_ratio_range min_ratio = int(min_ratio * 100) max_ratio = int(max_ratio * 100) step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) min_sizes = [] max_sizes = [] for ratio in range(int(min_ratio), int(max_ratio) + 1, step): min_sizes.append(int(self.input_size * ratio / 100)) max_sizes.append(int(self.input_size * (ratio + step) / 100)) if self.input_size == 300: if basesize_ratio_range[0] == 0.15: # SSD300 COCO min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) elif basesize_ratio_range[0] == 0.2: # SSD300 VOC min_sizes.insert(0, int(self.input_size * 10 / 100)) max_sizes.insert(0, int(self.input_size * 20 / 100)) else: raise ValueError( 'basesize_ratio_range[0] should be either 0.15' 'or 0.2 when input_size is 300, got ' f'{basesize_ratio_range[0]}.') elif self.input_size == 512: if basesize_ratio_range[0] == 0.1: # SSD512 COCO min_sizes.insert(0, int(self.input_size * 4 / 100)) max_sizes.insert(0, int(self.input_size * 10 / 100)) elif basesize_ratio_range[0] == 0.15: # SSD512 VOC min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) else: raise ValueError('basesize_ratio_range[0] should be either 0.1' 'or 0.15 when input_size is 512, got' f' {basesize_ratio_range[0]}.') else: raise ValueError('Only support 300 or 512 in SSDAnchorGenerator' f', got {self.input_size}.') anchor_ratios = [] anchor_scales = [] for k in range(len(self.strides)): scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] anchor_ratio = [1.] for r in ratios[k]: anchor_ratio += [1 / r, r] # 4 or 6 ratio anchor_ratios.append(torch.Tensor(anchor_ratio)) anchor_scales.append(torch.Tensor(scales)) self.base_sizes = min_sizes self.scales = anchor_scales self.ratios = anchor_ratios self.scale_major = scale_major self.center_offset = 0 self.base_anchors = self.gen_base_anchors() def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. """ multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): base_anchors = self.gen_single_level_base_anchors( base_size, scales=self.scales[i], ratios=self.ratios[i], center=self.centers[i]) indices = list(range(len(self.ratios[i]))) indices.insert(1, len(indices)) base_anchors = torch.index_select(base_anchors, 0, torch.LongTensor(indices)) multi_level_base_anchors.append(base_anchors) return multi_level_base_anchors def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}input_size={self.input_size},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}num_levels={self.num_levels},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}basesize_ratio_range=' repr_str += f'{self.basesize_ratio_range})' return repr_str @ANCHOR_GENERATORS.register_module() class LegacyAnchorGenerator(AnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. Note: Difference to the V2.0 anchor generator: 1. 
           The center offset of V1.x anchors is set to be 0.5 rather than 0.
        2. The width/height are reduced by 1 when calculating the anchors' \
           centers and corners to meet the V1.x coordinate system.
        3. The anchors' corners are quantized.

    Args:
        strides (list[int] | list[tuple[int]]): Strides of anchors
            in multiple feature levels.
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        scales (list[int] | None): Anchor scales for anchors in a single
            level. It cannot be set at the same time if `octave_base_scale`
            and `scales_per_octave` are set.
        base_sizes (list[int]): The basic sizes of anchors in multiple
            levels. If None is given, strides will be used to generate
            base_sizes.
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. By default it is True in V2.0
        octave_base_scale (int): The base scale of octave.
        scales_per_octave (int): Number of scales for each octave.
            `octave_base_scale` and `scales_per_octave` are usually used in
            retinanet and the `scales` should be None when they are set.
        centers (list[tuple[float, float]] | None): The centers of the anchor
            relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of tuple
            of float is given, they will be used to shift the centers of
            anchors.
        center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0 but it should be 0.5
            in v1.x models.

    Examples:
        >>> from mmdet.core import LegacyAnchorGenerator
        >>> self = LegacyAnchorGenerator(
        >>>     [16], [1.], [1.], [9], center_offset=0.5)
        >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
        >>> print(all_anchors)
        [tensor([[ 0.,  0.,  8.,  8.],
                [16.,  0., 24.,  8.],
                [ 0., 16.,  8., 24.],
                [16., 16., 24., 24.]])]
    """

    def gen_single_level_base_anchors(self,
                                      base_size,
                                      scales,
                                      ratios,
                                      center=None):
        """Generate base anchors of a single level.

        Note:
            The width/height of anchors are reduced by 1 when calculating \
            the centers and corners to meet the V1.x coordinate system.

        Args:
            base_size (int | float): Basic size of an anchor.
            scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height and width of
                anchors in a single level.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.

        Returns:
            torch.Tensor: Anchors in a single-level feature map.
        """
        w = base_size
        h = base_size
        if center is None:
            x_center = self.center_offset * (w - 1)
            y_center = self.center_offset * (h - 1)
        else:
            x_center, y_center = center

        h_ratios = torch.sqrt(ratios)
        w_ratios = 1 / h_ratios
        if self.scale_major:
            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
        else:
            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)

        # use float anchor and the anchor's center is aligned with the
        # pixel center
        base_anchors = [
            x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),
            x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)
        ]
        base_anchors = torch.stack(base_anchors, dim=-1).round()

        return base_anchors


@ANCHOR_GENERATORS.register_module()
class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
    """Legacy anchor generator used in MMDetection V1.x.

    The difference between `LegacySSDAnchorGenerator` and
    `SSDAnchorGenerator` can be found in `LegacyAnchorGenerator`.
""" def __init__(self, strides, ratios, basesize_ratio_range, input_size=300, scale_major=True): super(LegacySSDAnchorGenerator, self).__init__(strides, ratios, basesize_ratio_range, input_size, scale_major) self.centers = [((stride - 1) / 2., (stride - 1) / 2.) for stride in strides] self.base_anchors = self.gen_base_anchors() @ANCHOR_GENERATORS.register_module() class YOLOAnchorGenerator(AnchorGenerator): """Anchor generator for YOLO. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. base_sizes (list[list[tuple[int, int]]]): The basic sizes of anchors in multiple levels. """ def __init__(self, strides, base_sizes): self.strides = [_pair(stride) for stride in strides] self.centers = [(stride[0] / 2., stride[1] / 2.) for stride in self.strides] self.base_sizes = [] num_anchor_per_level = len(base_sizes[0]) for base_sizes_per_level in base_sizes: assert num_anchor_per_level == len(base_sizes_per_level) self.base_sizes.append( [_pair(base_size) for base_size in base_sizes_per_level]) self.base_anchors = self.gen_base_anchors() @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.base_sizes) def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. """ multi_level_base_anchors = [] for i, base_sizes_per_level in enumerate(self.base_sizes): center = None if self.centers is not None: center = self.centers[i] multi_level_base_anchors.append( self.gen_single_level_base_anchors(base_sizes_per_level, center)) return multi_level_base_anchors def gen_single_level_base_anchors(self, base_sizes_per_level, center=None): """Generate base anchors of a single level. Args: base_sizes_per_level (list[tuple[int, int]]): Basic sizes of anchors. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature maps. """ x_center, y_center = center base_anchors = [] for base_size in base_sizes_per_level: w, h = base_size # use float anchor and the anchor's center is aligned with the # pixel center base_anchor = torch.Tensor([ x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, y_center + 0.5 * h ]) base_anchors.append(base_anchor) base_anchors = torch.stack(base_anchors, dim=0) return base_anchors def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'): """Generate responsible anchor flags of grid cells in multiple scales. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). device (str): Device where the anchors will be put on. Return: list(torch.Tensor): responsible flags of anchors in multiple level """ assert self.num_levels == len(featmap_sizes) multi_level_responsible_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] flags = self.single_level_responsible_flags( featmap_sizes[i], gt_bboxes, anchor_stride, self.num_base_anchors[i], device=device) multi_level_responsible_flags.append(flags) return multi_level_responsible_flags def single_level_responsible_flags(self, featmap_size, gt_bboxes, stride, num_base_anchors, device='cuda'): """Generate the responsible flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). 
stride (tuple(int)): stride of current level num_base_anchors (int): The number of base anchors. device (str, optional): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. """ feat_h, feat_w = featmap_size gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long() gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long() # row major indexing gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x responsible_grid = torch.zeros( feat_h * feat_w, dtype=torch.uint8, device=device) responsible_grid[gt_bboxes_grid_idx] = 1 responsible_grid = responsible_grid[:, None].expand( responsible_grid.size(0), num_base_anchors).contiguous().view(-1) return responsible_grid
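# Illustrative sketch (not part of the original file): a YOLO grid cell is
# "responsible" for a ground-truth box iff the box center falls in that
# cell, in row-major order over the feature map.
#
# >>> import torch
# >>> self = YOLOAnchorGenerator(strides=[32], base_sizes=[[(116, 90)]])
# >>> gt = torch.Tensor([[10., 10., 50., 50.]])   # center at (30, 30)
# >>> self.single_level_responsible_flags((2, 2), gt, (32, 32), 1,
# ...                                     device='cpu')
# tensor([1, 0, 0, 0], dtype=torch.uint8)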
31,168
41.75583
79
py
GFocalV2
GFocalV2-master/mmdet/core/anchor/utils.py
import torch


def images_to_levels(target, num_levels):
    """Convert targets by image to targets by feature level.

    [target_img0, target_img1] -> [target_level0, target_level1, ...]
    """
    target = torch.stack(target, 0)
    level_targets = []
    start = 0
    for n in num_levels:
        end = start + n
        # level_targets.append(target[:, start:end].squeeze(0))
        level_targets.append(target[:, start:end])
        start = end
    return level_targets


def anchor_inside_flags(flat_anchors,
                        valid_flags,
                        img_shape,
                        allowed_border=0):
    """Check whether the anchors are inside the border.

    Args:
        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
        valid_flags (torch.Tensor): An existing valid flags of anchors.
        img_shape (tuple(int)): Shape of current image.
        allowed_border (int, optional): The border to allow the valid anchor.
            Defaults to 0.

    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a \
            valid range.
    """
    img_h, img_w = img_shape[:2]
    if allowed_border >= 0:
        inside_flags = valid_flags & \
            (flat_anchors[:, 0] >= -allowed_border) & \
            (flat_anchors[:, 1] >= -allowed_border) & \
            (flat_anchors[:, 2] < img_w + allowed_border) & \
            (flat_anchors[:, 3] < img_h + allowed_border)
    else:
        inside_flags = valid_flags
    return inside_flags


def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and
    w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the
            boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1])
        y1 = y1.clamp(min=0, max=featmap_size[0])
        x2 = x2.clamp(min=0, max=featmap_size[1])
        y2 = y2.clamp(min=0, max=featmap_size[0])
    return (x1, y1, x2, y2)
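# Illustrative sketch (not part of the original file): `images_to_levels`
# regroups per-image anchor targets into per-level batches.
#
# >>> import torch
# >>> per_image = [torch.zeros(5), torch.ones(5)]   # 2 images, 5 anchors
# >>> levels = images_to_levels(per_image, [3, 2])  # 3 + 2 anchors per level
# >>> [lvl.shape for lvl in levels]
# [torch.Size([2, 3]), torch.Size([2, 2])]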
2,497
33.694444
79
py
GFocalV2
GFocalV2-master/mmdet/models/builder.py
from mmcv.utils import Registry, build_from_cfg
from torch import nn

BACKBONES = Registry('backbone')
NECKS = Registry('neck')
ROI_EXTRACTORS = Registry('roi_extractor')
SHARED_HEADS = Registry('shared_head')
HEADS = Registry('head')
LOSSES = Registry('loss')
DETECTORS = Registry('detector')


def build(cfg, registry, default_args=None):
    """Build a module.

    Args:
        cfg (dict, list[dict]): The config of modules; it is either a dict
            or a list of configs.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the
            module. Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return nn.Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args)


def build_backbone(cfg):
    """Build backbone."""
    return build(cfg, BACKBONES)


def build_neck(cfg):
    """Build neck."""
    return build(cfg, NECKS)


def build_roi_extractor(cfg):
    """Build roi extractor."""
    return build(cfg, ROI_EXTRACTORS)


def build_shared_head(cfg):
    """Build shared head."""
    return build(cfg, SHARED_HEADS)


def build_head(cfg):
    """Build head."""
    return build(cfg, HEADS)


def build_loss(cfg):
    """Build loss."""
    return build(cfg, LOSSES)


def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build detector."""
    return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
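# Illustrative sketch (not part of the original file): every `build_*` helper
# above is a thin wrapper over mmcv's `build_from_cfg`, so any registered
# class is constructed from a dict whose 'type' names it. `TOYS`/`Toy` below
# are hypothetical.
#
# >>> from mmcv.utils import Registry
# >>> TOYS = Registry('toy')
# >>> @TOYS.register_module()
# ... class Toy:
# ...     def __init__(self, size=1):
# ...         self.size = size
# >>> toy = build(dict(type='Toy', size=3), TOYS)
# >>> toy.size
# 3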
1,631
23
78
py
GFocalV2
GFocalV2-master/mmdet/models/detectors/two_stage.py
import torch
import torch.nn as nn

# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector


@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
    """Base class for two-stage detectors.

    Two-stage detectors typically consist of a region proposal network and a
    task-specific regression head.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(TwoStageDetector, self).__init__()
        self.backbone = build_backbone(backbone)

        if neck is not None:
            self.neck = build_neck(neck)

        if rpn_head is not None:
            rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)

        if roi_head is not None:
            # update train and test cfg here for now
            # TODO: refactor assigner & sampler
            rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            self.roi_head = build_head(roi_head)

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        self.init_weights(pretrained=pretrained)

    @property
    def with_rpn(self):
        """bool: whether the detector has RPN"""
        return hasattr(self, 'rpn_head') and self.rpn_head is not None

    @property
    def with_roi_head(self):
        """bool: whether the detector has a RoI head"""
        return hasattr(self, 'roi_head') and self.roi_head is not None

    def init_weights(self, pretrained=None):
        """Initialize the weights in detector.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        super(TwoStageDetector, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_rpn:
            self.rpn_head.init_weights()
        if self.with_roi_head:
            self.roi_head.init_weights(pretrained)

    def extract_feat(self, img):
        """Directly extract features from the backbone+neck."""
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/get_flops.py`
        """
        outs = ()
        # backbone
        x = self.extract_feat(img)
        # rpn
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = outs + (rpn_outs, )
        proposals = torch.randn(1000, 4).to(img.device)
        # roi_head
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = outs + (roi_outs, )
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.

            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also
                contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'. For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.

            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.

            gt_labels (list[Tensor]): class indices corresponding to each box

            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. proposals : override rpn proposals with custom proposals. Use when `with_rpn` is False. Returns: dict[str, Tensor]: a dictionary of loss components """ x = self.extract_feat(img) losses = dict() # RPN forward and loss if self.with_rpn: proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) rpn_losses, proposal_list = self.rpn_head.forward_train( x, img_metas, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore, proposal_cfg=proposal_cfg) losses.update(rpn_losses) else: proposal_list = proposals roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks, **kwargs) losses.update(roi_losses) return losses async def async_simple_test(self, img, img_meta, proposals=None, rescale=False): """Async test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' x = self.extract_feat(img) if proposals is None: proposal_list = await self.rpn_head.async_simple_test_rpn( x, img_meta) else: proposal_list = proposals return await self.roi_head.async_simple_test( x, proposal_list, img_meta, rescale=rescale) def simple_test(self, img, img_metas, proposals=None, rescale=False): """Test without augmentation.""" assert self.with_bbox, 'Bbox head must be implemented.' x = self.extract_feat(img) if proposals is None: proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) else: proposal_list = proposals return self.roi_head.simple_test( x, proposal_list, img_metas, rescale=rescale) def aug_test(self, imgs, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ x = self.extract_feats(imgs) proposal_list = self.rpn_head.aug_test_rpn(x, img_metas) return self.roi_head.aug_test( x, proposal_list, img_metas, rescale=rescale)
7,423
34.184834
79
py
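A toy sketch (made-up values and placeholder head configs) of the cfg routing done in TwoStageDetector.__init__ above: the rpn sub-configs of train_cfg/test_cfg are injected into the RPN head config and the rcnn sub-configs into the RoI head config before either head is built.

from mmcv import Config

# Made-up train/test settings with the rpn/rcnn split the constructor expects.
train_cfg = Config(dict(rpn=dict(nms_pre=2000), rcnn=dict(score_thr=0.05)))
test_cfg = Config(dict(rpn=dict(nms_pre=1000), rcnn=dict(score_thr=0.3)))

rpn_head = dict(type='RPNHead')          # placeholder head config dicts
roi_head = dict(type='StandardRoIHead')

# Mirrors the constructor: copy, then inject the matching sub-configs.
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=train_cfg.rpn, test_cfg=test_cfg.rpn)
roi_head.update(train_cfg=train_cfg.rcnn, test_cfg=test_cfg.rcnn)

assert rpn_head_['train_cfg']['nms_pre'] == 2000
assert roi_head['test_cfg']['score_thr'] == 0.3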
GFocalV2
GFocalV2-master/mmdet/models/detectors/base.py
from abc import ABCMeta, abstractmethod from collections import OrderedDict import mmcv import numpy as np import torch import torch.distributed as dist import torch.nn as nn from mmcv.runner import auto_fp16 from mmcv.utils import print_log from mmdet.utils import get_root_logger class BaseDetector(nn.Module, metaclass=ABCMeta): """Base class for detectors.""" def __init__(self): super(BaseDetector, self).__init__() self.fp16_enabled = False @property def with_neck(self): """bool: whether the detector has a neck""" return hasattr(self, 'neck') and self.neck is not None # TODO: these properties need to be carefully handled # for both single stage & two stage detectors @property def with_shared_head(self): """bool: whether the detector has a shared head in the RoI Head""" return hasattr(self, 'roi_head') and self.roi_head.with_shared_head @property def with_bbox(self): """bool: whether the detector has a bbox head""" return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) or (hasattr(self, 'bbox_head') and self.bbox_head is not None)) @property def with_mask(self): """bool: whether the detector has a mask head""" return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) or (hasattr(self, 'mask_head') and self.mask_head is not None)) @abstractmethod def extract_feat(self, imgs): """Extract features from images.""" pass def extract_feats(self, imgs): """Extract features from multiple images. Args: imgs (list[torch.Tensor]): A list of images. The images are augmented from the same image but in different ways. Returns: list[torch.Tensor]: Features of different images """ assert isinstance(imgs, list) return [self.extract_feat(img) for img in imgs] @abstractmethod def forward_train(self, imgs, img_metas, **kwargs): """ Args: imgs (list[Tensor]): List of tensors of shape (1, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys, see :class:`mmdet.datasets.pipelines.Collect`. kwargs (keyword arguments): Specific to concrete implementation. """ pass async def async_simple_test(self, img, img_metas, **kwargs): raise NotImplementedError @abstractmethod def simple_test(self, img, img_metas, **kwargs): pass @abstractmethod def aug_test(self, imgs, img_metas, **kwargs): """Test function with test time augmentation.""" pass def init_weights(self, pretrained=None): """Initialize the weights in detector. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. 
""" if pretrained is not None: logger = get_root_logger() print_log(f'load model from: {pretrained}', logger=logger) async def aforward_test(self, *, img, img_metas, **kwargs): for var, name in [(img, 'img'), (img_metas, 'img_metas')]: if not isinstance(var, list): raise TypeError(f'{name} must be a list, but got {type(var)}') num_augs = len(img) if num_augs != len(img_metas): raise ValueError(f'num of augmentations ({len(img)}) ' f'!= num of image metas ({len(img_metas)})') # TODO: remove the restriction of samples_per_gpu == 1 when prepared samples_per_gpu = img[0].size(0) assert samples_per_gpu == 1 if num_augs == 1: return await self.async_simple_test(img[0], img_metas[0], **kwargs) else: raise NotImplementedError def forward_test(self, imgs, img_metas, **kwargs): """ Args: imgs (List[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains all images in the batch. img_metas (List[List[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. """ for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: if not isinstance(var, list): raise TypeError(f'{name} must be a list, but got {type(var)}') num_augs = len(imgs) if num_augs != len(img_metas): raise ValueError(f'num of augmentations ({len(imgs)}) ' f'!= num of image meta ({len(img_metas)})') if num_augs == 1: # proposals (List[List[Tensor]]): the outer list indicates # test-time augs (multiscale, flip, etc.) and the inner list # indicates images in a batch. # The Tensor should have a shape Px4, where P is the number of # proposals. if 'proposals' in kwargs: kwargs['proposals'] = kwargs['proposals'][0] return self.simple_test(imgs[0], img_metas[0], **kwargs) else: assert imgs[0].size(0) == 1, 'aug test does not support ' \ 'inference with batch size ' \ f'{imgs[0].size(0)}' # TODO: support test augmentation for predefined proposals assert 'proposals' not in kwargs return self.aug_test(imgs, img_metas, **kwargs) @auto_fp16(apply_to=('img', )) def forward(self, img, img_metas, return_loss=True, **kwargs): """Calls either :func:`forward_train` or :func:`forward_test` depending on whether ``return_loss`` is ``True``. Note this setting will change the expected inputs. When ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor and List[dict]), and when ``resturn_loss=False``, img and img_meta should be double nested (i.e. List[Tensor], List[List[dict]]), with the outer list indicating test time augmentations. """ if return_loss: return self.forward_train(img, img_metas, **kwargs) else: return self.forward_test(img, img_metas, **kwargs) def _parse_losses(self, losses): """Parse the raw outputs (losses) of the network. Args: losses (dict): Raw output of the network, which usually contain losses and other necessary infomation. Returns: tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \ which may be a weighted sum of all losses, log_vars contains \ all the variables to be sent to the logger. 
""" log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError( f'{loss_name} is not a tensor or list of tensors') loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key) log_vars['loss'] = loss for loss_name, loss_value in log_vars.items(): # reduce loss when distributed training if dist.is_available() and dist.is_initialized(): loss_value = loss_value.data.clone() dist.all_reduce(loss_value.div_(dist.get_world_size())) log_vars[loss_name] = loss_value.item() return loss, log_vars def train_step(self, data, optimizer): """The iteration step during training. This method defines an iteration step during training, except for the back propagation and optimizer updating, which are done in an optimizer hook. Note that in some complicated cases or models, the whole process including back propagation and optimizer updating is also defined in this method, such as GAN. Args: data (dict): The output of dataloader. optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of runner is passed to ``train_step()``. This argument is unused and reserved. Returns: dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \ ``num_samples``. - ``loss`` is a tensor for back propagation, which can be a \ weighted sum of multiple losses. - ``log_vars`` contains all the variables to be sent to the logger. - ``num_samples`` indicates the batch size (when the model is \ DDP, it means the batch size on each GPU), which is used for \ averaging the logs. """ losses = self(**data) loss, log_vars = self._parse_losses(losses) outputs = dict( loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) return outputs def val_step(self, data, optimizer): """The iteration step during validation. This method shares the same signature as :func:`train_step`, but used during val epochs. Note that the evaluation after training epochs is not implemented with this method, but an evaluation hook. """ losses = self(**data) loss, log_vars = self._parse_losses(losses) outputs = dict( loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) return outputs def show_result(self, img, result, score_thr=0.3, bbox_color='green', text_color='green', thickness=1, font_scale=0.5, win_name='', show=False, wait_time=0, out_file=None): """Draw `result` over `img`. Args: img (str or Tensor): The image to be displayed. result (Tensor or tuple): The results to draw over `img` bbox_result or (bbox_result, segm_result). score_thr (float, optional): Minimum score of bboxes to be shown. Default: 0.3. bbox_color (str or tuple or :obj:`Color`): Color of bbox lines. text_color (str or tuple or :obj:`Color`): Color of texts. thickness (int): Thickness of lines. font_scale (float): Font scales of texts. win_name (str): The window name. wait_time (int): Value of waitKey param. Default: 0. show (bool): Whether to show the image. Default: False. out_file (str or None): The filename to write the image. Default: None. 
Returns: img (Tensor): The image with results drawn on it, returned only when ``show`` is False and ``out_file`` is None. """ img = mmcv.imread(img) img = img.copy() if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] # ms rcnn else: bbox_result, segm_result = result, None bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) # draw segmentation masks if segm_result is not None and len(labels) > 0: # non empty segms = mmcv.concat_list(segm_result) inds = np.where(bboxes[:, -1] > score_thr)[0] np.random.seed(42) color_masks = [ np.random.randint(0, 256, (1, 3), dtype=np.uint8) for _ in range(max(labels) + 1) ] for i in inds: i = int(i) color_mask = color_masks[labels[i]] mask = segms[i].astype(bool) img[mask] = img[mask] * 0.5 + color_mask * 0.5 # if out_file specified, do not show image in window if out_file is not None: show = False # draw bounding boxes mmcv.imshow_det_bboxes( img, bboxes, labels, class_names=self.CLASSES, score_thr=score_thr, bbox_color=bbox_color, text_color=text_color, thickness=thickness, font_scale=font_scale, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file) if not (show or out_file): return img
13,261
37.891496
79
py
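A small standalone check (toy values, distributed reduction omitted) of the convention enforced by BaseDetector._parse_losses above: tensor entries are averaged, list entries are summed over per-element means, and only keys containing 'loss' count toward the total.

import torch

losses = dict(
    loss_cls=torch.tensor([0.5, 1.5]),                 # tensor -> mean = 1.0
    loss_bbox=[torch.tensor(0.2), torch.tensor(0.4)],  # list -> sum of means = 0.6
    acc=torch.tensor(88.0),                            # no 'loss' in key -> logged only
)

log_vars = {}
for name, value in losses.items():
    if isinstance(value, torch.Tensor):
        log_vars[name] = value.mean()
    else:
        log_vars[name] = sum(v.mean() for v in value)

total = sum(v for k, v in log_vars.items() if 'loss' in k)
assert torch.isclose(total, torch.tensor(1.6))  # 1.0 + 0.6; 'acc' excluded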
GFocalV2
GFocalV2-master/mmdet/models/detectors/single_stage.py
import torch import torch.nn as nn from mmdet.core import bbox2result from ..builder import DETECTORS, build_backbone, build_head, build_neck from .base import BaseDetector @DETECTORS.register_module() class SingleStageDetector(BaseDetector): """Base class for single-stage detectors. Single-stage detectors directly and densely predict bounding boxes on the output features of the backbone+neck. """ def __init__(self, backbone, neck=None, bbox_head=None, train_cfg=None, test_cfg=None, pretrained=None): super(SingleStageDetector, self).__init__() self.backbone = build_backbone(backbone) if neck is not None: self.neck = build_neck(neck) bbox_head.update(train_cfg=train_cfg) bbox_head.update(test_cfg=test_cfg) self.bbox_head = build_head(bbox_head) self.train_cfg = train_cfg self.test_cfg = test_cfg self.init_weights(pretrained=pretrained) def init_weights(self, pretrained=None): """Initialize the weights in detector. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ super(SingleStageDetector, self).init_weights(pretrained) self.backbone.init_weights(pretrained=pretrained) if self.with_neck: if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() self.bbox_head.init_weights() def extract_feat(self, img): """Directly extract features from the backbone+neck.""" x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/get_flops.py` """ x = self.extract_feat(img) outs = self.bbox_head(x) return outs def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None): """ Args: img (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_bboxes (list[Tensor]): Each item is the ground truth boxes for one image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): Class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ x = self.extract_feat(img) losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore) return losses def simple_test(self, img, img_metas, rescale=False): """Test function without test time augmentation. Args: img (torch.Tensor): Input images. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. """ x = self.extract_feat(img) outs = self.bbox_head(x) bbox_list = self.bbox_head.get_bboxes( *outs, img_metas, rescale=rescale) # skip post-processing when exporting to ONNX if torch.onnx.is_in_onnx_export(): return bbox_list bbox_results = [ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for det_bboxes, det_labels in bbox_list ] return bbox_results def aug_test(self, imgs, img_metas, rescale=False): """Test function with test time augmentation. 
Args: imgs (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. """ assert hasattr(self.bbox_head, 'aug_test'), \ f'{self.bbox_head.__class__.__name__}' \ ' does not support test-time augmentation' feats = self.extract_feats(imgs) return [self.bbox_head.aug_test(feats, img_metas, rescale=rescale)]
5,658
36.979866
78
py
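For reference, a simplified standalone re-implementation (not the mmdet helper itself) of what bbox2result, imported in the file above, produces: (n, 5) detections are split into one array per class, which is the list[np.ndarray] format simple_test returns.

import numpy as np
import torch

def to_per_class_results(det_bboxes, det_labels, num_classes):
    # det_bboxes: (n, 5) as [x1, y1, x2, y2, score]; det_labels: (n,)
    if det_bboxes.shape[0] == 0:
        return [np.zeros((0, 5), dtype=np.float32) for _ in range(num_classes)]
    bboxes = det_bboxes.cpu().numpy()
    labels = det_labels.cpu().numpy()
    return [bboxes[labels == i, :] for i in range(num_classes)]

dets = torch.tensor([[0., 0., 10., 10., 0.9], [5., 5., 20., 20., 0.6]])
labels = torch.tensor([2, 0])
results = to_per_class_results(dets, labels, num_classes=3)
assert results[2].shape == (1, 5) and results[1].shape == (0, 5)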
GFocalV2
GFocalV2-master/mmdet/models/detectors/yolact.py
import torch from mmdet.core import bbox2result from ..builder import DETECTORS, build_head from .single_stage import SingleStageDetector @DETECTORS.register_module() class YOLACT(SingleStageDetector): """Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_""" def __init__(self, backbone, neck, bbox_head, segm_head, mask_head, train_cfg=None, test_cfg=None, pretrained=None): super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained) self.segm_head = build_head(segm_head) self.mask_head = build_head(mask_head) self.init_segm_mask_weights() def init_segm_mask_weights(self): """Initialize weights of the YOLACT segm head and YOLACT mask head.""" self.segm_head.init_weights() self.mask_head.init_weights() def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/get_flops.py` """ raise NotImplementedError def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. 
Returns: dict[str, Tensor]: a dictionary of loss components """ # convert Bitmap mask or Polygon Mask to Tensor here gt_masks = [ gt_mask.to_tensor(dtype=torch.uint8, device=img.device) for gt_mask in gt_masks ] x = self.extract_feat(img) cls_score, bbox_pred, coeff_pred = self.bbox_head(x) bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels, img_metas) losses, sampling_results = self.bbox_head.loss( *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) segm_head_outs = self.segm_head(x[0]) loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels) losses.update(loss_segm) mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas, sampling_results) loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas, sampling_results) losses.update(loss_mask) # check NaN and Inf for loss_name in losses.keys(): assert torch.isfinite(torch.stack(losses[loss_name]))\ .all().item(), '{} becomes infinite or NaN!'\ .format(loss_name) return losses def simple_test(self, img, img_metas, rescale=False): """Test function without test time augmentation.""" x = self.extract_feat(img) cls_score, bbox_pred, coeff_pred = self.bbox_head(x) bbox_inputs = (cls_score, bbox_pred, coeff_pred) + (img_metas, self.test_cfg, rescale) det_bboxes, det_labels, det_coeffs = self.bbox_head.get_bboxes( *bbox_inputs) bbox_results = [ bbox2result(det_bbox, det_label, self.bbox_head.num_classes) for det_bbox, det_label in zip(det_bboxes, det_labels) ] num_imgs = len(img_metas) scale_factors = tuple(meta['scale_factor'] for meta in img_metas) if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): segm_results = [[[] for _ in range(self.mask_head.num_classes)] for _ in range(num_imgs)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. if rescale and not isinstance(scale_factors[0], float): scale_factors = [ torch.from_numpy(scale_factor).to(det_bboxes[0].device) for scale_factor in scale_factors ] _bboxes = [ det_bboxes[i][:, :4] * scale_factors[i] if rescale else det_bboxes[i][:, :4] for i in range(len(det_bboxes)) ] mask_preds = self.mask_head(x[0], det_coeffs, _bboxes, img_metas) # apply mask post-processing to each image individually segm_results = [] for i in range(num_imgs): if det_bboxes[i].shape[0] == 0: segm_results.append( [[] for _ in range(self.mask_head.num_classes)]) else: segm_result = self.mask_head.get_seg_masks( mask_preds[i], det_labels[i], img_metas[i], rescale) segm_results.append(segm_result) return list(zip(bbox_results, segm_results)) def aug_test(self, imgs, img_metas, rescale=False): """Test with augmentations.""" raise NotImplementedError
6,114
40.598639
79
py
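The combination of coeff_pred with prototype masks happens inside the mask head rather than in the file above; as a rough toy sketch of the YOLACT idea it feeds into (shapes made up for illustration), each detection's mask is a linear combination of shared prototype masks.

import torch

# Made-up shapes: 32 prototype masks and 5 detections, per the YOLACT paper's
# scheme; the real prototype/coefficient tensors come from the mask head.
protos = torch.rand(32, 68, 68)   # (k prototypes, H, W)
coeffs = torch.randn(5, 32)       # per-detection mixing coefficients

masks = torch.sigmoid(torch.einsum('nc,chw->nhw', coeffs, protos))
assert masks.shape == (5, 68, 68)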
GFocalV2
GFocalV2-master/mmdet/models/detectors/rpn.py
import mmcv from mmcv.image import tensor2imgs from mmdet.core import bbox_mapping from ..builder import DETECTORS, build_backbone, build_head, build_neck from .base import BaseDetector @DETECTORS.register_module() class RPN(BaseDetector): """Implementation of Region Proposal Network.""" def __init__(self, backbone, neck, rpn_head, train_cfg, test_cfg, pretrained=None): super(RPN, self).__init__() self.backbone = build_backbone(backbone) self.neck = build_neck(neck) if neck is not None else None rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None rpn_head.update(train_cfg=rpn_train_cfg) rpn_head.update(test_cfg=test_cfg.rpn) self.rpn_head = build_head(rpn_head) self.train_cfg = train_cfg self.test_cfg = test_cfg self.init_weights(pretrained=pretrained) def init_weights(self, pretrained=None): """Initialize the weights in detector. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ super(RPN, self).init_weights(pretrained) self.backbone.init_weights(pretrained=pretrained) if self.with_neck: self.neck.init_weights() self.rpn_head.init_weights() def extract_feat(self, img): """Extract features. Args: img (torch.Tensor): Image tensor with shape (n, c, h ,w). Returns: list[torch.Tensor]: Multi-level features that may have different resolutions. """ x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Dummy forward function.""" x = self.extract_feat(img) rpn_outs = self.rpn_head(x) return rpn_outs def forward_train(self, img, img_metas, gt_bboxes=None, gt_bboxes_ignore=None): """ Args: img (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_bboxes (list[Tensor]): Each item is the ground truth boxes for one image in [tl_x, tl_y, br_x, br_y] format. gt_bboxes_ignore (None | list[Tensor]): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ if self.train_cfg.rpn.get('debug', False): self.rpn_head.debug_imgs = tensor2imgs(img) x = self.extract_feat(img) losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None, gt_bboxes_ignore) return losses def simple_test(self, img, img_metas, rescale=False): """Test function without test time augmentation. Args: img (torch.Tensor): Input images. img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[np.ndarray]: proposals """ x = self.extract_feat(img) proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) if rescale: for proposals, meta in zip(proposal_list, img_metas): proposals[:, :4] /= proposals.new_tensor(meta['scale_factor']) return [proposal.cpu().numpy() for proposal in proposal_list] def aug_test(self, imgs, img_metas, rescale=False): """Test function with test time augmentation. Args: imgs (list[torch.Tensor]): List of multiple images img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. 
Returns: list[np.ndarray]: proposals """ proposal_list = self.rpn_head.aug_test_rpn( self.extract_feats(imgs), img_metas) if not rescale: for proposals, img_meta in zip(proposal_list, img_metas[0]): img_shape = img_meta['img_shape'] scale_factor = img_meta['scale_factor'] flip = img_meta['flip'] flip_direction = img_meta['flip_direction'] proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape, scale_factor, flip, flip_direction) return [proposal.cpu().numpy() for proposal in proposal_list] def show_result(self, data, result, dataset=None, top_k=20): """Show RPN proposals on the image. Although we assume batch size is 1, this method supports arbitrary batch size. """ img_tensor = data['img'][0] img_metas = data['img_metas'][0].data[0] imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) assert len(imgs) == len(img_metas) for img, img_meta in zip(imgs, img_metas): h, w, _ = img_meta['img_shape'] img_show = img[:h, :w, :] mmcv.imshow_bboxes(img_show, result, top_k=top_k)
5,804
36.694805
78
py
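simple_test above maps proposals back to the original image by dividing their coordinates by the per-image scale_factor (a 4-vector of width/height scales in mmdet's convention); the operation in isolation, with made-up numbers:

import torch

proposals = torch.tensor([[100., 60., 300., 200., 0.95]])  # (x1, y1, x2, y2, score)
scale_factor = proposals.new_tensor([2.0, 2.0, 2.0, 2.0])  # (w, h, w, h) test-pipeline scaling

proposals[:, :4] /= scale_factor  # score column untouched
assert torch.equal(proposals[0, :4], torch.tensor([50., 30., 150., 100.]))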
GFocalV2
GFocalV2-master/mmdet/models/detectors/cornernet.py
import torch from mmdet.core import bbox2result, bbox_mapping_back from ..builder import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module() class CornerNet(SingleStageDetector): """CornerNet. This detector is the implementation of the paper `CornerNet: Detecting Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ . """ def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None): super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained) def merge_aug_results(self, aug_results, img_metas): """Merge augmented detection bboxes and score. Args: aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each image. img_metas (list[list[dict]]): Meta information of each image, e.g., image size, scaling factor, etc. Returns: tuple: (bboxes, labels) """ recovered_bboxes, aug_labels = [], [] for bboxes_labels, img_info in zip(aug_results, img_metas): img_shape = img_info[0]['img_shape'] # using shape before padding scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] bboxes, labels = bboxes_labels bboxes, scores = bboxes[:, :4], bboxes[:, -1:] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1)) aug_labels.append(labels) bboxes = torch.cat(recovered_bboxes, dim=0) labels = torch.cat(aug_labels) if bboxes.shape[0] > 0: out_bboxes, out_labels = self.bbox_head._bboxes_nms( bboxes, labels, self.bbox_head.test_cfg) else: out_bboxes, out_labels = bboxes, labels return out_bboxes, out_labels def aug_test(self, imgs, img_metas, rescale=False): """Augment testing of CornerNet. Args: imgs (list[Tensor]): Augmented images. img_metas (list[list[dict]]): Meta information of each image, e.g., image size, scaling factor, etc. rescale (bool): If True, return boxes in original image space. Default: False. Note: ``imgs`` must include flipped image pairs. Returns: list[list[np.ndarray]]: BBox results of each image and classes. The outer list corresponds to each image. The inner list corresponds to each class. """ img_inds = list(range(len(imgs))) assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], ( 'aug test must have flipped image pair') aug_results = [] for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]): img_pair = torch.cat([imgs[ind], imgs[flip_ind]]) x = self.extract_feat(img_pair) outs = self.bbox_head(x) bbox_list = self.bbox_head.get_bboxes( *outs, [img_metas[ind], img_metas[flip_ind]], False, False) aug_results.append(bbox_list[0]) aug_results.append(bbox_list[1]) bboxes, labels = self.merge_aug_results(aug_results, img_metas) bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes) return [bbox_results]
3,578
36.28125
79
py
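merge_aug_results above relies on bbox_mapping_back to undo the test-time flip before the final NMS; a simplified standalone sketch of just the horizontal-flip part of that mapping (scale factor ignored, function name made up):

import torch

def unflip_h(bboxes, img_shape):
    # Map boxes from a horizontally flipped image back to the original:
    # x1' = W - x2, x2' = W - x1; y coordinates are unchanged.
    w = img_shape[1]
    out = bboxes.clone()
    out[:, 0] = w - bboxes[:, 2]
    out[:, 2] = w - bboxes[:, 0]
    return out

boxes = torch.tensor([[10., 20., 30., 40.]])
back = unflip_h(boxes, img_shape=(100, 100, 3))
assert torch.equal(back, torch.tensor([[70., 20., 90., 40.]]))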
GFocalV2
GFocalV2-master/mmdet/models/necks/rfp.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import constant_init, kaiming_init, xavier_init from ..builder import NECKS, build_backbone from .fpn import FPN class ASPP(nn.Module): """ASPP (Atrous Spatial Pyramid Pooling) This is an implementation of the ASPP module used in DetectoRS (https://arxiv.org/pdf/2006.02334.pdf) Args: in_channels (int): Number of input channels. out_channels (int): Number of channels produced by each branch; after concatenation the module outputs ``len(dilations) * out_channels`` channels. dilations (tuple[int]): Dilations of the four branches. Default: (1, 3, 6, 1) """ def __init__(self, in_channels, out_channels, dilations=(1, 3, 6, 1)): super().__init__() assert dilations[-1] == 1 self.aspp = nn.ModuleList() for dilation in dilations: kernel_size = 3 if dilation > 1 else 1 padding = dilation if dilation > 1 else 0 conv = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=1, dilation=dilation, padding=padding, bias=True) self.aspp.append(conv) self.gap = nn.AdaptiveAvgPool2d(1) self.init_weights() def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) def forward(self, x): avg_x = self.gap(x) out = [] for aspp_idx in range(len(self.aspp)): inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x out.append(F.relu_(self.aspp[aspp_idx](inp))) out[-1] = out[-1].expand_as(out[-2]) out = torch.cat(out, dim=1) return out @NECKS.register_module() class RFP(FPN): """RFP (Recursive Feature Pyramid) This is an implementation of RFP in `DetectoRS <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the input of RFP should be multi level features along with origin input image of backbone. Args: rfp_steps (int): Number of unrolled steps of RFP. rfp_backbone (dict): Configuration of the backbone for RFP. aspp_out_channels (int): Number of output channels of ASPP module. aspp_dilations (tuple[int]): Dilation rates of four branches. Default: (1, 3, 6, 1) """ def __init__(self, rfp_steps, rfp_backbone, aspp_out_channels, aspp_dilations=(1, 3, 6, 1), **kwargs): super().__init__(**kwargs) self.rfp_steps = rfp_steps self.rfp_modules = nn.ModuleList() for rfp_idx in range(1, rfp_steps): rfp_module = build_backbone(rfp_backbone) self.rfp_modules.append(rfp_module) self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels, aspp_dilations) self.rfp_weight = nn.Conv2d( self.out_channels, 1, kernel_size=1, stride=1, padding=0, bias=True) def init_weights(self): # Avoid using super().init_weights(), which may alter the default # initialization of the modules in self.rfp_modules that have missing # keys in the pretrained checkpoint. for convs in [self.lateral_convs, self.fpn_convs]: for m in convs.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') for rfp_idx in range(self.rfp_steps - 1): self.rfp_modules[rfp_idx].init_weights( self.rfp_modules[rfp_idx].pretrained) constant_init(self.rfp_weight, 0) def forward(self, inputs): inputs = list(inputs) assert len(inputs) == len(self.in_channels) + 1 # +1 for input image img = inputs.pop(0) # FPN forward x = super().forward(tuple(inputs)) for rfp_idx in range(self.rfp_steps - 1): rfp_feats = [x[0]] + list( self.rfp_aspp(x[i]) for i in range(1, len(x))) x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats) # FPN forward x_idx = super().forward(x_idx) x_new = [] for ft_idx in range(len(x_idx)): add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx])) x_new.append(add_weight * x_idx[ft_idx] + (1 - add_weight) * x[ft_idx]) x = x_new return x
4,592
34.604651
78
py
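A quick shape check for the ASPP module defined above (assuming the package is installed so the module is importable): the four branches are concatenated along the channel dimension, so the output carries len(dilations) * out_channels channels at the input resolution.

import torch
from mmdet.models.necks.rfp import ASPP  # the module defined above

aspp = ASPP(in_channels=256, out_channels=64)  # default dilations (1, 3, 6, 1)
x = torch.rand(2, 256, 32, 32)
y = aspp(x)
# Four branches of 64 channels each, concatenated along dim 1; the last
# (global-pooling) branch is expanded back to the spatial size of the others.
assert y.shape == (2, 4 * 64, 32, 32)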
GFocalV2
GFocalV2-master/mmdet/models/necks/pafpn.py
import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import auto_fp16 from ..builder import NECKS from .fpn import FPN @NECKS.register_module() class PAFPN(FPN): """Path Aggregation Network for Instance Segmentation. This is an implementation of the `PAFPN in Path Aggregation Network <https://arxiv.org/abs/1803.01534>`_. Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale) num_outs (int): Number of output scales. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool): Whether to add conv layers on top of the original feature maps. Default: False. extra_convs_on_inputs (bool): Whether to apply extra conv on the original feature from the backbone. Default: True. relu_before_extra_convs (bool): Whether to apply relu before the extra conv. Default: False. no_norm_on_lateral (bool): Whether to apply norm on lateral. Default: False. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. act_cfg (str): Config dict for activation layer in ConvModule. Default: None. """ def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None): super(PAFPN, self).__init__(in_channels, out_channels, num_outs, start_level, end_level, add_extra_convs, extra_convs_on_inputs, relu_before_extra_convs, no_norm_on_lateral, conv_cfg, norm_cfg, act_cfg) # add extra bottom up pathway self.downsample_convs = nn.ModuleList() self.pafpn_convs = nn.ModuleList() for i in range(self.start_level + 1, self.backbone_end_level): d_conv = ConvModule( out_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) pafpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.downsample_convs.append(d_conv) self.pafpn_convs.append(pafpn_conv) @auto_fp16() def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.in_channels) # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): prev_shape = laterals[i - 1].shape[2:] laterals[i - 1] += F.interpolate( laterals[i], size=prev_shape, mode='nearest') # build outputs # part 1: from original levels inter_outs = [ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) ] # part 2: add bottom-up path for i in range(0, used_backbone_levels - 1): inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i]) outs = [] outs.append(inter_outs[0]) outs.extend([ self.pafpn_convs[i - 1](inter_outs[i]) for i in range(1, used_backbone_levels) ]) # part 3: add extra levels if self.num_outs > len(outs): # use max pool to get more levels on top of outputs # (e.g., Faster R-CNN, Mask R-CNN) if not self.add_extra_convs: for i in range(self.num_outs - used_backbone_levels): outs.append(F.max_pool2d(outs[-1], 1, stride=2)) # add conv layers on top of original feature maps 
(RetinaNet) else: if self.extra_convs_on_inputs: orig = inputs[self.backbone_end_level - 1] outs.append(self.fpn_convs[used_backbone_levels](orig)) else: outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) for i in range(used_backbone_levels + 1, self.num_outs): if self.relu_before_extra_convs: outs.append(self.fpn_convs[i](F.relu(outs[-1]))) else: outs.append(self.fpn_convs[i](outs[-1])) return tuple(outs)
5,433
38.376812
79
py
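PAFPN's addition over plain FPN is the bottom-up pass in part 2 of forward above: each finer output is downsampled and added to the next coarser level before the final 3x3 convs. A toy sketch, with plain stride-2 pooling standing in for the stride-2 ConvModule the real code uses:

import torch
import torch.nn.functional as F

p3 = torch.rand(1, 256, 32, 32)  # finer level
p4 = torch.rand(1, 256, 16, 16)  # next coarser level

# Stand-in for self.downsample_convs[i] (a stride-2 3x3 ConvModule).
p4_aug = p4 + F.max_pool2d(p3, kernel_size=2, stride=2)
assert p4_aug.shape == p4.shape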
GFocalV2
GFocalV2-master/mmdet/models/necks/nasfcos_fpn.py
import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, caffe2_xavier_init from mmcv.ops.merge_cells import ConcatCell from ..builder import NECKS @NECKS.register_module() class NASFCOS_FPN(nn.Module): """FPN structure in NAS-FCOS. Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for Object Detection <https://arxiv.org/abs/1906.04423>`_ Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale) num_outs (int): Number of output scales. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 1. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool): It decides whether to add conv layers on top of the original feature maps. Default to False. conv_cfg (dict): dictionary to construct and config conv layer. norm_cfg (dict): dictionary to construct and config norm layer. """ def __init__(self, in_channels, out_channels, num_outs, start_level=1, end_level=-1, add_extra_convs=False, conv_cfg=None, norm_cfg=None): super(NASFCOS_FPN, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.norm_cfg = norm_cfg self.conv_cfg = conv_cfg if end_level == -1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: self.backbone_end_level = end_level assert end_level <= len(in_channels) assert num_outs == end_level - start_level self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs self.adapt_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): adapt_conv = ConvModule( in_channels[i], out_channels, 1, stride=1, padding=0, bias=False, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU', inplace=False)) self.adapt_convs.append(adapt_conv) # C2 is omitted according to the paper extra_levels = num_outs - self.backbone_end_level + self.start_level def build_concat_cell(with_input1_conv, with_input2_conv): cell_conv_cfg = dict( kernel_size=1, padding=0, bias=False, groups=out_channels) return ConcatCell( in_channels=out_channels, out_channels=out_channels, with_out_conv=True, out_conv_cfg=cell_conv_cfg, out_norm_cfg=dict(type='BN'), out_conv_order=('norm', 'act', 'conv'), with_input1_conv=with_input1_conv, with_input2_conv=with_input2_conv, input_conv_cfg=conv_cfg, input_norm_cfg=norm_cfg, upsample_mode='nearest') # Denote c3=f0, c4=f1, c5=f2 for convenience self.fpn = nn.ModuleDict() self.fpn['c22_1'] = build_concat_cell(True, True) self.fpn['c22_2'] = build_concat_cell(True, True) self.fpn['c32'] = build_concat_cell(True, False) self.fpn['c02'] = build_concat_cell(True, False) self.fpn['c42'] = build_concat_cell(True, True) self.fpn['c36'] = build_concat_cell(True, True) self.fpn['c61'] = build_concat_cell(True, True) # f9 self.extra_downsamples = nn.ModuleList() for i in range(extra_levels): extra_act_cfg = None if i == 0 \ else dict(type='ReLU', inplace=False) self.extra_downsamples.append( ConvModule( out_channels, out_channels, 3, stride=2, padding=1, act_cfg=extra_act_cfg, order=('act', 'norm', 'conv'))) def forward(self, inputs): """Forward function.""" feats = [ adapt_conv(inputs[i + self.start_level]) for i, adapt_conv in 
enumerate(self.adapt_convs) ] for (i, module_name) in enumerate(self.fpn): idx_1, idx_2 = int(module_name[1]), int(module_name[2]) res = self.fpn[module_name](feats[idx_1], feats[idx_2]) feats.append(res) ret = [] for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5 feats1, feats2 = feats[idx], feats[5] feats2_resize = F.interpolate( feats2, size=feats1.size()[2:], mode='bilinear', align_corners=False) feats_sum = feats1 + feats2_resize ret.append( F.interpolate( feats_sum, size=inputs[input_idx].size()[2:], mode='bilinear', align_corners=False)) for submodule in self.extra_downsamples: ret.append(submodule(ret[-1])) return tuple(ret) def init_weights(self): """Initialize the weights of module.""" for module in self.fpn.values(): if hasattr(module, 'out_conv'): caffe2_xavier_init(module.out_conv.conv) for modules in [ self.adapt_convs.modules(), self.extra_downsamples.modules() ]: for module in modules: if isinstance(module, nn.Conv2d): caffe2_xavier_init(module)
6,164
37.055556
79
py
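The forward pass above decodes each cell's two input indices from its name ('cXY...' connects feats[X] and feats[Y]) and appends every cell output back onto feats, so later cells can consume earlier cell outputs. A toy check of that naming convention (indices 0-2 are the adapted c3-c5 features, per the comment in the code):

# Cell names exactly as registered in the ModuleDict above.
names = ['c22_1', 'c22_2', 'c32', 'c02', 'c42', 'c36', 'c61']
edges = [(int(n[1]), int(n[2])) for n in names]

# feats starts as [f0, f1, f2]; each cell output is appended, so indices
# >= 3 in later names refer to previously produced cell outputs.
assert edges[0] == (2, 2) and edges[-1] == (6, 1)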
GFocalV2
GFocalV2-master/mmdet/models/necks/fpn_carafe.py
import torch.nn as nn from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init from mmcv.ops.carafe import CARAFEPack from ..builder import NECKS @NECKS.register_module() class FPN_CARAFE(nn.Module): """FPN_CARAFE is a more flexible implementation of FPN. It allows more choice for upsample methods during the top-down pathway. It can reproduce the performance of ICCV 2019 paper CARAFE: Content-Aware ReAssembly of FEatures Please refer to https://arxiv.org/abs/1905.02188 for more details. Args: in_channels (list[int]): Number of channels for each input feature map. out_channels (int): Output channels of feature pyramids. num_outs (int): Number of output stages. start_level (int): Start level of feature pyramids. (Default: 0) end_level (int): End level of feature pyramids. (Default: -1 indicates the last level). norm_cfg (dict): Dictionary to construct and config norm layer. act_cfg (dict): Config of activation layers in ConvModule (Default: None indicates w/o activation). order (dict): Order of components in ConvModule. upsample (str): Type of upsample layer. upsample_cfg (dict): Dictionary to construct and config upsample layer. """ def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, norm_cfg=None, act_cfg=None, order=('conv', 'norm', 'act'), upsample_cfg=dict( type='carafe', up_kernel=5, up_group=1, encoder_kernel=3, encoder_dilation=1)): super(FPN_CARAFE, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.with_bias = norm_cfg is None self.upsample_cfg = upsample_cfg.copy() self.upsample = self.upsample_cfg.get('type') self.relu = nn.ReLU(inplace=False) self.order = order assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')] assert self.upsample in [ 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None ] if self.upsample in ['deconv', 'pixel_shuffle']: assert hasattr( self.upsample_cfg, 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0 self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel') if end_level == -1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed self.backbone_end_level = end_level assert end_level <= len(in_channels) assert num_outs == end_level - start_level self.start_level = start_level self.end_level = end_level self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() self.upsample_modules = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, norm_cfg=norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, norm_cfg=self.norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) if i != self.backbone_end_level - 1: upsample_cfg_ = self.upsample_cfg.copy() if self.upsample == 'deconv': upsample_cfg_.update( in_channels=out_channels, out_channels=out_channels, kernel_size=self.upsample_kernel, stride=2, padding=(self.upsample_kernel - 1) // 2, output_padding=(self.upsample_kernel - 1) // 2) elif self.upsample == 'pixel_shuffle': upsample_cfg_.update( in_channels=out_channels, out_channels=out_channels, scale_factor=2, upsample_kernel=self.upsample_kernel) elif self.upsample == 'carafe': 
upsample_cfg_.update(channels=out_channels, scale_factor=2) else: # suppress warnings align_corners = (None if self.upsample == 'nearest' else False) upsample_cfg_.update( scale_factor=2, mode=self.upsample, align_corners=align_corners) upsample_module = build_upsample_layer(upsample_cfg_) self.upsample_modules.append(upsample_module) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) extra_out_levels = ( num_outs - self.backbone_end_level + self.start_level) if extra_out_levels >= 1: for i in range(extra_out_levels): in_channels = ( self.in_channels[self.backbone_end_level - 1] if i == 0 else out_channels) extra_l_conv = ConvModule( in_channels, out_channels, 3, stride=2, padding=1, norm_cfg=norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) if self.upsample == 'deconv': upsampler_cfg_ = dict( in_channels=out_channels, out_channels=out_channels, kernel_size=self.upsample_kernel, stride=2, padding=(self.upsample_kernel - 1) // 2, output_padding=(self.upsample_kernel - 1) // 2) elif self.upsample == 'pixel_shuffle': upsampler_cfg_ = dict( in_channels=out_channels, out_channels=out_channels, scale_factor=2, upsample_kernel=self.upsample_kernel) elif self.upsample == 'carafe': upsampler_cfg_ = dict( channels=out_channels, scale_factor=2, **self.upsample_cfg) else: # suppress warnings align_corners = (None if self.upsample == 'nearest' else False) upsampler_cfg_ = dict( scale_factor=2, mode=self.upsample, align_corners=align_corners) upsampler_cfg_['type'] = self.upsample upsample_module = build_upsample_layer(upsampler_cfg_) extra_fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, norm_cfg=self.norm_cfg, bias=self.with_bias, act_cfg=act_cfg, inplace=False, order=self.order) self.upsample_modules.append(upsample_module) self.fpn_convs.append(extra_fpn_conv) self.lateral_convs.append(extra_l_conv) # default init_weights for conv(msra) and norm in ConvModule def init_weights(self): """Initialize the weights of module.""" for m in self.modules(): if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): xavier_init(m, distribution='uniform') for m in self.modules(): if isinstance(m, CARAFEPack): m.init_weights() def slice_as(self, src, dst): """Slice ``src`` as ``dst`` Note: ``src`` should have the same or larger size than ``dst``. Args: src (torch.Tensor): Tensors to be sliced. dst (torch.Tensor): ``src`` will be sliced to have the same size as ``dst``. Returns: torch.Tensor: Sliced tensor. 
""" assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3)) if src.size(2) == dst.size(2) and src.size(3) == dst.size(3): return src else: return src[:, :, :dst.size(2), :dst.size(3)] def tensor_add(self, a, b): """Add tensors ``a`` and ``b`` that might have different sizes.""" if a.size() == b.size(): c = a + b else: c = a + self.slice_as(b, a) return c def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.in_channels) # build laterals laterals = [] for i, lateral_conv in enumerate(self.lateral_convs): if i <= self.backbone_end_level - self.start_level: input = inputs[min(i + self.start_level, len(inputs) - 1)] else: input = laterals[-1] lateral = lateral_conv(input) laterals.append(lateral) # build top-down path for i in range(len(laterals) - 1, 0, -1): if self.upsample is not None: upsample_feat = self.upsample_modules[i - 1](laterals[i]) else: upsample_feat = laterals[i] laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat) # build outputs num_conv_outs = len(self.fpn_convs) outs = [] for i in range(num_conv_outs): out = self.fpn_convs[i](laterals[i]) outs.append(out) return tuple(outs)
10,671
38.820896
79
py
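The slice_as/tensor_add helpers above exist to absorb the off-by-one size mismatches that stride-2 upsampling produces on odd-sized feature maps; the trick in isolation, with made-up shapes:

import torch

a = torch.rand(1, 8, 25, 25)  # lateral feature
b = torch.rand(1, 8, 26, 26)  # e.g. a 2x upsample of a 13x13 map

# tensor_add slices the larger tensor down to the smaller one before adding.
c = a + b[:, :, :a.size(2), :a.size(3)]
assert c.shape == a.shape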
GFocalV2
GFocalV2-master/mmdet/models/necks/fpn.py
import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, xavier_init from mmcv.runner import auto_fp16 from ..builder import NECKS @NECKS.register_module() class FPN(nn.Module): r"""Feature Pyramid Network. This is an implementation of paper `Feature Pyramid Networks for Object Detection <https://arxiv.org/abs/1612.03144>`_. Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale) num_outs (int): Number of output scales. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool | str): If bool, it decides whether to add conv layers on top of the original feature maps. Default to False. If True, its actual mode is specified by `extra_convs_on_inputs`. If str, it specifies the source feature map of the extra convs. Only the following options are allowed - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - 'on_lateral': Last feature map after lateral convs. - 'on_output': The last output feature map after fpn convs. extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs on the original feature from the backbone. If True, it is equivalent to `add_extra_convs='on_input'`. If False, it is equivalent to set `add_extra_convs='on_output'`. Default to True. relu_before_extra_convs (bool): Whether to apply relu before the extra conv. Default: False. no_norm_on_lateral (bool): Whether to apply norm on lateral. Default: False. conv_cfg (dict): Config dict for convolution layer. Default: None. norm_cfg (dict): Config dict for normalization layer. Default: None. act_cfg (str): Config dict for activation layer in ConvModule. Default: None. upsample_cfg (dict): Config dict for interpolate layer. Default: `dict(mode='nearest')` Example: >>> import torch >>> in_channels = [2, 3, 5, 7] >>> scales = [340, 170, 84, 43] >>> inputs = [torch.rand(1, c, s, s) ... for c, s in zip(in_channels, scales)] >>> self = FPN(in_channels, 11, len(in_channels)).eval() >>> outputs = self.forward(inputs) >>> for i in range(len(outputs)): ... 
print(f'outputs[{i}].shape = {outputs[i].shape}') outputs[0].shape = torch.Size([1, 11, 340, 340]) outputs[1].shape = torch.Size([1, 11, 170, 170]) outputs[2].shape = torch.Size([1, 11, 84, 84]) outputs[3].shape = torch.Size([1, 11, 43, 43]) """ def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg=dict(mode='nearest')): super(FPN, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.relu_before_extra_convs = relu_before_extra_convs self.no_norm_on_lateral = no_norm_on_lateral self.fp16_enabled = False self.upsample_cfg = upsample_cfg.copy() if end_level == -1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed self.backbone_end_level = end_level assert end_level <= len(in_channels) assert num_outs == end_level - start_level self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs assert isinstance(add_extra_convs, (str, bool)) if isinstance(add_extra_convs, str): # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') elif add_extra_convs: # True if extra_convs_on_inputs: # For compatibility with previous release # TODO: deprecate `extra_convs_on_inputs` self.add_extra_convs = 'on_input' else: self.add_extra_convs = 'on_output' self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, act_cfg=act_cfg, inplace=False) fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) extra_levels = num_outs - self.backbone_end_level + self.start_level if self.add_extra_convs and extra_levels >= 1: for i in range(extra_levels): if i == 0 and self.add_extra_convs == 'on_input': in_channels = self.in_channels[self.backbone_end_level - 1] else: in_channels = out_channels extra_fpn_conv = ConvModule( in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False) self.fpn_convs.append(extra_fpn_conv) # default init_weights for conv(msra) and norm in ConvModule def init_weights(self): """Initialize the weights of FPN module.""" for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') @auto_fp16() def forward(self, inputs): """Forward function.""" assert len(inputs) == len(self.in_channels) # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): # In some cases, fixing `scale factor` (e.g. 2) is preferred, but # it cannot co-exist with `size` in `F.interpolate`. 
if 'scale_factor' in self.upsample_cfg: laterals[i - 1] += F.interpolate(laterals[i], **self.upsample_cfg) else: prev_shape = laterals[i - 1].shape[2:] laterals[i - 1] += F.interpolate( laterals[i], size=prev_shape, **self.upsample_cfg) # build outputs # part 1: from original levels outs = [ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) ] # part 2: add extra levels if self.num_outs > len(outs): # use max pool to get more levels on top of outputs # (e.g., Faster R-CNN, Mask R-CNN) if not self.add_extra_convs: for i in range(self.num_outs - used_backbone_levels): outs.append(F.max_pool2d(outs[-1], 1, stride=2)) # add conv layers on top of original feature maps (RetinaNet) else: if self.add_extra_convs == 'on_input': extra_source = inputs[self.backbone_end_level - 1] elif self.add_extra_convs == 'on_lateral': extra_source = laterals[-1] elif self.add_extra_convs == 'on_output': extra_source = outs[-1] else: raise NotImplementedError outs.append(self.fpn_convs[used_backbone_levels](extra_source)) for i in range(used_backbone_levels + 1, self.num_outs): if self.relu_before_extra_convs: outs.append(self.fpn_convs[i](F.relu(outs[-1]))) else: outs.append(self.fpn_convs[i](outs[-1])) return tuple(outs)
9,282
41.778802
79
py
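The heart of FPN.forward above is one nearest-neighbour upsample-and-add per level of the top-down pathway; isolated as a toy example (the code passes an explicit size because odd feature sizes make a fixed scale_factor of 2 unreliable):

import torch
import torch.nn.functional as F

# Two adjacent lateral features: coarse (P5-like) and fine (P4-like).
coarse = torch.rand(1, 256, 13, 13)
fine = torch.rand(1, 256, 25, 25)

# Upsample to the exact finer size, then add, exactly as in the loop above.
fused = fine + F.interpolate(coarse, size=fine.shape[2:], mode='nearest')
assert fused.shape == fine.shape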
GFocalV2
GFocalV2-master/mmdet/models/necks/nas_fpn.py
import torch.nn as nn from mmcv.cnn import ConvModule, caffe2_xavier_init from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell from ..builder import NECKS @NECKS.register_module() class NASFPN(nn.Module): """NAS-FPN. Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection <https://arxiv.org/abs/1904.07392>`_ Args: in_channels (List[int]): Number of input channels per scale. out_channels (int): Number of output channels (used at each scale) num_outs (int): Number of output scales. stack_times (int): The number of times the pyramid architecture will be stacked. start_level (int): Index of the start input backbone level used to build the feature pyramid. Default: 0. end_level (int): Index of the end input backbone level (exclusive) to build the feature pyramid. Default: -1, which means the last level. add_extra_convs (bool): It decides whether to add conv layers on top of the original feature maps. Default to False. """ def __init__(self, in_channels, out_channels, num_outs, stack_times, start_level=0, end_level=-1, add_extra_convs=False, norm_cfg=None): super(NASFPN, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) # num of input feature levels self.num_outs = num_outs # num of output feature levels self.stack_times = stack_times self.norm_cfg = norm_cfg if end_level == -1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed self.backbone_end_level = end_level assert end_level <= len(in_channels) assert num_outs == end_level - start_level self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs # add lateral connections self.lateral_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.lateral_convs.append(l_conv) # add extra downsample layers (stride-2 pooling or conv) extra_levels = num_outs - self.backbone_end_level + self.start_level self.extra_downsamples = nn.ModuleList() for i in range(extra_levels): extra_conv = ConvModule( out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) self.extra_downsamples.append( nn.Sequential(extra_conv, nn.MaxPool2d(2, 2))) # add NAS FPN connections self.fpn_stages = nn.ModuleList() for _ in range(self.stack_times): stage = nn.ModuleDict() # gp(p6, p4) -> p4_1 stage['gp_64_4'] = GlobalPoolingCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p4_1, p4) -> p4_2 stage['sum_44_4'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p4_2, p3) -> p3_out stage['sum_43_3'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p3_out, p4_2) -> p4_out stage['sum_34_4'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p5, gp(p4_out, p3_out)) -> p5_out stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False) stage['sum_55_5'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # sum(p7, gp(p5_out, p4_2)) -> p7_out stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False) stage['sum_77_7'] = SumCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) # gp(p7_out, p5_out) -> p6_out 
stage['gp_75_6'] = GlobalPoolingCell( in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg) self.fpn_stages.append(stage) def init_weights(self): """Initialize the weights of module.""" for m in self.modules(): if isinstance(m, nn.Conv2d): caffe2_xavier_init(m) def forward(self, inputs): """Forward function.""" # build P3-P5 feats = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build P6-P7 on top of P5 for downsample in self.extra_downsamples: feats.append(downsample(feats[-1])) p3, p4, p5, p6, p7 = feats for stage in self.fpn_stages: # gp(p6, p4) -> p4_1 p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:]) # sum(p4_1, p4) -> p4_2 p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:]) # sum(p4_2, p3) -> p3_out p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:]) # sum(p3_out, p4_2) -> p4_out p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:]) # sum(p5, gp(p4_out, p3_out)) -> p5_out p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:]) p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:]) # sum(p7, gp(p5_out, p4_2)) -> p7_out p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:]) p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:]) # gp(p7_out, p5_out) -> p6_out p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:]) return p3, p4, p5, p6, p7
6,539
39.621118
79
py
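A quick smoke test makes the stacked merge-cell topology of the NASFPN record above concrete. This is a minimal sketch, not part of the repository: the import path `mmdet.models.necks`, the channel widths, and the 256x256 input size are assumptions chosen for illustration, and it presumes the mmcv merge cells imported by the module are available.

import torch
from mmdet.models.necks import NASFPN  # assumed export, as in stock mmdet

# Three backbone levels (C3-C5 of a hypothetical 256x256 image, strides 8/16/32).
in_channels = [256, 512, 1024]
feats = [
    torch.rand(1, c, 256 // s, 256 // s)
    for c, s in zip(in_channels, [8, 16, 32])
]

neck = NASFPN(
    in_channels=in_channels,
    out_channels=256,
    num_outs=5,      # P3-P7; P6/P7 come from the extra downsample branch
    stack_times=3)   # the searched merge-cell stage is repeated 3 times
neck.init_weights()

outs = neck(feats)
for i, out in enumerate(outs):
    print(f'P{i + 3}: {tuple(out.shape)}')  # all levels have 256 channels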
GFocalV2
GFocalV2-master/mmdet/models/necks/bfp.py
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, xavier_init
from mmcv.cnn.bricks import NonLocal2d

from ..builder import NECKS


@NECKS.register_module()
class BFP(nn.Module):
    """BFP (Balanced Feature Pyramids)

    BFP takes multi-level features as inputs, gathers them into a single
    one, then refines the gathered feature and scatters the refined results
    back to the multi-level features. This module is used in Libra R-CNN
    (CVPR 2019), see the paper `Libra R-CNN: Towards Balanced Learning for
    Object Detection <https://arxiv.org/abs/1904.02701>`_ for details.

    Args:
        in_channels (int): Number of input channels (feature maps of all
            levels should have the same channels).
        num_levels (int): Number of input feature levels.
        conv_cfg (dict): The config dict for convolution layers.
        norm_cfg (dict): The config dict for normalization layers.
        refine_level (int): Index of integration and refine level of BSF in
            multi-level features from bottom to top.
        refine_type (str): Type of the refine op, currently support
            [None, 'conv', 'non_local'].
    """

    def __init__(self,
                 in_channels,
                 num_levels,
                 refine_level=2,
                 refine_type=None,
                 conv_cfg=None,
                 norm_cfg=None):
        super(BFP, self).__init__()
        assert refine_type in [None, 'conv', 'non_local']

        self.in_channels = in_channels
        self.num_levels = num_levels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        self.refine_level = refine_level
        self.refine_type = refine_type
        assert 0 <= self.refine_level < self.num_levels

        if self.refine_type == 'conv':
            self.refine = ConvModule(
                self.in_channels,
                self.in_channels,
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
        elif self.refine_type == 'non_local':
            self.refine = NonLocal2d(
                self.in_channels,
                reduction=1,
                use_scale=False,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)

    def init_weights(self):
        """Initialize the weights of FPN module."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs)
3,745
34.67619
79
py
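The gather-refine-scatter flow of BFP is easy to verify on random tensors. A minimal sketch, assuming the usual mmdet package layout and the 'conv' refine op (the 'non_local' variant is used the same way); the five level sizes are made up:

import torch
from mmdet.models.necks import BFP

# Five pyramid levels sharing one channel width, e.g. P2-P6 of a 256-ch FPN.
feats = [torch.rand(1, 256, s, s) for s in [64, 32, 16, 8, 4]]

# gather at level 2 (16x16), refine with a 3x3 conv, scatter residuals back
bfp = BFP(in_channels=256, num_levels=5, refine_level=2, refine_type='conv')
bfp.init_weights()

outs = bfp(feats)
# each output keeps its input's shape; only the content is rebalanced
assert all(out.shape == feat.shape for out, feat in zip(outs, feats))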
GFocalV2
GFocalV2-master/mmdet/models/necks/yolo_neck.py
# Copyright (c) 2019 Western Digital Corporation or its affiliates.

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule

from ..builder import NECKS


class DetectionBlock(nn.Module):
    """Detection block in YOLO neck.

    Let out_channels = n; the DetectionBlock contains:
    Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.
    The first 6 ConvLayers are formed as follows:
        1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.
    The Conv2D layer is 1x1x255.
    Some blocks have a branch after the fifth ConvLayer.
    The input channel is arbitrary (in_channels).

    Args:
        in_channels (int): The number of input channels.
        out_channels (int): The number of output channels.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
        super(DetectionBlock, self).__init__()
        double_out_channels = out_channels * 2

        # shortcut
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
        self.conv2 = ConvModule(
            out_channels, double_out_channels, 3, padding=1, **cfg)
        self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
        self.conv4 = ConvModule(
            out_channels, double_out_channels, 3, padding=1, **cfg)
        self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)

    def forward(self, x):
        tmp = self.conv1(x)
        tmp = self.conv2(tmp)
        tmp = self.conv3(tmp)
        tmp = self.conv4(tmp)
        out = self.conv5(tmp)
        return out


@NECKS.register_module()
class YOLOV3Neck(nn.Module):
    """The neck of YOLOV3.

    It can be treated as a simplified version of FPN. It takes the result
    from the Darknet backbone, performs some upsampling and concatenation,
    and finally outputs the detection result.

    Note:
        The input feats should be ordered from low level to high level
        (i.e., feats[-1] is the deepest feature), but YOLOV3Neck processes
        them in reversed order, starting from the last (highest-level)
        feature.

    Args:
        num_scales (int): The number of scales / stages.
        in_channels (List[int]): The number of input channels per scale.
        out_channels (List[int]): The number of output channels per scale.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
    """

    def __init__(self,
                 num_scales,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
        super(YOLOV3Neck, self).__init__()
        assert (num_scales == len(in_channels) == len(out_channels))
        self.num_scales = num_scales
        self.in_channels = in_channels
        self.out_channels = out_channels

        # shortcut
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # To support arbitrary scales, the code looks awful, but it works.
        # A better solution is welcome.
        self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
        for i in range(1, self.num_scales):
            in_c, out_c = self.in_channels[i], self.out_channels[i]
            self.add_module(f'conv{i}', ConvModule(in_c, out_c, 1, **cfg))
            # in_c + out_c : High-lvl feats will be cat with low-lvl feats
            self.add_module(f'detect{i+1}',
                            DetectionBlock(in_c + out_c, out_c, **cfg))

    def forward(self, feats):
        assert len(feats) == self.num_scales

        # processed from bottom (high-lvl) to top (low-lvl)
        outs = []
        out = self.detect1(feats[-1])
        outs.append(out)

        for i, x in enumerate(reversed(feats[:-1])):
            conv = getattr(self, f'conv{i+1}')
            tmp = conv(out)

            # Cat with low-lvl feats
            tmp = F.interpolate(tmp, scale_factor=2)
            tmp = torch.cat((tmp, x), 1)

            detect = getattr(self, f'detect{i+2}')
            out = detect(tmp)
            outs.append(out)

        return tuple(outs)

    def init_weights(self):
        """Initialize the weights of module."""
        # init is done in ConvModule
        pass
5,089
36.153285
77
py
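To make the reversed processing order concrete, the sketch below feeds Darknet-53-like features (C3, C4, C5) for a hypothetical 416x416 image; the shapes and the import path are assumptions for illustration:

import torch
from mmdet.models.necks import YOLOV3Neck

# Darknet-53-style outputs (C3, C4, C5), low level to high level.
feats = [
    torch.rand(1, 256, 52, 52),
    torch.rand(1, 512, 26, 26),
    torch.rand(1, 1024, 13, 13),
]

neck = YOLOV3Neck(
    num_scales=3,
    in_channels=[1024, 512, 256],   # matched against feats back to front
    out_channels=[512, 256, 128])

outs = neck(feats)
for out in outs:
    # (1, 512, 13, 13), (1, 256, 26, 26), (1, 128, 52, 52): deepest first
    print(tuple(out.shape))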
GFocalV2
GFocalV2-master/mmdet/models/necks/channel_mapper.py
import torch.nn as nn
from mmcv.cnn import ConvModule, xavier_init

from ..builder import NECKS


@NECKS.register_module()
class ChannelMapper(nn.Module):
    r"""Channel Mapper to reduce/increase channels of backbone features.

    This is used to reduce/increase channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        kernel_size (int, optional): kernel_size for reducing channels (used
            at each scale). Default: 3.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict, optional): Config dict for normalization layer.
            Default: None.
        act_cfg (dict, optional): Config dict for activation layer in
            ConvModule. Default: dict(type='ReLU').

    Example:
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [340, 170, 84, 43]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = ChannelMapper(in_channels, 11, 3).eval()
        >>> outputs = self.forward(inputs)
        >>> for i in range(len(outputs)):
        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
        outputs[0].shape = torch.Size([1, 11, 340, 340])
        outputs[1].shape = torch.Size([1, 11, 170, 170])
        outputs[2].shape = torch.Size([1, 11, 84, 84])
        outputs[3].shape = torch.Size([1, 11, 43, 43])
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU')):
        super(ChannelMapper, self).__init__()
        assert isinstance(in_channels, list)

        self.convs = nn.ModuleList()
        for in_channel in in_channels:
            self.convs.append(
                ConvModule(
                    in_channel,
                    out_channels,
                    kernel_size,
                    padding=(kernel_size - 1) // 2,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # default init_weights for conv(msra) and norm in ConvModule

    def init_weights(self):
        """Initialize the weights of ChannelMapper module."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == len(self.convs)
        outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
        return tuple(outs)
2,765
35.88
76
py
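The docstring above already shows direct instantiation; since the class is registered in NECKS, it can equally be built from a config dict, which is how a config file would wire it up. A minimal sketch, assuming `build_neck` is exported from `mmdet.models` as in stock mmdet; the channels and sizes are made up:

import torch
from mmdet.models import build_neck  # assumed export, as in stock mmdet

# Built through the NECKS registry, mirroring a config-file entry.
neck_cfg = dict(
    type='ChannelMapper',
    in_channels=[256, 512, 1024],
    out_channels=256,
    kernel_size=3)
neck = build_neck(neck_cfg)

feats = [
    torch.rand(1, c, s, s)
    for c, s in zip([256, 512, 1024], [32, 16, 8])
]
outs = neck(feats)
assert all(out.shape[1] == 256 for out in outs)  # channels unified, sizes kept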
GFocalV2
GFocalV2-master/mmdet/models/necks/hrfpn.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, caffe2_xavier_init
from torch.utils.checkpoint import checkpoint

from ..builder import NECKS


@NECKS.register_module()
class HRFPN(nn.Module):
    """HRFPN (High Resolution Feature Pyramids)

    paper: `High-Resolution Representations for Labeling Pixels and Regions
    <https://arxiv.org/abs/1904.04514>`_.

    Args:
        in_channels (list): number of channels for each branch.
        out_channels (int): output channels of feature pyramids.
        num_outs (int): number of output stages.
        pooling_type (str): pooling type for generating feature pyramids,
            one of {'MAX', 'AVG'}.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        stride (int): stride of 3x3 convolutional layers
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs=5,
                 pooling_type='AVG',
                 conv_cfg=None,
                 norm_cfg=None,
                 with_cp=False,
                 stride=1):
        super(HRFPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        self.reduction_conv = ConvModule(
            sum(in_channels),
            out_channels,
            kernel_size=1,
            conv_cfg=self.conv_cfg,
            act_cfg=None)

        self.fpn_convs = nn.ModuleList()
        for i in range(self.num_outs):
            self.fpn_convs.append(
                ConvModule(
                    out_channels,
                    out_channels,
                    kernel_size=3,
                    padding=1,
                    stride=stride,
                    conv_cfg=self.conv_cfg,
                    act_cfg=None))

        if pooling_type == 'MAX':
            self.pooling = F.max_pool2d
        else:
            self.pooling = F.avg_pool2d

    def init_weights(self):
        """Initialize the weights of module."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                caffe2_xavier_init(m)

    def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_ins
        outs = [inputs[0]]
        for i in range(1, self.num_ins):
            outs.append(
                F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
        out = torch.cat(outs, dim=1)
        if out.requires_grad and self.with_cp:
            out = checkpoint(self.reduction_conv, out)
        else:
            out = self.reduction_conv(out)
        outs = [out]
        for i in range(1, self.num_outs):
            outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
        outputs = []

        for i in range(self.num_outs):
            if outs[i].requires_grad and self.with_cp:
                tmp_out = checkpoint(self.fpn_convs[i], outs[i])
            else:
                tmp_out = self.fpn_convs[i](outs[i])
            outputs.append(tmp_out)
        return tuple(outputs)
3,481
32.805825
79
py
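A minimal sketch of HRFPN on HRNet-like inputs: the four branch widths follow the HRNetV2p-W18 convention, but the batch size and spatial sizes are made up for illustration, and the import path is assumed from the usual mmdet layout:

import torch
from mmdet.models.necks import HRFPN

in_channels = [18, 36, 72, 144]  # HRNetV2p-W18 branch widths
# each deeper branch halves the resolution of the previous one
feats = [
    torch.rand(1, c, 64 // 2**i, 64 // 2**i)
    for i, c in enumerate(in_channels)
]

neck = HRFPN(in_channels=in_channels, out_channels=256, num_outs=5)
neck.init_weights()

outs = neck(feats)
for out in outs:
    print(tuple(out.shape))  # 256 channels, from 64x64 down to 4x4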
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/nasfcos_head.py
import copy

import torch.nn as nn
from mmcv.cnn import (ConvModule, Scale, bias_init_with_prob,
                      caffe2_xavier_init, normal_init)

from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS


@HEADS.register_module()
class NASFCOSHead(FCOSHead):
    """Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.

    It is quite similar to the FCOS head, except for the searched structure
    of the classification branch and bbox regression branch, where a
    structure of "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
    """

    def _init_layers(self):
        """Initialize layers of the head."""
        dconv3x3_config = dict(
            type='DCNv2',
            kernel_size=3,
            use_bias=True,
            deform_groups=2,
            padding=1)
        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
        conv1x1_config = dict(type='Conv', kernel_size=1)

        self.arch_config = [
            dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
        ]
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i, op_ in enumerate(self.arch_config):
            op = copy.deepcopy(op_)
            chn = self.in_channels if i == 0 else self.feat_channels
            assert isinstance(op, dict)
            use_bias = op.pop('use_bias', False)
            padding = op.pop('padding', 0)
            kernel_size = op.pop('kernel_size')
            module = ConvModule(
                chn,
                self.feat_channels,
                kernel_size,
                stride=1,
                padding=padding,
                norm_cfg=self.norm_cfg,
                bias=use_bias,
                conv_cfg=op)

            self.cls_convs.append(copy.deepcopy(module))
            self.reg_convs.append(copy.deepcopy(module))

        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)

        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

    def init_weights(self):
        """Initialize weights of the head."""
        # retinanet_bias_init
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_reg, std=0.01)
        normal_init(self.conv_centerness, std=0.01)
        normal_init(self.conv_cls, std=0.01, bias=bias_cls)

        for branch in [self.cls_convs, self.reg_convs]:
            for module in branch.modules():
                if isinstance(module, ConvModule) \
                        and isinstance(module.conv, nn.Conv2d):
                    caffe2_xavier_init(module.conv)
2,793
35.763158
78
py
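A construction-only sketch of the head above; a forward pass is omitted because the DCNv2 ops built into the searched branches typically require a CUDA-enabled mmcv. The import path and the arguments are assumptions for illustration:

import torch
from mmdet.models.dense_heads import NASFCOSHead

# Layers are built on CPU; calling the head would exercise the DCNv2 ops.
head = NASFCOSHead(num_classes=80, in_channels=256)
head.init_weights()

# each branch follows the searched "dconv3x3, conv3x3, dconv3x3, conv1x1"
print([type(m.conv).__name__ for m in head.cls_convs])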
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/reppoints_head.py
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmcv.ops import DeformConv2d

from mmdet.core import (PointGenerator, build_assigner, build_sampler,
                        images_to_levels, multi_apply, multiclass_nms, unmap)
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead


@HEADS.register_module()
class RepPointsHead(AnchorFreeHead):
    """RepPoint head.

    Args:
        point_feat_channels (int): Number of channels of points features.
        gradient_mul (float): The multiplier to gradients from
            points refinement and recognition.
        point_strides (Iterable): points strides.
        point_base_scale (int): bbox scale for assigning labels.
        loss_cls (dict): Config of classification loss.
        loss_bbox_init (dict): Config of initial points loss.
        loss_bbox_refine (dict): Config of points loss in refinement.
        use_grid_points (bool): If we use bounding box representation, the
            reppoints is represented as grid points on the bounding box.
        center_init (bool): Whether to use center point assignment.
        transform_method (str): The methods to transform RepPoints to bbox.
    """  # noqa: W605

    def __init__(self,
                 num_classes,
                 in_channels,
                 point_feat_channels=256,
                 num_points=9,
                 gradient_mul=0.1,
                 point_strides=[8, 16, 32, 64, 128],
                 point_base_scale=4,
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox_init=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                 loss_bbox_refine=dict(
                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
                 use_grid_points=False,
                 center_init=True,
                 transform_method='moment',
                 moment_mul=0.01,
                 **kwargs):
        self.num_points = num_points
        self.point_feat_channels = point_feat_channels
        self.use_grid_points = use_grid_points
        self.center_init = center_init

        # we use deform conv to extract points features
        self.dcn_kernel = int(np.sqrt(num_points))
        self.dcn_pad = int((self.dcn_kernel - 1) / 2)
        assert self.dcn_kernel * self.dcn_kernel == num_points, \
            'The points number should be a square number.'
        assert self.dcn_kernel % 2 == 1, \
            'The points number should be an odd square number.'
        dcn_base = np.arange(-self.dcn_pad,
                             self.dcn_pad + 1).astype(np.float64)
        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
            (-1))
        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)

        super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)

        self.gradient_mul = gradient_mul
        self.point_base_scale = point_base_scale
        self.point_strides = point_strides
        self.point_generators = [PointGenerator() for _ in self.point_strides]

        self.sampling = loss_cls['type'] not in ['FocalLoss']
        if self.train_cfg:
            self.init_assigner = build_assigner(self.train_cfg.init.assigner)
            self.refine_assigner = build_assigner(
                self.train_cfg.refine.assigner)
            # use PseudoSampler when sampling is False
            if self.sampling and hasattr(self.train_cfg, 'sampler'):
                sampler_cfg = self.train_cfg.sampler
            else:
                sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        self.transform_method = transform_method
        if self.transform_method == 'moment':
            self.moment_transfer = nn.Parameter(
                data=torch.zeros(2), requires_grad=True)
            self.moment_mul = moment_mul

        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        if self.use_sigmoid_cls:
            self.cls_out_channels = self.num_classes
        else:
            self.cls_out_channels = self.num_classes + 1
        self.loss_bbox_init = build_loss(loss_bbox_init)
        self.loss_bbox_refine = build_loss(loss_bbox_refine)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
        self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
                                               self.point_feat_channels,
                                               self.dcn_kernel, 1,
                                               self.dcn_pad)
        self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
                                           self.cls_out_channels, 1, 1, 0)
        self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
                                                 self.point_feat_channels, 3,
                                                 1, 1)
        self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
                                                pts_out_dim, 1, 1, 0)
        self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
                                                      self.point_feat_channels,
                                                      self.dcn_kernel, 1,
                                                      self.dcn_pad)
        self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
                                                  pts_out_dim, 1, 1, 0)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.reppoints_cls_conv, std=0.01)
        normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
        normal_init(self.reppoints_pts_init_conv, std=0.01)
        normal_init(self.reppoints_pts_init_out, std=0.01)
        normal_init(self.reppoints_pts_refine_conv, std=0.01)
        normal_init(self.reppoints_pts_refine_out, std=0.01)

    def points2bbox(self, pts, y_first=True):
        """Convert the points set into bounding box.

        :param pts: the input points sets (fields), each points
            set (fields) is represented as 2n scalar.
        :param y_first: if y_first=True, the point set is represented as
            [y1, x1, y2, x2 ... yn, xn], otherwise the point set is
            represented as [x1, y1, x2, y2 ... xn, yn].
        :return: each points set is converted to a bbox [x1, y1, x2, y2].
        """
        pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
        pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
                                                                      ...]
        pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
                                                                      ...]
        if self.transform_method == 'minmax':
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_up = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
                             dim=1)
        elif self.transform_method == 'partial_minmax':
            pts_y = pts_y[:, :4, ...]
            pts_x = pts_x[:, :4, ...]
            bbox_left = pts_x.min(dim=1, keepdim=True)[0]
            bbox_right = pts_x.max(dim=1, keepdim=True)[0]
            bbox_up = pts_y.min(dim=1, keepdim=True)[0]
            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
                             dim=1)
        elif self.transform_method == 'moment':
            pts_y_mean = pts_y.mean(dim=1, keepdim=True)
            pts_x_mean = pts_x.mean(dim=1, keepdim=True)
            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
            moment_transfer = (self.moment_transfer * self.moment_mul) + (
                self.moment_transfer.detach() * (1 - self.moment_mul))
            moment_width_transfer = moment_transfer[0]
            moment_height_transfer = moment_transfer[1]
            half_width = pts_x_std * torch.exp(moment_width_transfer)
            half_height = pts_y_std * torch.exp(moment_height_transfer)
            bbox = torch.cat([
                pts_x_mean - half_width, pts_y_mean - half_height,
                pts_x_mean + half_width, pts_y_mean + half_height
            ],
                             dim=1)
        else:
            raise NotImplementedError
        return bbox

    def gen_grid_from_reg(self, reg, previous_boxes):
        """Based on the previous bboxes and regression values, we compute the
        regressed bboxes and generate the grids on the bboxes.

        :param reg: the regression value to previous bboxes.
        :param previous_boxes: previous bboxes.
        :return: generate grids on the regressed bboxes.
        """
        b, _, h, w = reg.shape
        bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
        bwh = (previous_boxes[:, 2:, ...] -
               previous_boxes[:, :2, ...]).clamp(min=1e-6)
        grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
            reg[:, 2:, ...])
        grid_wh = bwh * torch.exp(reg[:, 2:, ...])
        grid_left = grid_topleft[:, [0], ...]
        grid_top = grid_topleft[:, [1], ...]
        grid_width = grid_wh[:, [0], ...]
        grid_height = grid_wh[:, [1], ...]
        intervel = torch.linspace(0., 1., self.dcn_kernel).view(
            1, self.dcn_kernel, 1, 1).type_as(reg)
        grid_x = grid_left + grid_width * intervel
        grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
        grid_x = grid_x.view(b, -1, h, w)
        grid_y = grid_top + grid_height * intervel
        grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
        grid_y = grid_y.view(b, -1, h, w)
        grid_yx = torch.stack([grid_y, grid_x], dim=2)
        grid_yx = grid_yx.view(b, -1, h, w)
        regressed_bbox = torch.cat([
            grid_left, grid_top, grid_left + grid_width,
            grid_top + grid_height
        ], 1)
        return grid_yx, regressed_bbox

    def forward(self, feats):
        return multi_apply(self.forward_single, feats)

    def forward_single(self, x):
        """Forward feature map of a single FPN level."""
        dcn_base_offset = self.dcn_base_offset.type_as(x)
        # If we use center_init, the initial reppoints is from center points.
        # If we use bounding bbox representation, the initial reppoints is
        #   from regular grid placed on a pre-defined bbox.
        if self.use_grid_points or not self.center_init:
            scale = self.point_base_scale / 2
            points_init = dcn_base_offset / dcn_base_offset.max() * scale
            bbox_init = x.new_tensor([-scale, -scale, scale,
                                      scale]).view(1, 4, 1, 1)
        else:
            points_init = 0
        cls_feat = x
        pts_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            pts_feat = reg_conv(pts_feat)
        # initialize reppoints
        pts_out_init = self.reppoints_pts_init_out(
            self.relu(self.reppoints_pts_init_conv(pts_feat)))
        if self.use_grid_points:
            pts_out_init, bbox_out_init = self.gen_grid_from_reg(
                pts_out_init, bbox_init.detach())
        else:
            pts_out_init = pts_out_init + points_init
        # refine and classify reppoints
        pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
        ) + self.gradient_mul * pts_out_init
        dcn_offset = pts_out_init_grad_mul - dcn_base_offset
        cls_out = self.reppoints_cls_out(
            self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
        pts_out_refine = self.reppoints_pts_refine_out(
            self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
        if self.use_grid_points:
            pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
                pts_out_refine, bbox_out_init.detach())
        else:
            pts_out_refine = pts_out_refine + pts_out_init.detach()
        return cls_out, pts_out_init, pts_out_refine

    def get_points(self, featmap_sizes, img_metas, device):
        """Get points according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            img_metas (list[dict]): Image meta info.

        Returns:
            tuple: points of each image, valid flags of each image
        """
        num_imgs = len(img_metas)
        num_levels = len(featmap_sizes)

        # since feature map sizes of all images are the same, we only compute
        # points center for one time
        multi_level_points = []
        for i in range(num_levels):
            points = self.point_generators[i].grid_points(
                featmap_sizes[i], self.point_strides[i], device)
            multi_level_points.append(points)
        points_list = [[point.clone() for point in multi_level_points]
                       for _ in range(num_imgs)]

        # for each image, we compute valid flags of multi level grids
        valid_flag_list = []
        for img_id, img_meta in enumerate(img_metas):
            multi_level_flags = []
            for i in range(num_levels):
                point_stride = self.point_strides[i]
                feat_h, feat_w = featmap_sizes[i]
                h, w = img_meta['pad_shape'][:2]
                valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h)
                valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w)
                flags = self.point_generators[i].valid_flags(
                    (feat_h, feat_w), (valid_feat_h, valid_feat_w), device)
                multi_level_flags.append(flags)
            valid_flag_list.append(multi_level_flags)

        return points_list, valid_flag_list

    def centers_to_bboxes(self, point_list):
        """Get bboxes according to center points.

        Only used in :class:`MaxIoUAssigner`.
        """
        bbox_list = []
        for i_img, point in enumerate(point_list):
            bbox = []
            for i_lvl in range(len(self.point_strides)):
                scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
                bbox_shift = torch.Tensor([-scale, -scale, scale,
                                           scale]).view(1, 4).type_as(point[0])
                bbox_center = torch.cat(
                    [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
                bbox.append(bbox_center + bbox_shift)
            bbox_list.append(bbox)
        return bbox_list

    def offset_to_pts(self, center_list, pred_list):
        """Change from point offset to point coordinate."""
        pts_list = []
        for i_lvl in range(len(self.point_strides)):
            pts_lvl = []
            for i_img in range(len(center_list)):
                pts_center = center_list[i_img][i_lvl][:, :2].repeat(
                    1, self.num_points)
                pts_shift = pred_list[i_lvl][i_img]
                yx_pts_shift = pts_shift.permute(1, 2, 0).view(
                    -1, 2 * self.num_points)
                y_pts_shift = yx_pts_shift[..., 0::2]
                x_pts_shift = yx_pts_shift[..., 1::2]
                xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
                xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
                pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
                pts_lvl.append(pts)
            pts_lvl = torch.stack(pts_lvl, 0)
            pts_list.append(pts_lvl)
        return pts_list

    def _point_target_single(self,
                             flat_proposals,
                             valid_flags,
                             gt_bboxes,
                             gt_bboxes_ignore,
                             gt_labels,
                             label_channels=1,
                             stage='init',
                             unmap_outputs=True):
        inside_flags = valid_flags
        if not inside_flags.any():
            return (None, ) * 7
        # assign gt and sample proposals
        proposals = flat_proposals[inside_flags, :]

        if stage == 'init':
            assigner = self.init_assigner
            pos_weight = self.train_cfg.init.pos_weight
        else:
            assigner = self.refine_assigner
            pos_weight = self.train_cfg.refine.pos_weight
        assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,
                                        None if self.sampling else gt_labels)
        sampling_result = self.sampler.sample(assign_result, proposals,
                                              gt_bboxes)

        num_valid_proposals = proposals.shape[0]
        bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
        pos_proposals = torch.zeros_like(proposals)
        proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
        labels = proposals.new_full((num_valid_proposals, ),
                                    self.num_classes,
                                    dtype=torch.long)
        label_weights = proposals.new_zeros(
            num_valid_proposals, dtype=torch.float)

        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        if len(pos_inds) > 0:
            pos_gt_bboxes = sampling_result.pos_gt_bboxes
            bbox_gt[pos_inds, :] = pos_gt_bboxes
            pos_proposals[pos_inds, :] = proposals[pos_inds, :]
            proposals_weights[pos_inds, :] = 1.0
            if gt_labels is None:
                # Only rpn gives gt_labels as None
                # Foreground is the first class
                labels[pos_inds] = 0
            else:
                labels[pos_inds] = gt_labels[
                    sampling_result.pos_assigned_gt_inds]
            if pos_weight <= 0:
                label_weights[pos_inds] = 1.0
            else:
                label_weights[pos_inds] = pos_weight
        if len(neg_inds) > 0:
            label_weights[neg_inds] = 1.0

        # map up to original set of proposals
        if unmap_outputs:
            num_total_proposals = flat_proposals.size(0)
            labels = unmap(labels, num_total_proposals, inside_flags)
            label_weights = unmap(label_weights, num_total_proposals,
                                  inside_flags)
            bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
            pos_proposals = unmap(pos_proposals, num_total_proposals,
                                  inside_flags)
            proposals_weights = unmap(proposals_weights, num_total_proposals,
                                      inside_flags)

        return (labels, label_weights, bbox_gt, pos_proposals,
                proposals_weights, pos_inds, neg_inds)

    def get_targets(self,
                    proposals_list,
                    valid_flag_list,
                    gt_bboxes_list,
                    img_metas,
                    gt_bboxes_ignore_list=None,
                    gt_labels_list=None,
                    stage='init',
                    label_channels=1,
                    unmap_outputs=True):
        """Compute corresponding GT box and classification targets for
        proposals.

        Args:
            proposals_list (list[list]): Multi level points/bboxes of each
                image.
            valid_flag_list (list[list]): Multi level valid flags of each
                image.
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
            img_metas (list[dict]): Meta info of each image.
            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
                ignored.
            gt_labels_list (list[Tensor]): Ground truth labels of each box.
            stage (str): `init` or `refine`. Generate target for init stage or
                refine stage.
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.

        Returns:
            tuple:
                - labels_list (list[Tensor]): Labels of each level.
                - label_weights_list (list[Tensor]): Label weights of each level.  # noqa: E501
                - bbox_gt_list (list[Tensor]): Ground truth bbox of each level.
                - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level.  # noqa: E501
                - proposal_weights_list (list[Tensor]): Proposal weights of each level.  # noqa: E501
                - num_total_pos (int): Number of positive samples in all images.  # noqa: E501
                - num_total_neg (int): Number of negative samples in all images.  # noqa: E501
        """
        assert stage in ['init', 'refine']
        num_imgs = len(img_metas)
        assert len(proposals_list) == len(valid_flag_list) == num_imgs

        # points number of multi levels
        num_level_proposals = [points.size(0) for points in proposals_list[0]]

        # concat all level points and flags to a single tensor
        for i in range(num_imgs):
            assert len(proposals_list[i]) == len(valid_flag_list[i])
            proposals_list[i] = torch.cat(proposals_list[i])
            valid_flag_list[i] = torch.cat(valid_flag_list[i])

        # compute targets for each image
        if gt_bboxes_ignore_list is None:
            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
        if gt_labels_list is None:
            gt_labels_list = [None for _ in range(num_imgs)]
        (all_labels, all_label_weights, all_bbox_gt, all_proposals,
         all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
             self._point_target_single,
             proposals_list,
             valid_flag_list,
             gt_bboxes_list,
             gt_bboxes_ignore_list,
             gt_labels_list,
             stage=stage,
             label_channels=label_channels,
             unmap_outputs=unmap_outputs)
        # no valid points
        if any([labels is None for labels in all_labels]):
            return None
        # sampled points of all images
        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
        labels_list = images_to_levels(all_labels, num_level_proposals)
        label_weights_list = images_to_levels(all_label_weights,
                                              num_level_proposals)
        bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
        proposals_list = images_to_levels(all_proposals, num_level_proposals)
        proposal_weights_list = images_to_levels(all_proposal_weights,
                                                 num_level_proposals)
        return (labels_list, label_weights_list, bbox_gt_list, proposals_list,
                proposal_weights_list, num_total_pos, num_total_neg)

    def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
                    label_weights, bbox_gt_init, bbox_weights_init,
                    bbox_gt_refine, bbox_weights_refine, stride,
                    num_total_samples_init, num_total_samples_refine):
        # classification loss
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)
        cls_score = cls_score.permute(0, 2, 3,
                                      1).reshape(-1, self.cls_out_channels)
        cls_score = cls_score.contiguous()
        loss_cls = self.loss_cls(
            cls_score,
            labels,
            label_weights,
            avg_factor=num_total_samples_refine)

        # points loss
        bbox_gt_init = bbox_gt_init.reshape(-1, 4)
        bbox_weights_init = bbox_weights_init.reshape(-1, 4)
        bbox_pred_init = self.points2bbox(
            pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
        bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
        bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
        bbox_pred_refine = self.points2bbox(
            pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
        normalize_term = self.point_base_scale * stride
        loss_pts_init = self.loss_bbox_init(
            bbox_pred_init / normalize_term,
            bbox_gt_init / normalize_term,
            bbox_weights_init,
            avg_factor=num_total_samples_init)
        loss_pts_refine = self.loss_bbox_refine(
            bbox_pred_refine / normalize_term,
            bbox_gt_refine / normalize_term,
            bbox_weights_refine,
            avg_factor=num_total_samples_refine)
        return loss_cls, loss_pts_init, loss_pts_refine

    def loss(self,
             cls_scores,
             pts_preds_init,
             pts_preds_refine,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == len(self.point_generators)
        device = cls_scores[0].device
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1

        # target for initial stage
        center_list, valid_flag_list = self.get_points(featmap_sizes,
                                                       img_metas, device)
        pts_coordinate_preds_init = self.offset_to_pts(center_list,
                                                       pts_preds_init)
        if self.train_cfg.init.assigner['type'] == 'PointAssigner':
            # Assign target for center list
            candidate_list = center_list
        else:
            # transform center list to bbox list and
            #   assign target for bbox list
            bbox_list = self.centers_to_bboxes(center_list)
            candidate_list = bbox_list
        cls_reg_targets_init = self.get_targets(
            candidate_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            stage='init',
            label_channels=label_channels)
        (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
         num_total_pos_init, num_total_neg_init) = cls_reg_targets_init
        num_total_samples_init = (
            num_total_pos_init +
            num_total_neg_init if self.sampling else num_total_pos_init)

        # target for refinement stage
        center_list, valid_flag_list = self.get_points(featmap_sizes,
                                                       img_metas, device)
        pts_coordinate_preds_refine = self.offset_to_pts(
            center_list, pts_preds_refine)
        bbox_list = []
        for i_img, center in enumerate(center_list):
            bbox = []
            for i_lvl in range(len(pts_preds_refine)):
                bbox_preds_init = self.points2bbox(
                    pts_preds_init[i_lvl].detach())
                bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
                bbox_center = torch.cat(
                    [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
                bbox.append(bbox_center +
                            bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
            bbox_list.append(bbox)
        cls_reg_targets_refine = self.get_targets(
            bbox_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            stage='refine',
            label_channels=label_channels)
        (labels_list, label_weights_list, bbox_gt_list_refine,
         candidate_list_refine, bbox_weights_list_refine,
         num_total_pos_refine, num_total_neg_refine) = cls_reg_targets_refine
        num_total_samples_refine = (
            num_total_pos_refine +
            num_total_neg_refine if self.sampling else num_total_pos_refine)

        # compute loss
        losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
            self.loss_single,
            cls_scores,
            pts_coordinate_preds_init,
            pts_coordinate_preds_refine,
            labels_list,
            label_weights_list,
            bbox_gt_list_init,
            bbox_weights_list_init,
            bbox_gt_list_refine,
            bbox_weights_list_refine,
            self.point_strides,
            num_total_samples_init=num_total_samples_init,
            num_total_samples_refine=num_total_samples_refine)
        loss_dict_all = {
            'loss_cls': losses_cls,
            'loss_pts_init': losses_pts_init,
            'loss_pts_refine': losses_pts_refine
        }
        return loss_dict_all

    def get_bboxes(self,
                   cls_scores,
                   pts_preds_init,
                   pts_preds_refine,
                   img_metas,
                   cfg=None,
                   rescale=False,
                   with_nms=True):
        assert len(cls_scores) == len(pts_preds_refine)
        device = cls_scores[0].device
        bbox_preds_refine = [
            self.points2bbox(pts_pred_refine)
            for pts_pred_refine in pts_preds_refine
        ]
        num_levels = len(cls_scores)
        mlvl_points = [
            self.point_generators[i].grid_points(cls_scores[i].size()[-2:],
                                                 self.point_strides[i], device)
            for i in range(num_levels)
        ]
        result_list = []
        for img_id in range(len(img_metas)):
            cls_score_list = [
                cls_scores[i][img_id].detach() for i in range(num_levels)
            ]
            bbox_pred_list = [
                bbox_preds_refine[i][img_id].detach()
                for i in range(num_levels)
            ]
            img_shape = img_metas[img_id]['img_shape']
            scale_factor = img_metas[img_id]['scale_factor']
            proposals = self._get_bboxes_single(cls_score_list,
                                                bbox_pred_list, mlvl_points,
                                                img_shape, scale_factor, cfg,
                                                rescale, with_nms)
            result_list.append(proposals)
        return result_list

    def _get_bboxes_single(self,
                           cls_scores,
                           bbox_preds,
                           mlvl_points,
                           img_shape,
                           scale_factor,
                           cfg,
                           rescale=False,
                           with_nms=True):
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
        mlvl_bboxes = []
        mlvl_scores = []
        for i_lvl, (cls_score, bbox_pred, points) in enumerate(
                zip(cls_scores, bbox_preds, mlvl_points)):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            cls_score = cls_score.permute(1, 2,
                                          0).reshape(-1, self.cls_out_channels)
            if self.use_sigmoid_cls:
                scores = cls_score.sigmoid()
            else:
                scores = cls_score.softmax(-1)
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            nms_pre = cfg.get('nms_pre', -1)
            if nms_pre > 0 and scores.shape[0] > nms_pre:
                if self.use_sigmoid_cls:
                    max_scores, _ = scores.max(dim=1)
                else:
                    # remind that we set FG labels to [0, num_class-1]
                    # since mmdet v2.0
                    # BG cat_id: num_class
                    max_scores, _ = scores[:, :-1].max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                points = points[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
            bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
            bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
            x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
            y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
            x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
            y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
            bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        if self.use_sigmoid_cls:
            # Add a dummy background class to the backend when using sigmoid
            # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
            # BG cat_id: num_class
            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
        if with_nms:
            det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
                                                    cfg.score_thr, cfg.nms,
                                                    cfg.max_per_img)
            return det_bboxes, det_labels
        else:
            return mlvl_bboxes, mlvl_scores
35,305
45.212042
101
py
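The points2bbox transform is the piece of RepPointsHead that runs standalone on CPU, so it makes a convenient illustration of how a 2n-channel point field collapses to a 4-channel box field. A minimal sketch; the import path, class count, and tensor shapes are assumptions for illustration:

import torch
from mmdet.models.dense_heads import RepPointsHead

# train_cfg/test_cfg are omitted, so only the layers are built.
head = RepPointsHead(num_classes=80, in_channels=256)

# 9 (y, x) offset pairs per location -> 18 channels on an 8x8 map.
pts = torch.rand(2, 18, 8, 8)
boxes = head.points2bbox(pts, y_first=True)  # default 'moment' transform
print(boxes.shape)                           # torch.Size([2, 4, 8, 8])

# the conversion rule is configurable:
head_mm = RepPointsHead(
    num_classes=80, in_channels=256, transform_method='minmax')
print(head_mm.points2bbox(pts).shape)        # same shape, minmax rule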
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/vfnet_head.py
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.ops import DeformConv2d
from mmcv.runner import force_fp32

from mmdet.core import (bbox2distance, bbox_overlaps, build_anchor_generator,
                        build_assigner, build_sampler, distance2bbox,
                        multi_apply, multiclass_nms, reduce_mean)
from ..builder import HEADS, build_loss
from .atss_head import ATSSHead
from .fcos_head import FCOSHead

INF = 1e8


@HEADS.register_module()
class VFNetHead(ATSSHead, FCOSHead):
    """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
    Detector.<https://arxiv.org/abs/2008.13367>`_.

    The VFNet predicts IoU-aware classification scores which mix the
    object presence confidence and object localization accuracy as the
    detection score. It is built on the FCOS architecture and uses ATSS
    for defining positive/negative training examples. The VFNet is trained
    with Varifocal Loss and employs star-shaped deformable convolution to
    extract features for a bbox.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
            level points.
        center_sampling (bool): If true, use center sampling. Default: False.
        center_sample_radius (float): Radius of center sampling. Default: 1.5.
        sync_num_pos (bool): If true, synchronize the number of positive
            examples across GPUs. Default: True
        gradient_mul (float): The multiplier to gradients from bbox refinement
            and recognition. Default: 0.1.
        bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
            'stride'. Default: reg_denom
        loss_cls_fl (dict): Config of focal loss.
        use_vfl (bool): If true, use varifocal loss for training.
            Default: True.
        loss_cls (dict): Config of varifocal loss.
        loss_bbox (dict): Config of localization loss, GIoU Loss.
        loss_bbox_refine (dict): Config of localization refinement loss,
            GIoU Loss.
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: norm_cfg=dict(type='GN', num_groups=32,
            requires_grad=True).
        use_atss (bool): If true, use ATSS to define positive/negative
            examples. Default: True.
        anchor_generator (dict): Config of anchor generator for ATSS.

    Example:
        >>> self = VFNetHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, bbox_pred_refine = self.forward(feats)
        >>> assert len(cls_score) == len(self.scales)
    """  # noqa: E501

    def __init__(self,
                 num_classes,
                 in_channels,
                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
                                 (512, INF)),
                 center_sampling=False,
                 center_sample_radius=1.5,
                 sync_num_pos=True,
                 gradient_mul=0.1,
                 bbox_norm_type='reg_denom',
                 loss_cls_fl=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 use_vfl=True,
                 loss_cls=dict(
                     type='VarifocalLoss',
                     use_sigmoid=True,
                     alpha=0.75,
                     gamma=2.0,
                     iou_weighted=True,
                     loss_weight=1.0),
                 loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
                 loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 use_atss=True,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     ratios=[1.0],
                     octave_base_scale=8,
                     scales_per_octave=1,
                     center_offset=0.0,
                     strides=[8, 16, 32, 64, 128]),
                 **kwargs):
        # dcn base offsets, adapted from reppoints_head.py
        self.num_dconv_points = 9
        self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
        self.dcn_pad = int((self.dcn_kernel - 1) / 2)
        dcn_base = np.arange(-self.dcn_pad,
                             self.dcn_pad + 1).astype(np.float64)
        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
            (-1))
        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)

        super(FCOSHead, self).__init__(
            num_classes, in_channels, norm_cfg=norm_cfg, **kwargs)
        self.regress_ranges = regress_ranges
        self.reg_denoms = [
            regress_range[-1] for regress_range in regress_ranges
        ]
        self.reg_denoms[-1] = self.reg_denoms[-2] * 2
        self.center_sampling = center_sampling
        self.center_sample_radius = center_sample_radius
        self.sync_num_pos = sync_num_pos
        self.bbox_norm_type = bbox_norm_type
        self.gradient_mul = gradient_mul
        self.use_vfl = use_vfl
        if self.use_vfl:
            self.loss_cls = build_loss(loss_cls)
        else:
            self.loss_cls = build_loss(loss_cls_fl)
        self.loss_bbox = build_loss(loss_bbox)
        self.loss_bbox_refine = build_loss(loss_bbox_refine)

        # for getting ATSS targets
        self.use_atss = use_atss
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        self.anchor_generator = build_anchor_generator(anchor_generator)
        self.anchor_center_offset = anchor_generator['center_offset']
        self.num_anchors = self.anchor_generator.num_base_anchors[0]
        self.sampling = False
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)

    def _init_layers(self):
        """Initialize layers of the head."""
        super(FCOSHead, self)._init_cls_convs()
        super(FCOSHead, self)._init_reg_convs()
        self.relu = nn.ReLU(inplace=True)
        self.vfnet_reg_conv = ConvModule(
            self.feat_channels,
            self.feat_channels,
            3,
            stride=1,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            bias=self.conv_bias)
        self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_reg_refine_dconv = DeformConv2d(
            self.feat_channels,
            self.feat_channels,
            self.dcn_kernel,
            1,
            padding=self.dcn_pad)
        self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_cls_dconv = DeformConv2d(
            self.feat_channels,
            self.feat_channels,
            self.dcn_kernel,
            1,
            padding=self.dcn_pad)
        self.vfnet_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        normal_init(self.vfnet_reg_conv.conv, std=0.01)
        normal_init(self.vfnet_reg, std=0.01)
        normal_init(self.vfnet_reg_refine_dconv, std=0.01)
        normal_init(self.vfnet_reg_refine, std=0.01)
        normal_init(self.vfnet_cls_dconv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.vfnet_cls, std=0.01, bias=bias_cls)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each
                is a 4D-tensor.

        Returns:
            tuple:
                cls_scores (list[Tensor]): Box iou-aware scores for each scale
                    level, each is a 4D-tensor, the channel number is
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box offsets for each
                    scale level, each is a 4D-tensor, the channel number is
                    num_points * 4.
                bbox_preds_refine (list[Tensor]): Refined Box offsets for
                    each scale level, each is a 4D-tensor, the channel
                    number is num_points * 4.
        """
        return multi_apply(self.forward_single, feats, self.scales,
                           self.scales_refine, self.strides, self.reg_denoms)

    def forward_single(self, x, scale, scale_refine, stride, reg_denom):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.
            scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to
                resize the refined bbox prediction.
            stride (int): The corresponding stride for feature maps,
                used to normalize the bbox prediction when
                bbox_norm_type = 'stride'.
            reg_denom (int): The corresponding regression range for feature
                maps, only used to normalize the bbox prediction when
                bbox_norm_type = 'reg_denom'.

        Returns:
            tuple: iou-aware cls scores for each box, bbox predictions and
                refined bbox predictions of input feature maps.
        """
        cls_feat = x
        reg_feat = x

        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)

        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)

        # predict the bbox_pred of different level
        reg_feat_init = self.vfnet_reg_conv(reg_feat)
        if self.bbox_norm_type == 'reg_denom':
            bbox_pred = scale(
                self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
        elif self.bbox_norm_type == 'stride':
            bbox_pred = scale(
                self.vfnet_reg(reg_feat_init)).float().exp() * stride
        else:
            raise NotImplementedError

        # compute star deformable convolution offsets
        dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
                                          stride)

        # refine the bbox_pred
        reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
        bbox_pred_refine = scale_refine(
            self.vfnet_reg_refine(reg_feat)).float().exp()
        bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()

        # predict the iou-aware cls score
        cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
        cls_score = self.vfnet_cls(cls_feat)

        return cls_score, bbox_pred, bbox_pred_refine

    def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
        """Compute the star deformable conv offsets.

        Args:
            bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b).
            gradient_mul (float): Gradient multiplier.
            stride (int): The corresponding stride for feature maps,
                used to project the bbox onto the feature map.

        Returns:
            dcn_offsets (Tensor): The offsets for deformable convolution.
        """
        dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
        bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
            gradient_mul * bbox_pred
        # map to the feature map scale
        bbox_pred_grad_mul = bbox_pred_grad_mul / stride
        N, C, H, W = bbox_pred.size()

        x1 = bbox_pred_grad_mul[:, 0, :, :]
        y1 = bbox_pred_grad_mul[:, 1, :, :]
        x2 = bbox_pred_grad_mul[:, 2, :, :]
        y2 = bbox_pred_grad_mul[:, 3, :, :]
        bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
            N, 2 * self.num_dconv_points, H, W)
        bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 5, :, :] = x2  # x2
        bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 11, :, :] = x2  # x2
        bbox_pred_grad_mul_offset[:, 12, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 14, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 16, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 17, :, :] = x2  # x2
        dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset

        return dcn_offset

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
    def loss(self,
             cls_scores,
             bbox_preds,
             bbox_preds_refine,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box offsets for each
                scale level, each is a 4D-tensor, the channel number is
                num_points * 4.
            bbox_preds_refine (list[Tensor]): Refined Box offsets for
                each scale level, each is a 4D-tensor, the channel
                number is num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
                Default: None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                           bbox_preds[0].device)
        labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
            cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,
            gt_bboxes_ignore)

        num_imgs = cls_scores[0].size(0)
        # flatten cls_scores, bbox_preds and bbox_preds_refine
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3,
                              1).reshape(-1,
                                         self.cls_out_channels).contiguous()
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
            for bbox_pred in bbox_preds
        ]
        flatten_bbox_preds_refine = [
            bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
            for bbox_pred_refine in bbox_preds_refine
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
        flatten_labels = torch.cat(labels)
        flatten_bbox_targets = torch.cat(bbox_targets)
        # repeat points to align with bbox_preds
        flatten_points = torch.cat(
            [points.repeat(num_imgs, 1) for points in all_level_points])

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = torch.where(
            ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
        num_pos = len(pos_inds)

        pos_bbox_preds = flatten_bbox_preds[pos_inds]
        pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
        pos_labels = flatten_labels[pos_inds]

        # sync num_pos across all gpus
        if self.sync_num_pos:
            num_pos_avg_per_gpu = reduce_mean(
                pos_inds.new_tensor(num_pos).float()).item()
            num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
        else:
            num_pos_avg_per_gpu = num_pos

        if num_pos > 0:
            pos_bbox_targets = flatten_bbox_targets[pos_inds]
            pos_points = flatten_points[pos_inds]

            pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
            pos_decoded_target_preds = distance2bbox(pos_points,
                                                     pos_bbox_targets)
            iou_targets_ini = bbox_overlaps(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds.detach(),
                is_aligned=True).clamp(min=1e-6)
            bbox_weights_ini = iou_targets_ini.clone().detach()
            iou_targets_ini_avg_per_gpu = reduce_mean(
                bbox_weights_ini.sum()).item()
            bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)
            loss_bbox = self.loss_bbox(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds.detach(),
                weight=bbox_weights_ini,
                avg_factor=bbox_avg_factor_ini)

            pos_decoded_bbox_preds_refine = \
                distance2bbox(pos_points, pos_bbox_preds_refine)
            iou_targets_rf = bbox_overlaps(
                pos_decoded_bbox_preds_refine,
                pos_decoded_target_preds.detach(),
                is_aligned=True).clamp(min=1e-6)
            bbox_weights_rf = iou_targets_rf.clone().detach()
            iou_targets_rf_avg_per_gpu = reduce_mean(
                bbox_weights_rf.sum()).item()
            bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0)
            loss_bbox_refine = self.loss_bbox_refine(
                pos_decoded_bbox_preds_refine,
                pos_decoded_target_preds.detach(),
                weight=bbox_weights_rf,
                avg_factor=bbox_avg_factor_rf)

            # build IoU-aware cls_score targets
            if self.use_vfl:
                pos_ious = iou_targets_rf.clone().detach()
                cls_iou_targets = torch.zeros_like(flatten_cls_scores)
                cls_iou_targets[pos_inds, pos_labels] = pos_ious
        else:
            loss_bbox = pos_bbox_preds.sum() * 0
            loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
            if self.use_vfl:
                cls_iou_targets = torch.zeros_like(flatten_cls_scores)

        if self.use_vfl:
            loss_cls = self.loss_cls(
                flatten_cls_scores,
                cls_iou_targets,
                avg_factor=num_pos_avg_per_gpu)
        else:
            loss_cls = self.loss_cls(
                flatten_cls_scores,
                flatten_labels,
                weight=label_weights,
                avg_factor=num_pos_avg_per_gpu)

        return dict(
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            loss_bbox_rf=loss_bbox_refine)

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   bbox_preds_refine,
                   img_metas,
                   cfg=None,
                   rescale=None,
                   with_nms=True):
        """Transform network outputs for a batch into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            bbox_preds (list[Tensor]): Box offsets for each scale
                level with shape (N, num_points * 4, H, W).
            bbox_preds_refine (list[Tensor]): Refined Box offsets for
                each scale level with shape (N, num_points * 4, H, W).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config): Test / postprocessing configuration,
                if None, test_cfg would be used. Default: None.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before returning boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where the first 4 columns
                are bounding box positions (tl_x, tl_y, br_x, br_y) and the
                5-th column is a score between 0 and 1. The second item is a
                (n,) tensor where each item is the predicted class label of
                the corresponding box.
        """
        assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
        num_levels = len(cls_scores)

        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                      bbox_preds[0].device)
        result_list = []
        for img_id in range(len(img_metas)):
            cls_score_list = [
                cls_scores[i][img_id].detach() for i in range(num_levels)
            ]
            bbox_pred_list = [
                bbox_preds_refine[i][img_id].detach()
                for i in range(num_levels)
            ]
            img_shape = img_metas[img_id]['img_shape']
            scale_factor = img_metas[img_id]['scale_factor']
            det_bboxes = self._get_bboxes_single(cls_score_list,
                                                 bbox_pred_list, mlvl_points,
                                                 img_shape, scale_factor, cfg,
                                                 rescale, with_nms)
            result_list.append(det_bboxes)
        return result_list

    def _get_bboxes_single(self,
                           cls_scores,
                           bbox_preds,
                           mlvl_points,
                           img_shape,
                           scale_factor,
                           cfg,
                           rescale=False,
                           with_nms=True):
        """Transform outputs for a single batch item into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for a single scale
                level with shape (num_points * num_classes, H, W).
            bbox_preds (list[Tensor]): Box offsets for a single scale
                level with shape (num_points * 4, H, W).
            mlvl_points (list[Tensor]): Box reference for a single scale level
                with shape (num_total_points, 4).
            img_shape (tuple[int]): Shape of the input image,
                (height, width, 3).
            scale_factor (ndarray): Scale factor of the image arrange as
                (w_scale, h_scale, w_scale, h_scale).
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before returning boxes.
                Default: True.

        Returns:
            tuple(Tensor):
                det_bboxes (Tensor): BBox predictions in shape (n, 5), where
                    the first 4 columns are bounding box positions
                    (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
                    between 0 and 1.
                det_labels (Tensor): A (n,) tensor where each item is the
                    predicted class label of the corresponding box.
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
        mlvl_bboxes = []
        mlvl_scores = []
        for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds,
                                                mlvl_points):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            scores = cls_score.permute(1, 2, 0).reshape(
                -1, self.cls_out_channels).contiguous().sigmoid()
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).contiguous()

            nms_pre = cfg.get('nms_pre', -1)
            if 0 < nms_pre < scores.shape[0]:
                max_scores, _ = scores.max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                points = points[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
            bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
        # BG cat_id: num_class
        mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
        if with_nms:
            det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
                                                    cfg.score_thr, cfg.nms,
                                                    cfg.max_per_img)
            return det_bboxes, det_labels
        else:
            return mlvl_bboxes, mlvl_scores

    def _get_points_single(self,
                           featmap_size,
                           stride,
                           dtype,
                           device,
                           flatten=False):
        """Get points according to feature map sizes."""
        h, w = featmap_size
        x_range = torch.arange(
            0, w * stride, stride, dtype=dtype, device=device)
        y_range = torch.arange(
            0, h * stride, stride, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range)
        # to be compatible with anchor points in ATSS
        if self.use_atss:
            points = torch.stack(
                (x.reshape(-1), y.reshape(-1)), dim=-1) + \
                stride * self.anchor_center_offset
        else:
            points = torch.stack(
                (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
        return points

    def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
                    img_metas, gt_bboxes_ignore):
        """A wrapper for computing ATSS and FCOS targets for points in
        multiple images.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).

        Returns:
            tuple:
                labels_list (list[Tensor]): Labels of each level.
                label_weights (Tensor/None): Label weights of all levels.
                bbox_targets_list (list[Tensor]): Regression targets of each
                    level, (l, t, r, b).
                bbox_weights (Tensor/None): Bbox weights of all levels.
        """
        if self.use_atss:
            return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,
                                         gt_labels, img_metas,
                                         gt_bboxes_ignore)
        else:
            self.norm_on_bbox = False
            return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)

    def _get_target_single(self, *args, **kwargs):
        """Avoid ambiguity in multiple inheritance."""
        if self.use_atss:
            return ATSSHead._get_target_single(self, *args, **kwargs)
        else:
            return FCOSHead._get_target_single(self, *args, **kwargs)

    def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute FCOS regression and classification targets for points in
        multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).

        Returns:
            tuple:
                labels (list[Tensor]): Labels of each level.
                label_weights: None, to be compatible with ATSS targets.
                bbox_targets (list[Tensor]): BBox targets of each level.
                bbox_weights: None, to be compatible with ATSS targets.
        """
        labels, bbox_targets = FCOSHead.get_targets(self, points,
                                                    gt_bboxes_list,
                                                    gt_labels_list)
        label_weights = None
        bbox_weights = None
        return labels, label_weights, bbox_targets, bbox_weights

    def get_atss_targets(self,
                         cls_scores,
                         mlvl_points,
                         gt_bboxes,
                         gt_labels,
                         img_metas,
                         gt_bboxes_ignore=None):
        """A wrapper for computing ATSS targets for points in multiple images.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4). Default: None.

        Returns:
            tuple:
                labels_list (list[Tensor]): Labels of each level.
                label_weights (Tensor): Label weights of all levels.
                bbox_targets_list (list[Tensor]): Regression targets of each
                    level, (l, t, r, b).
                bbox_weights (Tensor): Bbox weights of all levels.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1

        cls_reg_targets = ATSSHead.get_targets(
            self,
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            unmap_outputs=True)
        if cls_reg_targets is None:
            return None

        (anchor_list, labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets

        bbox_targets_list = [
            bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
        ]

        num_imgs = len(img_metas)
        # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
        bbox_targets_list = self.transform_bbox_targets(
            bbox_targets_list, mlvl_points, num_imgs)

        labels_list = [labels.reshape(-1) for labels in labels_list]
        label_weights_list = [
            label_weights.reshape(-1) for label_weights in label_weights_list
        ]
        bbox_weights_list = [
            bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
        ]
        label_weights = torch.cat(label_weights_list)
        bbox_weights = torch.cat(bbox_weights_list)
        return labels_list, label_weights, bbox_targets_list, bbox_weights

    def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
        """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.

        Args:
            decoded_bboxes (list[Tensor]): Regression targets of each level,
                in the form of (x1, y1, x2, y2).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            num_imgs (int): the number of images in a batch.
        Returns:
            bbox_targets (list[Tensor]): Regression targets of each level in
                the form of (l, t, r, b).
        """
        # TODO: re-implement this in a PointCoder class
        assert len(decoded_bboxes) == len(mlvl_points)
        num_levels = len(decoded_bboxes)
        mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
        bbox_targets = []
        for i in range(num_levels):
            bbox_target = bbox2distance(mlvl_points[i], decoded_bboxes[i])
            bbox_targets.append(bbox_target)

        return bbox_targets

    def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                              strict, missing_keys, unexpected_keys,
                              error_msgs):
        """Override the method in the parent class to avoid changing the
        parameters' names."""
        pass
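
# --- Editor's note: illustrative sketch, not part of the original file. ---
# transform_bbox_targets above re-expresses (x1, y1, x2, y2) targets as
# (l, t, r, b) distances from a reference point. A self-contained demo of
# that encoding in plain torch; mmdet's bbox2distance / distance2bbox
# implement the same arithmetic (modulo optional clamping):
import torch


def _bbox2distance(points, bbox):
    # (l, t, r, b) = (px - x1, py - y1, x2 - px, y2 - py)
    return torch.stack([
        points[:, 0] - bbox[:, 0], points[:, 1] - bbox[:, 1],
        bbox[:, 2] - points[:, 0], bbox[:, 3] - points[:, 1]
    ], dim=-1)


def _distance2bbox(points, distance):
    # inverse: recover the corner-format box from the point and distances
    return torch.stack([
        points[:, 0] - distance[:, 0], points[:, 1] - distance[:, 1],
        points[:, 0] + distance[:, 2], points[:, 1] + distance[:, 3]
    ], dim=-1)


_pts = torch.tensor([[32.0, 32.0]])
_box = torch.tensor([[16.0, 24.0, 48.0, 56.0]])
_ltrb = _bbox2distance(_pts, _box)  # tensor([[16., 8., 16., 24.]])
assert torch.allclose(_distance2bbox(_pts, _ltrb), _box)  # exact round trip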
35,246
43.503788
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/fsaf_head.py
import numpy as np import torch from mmcv.cnn import normal_init from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply, unmap) from ..builder import HEADS from ..losses.accuracy import accuracy from ..losses.utils import weight_reduce_loss from .retina_head import RetinaHead @HEADS.register_module() class FSAFHead(RetinaHead): """Anchor-free head used in `FSAF <https://arxiv.org/abs/1903.00621>`_. The head contains two subnetworks. The first classifies anchor boxes and the second regresses deltas for the anchors (num_anchors is 1 for anchor- free methods) Args: *args: Same as its base class in :class:`RetinaHead` score_threshold (float, optional): The score_threshold to calculate positive recall. If given, prediction scores lower than this value is counted as incorrect prediction. Default to None. **kwargs: Same as its base class in :class:`RetinaHead` Example: >>> import torch >>> self = FSAFHead(11, 7) >>> x = torch.rand(1, 7, 32, 32) >>> cls_score, bbox_pred = self.forward_single(x) >>> # Each anchor predicts a score for each class except background >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors >>> assert cls_per_anchor == self.num_classes >>> assert box_per_anchor == 4 """ def __init__(self, *args, score_threshold=None, **kwargs): super().__init__(*args, **kwargs) self.score_threshold = score_threshold def forward_single(self, x): """Forward feature map of a single scale level. Args: x (Tensor): Feature map of a single scale level. Returns: tuple (Tensor): cls_score (Tensor): Box scores for each scale level Has shape (N, num_points * num_classes, H, W). bbox_pred (Tensor): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). """ cls_score, bbox_pred = super().forward_single(x) # relu: TBLR encoder only accepts positive bbox_pred return cls_score, self.relu(bbox_pred) def init_weights(self): """Initialize weights of the head.""" super(FSAFHead, self).init_weights() # The positive bias in self.retina_reg conv is to prevent predicted \ # bbox with 0 area normal_init(self.retina_reg, std=0.01, bias=0.25) def _get_targets_single(self, flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, label_channels=1, unmap_outputs=True): """Compute regression and classification targets for anchors in a single image. Most of the codes are the same with the base class :obj: `AnchorHead`, except that it also collects and returns the matched gt index in the image (from 0 to num_gt-1). If the anchor bbox is not matched to any gt, the corresponding value in pos_gt_inds is -1. 
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # Assign gt and sample anchors anchors = flat_anchors[inside_flags.type(torch.bool), :] assign_result = self.assigner.assign( anchors, gt_bboxes, gt_bboxes_ignore, None if self.sampling else gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros((num_valid_anchors, label_channels), dtype=torch.float) pos_gt_inds = anchors.new_full((num_valid_anchors, ), -1, dtype=torch.long) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 # The assigned gt_index for each anchor. (0-based) pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # shadowed_labels is a tensor composed of tuples # (anchor_inds, class_label) that indicate those anchors lying in the # outer region of a gt or overlapped by another gt with a smaller # area. # # Therefore, only the shadowed labels are ignored for loss calculation. # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner` shadowed_labels = assign_result.get_extra_property('shadowed_labels') if shadowed_labels is not None and shadowed_labels.numel(): if len(shadowed_labels.shape) == 2: idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1] assert (labels[idx_] != label_).all(), \ 'One label cannot be both positive and ignored' label_weights[idx_, label_] = 0 else: label_weights[shadowed_labels] = 0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap(labels, num_total_anchors, inside_flags) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) pos_gt_inds = unmap( pos_gt_inds, num_total_anchors, inside_flags, fill=-1) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result, pos_gt_inds) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None): """Compute loss of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_points * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_points * 4, H, W). gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. 
gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ for i in range(len(bbox_preds)): # loop over fpn level # avoid 0 area of the predicted bbox bbox_preds[i] = bbox_preds[i].clamp(min=1e-4) # TODO: It may directly use the base-class loss function. featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.anchor_generator.num_levels batch_size = len(gt_bboxes) device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, pos_assigned_gt_inds_list) = cls_reg_targets num_gts = np.array(list(map(len, gt_labels))) num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor concat_anchor_list = [] for i in range(len(anchor_list)): concat_anchor_list.append(torch.cat(anchor_list[i])) all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors) losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, all_anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples) # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned # gt index of each anchor bbox in each fpn level. cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size for i, assign in enumerate(pos_assigned_gt_inds_list): # loop over fpn levels for j in range(1, batch_size): # loop over batch size # Convert gt indices in each img to those in the batch assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1]) pos_assigned_gt_inds_list[i] = assign.flatten() labels_list[i] = labels_list[i].flatten() num_gts = sum(map(len, gt_labels)) # total number of gt in the batch # The unique label index of each gt in the batch label_sequence = torch.arange(num_gts, device=device) # Collect the average loss of each gt in each level with torch.no_grad(): loss_levels, = multi_apply( self.collect_loss_level_single, losses_cls, losses_bbox, pos_assigned_gt_inds_list, labels_seq=label_sequence) # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level loss_levels = torch.stack(loss_levels, dim=0) # Locate the best fpn level for loss back-propagation if loss_levels.numel() == 0: # zero gt argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long) else: _, argmin = loss_levels.min(dim=0) # Reweight the loss of each (anchor, label) pair, so that only those # at the best gt level are back-propagated. 
losses_cls, losses_bbox, pos_inds = multi_apply( self.reweight_loss_single, losses_cls, losses_bbox, pos_assigned_gt_inds_list, labels_list, list(range(len(losses_cls))), min_levels=argmin) num_pos = torch.cat(pos_inds, 0).sum().float() pos_recall = self.calculate_pos_recall(cls_scores, labels_list, pos_inds) if num_pos == 0: # No gt avg_factor = num_pos + float(num_total_neg) else: avg_factor = num_pos for i in range(len(losses_cls)): losses_cls[i] /= avg_factor losses_bbox[i] /= avg_factor return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, num_pos=num_pos / batch_size, pos_recall=pos_recall) def calculate_pos_recall(self, cls_scores, labels_list, pos_inds): """Calculate positive recall with score threshold. Args: cls_scores (list[Tensor]): Classification scores at all fpn levels. Each tensor is in shape (N, num_classes * num_anchors, H, W) labels_list (list[Tensor]): The label that each anchor is assigned to. Shape (N * H * W * num_anchors, ) pos_inds (list[Tensor]): List of bool tensors indicating whether the anchor is assigned to a positive label. Shape (N * H * W * num_anchors, ) Returns: Tensor: A single float number indicating the positive recall. """ with torch.no_grad(): num_class = self.num_classes scores = [ cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos] for cls, pos in zip(cls_scores, pos_inds) ] labels = [ label.reshape(-1)[pos] for label, pos in zip(labels_list, pos_inds) ] scores = torch.cat(scores, dim=0) labels = torch.cat(labels, dim=0) if self.use_sigmoid_cls: scores = scores.sigmoid() else: scores = scores.softmax(dim=1) return accuracy(scores, labels, thresh=self.score_threshold) def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds, labels_seq): """Get the average loss in each FPN level w.r.t. each gt label. Args: cls_loss (Tensor): Classification loss of each feature map pixel, shape (num_anchor, num_class) reg_loss (Tensor): Regression loss of each feature map pixel, shape (num_anchor, 4) assigned_gt_inds (Tensor): It indicates which gt the prior is assigned to (0-based, -1: no assignment). shape (num_anchor), labels_seq: The rank of labels. shape (num_gt) Returns: shape: (num_gt), average loss of each gt in this level """ if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4) reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims if len(cls_loss.shape) == 2: cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims loss = cls_loss + reg_loss assert loss.size(0) == assigned_gt_inds.size(0) # Default loss value is 1e6 for a layer where no anchor is positive # to ensure it will not be chosen to back-propagate gradient losses_ = loss.new_full(labels_seq.shape, 1e6) for i, l in enumerate(labels_seq): match = assigned_gt_inds == l if match.any(): losses_[i] = loss[match].mean() return losses_, def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds, labels, level, min_levels): """Reweight loss values at each level. Reassign loss values at each level by masking those where the pre-calculated loss is too large. Then return the reduced losses. Args: cls_loss (Tensor): Element-wise classification loss. Shape: (num_anchors, num_classes) reg_loss (Tensor): Element-wise regression loss. Shape: (num_anchors, 4) assigned_gt_inds (Tensor): The gt indices that each anchor bbox is assigned to. -1 denotes a negative anchor, otherwise it is the gt index (0-based). Shape: (num_anchors, ), labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ). 
            level (int): The current level index in the
                pyramid (0-4 for RetinaNet)
            min_levels (Tensor): The best-matching level for each gt.
                Shape: (num_gts, ).

        Returns:
            tuple:
                - cls_loss: Reduced corrected classification loss. Scalar.
                - reg_loss: Reduced corrected regression loss. Scalar.
                - pos_flags (Tensor): Corrected bool tensor indicating the
                    final positive anchors. Shape: (num_anchors, ).
        """
        loc_weight = torch.ones_like(reg_loss)
        cls_weight = torch.ones_like(cls_loss)

        pos_flags = assigned_gt_inds >= 0  # positive pixel flag
        pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten()

        if pos_flags.any():  # pos pixels exist
            pos_assigned_gt_inds = assigned_gt_inds[pos_flags]
            zeroing_indices = (min_levels[pos_assigned_gt_inds] != level)
            neg_indices = pos_indices[zeroing_indices]

            if neg_indices.numel():
                pos_flags[neg_indices] = 0
                loc_weight[neg_indices] = 0
                # Only the weight corresponding to the label is
                # zeroed out if not selected
                zeroing_labels = labels[neg_indices]
                assert (zeroing_labels >= 0).all()
                cls_weight[neg_indices, zeroing_labels] = 0

        # Weighted loss for both cls and reg loss
        cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum')
        reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum')

        return cls_loss, reg_loss, pos_flags
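
# --- Editor's note: illustrative sketch, not part of the original file. ---
# The core of FSAF's online feature selection in `loss` above: stack the
# per-gt average loss of every pyramid level, then let each gt
# back-propagate only through its minimum-loss level. The toy numbers
# below are made up purely for illustration.
import torch

loss_levels = torch.tensor([[0.9, 2.0],   # level 0, losses for gts 0..1
                            [0.4, 1.5],   # level 1
                            [1.1, 0.7]])  # level 2
_, argmin = loss_levels.min(dim=0)  # -> tensor([1, 2]): best level per gt
# reweight_loss_single then zeroes the cls/reg weights of positive anchors
# whose assigned gt picked a different level (min_levels[...] != level),
# so only the chosen level's anchors contribute gradient for that gt.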
18,614
43.427208
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/atss_head.py
import torch import torch.nn as nn from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, images_to_levels, multi_apply, multiclass_nms, reduce_mean, unmap) from ..builder import HEADS, build_loss from .anchor_head import AnchorHead EPS = 1e-12 @HEADS.register_module() class ATSSHead(AnchorHead): """Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection. ATSS head structure is similar with FCOS, however ATSS use anchor boxes and assign label by Adaptive Training Sample Selection instead max-iou. https://arxiv.org/abs/1912.02424 """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(ATSSHead, self).__init__(num_classes, in_channels, **kwargs) self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.loss_centerness = build_loss(loss_centerness) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.atss_cls = nn.Conv2d( self.feat_channels, self.num_anchors * self.cls_out_channels, 3, padding=1) self.atss_reg = nn.Conv2d( self.feat_channels, self.num_anchors * 4, 3, padding=1) self.atss_centerness = nn.Conv2d( self.feat_channels, self.num_anchors * 1, 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.anchor_generator.strides]) def init_weights(self): """Initialize weights of the head.""" for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.atss_cls, std=0.01, bias=bias_cls) normal_init(self.atss_reg, std=0.01) normal_init(self.atss_centerness, std=0.01) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_anchors * num_classes. bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_anchors * 4. """ return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. Returns: tuple: cls_score (Tensor): Cls scores for a single scale level the channels number is num_anchors * num_classes. 
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level, the channels number is num_anchors * 4.
                centerness (Tensor): Centerness for a single scale level
                    with shape (N, num_anchors * 1, H, W).
        """
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)
        cls_score = self.atss_cls(cls_feat)
        # we just follow ATSS and do not apply exp to bbox_pred
        bbox_pred = scale(self.atss_reg(reg_feat)).float()
        centerness = self.atss_centerness(reg_feat)
        return cls_score, bbox_pred, centerness

    def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
                    label_weights, bbox_targets, num_total_samples):
        """Compute loss of a single scale level.

        Args:
            anchors (Tensor): Box reference for each scale level with shape
                (N, num_total_anchors, 4).
            cls_score (Tensor): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W).
            bbox_pred (Tensor): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            centerness (Tensor): Centerness for each scale level with shape
                (N, num_anchors * 1, H, W).
            labels (Tensor): Labels of each anchor with shape
                (N, num_total_anchors).
            label_weights (Tensor): Label weights of each anchor with shape
                (N, num_total_anchors).
            bbox_targets (Tensor): BBox regression targets of each anchor with
                shape (N, num_total_anchors, 4).
            num_total_samples (int): Number of positive samples that is
                reduced over all GPUs.

        Returns:
            tuple[Tensor]: loss_cls, loss_bbox, loss_centerness and the sum
                of the centerness targets.
        """
        anchors = anchors.reshape(-1, 4)
        cls_score = cls_score.permute(0, 2, 3, 1).reshape(
            -1, self.cls_out_channels).contiguous()
        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
        centerness = centerness.permute(0, 2, 3, 1).reshape(-1)
        bbox_targets = bbox_targets.reshape(-1, 4)
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)

        # classification loss
        loss_cls = self.loss_cls(
            cls_score, labels, label_weights, avg_factor=num_total_samples)

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)

        if len(pos_inds) > 0:
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_bbox_pred = bbox_pred[pos_inds]
            pos_anchors = anchors[pos_inds]
            pos_centerness = centerness[pos_inds]

            centerness_targets = self.centerness_target(
                pos_anchors, pos_bbox_targets)
            pos_decode_bbox_pred = self.bbox_coder.decode(
                pos_anchors, pos_bbox_pred)
            pos_decode_bbox_targets = self.bbox_coder.decode(
                pos_anchors, pos_bbox_targets)

            # regression loss
            loss_bbox = self.loss_bbox(
                pos_decode_bbox_pred,
                pos_decode_bbox_targets,
                weight=centerness_targets,
                avg_factor=1.0)

            # centerness loss
            loss_centerness = self.loss_centerness(
                pos_centerness,
                centerness_targets,
                avg_factor=num_total_samples)

        else:
            loss_bbox = bbox_pred.sum() * 0
            loss_centerness = centerness.sum() * 0
            centerness_targets = bbox_targets.new_tensor(0.)

        return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
    def loss(self,
             cls_scores,
             bbox_preds,
             centernesses,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.
Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) centernesses (list[Tensor]): Centerness for each scale level with shape (N, num_anchors * 1, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (list[Tensor] | None): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.anchor_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos).cuda()).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, loss_centerness,\ bbox_avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, centernesses, labels_list, label_weights_list, bbox_targets_list, num_total_samples=num_total_samples) bbox_avg_factor = sum(bbox_avg_factor) bbox_avg_factor = reduce_mean(bbox_avg_factor).item() if bbox_avg_factor < EPS: bbox_avg_factor = 1 losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_centerness=loss_centerness) def centerness_target(self, anchors, bbox_targets): # only calculate pos centerness targets, otherwise there may be nan gts = self.bbox_coder.decode(anchors, bbox_targets) anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 l_ = anchors_cx - gts[:, 0] t_ = anchors_cy - gts[:, 1] r_ = gts[:, 2] - anchors_cx b_ = gts[:, 3] - anchors_cy left_right = torch.stack([l_, r_], dim=1) top_bottom = torch.stack([t_, b_], dim=1) centerness = torch.sqrt( (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) assert not torch.isnan(centerness).any() return centerness @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def get_bboxes(self, cls_scores, bbox_preds, centernesses, img_metas, cfg=None, rescale=False, with_nms=True): """Transform network output for a batch into bbox predictions. Args: cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). centernesses (list[Tensor]): Centerness for each scale level with shape (N, num_anchors * 1, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. 
rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is a (n,) tensor where each item is the predicted class label of the corresponding box. """ cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) device = cls_scores[0].device featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] mlvl_anchors = self.anchor_generator.grid_anchors( featmap_sizes, device=device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] centerness_pred_list = [ centernesses[i][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, centerness_pred_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale, with_nms) result_list.append(proposals) return result_list def _get_bboxes_single(self, cls_scores, bbox_preds, centernesses, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False, with_nms=True): """Transform outputs for a single batch item into labeled boxes. Args: cls_scores (list[Tensor]): Box scores for a single scale level with shape (num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for a single scale level with shape (num_anchors * 4, H, W). centernesses (list[Tensor]): Centerness for a single scale level with shape (num_anchors * 1, H, W). mlvl_anchors (list[Tensor]): Box reference for a single scale level with shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arrange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple(Tensor): det_bboxes (Tensor): BBox predictions in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. det_labels (Tensor): A (n,) tensor where each item is the predicted class label of the corresponding box. 
""" assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] mlvl_centerness = [] for cls_score, bbox_pred, centerness, anchors in zip( cls_scores, bbox_preds, centernesses, mlvl_anchors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid() nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: max_scores, _ = (scores * centerness[:, None]).max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] centerness = centerness[topk_inds] bboxes = self.bbox_coder.decode( anchors, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_centerness.append(centerness) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) mlvl_centerness = torch.cat(mlvl_centerness) if with_nms: det_bboxes, det_labels = multiclass_nms( mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=mlvl_centerness) return det_bboxes, det_labels else: return mlvl_bboxes, mlvl_scores, mlvl_centerness def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get targets for ATSS head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. 
multiple levels
        anchors_list = images_to_levels(all_anchors, num_level_anchors)
        labels_list = images_to_levels(all_labels, num_level_anchors)
        label_weights_list = images_to_levels(all_label_weights,
                                              num_level_anchors)
        bbox_targets_list = images_to_levels(all_bbox_targets,
                                             num_level_anchors)
        bbox_weights_list = images_to_levels(all_bbox_weights,
                                             num_level_anchors)
        return (anchors_list, labels_list, label_weights_list,
                bbox_targets_list, bbox_weights_list, num_total_pos,
                num_total_neg)

    def _get_target_single(self,
                           flat_anchors,
                           valid_flags,
                           num_level_anchors,
                           gt_bboxes,
                           gt_bboxes_ignore,
                           gt_labels,
                           img_meta,
                           label_channels=1,
                           unmap_outputs=True):
        """Compute regression, classification targets for anchors in a single
        image.

        Args:
            flat_anchors (Tensor): Multi-level anchors of the image, which are
                concatenated into a single tensor of shape (num_anchors, 4)
            valid_flags (Tensor): Multi level valid flags of the image,
                which are concatenated into a single tensor of
                    shape (num_anchors,).
            num_level_anchors (Tensor): Number of anchors of each scale level.
            gt_bboxes (Tensor): Ground truth bboxes of the image,
                shape (num_gts, 4).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            img_meta (dict): Meta info of the image.
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.

        Returns:
            tuple: N is the number of total anchors in the image.
                labels (Tensor): Labels of all anchors in the image with shape
                    (N,).
                label_weights (Tensor): Label weights of all anchors in the
                    image with shape (N,).
                bbox_targets (Tensor): BBox targets of all anchors in the
                    image with shape (N, 4).
                bbox_weights (Tensor): BBox weights of all anchors in the
                    image with shape (N, 4).
                pos_inds (Tensor): Indices of positive anchors with shape
                    (num_pos,).
                neg_inds (Tensor): Indices of negative anchors with shape
                    (num_neg,).
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) assign_result = self.assigner.assign(anchors, num_level_anchors_inside, gt_bboxes, gt_bboxes_ignore, gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if hasattr(self, 'bbox_coder'): pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) else: # used in VFNetHead pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class since v2.5.0 labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
28,304
42.479263
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/rpn_head.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import normal_init from mmcv.ops import batched_nms from ..builder import HEADS from .anchor_head import AnchorHead from .rpn_test_mixin import RPNTestMixin @HEADS.register_module() class RPNHead(RPNTestMixin, AnchorHead): """RPN head. Args: in_channels (int): Number of channels in the input feature map. """ # noqa: W605 def __init__(self, in_channels, **kwargs): super(RPNHead, self).__init__(1, in_channels, **kwargs) def _init_layers(self): """Initialize layers of the head.""" self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1) self.rpn_cls = nn.Conv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 1) self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) def init_weights(self): """Initialize weights of the head.""" normal_init(self.rpn_conv, std=0.01) normal_init(self.rpn_cls, std=0.01) normal_init(self.rpn_reg, std=0.01) def forward_single(self, x): """Forward feature map of a single scale level.""" x = self.rpn_conv(x) x = F.relu(x, inplace=True) rpn_cls_score = self.rpn_cls(x) rpn_bbox_pred = self.rpn_reg(x) return rpn_cls_score, rpn_bbox_pred def loss(self, cls_scores, bbox_preds, gt_bboxes, img_metas, gt_bboxes_ignore=None): """Compute losses of the head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) gt_bboxes (list[Tensor]): Ground truth bboxes for each image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. """ losses = super(RPNHead, self).loss( cls_scores, bbox_preds, gt_bboxes, None, img_metas, gt_bboxes_ignore=gt_bboxes_ignore) return dict( loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): """Transform outputs for a single batch item into bbox predictions. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (num_anchors * 4, H, W). mlvl_anchors (list[Tensor]): Box reference for each scale level with shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Returns: Tensor: Labeled boxes in shape (n, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. 
""" cfg = self.test_cfg if cfg is None else cfg # bboxes from different level should be independent during NMS, # level_ids are used as labels for batched NMS to separate them level_ids = [] mlvl_scores = [] mlvl_bbox_preds = [] mlvl_valid_anchors = [] for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) # We set FG labels to [0, num_class-1] and BG label to # num_class in RPN head since mmdet v2.5, which is unified to # be consistent with other head since mmdet v2.0. In mmdet v2.0 # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. scores = rpn_cls_score.softmax(dim=1)[:, 0] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) anchors = mlvl_anchors[idx] if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: # sort is faster than topk # _, topk_inds = scores.topk(cfg.nms_pre) ranked_scores, rank_inds = scores.sort(descending=True) topk_inds = rank_inds[:cfg.nms_pre] scores = ranked_scores[:cfg.nms_pre] rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] mlvl_scores.append(scores) mlvl_bbox_preds.append(rpn_bbox_pred) mlvl_valid_anchors.append(anchors) level_ids.append( scores.new_full((scores.size(0), ), idx, dtype=torch.long)) scores = torch.cat(mlvl_scores) anchors = torch.cat(mlvl_valid_anchors) rpn_bbox_pred = torch.cat(mlvl_bbox_preds) proposals = self.bbox_coder.decode( anchors, rpn_bbox_pred, max_shape=img_shape) ids = torch.cat(level_ids) if cfg.min_bbox_size > 0: w = proposals[:, 2] - proposals[:, 0] h = proposals[:, 3] - proposals[:, 1] valid_inds = torch.nonzero( (w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size), as_tuple=False).squeeze() if valid_inds.sum().item() != len(proposals): proposals = proposals[valid_inds, :] scores = scores[valid_inds] ids = ids[valid_inds] # TODO: remove the hard coded nms type nms_cfg = dict(type='nms', iou_threshold=cfg.nms_thr) dets, keep = batched_nms(proposals, scores, ids, nms_cfg) return dets[:cfg.nms_post]
7,095
40.988166
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/anchor_head.py
import torch import torch.nn as nn from mmcv.cnn import normal_init from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, build_anchor_generator, build_assigner, build_bbox_coder, build_sampler, images_to_levels, multi_apply, multiclass_nms, unmap) from ..builder import HEADS, build_loss from .base_dense_head import BaseDenseHead from .dense_test_mixins import BBoxTestMixin @HEADS.register_module() class AnchorHead(BaseDenseHead, BBoxTestMixin): """Anchor-based head (RPN, RetinaNet, SSD, etc.). Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. anchor_generator (dict): Config dict for anchor generator bbox_coder (dict): Config of bounding box coder. reg_decoded_bbox (bool): If true, the regression loss would be applied on decoded bounding boxes. Default: False loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. train_cfg (dict): Training config of anchor head. test_cfg (dict): Testing config of anchor head. """ # noqa: W605 def __init__(self, num_classes, in_channels, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8, 16, 32], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=(.0, .0, .0, .0), target_stds=(1.0, 1.0, 1.0, 1.0)), reg_decoded_bbox=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), train_cfg=None, test_cfg=None): super(AnchorHead, self).__init__() self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) # TODO better way to determine whether sample or not self.sampling = loss_cls['type'] not in [ 'FocalLoss', 'GHMC', 'QualityFocalLoss' ] if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 if self.cls_out_channels <= 0: raise ValueError(f'num_classes={num_classes} is too small') self.reg_decoded_bbox = reg_decoded_bbox self.bbox_coder = build_bbox_coder(bbox_coder) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.train_cfg = train_cfg self.test_cfg = test_cfg if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # use PseudoSampler when sampling is False if self.sampling and hasattr(self.train_cfg, 'sampler'): sampler_cfg = self.train_cfg.sampler else: sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.fp16_enabled = False self.anchor_generator = build_anchor_generator(anchor_generator) # usually the numbers of anchors for each level are the same # except SSD detectors self.num_anchors = self.anchor_generator.num_base_anchors[0] self._init_layers() def _init_layers(self): """Initialize layers of the head.""" self.conv_cls = nn.Conv2d(self.in_channels, self.num_anchors * self.cls_out_channels, 1) self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1) def init_weights(self): """Initialize weights of the head.""" normal_init(self.conv_cls, std=0.01) normal_init(self.conv_reg, std=0.01) def forward_single(self, x): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. 
        Returns:
            tuple:
                cls_score (Tensor): Cls scores for a single scale level \
                    the channels number is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale \
                    level, the channels number is num_anchors * 4.
        """
        cls_score = self.conv_cls(x)
        bbox_pred = self.conv_reg(x)
        return cls_score, bbox_pred

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: A tuple of classification scores and bbox prediction.

                - cls_scores (list[Tensor]): Classification scores for all \
                    scale levels, each is a 4D-tensor, the channels number \
                    is num_anchors * num_classes.
                - bbox_preds (list[Tensor]): Box energies / deltas for all \
                    scale levels, each is a 4D-tensor, the channels number \
                    is num_anchors * 4.
        """
        return multi_apply(self.forward_single, feats)

    def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
        """Get anchors according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            img_metas (list[dict]): Image meta info.
            device (torch.device | str): Device for returned tensors.

        Returns:
            tuple:
                anchor_list (list[Tensor]): Anchors of each image.
                valid_flag_list (list[Tensor]): Valid flags of each image.
        """
        num_imgs = len(img_metas)

        # since feature map sizes of all images are the same, we only compute
        # anchors for one time
        multi_level_anchors = self.anchor_generator.grid_anchors(
            featmap_sizes, device)
        anchor_list = [multi_level_anchors for _ in range(num_imgs)]

        # for each image, we compute valid flags of multi level anchors
        valid_flag_list = []
        for img_id, img_meta in enumerate(img_metas):
            multi_level_flags = self.anchor_generator.valid_flags(
                featmap_sizes, img_meta['pad_shape'], device)
            valid_flag_list.append(multi_level_flags)

        return anchor_list, valid_flag_list

    def _get_targets_single(self,
                            flat_anchors,
                            valid_flags,
                            gt_bboxes,
                            gt_bboxes_ignore,
                            gt_labels,
                            img_meta,
                            label_channels=1,
                            unmap_outputs=True):
        """Compute regression and classification targets for anchors in a
        single image.

        Args:
            flat_anchors (Tensor): Multi-level anchors of the image, which are
                concatenated into a single tensor of shape (num_anchors, 4)
            valid_flags (Tensor): Multi level valid flags of the image,
                which are concatenated into a single tensor of
                shape (num_anchors,).
            gt_bboxes (Tensor): Ground truth bboxes of the image,
                shape (num_gts, 4).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            img_meta (dict): Meta info of the image.
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.

        Returns:
            tuple:
                labels (Tensor): Labels of all anchors in the image.
                label_weights (Tensor): Label weights of all anchors.
                bbox_targets (Tensor): BBox targets of all anchors.
                bbox_weights (Tensor): BBox weights of all anchors.
                pos_inds (Tensor): Indices of positive anchors.
                neg_inds (Tensor): Indices of negative anchors.
                sampling_result (obj:`SamplingResult`): Sampling result.
        """
        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
                                           img_meta['img_shape'][:2],
                                           self.train_cfg.allowed_border)
        if not inside_flags.any():
            return (None, ) * 7
        # assign gt and sample anchors
        anchors = flat_anchors[inside_flags, :]

        assign_result = self.assigner.assign(
            anchors, gt_bboxes, gt_bboxes_ignore,
            None if self.sampling else gt_labels)
        sampling_result = self.sampler.sample(assign_result, anchors,
                                              gt_bboxes)

        num_valid_anchors = anchors.shape[0]
        bbox_targets = torch.zeros_like(anchors)
        bbox_weights = torch.zeros_like(anchors)
        labels = anchors.new_full((num_valid_anchors, ),
                                  self.num_classes,
                                  dtype=torch.long)
        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)

        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        if len(pos_inds) > 0:
            if not self.reg_decoded_bbox:
                pos_bbox_targets = self.bbox_coder.encode(
                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
            else:
                pos_bbox_targets = sampling_result.pos_gt_bboxes
            bbox_targets[pos_inds, :] = pos_bbox_targets
            bbox_weights[pos_inds, :] = 1.0
            if gt_labels is None:
                # Only rpn gives gt_labels as None
                # Foreground is the first class since v2.5.0
                labels[pos_inds] = 0
            else:
                labels[pos_inds] = gt_labels[
                    sampling_result.pos_assigned_gt_inds]
            if self.train_cfg.pos_weight <= 0:
                label_weights[pos_inds] = 1.0
            else:
                label_weights[pos_inds] = self.train_cfg.pos_weight
        if len(neg_inds) > 0:
            label_weights[neg_inds] = 1.0

        # map up to original set of anchors
        if unmap_outputs:
            num_total_anchors = flat_anchors.size(0)
            labels = unmap(
                labels, num_total_anchors, inside_flags,
                fill=self.num_classes)  # fill bg label
            label_weights = unmap(label_weights, num_total_anchors,
                                  inside_flags)
            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)

        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
                neg_inds, sampling_result)

    def get_targets(self,
                    anchor_list,
                    valid_flag_list,
                    gt_bboxes_list,
                    img_metas,
                    gt_bboxes_ignore_list=None,
                    gt_labels_list=None,
                    label_channels=1,
                    unmap_outputs=True,
                    return_sampling_results=False):
        """Compute regression and classification targets for anchors in
        multiple images.

        Args:
            anchor_list (list[list[Tensor]]): Multi level anchors of each
                image. The outer list indicates images, and the inner list
                corresponds to feature levels of the image. Each element of
                the inner list is a tensor of shape (num_anchors, 4).
            valid_flag_list (list[list[Tensor]]): Multi level valid flags of
                each image. The outer list indicates images, and the inner
                list corresponds to feature levels of the image. Each element
                of the inner list is a tensor of shape (num_anchors, )
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
            img_metas (list[dict]): Meta info of each image.
            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
                ignored.
            gt_labels_list (list[Tensor]): Ground truth labels of each box.
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.

        Returns:
            tuple: Usually returns a tuple containing learning targets.

                - labels_list (list[Tensor]): Labels of each level.
                - label_weights_list (list[Tensor]): Label weights of each \
                    level.
                - bbox_targets_list (list[Tensor]): BBox targets of each level.
                - bbox_weights_list (list[Tensor]): BBox weights of each level.
                - num_total_pos (int): Number of positive samples in all \
                    images.
                - num_total_neg (int): Number of negative samples in all \
                    images.
            additional_returns: This function enables user-defined returns
                from `self._get_targets_single`. These returns are currently
                refined to properties at each feature map (i.e. having HxW
                dimension) and are concatenated to the end of the returned
                tuple.
        """
        num_imgs = len(img_metas)
        assert len(anchor_list) == len(valid_flag_list) == num_imgs

        # anchor number of multi levels
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        # concat all level anchors to a single tensor
        concat_anchor_list = []
        concat_valid_flag_list = []
        for i in range(num_imgs):
            assert len(anchor_list[i]) == len(valid_flag_list[i])
            concat_anchor_list.append(torch.cat(anchor_list[i]))
            concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))

        # compute targets for each image
        if gt_bboxes_ignore_list is None:
            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
        if gt_labels_list is None:
            gt_labels_list = [None for _ in range(num_imgs)]
        results = multi_apply(
            self._get_targets_single,
            concat_anchor_list,
            concat_valid_flag_list,
            gt_bboxes_list,
            gt_bboxes_ignore_list,
            gt_labels_list,
            img_metas,
            label_channels=label_channels,
            unmap_outputs=unmap_outputs)
        (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
         pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]
        rest_results = list(results[7:])  # user-added return values
        # no valid anchors
        if any([labels is None for labels in all_labels]):
            return None
        # sampled anchors of all images
        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
        # split targets to a list w.r.t. multiple levels
        labels_list = images_to_levels(all_labels, num_level_anchors)
        label_weights_list = images_to_levels(all_label_weights,
                                              num_level_anchors)
        bbox_targets_list = images_to_levels(all_bbox_targets,
                                             num_level_anchors)
        bbox_weights_list = images_to_levels(all_bbox_weights,
                                             num_level_anchors)
        res = (labels_list, label_weights_list, bbox_targets_list,
               bbox_weights_list, num_total_pos, num_total_neg)
        if return_sampling_results:
            res = res + (sampling_results_list, )
        for i, r in enumerate(rest_results):  # user-added return values
            rest_results[i] = images_to_levels(r, num_level_anchors)

        return res + tuple(rest_results)

    def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
                    bbox_targets, bbox_weights, num_total_samples):
        """Compute loss of a single scale level.

        Args:
            cls_score (Tensor): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W).
            bbox_pred (Tensor): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            anchors (Tensor): Box reference for each scale level with shape
                (N, num_total_anchors, 4).
            labels (Tensor): Labels of each anchors with shape
                (N, num_total_anchors).
            label_weights (Tensor): Label weights of each anchor with shape
                (N, num_total_anchors)
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (N, num_total_anchors, 4).
            bbox_weights (Tensor): BBox regression loss weights of each anchor
                with shape (N, num_total_anchors, 4).
            num_total_samples (int): If sampling, the total number of anchors;
                otherwise, the number of positive anchors.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # classification loss
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)
        cls_score = cls_score.permute(0, 2, 3,
                                      1).reshape(-1, self.cls_out_channels)
        loss_cls = self.loss_cls(
            cls_score, labels, label_weights, avg_factor=num_total_samples)
        # regression loss
        bbox_targets = bbox_targets.reshape(-1, 4)
        bbox_weights = bbox_weights.reshape(-1, 4)
        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
        if self.reg_decoded_bbox:
            anchors = anchors.reshape(-1, 4)
            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
        loss_bbox = self.loss_bbox(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            avg_factor=num_total_samples)
        return loss_cls, loss_bbox

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss. Default: None

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg) = cls_reg_targets
        num_total_samples = (
            num_total_pos + num_total_neg if self.sampling else num_total_pos)

        # anchor number of multi levels
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        # concat all level anchors and flags to a single tensor
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(torch.cat(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list,
                                           num_level_anchors)

        losses_cls, losses_bbox = multi_apply(
            self.loss_single,
            cls_scores,
            bbox_preds,
            all_anchor_list,
            labels_list,
            label_weights_list,
            bbox_targets_list,
            bbox_weights_list,
            num_total_samples=num_total_samples)
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   img_metas,
                   cfg=None,
                   rescale=False,
                   with_nms=True):
        """Transform network output for a batch into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where the first 4 columns
                are bounding box positions (tl_x, tl_y, br_x, br_y) and the
                5-th column is a score between 0 and 1. The second item is a
                (n,) tensor where each item is the predicted class label of
                the corresponding box.

        Example:
            >>> import mmcv
            >>> self = AnchorHead(
            >>>     num_classes=9,
            >>>     in_channels=1,
            >>>     anchor_generator=dict(
            >>>         type='AnchorGenerator',
            >>>         scales=[8],
            >>>         ratios=[0.5, 1.0, 2.0],
            >>>         strides=[4,]))
            >>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
            >>> cfg = mmcv.Config(dict(
            >>>     score_thr=0.00,
            >>>     nms=dict(type='nms', iou_thr=1.0),
            >>>     max_per_img=10))
            >>> feat = torch.rand(1, 1, 3, 3)
            >>> cls_score, bbox_pred = self.forward_single(feat)
            >>> # note the input lists are over different levels, not images
            >>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
            >>> result_list = self.get_bboxes(cls_scores, bbox_preds,
            >>>                               img_metas, cfg)
            >>> det_bboxes, det_labels = result_list[0]
            >>> assert len(result_list) == 1
            >>> assert det_bboxes.shape[1] == 5
            >>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
        """
        assert len(cls_scores) == len(bbox_preds)
        num_levels = len(cls_scores)

        device = cls_scores[0].device
        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
        mlvl_anchors = self.anchor_generator.grid_anchors(
            featmap_sizes, device=device)

        result_list = []
        for img_id in range(len(img_metas)):
            cls_score_list = [
                cls_scores[i][img_id].detach() for i in range(num_levels)
            ]
            bbox_pred_list = [
                bbox_preds[i][img_id].detach() for i in range(num_levels)
            ]
            img_shape = img_metas[img_id]['img_shape']
            scale_factor = img_metas[img_id]['scale_factor']
            if with_nms:
                # some heads don't support with_nms argument
                proposals = self._get_bboxes_single(cls_score_list,
                                                    bbox_pred_list,
                                                    mlvl_anchors, img_shape,
                                                    scale_factor, cfg, rescale)
            else:
                proposals = self._get_bboxes_single(cls_score_list,
                                                    bbox_pred_list,
                                                    mlvl_anchors, img_shape,
                                                    scale_factor, cfg, rescale,
                                                    with_nms)
            result_list.append(proposals)
        return result_list

    def _get_bboxes_single(self,
                           cls_score_list,
                           bbox_pred_list,
                           mlvl_anchors,
                           img_shape,
                           scale_factor,
                           cfg,
                           rescale=False,
                           with_nms=True):
        """Transform outputs for a single batch item into bbox predictions.

        Args:
            cls_score_list (list[Tensor]): Box scores for a single scale level
                Has shape (num_anchors * num_classes, H, W).
            bbox_pred_list (list[Tensor]): Box energies / deltas for a single
                scale level with shape (num_anchors * 4, H, W).
            mlvl_anchors (list[Tensor]): Box reference for a single scale level
                with shape (num_total_anchors, 4).
            img_shape (tuple[int]): Shape of the input image,
                (height, width, 3).
            scale_factor (ndarray): Scale factor of the image arranged as
                (w_scale, h_scale, w_scale, h_scale).
            cfg (mmcv.Config): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            Tensor: Labeled boxes in shape (n, 5), where the first 4 columns
                are bounding box positions (tl_x, tl_y, br_x, br_y) and the
                5-th column is a score between 0 and 1.
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
        mlvl_bboxes = []
        mlvl_scores = []
        for cls_score, bbox_pred, anchors in zip(cls_score_list,
                                                 bbox_pred_list, mlvl_anchors):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            cls_score = cls_score.permute(1, 2,
                                          0).reshape(-1, self.cls_out_channels)
            if self.use_sigmoid_cls:
                scores = cls_score.sigmoid()
            else:
                scores = cls_score.softmax(-1)
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            nms_pre = cfg.get('nms_pre', -1)
            if nms_pre > 0 and scores.shape[0] > nms_pre:
                # Get maximum scores for foreground classes.
                if self.use_sigmoid_cls:
                    max_scores, _ = scores.max(dim=1)
                else:
                    # remind that we set FG labels to [0, num_class-1]
                    # since mmdet v2.0
                    # BG cat_id: num_class
                    max_scores, _ = scores[:, :-1].max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                anchors = anchors[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
            bboxes = self.bbox_coder.decode(
                anchors, bbox_pred, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        if self.use_sigmoid_cls:
            # Add a dummy background class to the backend when using sigmoid
            # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
            # BG cat_id: num_class
            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)

        if with_nms:
            det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
                                                    cfg.score_thr, cfg.nms,
                                                    cfg.max_per_img)
            return det_bboxes, det_labels
        else:
            return mlvl_bboxes, mlvl_scores

    def aug_test(self, feats, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            feats (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains features for all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[ndarray]: bbox results of each class
        """
        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
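# --- Illustrative sketch (editorial addition, not part of upstream mmdet) --
# `get_targets` above concatenates the anchors of every level for each image,
# computes per-anchor targets, and finally regroups the per-image results back
# into per-level tensors via `images_to_levels`. The standalone function below
# is a minimal sketch of that regrouping step; `images_to_levels_sketch` is a
# hypothetical name used here for illustration only.
import torch as _torch


def images_to_levels_sketch(per_image_targets, num_level_anchors):
    """Stack per-image targets and split them back into feature levels."""
    stacked = _torch.stack(per_image_targets, 0)  # (num_imgs, total_anchors, ...)
    level_targets = []
    start = 0
    for n in num_level_anchors:
        level_targets.append(stacked[:, start:start + n])
        start += n
    return level_targets


if __name__ == '__main__':
    # two images, two levels holding 6 and 2 anchors respectively
    targets = [_torch.zeros(8), _torch.ones(8)]
    levels = images_to_levels_sketch(targets, [6, 2])
    assert levels[0].shape == (2, 6) and levels[1].shape == (2, 2)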
31107
44.612903
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/retina_head.py
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init

from ..builder import HEADS
from .anchor_head import AnchorHead


@HEADS.register_module()
class RetinaHead(AnchorHead):
    r"""An anchor-based head used in `RetinaNet
    <https://arxiv.org/pdf/1708.02002.pdf>`_.

    The head contains two subnetworks. The first classifies anchor boxes and
    the second regresses deltas for the anchors.

    Example:
        >>> import torch
        >>> self = RetinaHead(11, 7)
        >>> x = torch.rand(1, 7, 32, 32)
        >>> cls_score, bbox_pred = self.forward_single(x)
        >>> # Each anchor predicts a score for each class except background
        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
        >>> assert cls_per_anchor == (self.num_classes)
        >>> assert box_per_anchor == 4
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     octave_base_scale=4,
                     scales_per_octave=3,
                     ratios=[0.5, 1.0, 2.0],
                     strides=[8, 16, 32, 64, 128]),
                 **kwargs):
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(RetinaHead, self).__init__(
            num_classes,
            in_channels,
            anchor_generator=anchor_generator,
            **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_anchors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_anchors * 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward_single(self, x):
        """Forward feature of a single scale level.

        Args:
            x (Tensor): Features of a single scale level.

        Returns:
            tuple:
                cls_score (Tensor): Cls scores for a single scale level
                    the channels number is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level, the channels number is num_anchors * 4.
        """
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)
        cls_score = self.retina_cls(cls_feat)
        bbox_pred = self.retina_reg(reg_feat)
        return cls_score, bbox_pred
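# --- Illustrative sketch (editorial addition, not part of upstream mmdet) --
# With the default anchor generator (octave_base_scale=4, scales_per_octave=3,
# three aspect ratios), every location carries 3 * 3 = 9 anchors, so the two
# output convs predict 9 * num_classes and 9 * 4 channels. A quick shape check
# under those default settings (a sketch, mirroring the class docstring):
if __name__ == '__main__':
    import torch
    head = RetinaHead(num_classes=11, in_channels=7)
    assert head.num_anchors == 9
    cls_score, bbox_pred = head.forward_single(torch.rand(1, 7, 32, 32))
    assert cls_score.shape[1] == 9 * 11   # num_anchors * num_classes
    assert bbox_pred.shape[1] == 9 * 4    # num_anchors * 4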
4051
34.234783
76
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/ga_rpn_head.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmcv.ops import nms

from ..builder import HEADS
from .guided_anchor_head import GuidedAnchorHead
from .rpn_test_mixin import RPNTestMixin


@HEADS.register_module()
class GARPNHead(RPNTestMixin, GuidedAnchorHead):
    """Guided-Anchor-based RPN head."""

    def __init__(self, in_channels, **kwargs):
        super(GARPNHead, self).__init__(1, in_channels, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.rpn_conv = nn.Conv2d(
            self.in_channels, self.feat_channels, 3, padding=1)
        super(GARPNHead, self)._init_layers()

    def init_weights(self):
        """Initialize weights of the head."""
        normal_init(self.rpn_conv, std=0.01)
        super(GARPNHead, self).init_weights()

    def forward_single(self, x):
        """Forward feature of a single scale level."""

        x = self.rpn_conv(x)
        x = F.relu(x, inplace=True)
        (cls_score, bbox_pred, shape_pred,
         loc_pred) = super(GARPNHead, self).forward_single(x)
        return cls_score, bbox_pred, shape_pred, loc_pred

    def loss(self,
             cls_scores,
             bbox_preds,
             shape_preds,
             loc_preds,
             gt_bboxes,
             img_metas,
             gt_bboxes_ignore=None):
        losses = super(GARPNHead, self).loss(
            cls_scores,
            bbox_preds,
            shape_preds,
            loc_preds,
            gt_bboxes,
            None,
            img_metas,
            gt_bboxes_ignore=gt_bboxes_ignore)
        return dict(
            loss_rpn_cls=losses['loss_cls'],
            loss_rpn_bbox=losses['loss_bbox'],
            loss_anchor_shape=losses['loss_shape'],
            loss_anchor_loc=losses['loss_loc'])

    def _get_bboxes_single(self,
                           cls_scores,
                           bbox_preds,
                           mlvl_anchors,
                           mlvl_masks,
                           img_shape,
                           scale_factor,
                           cfg,
                           rescale=False):
        cfg = self.test_cfg if cfg is None else cfg
        mlvl_proposals = []
        for idx in range(len(cls_scores)):
            rpn_cls_score = cls_scores[idx]
            rpn_bbox_pred = bbox_preds[idx]
            anchors = mlvl_anchors[idx]
            mask = mlvl_masks[idx]
            assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
            # if no location is kept, end.
            if mask.sum() == 0:
                continue
            rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
            if self.use_sigmoid_cls:
                rpn_cls_score = rpn_cls_score.reshape(-1)
                scores = rpn_cls_score.sigmoid()
            else:
                rpn_cls_score = rpn_cls_score.reshape(-1, 2)
                # remind that we set FG labels to [0, num_class-1]
                # since mmdet v2.0
                # BG cat_id: num_class
                scores = rpn_cls_score.softmax(dim=1)[:, :-1]
            # filter scores, bbox_pred w.r.t. mask.
            # anchors are filtered in get_anchors() beforehand.
            scores = scores[mask]
            rpn_bbox_pred = rpn_bbox_pred.permute(1, 2,
                                                  0).reshape(-1, 4)[mask, :]
            if scores.dim() == 0:
                rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
                anchors = anchors.unsqueeze(0)
                scores = scores.unsqueeze(0)
            # filter anchors, bbox_pred, scores w.r.t. scores
            if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
                _, topk_inds = scores.topk(cfg.nms_pre)
                rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
                anchors = anchors[topk_inds, :]
                scores = scores[topk_inds]
            # get proposals w.r.t. anchors and rpn_bbox_pred
            proposals = self.bbox_coder.decode(
                anchors, rpn_bbox_pred, max_shape=img_shape)
            # filter out too small bboxes
            if cfg.min_bbox_size > 0:
                w = proposals[:, 2] - proposals[:, 0]
                h = proposals[:, 3] - proposals[:, 1]
                valid_inds = torch.nonzero(
                    (w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size),
                    as_tuple=False).squeeze()
                proposals = proposals[valid_inds, :]
                scores = scores[valid_inds]
            # NMS in current level
            proposals, _ = nms(proposals, scores, cfg.nms_thr)
            proposals = proposals[:cfg.nms_post, :]
            mlvl_proposals.append(proposals)
        proposals = torch.cat(mlvl_proposals, 0)
        if cfg.nms_across_levels:
            # NMS across multi levels
            proposals, _ = nms(proposals[:, :4], proposals[:, -1],
                               cfg.nms_thr)
            proposals = proposals[:cfg.max_num, :]
        else:
            scores = proposals[:, 4]
            num = min(cfg.max_num, proposals.shape[0])
            _, topk_inds = scores.topk(num)
            proposals = proposals[topk_inds, :]
        return proposals
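# --- Illustrative sketch (editorial addition, not part of upstream mmdet) --
# `_get_bboxes_single` above drops proposals whose width or height falls below
# cfg.min_bbox_size before per-level NMS. The same filter in isolation (a
# minimal sketch; `filter_small_boxes` is a hypothetical helper name):
import torch as _torch


def filter_small_boxes(proposals, scores, min_size):
    """Keep only boxes whose width and height are both >= min_size."""
    w = proposals[:, 2] - proposals[:, 0]
    h = proposals[:, 3] - proposals[:, 1]
    keep = (w >= min_size) & (h >= min_size)
    return proposals[keep], scores[keep]


if __name__ == '__main__':
    boxes = _torch.tensor([[0., 0., 10., 10.], [0., 0., 1., 8.]])
    kept, _ = filter_small_boxes(boxes, _torch.tensor([0.9, 0.8]), min_size=2)
    assert kept.shape[0] == 1  # the 1x8 box is removed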
5247
38.164179
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/ga_retina_head.py
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmcv.ops import MaskedConv2d

from ..builder import HEADS
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead


@HEADS.register_module()
class GARetinaHead(GuidedAnchorHead):
    """Guided-Anchor-based RetinaNet head."""

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 **kwargs):
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(GARetinaHead, self).__init__(num_classes, in_channels, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))

        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
        self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
                                    1)
        self.feature_adaption_cls = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        self.feature_adaption_reg = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        self.retina_cls = MaskedConv2d(
            self.feat_channels,
            self.num_anchors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = MaskedConv2d(
            self.feat_channels, self.num_anchors * 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the layer."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)

        self.feature_adaption_cls.init_weights()
        self.feature_adaption_reg.init_weights()

        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_loc, std=0.01, bias=bias_cls)
        normal_init(self.conv_shape, std=0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward_single(self, x):
        """Forward feature map of a single scale level."""
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)

        loc_pred = self.conv_loc(cls_feat)
        shape_pred = self.conv_shape(reg_feat)

        cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
        reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)

        if not self.training:
            mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
        else:
            mask = None
        cls_score = self.retina_cls(cls_feat, mask)
        bbox_pred = self.retina_reg(reg_feat, mask)
        return cls_score, bbox_pred, shape_pred, loc_pred
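# --- Illustrative sketch (editorial addition, not part of upstream mmdet) --
# At inference time `forward_single` above turns the location prediction into
# a binary mask (sigmoid >= loc_filter_thr) so that MaskedConv2d only computes
# classification / regression at promising positions. The masking rule in
# isolation (a sketch; 0.01 is a made-up threshold value):
if __name__ == '__main__':
    import torch
    loc_pred = torch.randn(1, 1, 16, 16)   # raw location logits
    loc_filter_thr = 0.01                  # assumed threshold for the sketch
    mask = loc_pred.sigmoid()[0] >= loc_filter_thr
    # only True cells of `mask` would be evaluated by MaskedConv2d
    assert mask.dtype == torch.bool and mask.shape == (1, 16, 16)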
3876
34.245455
78
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/ssd_head.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmcv.runner import force_fp32

from mmdet.core import (build_anchor_generator, build_assigner,
                        build_bbox_coder, build_sampler, multi_apply)
from ..builder import HEADS
from ..losses import smooth_l1_loss
from .anchor_head import AnchorHead


# TODO: add loss evaluator for SSD
@HEADS.register_module()
class SSDHead(AnchorHead):
    """SSD head used in https://arxiv.org/abs/1512.02325.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        anchor_generator (dict): Config dict for anchor generator
        bbox_coder (dict): Config of bounding box coder.
        reg_decoded_bbox (bool): If true, the regression loss would be
            applied on decoded bounding boxes. Default: False
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
    """  # noqa: W605

    def __init__(self,
                 num_classes=80,
                 in_channels=(512, 1024, 512, 256, 256, 256),
                 anchor_generator=dict(
                     type='SSDAnchorGenerator',
                     scale_major=False,
                     input_size=300,
                     strides=[8, 16, 32, 64, 100, 300],
                     ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
                     basesize_ratio_range=(0.1, 0.9)),
                 bbox_coder=dict(
                     type='DeltaXYWHBBoxCoder',
                     target_means=[.0, .0, .0, .0],
                     target_stds=[1.0, 1.0, 1.0, 1.0],
                 ),
                 reg_decoded_bbox=False,
                 train_cfg=None,
                 test_cfg=None):
        super(AnchorHead, self).__init__()
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.cls_out_channels = num_classes + 1  # add background class
        self.anchor_generator = build_anchor_generator(anchor_generator)
        num_anchors = self.anchor_generator.num_base_anchors

        reg_convs = []
        cls_convs = []
        for i in range(len(in_channels)):
            reg_convs.append(
                nn.Conv2d(
                    in_channels[i],
                    num_anchors[i] * 4,
                    kernel_size=3,
                    padding=1))
            cls_convs.append(
                nn.Conv2d(
                    in_channels[i],
                    num_anchors[i] * (num_classes + 1),
                    kernel_size=3,
                    padding=1))
        self.reg_convs = nn.ModuleList(reg_convs)
        self.cls_convs = nn.ModuleList(cls_convs)

        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.reg_decoded_bbox = reg_decoded_bbox
        self.use_sigmoid_cls = False
        self.cls_focal_loss = False
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        # set sampling=False for anchor_target
        self.sampling = False
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            # SSD sampling=False so use PseudoSampler
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        self.fp16_enabled = False

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform', bias=0)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple:
                cls_scores (list[Tensor]): Classification scores for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all scale
                    levels, each is a 4D-tensor, the channels number is
                    num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
                                            self.cls_convs):
            cls_scores.append(cls_conv(feat))
            bbox_preds.append(reg_conv(feat))
        return cls_scores, bbox_preds

    def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
                    bbox_targets, bbox_weights, num_total_samples):
        """Compute loss of a single image.

        Args:
            cls_score (Tensor): Box scores for each image
                Has shape (num_total_anchors, num_classes).
            bbox_pred (Tensor): Box energies / deltas for each image
                with shape (num_total_anchors, 4).
            anchor (Tensor): Box reference for each scale level with shape
                (num_total_anchors, 4).
            labels (Tensor): Labels of each anchors with shape
                (num_total_anchors,).
            label_weights (Tensor): Label weights of each anchor with shape
                (num_total_anchors,)
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (num_total_anchors, 4).
            bbox_weights (Tensor): BBox regression loss weights of each anchor
                with shape (num_total_anchors, 4).
            num_total_samples (int): If sampling, the total number of anchors;
                otherwise, the number of positive anchors.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """

        loss_cls_all = F.cross_entropy(
            cls_score, labels, reduction='none') * label_weights
        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        pos_inds = ((labels >= 0) &
                    (labels < self.num_classes)).nonzero().reshape(-1)
        neg_inds = (labels == self.num_classes).nonzero().view(-1)

        num_pos_samples = pos_inds.size(0)
        num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
        if num_neg_samples > neg_inds.size(0):
            num_neg_samples = neg_inds.size(0)
        topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
        loss_cls_pos = loss_cls_all[pos_inds].sum()
        loss_cls_neg = topk_loss_cls_neg.sum()
        loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples

        if self.reg_decoded_bbox:
            bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)

        loss_bbox = smooth_l1_loss(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            beta=self.train_cfg.smoothl1_beta,
            avg_factor=num_total_samples)
        return loss_cls[None], loss_bbox

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): each item is the ground truth boxes of
                an image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=1,
            unmap_outputs=False)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg) = cls_reg_targets

        num_images = len(img_metas)
        all_cls_scores = torch.cat([
            s.permute(0, 2, 3, 1).reshape(
                num_images, -1, self.cls_out_channels) for s in cls_scores
        ], 1)
        all_labels = torch.cat(labels_list, -1).view(num_images, -1)
        all_label_weights = torch.cat(label_weights_list,
                                      -1).view(num_images, -1)
        all_bbox_preds = torch.cat([
            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
            for b in bbox_preds
        ], -2)
        all_bbox_targets = torch.cat(bbox_targets_list,
                                     -2).view(num_images, -1, 4)
        all_bbox_weights = torch.cat(bbox_weights_list,
                                     -2).view(num_images, -1, 4)

        # concat all level anchors to a single tensor
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))

        # check NaN and Inf
        assert torch.isfinite(all_cls_scores).all().item(), \
            'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), \
            'bbox predictions become infinite or NaN!'

        losses_cls, losses_bbox = multi_apply(
            self.loss_single,
            all_cls_scores,
            all_bbox_preds,
            all_anchors,
            all_labels,
            all_label_weights,
            all_bbox_targets,
            all_bbox_weights,
            num_total_samples=num_total_pos)
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
10567
39.803089
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/fcos_head.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import Scale, normal_init
from mmcv.runner import force_fp32

from mmdet.core import distance2bbox, multi_apply, multiclass_nms
from ..builder import HEADS, build_loss
from .anchor_free_head import AnchorFreeHead

INF = 1e8


@HEADS.register_module()
class FCOSHead(AnchorFreeHead):
    """Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.

    The FCOS head does not use anchor boxes. Instead bounding boxes are
    predicted at each pixel and a centerness measure is used to suppress
    low-quality predictions. Here norm_on_bbox, centerness_on_reg,
    dcn_on_last_conv are training tricks used in official repo, which will
    bring remarkable mAP gains of up to 4.9. Please see
    https://github.com/tianzhi0549/FCOS for more detail.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        strides (list[int] | list[tuple[int, int]]): Strides of points
            in multiple feature levels. Default: (4, 8, 16, 32, 64).
        regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
            level points.
        center_sampling (bool): If true, use center sampling. Default: False.
        center_sample_radius (float): Radius of center sampling. Default: 1.5.
        norm_on_bbox (bool): If true, normalize the regression targets
            with FPN strides. Default: False.
        centerness_on_reg (bool): If true, position centerness on the
            regress branch. Please refer to
            https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
            Default: False.
        conv_bias (bool | str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
            None, otherwise False. Default: "auto".
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        loss_centerness (dict): Config of centerness loss.
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: norm_cfg=dict(type='GN', num_groups=32,
            requires_grad=True).

    Example:
        >>> self = FCOSHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, centerness = self.forward(feats)
        >>> assert len(cls_score) == len(self.scales)
    """  # noqa: E501

    def __init__(self,
                 num_classes,
                 in_channels,
                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
                                 (512, INF)),
                 center_sampling=False,
                 center_sample_radius=1.5,
                 norm_on_bbox=False,
                 centerness_on_reg=False,
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 loss_centerness=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 **kwargs):
        self.regress_ranges = regress_ranges
        self.center_sampling = center_sampling
        self.center_sample_radius = center_sample_radius
        self.norm_on_bbox = norm_on_bbox
        self.centerness_on_reg = centerness_on_reg
        super().__init__(
            num_classes,
            in_channels,
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            norm_cfg=norm_cfg,
            **kwargs)
        self.loss_centerness = build_loss(loss_centerness)

    def _init_layers(self):
        """Initialize layers of the head."""
        super()._init_layers()
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

    def init_weights(self):
        """Initialize weights of the head."""
        super().init_weights()
        normal_init(self.conv_centerness, std=0.01)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple:
                cls_scores (list[Tensor]): Box scores for each scale level, \
                    each is a 4D-tensor, the channel number is \
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for each \
                    scale level, each is a 4D-tensor, the channel number is \
                    num_points * 4.
                centernesses (list[Tensor]): Centerness for each scale \
                    level, each is a 4D-tensor, the channel number is \
                    num_points * 1.
        """
        return multi_apply(self.forward_single, feats, self.scales,
                           self.strides)

    def forward_single(self, x, scale, stride):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.
            stride (int): The corresponding stride for feature maps, only
                used to normalize the bbox prediction when self.norm_on_bbox
                is True.

        Returns:
            tuple: scores for each class, bbox predictions and centerness \
                predictions of input feature maps.
        """
        cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)
        if self.centerness_on_reg:
            centerness = self.conv_centerness(reg_feat)
        else:
            centerness = self.conv_centerness(cls_feat)
        # scale the bbox_pred of different level
        # float to avoid overflow when enabling FP16
        bbox_pred = scale(bbox_pred).float()
        if self.norm_on_bbox:
            bbox_pred = F.relu(bbox_pred)
            if not self.training:
                bbox_pred *= stride
        else:
            bbox_pred = bbox_pred.exp()
        return cls_score, bbox_pred, centerness

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
    def loss(self,
             cls_scores,
             bbox_preds,
             centernesses,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            centernesses (list[Tensor]): Centerness for each scale level, each
                is a 4D-tensor, the channel number is num_points * 1.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert len(cls_scores) == len(bbox_preds) == len(centernesses)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                           bbox_preds[0].device)
        labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes,
                                                gt_labels)

        num_imgs = cls_scores[0].size(0)
        # flatten cls_scores, bbox_preds and centerness
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_centerness = [
            centerness.permute(0, 2, 3, 1).reshape(-1)
            for centerness in centernesses
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_centerness = torch.cat(flatten_centerness)
        flatten_labels = torch.cat(labels)
        flatten_bbox_targets = torch.cat(bbox_targets)
        # repeat points to align with bbox_preds
        flatten_points = torch.cat(
            [points.repeat(num_imgs, 1) for points in all_level_points])

        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((flatten_labels >= 0)
                    & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)
        num_pos = len(pos_inds)
        loss_cls = self.loss_cls(
            flatten_cls_scores, flatten_labels,
            avg_factor=num_pos + num_imgs)  # avoid num_pos is 0

        pos_bbox_preds = flatten_bbox_preds[pos_inds]
        pos_centerness = flatten_centerness[pos_inds]

        if num_pos > 0:
            pos_bbox_targets = flatten_bbox_targets[pos_inds]
            pos_centerness_targets = self.centerness_target(pos_bbox_targets)
            pos_points = flatten_points[pos_inds]
            pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
            pos_decoded_target_preds = distance2bbox(pos_points,
                                                     pos_bbox_targets)
            # centerness weighted iou loss
            loss_bbox = self.loss_bbox(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds,
                weight=pos_centerness_targets,
                avg_factor=pos_centerness_targets.sum())
            loss_centerness = self.loss_centerness(pos_centerness,
                                                   pos_centerness_targets)
        else:
            loss_bbox = pos_bbox_preds.sum()
            loss_centerness = pos_centerness.sum()

        return dict(
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            loss_centerness=loss_centerness)

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   centernesses,
                   img_metas,
                   cfg=None,
                   rescale=False,
                   with_nms=True):
        """Transform network output for a batch into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                with shape (N, num_points * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_points * 4, H, W).
            centernesses (list[Tensor]): Centerness for each scale level with
                shape (N, num_points * 1, H, W).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used. Default: None.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where the first 4 columns
                are bounding box positions (tl_x, tl_y, br_x, br_y) and the
                5-th column is a score between 0 and 1. The second item is a
                (n,) tensor where each item is the predicted class label of
                the corresponding box.
        """
        assert len(cls_scores) == len(bbox_preds)
        num_levels = len(cls_scores)

        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
                                      bbox_preds[0].device)
        result_list = []
        for img_id in range(len(img_metas)):
            cls_score_list = [
                cls_scores[i][img_id].detach() for i in range(num_levels)
            ]
            bbox_pred_list = [
                bbox_preds[i][img_id].detach() for i in range(num_levels)
            ]
            centerness_pred_list = [
                centernesses[i][img_id].detach() for i in range(num_levels)
            ]
            img_shape = img_metas[img_id]['img_shape']
            scale_factor = img_metas[img_id]['scale_factor']
            det_bboxes = self._get_bboxes_single(
                cls_score_list, bbox_pred_list, centerness_pred_list,
                mlvl_points, img_shape, scale_factor, cfg, rescale, with_nms)
            result_list.append(det_bboxes)
        return result_list

    def _get_bboxes_single(self,
                           cls_scores,
                           bbox_preds,
                           centernesses,
                           mlvl_points,
                           img_shape,
                           scale_factor,
                           cfg,
                           rescale=False,
                           with_nms=True):
        """Transform outputs for a single batch item into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box scores for a single scale level
                with shape (num_points * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for a single
                scale level with shape (num_points * 4, H, W).
            centernesses (list[Tensor]): Centerness for a single scale level
                with shape (num_points * 1, H, W).
            mlvl_points (list[Tensor]): Box reference for a single scale level
                with shape (num_total_points, 4).
            img_shape (tuple[int]): Shape of the input image,
                (height, width, 3).
            scale_factor (ndarray): Scale factor of the image arranged as
                (w_scale, h_scale, w_scale, h_scale).
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            tuple(Tensor):
                det_bboxes (Tensor): BBox predictions in shape (n, 5), where
                    the first 4 columns are bounding box positions
                    (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
                    between 0 and 1.
                det_labels (Tensor): A (n,) tensor where each item is the
                    predicted class label of the corresponding box.
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
        mlvl_bboxes = []
        mlvl_scores = []
        mlvl_centerness = []
        for cls_score, bbox_pred, centerness, points in zip(
                cls_scores, bbox_preds, centernesses, mlvl_points):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            scores = cls_score.permute(1, 2, 0).reshape(
                -1, self.cls_out_channels).sigmoid()
            centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()

            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            nms_pre = cfg.get('nms_pre', -1)
            if nms_pre > 0 and scores.shape[0] > nms_pre:
                max_scores, _ = (scores * centerness[:, None]).max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                points = points[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
                centerness = centerness[topk_inds]
            bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
            mlvl_centerness.append(centerness)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
        # BG cat_id: num_class
        mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
        mlvl_centerness = torch.cat(mlvl_centerness)

        if with_nms:
            det_bboxes, det_labels = multiclass_nms(
                mlvl_bboxes,
                mlvl_scores,
                cfg.score_thr,
                cfg.nms,
                cfg.max_per_img,
                score_factors=mlvl_centerness)
            return det_bboxes, det_labels
        else:
            return mlvl_bboxes, mlvl_scores, mlvl_centerness

    def _get_points_single(self,
                           featmap_size,
                           stride,
                           dtype,
                           device,
                           flatten=False):
        """Get points according to feature map sizes."""
        y, x = super()._get_points_single(featmap_size, stride, dtype, device)
        points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),
                             dim=-1) + stride // 2
        return points

    def get_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute regression, classification and centerness targets for
        points in multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).

        Returns:
            tuple:
                concat_lvl_labels (list[Tensor]): Labels of each level. \
                concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \
                    level.
        """
        assert len(points) == len(self.regress_ranges)
        num_levels = len(points)
        # expand regress ranges to align with points
        expanded_regress_ranges = [
            points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
                points[i]) for i in range(num_levels)
        ]
        # concat all levels points and regress ranges
        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
        concat_points = torch.cat(points, dim=0)

        # the number of points per img, per lvl
        num_points = [center.size(0) for center in points]

        # get labels and bbox_targets of each image
        labels_list, bbox_targets_list = multi_apply(
            self._get_target_single,
            gt_bboxes_list,
            gt_labels_list,
            points=concat_points,
            regress_ranges=concat_regress_ranges,
            num_points_per_lvl=num_points)

        # split to per img, per level
        labels_list = [labels.split(num_points, 0) for labels in labels_list]
        bbox_targets_list = [
            bbox_targets.split(num_points, 0)
            for bbox_targets in bbox_targets_list
        ]

        # concat per level image
        concat_lvl_labels = []
        concat_lvl_bbox_targets = []
        for i in range(num_levels):
            concat_lvl_labels.append(
                torch.cat([labels[i] for labels in labels_list]))
            bbox_targets = torch.cat(
                [bbox_targets[i] for bbox_targets in bbox_targets_list])
            if self.norm_on_bbox:
                bbox_targets = bbox_targets / self.strides[i]
            concat_lvl_bbox_targets.append(bbox_targets)
        return concat_lvl_labels, concat_lvl_bbox_targets

    def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,
                           num_points_per_lvl):
        """Compute regression and classification targets for a single
        image."""
        num_points = points.size(0)
        num_gts = gt_labels.size(0)
        if num_gts == 0:
            return gt_labels.new_full((num_points,), self.num_classes), \
                   gt_bboxes.new_zeros((num_points, 4))

        areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
            gt_bboxes[:, 3] - gt_bboxes[:, 1])
        # TODO: figure out why these two are different
        # areas = areas[None].expand(num_points, num_gts)
        areas = areas[None].repeat(num_points, 1)
        regress_ranges = regress_ranges[:, None, :].expand(
            num_points, num_gts, 2)
        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
        xs, ys = points[:, 0], points[:, 1]
        xs = xs[:, None].expand(num_points, num_gts)
        ys = ys[:, None].expand(num_points, num_gts)

        left = xs - gt_bboxes[..., 0]
        right = gt_bboxes[..., 2] - xs
        top = ys - gt_bboxes[..., 1]
        bottom = gt_bboxes[..., 3] - ys
        bbox_targets = torch.stack((left, top, right, bottom), -1)

        if self.center_sampling:
            # condition1: inside a `center bbox`
            radius = self.center_sample_radius
            center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
            center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
            center_gts = torch.zeros_like(gt_bboxes)
            stride = center_xs.new_zeros(center_xs.shape)

            # project the points on current lvl back to the `original` sizes
            lvl_begin = 0
            for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
                lvl_end = lvl_begin + num_points_lvl
                stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
                lvl_begin = lvl_end

            x_mins = center_xs - stride
            y_mins = center_ys - stride
            x_maxs = center_xs + stride
            y_maxs = center_ys + stride
            center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
                                             x_mins, gt_bboxes[..., 0])
            center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
                                             y_mins, gt_bboxes[..., 1])
            center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
                                             gt_bboxes[..., 2], x_maxs)
            center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
                                             gt_bboxes[..., 3], y_maxs)
            cb_dist_left = xs - center_gts[..., 0]
            cb_dist_right = center_gts[..., 2] - xs
            cb_dist_top = ys - center_gts[..., 1]
            cb_dist_bottom = center_gts[..., 3] - ys
            center_bbox = torch.stack(
                (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
            inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
        else:
            # condition1: inside a gt bbox
            inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0

        # condition2: limit the regression range for each location
        max_regress_distance = bbox_targets.max(-1)[0]
        inside_regress_range = (
            (max_regress_distance >= regress_ranges[..., 0])
            & (max_regress_distance <= regress_ranges[..., 1]))

        # if there are still more than one objects for a location,
        # we choose the one with minimal area
        areas[inside_gt_bbox_mask == 0] = INF
        areas[inside_regress_range == 0] = INF
        min_area, min_area_inds = areas.min(dim=1)

        labels = gt_labels[min_area_inds]
        labels[min_area == INF] = self.num_classes  # set as BG
        bbox_targets = bbox_targets[range(num_points), min_area_inds]

        return labels, bbox_targets

    def centerness_target(self, pos_bbox_targets):
        """Compute centerness targets.

        Args:
            pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape
                (num_pos, 4)

        Returns:
            Tensor: Centerness target.
        """
        # only calculate pos centerness targets, otherwise there may be nan
        left_right = pos_bbox_targets[:, [0, 2]]
        top_bottom = pos_bbox_targets[:, [1, 3]]
        centerness_targets = (
            left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
                top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
        return torch.sqrt(centerness_targets)
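# --- Illustrative sketch (editorial addition, not part of upstream mmdet) --
# `centerness_target` above implements
#     centerness = sqrt( min(l, r) / max(l, r) * min(t, b) / max(t, b) )
# so a point at a box centre scores 1.0 and the score decays towards 0 near
# the borders. A tiny numeric check of that behaviour (sketch only):
if __name__ == '__main__':
    import torch
    # (l, t, r, b) distance targets: perfectly centred vs. strongly off-centre
    targets = torch.tensor([[5., 5., 5., 5.], [1., 1., 9., 9.]])
    lr, tb = targets[:, [0, 2]], targets[:, [1, 3]]
    centerness = ((lr.min(-1)[0] / lr.max(-1)[0]) *
                  (tb.min(-1)[0] / tb.max(-1)[0])).sqrt()
    assert torch.allclose(centerness, torch.tensor([1.0, 1.0 / 9.0]))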
25553
43.441739
113
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/gfocal_head.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init from mmcv.runner import force_fp32 from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps, build_assigner, build_sampler, distance2bbox, images_to_levels, multi_apply, multiclass_nms, reduce_mean, unmap) from ..builder import HEADS, build_loss from .anchor_head import AnchorHead class Integral(nn.Module): """A fixed layer for calculating integral result from distribution. This layer calculates the target location by :math: `sum{P(y_i) * y_i}`, P(y_i) denotes the softmax vector that represents the discrete distribution y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max} Args: reg_max (int): The maximal value of the discrete set. Default: 16. You may want to reset it according to your new dataset or related settings. """ def __init__(self, reg_max=16): super(Integral, self).__init__() self.reg_max = reg_max self.register_buffer('project', torch.linspace(0, self.reg_max, self.reg_max + 1)) def forward(self, x): """Forward feature from the regression head to get integral result of bounding box location. Args: x (Tensor): Features of the regression head, shape (N, 4*(n+1)), n is self.reg_max. Returns: x (Tensor): Integral result of box locations, i.e., distance offsets from the box center in four directions, shape (N, 4). """ x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) return x @HEADS.register_module() class GFocalHead(AnchorHead): """Generalized Focal Loss V2: Learning Reliable Localization Quality Estimation for Dense Object Detection. GFocal head structure is similar with GFL head, however GFocal uses the statistics of learned distribution to guide the localization quality estimation (LQE) Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. stacked_convs (int): Number of conv layers in cls and reg tower. Default: 4. conv_cfg (dict): dictionary to construct and config conv layer. Default: None. norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='GN', num_groups=32, requires_grad=True). loss_qfl (dict): Config of Quality Focal Loss (QFL). reg_max (int): Max value of integral set :math: `{0, ..., reg_max}` in QFL setting. Default: 16. 
reg_topk (int): top-k statistics of distribution to guide LQE reg_channels (int): hidden layer unit to generate LQE Example: >>> self = GFocalHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_quality_score, bbox_pred = self.forward(feats) >>> assert len(cls_quality_score) == len(self.scales) """ def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), reg_max=16, reg_topk=4, reg_channels=64, add_mean=True, **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reg_max = reg_max self.reg_topk = reg_topk self.reg_channels = reg_channels self.add_mean = add_mean self.total_dim = reg_topk if add_mean: self.total_dim += 1 print('total dim = ', self.total_dim * 4) super(GFocalHead, self).__init__(num_classes, in_channels, **kwargs) self.sampling = False if self.train_cfg: self.assigner = build_assigner(self.train_cfg.assigner) # SSD sampling=False so use PseudoSampler sampler_cfg = dict(type='PseudoSampler') self.sampler = build_sampler(sampler_cfg, context=self) self.integral = Integral(self.reg_max) self.loss_dfl = build_loss(loss_dfl) def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) assert self.num_anchors == 1, 'anchor free version' self.gfl_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.gfl_reg = nn.Conv2d( self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) self.scales = nn.ModuleList( [Scale(1.0) for _ in self.anchor_generator.strides]) conf_vector = [nn.Conv2d(4 * self.total_dim, self.reg_channels, 1)] conf_vector += [self.relu] conf_vector += [nn.Conv2d(self.reg_channels, 1, 1), nn.Sigmoid()] self.reg_conf = nn.Sequential(*conf_vector) def init_weights(self): """Initialize weights of the head.""" for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) for m in self.reg_conf: if isinstance(m, nn.Conv2d): normal_init(m, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.gfl_cls, std=0.01, bias=bias_cls) normal_init(self.gfl_reg, std=0.01) def forward(self, feats): """Forward features from the upstream network. Args: feats (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: Usually a tuple of classification scores and bbox prediction cls_scores (list[Tensor]): Classification and quality (IoU) joint scores for all scale levels, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box distribution logits for all scale levels, each is a 4D-tensor, the channel number is 4*(n+1), n is max value of integral set. """ return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): """Forward feature of a single scale level. Args: x (Tensor): Features of a single scale level. scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. 
Returns: tuple: cls_score (Tensor): Cls and quality joint scores for a single scale level the channel number is num_classes. bbox_pred (Tensor): Box distribution logits for a single scale level, the channel number is 4*(n+1), n is max value of integral set. """ cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) bbox_pred = scale(self.gfl_reg(reg_feat)).float() N, C, H, W = bbox_pred.size() prob = F.softmax(bbox_pred.reshape(N, 4, self.reg_max+1, H, W), dim=2) prob_topk, _ = prob.topk(self.reg_topk, dim=2) if self.add_mean: stat = torch.cat([prob_topk, prob_topk.mean(dim=2, keepdim=True)], dim=2) else: stat = prob_topk quality_score = self.reg_conf(stat.reshape(N, -1, H, W)) cls_score = self.gfl_cls(cls_feat).sigmoid() * quality_score return cls_score, bbox_pred def anchor_center(self, anchors): """Get anchor centers from anchors. Args: anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. Returns: Tensor: Anchor centers with shape (N, 2), "xy" format. """ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 return torch.stack([anchors_cx, anchors_cy], dim=-1) def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, bbox_targets, stride, num_total_samples): """Compute loss of a single scale level. Args: anchors (Tensor): Box reference for each scale level with shape (N, num_total_anchors, 4). cls_score (Tensor): Cls and quality joint scores for each scale level has shape (N, num_classes, H, W). bbox_pred (Tensor): Box distribution logits for each scale level with shape (N, 4*(n+1), H, W), n is max value of integral set. labels (Tensor): Labels of each anchors with shape (N, num_total_anchors). label_weights (Tensor): Label weights of each anchor with shape (N, num_total_anchors) bbox_targets (Tensor): BBox regression targets of each anchor wight shape (N, num_total_anchors, 4). stride (tuple): Stride in this scale level. num_total_samples (int): Number of positive samples that is reduced over all GPUs. Returns: dict[str, Tensor]: A dictionary of loss components. """ assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
        anchors = anchors.reshape(-1, 4)
        cls_score = cls_score.permute(0, 2, 3,
                                      1).reshape(-1, self.cls_out_channels)
        bbox_pred = bbox_pred.permute(0, 2, 3,
                                      1).reshape(-1, 4 * (self.reg_max + 1))
        bbox_targets = bbox_targets.reshape(-1, 4)
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)
        score = label_weights.new_zeros(labels.shape)

        if len(pos_inds) > 0:
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_bbox_pred = bbox_pred[pos_inds]
            pos_anchors = anchors[pos_inds]
            pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]

            weight_targets = cls_score.detach()
            weight_targets = weight_targets.max(dim=1)[0][pos_inds]
            pos_bbox_pred_corners = self.integral(pos_bbox_pred)
            pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
                                                 pos_bbox_pred_corners)
            pos_decode_bbox_targets = pos_bbox_targets / stride[0]
            score[pos_inds] = bbox_overlaps(
                pos_decode_bbox_pred.detach(),
                pos_decode_bbox_targets,
                is_aligned=True)
            pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
            target_corners = bbox2distance(pos_anchor_centers,
                                           pos_decode_bbox_targets,
                                           self.reg_max).reshape(-1)

            # regression loss
            loss_bbox = self.loss_bbox(
                pos_decode_bbox_pred,
                pos_decode_bbox_targets,
                weight=weight_targets,
                avg_factor=1.0)

            # dfl loss
            loss_dfl = self.loss_dfl(
                pred_corners,
                target_corners,
                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
                avg_factor=4.0)
        else:
            loss_bbox = bbox_pred.sum() * 0
            loss_dfl = bbox_pred.sum() * 0
            # keep the placeholder on the same device as the predictions
            # instead of hard-coding .cuda(), so the head also runs on CPU
            weight_targets = bbox_pred.new_tensor(0)

        # cls (qfl) loss
        loss_cls = self.loss_cls(
            cls_score, (labels, score),
            weight=label_weights,
            avg_factor=num_total_samples)

        return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Cls and quality scores for each scale
                level, each with shape (N, num_classes, H, W).
            bbox_preds (list[Tensor]): Box distribution logits for each scale
                level with shape (N, 4*(n+1), H, W), n is max value of
                integral set.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each
                box.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
""" featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == self.anchor_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels) if cls_reg_targets is None: return None (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = reduce_mean( torch.tensor(num_total_pos).cuda()).item() num_total_samples = max(num_total_samples, 1.0) losses_cls, losses_bbox, losses_dfl,\ avg_factor = multi_apply( self.loss_single, anchor_list, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, self.anchor_generator.strides, num_total_samples=num_total_samples) avg_factor = sum(avg_factor) avg_factor = reduce_mean(avg_factor).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) def _get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False, with_nms=True): """Transform outputs for a single batch item into labeled boxes. Args: cls_scores (list[Tensor]): Box scores for a single scale level has shape (num_classes, H, W). bbox_preds (list[Tensor]): Box distribution logits for a single scale level with shape (4*(n+1), H, W), n is max value of integral set. mlvl_anchors (list[Tensor]): Box reference for a single scale level with shape (num_total_anchors, 4). img_shape (tuple[int]): Shape of the input image, (height, width, 3). scale_factor (ndarray): Scale factor of the image arange as (w_scale, h_scale, w_scale, h_scale). cfg (mmcv.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Default: False. with_nms (bool): If True, do nms before return boxes. Default: True. Returns: tuple(Tensor): det_bboxes (Tensor): Bbox predictions in shape (N, 5), where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. det_labels (Tensor): A (N,) tensor where each item is the predicted class label of the corresponding box. 
""" cfg = self.test_cfg if cfg is None else cfg assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, stride, anchors in zip( cls_scores, bbox_preds, self.anchor_generator.strides, mlvl_anchors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] assert stride[0] == stride[1] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels) bbox_pred = bbox_pred.permute(1, 2, 0) bbox_pred = self.integral(bbox_pred) * stride[0] nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: max_scores, _ = scores.max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bboxes = distance2bbox( self.anchor_center(anchors), bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) # Add a dummy background class to the backend when using sigmoid # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 # BG cat_id: num_class padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) if with_nms: det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels else: return mlvl_bboxes, mlvl_scores def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): """Get targets for GFL head. This method is almost the same as `AnchorHead.get_targets()`. Besides returning the targets as the parent method does, it also returns the anchors as the first element of the returned tuple. """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] num_level_anchors_list = [num_level_anchors] * num_imgs # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( self._get_target_single, anchor_list, valid_flag_list, num_level_anchors_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, label_channels=label_channels, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. 
        anchors_list = images_to_levels(all_anchors, num_level_anchors)
        labels_list = images_to_levels(all_labels, num_level_anchors)
        label_weights_list = images_to_levels(all_label_weights,
                                              num_level_anchors)
        bbox_targets_list = images_to_levels(all_bbox_targets,
                                             num_level_anchors)
        bbox_weights_list = images_to_levels(all_bbox_weights,
                                             num_level_anchors)
        return (anchors_list, labels_list, label_weights_list,
                bbox_targets_list, bbox_weights_list, num_total_pos,
                num_total_neg)

    def _get_target_single(self,
                           flat_anchors,
                           valid_flags,
                           num_level_anchors,
                           gt_bboxes,
                           gt_bboxes_ignore,
                           gt_labels,
                           img_meta,
                           label_channels=1,
                           unmap_outputs=True):
        """Compute regression, classification targets for anchors in a single
        image.

        Args:
            flat_anchors (Tensor): Multi-level anchors of the image, which
                are concatenated into a single tensor of shape
                (num_anchors, 4).
            valid_flags (Tensor): Multi level valid flags of the image,
                which are concatenated into a single tensor of shape
                (num_anchors,).
            num_level_anchors (Tensor): Number of anchors of each scale
                level.
            gt_bboxes (Tensor): Ground truth bboxes of the image,
                shape (num_gts, 4).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored,
                shape (num_ignored_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            img_meta (dict): Meta info of the image.
            label_channels (int): Channel of label.
            unmap_outputs (bool): Whether to map outputs back to the original
                set of anchors.

        Returns:
            tuple: N is the number of total anchors in the image.
                anchors (Tensor): All anchors in the image with shape (N, 4).
                labels (Tensor): Labels of all anchors in the image with
                    shape (N,).
                label_weights (Tensor): Label weights of all anchors in the
                    image with shape (N,).
                bbox_targets (Tensor): BBox targets of all anchors in the
                    image with shape (N, 4).
                bbox_weights (Tensor): BBox weights of all anchors in the
                    image with shape (N, 4).
                pos_inds (Tensor): Indices of positive anchors with shape
                    (num_pos,).
                neg_inds (Tensor): Indices of negative anchors with shape
                    (num_neg,).
""" inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], self.train_cfg.allowed_border) if not inside_flags.any(): return (None, ) * 7 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] num_level_anchors_inside = self.get_num_level_anchors_inside( num_level_anchors, inside_flags) assign_result = self.assigner.assign(anchors, num_level_anchors_inside, gt_bboxes, gt_bboxes_ignore, gt_labels) sampling_result = self.sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_full((num_valid_anchors, ), self.num_classes, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = sampling_result.pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: # Only rpn gives gt_labels as None # Foreground is the first class labels[pos_inds] = 0 else: labels[pos_inds] = gt_labels[ sampling_result.pos_assigned_gt_inds] if self.train_cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) anchors = unmap(anchors, num_total_anchors, inside_flags) labels = unmap( labels, num_total_anchors, inside_flags, fill=self.num_classes) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): split_inside_flags = torch.split(inside_flags, num_level_anchors) num_level_anchors_inside = [ int(flags.sum()) for flags in split_inside_flags ] return num_level_anchors_inside
28,346
41.755656
79
py
GFocalV2
GFocalV2-master/mmdet/models/dense_heads/yolo_head.py
# Copyright (c) 2019 Western Digital Corporation or its affiliates.

import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, normal_init
from mmcv.runner import force_fp32

from mmdet.core import (build_anchor_generator, build_assigner,
                        build_bbox_coder, build_sampler, images_to_levels,
                        multi_apply, multiclass_nms)
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin


@HEADS.register_module()
class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
    """YOLOV3Head. Paper link: https://arxiv.org/abs/1804.02767.

    Args:
        num_classes (int): The number of object classes (w/o background).
        in_channels (List[int]): Number of input channels per scale.
        out_channels (List[int]): The number of output channels per scale
            before the final 1x1 layer. Default: (1024, 512, 256).
        anchor_generator (dict): Config dict for anchor generator.
        bbox_coder (dict): Config of bounding box coder.
        featmap_strides (List[int]): The stride of each scale.
            Should be in descending order. Default: (32, 16, 8).
        one_hot_smoother (float): Set a non-zero value to enable label
            smoothing. Default: 0.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        loss_cls (dict): Config of classification loss.
        loss_conf (dict): Config of confidence loss.
        loss_xy (dict): Config of xy coordinate loss.
        loss_wh (dict): Config of wh coordinate loss.
        train_cfg (dict): Training config of YOLOV3 head. Default: None.
        test_cfg (dict): Testing config of YOLOV3 head. Default: None.
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 out_channels=(1024, 512, 256),
                 anchor_generator=dict(
                     type='YOLOAnchorGenerator',
                     base_sizes=[[(116, 90), (156, 198), (373, 326)],
                                 [(30, 61), (62, 45), (59, 119)],
                                 [(10, 13), (16, 30), (33, 23)]],
                     strides=[32, 16, 8]),
                 bbox_coder=dict(type='YOLOBBoxCoder'),
                 featmap_strides=[32, 16, 8],
                 one_hot_smoother=0.,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 loss_cls=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 loss_conf=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 loss_xy=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=True,
                     loss_weight=1.0),
                 loss_wh=dict(type='MSELoss', loss_weight=1.0),
                 train_cfg=None,
                 test_cfg=None):
        super(YOLOV3Head, self).__init__()
        # Check params
        assert (len(in_channels) == len(out_channels) == len(featmap_strides))

        self.num_classes = num_classes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.featmap_strides = featmap_strides
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            if hasattr(self.train_cfg, 'sampler'):
                sampler_cfg = self.train_cfg.sampler
            else:
                sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)

        self.one_hot_smoother = one_hot_smoother

        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg

        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.anchor_generator = build_anchor_generator(anchor_generator)

        self.loss_cls = build_loss(loss_cls)
        self.loss_conf = build_loss(loss_conf)
        self.loss_xy = build_loss(loss_xy)
        self.loss_wh = build_loss(loss_wh)
        # usually the numbers of anchors for each level are the same
        # except SSD detectors
        self.num_anchors = self.anchor_generator.num_base_anchors[0]
        assert len(
            self.anchor_generator.num_base_anchors) == len(featmap_strides)
        self._init_layers()

    @property
    def num_levels(self):
        return len(self.featmap_strides)

    @property
    def num_attrib(self):
        """int: number of attributes in pred_map, bboxes (4) +
        objectness (1) + num_classes"""
        return 5 + self.num_classes

    def _init_layers(self):
        self.convs_bridge = nn.ModuleList()
        self.convs_pred = nn.ModuleList()
        for i in range(self.num_levels):
            conv_bridge = ConvModule(
                self.in_channels[i],
                self.out_channels[i],
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            conv_pred = nn.Conv2d(self.out_channels[i],
                                  self.num_anchors * self.num_attrib, 1)

            self.convs_bridge.append(conv_bridge)
            self.convs_pred.append(conv_pred)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.convs_pred:
            normal_init(m, std=0.01)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each
                is a 4D-tensor.

        Returns:
            tuple[Tensor]: A tuple of multi-level prediction maps, each is a
                4D-tensor of shape (batch_size, 5+num_classes, height,
                width).
        """
        assert len(feats) == self.num_levels
        pred_maps = []
        for i in range(self.num_levels):
            x = feats[i]
            x = self.convs_bridge[i](x)
            pred_map = self.convs_pred[i](x)
            pred_maps.append(pred_map)

        return tuple(pred_maps),

    @force_fp32(apply_to=('pred_maps', ))
    def get_bboxes(self,
                   pred_maps,
                   img_metas,
                   cfg=None,
                   rescale=False,
                   with_nms=True):
        """Transform network output for a batch into bbox predictions.

        Args:
            pred_maps (list[Tensor]): Raw predictions for a batch of images.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used. Default: None.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where the first 4 columns
                are bounding box positions (tl_x, tl_y, br_x, br_y) and the
                5-th column is a score between 0 and 1. The second item is a
                (n,) tensor where each item is the predicted class label of
                the corresponding box.
        """
        result_list = []
        num_levels = len(pred_maps)
        for img_id in range(len(img_metas)):
            pred_maps_list = [
                pred_maps[i][img_id].detach() for i in range(num_levels)
            ]
            scale_factor = img_metas[img_id]['scale_factor']
            proposals = self._get_bboxes_single(pred_maps_list, scale_factor,
                                                cfg, rescale, with_nms)
            result_list.append(proposals)
        return result_list

    def _get_bboxes_single(self,
                           pred_maps_list,
                           scale_factor,
                           cfg,
                           rescale=False,
                           with_nms=True):
        """Transform outputs for a single batch item into bbox predictions.

        Args:
            pred_maps_list (list[Tensor]): Prediction maps for different
                scales of each single image in the batch.
            scale_factor (ndarray): Scale factor of the image arranged as
                (w_scale, h_scale, w_scale, h_scale).
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.
        Returns:
            tuple(Tensor):
                det_bboxes (Tensor): BBox predictions in shape (n, 5), where
                    the first 4 columns are bounding box positions
                    (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
                    between 0 and 1.
                det_labels (Tensor): A (n,) tensor where each item is the
                    predicted class label of the corresponding box.
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(pred_maps_list) == self.num_levels
        multi_lvl_bboxes = []
        multi_lvl_cls_scores = []
        multi_lvl_conf_scores = []
        num_levels = len(pred_maps_list)
        featmap_sizes = [
            pred_maps_list[i].shape[-2:] for i in range(num_levels)
        ]
        multi_lvl_anchors = self.anchor_generator.grid_anchors(
            featmap_sizes, pred_maps_list[0][0].device)
        for i in range(self.num_levels):
            # get some key info for current scale
            pred_map = pred_maps_list[i]
            stride = self.featmap_strides[i]

            # (h, w, num_anchors*num_attrib) -> (h*w*num_anchors, num_attrib)
            pred_map = pred_map.permute(1, 2, 0).reshape(-1, self.num_attrib)

            pred_map[..., :2] = torch.sigmoid(pred_map[..., :2])
            bbox_pred = self.bbox_coder.decode(multi_lvl_anchors[i],
                                               pred_map[..., :4], stride)
            # conf and cls
            conf_pred = torch.sigmoid(pred_map[..., 4]).view(-1)
            cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
                -1, self.num_classes)  # Cls pred one-hot.

            # Filtering out all predictions with conf < conf_thr
            conf_thr = cfg.get('conf_thr', -1)
            if conf_thr > 0:
                # add as_tuple=False for compatibility in Pytorch 1.6
                conf_inds = conf_pred.ge(conf_thr).nonzero(
                    as_tuple=False).flatten()
                bbox_pred = bbox_pred[conf_inds, :]
                cls_pred = cls_pred[conf_inds, :]
                conf_pred = conf_pred[conf_inds]

            # Get top-k predictions
            nms_pre = cfg.get('nms_pre', -1)
            if 0 < nms_pre < conf_pred.size(0):
                _, topk_inds = conf_pred.topk(nms_pre)
                bbox_pred = bbox_pred[topk_inds, :]
                cls_pred = cls_pred[topk_inds, :]
                conf_pred = conf_pred[topk_inds]

            # Save the result of current scale
            multi_lvl_bboxes.append(bbox_pred)
            multi_lvl_cls_scores.append(cls_pred)
            multi_lvl_conf_scores.append(conf_pred)

        # Merge the results of different scales together
        multi_lvl_bboxes = torch.cat(multi_lvl_bboxes)
        multi_lvl_cls_scores = torch.cat(multi_lvl_cls_scores)
        multi_lvl_conf_scores = torch.cat(multi_lvl_conf_scores)

        if with_nms and (multi_lvl_conf_scores.size(0) == 0):
            return torch.zeros((0, 5)), torch.zeros((0, ))

        if rescale:
            multi_lvl_bboxes /= multi_lvl_bboxes.new_tensor(scale_factor)

        # In mmdet 2.x, the class_id for background is num_classes,
        # i.e., the last column.
        padding = multi_lvl_cls_scores.new_zeros(
            multi_lvl_cls_scores.shape[0], 1)
        multi_lvl_cls_scores = torch.cat([multi_lvl_cls_scores, padding],
                                         dim=1)

        # Support exporting to ONNX without nms
        if with_nms and cfg.get('nms', None) is not None:
            det_bboxes, det_labels = multiclass_nms(
                multi_lvl_bboxes,
                multi_lvl_cls_scores,
                cfg.score_thr,
                cfg.nms,
                cfg.max_per_img,
                score_factors=multi_lvl_conf_scores)
            return det_bboxes, det_labels
        else:
            return (multi_lvl_bboxes, multi_lvl_cls_scores,
                    multi_lvl_conf_scores)

    @force_fp32(apply_to=('pred_maps', ))
    def loss(self,
             pred_maps,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            pred_maps (list[Tensor]): Prediction map for each scale level,
                shape (N, num_anchors * num_attrib, H, W).
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each
                box.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        num_imgs = len(img_metas)
        device = pred_maps[0][0].device

        featmap_sizes = [
            pred_maps[i].shape[-2:] for i in range(self.num_levels)
        ]
        multi_level_anchors = self.anchor_generator.grid_anchors(
            featmap_sizes, device)
        anchor_list = [multi_level_anchors for _ in range(num_imgs)]

        responsible_flag_list = []
        for img_id in range(len(img_metas)):
            responsible_flag_list.append(
                self.anchor_generator.responsible_flags(
                    featmap_sizes, gt_bboxes[img_id], device))

        target_maps_list, neg_maps_list = self.get_targets(
            anchor_list, responsible_flag_list, gt_bboxes, gt_labels)

        losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(
            self.loss_single, pred_maps, target_maps_list, neg_maps_list)

        return dict(
            loss_cls=losses_cls,
            loss_conf=losses_conf,
            loss_xy=losses_xy,
            loss_wh=losses_wh)

    def loss_single(self, pred_map, target_map, neg_map):
        """Compute loss of a single image from a batch.

        Args:
            pred_map (Tensor): Raw predictions for a single level.
            target_map (Tensor): The Ground-Truth target for a single level.
            neg_map (Tensor): The negative masks for a single level.

        Returns:
            tuple:
                loss_cls (Tensor): Classification loss.
                loss_conf (Tensor): Confidence loss.
                loss_xy (Tensor): Regression loss of x, y coordinate.
                loss_wh (Tensor): Regression loss of w, h coordinate.
        """
        num_imgs = len(pred_map)
        pred_map = pred_map.permute(0, 2, 3,
                                    1).reshape(num_imgs, -1, self.num_attrib)
        neg_mask = neg_map.float()
        pos_mask = target_map[..., 4]
        pos_and_neg_mask = neg_mask + pos_mask
        pos_mask = pos_mask.unsqueeze(dim=-1)
        if torch.max(pos_and_neg_mask) > 1.:
            warnings.warn('There is overlap between pos and neg samples.')
            pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)

        pred_xy = pred_map[..., :2]
        pred_wh = pred_map[..., 2:4]
        pred_conf = pred_map[..., 4]
        pred_label = pred_map[..., 5:]

        target_xy = target_map[..., :2]
        target_wh = target_map[..., 2:4]
        target_conf = target_map[..., 4]
        target_label = target_map[..., 5:]

        loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask)
        loss_conf = self.loss_conf(
            pred_conf, target_conf, weight=pos_and_neg_mask)
        loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)
        loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)

        return loss_cls, loss_conf, loss_xy, loss_wh

    def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
                    gt_labels_list):
        """Compute target maps for anchors in multiple images.

        Args:
            anchor_list (list[list[Tensor]]): Multi level anchors of each
                image. The outer list indicates images, and the inner list
                corresponds to feature levels of the image. Each element of
                the inner list is a tensor of shape (num_total_anchors, 4).
            responsible_flag_list (list[list[Tensor]]): Multi level
                responsible flags of each image. Each element is a tensor of
                shape (num_total_anchors, ).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
            gt_labels_list (list[Tensor]): Ground truth labels of each box.

        Returns:
            tuple: Usually returns a tuple containing learning targets.
                - target_map_list (list[Tensor]): Target map of each level.
                - neg_map_list (list[Tensor]): Negative map of each level.
""" num_imgs = len(anchor_list) # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] results = multi_apply(self._get_targets_single, anchor_list, responsible_flag_list, gt_bboxes_list, gt_labels_list) all_target_maps, all_neg_maps = results assert num_imgs == len(all_target_maps) == len(all_neg_maps) target_maps_list = images_to_levels(all_target_maps, num_level_anchors) neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) return target_maps_list, neg_maps_list def _get_targets_single(self, anchors, responsible_flags, gt_bboxes, gt_labels): """Generate matching bounding box prior and converted GT. Args: anchors (list[Tensor]): Multi-level anchors of the image. responsible_flags (list[Tensor]): Multi-level responsible flags of anchors gt_bboxes (Tensor): Ground truth bboxes of single image. gt_labels (Tensor): Ground truth labels of single image. Returns: tuple: target_map (Tensor): Predication target map of each scale level, shape (num_total_anchors, 5+num_classes) neg_map (Tensor): Negative map of each scale level, shape (num_total_anchors,) """ anchor_strides = [] for i in range(len(anchors)): anchor_strides.append( torch.tensor(self.featmap_strides[i], device=gt_bboxes.device).repeat(len(anchors[i]))) concat_anchors = torch.cat(anchors) concat_responsible_flags = torch.cat(responsible_flags) anchor_strides = torch.cat(anchor_strides) assert len(anchor_strides) == len(concat_anchors) == \ len(concat_responsible_flags) assign_result = self.assigner.assign(concat_anchors, concat_responsible_flags, gt_bboxes) sampling_result = self.sampler.sample(assign_result, concat_anchors, gt_bboxes) target_map = concat_anchors.new_zeros( concat_anchors.size(0), self.num_attrib) target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, anchor_strides[sampling_result.pos_inds]) target_map[sampling_result.pos_inds, 4] = 1 gt_labels_one_hot = F.one_hot( gt_labels, num_classes=self.num_classes).float() if self.one_hot_smoother != 0: # label smooth gt_labels_one_hot = gt_labels_one_hot * ( 1 - self.one_hot_smoother ) + self.one_hot_smoother / self.num_classes target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ sampling_result.pos_assigned_gt_inds] neg_map = concat_anchors.new_zeros( concat_anchors.size(0), dtype=torch.uint8) neg_map[sampling_result.neg_inds] = 1 return target_map, neg_map def aug_test(self, feats, img_metas, rescale=False): """Test function with test time augmentation. Args: feats (list[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains features for all images in the batch. img_metas (list[list[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch. each dict has image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[ndarray]: bbox results of each class """ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
22,392
41.013133
79
py