| repo (string, len 2-99) | file (string, len 13-225) | code (string, len 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
| mmyolo | mmyolo-main/tools/model_converters/convert_kd_ckpt_to_student.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from pathlib import Path
import torch
def parse_args():
    parser = argparse.ArgumentParser(
        description='Convert KD checkpoint to student-only checkpoint')
    parser.add_argument('checkpoint', help='input checkpoint filename')
    parser.add_argument('--out-path', help='save checkpoint path')
    parser.add_argument(
        '--inplace', action='store_true', help='replace origin ckpt')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    checkpoint = torch.load(args.checkpoint, map_location='cpu')
    new_state_dict = dict()
    new_meta = checkpoint['meta']

    # Keep only the student branch: strip the 'architecture.' prefix that
    # the distillation wrapper adds to every student parameter and drop
    # everything else (e.g. teacher weights).
    for key, value in checkpoint['state_dict'].items():
        if key.startswith('architecture.'):
            new_key = key.replace('architecture.', '')
            new_state_dict[new_key] = value

    checkpoint = dict()
    checkpoint['meta'] = new_meta
    checkpoint['state_dict'] = new_state_dict

    if args.inplace:
        torch.save(checkpoint, args.checkpoint)
    else:
        ckpt_path = Path(args.checkpoint)
        ckpt_name = ckpt_path.stem
        if args.out_path:
            ckpt_dir = Path(args.out_path)
        else:
            ckpt_dir = ckpt_path.parent
        new_ckpt_path = ckpt_dir / f'{ckpt_name}_student.pth'
        torch.save(checkpoint, new_ckpt_path)


if __name__ == '__main__':
    main()
| 1,412 | 27.836735 | 71 | py |
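A quick way to sanity-check the converter above is a tiny synthetic round trip. The sketch below is illustrative only: the checkpoint contents and file names are made up, and it assumes the script is saved next to it as `convert_kd_ckpt_to_student.py`.

```python
# Hypothetical round-trip check for convert_kd_ckpt_to_student.py.
import subprocess

import torch

# Fake distillation checkpoint: student keys carry the 'architecture.'
# prefix; everything else (e.g. teacher weights) should be dropped.
fake_ckpt = {
    'meta': {'epoch': 12},
    'state_dict': {
        'architecture.backbone.stem.conv.weight': torch.randn(8, 3, 3, 3),
        'teacher.backbone.stem.conv.weight': torch.randn(8, 3, 3, 3),
    },
}
torch.save(fake_ckpt, 'kd_ckpt.pth')

subprocess.run(
    ['python', 'convert_kd_ckpt_to_student.py', 'kd_ckpt.pth'], check=True)

# The script writes <stem>_student.pth next to the input by default.
student = torch.load('kd_ckpt_student.pth', map_location='cpu')
assert list(student['state_dict']) == ['backbone.stem.conv.weight']
assert student['meta'] == {'epoch': 12}
```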
| mmyolo | mmyolo-main/tools/model_converters/yolov7_to_mmyolo.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import torch
convert_dict_tiny = {
# stem
'model.0': 'backbone.stem.0',
'model.1': 'backbone.stem.1',
# stage1 TinyDownSampleBlock
'model.2': 'backbone.stage1.0.short_conv',
'model.3': 'backbone.stage1.0.main_convs.0',
'model.4': 'backbone.stage1.0.main_convs.1',
'model.5': 'backbone.stage1.0.main_convs.2',
'model.7': 'backbone.stage1.0.final_conv',
# stage2 TinyDownSampleBlock
'model.9': 'backbone.stage2.1.short_conv',
'model.10': 'backbone.stage2.1.main_convs.0',
'model.11': 'backbone.stage2.1.main_convs.1',
'model.12': 'backbone.stage2.1.main_convs.2',
'model.14': 'backbone.stage2.1.final_conv',
# stage3 TinyDownSampleBlock
'model.16': 'backbone.stage3.1.short_conv',
'model.17': 'backbone.stage3.1.main_convs.0',
'model.18': 'backbone.stage3.1.main_convs.1',
'model.19': 'backbone.stage3.1.main_convs.2',
'model.21': 'backbone.stage3.1.final_conv',
# stage4 TinyDownSampleBlock
'model.23': 'backbone.stage4.1.short_conv',
'model.24': 'backbone.stage4.1.main_convs.0',
'model.25': 'backbone.stage4.1.main_convs.1',
'model.26': 'backbone.stage4.1.main_convs.2',
'model.28': 'backbone.stage4.1.final_conv',
# neck SPPCSPBlock
'model.29': 'neck.reduce_layers.2.short_layer',
'model.30': 'neck.reduce_layers.2.main_layers',
'model.35': 'neck.reduce_layers.2.fuse_layers',
'model.37': 'neck.reduce_layers.2.final_conv',
'model.38': 'neck.upsample_layers.0.0',
'model.40': 'neck.reduce_layers.1',
'model.42': 'neck.top_down_layers.0.short_conv',
'model.43': 'neck.top_down_layers.0.main_convs.0',
'model.44': 'neck.top_down_layers.0.main_convs.1',
'model.45': 'neck.top_down_layers.0.main_convs.2',
'model.47': 'neck.top_down_layers.0.final_conv',
'model.48': 'neck.upsample_layers.1.0',
'model.50': 'neck.reduce_layers.0',
'model.52': 'neck.top_down_layers.1.short_conv',
'model.53': 'neck.top_down_layers.1.main_convs.0',
'model.54': 'neck.top_down_layers.1.main_convs.1',
'model.55': 'neck.top_down_layers.1.main_convs.2',
'model.57': 'neck.top_down_layers.1.final_conv',
'model.58': 'neck.downsample_layers.0',
'model.60': 'neck.bottom_up_layers.0.short_conv',
'model.61': 'neck.bottom_up_layers.0.main_convs.0',
'model.62': 'neck.bottom_up_layers.0.main_convs.1',
'model.63': 'neck.bottom_up_layers.0.main_convs.2',
'model.65': 'neck.bottom_up_layers.0.final_conv',
'model.66': 'neck.downsample_layers.1',
'model.68': 'neck.bottom_up_layers.1.short_conv',
'model.69': 'neck.bottom_up_layers.1.main_convs.0',
'model.70': 'neck.bottom_up_layers.1.main_convs.1',
'model.71': 'neck.bottom_up_layers.1.main_convs.2',
'model.73': 'neck.bottom_up_layers.1.final_conv',
'model.74': 'neck.out_layers.0',
'model.75': 'neck.out_layers.1',
'model.76': 'neck.out_layers.2',
# head
'model.77.m.0': 'bbox_head.head_module.convs_pred.0.1',
'model.77.m.1': 'bbox_head.head_module.convs_pred.1.1',
'model.77.m.2': 'bbox_head.head_module.convs_pred.2.1'
}
convert_dict_l = {
# stem
'model.0': 'backbone.stem.0',
'model.1': 'backbone.stem.1',
'model.2': 'backbone.stem.2',
# stage1
# ConvModule
'model.3': 'backbone.stage1.0',
# ELANBlock expand_channel_2x
'model.4': 'backbone.stage1.1.short_conv',
'model.5': 'backbone.stage1.1.main_conv',
'model.6': 'backbone.stage1.1.blocks.0.0',
'model.7': 'backbone.stage1.1.blocks.0.1',
'model.8': 'backbone.stage1.1.blocks.1.0',
'model.9': 'backbone.stage1.1.blocks.1.1',
'model.11': 'backbone.stage1.1.final_conv',
# stage2
# MaxPoolBlock reduce_channel_2x
'model.13': 'backbone.stage2.0.maxpool_branches.1',
'model.14': 'backbone.stage2.0.stride_conv_branches.0',
'model.15': 'backbone.stage2.0.stride_conv_branches.1',
# ELANBlock expand_channel_2x
'model.17': 'backbone.stage2.1.short_conv',
'model.18': 'backbone.stage2.1.main_conv',
'model.19': 'backbone.stage2.1.blocks.0.0',
'model.20': 'backbone.stage2.1.blocks.0.1',
'model.21': 'backbone.stage2.1.blocks.1.0',
'model.22': 'backbone.stage2.1.blocks.1.1',
'model.24': 'backbone.stage2.1.final_conv',
# stage3
# MaxPoolBlock reduce_channel_2x
'model.26': 'backbone.stage3.0.maxpool_branches.1',
'model.27': 'backbone.stage3.0.stride_conv_branches.0',
'model.28': 'backbone.stage3.0.stride_conv_branches.1',
# ELANBlock expand_channel_2x
'model.30': 'backbone.stage3.1.short_conv',
'model.31': 'backbone.stage3.1.main_conv',
'model.32': 'backbone.stage3.1.blocks.0.0',
'model.33': 'backbone.stage3.1.blocks.0.1',
'model.34': 'backbone.stage3.1.blocks.1.0',
'model.35': 'backbone.stage3.1.blocks.1.1',
'model.37': 'backbone.stage3.1.final_conv',
# stage4
# MaxPoolBlock reduce_channel_2x
'model.39': 'backbone.stage4.0.maxpool_branches.1',
'model.40': 'backbone.stage4.0.stride_conv_branches.0',
'model.41': 'backbone.stage4.0.stride_conv_branches.1',
# ELANBlock no_change_channel
'model.43': 'backbone.stage4.1.short_conv',
'model.44': 'backbone.stage4.1.main_conv',
'model.45': 'backbone.stage4.1.blocks.0.0',
'model.46': 'backbone.stage4.1.blocks.0.1',
'model.47': 'backbone.stage4.1.blocks.1.0',
'model.48': 'backbone.stage4.1.blocks.1.1',
'model.50': 'backbone.stage4.1.final_conv',
# neck SPPCSPBlock
'model.51.cv1': 'neck.reduce_layers.2.main_layers.0',
'model.51.cv3': 'neck.reduce_layers.2.main_layers.1',
'model.51.cv4': 'neck.reduce_layers.2.main_layers.2',
'model.51.cv5': 'neck.reduce_layers.2.fuse_layers.0',
'model.51.cv6': 'neck.reduce_layers.2.fuse_layers.1',
'model.51.cv2': 'neck.reduce_layers.2.short_layer',
'model.51.cv7': 'neck.reduce_layers.2.final_conv',
# neck
'model.52': 'neck.upsample_layers.0.0',
'model.54': 'neck.reduce_layers.1',
# neck ELANBlock reduce_channel_2x
'model.56': 'neck.top_down_layers.0.short_conv',
'model.57': 'neck.top_down_layers.0.main_conv',
'model.58': 'neck.top_down_layers.0.blocks.0',
'model.59': 'neck.top_down_layers.0.blocks.1',
'model.60': 'neck.top_down_layers.0.blocks.2',
'model.61': 'neck.top_down_layers.0.blocks.3',
'model.63': 'neck.top_down_layers.0.final_conv',
'model.64': 'neck.upsample_layers.1.0',
'model.66': 'neck.reduce_layers.0',
# neck ELANBlock reduce_channel_2x
'model.68': 'neck.top_down_layers.1.short_conv',
'model.69': 'neck.top_down_layers.1.main_conv',
'model.70': 'neck.top_down_layers.1.blocks.0',
'model.71': 'neck.top_down_layers.1.blocks.1',
'model.72': 'neck.top_down_layers.1.blocks.2',
'model.73': 'neck.top_down_layers.1.blocks.3',
'model.75': 'neck.top_down_layers.1.final_conv',
# neck MaxPoolBlock no_change_channel
'model.77': 'neck.downsample_layers.0.maxpool_branches.1',
'model.78': 'neck.downsample_layers.0.stride_conv_branches.0',
'model.79': 'neck.downsample_layers.0.stride_conv_branches.1',
# neck ELANBlock reduce_channel_2x
'model.81': 'neck.bottom_up_layers.0.short_conv',
'model.82': 'neck.bottom_up_layers.0.main_conv',
'model.83': 'neck.bottom_up_layers.0.blocks.0',
'model.84': 'neck.bottom_up_layers.0.blocks.1',
'model.85': 'neck.bottom_up_layers.0.blocks.2',
'model.86': 'neck.bottom_up_layers.0.blocks.3',
'model.88': 'neck.bottom_up_layers.0.final_conv',
# neck MaxPoolBlock no_change_channel
'model.90': 'neck.downsample_layers.1.maxpool_branches.1',
'model.91': 'neck.downsample_layers.1.stride_conv_branches.0',
'model.92': 'neck.downsample_layers.1.stride_conv_branches.1',
# neck ELANBlock reduce_channel_2x
'model.94': 'neck.bottom_up_layers.1.short_conv',
'model.95': 'neck.bottom_up_layers.1.main_conv',
'model.96': 'neck.bottom_up_layers.1.blocks.0',
'model.97': 'neck.bottom_up_layers.1.blocks.1',
'model.98': 'neck.bottom_up_layers.1.blocks.2',
'model.99': 'neck.bottom_up_layers.1.blocks.3',
'model.101': 'neck.bottom_up_layers.1.final_conv',
# RepVGGBlock
'model.102.rbr_dense.0': 'neck.out_layers.0.rbr_dense.conv',
'model.102.rbr_dense.1': 'neck.out_layers.0.rbr_dense.bn',
'model.102.rbr_1x1.0': 'neck.out_layers.0.rbr_1x1.conv',
'model.102.rbr_1x1.1': 'neck.out_layers.0.rbr_1x1.bn',
'model.103.rbr_dense.0': 'neck.out_layers.1.rbr_dense.conv',
'model.103.rbr_dense.1': 'neck.out_layers.1.rbr_dense.bn',
'model.103.rbr_1x1.0': 'neck.out_layers.1.rbr_1x1.conv',
'model.103.rbr_1x1.1': 'neck.out_layers.1.rbr_1x1.bn',
'model.104.rbr_dense.0': 'neck.out_layers.2.rbr_dense.conv',
'model.104.rbr_dense.1': 'neck.out_layers.2.rbr_dense.bn',
'model.104.rbr_1x1.0': 'neck.out_layers.2.rbr_1x1.conv',
'model.104.rbr_1x1.1': 'neck.out_layers.2.rbr_1x1.bn',
# head
'model.105.m.0': 'bbox_head.head_module.convs_pred.0.1',
'model.105.m.1': 'bbox_head.head_module.convs_pred.1.1',
'model.105.m.2': 'bbox_head.head_module.convs_pred.2.1'
}
convert_dict_x = {
# stem
'model.0': 'backbone.stem.0',
'model.1': 'backbone.stem.1',
'model.2': 'backbone.stem.2',
# stage1
# ConvModule
'model.3': 'backbone.stage1.0',
# ELANBlock expand_channel_2x
'model.4': 'backbone.stage1.1.short_conv',
'model.5': 'backbone.stage1.1.main_conv',
'model.6': 'backbone.stage1.1.blocks.0.0',
'model.7': 'backbone.stage1.1.blocks.0.1',
'model.8': 'backbone.stage1.1.blocks.1.0',
'model.9': 'backbone.stage1.1.blocks.1.1',
'model.10': 'backbone.stage1.1.blocks.2.0',
'model.11': 'backbone.stage1.1.blocks.2.1',
'model.13': 'backbone.stage1.1.final_conv',
# stage2
# MaxPoolBlock reduce_channel_2x
'model.15': 'backbone.stage2.0.maxpool_branches.1',
'model.16': 'backbone.stage2.0.stride_conv_branches.0',
'model.17': 'backbone.stage2.0.stride_conv_branches.1',
# ELANBlock expand_channel_2x
'model.19': 'backbone.stage2.1.short_conv',
'model.20': 'backbone.stage2.1.main_conv',
'model.21': 'backbone.stage2.1.blocks.0.0',
'model.22': 'backbone.stage2.1.blocks.0.1',
'model.23': 'backbone.stage2.1.blocks.1.0',
'model.24': 'backbone.stage2.1.blocks.1.1',
'model.25': 'backbone.stage2.1.blocks.2.0',
'model.26': 'backbone.stage2.1.blocks.2.1',
'model.28': 'backbone.stage2.1.final_conv',
# stage3
# MaxPoolBlock reduce_channel_2x
'model.30': 'backbone.stage3.0.maxpool_branches.1',
'model.31': 'backbone.stage3.0.stride_conv_branches.0',
'model.32': 'backbone.stage3.0.stride_conv_branches.1',
# ELANBlock expand_channel_2x
'model.34': 'backbone.stage3.1.short_conv',
'model.35': 'backbone.stage3.1.main_conv',
'model.36': 'backbone.stage3.1.blocks.0.0',
'model.37': 'backbone.stage3.1.blocks.0.1',
'model.38': 'backbone.stage3.1.blocks.1.0',
'model.39': 'backbone.stage3.1.blocks.1.1',
'model.40': 'backbone.stage3.1.blocks.2.0',
'model.41': 'backbone.stage3.1.blocks.2.1',
'model.43': 'backbone.stage3.1.final_conv',
# stage4
# MaxPoolBlock reduce_channel_2x
'model.45': 'backbone.stage4.0.maxpool_branches.1',
'model.46': 'backbone.stage4.0.stride_conv_branches.0',
'model.47': 'backbone.stage4.0.stride_conv_branches.1',
# ELANBlock no_change_channel
'model.49': 'backbone.stage4.1.short_conv',
'model.50': 'backbone.stage4.1.main_conv',
'model.51': 'backbone.stage4.1.blocks.0.0',
'model.52': 'backbone.stage4.1.blocks.0.1',
'model.53': 'backbone.stage4.1.blocks.1.0',
'model.54': 'backbone.stage4.1.blocks.1.1',
'model.55': 'backbone.stage4.1.blocks.2.0',
'model.56': 'backbone.stage4.1.blocks.2.1',
'model.58': 'backbone.stage4.1.final_conv',
# neck SPPCSPBlock
'model.59.cv1': 'neck.reduce_layers.2.main_layers.0',
'model.59.cv3': 'neck.reduce_layers.2.main_layers.1',
'model.59.cv4': 'neck.reduce_layers.2.main_layers.2',
'model.59.cv5': 'neck.reduce_layers.2.fuse_layers.0',
'model.59.cv6': 'neck.reduce_layers.2.fuse_layers.1',
'model.59.cv2': 'neck.reduce_layers.2.short_layer',
'model.59.cv7': 'neck.reduce_layers.2.final_conv',
# neck
'model.60': 'neck.upsample_layers.0.0',
'model.62': 'neck.reduce_layers.1',
# neck ELANBlock reduce_channel_2x
'model.64': 'neck.top_down_layers.0.short_conv',
'model.65': 'neck.top_down_layers.0.main_conv',
'model.66': 'neck.top_down_layers.0.blocks.0.0',
'model.67': 'neck.top_down_layers.0.blocks.0.1',
'model.68': 'neck.top_down_layers.0.blocks.1.0',
'model.69': 'neck.top_down_layers.0.blocks.1.1',
'model.70': 'neck.top_down_layers.0.blocks.2.0',
'model.71': 'neck.top_down_layers.0.blocks.2.1',
'model.73': 'neck.top_down_layers.0.final_conv',
'model.74': 'neck.upsample_layers.1.0',
'model.76': 'neck.reduce_layers.0',
# neck ELANBlock reduce_channel_2x
'model.78': 'neck.top_down_layers.1.short_conv',
'model.79': 'neck.top_down_layers.1.main_conv',
'model.80': 'neck.top_down_layers.1.blocks.0.0',
'model.81': 'neck.top_down_layers.1.blocks.0.1',
'model.82': 'neck.top_down_layers.1.blocks.1.0',
'model.83': 'neck.top_down_layers.1.blocks.1.1',
'model.84': 'neck.top_down_layers.1.blocks.2.0',
'model.85': 'neck.top_down_layers.1.blocks.2.1',
'model.87': 'neck.top_down_layers.1.final_conv',
# neck MaxPoolBlock no_change_channel
'model.89': 'neck.downsample_layers.0.maxpool_branches.1',
'model.90': 'neck.downsample_layers.0.stride_conv_branches.0',
'model.91': 'neck.downsample_layers.0.stride_conv_branches.1',
# neck ELANBlock reduce_channel_2x
'model.93': 'neck.bottom_up_layers.0.short_conv',
'model.94': 'neck.bottom_up_layers.0.main_conv',
'model.95': 'neck.bottom_up_layers.0.blocks.0.0',
'model.96': 'neck.bottom_up_layers.0.blocks.0.1',
'model.97': 'neck.bottom_up_layers.0.blocks.1.0',
'model.98': 'neck.bottom_up_layers.0.blocks.1.1',
'model.99': 'neck.bottom_up_layers.0.blocks.2.0',
'model.100': 'neck.bottom_up_layers.0.blocks.2.1',
'model.102': 'neck.bottom_up_layers.0.final_conv',
# neck MaxPoolBlock no_change_channel
'model.104': 'neck.downsample_layers.1.maxpool_branches.1',
'model.105': 'neck.downsample_layers.1.stride_conv_branches.0',
'model.106': 'neck.downsample_layers.1.stride_conv_branches.1',
# neck ELANBlock reduce_channel_2x
'model.108': 'neck.bottom_up_layers.1.short_conv',
'model.109': 'neck.bottom_up_layers.1.main_conv',
'model.110': 'neck.bottom_up_layers.1.blocks.0.0',
'model.111': 'neck.bottom_up_layers.1.blocks.0.1',
'model.112': 'neck.bottom_up_layers.1.blocks.1.0',
'model.113': 'neck.bottom_up_layers.1.blocks.1.1',
'model.114': 'neck.bottom_up_layers.1.blocks.2.0',
'model.115': 'neck.bottom_up_layers.1.blocks.2.1',
'model.117': 'neck.bottom_up_layers.1.final_conv',
# Conv
'model.118': 'neck.out_layers.0',
'model.119': 'neck.out_layers.1',
'model.120': 'neck.out_layers.2',
# head
'model.121.m.0': 'bbox_head.head_module.convs_pred.0.1',
'model.121.m.1': 'bbox_head.head_module.convs_pred.1.1',
'model.121.m.2': 'bbox_head.head_module.convs_pred.2.1'
}
convert_dict_w = {
# stem
'model.1': 'backbone.stem.conv',
# stage1
# ConvModule
'model.2': 'backbone.stage1.0',
# ELANBlock
'model.3': 'backbone.stage1.1.short_conv',
'model.4': 'backbone.stage1.1.main_conv',
'model.5': 'backbone.stage1.1.blocks.0.0',
'model.6': 'backbone.stage1.1.blocks.0.1',
'model.7': 'backbone.stage1.1.blocks.1.0',
'model.8': 'backbone.stage1.1.blocks.1.1',
'model.10': 'backbone.stage1.1.final_conv',
# stage2
'model.11': 'backbone.stage2.0',
# ELANBlock
'model.12': 'backbone.stage2.1.short_conv',
'model.13': 'backbone.stage2.1.main_conv',
'model.14': 'backbone.stage2.1.blocks.0.0',
'model.15': 'backbone.stage2.1.blocks.0.1',
'model.16': 'backbone.stage2.1.blocks.1.0',
'model.17': 'backbone.stage2.1.blocks.1.1',
'model.19': 'backbone.stage2.1.final_conv',
# stage3
'model.20': 'backbone.stage3.0',
# ELANBlock
'model.21': 'backbone.stage3.1.short_conv',
'model.22': 'backbone.stage3.1.main_conv',
'model.23': 'backbone.stage3.1.blocks.0.0',
'model.24': 'backbone.stage3.1.blocks.0.1',
'model.25': 'backbone.stage3.1.blocks.1.0',
'model.26': 'backbone.stage3.1.blocks.1.1',
'model.28': 'backbone.stage3.1.final_conv',
# stage4
'model.29': 'backbone.stage4.0',
# ELANBlock
'model.30': 'backbone.stage4.1.short_conv',
'model.31': 'backbone.stage4.1.main_conv',
'model.32': 'backbone.stage4.1.blocks.0.0',
'model.33': 'backbone.stage4.1.blocks.0.1',
'model.34': 'backbone.stage4.1.blocks.1.0',
'model.35': 'backbone.stage4.1.blocks.1.1',
'model.37': 'backbone.stage4.1.final_conv',
# stage5
'model.38': 'backbone.stage5.0',
# ELANBlock
'model.39': 'backbone.stage5.1.short_conv',
'model.40': 'backbone.stage5.1.main_conv',
'model.41': 'backbone.stage5.1.blocks.0.0',
'model.42': 'backbone.stage5.1.blocks.0.1',
'model.43': 'backbone.stage5.1.blocks.1.0',
'model.44': 'backbone.stage5.1.blocks.1.1',
'model.46': 'backbone.stage5.1.final_conv',
# neck SPPCSPBlock
'model.47.cv1': 'neck.reduce_layers.3.main_layers.0',
'model.47.cv3': 'neck.reduce_layers.3.main_layers.1',
'model.47.cv4': 'neck.reduce_layers.3.main_layers.2',
'model.47.cv5': 'neck.reduce_layers.3.fuse_layers.0',
'model.47.cv6': 'neck.reduce_layers.3.fuse_layers.1',
'model.47.cv2': 'neck.reduce_layers.3.short_layer',
'model.47.cv7': 'neck.reduce_layers.3.final_conv',
# neck
'model.48': 'neck.upsample_layers.0.0',
'model.50': 'neck.reduce_layers.2',
# neck ELANBlock
'model.52': 'neck.top_down_layers.0.short_conv',
'model.53': 'neck.top_down_layers.0.main_conv',
'model.54': 'neck.top_down_layers.0.blocks.0',
'model.55': 'neck.top_down_layers.0.blocks.1',
'model.56': 'neck.top_down_layers.0.blocks.2',
'model.57': 'neck.top_down_layers.0.blocks.3',
'model.59': 'neck.top_down_layers.0.final_conv',
'model.60': 'neck.upsample_layers.1.0',
'model.62': 'neck.reduce_layers.1',
# neck ELANBlock reduce_channel_2x
'model.64': 'neck.top_down_layers.1.short_conv',
'model.65': 'neck.top_down_layers.1.main_conv',
'model.66': 'neck.top_down_layers.1.blocks.0',
'model.67': 'neck.top_down_layers.1.blocks.1',
'model.68': 'neck.top_down_layers.1.blocks.2',
'model.69': 'neck.top_down_layers.1.blocks.3',
'model.71': 'neck.top_down_layers.1.final_conv',
'model.72': 'neck.upsample_layers.2.0',
'model.74': 'neck.reduce_layers.0',
'model.76': 'neck.top_down_layers.2.short_conv',
'model.77': 'neck.top_down_layers.2.main_conv',
'model.78': 'neck.top_down_layers.2.blocks.0',
'model.79': 'neck.top_down_layers.2.blocks.1',
'model.80': 'neck.top_down_layers.2.blocks.2',
'model.81': 'neck.top_down_layers.2.blocks.3',
'model.83': 'neck.top_down_layers.2.final_conv',
'model.84': 'neck.downsample_layers.0',
# neck ELANBlock
'model.86': 'neck.bottom_up_layers.0.short_conv',
'model.87': 'neck.bottom_up_layers.0.main_conv',
'model.88': 'neck.bottom_up_layers.0.blocks.0',
'model.89': 'neck.bottom_up_layers.0.blocks.1',
'model.90': 'neck.bottom_up_layers.0.blocks.2',
'model.91': 'neck.bottom_up_layers.0.blocks.3',
'model.93': 'neck.bottom_up_layers.0.final_conv',
'model.94': 'neck.downsample_layers.1',
# neck ELANBlock reduce_channel_2x
'model.96': 'neck.bottom_up_layers.1.short_conv',
'model.97': 'neck.bottom_up_layers.1.main_conv',
'model.98': 'neck.bottom_up_layers.1.blocks.0',
'model.99': 'neck.bottom_up_layers.1.blocks.1',
'model.100': 'neck.bottom_up_layers.1.blocks.2',
'model.101': 'neck.bottom_up_layers.1.blocks.3',
'model.103': 'neck.bottom_up_layers.1.final_conv',
'model.104': 'neck.downsample_layers.2',
# neck ELANBlock reduce_channel_2x
'model.106': 'neck.bottom_up_layers.2.short_conv',
'model.107': 'neck.bottom_up_layers.2.main_conv',
'model.108': 'neck.bottom_up_layers.2.blocks.0',
'model.109': 'neck.bottom_up_layers.2.blocks.1',
'model.110': 'neck.bottom_up_layers.2.blocks.2',
'model.111': 'neck.bottom_up_layers.2.blocks.3',
'model.113': 'neck.bottom_up_layers.2.final_conv',
'model.114': 'bbox_head.head_module.main_convs_pred.0.0',
'model.115': 'bbox_head.head_module.main_convs_pred.1.0',
'model.116': 'bbox_head.head_module.main_convs_pred.2.0',
'model.117': 'bbox_head.head_module.main_convs_pred.3.0',
# head
'model.118.m.0': 'bbox_head.head_module.main_convs_pred.0.2',
'model.118.m.1': 'bbox_head.head_module.main_convs_pred.1.2',
'model.118.m.2': 'bbox_head.head_module.main_convs_pred.2.2',
'model.118.m.3': 'bbox_head.head_module.main_convs_pred.3.2'
}
convert_dict_e = {
# stem
'model.1': 'backbone.stem.conv',
# stage1
'model.2.cv1': 'backbone.stage1.0.stride_conv_branches.0',
'model.2.cv2': 'backbone.stage1.0.stride_conv_branches.1',
'model.2.cv3': 'backbone.stage1.0.maxpool_branches.1',
# ELANBlock
'model.3': 'backbone.stage1.1.short_conv',
'model.4': 'backbone.stage1.1.main_conv',
'model.5': 'backbone.stage1.1.blocks.0.0',
'model.6': 'backbone.stage1.1.blocks.0.1',
'model.7': 'backbone.stage1.1.blocks.1.0',
'model.8': 'backbone.stage1.1.blocks.1.1',
'model.9': 'backbone.stage1.1.blocks.2.0',
'model.10': 'backbone.stage1.1.blocks.2.1',
'model.12': 'backbone.stage1.1.final_conv',
# stage2
'model.13.cv1': 'backbone.stage2.0.stride_conv_branches.0',
'model.13.cv2': 'backbone.stage2.0.stride_conv_branches.1',
'model.13.cv3': 'backbone.stage2.0.maxpool_branches.1',
# ELANBlock
'model.14': 'backbone.stage2.1.short_conv',
'model.15': 'backbone.stage2.1.main_conv',
'model.16': 'backbone.stage2.1.blocks.0.0',
'model.17': 'backbone.stage2.1.blocks.0.1',
'model.18': 'backbone.stage2.1.blocks.1.0',
'model.19': 'backbone.stage2.1.blocks.1.1',
'model.20': 'backbone.stage2.1.blocks.2.0',
'model.21': 'backbone.stage2.1.blocks.2.1',
'model.23': 'backbone.stage2.1.final_conv',
# stage3
'model.24.cv1': 'backbone.stage3.0.stride_conv_branches.0',
'model.24.cv2': 'backbone.stage3.0.stride_conv_branches.1',
'model.24.cv3': 'backbone.stage3.0.maxpool_branches.1',
# ELANBlock
'model.25': 'backbone.stage3.1.short_conv',
'model.26': 'backbone.stage3.1.main_conv',
'model.27': 'backbone.stage3.1.blocks.0.0',
'model.28': 'backbone.stage3.1.blocks.0.1',
'model.29': 'backbone.stage3.1.blocks.1.0',
'model.30': 'backbone.stage3.1.blocks.1.1',
'model.31': 'backbone.stage3.1.blocks.2.0',
'model.32': 'backbone.stage3.1.blocks.2.1',
'model.34': 'backbone.stage3.1.final_conv',
# stage4
'model.35.cv1': 'backbone.stage4.0.stride_conv_branches.0',
'model.35.cv2': 'backbone.stage4.0.stride_conv_branches.1',
'model.35.cv3': 'backbone.stage4.0.maxpool_branches.1',
# ELANBlock
'model.36': 'backbone.stage4.1.short_conv',
'model.37': 'backbone.stage4.1.main_conv',
'model.38': 'backbone.stage4.1.blocks.0.0',
'model.39': 'backbone.stage4.1.blocks.0.1',
'model.40': 'backbone.stage4.1.blocks.1.0',
'model.41': 'backbone.stage4.1.blocks.1.1',
'model.42': 'backbone.stage4.1.blocks.2.0',
'model.43': 'backbone.stage4.1.blocks.2.1',
'model.45': 'backbone.stage4.1.final_conv',
# stage5
'model.46.cv1': 'backbone.stage5.0.stride_conv_branches.0',
'model.46.cv2': 'backbone.stage5.0.stride_conv_branches.1',
'model.46.cv3': 'backbone.stage5.0.maxpool_branches.1',
# ELANBlock
'model.47': 'backbone.stage5.1.short_conv',
'model.48': 'backbone.stage5.1.main_conv',
'model.49': 'backbone.stage5.1.blocks.0.0',
'model.50': 'backbone.stage5.1.blocks.0.1',
'model.51': 'backbone.stage5.1.blocks.1.0',
'model.52': 'backbone.stage5.1.blocks.1.1',
'model.53': 'backbone.stage5.1.blocks.2.0',
'model.54': 'backbone.stage5.1.blocks.2.1',
'model.56': 'backbone.stage5.1.final_conv',
# neck SPPCSPBlock
'model.57.cv1': 'neck.reduce_layers.3.main_layers.0',
'model.57.cv3': 'neck.reduce_layers.3.main_layers.1',
'model.57.cv4': 'neck.reduce_layers.3.main_layers.2',
'model.57.cv5': 'neck.reduce_layers.3.fuse_layers.0',
'model.57.cv6': 'neck.reduce_layers.3.fuse_layers.1',
'model.57.cv2': 'neck.reduce_layers.3.short_layer',
'model.57.cv7': 'neck.reduce_layers.3.final_conv',
# neck
'model.58': 'neck.upsample_layers.0.0',
'model.60': 'neck.reduce_layers.2',
# neck ELANBlock
'model.62': 'neck.top_down_layers.0.short_conv',
'model.63': 'neck.top_down_layers.0.main_conv',
'model.64': 'neck.top_down_layers.0.blocks.0',
'model.65': 'neck.top_down_layers.0.blocks.1',
'model.66': 'neck.top_down_layers.0.blocks.2',
'model.67': 'neck.top_down_layers.0.blocks.3',
'model.68': 'neck.top_down_layers.0.blocks.4',
'model.69': 'neck.top_down_layers.0.blocks.5',
'model.71': 'neck.top_down_layers.0.final_conv',
'model.72': 'neck.upsample_layers.1.0',
'model.74': 'neck.reduce_layers.1',
# neck ELANBlock
'model.76': 'neck.top_down_layers.1.short_conv',
'model.77': 'neck.top_down_layers.1.main_conv',
'model.78': 'neck.top_down_layers.1.blocks.0',
'model.79': 'neck.top_down_layers.1.blocks.1',
'model.80': 'neck.top_down_layers.1.blocks.2',
'model.81': 'neck.top_down_layers.1.blocks.3',
'model.82': 'neck.top_down_layers.1.blocks.4',
'model.83': 'neck.top_down_layers.1.blocks.5',
'model.85': 'neck.top_down_layers.1.final_conv',
'model.86': 'neck.upsample_layers.2.0',
'model.88': 'neck.reduce_layers.0',
'model.90': 'neck.top_down_layers.2.short_conv',
'model.91': 'neck.top_down_layers.2.main_conv',
'model.92': 'neck.top_down_layers.2.blocks.0',
'model.93': 'neck.top_down_layers.2.blocks.1',
'model.94': 'neck.top_down_layers.2.blocks.2',
'model.95': 'neck.top_down_layers.2.blocks.3',
'model.96': 'neck.top_down_layers.2.blocks.4',
'model.97': 'neck.top_down_layers.2.blocks.5',
'model.99': 'neck.top_down_layers.2.final_conv',
'model.100.cv1': 'neck.downsample_layers.0.stride_conv_branches.0',
'model.100.cv2': 'neck.downsample_layers.0.stride_conv_branches.1',
'model.100.cv3': 'neck.downsample_layers.0.maxpool_branches.1',
# neck ELANBlock
'model.102': 'neck.bottom_up_layers.0.short_conv',
'model.103': 'neck.bottom_up_layers.0.main_conv',
'model.104': 'neck.bottom_up_layers.0.blocks.0',
'model.105': 'neck.bottom_up_layers.0.blocks.1',
'model.106': 'neck.bottom_up_layers.0.blocks.2',
'model.107': 'neck.bottom_up_layers.0.blocks.3',
'model.108': 'neck.bottom_up_layers.0.blocks.4',
'model.109': 'neck.bottom_up_layers.0.blocks.5',
'model.111': 'neck.bottom_up_layers.0.final_conv',
'model.112.cv1': 'neck.downsample_layers.1.stride_conv_branches.0',
'model.112.cv2': 'neck.downsample_layers.1.stride_conv_branches.1',
'model.112.cv3': 'neck.downsample_layers.1.maxpool_branches.1',
# neck ELANBlock
'model.114': 'neck.bottom_up_layers.1.short_conv',
'model.115': 'neck.bottom_up_layers.1.main_conv',
'model.116': 'neck.bottom_up_layers.1.blocks.0',
'model.117': 'neck.bottom_up_layers.1.blocks.1',
'model.118': 'neck.bottom_up_layers.1.blocks.2',
'model.119': 'neck.bottom_up_layers.1.blocks.3',
'model.120': 'neck.bottom_up_layers.1.blocks.4',
'model.121': 'neck.bottom_up_layers.1.blocks.5',
'model.123': 'neck.bottom_up_layers.1.final_conv',
'model.124.cv1': 'neck.downsample_layers.2.stride_conv_branches.0',
'model.124.cv2': 'neck.downsample_layers.2.stride_conv_branches.1',
'model.124.cv3': 'neck.downsample_layers.2.maxpool_branches.1',
# neck ELANBlock
'model.126': 'neck.bottom_up_layers.2.short_conv',
'model.127': 'neck.bottom_up_layers.2.main_conv',
'model.128': 'neck.bottom_up_layers.2.blocks.0',
'model.129': 'neck.bottom_up_layers.2.blocks.1',
'model.130': 'neck.bottom_up_layers.2.blocks.2',
'model.131': 'neck.bottom_up_layers.2.blocks.3',
'model.132': 'neck.bottom_up_layers.2.blocks.4',
'model.133': 'neck.bottom_up_layers.2.blocks.5',
'model.135': 'neck.bottom_up_layers.2.final_conv',
'model.136': 'bbox_head.head_module.main_convs_pred.0.0',
'model.137': 'bbox_head.head_module.main_convs_pred.1.0',
'model.138': 'bbox_head.head_module.main_convs_pred.2.0',
'model.139': 'bbox_head.head_module.main_convs_pred.3.0',
# head
'model.140.m.0': 'bbox_head.head_module.main_convs_pred.0.2',
'model.140.m.1': 'bbox_head.head_module.main_convs_pred.1.2',
'model.140.m.2': 'bbox_head.head_module.main_convs_pred.2.2',
'model.140.m.3': 'bbox_head.head_module.main_convs_pred.3.2'
}
convert_dict_e2e = {
# stem
'model.1': 'backbone.stem.conv',
# stage1
'model.2.cv1': 'backbone.stage1.0.stride_conv_branches.0',
'model.2.cv2': 'backbone.stage1.0.stride_conv_branches.1',
'model.2.cv3': 'backbone.stage1.0.maxpool_branches.1',
# E-ELANBlock
'model.3': 'backbone.stage1.1.e_elan_blocks.0.short_conv',
'model.4': 'backbone.stage1.1.e_elan_blocks.0.main_conv',
'model.5': 'backbone.stage1.1.e_elan_blocks.0.blocks.0.0',
'model.6': 'backbone.stage1.1.e_elan_blocks.0.blocks.0.1',
'model.7': 'backbone.stage1.1.e_elan_blocks.0.blocks.1.0',
'model.8': 'backbone.stage1.1.e_elan_blocks.0.blocks.1.1',
'model.9': 'backbone.stage1.1.e_elan_blocks.0.blocks.2.0',
'model.10': 'backbone.stage1.1.e_elan_blocks.0.blocks.2.1',
'model.12': 'backbone.stage1.1.e_elan_blocks.0.final_conv',
'model.13': 'backbone.stage1.1.e_elan_blocks.1.short_conv',
'model.14': 'backbone.stage1.1.e_elan_blocks.1.main_conv',
'model.15': 'backbone.stage1.1.e_elan_blocks.1.blocks.0.0',
'model.16': 'backbone.stage1.1.e_elan_blocks.1.blocks.0.1',
'model.17': 'backbone.stage1.1.e_elan_blocks.1.blocks.1.0',
'model.18': 'backbone.stage1.1.e_elan_blocks.1.blocks.1.1',
'model.19': 'backbone.stage1.1.e_elan_blocks.1.blocks.2.0',
'model.20': 'backbone.stage1.1.e_elan_blocks.1.blocks.2.1',
'model.22': 'backbone.stage1.1.e_elan_blocks.1.final_conv',
# stage2
'model.24.cv1': 'backbone.stage2.0.stride_conv_branches.0',
'model.24.cv2': 'backbone.stage2.0.stride_conv_branches.1',
'model.24.cv3': 'backbone.stage2.0.maxpool_branches.1',
# E-ELANBlock
'model.25': 'backbone.stage2.1.e_elan_blocks.0.short_conv',
'model.26': 'backbone.stage2.1.e_elan_blocks.0.main_conv',
'model.27': 'backbone.stage2.1.e_elan_blocks.0.blocks.0.0',
'model.28': 'backbone.stage2.1.e_elan_blocks.0.blocks.0.1',
'model.29': 'backbone.stage2.1.e_elan_blocks.0.blocks.1.0',
'model.30': 'backbone.stage2.1.e_elan_blocks.0.blocks.1.1',
'model.31': 'backbone.stage2.1.e_elan_blocks.0.blocks.2.0',
'model.32': 'backbone.stage2.1.e_elan_blocks.0.blocks.2.1',
'model.34': 'backbone.stage2.1.e_elan_blocks.0.final_conv',
'model.35': 'backbone.stage2.1.e_elan_blocks.1.short_conv',
'model.36': 'backbone.stage2.1.e_elan_blocks.1.main_conv',
'model.37': 'backbone.stage2.1.e_elan_blocks.1.blocks.0.0',
'model.38': 'backbone.stage2.1.e_elan_blocks.1.blocks.0.1',
'model.39': 'backbone.stage2.1.e_elan_blocks.1.blocks.1.0',
'model.40': 'backbone.stage2.1.e_elan_blocks.1.blocks.1.1',
'model.41': 'backbone.stage2.1.e_elan_blocks.1.blocks.2.0',
'model.42': 'backbone.stage2.1.e_elan_blocks.1.blocks.2.1',
'model.44': 'backbone.stage2.1.e_elan_blocks.1.final_conv',
# stage3
'model.46.cv1': 'backbone.stage3.0.stride_conv_branches.0',
'model.46.cv2': 'backbone.stage3.0.stride_conv_branches.1',
'model.46.cv3': 'backbone.stage3.0.maxpool_branches.1',
# E-ELANBlock
'model.47': 'backbone.stage3.1.e_elan_blocks.0.short_conv',
'model.48': 'backbone.stage3.1.e_elan_blocks.0.main_conv',
'model.49': 'backbone.stage3.1.e_elan_blocks.0.blocks.0.0',
'model.50': 'backbone.stage3.1.e_elan_blocks.0.blocks.0.1',
'model.51': 'backbone.stage3.1.e_elan_blocks.0.blocks.1.0',
'model.52': 'backbone.stage3.1.e_elan_blocks.0.blocks.1.1',
'model.53': 'backbone.stage3.1.e_elan_blocks.0.blocks.2.0',
'model.54': 'backbone.stage3.1.e_elan_blocks.0.blocks.2.1',
'model.56': 'backbone.stage3.1.e_elan_blocks.0.final_conv',
'model.57': 'backbone.stage3.1.e_elan_blocks.1.short_conv',
'model.58': 'backbone.stage3.1.e_elan_blocks.1.main_conv',
'model.59': 'backbone.stage3.1.e_elan_blocks.1.blocks.0.0',
'model.60': 'backbone.stage3.1.e_elan_blocks.1.blocks.0.1',
'model.61': 'backbone.stage3.1.e_elan_blocks.1.blocks.1.0',
'model.62': 'backbone.stage3.1.e_elan_blocks.1.blocks.1.1',
'model.63': 'backbone.stage3.1.e_elan_blocks.1.blocks.2.0',
'model.64': 'backbone.stage3.1.e_elan_blocks.1.blocks.2.1',
'model.66': 'backbone.stage3.1.e_elan_blocks.1.final_conv',
# stage4
'model.68.cv1': 'backbone.stage4.0.stride_conv_branches.0',
'model.68.cv2': 'backbone.stage4.0.stride_conv_branches.1',
'model.68.cv3': 'backbone.stage4.0.maxpool_branches.1',
# E-ELANBlock
'model.69': 'backbone.stage4.1.e_elan_blocks.0.short_conv',
'model.70': 'backbone.stage4.1.e_elan_blocks.0.main_conv',
'model.71': 'backbone.stage4.1.e_elan_blocks.0.blocks.0.0',
'model.72': 'backbone.stage4.1.e_elan_blocks.0.blocks.0.1',
'model.73': 'backbone.stage4.1.e_elan_blocks.0.blocks.1.0',
'model.74': 'backbone.stage4.1.e_elan_blocks.0.blocks.1.1',
'model.75': 'backbone.stage4.1.e_elan_blocks.0.blocks.2.0',
'model.76': 'backbone.stage4.1.e_elan_blocks.0.blocks.2.1',
'model.78': 'backbone.stage4.1.e_elan_blocks.0.final_conv',
'model.79': 'backbone.stage4.1.e_elan_blocks.1.short_conv',
'model.80': 'backbone.stage4.1.e_elan_blocks.1.main_conv',
'model.81': 'backbone.stage4.1.e_elan_blocks.1.blocks.0.0',
'model.82': 'backbone.stage4.1.e_elan_blocks.1.blocks.0.1',
'model.83': 'backbone.stage4.1.e_elan_blocks.1.blocks.1.0',
'model.84': 'backbone.stage4.1.e_elan_blocks.1.blocks.1.1',
'model.85': 'backbone.stage4.1.e_elan_blocks.1.blocks.2.0',
'model.86': 'backbone.stage4.1.e_elan_blocks.1.blocks.2.1',
'model.88': 'backbone.stage4.1.e_elan_blocks.1.final_conv',
# stage5
'model.90.cv1': 'backbone.stage5.0.stride_conv_branches.0',
'model.90.cv2': 'backbone.stage5.0.stride_conv_branches.1',
'model.90.cv3': 'backbone.stage5.0.maxpool_branches.1',
# E-ELANBlock
'model.91': 'backbone.stage5.1.e_elan_blocks.0.short_conv',
'model.92': 'backbone.stage5.1.e_elan_blocks.0.main_conv',
'model.93': 'backbone.stage5.1.e_elan_blocks.0.blocks.0.0',
'model.94': 'backbone.stage5.1.e_elan_blocks.0.blocks.0.1',
'model.95': 'backbone.stage5.1.e_elan_blocks.0.blocks.1.0',
'model.96': 'backbone.stage5.1.e_elan_blocks.0.blocks.1.1',
'model.97': 'backbone.stage5.1.e_elan_blocks.0.blocks.2.0',
'model.98': 'backbone.stage5.1.e_elan_blocks.0.blocks.2.1',
'model.100': 'backbone.stage5.1.e_elan_blocks.0.final_conv',
'model.101': 'backbone.stage5.1.e_elan_blocks.1.short_conv',
'model.102': 'backbone.stage5.1.e_elan_blocks.1.main_conv',
'model.103': 'backbone.stage5.1.e_elan_blocks.1.blocks.0.0',
'model.104': 'backbone.stage5.1.e_elan_blocks.1.blocks.0.1',
'model.105': 'backbone.stage5.1.e_elan_blocks.1.blocks.1.0',
'model.106': 'backbone.stage5.1.e_elan_blocks.1.blocks.1.1',
'model.107': 'backbone.stage5.1.e_elan_blocks.1.blocks.2.0',
'model.108': 'backbone.stage5.1.e_elan_blocks.1.blocks.2.1',
'model.110': 'backbone.stage5.1.e_elan_blocks.1.final_conv',
# neck SPPCSPBlock
'model.112.cv1': 'neck.reduce_layers.3.main_layers.0',
'model.112.cv3': 'neck.reduce_layers.3.main_layers.1',
'model.112.cv4': 'neck.reduce_layers.3.main_layers.2',
'model.112.cv5': 'neck.reduce_layers.3.fuse_layers.0',
'model.112.cv6': 'neck.reduce_layers.3.fuse_layers.1',
'model.112.cv2': 'neck.reduce_layers.3.short_layer',
'model.112.cv7': 'neck.reduce_layers.3.final_conv',
# neck
'model.113': 'neck.upsample_layers.0.0',
'model.115': 'neck.reduce_layers.2',
# neck E-ELANBlock
'model.117': 'neck.top_down_layers.0.e_elan_blocks.0.short_conv',
'model.118': 'neck.top_down_layers.0.e_elan_blocks.0.main_conv',
'model.119': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.0',
'model.120': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.1',
'model.121': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.2',
'model.122': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.3',
'model.123': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.4',
'model.124': 'neck.top_down_layers.0.e_elan_blocks.0.blocks.5',
'model.126': 'neck.top_down_layers.0.e_elan_blocks.0.final_conv',
'model.127': 'neck.top_down_layers.0.e_elan_blocks.1.short_conv',
'model.128': 'neck.top_down_layers.0.e_elan_blocks.1.main_conv',
'model.129': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.0',
'model.130': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.1',
'model.131': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.2',
'model.132': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.3',
'model.133': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.4',
'model.134': 'neck.top_down_layers.0.e_elan_blocks.1.blocks.5',
'model.136': 'neck.top_down_layers.0.e_elan_blocks.1.final_conv',
'model.138': 'neck.upsample_layers.1.0',
'model.140': 'neck.reduce_layers.1',
# neck E-ELANBlock
'model.142': 'neck.top_down_layers.1.e_elan_blocks.0.short_conv',
'model.143': 'neck.top_down_layers.1.e_elan_blocks.0.main_conv',
'model.144': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.0',
'model.145': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.1',
'model.146': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.2',
'model.147': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.3',
'model.148': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.4',
'model.149': 'neck.top_down_layers.1.e_elan_blocks.0.blocks.5',
'model.151': 'neck.top_down_layers.1.e_elan_blocks.0.final_conv',
'model.152': 'neck.top_down_layers.1.e_elan_blocks.1.short_conv',
'model.153': 'neck.top_down_layers.1.e_elan_blocks.1.main_conv',
'model.154': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.0',
'model.155': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.1',
'model.156': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.2',
'model.157': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.3',
'model.158': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.4',
'model.159': 'neck.top_down_layers.1.e_elan_blocks.1.blocks.5',
'model.161': 'neck.top_down_layers.1.e_elan_blocks.1.final_conv',
'model.163': 'neck.upsample_layers.2.0',
'model.165': 'neck.reduce_layers.0',
'model.167': 'neck.top_down_layers.2.e_elan_blocks.0.short_conv',
'model.168': 'neck.top_down_layers.2.e_elan_blocks.0.main_conv',
'model.169': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.0',
'model.170': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.1',
'model.171': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.2',
'model.172': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.3',
'model.173': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.4',
'model.174': 'neck.top_down_layers.2.e_elan_blocks.0.blocks.5',
'model.176': 'neck.top_down_layers.2.e_elan_blocks.0.final_conv',
'model.177': 'neck.top_down_layers.2.e_elan_blocks.1.short_conv',
'model.178': 'neck.top_down_layers.2.e_elan_blocks.1.main_conv',
'model.179': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.0',
'model.180': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.1',
'model.181': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.2',
'model.182': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.3',
'model.183': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.4',
'model.184': 'neck.top_down_layers.2.e_elan_blocks.1.blocks.5',
'model.186': 'neck.top_down_layers.2.e_elan_blocks.1.final_conv',
'model.188.cv1': 'neck.downsample_layers.0.stride_conv_branches.0',
'model.188.cv2': 'neck.downsample_layers.0.stride_conv_branches.1',
'model.188.cv3': 'neck.downsample_layers.0.maxpool_branches.1',
# neck E-ELANBlock
'model.190': 'neck.bottom_up_layers.0.e_elan_blocks.0.short_conv',
'model.191': 'neck.bottom_up_layers.0.e_elan_blocks.0.main_conv',
'model.192': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.0',
'model.193': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.1',
'model.194': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.2',
'model.195': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.3',
'model.196': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.4',
'model.197': 'neck.bottom_up_layers.0.e_elan_blocks.0.blocks.5',
'model.199': 'neck.bottom_up_layers.0.e_elan_blocks.0.final_conv',
'model.200': 'neck.bottom_up_layers.0.e_elan_blocks.1.short_conv',
'model.201': 'neck.bottom_up_layers.0.e_elan_blocks.1.main_conv',
'model.202': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.0',
'model.203': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.1',
'model.204': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.2',
'model.205': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.3',
'model.206': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.4',
'model.207': 'neck.bottom_up_layers.0.e_elan_blocks.1.blocks.5',
'model.209': 'neck.bottom_up_layers.0.e_elan_blocks.1.final_conv',
'model.211.cv1': 'neck.downsample_layers.1.stride_conv_branches.0',
'model.211.cv2': 'neck.downsample_layers.1.stride_conv_branches.1',
'model.211.cv3': 'neck.downsample_layers.1.maxpool_branches.1',
'model.213': 'neck.bottom_up_layers.1.e_elan_blocks.0.short_conv',
'model.214': 'neck.bottom_up_layers.1.e_elan_blocks.0.main_conv',
'model.215': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.0',
'model.216': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.1',
'model.217': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.2',
'model.218': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.3',
'model.219': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.4',
'model.220': 'neck.bottom_up_layers.1.e_elan_blocks.0.blocks.5',
'model.222': 'neck.bottom_up_layers.1.e_elan_blocks.0.final_conv',
'model.223': 'neck.bottom_up_layers.1.e_elan_blocks.1.short_conv',
'model.224': 'neck.bottom_up_layers.1.e_elan_blocks.1.main_conv',
'model.225': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.0',
'model.226': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.1',
'model.227': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.2',
'model.228': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.3',
'model.229': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.4',
'model.230': 'neck.bottom_up_layers.1.e_elan_blocks.1.blocks.5',
'model.232': 'neck.bottom_up_layers.1.e_elan_blocks.1.final_conv',
'model.234.cv1': 'neck.downsample_layers.2.stride_conv_branches.0',
'model.234.cv2': 'neck.downsample_layers.2.stride_conv_branches.1',
'model.234.cv3': 'neck.downsample_layers.2.maxpool_branches.1',
# neck E-ELANBlock
'model.236': 'neck.bottom_up_layers.2.e_elan_blocks.0.short_conv',
'model.237': 'neck.bottom_up_layers.2.e_elan_blocks.0.main_conv',
'model.238': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.0',
'model.239': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.1',
'model.240': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.2',
'model.241': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.3',
'model.242': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.4',
'model.243': 'neck.bottom_up_layers.2.e_elan_blocks.0.blocks.5',
'model.245': 'neck.bottom_up_layers.2.e_elan_blocks.0.final_conv',
'model.246': 'neck.bottom_up_layers.2.e_elan_blocks.1.short_conv',
'model.247': 'neck.bottom_up_layers.2.e_elan_blocks.1.main_conv',
'model.248': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.0',
'model.249': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.1',
'model.250': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.2',
'model.251': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.3',
'model.252': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.4',
'model.253': 'neck.bottom_up_layers.2.e_elan_blocks.1.blocks.5',
'model.255': 'neck.bottom_up_layers.2.e_elan_blocks.1.final_conv',
'model.257': 'bbox_head.head_module.main_convs_pred.0.0',
'model.258': 'bbox_head.head_module.main_convs_pred.1.0',
'model.259': 'bbox_head.head_module.main_convs_pred.2.0',
'model.260': 'bbox_head.head_module.main_convs_pred.3.0',
# head
'model.261.m.0': 'bbox_head.head_module.main_convs_pred.0.2',
'model.261.m.1': 'bbox_head.head_module.main_convs_pred.1.2',
'model.261.m.2': 'bbox_head.head_module.main_convs_pred.2.2',
'model.261.m.3': 'bbox_head.head_module.main_convs_pred.3.2'
}
convert_dicts = {
'yolov7-tiny.pt': convert_dict_tiny,
'yolov7-w6.pt': convert_dict_w,
'yolov7-e6.pt': convert_dict_e,
'yolov7-e6e.pt': convert_dict_e2e,
'yolov7.pt': convert_dict_l,
'yolov7x.pt': convert_dict_x
}
def convert(src, dst):
    """Convert keys in official pretrained YOLOv7 models to mmyolo style."""
    src_key = osp.basename(src)
    convert_dict = convert_dicts[src_key]

    num_levels = 3
    if src_key == 'yolov7.pt':
        indexes = [102, 51]
        in_channels = [256, 512, 1024]
    elif src_key == 'yolov7x.pt':
        indexes = [121, 59]
        in_channels = [320, 640, 1280]
    elif src_key == 'yolov7-tiny.pt':
        # 1000 is a sentinel index that never matches a real module number.
        indexes = [77, 1000]
        in_channels = [128, 256, 512]
    elif src_key == 'yolov7-w6.pt':
        indexes = [118, 47]
        in_channels = [256, 512, 768, 1024]
        num_levels = 4
    elif src_key == 'yolov7-e6.pt':
        indexes = [140, [2, 13, 24, 35, 46, 57, 100, 112, 124]]
        in_channels = [320, 640, 960, 1280]
        num_levels = 4
    elif src_key == 'yolov7-e6e.pt':
        indexes = [261, [2, 24, 46, 68, 90, 112, 188, 211, 234]]
        in_channels = [320, 640, 960, 1280]
        num_levels = 4
    if isinstance(indexes[1], int):
        indexes[1] = [indexes[1]]

    try:
        yolov7_model = torch.load(src)['model'].float()
        blobs = yolov7_model.state_dict()
    except ModuleNotFoundError:
        raise RuntimeError(
            'This script must be placed under the WongKinYiu/yolov7 repo,'
            ' because loading the official pretrained model needs'
            ' `model.py` to build the model.')

    state_dict = OrderedDict()
    for key, weight in blobs.items():
        if key.find('anchors') >= 0 or key.find('anchor_grid') >= 0:
            continue

        num, module = key.split('.')[1:3]
        if int(num) < indexes[0] and int(num) not in indexes[1]:
            # plain module: remap the 'model.N' prefix
            prefix = f'model.{num}'
            new_key = key.replace(prefix, convert_dict[prefix])
            state_dict[new_key] = weight
            print(f'Convert {key} to {new_key}')
        elif int(num) in indexes[1]:
            # composite module (e.g. SPPCSP or downsample): remap 'model.N.sub'
            strs_key = key.split('.')[:3]
            new_key = key.replace('.'.join(strs_key),
                                  convert_dict['.'.join(strs_key)])
            state_dict[new_key] = weight
            print(f'Convert {key} to {new_key}')
        else:
            # detection head: remap 'model.N.m.i'
            strs_key = key.split('.')[:4]
            new_key = key.replace('.'.join(strs_key),
                                  convert_dict['.'.join(strs_key)])
            state_dict[new_key] = weight
            print(f'Convert {key} to {new_key}')

    # Add ImplicitA and ImplicitM
    for i in range(num_levels):
        if num_levels == 3:
            implicit_a = f'bbox_head.head_module.' \
                         f'convs_pred.{i}.0.implicit'
            state_dict[implicit_a] = torch.zeros((1, in_channels[i], 1, 1))
            implicit_m = f'bbox_head.head_module.' \
                         f'convs_pred.{i}.2.implicit'
            state_dict[implicit_m] = torch.ones((1, 3 * 85, 1, 1))
        else:
            implicit_a = f'bbox_head.head_module.' \
                         f'main_convs_pred.{i}.1.implicit'
            state_dict[implicit_a] = torch.zeros((1, in_channels[i], 1, 1))
            implicit_m = f'bbox_head.head_module.' \
                         f'main_convs_pred.{i}.3.implicit'
            state_dict[implicit_m] = torch.ones((1, 3 * 85, 1, 1))

    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)


# Note: This script must be placed under the yolov7 repo to run.
def main():
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument(
        'src', default='yolov7.pt', help='src yolov7 model path')
    parser.add_argument('dst', default='mm_yolov7l.pt', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)
    print('If your model weights are from P6 models, such as W6, E6, D6, '
          'E6E, the auxiliary training module is not required to be loaded, '
          'so it is normal for the weights of the auxiliary module '
          'to be missing.')


if __name__ == '__main__':
    main()
| 50,022 | 44.724863 | 78 | py |
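The heart of the script above is prefix remapping: each parameter key's `model.N` (or `model.N.sub`) prefix is looked up in one of the conversion tables and replaced with the mmyolo module path. Below is a self-contained toy version of that idea, with a made-up two-entry table rather than the real YOLOv7 layout.

```python
# Toy sketch of the prefix remapping in yolov7_to_mmyolo.py (keys and
# mapping are invented for brevity).
from collections import OrderedDict

import torch

convert_dict = {
    'model.0': 'backbone.stem.0',
    'model.51.cv1': 'neck.reduce_layers.2.main_layers.0',
}

blobs = OrderedDict([
    ('model.0.conv.weight', torch.randn(16, 3, 3, 3)),
    ('model.51.cv1.conv.weight', torch.randn(32, 16, 1, 1)),
])

state_dict = OrderedDict()
for key, weight in blobs.items():
    # Prefer the longer 'model.N.sub' prefix, fall back to 'model.N'.
    long_prefix = '.'.join(key.split('.')[:3])
    short_prefix = '.'.join(key.split('.')[:2])
    prefix = long_prefix if long_prefix in convert_dict else short_prefix
    state_dict[key.replace(prefix, convert_dict[prefix])] = weight

assert 'backbone.stem.0.conv.weight' in state_dict
assert 'neck.reduce_layers.2.main_layers.0.conv.weight' in state_dict
```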
| mmyolo | mmyolo-main/tools/model_converters/yolov5_to_mmyolo.py |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
convert_dict_p5 = {
'model.0': 'backbone.stem',
'model.1': 'backbone.stage1.0',
'model.2': 'backbone.stage1.1',
'model.3': 'backbone.stage2.0',
'model.4': 'backbone.stage2.1',
'model.5': 'backbone.stage3.0',
'model.6': 'backbone.stage3.1',
'model.7': 'backbone.stage4.0',
'model.8': 'backbone.stage4.1',
'model.9.cv1': 'backbone.stage4.2.conv1',
'model.9.cv2': 'backbone.stage4.2.conv2',
'model.10': 'neck.reduce_layers.2',
'model.13': 'neck.top_down_layers.0.0',
'model.14': 'neck.top_down_layers.0.1',
'model.17': 'neck.top_down_layers.1',
'model.18': 'neck.downsample_layers.0',
'model.20': 'neck.bottom_up_layers.0',
'model.21': 'neck.downsample_layers.1',
'model.23': 'neck.bottom_up_layers.1',
'model.24.m': 'bbox_head.head_module.convs_pred',
}
convert_dict_p6 = {
'model.0': 'backbone.stem',
'model.1': 'backbone.stage1.0',
'model.2': 'backbone.stage1.1',
'model.3': 'backbone.stage2.0',
'model.4': 'backbone.stage2.1',
'model.5': 'backbone.stage3.0',
'model.6': 'backbone.stage3.1',
'model.7': 'backbone.stage4.0',
'model.8': 'backbone.stage4.1',
'model.9': 'backbone.stage5.0',
'model.10': 'backbone.stage5.1',
'model.11.cv1': 'backbone.stage5.2.conv1',
'model.11.cv2': 'backbone.stage5.2.conv2',
'model.12': 'neck.reduce_layers.3',
'model.15': 'neck.top_down_layers.0.0',
'model.16': 'neck.top_down_layers.0.1',
'model.19': 'neck.top_down_layers.1.0',
'model.20': 'neck.top_down_layers.1.1',
'model.23': 'neck.top_down_layers.2',
'model.24': 'neck.downsample_layers.0',
'model.26': 'neck.bottom_up_layers.0',
'model.27': 'neck.downsample_layers.1',
'model.29': 'neck.bottom_up_layers.1',
'model.30': 'neck.downsample_layers.2',
'model.32': 'neck.bottom_up_layers.2',
'model.33.m': 'bbox_head.head_module.convs_pred',
}
def convert(src, dst):
    """Convert keys in pretrained YOLOv5 models to mmyolo style."""
    if src.endswith('6.pt'):
        convert_dict = convert_dict_p6
        is_p6_model = True
        print('Converting P6 model')
    else:
        convert_dict = convert_dict_p5
        is_p6_model = False
        print('Converting P5 model')
    try:
        yolov5_model = torch.load(src)['model']
        blobs = yolov5_model.state_dict()
    except ModuleNotFoundError:
        raise RuntimeError(
            'This script must be placed under the ultralytics/yolov5 repo,'
            ' because loading the official pretrained model needs'
            ' `model.py` to build the model.')

    state_dict = OrderedDict()
    for key, weight in blobs.items():
        num, module = key.split('.')[1:3]
        # The SPPF block and the detection head need the sub-module name in
        # the lookup prefix; anchors are skipped entirely.
        if (is_p6_model and num in ('11', '33')) or (not is_p6_model
                                                     and num in ('9', '24')):
            if module == 'anchors':
                continue
            prefix = f'model.{num}.{module}'
        else:
            prefix = f'model.{num}'

        new_key = key.replace(prefix, convert_dict[prefix])

        if '.m.' in new_key:
            new_key = new_key.replace('.m.', '.blocks.')
            new_key = new_key.replace('.cv', '.conv')
        else:
            new_key = new_key.replace('.cv1', '.main_conv')
            new_key = new_key.replace('.cv2', '.short_conv')
            new_key = new_key.replace('.cv3', '.final_conv')
        state_dict[new_key] = weight
        print(f'Convert {key} to {new_key}')

    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)


# Note: This script must be placed under the yolov5 repo to run.
def main():
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument(
        '--src', default='yolov5s.pt', help='src yolov5 model path')
    parser.add_argument('--dst', default='mmyolov5s.pt', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)


if __name__ == '__main__':
    main()
| 4,176 | 32.95935 | 75 | py |
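Given the caveat in the script that it must run from inside an `ultralytics/yolov5` checkout, a typical invocation plus a quick inspection of the result might look like this (file names are examples):

```python
# Run inside the ultralytics/yolov5 checkout (hypothetical paths):
#
#   python yolov5_to_mmyolo.py --src yolov5s.pt --dst mmyolov5s.pt
#
# The output is a plain dict with only a 'state_dict' entry, so it can be
# inspected anywhere, without the yolov5 codebase:
import torch

ckpt = torch.load('mmyolov5s.pt', map_location='cpu')
print(f"{len(ckpt['state_dict'])} tensors converted")
assert any(k.startswith('backbone.stem') for k in ckpt['state_dict'])
assert any(k.startswith('bbox_head.head_module') for k in ckpt['state_dict'])
```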
| mmyolo | mmyolo-main/tools/dataset_converters/labelme2coco.py |
# Copyright (c) OpenMMLab. All rights reserved.
"""This script helps to convert labelme-style dataset to the coco format.
Usage:
$ python labelme2coco.py \
--img-dir /path/to/images \
--labels-dir /path/to/labels \
--out /path/to/coco_instances.json \
[--class-id-txt /path/to/class_with_id.txt]
Note:
Labels dir file structure:
.
└── PATH_TO_LABELS
├── image1.json
├── image2.json
└── ...
Images dir file structure:
.
└── PATH_TO_IMAGES
├── image1.jpg
├── image2.png
└── ...
If user set `--class-id-txt` then will use it in `categories` field,
if not set, then will generate auto base on the all labelme label
files to `class_with_id.json`.
class_with_id.txt example, each line is "id class_name":
```text
1 cat
2 dog
3 bicycle
4 motorcycle
```
"""
import argparse
import json
from pathlib import Path
from typing import Optional
import numpy as np
from mmengine import track_iter_progress
from mmyolo.utils.misc import IMG_EXTENSIONS
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--img-dir', type=str, help='Dataset image directory')
    parser.add_argument(
        '--labels-dir', type=str, help='Dataset labels directory')
    parser.add_argument('--out', type=str, help='COCO label json output path')
    parser.add_argument(
        '--class-id-txt', default=None, type=str,
        help='All class id txt path')
    args = parser.parse_args()
    return args
def format_coco_annotations(points: list, image_id: int, annotations_id: int,
                            category_id: int) -> dict:
    """Gen COCO annotations format label from labelme format label.

    Args:
        points (list): Coordinates of four vertices of rectangle bbox.
        image_id (int): Image id.
        annotations_id (int): Annotations id.
        category_id (int): Category id.

    Return:
        annotation_info (dict): COCO annotation data.
    """
    annotation_info = dict()
    annotation_info['iscrowd'] = 0
    annotation_info['category_id'] = category_id
    annotation_info['id'] = annotations_id
    annotation_info['image_id'] = image_id

    # bbox is [x1, y1, w, h]
    annotation_info['bbox'] = [
        points[0][0], points[0][1], points[1][0] - points[0][0],
        points[1][1] - points[0][1]
    ]
    annotation_info['area'] = annotation_info['bbox'][2] * annotation_info[
        'bbox'][3]  # bbox w * h

    # Reorder the vertices so the polygon traces the rectangle outline
    # (swap rows 1 and 2 of the point array).
    segmentation_points = np.asarray(points).copy()
    segmentation_points[1, :] = np.asarray(points)[2, :]
    segmentation_points[2, :] = np.asarray(points)[1, :]
    annotation_info['segmentation'] = [list(segmentation_points.flatten())]

    return annotation_info
def parse_labelme_to_coco(
image_dir: str,
labels_root: str,
all_classes_id: Optional[dict] = None) -> (dict, dict):
"""Gen COCO json format label from labelme format label.
Args:
image_dir (str): Image dir path.
labels_root (str): Image label root path.
all_classes_id (Optional[dict]): All class with id. Default None.
Return:
coco_json (dict): COCO json data.
category_to_id (dict): category id and name.
COCO json example:
{
"images": [
{
"height": 3000,
"width": 4000,
"id": 1,
"file_name": "IMG_20210627_225110.jpg"
},
...
],
"categories": [
{
"id": 1,
"name": "cat"
},
...
],
"annotations": [
{
"iscrowd": 0,
"category_id": 1,
"id": 1,
"image_id": 1,
"bbox": [
1183.7313232421875,
1230.0509033203125,
1270.9998779296875,
927.0848388671875
],
"area": 1178324.7170306593,
"segmentation": [
[
1183.7313232421875,
1230.0509033203125,
1183.7313232421875,
2157.1357421875,
2454.731201171875,
2157.1357421875,
2454.731201171875,
1230.0509033203125
]
]
},
...
]
}
"""
# init coco json field
coco_json = {'images': [], 'categories': [], 'annotations': []}
image_id = 0
annotations_id = 0
if all_classes_id is None:
category_to_id = dict()
categories_labels = []
else:
category_to_id = all_classes_id
categories_labels = list(all_classes_id.keys())
# add class_ids and class_names to the categories list in coco_json
for class_name, class_id in category_to_id.items():
coco_json['categories'].append({
'id': class_id,
'name': class_name
})
# filter incorrect image file
img_file_list = [
img_file for img_file in Path(image_dir).iterdir()
if img_file.suffix.lower() in IMG_EXTENSIONS
]
for img_file in track_iter_progress(img_file_list):
# get label file according to the image file name
label_path = Path(labels_root).joinpath(
img_file.stem).with_suffix('.json')
if not label_path.exists():
print(f'Can not find label file: {label_path}, skip...')
continue
# load labelme label
with open(label_path, encoding='utf-8') as f:
labelme_data = json.load(f)
image_id = image_id + 1 # coco id begin from 1
# update coco 'images' field
coco_json['images'].append({
'height':
labelme_data['imageHeight'],
'width':
labelme_data['imageWidth'],
'id':
image_id,
'file_name':
Path(labelme_data['imagePath']).name
})
for label_shapes in labelme_data['shapes']:
# Update coco 'categories' field
class_name = label_shapes['label']
if (all_classes_id is None) and (class_name
not in categories_labels):
# only update when not been added before
coco_json['categories'].append({
'id':
len(categories_labels) + 1, # categories id start with 1
'name': class_name
})
categories_labels.append(class_name)
category_to_id[class_name] = len(categories_labels)
elif (all_classes_id is not None) and (class_name
not in categories_labels):
# check class name
raise ValueError(f'Got unexpected class name {class_name}, '
'which is not in your `--class-id-txt`.')
# get shape type and convert it to coco format
shape_type = label_shapes['shape_type']
if shape_type != 'rectangle':
print(f'not support `{shape_type}` yet, skip...')
continue
annotations_id = annotations_id + 1
# convert point from [xmin, ymin, xmax, ymax] to [x1, y1, w, h]
(x1, y1), (x2, y2) = label_shapes['points']
x1, x2 = sorted([x1, x2]) # xmin, xmax
y1, y2 = sorted([y1, y2]) # ymin, ymax
points = [[x1, y1], [x2, y2], [x1, y2], [x2, y1]]
coco_annotations = format_coco_annotations(
points, image_id, annotations_id, category_to_id[class_name])
coco_json['annotations'].append(coco_annotations)
print(f'Total image = {image_id}')
print(f'Total annotations = {annotations_id}')
print(f'Number of categories = {len(categories_labels)}, '
f'which is {categories_labels}')
return coco_json, category_to_id
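# A hedged usage sketch (editor-added); the directory layout below is
# hypothetical and not shipped with this repo.
def _example_parse_labelme_to_coco():
    coco_json, category_to_id = parse_labelme_to_coco(
        image_dir='data/my_dataset/images',
        labels_root='data/my_dataset/labelme_labels')
    print(f"{len(coco_json['images'])} images, classes: {category_to_id}")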
def convert_labelme_to_coco(image_dir: str,
labels_dir: str,
out_path: str,
class_id_txt: Optional[str] = None):
"""Convert labelme format label to COCO json format label.
Args:
image_dir (str): Image dir path.
labels_dir (str): Image label path.
out_path (str): COCO json file save path.
class_id_txt (Optional[str]): All class id txt file path.
Default None.
"""
assert Path(out_path).suffix == '.json'
if class_id_txt is not None:
assert Path(class_id_txt).suffix == '.txt'
all_classes_id = dict()
with open(class_id_txt, encoding='utf-8') as f:
txt_lines = f.read().splitlines()
assert len(txt_lines) > 0
for txt_line in txt_lines:
class_info = txt_line.split(' ')
if len(class_info) != 2:
                raise ValueError('Error parsing "class_id_txt" file '
                                 f'{class_id_txt}. Please check whether any '
                                 'class name is blank, e.g. "1 " -> '
                                 '"1 blank", or whether a class name has '
                                 'spaces between words, e.g. "1 Big house" '
                                 '-> "1 Big-house".')
v, k = class_info
all_classes_id.update({k: int(v)})
else:
all_classes_id = None
# convert to coco json
coco_json_data, category_to_id = parse_labelme_to_coco(
image_dir, labels_dir, all_classes_id)
# save json result
Path(out_path).parent.mkdir(exist_ok=True, parents=True)
print(f'Saving json to {out_path}')
json.dump(coco_json_data, open(out_path, 'w'), indent=2)
if class_id_txt is None:
category_to_id_path = Path(out_path).with_name('class_with_id.txt')
print(f'Saving class id txt to {category_to_id_path}')
with open(category_to_id_path, 'w', encoding='utf-8') as f:
for k, v in category_to_id.items():
f.write(f'{v} {k}\n')
else:
        print('Not saving a new class id txt; please use '
              f'{class_id_txt} for the training config')
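# A hedged example (editor-added) of a valid `--class-id-txt` file; the ids
# and class names below are purely illustrative:
#
#     1 cat
#     2 dog
#     3 bicycle
#
# i.e. one "<id> <name>" pair per line, with no blank names and no spaces
# inside a class name (use "Big-house" instead of "Big house").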
def main():
args = parse_args()
convert_labelme_to_coco(args.img_dir, args.labels_dir, args.out,
args.class_id_txt)
print('All done!')
if __name__ == '__main__':
main()
| 10,676
| 31.751534
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/dataset_converters/balloon2coco.py
|
import os.path as osp
import mmcv
import mmengine
def convert_balloon_to_coco(ann_file, out_file, image_prefix):
data_infos = mmengine.load(ann_file)
annotations = []
images = []
obj_count = 0
for idx, v in enumerate(mmengine.track_iter_progress(data_infos.values())):
filename = v['filename']
img_path = osp.join(image_prefix, filename)
height, width = mmcv.imread(img_path).shape[:2]
images.append(
dict(id=idx, file_name=filename, height=height, width=width))
for _, obj in v['regions'].items():
assert not obj['region_attributes']
obj = obj['shape_attributes']
px = obj['all_points_x']
py = obj['all_points_y']
poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
poly = [p for x in poly for p in x]
x_min, y_min, x_max, y_max = (min(px), min(py), max(px), max(py))
data_anno = dict(
image_id=idx,
id=obj_count,
category_id=0,
bbox=[x_min, y_min, x_max - x_min, y_max - y_min],
area=(x_max - x_min) * (y_max - y_min),
segmentation=[poly],
iscrowd=0)
annotations.append(data_anno)
obj_count += 1
coco_format_json = dict(
images=images,
annotations=annotations,
categories=[{
'id': 0,
'name': 'balloon'
}])
mmengine.dump(coco_format_json, out_file)
if __name__ == '__main__':
convert_balloon_to_coco('data/balloon/train/via_region_data.json',
'data/balloon/train.json', 'data/balloon/train/')
convert_balloon_to_coco('data/balloon/val/via_region_data.json',
'data/balloon/val.json', 'data/balloon/val/')
| 1,850
| 30.372881
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/dataset_converters/yolo2coco.py
|
"""This script helps to convert yolo-style dataset to the coco format.
Usage:
$ python yolo2coco.py /path/to/dataset # image_dir
Note:
    1. Before running this script, please make sure the root directory
    of your dataset is structured as follows:
.
└── $ROOT_PATH
├── classes.txt
├── labels
│ ├── a.txt
│ ├── b.txt
│ └── ...
├── images
│ ├── a.jpg
│ ├── b.png
│ └── ...
└── ...
2. The script will automatically check whether the corresponding
    `train.txt`, `val.txt`, and `test.txt` exist under your `image_dir`
or not. If these files are detected, the script will organize the
dataset. The image paths in these files must be ABSOLUTE paths.
3. Once the script finishes, the result files will be saved in the
directory named 'annotations' in the root directory of your dataset.
    The default output file is result.json. The root directory may
    look like this after the conversion:
.
└── $ROOT_PATH
├── annotations
│ ├── result.json
│ └── ...
├── classes.txt
├── labels
│ ├── a.txt
│ ├── b.txt
│ └── ...
├── images
│ ├── a.jpg
│ ├── b.png
│ └── ...
└── ...
4. After converting to coco, you can use the
`tools/analysis_tools/browse_coco_json.py` script to visualize
whether it is correct.
"""
import argparse
import os
import os.path as osp
import mmcv
import mmengine
IMG_EXTENSIONS = ('.jpg', '.png', '.jpeg')
def check_existence(file_path: str):
"""Check if target file is existed."""
if not osp.exists(file_path):
raise FileNotFoundError(f'{file_path} does not exist!')
def get_image_info(yolo_image_dir, idx, file_name):
"""Retrieve image information."""
img_path = osp.join(yolo_image_dir, file_name)
check_existence(img_path)
img = mmcv.imread(img_path)
height, width = img.shape[:2]
img_info_dict = {
'file_name': file_name,
'id': idx,
'width': width,
'height': height
}
return img_info_dict, height, width
def convert_bbox_info(label, idx, obj_count, image_height, image_width):
"""Convert yolo-style bbox info to the coco format."""
label = label.strip().split()
x = float(label[1])
y = float(label[2])
w = float(label[3])
h = float(label[4])
# convert x,y,w,h to x1,y1,x2,y2
x1 = (x - w / 2) * image_width
y1 = (y - h / 2) * image_height
x2 = (x + w / 2) * image_width
y2 = (y + h / 2) * image_height
cls_id = int(label[0])
width = max(0., x2 - x1)
height = max(0., y2 - y1)
coco_format_info = {
'image_id': idx,
'id': obj_count,
'category_id': cls_id,
'bbox': [x1, y1, width, height],
'area': width * height,
'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]],
'iscrowd': 0
}
obj_count += 1
return coco_format_info, obj_count
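# A hedged, editor-added sanity check of the YOLO -> COCO bbox conversion;
# the numbers are chosen to be exact in binary floating point.
def _example_convert_bbox_info():
    # normalized "class cx cy w h" on a 100x200 (width x height) image
    info, obj_count = convert_bbox_info(
        '0 0.5 0.5 0.5 0.5', idx=0, obj_count=0,
        image_height=200, image_width=100)
    # cx=50, w=50 -> x1=25; cy=100, h=100 -> y1=50
    assert info['bbox'] == [25.0, 50.0, 50.0, 100.0] and obj_count == 1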
def organize_by_existing_files(image_dir: str, existed_categories: list):
"""Format annotations by existing train/val/test files."""
categories = ['train', 'val', 'test']
image_list = []
for cat in categories:
if cat in existed_categories:
txt_file = osp.join(image_dir, f'{cat}.txt')
print(f'Start to read {cat} dataset definition')
assert osp.exists(txt_file)
with open(txt_file) as f:
img_paths = f.readlines()
img_paths = [
os.path.split(img_path.strip())[1]
for img_path in img_paths
] # split the absolute path
image_list.append(img_paths)
else:
image_list.append([])
return image_list[0], image_list[1], image_list[2]
def convert_yolo_to_coco(image_dir: str):
"""Convert annotations from yolo style to coco style.
Args:
image_dir (str): the root directory of your datasets which contains
labels, images, classes.txt, etc
"""
print(f'Start to load existing images and annotations from {image_dir}')
check_existence(image_dir)
# check local environment
yolo_label_dir = osp.join(image_dir, 'labels')
yolo_image_dir = osp.join(image_dir, 'images')
yolo_class_txt = osp.join(image_dir, 'classes.txt')
check_existence(yolo_label_dir)
check_existence(yolo_image_dir)
check_existence(yolo_class_txt)
print(f'All necessary files are located at {image_dir}')
train_txt_path = osp.join(image_dir, 'train.txt')
val_txt_path = osp.join(image_dir, 'val.txt')
test_txt_path = osp.join(image_dir, 'test.txt')
existed_categories = []
print(f'Checking if train.txt, val.txt, and test.txt are in {image_dir}')
if osp.exists(train_txt_path):
print('Found train.txt')
existed_categories.append('train')
if osp.exists(val_txt_path):
print('Found val.txt')
existed_categories.append('val')
if osp.exists(test_txt_path):
print('Found test.txt')
existed_categories.append('test')
# prepare the output folders
output_folder = osp.join(image_dir, 'annotations')
if not osp.exists(output_folder):
os.makedirs(output_folder)
check_existence(output_folder)
# start the convert procedure
with open(yolo_class_txt) as f:
classes = f.read().strip().split()
indices = os.listdir(yolo_image_dir)
total = len(indices)
dataset = {'images': [], 'annotations': [], 'categories': []}
if existed_categories == []:
        print('No train/val/test split files found; no need to '
              'organize separately.')
for i, cls in enumerate(classes, 0):
dataset['categories'].append({'id': i, 'name': cls})
else:
print('Need to organize the data accordingly.')
train_dataset = {'images': [], 'annotations': [], 'categories': []}
val_dataset = {'images': [], 'annotations': [], 'categories': []}
test_dataset = {'images': [], 'annotations': [], 'categories': []}
# category id starts from 0
for i, cls in enumerate(classes, 0):
train_dataset['categories'].append({'id': i, 'name': cls})
val_dataset['categories'].append({'id': i, 'name': cls})
test_dataset['categories'].append({'id': i, 'name': cls})
train_img, val_img, test_img = organize_by_existing_files(
image_dir, existed_categories)
obj_count = 0
skipped = 0
converted = 0
for idx, image in enumerate(mmengine.track_iter_progress(indices)):
img_info_dict, image_height, image_width = get_image_info(
yolo_image_dir, idx, image)
if existed_categories != []:
if image in train_img:
dataset = train_dataset
elif image in val_img:
dataset = val_dataset
elif image in test_img:
dataset = test_dataset
dataset['images'].append(img_info_dict)
img_name = osp.splitext(image)[0]
label_path = f'{osp.join(yolo_label_dir, img_name)}.txt'
if not osp.exists(label_path):
            # skip images that are not annotated (no label file found)
print(
f'WARNING: {label_path} does not exist. Please check the file.'
)
skipped += 1
continue
with open(label_path) as f:
labels = f.readlines()
for label in labels:
coco_info, obj_count = convert_bbox_info(
label, idx, obj_count, image_height, image_width)
dataset['annotations'].append(coco_info)
converted += 1
# saving results to result json
if existed_categories == []:
out_file = osp.join(image_dir, 'annotations/result.json')
print(f'Saving converted results to {out_file} ...')
mmengine.dump(dataset, out_file)
else:
for category in existed_categories:
out_file = osp.join(output_folder, f'{category}.json')
print(f'Saving converted results to {out_file} ...')
if category == 'train':
mmengine.dump(train_dataset, out_file)
elif category == 'val':
mmengine.dump(val_dataset, out_file)
elif category == 'test':
mmengine.dump(test_dataset, out_file)
# simple statistics
print(f'Process finished! Please check at {output_folder} .')
print(f'Number of images found: {total}, converted: {converted},',
f'and skipped: {skipped}. Total annotation count: {obj_count}.')
print('You can use tools/analysis_tools/browse_coco_json.py to visualize!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'image_dir',
type=str,
help='dataset directory with ./images and ./labels, classes.txt, etc.')
arg = parser.parse_args()
convert_yolo_to_coco(arg.image_dir)
| 9,114
| 33.396226
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/dataset_converters/dota/dota_split.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Reference: https://github.com/jbwang1997/BboxToolkit
import argparse
import codecs
import datetime
import itertools
import os
import os.path as osp
import time
from functools import partial, reduce
from math import ceil
from multiprocessing import Manager, Pool
from typing import List, Sequence
import cv2
import numpy as np
from mmengine import Config, MMLogger, mkdir_or_exist, print_log
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
try:
import shapely.geometry as shgeo
except ImportError:
raise ImportError('Please run "pip install shapely" '
'to install shapely first.')
PHASE_REQUIRE_SETS = dict(
trainval=['train', 'val'],
train=[
'train',
],
val=[
'val',
],
test=[
'test',
],
)
def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'split_config', type=str, help='The split config for image slicing.')
parser.add_argument(
'data_root', type=str, help='Root dir of DOTA dataset.')
parser.add_argument(
'out_dir', type=str, help='Output dir for split result.')
parser.add_argument(
'--ann-subdir',
default='labelTxt-v1.0',
type=str,
        help='The subdirectory name of the annotations.')
parser.add_argument(
'--phase',
'-p',
nargs='+',
default=['trainval', 'test'],
type=str,
choices=['trainval', 'train', 'val', 'test'],
help='Phase of the data set to be prepared.')
parser.add_argument(
'--nproc', default=8, type=int, help='Number of processes.')
parser.add_argument(
'--save-ext',
default=None,
type=str,
help='Extension of the saved image.')
parser.add_argument(
'--overwrite',
action='store_true',
help='Whether to allow overwrite if annotation folder exist.')
args = parser.parse_args()
assert args.split_config is not None, "argument split_config can't be None"
split_cfg = Config.fromfile(args.split_config)
# assert arguments
assert args.data_root is not None, "argument data_root can't be None"
if args.save_ext:
assert args.save_ext in ['png', 'jpg', 'bmp', 'tif']
assert len(split_cfg.patch_sizes) == len(split_cfg.patch_overlap_sizes)
assert 0 <= split_cfg.iof_thr <= 1
if split_cfg.get('padding'):
padding_value = split_cfg.get('padding_value')
assert padding_value is not None, \
"padding_value can't be None when padding is True."
padding_value = padding_value[0] \
if len(padding_value) == 1 else padding_value
split_cfg.padding_value = padding_value
else:
split_cfg.padding = False
split_cfg.padding_value = None
return args, split_cfg
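# A hedged example (editor-added) of a minimal split config consumed above;
# the values are illustrative only, not recommended defaults:
#
#     patch_sizes = [1024]
#     patch_overlap_sizes = [200]
#     img_resize_ratio = [1.0]
#     min_img_ratio = 0.6
#     iof_thr = 0.7
#     padding = True
#     padding_value = [104, 116, 124]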
def _make_dirs(out_dir: str, phase: List[str], allow_overwrite: bool):
"""Prepare folder for DOTA dataset.
Args:
out_dir (str): The output dir for DOTA split.
phase (List[str]): The phase to prepare.
allow_overwrite (bool): Whether to allow overwrite when folder exist.
"""
logger = MMLogger.get_current_instance()
for p in phase:
phase_dir = osp.join(out_dir, p)
if not allow_overwrite:
            assert not osp.exists(phase_dir), \
                f'{phase_dir} already exists. ' \
                'If you want to ignore existing files, set --overwrite'
else:
if osp.exists(phase_dir):
logger.warning(
                    f'{p} set in {phase_dir} will be overwritten')
mkdir_or_exist(phase_dir)
mkdir_or_exist(osp.join(phase_dir, 'images'))
mkdir_or_exist(osp.join(phase_dir, 'annfiles'))
def load_original_annotations(data_root: str,
ann_subdir: str = 'labelTxt-v1.0',
phase: str = 'train',
nproc: int = 8):
img_dir = osp.join(data_root, phase, 'images')
assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
if phase == 'test':
ann_dir = None
else:
ann_dir = osp.join(data_root, phase, ann_subdir, 'labelTxt')
assert osp.isdir(ann_dir), f'The {ann_dir} is not an existing dir!'
_load_func = partial(_load_dota_single, img_dir=img_dir, ann_dir=ann_dir)
if nproc > 1:
pool = Pool(nproc)
contents = pool.map(_load_func, os.listdir(img_dir))
pool.close()
else:
contents = list(map(_load_func, os.listdir(img_dir)))
infos = [c for c in contents if c is not None]
return infos
def _load_dota_single(imgfile: str, img_dir: str, ann_dir: str):
"""Load DOTA's single image.
Args:
imgfile (str): Filename of single image.
img_dir (str): Path of images.
ann_dir (str): Path of annotations.
Returns:
result (dict): Information of a single image.
- ``id``: Image id.
- ``filename``: Filename of single image.
- ``filepath``: Filepath of single image.
- ``width``: The width of image.
- ``height``: The height of image.
- ``annotations``: The annotation of single image.
- ``gsd``: The ground sampling distance.
"""
img_id, ext = osp.splitext(imgfile)
if ext not in ['.jpg', '.JPG', '.png', '.tif', '.bmp']:
return None
imgpath = osp.join(img_dir, imgfile)
size = Image.open(imgpath).size
txtfile = None if ann_dir is None else osp.join(ann_dir, img_id + '.txt')
content = _load_dota_txt(txtfile)
content.update(
dict(
width=size[0],
height=size[1],
filename=imgfile,
filepath=imgpath,
id=img_id))
return content
def _load_dota_txt(txtfile):
"""Load DOTA's txt annotation.
Args:
txtfile (str): Filename of single Dota txt annotation.
Returns:
result (dict): Annotation of single image.
- ``annotations``: The annotation of single image.
- ``gsd``: The ground sampling distance.
"""
gsd, bboxes, labels, diffs = None, [], [], []
if txtfile is None:
pass
elif not osp.isfile(txtfile):
print(f"Can't find {txtfile}, treated as empty txtfile")
else:
with open(txtfile) as f:
for line in f:
if line.startswith('gsd'):
num = line.split(':')[-1]
try:
gsd = float(num)
except ValueError:
gsd = None
continue
items = line.split(' ')
if len(items) >= 9:
bboxes.append([float(i) for i in items[:8]])
labels.append(items[8])
diffs.append(int(items[9]) if len(items) == 10 else 0)
bboxes = np.array(bboxes, dtype=np.float32) if bboxes else \
np.zeros((0, 8), dtype=np.float32)
diffs = np.array(diffs, dtype=np.int64) if diffs else \
np.zeros((0,), dtype=np.int64)
ann = dict(bboxes=bboxes, labels=labels, diffs=diffs)
return dict(gsd=gsd, annotations=ann)
def poly2hbb(polys):
"""Convert polygons to horizontal bboxes.
Args:
polys (np.array): Polygons with shape (N, 8)
Returns:
np.array: Horizontal bboxes.
"""
shape = polys.shape
polys = polys.reshape(*shape[:-1], shape[-1] // 2, 2)
lt_point = np.min(polys, axis=-2)
rb_point = np.max(polys, axis=-2)
return np.concatenate([lt_point, rb_point], axis=-1)
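# A hedged, editor-added example: the horizontal bbox of a rotated
# quadrilateral is simply the min/max of its corner coordinates.
def _example_poly2hbb():
    diamond = np.array([[10., 0., 20., 10., 10., 20., 0., 10.]])
    assert (poly2hbb(diamond) == np.array([[0., 0., 20., 20.]])).all()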
def get_sliding_window(info, patch_settings, img_rate_thr):
"""Get sliding windows.
Args:
info (dict): Dict of image's width and height.
patch_settings (list): List of patch settings,
each in format (patch_size, patch_overlap).
img_rate_thr (float): Threshold of window area divided by image area.
Returns:
list[np.array]: Information of valid windows.
"""
eps = 0.01
windows = []
width, height = info['width'], info['height']
for (size, gap) in patch_settings:
        assert size > gap, f'invalid size gap pair [{size} {gap}]'
step = size - gap
x_num = 1 if width <= size else ceil((width - size) / step + 1)
x_start = [step * i for i in range(x_num)]
if len(x_start) > 1 and x_start[-1] + size > width:
x_start[-1] = width - size
y_num = 1 if height <= size else ceil((height - size) / step + 1)
y_start = [step * i for i in range(y_num)]
if len(y_start) > 1 and y_start[-1] + size > height:
y_start[-1] = height - size
start = np.array(
list(itertools.product(x_start, y_start)), dtype=np.int64)
stop = start + size
windows.append(np.concatenate([start, stop], axis=1))
windows = np.concatenate(windows, axis=0)
img_in_wins = windows.copy()
img_in_wins[:, 0::2] = np.clip(img_in_wins[:, 0::2], 0, width)
img_in_wins[:, 1::2] = np.clip(img_in_wins[:, 1::2], 0, height)
img_areas = (img_in_wins[:, 2] - img_in_wins[:, 0]) * \
(img_in_wins[:, 3] - img_in_wins[:, 1])
win_areas = (windows[:, 2] - windows[:, 0]) * \
(windows[:, 3] - windows[:, 1])
img_rates = img_areas / win_areas
if not (img_rates > img_rate_thr).any():
max_rate = img_rates.max()
img_rates[abs(img_rates - max_rate) < eps] = 1
return windows[img_rates > img_rate_thr]
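# A hedged, editor-added example of the sliding-window layout: a 1500x1000
# image with a single (patch_size=1024, gap=200) setting yields two windows
# along x (the last one shifted left to end at the image border) and one
# along y.
def _example_get_sliding_window():
    windows = get_sliding_window(
        dict(width=1500, height=1000), [(1024, 200)], img_rate_thr=0.6)
    assert (windows == np.array([[0, 0, 1024, 1024],
                                 [476, 0, 1500, 1024]])).all()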
def get_window_annotation(info, windows, iof_thr):
"""Get annotation by sliding windows.
Args:
info (dict): Dict of bbox annotations.
windows (np.array): information of sliding windows.
iof_thr (float): Threshold of overlaps between bbox and window.
Returns:
list[dict]: List of bbox annotations of every window.
"""
bboxes = info['annotations']['bboxes']
iofs = ann_window_iof(bboxes, windows)
window_anns = []
for i in range(windows.shape[0]):
win_iofs = iofs[:, i]
pos_inds = np.nonzero(win_iofs >= iof_thr)[0].tolist()
win_ann = dict()
for k, v in info['annotations'].items():
try:
win_ann[k] = v[pos_inds]
except TypeError:
win_ann[k] = [v[i] for i in pos_inds]
win_ann['trunc'] = win_iofs[pos_inds] < 1
window_anns.append(win_ann)
return window_anns
def ann_window_iof(anns, window, eps=1e-6):
"""Compute overlaps (iof) between annotations (poly) and window (hbox).
Args:
anns (np.array): quadri annotations with shape (n, 8).
window (np.array): slide windows with shape (m, 4).
eps (float, optional): Defaults to 1e-6.
Returns:
np.array: iof between box and window.
"""
rows = anns.shape[0]
cols = window.shape[0]
if rows * cols == 0:
return np.zeros((rows, cols), dtype=np.float32)
hbboxes_ann = poly2hbb(anns)
hbboxes_win = window
hbboxes_ann = hbboxes_ann[:, None, :]
lt = np.maximum(hbboxes_ann[..., :2], hbboxes_win[..., :2])
rb = np.minimum(hbboxes_ann[..., 2:], hbboxes_win[..., 2:])
wh = np.clip(rb - lt, 0, np.inf)
h_overlaps = wh[..., 0] * wh[..., 1]
l, t, r, b = (window[..., i] for i in range(4))
polys_win = np.stack([l, t, r, t, r, b, l, b], axis=-1)
sg_polys_ann = [shgeo.Polygon(p) for p in anns.reshape(rows, -1, 2)]
sg_polys_win = [shgeo.Polygon(p) for p in polys_win.reshape(cols, -1, 2)]
overlaps = np.zeros(h_overlaps.shape)
for p in zip(*np.nonzero(h_overlaps)):
overlaps[p] = sg_polys_ann[p[0]].intersection(sg_polys_win[p[-1]]).area
unions = np.array([p.area for p in sg_polys_ann], dtype=np.float32)
unions = unions[..., None]
unions = np.clip(unions, eps, np.inf)
outputs = overlaps / unions
if outputs.ndim == 1:
outputs = outputs[..., None]
return outputs
def crop_and_save_img(info, windows, window_anns, padding, padding_value,
save_dir, anno_dir, img_ext):
"""Crop the image and save.
Args:
info (dict): Image's information.
windows (np.array): information of sliding windows.
window_anns (list[dict]): List of bbox annotations of every window.
padding (bool): If True, with padding.
padding_value (tuple[int|float]): Padding value.
save_dir (str): Save filename.
anno_dir (str): Annotation filename.
img_ext (str): Picture suffix.
Returns:
list[dict]: Information of paths.
"""
img = cv2.imread(info['filepath'])
patch_infos = []
for window, ann in zip(windows, window_anns):
patch_info = dict()
for k, v in info.items():
if k not in [
'id', 'filename', 'filepath', 'width', 'height',
'annotations'
]:
patch_info[k] = v
x_start, y_start, x_stop, y_stop = window.tolist()
patch_info['x_start'] = x_start
patch_info['y_start'] = y_start
patch_info['id'] = \
info['id'] + '__' + str(x_stop - x_start) + \
'__' + str(x_start) + '___' + str(y_start)
patch_info['ori_id'] = info['id']
ann['bboxes'] = shift_qbboxes(ann['bboxes'], [-x_start, -y_start])
patch_info['ann'] = ann
patch = img[y_start:y_stop, x_start:x_stop]
if padding:
height = y_stop - y_start
width = x_stop - x_start
if height > patch.shape[0] or width > patch.shape[1]:
padding_patch = np.empty((height, width, patch.shape[-1]),
dtype=np.uint8)
if not isinstance(padding_value, (int, float)):
assert len(padding_value) == patch.shape[-1]
padding_patch[...] = padding_value
padding_patch[:patch.shape[0], :patch.shape[1], ...] = patch
patch = padding_patch
patch_info['height'] = patch.shape[0]
patch_info['width'] = patch.shape[1]
cv2.imwrite(
osp.join(save_dir, patch_info['id'] + '.' + img_ext), patch)
patch_info['filename'] = patch_info['id'] + '.' + img_ext
patch_infos.append(patch_info)
bboxes_num = patch_info['ann']['bboxes'].shape[0]
outdir = os.path.join(anno_dir, patch_info['id'] + '.txt')
with codecs.open(outdir, 'w', 'utf-8') as f_out:
if bboxes_num == 0:
pass
else:
for idx in range(bboxes_num):
obj = patch_info['ann']
outline = ' '.join(list(map(str, obj['bboxes'][idx])))
diffs = str(
obj['diffs'][idx]) if not obj['trunc'][idx] else '2'
outline = outline + ' ' + obj['labels'][idx] + ' ' + diffs
f_out.write(outline + '\n')
return patch_infos
def shift_qbboxes(bboxes, offset: Sequence[float]):
"""Map bboxes from window coordinate back to original coordinate. TODO
Refactor and move to `mmyolo/utils/large_image.py`
Args:
bboxes (np.array): quadrilateral boxes with window coordinate.
offset (Sequence[float]): The translation offsets with shape of (2, ).
Returns:
np.array: bboxes with original coordinate.
"""
dim = bboxes.shape[-1]
translated = bboxes + np.array(offset * int(dim / 2), dtype=np.float32)
return translated
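# A hedged, editor-added example: shifting a quadrilateral by the negative
# window origin maps it from image coordinates into window-local coordinates.
def _example_shift_qbboxes():
    qbox = np.array([[100., 100., 200., 100., 200., 200., 100., 200.]])
    expected = np.array([[0., 0., 100., 0., 100., 100., 0., 100.]])
    assert (shift_qbboxes(qbox, [-100., -100.]) == expected).all()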
def single_split(info, patch_settings, min_img_ratio, iof_thr, padding,
padding_value, save_dir, anno_dir, img_ext, lock, prog,
total):
"""Single image split. TODO Refactoring to make it more generic.
Args:
info (dict): Image info and annotations.
patch_settings (list): List of patch settings,
each in format (patch_size, patch_overlap).
min_img_ratio (float): Threshold of window area divided by image area.
iof_thr (float): Threshold of overlaps between bbox and window.
padding (bool): If True, with padding.
padding_value (tuple[int|float]): Padding value.
save_dir (str): Save filename.
anno_dir (str): Annotation filename.
img_ext (str): Picture suffix.
lock (Lock): Lock of Manager.
prog (object): Progress of Manager.
total (int): Length of infos.
Returns:
list[dict]: Information of paths.
"""
img_ext = img_ext if img_ext is not None else info['filename'].split(
'.')[-1]
windows = get_sliding_window(info, patch_settings, min_img_ratio)
window_anns = get_window_annotation(info, windows, iof_thr)
patch_infos = crop_and_save_img(info, windows, window_anns, padding,
padding_value, save_dir, anno_dir, img_ext)
assert patch_infos
lock.acquire()
prog.value += 1
msg = f'({prog.value / total:3.1%} {prog.value}:{total})'
msg += ' - ' + f"Filename: {info['filename']}"
msg += ' - ' + f"width: {info['width']:<5d}"
msg += ' - ' + f"height: {info['height']:<5d}"
msg += ' - ' + f"Objects: {len(info['annotations']['bboxes']):<5d}"
msg += ' - ' + f'Patches: {len(patch_infos)}'
print_log(msg, 'current')
lock.release()
return patch_infos
def main():
args, split_cfg = parse_args()
mkdir_or_exist(args.out_dir)
# init logger
log_file_name = datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '.log'
logger: MMLogger = MMLogger.get_instance(
'mmyolo',
log_file=osp.join(args.out_dir, log_file_name),
log_level='INFO')
# print configs
arg_str = ''
for arg in args._get_kwargs():
arg_str += arg[0] + ' = ' + str(arg[1]) + '\n'
logger.info('Base Settings:\n' + arg_str)
logger.info('Split Settings:\n' + split_cfg.pretty_text)
# make dirs
_make_dirs(args.out_dir, args.phase, args.overwrite)
# Load original dota data
required_sets = []
for p in args.phase:
required_sets.extend(PHASE_REQUIRE_SETS[p])
required_sets = set(required_sets)
loaded_data_set = dict()
for req_set in required_sets:
logger.info(f'Starting loading DOTA {req_set} set information.')
start_time = time.time()
infos = load_original_annotations(
data_root=args.data_root,
ann_subdir=args.ann_subdir,
phase=req_set)
end_time = time.time()
result_log = f'Finishing loading {req_set} set, '
result_log += f'get {len(infos)} images, '
result_log += f'using {end_time - start_time:.3f}s.'
logger.info(result_log)
loaded_data_set[req_set] = infos
# Preprocess patch settings
patch_settings = []
for ratio in split_cfg.img_resize_ratio:
for size, gap in zip(split_cfg.patch_sizes,
split_cfg.patch_overlap_sizes):
size_gap = (int(size / ratio), int(gap / ratio))
if size_gap not in patch_settings:
patch_settings.append(size_gap)
# Split data
for p in args.phase:
save_imgs_dir = osp.join(args.out_dir, p, 'images')
save_anns_dir = osp.join(args.out_dir, p, 'annfiles')
logger.info(f'Start splitting {p} set images!')
start = time.time()
manager = Manager()
data_infos = []
for req_set in PHASE_REQUIRE_SETS[p]:
data_infos.extend(loaded_data_set[req_set])
worker = partial(
single_split,
patch_settings=patch_settings,
min_img_ratio=split_cfg.min_img_ratio,
iof_thr=split_cfg.iof_thr,
padding=split_cfg.padding,
padding_value=split_cfg.padding_value,
save_dir=save_imgs_dir,
anno_dir=save_anns_dir,
img_ext=args.save_ext,
lock=manager.Lock(),
prog=manager.Value('i', 0),
total=len(data_infos))
if args.nproc > 1:
pool = Pool(args.nproc)
patch_infos = pool.map(worker, data_infos)
pool.close()
else:
patch_infos = list(map(worker, data_infos))
patch_infos = reduce(lambda x, y: x + y, patch_infos)
stop = time.time()
logger.info(
            f'Finished splitting {p} set images in '
            f'{int(stop - start)} seconds!')
logger.info(f'Total images number: {len(patch_infos)}')
if __name__ == '__main__':
main()
| 20,591
| 33.092715
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/analysis_tools/benchmark.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import time
import torch
from mmengine import Config, DictAction
from mmengine.dist import get_world_size, init_dist
from mmengine.logging import MMLogger, print_log
from mmengine.registry import init_default_scope
from mmengine.runner import Runner, load_checkpoint
from mmengine.utils import mkdir_or_exist
from mmengine.utils.dl_utils import set_multi_processing
from mmyolo.registry import MODELS
# TODO: Refactoring and improving
def parse_args():
parser = argparse.ArgumentParser(description='MMYOLO benchmark a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
help='number of repeat times of measurement for averaging the results')
parser.add_argument(
'--max-iter', type=int, default=2000, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing '
'benchmark metrics')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
        'the inference speed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
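# A hedged usage sketch (editor-added); the config and checkpoint paths are
# hypothetical placeholders:
#
#     python tools/analysis_tools/benchmark.py \
#         configs/yolov5/my_yolov5_s.py work_dirs/my_yolov5_s.pth \
#         --max-iter 2000 --log-interval 50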
def measure_inference_speed(cfg, checkpoint, max_iter, log_interval,
is_fuse_conv_bn):
env_cfg = cfg.get('env_cfg')
if env_cfg.get('cudnn_benchmark'):
torch.backends.cudnn.benchmark = True
mp_cfg: dict = env_cfg.get('mp_cfg', {})
set_multi_processing(**mp_cfg, distributed=cfg.distributed)
# Because multiple processes will occupy additional CPU resources,
# FPS statistics will be more unstable when num_workers is not 0.
# It is reasonable to set num_workers to 0.
dataloader_cfg = cfg.test_dataloader
dataloader_cfg['num_workers'] = 0
dataloader_cfg['batch_size'] = 1
dataloader_cfg['persistent_workers'] = False
data_loader = Runner.build_dataloader(dataloader_cfg)
# build the model and load checkpoint
model = MODELS.build(cfg.model)
load_checkpoint(model, checkpoint, map_location='cpu')
model = model.cuda()
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
fps = 0
    # benchmark with up to `max_iter` (default 2000) images and average
for i, data in enumerate(data_loader):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model.test_step(data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
print_log(
f'Done image [{i + 1:<3}/ {max_iter}], '
f'fps: {fps:.1f} img / s, '
f'times per image: {1000 / fps:.1f} ms / img', 'current')
if (i + 1) == max_iter:
fps = (i + 1 - num_warmup) / pure_inf_time
print_log(
f'Overall fps: {fps:.1f} img / s, '
f'times per image: {1000 / fps:.1f} ms / img', 'current')
break
return fps
def repeat_measure_inference_speed(cfg,
checkpoint,
max_iter,
log_interval,
is_fuse_conv_bn,
repeat_num=1):
assert repeat_num >= 1
fps_list = []
for _ in range(repeat_num):
cp_cfg = copy.deepcopy(cfg)
fps_list.append(
measure_inference_speed(cp_cfg, checkpoint, max_iter, log_interval,
is_fuse_conv_bn))
if repeat_num > 1:
        fps_list_ = [round(fps, 1) for fps in fps_list]
        times_per_image_list_ = [round(1000 / fps, 1) for fps in fps_list]
        mean_fps_ = sum(fps_list_) / len(fps_list_)
        mean_times_per_image_ = sum(times_per_image_list_) / len(
            times_per_image_list_)
        print_log(
            f'Overall fps: {fps_list_}[{mean_fps_:.1f}] img / s, '
            f'times per image: '
            f'{times_per_image_list_}[{mean_times_per_image_:.1f}] ms / img',
            'current')
return fps_list
return fps_list[0]
# TODO: refactoring
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmyolo'))
distributed = False
if args.launcher != 'none':
init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))
distributed = True
assert get_world_size(
) == 1, 'Inference benchmark does not allow distributed multi-GPU'
cfg.distributed = distributed
log_file = None
if args.work_dir:
log_file = os.path.join(args.work_dir, 'benchmark.log')
mkdir_or_exist(args.work_dir)
MMLogger.get_instance('mmyolo', log_file=log_file, log_level='INFO')
repeat_measure_inference_speed(cfg, args.checkpoint, args.max_iter,
args.log_interval, args.fuse_conv_bn,
args.repeat_num)
if __name__ == '__main__':
main()
| 6,460
| 33.185185
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/analysis_tools/optimize_anchors.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Optimize anchor settings on a specific dataset.
This script provides three methods to optimize YOLO anchors including k-means
anchor cluster, differential evolution and v5-k-means. You can use
``--algorithm k-means``, ``--algorithm differential_evolution`` and
``--algorithm v5-k-means`` to switch those methods.
Example:
Use k-means anchor cluster::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--out-dir ${OUT_DIR}
Use differential evolution to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm differential_evolution \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--out-dir ${OUT_DIR}
Use v5-k-means to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm v5-k-means \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--prior_match_thr ${PRIOR_MATCH_THR} \
--out-dir ${OUT_DIR}
"""
import argparse
import os.path as osp
import random
from typing import Tuple
import numpy as np
import torch
from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps,
bbox_xyxy_to_cxcywh)
from mmdet.utils import replace_cfg_vals, update_data_root
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from scipy.optimize import differential_evolution
from torch import Tensor
from mmyolo.registry import DATASETS
try:
from scipy.cluster.vq import kmeans
except ImportError:
kmeans = None
def parse_args():
parser = argparse.ArgumentParser(description='Optimize anchor parameters.')
parser.add_argument('config', help='Train config file path.')
parser.add_argument(
'--input-shape',
type=int,
nargs='+',
default=[640, 640],
help='input image size, represent [width, height]')
parser.add_argument(
'--algorithm',
        default='DE',
        help='Algorithm used for anchor optimizing. '
        'Support "k-means" and "DE"/"differential_evolution" for YOLO, '
        'and "v5-k-means" is special for YOLOv5.')
parser.add_argument(
'--iters',
default=1000,
type=int,
help='Maximum iterations for optimizer.')
parser.add_argument(
'--prior-match-thr',
default=4.0,
type=float,
        help='anchor-label width/height ratio threshold '
        'hyperparameter used for training, default=4.0, this '
        'parameter is unique to v5-k-means')
parser.add_argument(
'--mutation-args',
type=float,
nargs='+',
default=[0.9, 0.1],
        help='parameter of the genetic algorithm used for anchor optimizing, '
        'represent [prob, sigma], this parameter is unique to v5-k-means')
parser.add_argument(
'--augment-args',
type=float,
nargs='+',
default=[0.9, 1.1],
        help='scale factor range of box size augmentation when measuring '
        'boxes against anchors, represent [min, max], this parameter is '
        'unique to v5-k-means')
parser.add_argument(
'--device', default='cuda:0', help='Device used for calculating.')
parser.add_argument(
'--out-dir',
default=None,
type=str,
help='Path to save anchor optimize result.')
args = parser.parse_args()
return args
class BaseAnchorOptimizer:
"""Base class for anchor optimizer.
Args:
dataset (obj:`Dataset`): Dataset object.
input_shape (list[int]): Input image shape of the model.
Format in [width, height].
num_anchor_per_level (list[int]) : Number of anchors for each level.
logger (obj:`logging.Logger`): The logger for logging.
device (str, optional): Device used for calculating.
Default: 'cuda:0'
out_dir (str, optional): Path to save anchor optimize result.
Default: None
"""
def __init__(self,
dataset,
input_shape,
num_anchor_per_level,
logger,
device='cuda:0',
out_dir=None):
self.dataset = dataset
self.input_shape = input_shape
self.num_anchor_per_level = num_anchor_per_level
self.num_anchors = sum(num_anchor_per_level)
self.logger = logger
self.device = device
self.out_dir = out_dir
bbox_whs, img_shapes = self.get_whs_and_shapes()
ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])
# resize to input shape
self.bbox_whs = bbox_whs / ratios
def get_whs_and_shapes(self):
"""Get widths and heights of bboxes and shapes of images.
Returns:
tuple[np.ndarray]: Array of bbox shapes and array of image
shapes with shape (num_bboxes, 2) in [width, height] format.
"""
self.logger.info('Collecting bboxes from annotation...')
bbox_whs = []
img_shapes = []
prog_bar = ProgressBar(len(self.dataset))
for idx in range(len(self.dataset)):
data_info = self.dataset.get_data_info(idx)
img_shape = np.array([data_info['width'], data_info['height']])
gt_instances = data_info['instances']
for instance in gt_instances:
bbox = np.array(instance['bbox'])
gt_filter_sizes = bbox[2:4] - bbox[0:2]
img_shapes.append(img_shape)
bbox_whs.append(gt_filter_sizes)
prog_bar.update()
print('\n')
bbox_whs = np.array(bbox_whs)
img_shapes = np.array(img_shapes)
self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')
return bbox_whs, img_shapes
def get_zero_center_bbox_tensor(self):
"""Get a tensor of bboxes centered at (0, 0).
Returns:
Tensor: Tensor of bboxes with shape (num_bboxes, 4)
in [xmin, ymin, xmax, ymax] format.
"""
whs = torch.from_numpy(self.bbox_whs).to(
self.device, dtype=torch.float32)
bboxes = bbox_cxcywh_to_xyxy(
torch.cat([torch.zeros_like(whs), whs], dim=1))
return bboxes
def optimize(self):
raise NotImplementedError
def save_result(self, anchors, path=None):
anchor_results = []
start = 0
for num in self.num_anchor_per_level:
end = num + start
anchor_results.append([(round(w), round(h))
for w, h in anchors[start:end]])
start = end
self.logger.info(f'Anchor optimize result:{anchor_results}')
if path:
json_path = osp.join(path, 'anchor_optimize_result.json')
dump(anchor_results, json_path)
self.logger.info(f'Result saved in {json_path}')
class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):
r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet.
<https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.
Args:
iters (int): Maximum iterations for k-means.
"""
def __init__(self, iters, **kwargs):
super().__init__(**kwargs)
self.iters = iters
def optimize(self):
anchors = self.kmeans_anchors()
self.save_result(anchors, self.out_dir)
def kmeans_anchors(self):
self.logger.info(
f'Start cluster {self.num_anchors} YOLO anchors with K-means...')
bboxes = self.get_zero_center_bbox_tensor()
cluster_center_idx = torch.randint(
0, bboxes.shape[0], (self.num_anchors, )).to(self.device)
assignments = torch.zeros((bboxes.shape[0], )).to(self.device)
cluster_centers = bboxes[cluster_center_idx]
if self.num_anchors == 1:
cluster_centers = self.kmeans_maximization(bboxes, assignments,
cluster_centers)
anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
return anchors
prog_bar = ProgressBar(self.iters)
for i in range(self.iters):
converged, assignments = self.kmeans_expectation(
bboxes, assignments, cluster_centers)
if converged:
self.logger.info(f'K-means process has converged at iter {i}.')
break
cluster_centers = self.kmeans_maximization(bboxes, assignments,
cluster_centers)
prog_bar.update()
print('\n')
avg_iou = bbox_overlaps(bboxes,
cluster_centers).max(1)[0].mean().item()
anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}')
return anchors
def kmeans_maximization(self, bboxes, assignments, centers):
"""Maximization part of EM algorithm(Expectation-Maximization)"""
new_centers = torch.zeros_like(centers)
for i in range(centers.shape[0]):
mask = (assignments == i)
if mask.sum():
new_centers[i, :] = bboxes[mask].mean(0)
return new_centers
def kmeans_expectation(self, bboxes, assignments, centers):
"""Expectation part of EM algorithm(Expectation-Maximization)"""
ious = bbox_overlaps(bboxes, centers)
closest = ious.argmax(1)
converged = (closest == assignments).all()
return converged, closest
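# A hedged, editor-added illustration of the E-step above: each box is
# assigned to the cluster center (anchor) with which it has the highest IoU.
def _example_kmeans_expectation():
    boxes = bbox_cxcywh_to_xyxy(
        torch.tensor([[0., 0., 10., 10.], [0., 0., 50., 60.]]))
    centers = bbox_cxcywh_to_xyxy(
        torch.tensor([[0., 0., 12., 12.], [0., 0., 48., 64.]]))
    assert bbox_overlaps(boxes, centers).argmax(1).tolist() == [0, 1]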
class YOLOV5KMeansAnchorOptimizer(BaseAnchorOptimizer):
r"""YOLOv5 anchor optimizer using shape k-means.
Code refer to `ultralytics/yolov5.
<https://github.com/ultralytics/yolov5/blob/master/utils/autoanchor.py>`_.
Args:
iters (int): Maximum iterations for k-means.
prior_match_thr (float): anchor-label width height
ratio threshold hyperparameter.
"""
def __init__(self,
iters,
prior_match_thr=4.0,
mutation_args=[0.9, 0.1],
augment_args=[0.9, 1.1],
**kwargs):
super().__init__(**kwargs)
self.iters = iters
self.prior_match_thr = prior_match_thr
[self.mutation_prob, self.mutation_sigma] = mutation_args
[self.augment_min, self.augment_max] = augment_args
def optimize(self):
self.logger.info(
f'Start cluster {self.num_anchors} YOLOv5 anchors with K-means...')
bbox_whs = torch.from_numpy(self.bbox_whs).to(
self.device, dtype=torch.float32)
anchors = self.anchor_generate(
bbox_whs,
num=self.num_anchors,
img_size=self.input_shape[0],
prior_match_thr=self.prior_match_thr,
iters=self.iters)
best_ratio, mean_matched = self.anchor_metric(bbox_whs, anchors)
self.logger.info(f'{mean_matched:.2f} anchors/target {best_ratio:.3f} '
'Best Possible Recall (BPR). ')
self.save_result(anchors.tolist(), self.out_dir)
def anchor_generate(self,
box_size: Tensor,
num: int = 9,
img_size: int = 640,
prior_match_thr: float = 4.0,
iters: int = 1000) -> Tensor:
"""cluster boxes metric with anchors.
Args:
            box_size (Tensor): The size of the boxes with shape
                (box_num, 2), where 2 means width and height.
num (int): number of anchors.
img_size (int): image size used for training
prior_match_thr (float): width/height ratio threshold
used for training
iters (int): iterations to evolve anchors using genetic algorithm
Returns:
anchors (Tensor): kmeans evolved anchors
"""
thr = 1 / prior_match_thr
# step1: filter small bbox
box_size = self._filter_box(box_size)
assert num <= len(box_size)
# step2: init anchors
if kmeans:
try:
self.logger.info(
'beginning init anchors with scipy kmeans method')
# sigmas for whitening
sigmas = box_size.std(0).cpu().numpy()
anchors = kmeans(
box_size.cpu().numpy() / sigmas, num, iter=30)[0] * sigmas
# kmeans may return fewer points than requested
# if width/height is insufficient or too similar
assert num == len(anchors)
except Exception:
self.logger.warning(
                    'scipy kmeans method cannot get enough points '
                    'because the width/height values are insufficient or '
                    'too similar, now switching from kmeans to random init.')
anchors = np.sort(np.random.rand(num * 2)).reshape(
num, 2) * img_size
else:
self.logger.info(
                'cannot find the scipy package, switching from kmeans '
                'to random init; you can install the scipy package to '
                'get a better anchor init')
anchors = np.sort(np.random.rand(num * 2)).reshape(num,
2) * img_size
self.logger.info('init done, beginning evolve anchors...')
# sort small to large
anchors = torch.tensor(anchors[np.argsort(anchors.prod(1))]).to(
box_size.device, dtype=torch.float32)
# step3: evolve anchors use Genetic Algorithm
prog_bar = ProgressBar(iters)
fitness = self._anchor_fitness(box_size, anchors, thr)
cluster_shape = anchors.shape
for _ in range(iters):
mutate_result = np.ones(cluster_shape)
# mutate until a change occurs (prevent duplicates)
while (mutate_result == 1).all():
# mutate_result is scale factor of anchors, between 0.3 and 3
mutate_result = (
(np.random.random(cluster_shape) < self.mutation_prob) *
random.random() * np.random.randn(*cluster_shape) *
self.mutation_sigma + 1).clip(0.3, 3.0)
mutate_result = torch.from_numpy(mutate_result).to(box_size.device)
new_anchors = (anchors.clone() * mutate_result).clip(min=2.0)
new_fitness = self._anchor_fitness(box_size, new_anchors, thr)
if new_fitness > fitness:
fitness = new_fitness
anchors = new_anchors.clone()
prog_bar.update()
print('\n')
# sort small to large
anchors = anchors[torch.argsort(anchors.prod(1))]
self.logger.info(f'Anchor cluster finish. fitness = {fitness:.4f}')
return anchors
def anchor_metric(self,
box_size: Tensor,
anchors: Tensor,
threshold: float = 4.0) -> Tuple:
"""compute boxes metric with anchors.
Args:
            box_size (Tensor): The size of the boxes with shape
                (box_num, 2), where 2 means width and height.
            anchors (Tensor): The size of the anchors with shape
                (anchor_num, 2), where 2 means width and height.
threshold (float): the compare threshold of ratio
Returns:
Tuple: a tuple of metric result, best_ratio_mean and mean_matched
"""
# step1: augment scale
        # A scaling factor between augment_min and augment_max is drawn
        # randomly from a uniform distribution for each box
scale = np.random.uniform(
self.augment_min, self.augment_max, size=(box_size.shape[0], 1))
box_size = torch.tensor(
np.array(
[l[:, ] * s for s, l in zip(scale,
box_size.cpu().numpy())])).to(
box_size.device,
dtype=torch.float32)
# step2: calculate ratio
min_ratio, best_ratio = self._metric(box_size, anchors)
mean_matched = (min_ratio > 1 / threshold).float().sum(1).mean()
best_ratio_mean = (best_ratio > 1 / threshold).float().mean()
return best_ratio_mean, mean_matched
def _filter_box(self, box_size: Tensor) -> Tensor:
small_cnt = (box_size < 3.0).any(1).sum()
if small_cnt:
self.logger.warning(
f'Extremely small objects found: {small_cnt} '
f'of {len(box_size)} labels are <3 pixels in size')
        # keep boxes with at least one side >= 2 pixels
filter_sizes = box_size[(box_size >= 2.0).any(1)]
return filter_sizes
def _anchor_fitness(self, box_size: Tensor, anchors: Tensor, thr: float):
"""mutation fitness."""
_, best = self._metric(box_size, anchors)
return (best * (best > thr).float()).mean()
def _metric(self, box_size: Tensor, anchors: Tensor) -> Tuple:
"""compute boxes metric with anchors.
Args:
            box_size (Tensor): The size of the boxes with shape
                (box_num, 2), where 2 means width and height.
            anchors (Tensor): The size of the anchors with shape
                (anchor_num, 2), where 2 means width and height.
Returns:
Tuple: a tuple of metric result, min_ratio and best_ratio
"""
# ratio means the (width_1/width_2 and height_1/height_2) ratio of each
# box and anchor, the ratio shape is torch.Size([box_num,anchor_num,2])
ratio = box_size[:, None] / anchors[None]
# min_ratio records the min ratio of each box with all anchor,
# min_ratio.shape is torch.Size([box_num,anchor_num])
# notice:
# smaller ratio means worse shape-match between boxes and anchors
min_ratio = torch.min(ratio, 1 / ratio).min(2)[0]
# find the best shape-match ratio for each box
# box_best_ratio.shape is torch.Size([box_num])
best_ratio = min_ratio.max(1)[0]
return min_ratio, best_ratio
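# A hedged, editor-added standalone check of the shape-match ratio computed
# in ``_metric``: a 20x40 box matches a 20x40 anchor perfectly (ratio 1.0)
# but a 40x20 anchor only with ratio 0.5 (each side is off by a factor of 2).
def _example_shape_match_ratio():
    box_size = torch.tensor([[20., 40.]])
    anchors = torch.tensor([[20., 40.], [40., 20.]])
    ratio = box_size[:, None] / anchors[None]
    min_ratio = torch.min(ratio, 1 / ratio).min(2)[0]
    assert min_ratio.tolist() == [[1.0, 0.5]]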
class YOLODEAnchorOptimizer(BaseAnchorOptimizer):
"""YOLO anchor optimizer using differential evolution algorithm.
Args:
iters (int): Maximum iterations for k-means.
strategy (str): The differential evolution strategy to use.
Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1exp'
- 'randtobest1exp'
- 'currenttobest1exp'
- 'best2exp'
- 'rand2exp'
- 'randtobest1bin'
- 'currenttobest1bin'
- 'best2bin'
- 'rand2bin'
- 'rand1bin'
Default: 'best1bin'.
population_size (int): Total population size of evolution algorithm.
Default: 15.
convergence_thr (float): Tolerance for convergence, the
optimizing stops when ``np.std(pop) <= abs(convergence_thr)
+ convergence_thr * np.abs(np.mean(population_energies))``,
respectively. Default: 0.0001.
mutation (tuple[float]): Range of dithering randomly changes the
mutation constant. Default: (0.5, 1).
recombination (float): Recombination constant of crossover probability.
Default: 0.7.
"""
def __init__(self,
iters,
strategy='best1bin',
population_size=15,
convergence_thr=0.0001,
mutation=(0.5, 1),
recombination=0.7,
**kwargs):
super().__init__(**kwargs)
self.iters = iters
self.strategy = strategy
self.population_size = population_size
self.convergence_thr = convergence_thr
self.mutation = mutation
self.recombination = recombination
def optimize(self):
anchors = self.differential_evolution()
self.save_result(anchors, self.out_dir)
def differential_evolution(self):
bboxes = self.get_zero_center_bbox_tensor()
bounds = []
for i in range(self.num_anchors):
bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])
result = differential_evolution(
func=self.avg_iou_cost,
bounds=bounds,
args=(bboxes, ),
strategy=self.strategy,
maxiter=self.iters,
popsize=self.population_size,
tol=self.convergence_thr,
mutation=self.mutation,
recombination=self.recombination,
updating='immediate',
disp=True)
self.logger.info(
f'Anchor evolution finish. Average IOU: {1 - result.fun}')
anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
return anchors
@staticmethod
def avg_iou_cost(anchor_params, bboxes):
assert len(anchor_params) % 2 == 0
anchor_whs = torch.tensor(
[[w, h]
for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(
bboxes.device, dtype=bboxes.dtype)
anchor_boxes = bbox_cxcywh_to_xyxy(
torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))
ious = bbox_overlaps(bboxes, anchor_boxes)
max_ious, _ = ious.max(1)
cost = 1 - max_ious.mean().item()
return cost
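# A hedged, editor-added sanity check of ``avg_iou_cost``: an anchor
# identical to the only ground-truth box has IoU 1.0, hence zero cost.
def _example_avg_iou_cost():
    whs = torch.tensor([[10., 20.]])
    bboxes = bbox_cxcywh_to_xyxy(
        torch.cat([torch.zeros_like(whs), whs], dim=1))
    assert YOLODEAnchorOptimizer.avg_iou_cost([10., 20.], bboxes) == 0.0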
def main():
logger = MMLogger.get_current_instance()
args = parse_args()
cfg = args.config
cfg = Config.fromfile(cfg)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
init_default_scope(cfg.get('default_scope', 'mmyolo'))
input_shape = args.input_shape
assert len(input_shape) == 2
anchor_type = cfg.model.bbox_head.prior_generator.type
assert anchor_type == 'mmdet.YOLOAnchorGenerator', \
f'Only support optimize YOLOAnchor, but get {anchor_type}.'
base_sizes = cfg.model.bbox_head.prior_generator.base_sizes
num_anchor_per_level = [len(sizes) for sizes in base_sizes]
train_data_cfg = cfg.train_dataloader
while 'dataset' in train_data_cfg:
train_data_cfg = train_data_cfg['dataset']
dataset = DATASETS.build(train_data_cfg)
if args.algorithm == 'k-means':
optimizer = YOLOKMeansAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchor_per_level=num_anchor_per_level,
iters=args.iters,
logger=logger,
out_dir=args.out_dir)
    elif args.algorithm in ('DE', 'differential_evolution'):
optimizer = YOLODEAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchor_per_level=num_anchor_per_level,
iters=args.iters,
logger=logger,
out_dir=args.out_dir)
elif args.algorithm == 'v5-k-means':
optimizer = YOLOV5KMeansAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchor_per_level=num_anchor_per_level,
iters=args.iters,
prior_match_thr=args.prior_match_thr,
mutation_args=args.mutation_args,
augment_args=args.augment_args,
logger=logger,
out_dir=args.out_dir)
else:
        raise NotImplementedError(
            'Only support k-means, DE (differential_evolution) and '
            f'v5-k-means, but got {args.algorithm}')
optimizer.optimize()
if __name__ == '__main__':
main()
| 24,296
| 36.49537
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/analysis_tools/dataset_analysis.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path
from statistics import median
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from mmengine.config import Config
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from prettytable import PrettyTable
from mmyolo.registry import DATASETS
from mmyolo.utils.misc import show_data_classes
def parse_args():
parser = argparse.ArgumentParser(
description='Distribution of categories and bbox instances')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--val-dataset',
default=False,
action='store_true',
        help='The default is train_dataset. '
        'To change it to val_dataset, enter "--val-dataset"')
parser.add_argument(
'--class-name',
default=None,
type=str,
help='Display specific class, e.g., "bicycle"')
parser.add_argument(
'--area-rule',
default=None,
type=int,
nargs='+',
        help='Redefine area rules, but use no more than three numbers,'
        ' e.g., 30 70 125')
parser.add_argument(
'--func',
default=None,
type=str,
choices=[
'show_bbox_num', 'show_bbox_wh', 'show_bbox_wh_ratio',
'show_bbox_area'
],
help='Dataset analysis function selection.')
parser.add_argument(
'--out-dir',
default='./dataset_analysis',
type=str,
help='Output directory of dataset analysis visualization results,'
' Save in "./dataset_analysis/" by default')
args = parser.parse_args()
return args
def show_bbox_num(cfg, out_dir, fig_set, class_name, class_num):
"""Display the distribution map of categories and number of bbox
instances."""
print('\n\nDrawing bbox_num figure:')
# Draw designs
fig = plt.figure(
figsize=(fig_set['figsize'][0], fig_set['figsize'][1]), dpi=300)
plt.bar(class_name, class_num, align='center')
# Draw titles, labels and so on
for x, y in enumerate(class_num):
plt.text(x, y, '%s' % y, ha='center', fontsize=fig_set['fontsize'] + 3)
plt.xticks(rotation=fig_set['xticks_angle'])
plt.xlabel('Category Name')
plt.ylabel('Num of instances')
plt.title(cfg.dataset_type)
# Save figure
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_name = fig_set['out_name']
fig.savefig(
f'{out_dir}/{out_name}_bbox_num.jpg',
bbox_inches='tight',
pad_inches=0.1) # Save Image
plt.close()
print(f'End and save in {out_dir}/{out_name}_bbox_num.jpg')
def show_bbox_wh(out_dir, fig_set, class_bbox_w, class_bbox_h, class_name):
"""Display the width and height distribution of categories and bbox
instances."""
print('\n\nDrawing bbox_wh figure:')
# Draw designs
fig, ax = plt.subplots(
figsize=(fig_set['figsize'][0], fig_set['figsize'][1]), dpi=300)
# Set the position of the map and label on the x-axis
positions_w = list(range(0, 12 * len(class_name), 12))
positions_h = list(range(6, 12 * len(class_name), 12))
positions_x_label = list(range(3, 12 * len(class_name) + 1, 12))
ax.violinplot(
class_bbox_w, positions_w, showmeans=True, showmedians=True, widths=4)
ax.violinplot(
class_bbox_h, positions_h, showmeans=True, showmedians=True, widths=4)
# Draw titles, labels and so on
plt.xticks(rotation=fig_set['xticks_angle'])
plt.ylabel('The width or height of bbox')
plt.xlabel('Class name')
plt.title('Width or height distribution of classes and bbox instances')
    # Draw the max, min and median of width data in violin chart
for i in range(len(class_bbox_w)):
plt.text(
positions_w[i],
median(class_bbox_w[i]),
f'{"%.2f" % median(class_bbox_w[i])}',
ha='center',
fontsize=fig_set['fontsize'])
plt.text(
positions_w[i],
max(class_bbox_w[i]),
f'{"%.2f" % max(class_bbox_w[i])}',
ha='center',
fontsize=fig_set['fontsize'])
plt.text(
positions_w[i],
min(class_bbox_w[i]),
f'{"%.2f" % min(class_bbox_w[i])}',
ha='center',
fontsize=fig_set['fontsize'])
# Draw the max, min and median of height data in violin chart
for i in range(len(positions_h)):
plt.text(
positions_h[i],
median(class_bbox_h[i]),
f'{"%.2f" % median(class_bbox_h[i])}',
ha='center',
fontsize=fig_set['fontsize'])
plt.text(
positions_h[i],
max(class_bbox_h[i]),
f'{"%.2f" % max(class_bbox_h[i])}',
ha='center',
fontsize=fig_set['fontsize'])
plt.text(
positions_h[i],
min(class_bbox_h[i]),
f'{"%.2f" % min(class_bbox_h[i])}',
ha='center',
fontsize=fig_set['fontsize'])
# Draw Legend
plt.setp(ax, xticks=positions_x_label, xticklabels=class_name)
labels = ['bbox_w', 'bbox_h']
colors = ['steelblue', 'darkorange']
patches = [
mpatches.Patch(color=colors[i], label=f'{labels[i]:s}')
for i in range(len(colors))
]
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height * 0.8])
ax.legend(loc='upper center', handles=patches, ncol=2)
# Save figure
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_name = fig_set['out_name']
fig.savefig(
f'{out_dir}/{out_name}_bbox_wh.jpg',
bbox_inches='tight',
pad_inches=0.1) # Save Image
plt.close()
print(f'End and save in {out_dir}/{out_name}_bbox_wh.jpg')
def show_bbox_wh_ratio(out_dir, fig_set, class_name, class_bbox_ratio):
"""Display the distribution map of category and bbox instance width and
height ratio."""
print('\n\nDrawing bbox_wh_ratio figure:')
# Draw designs
fig, ax = plt.subplots(
figsize=(fig_set['figsize'][0], fig_set['figsize'][1]), dpi=300)
# Set the position of the map and label on the x-axis
positions = list(range(0, 6 * len(class_name), 6))
ax.violinplot(
class_bbox_ratio,
positions,
showmeans=True,
showmedians=True,
widths=5)
# Draw titles, labels and so on
plt.xticks(rotation=fig_set['xticks_angle'])
plt.ylabel('Ratio of width to height of bbox')
plt.xlabel('Class name')
plt.title('Width to height ratio distribution of class and bbox instances')
    # Draw the max, min and median of the ratio data in the violin chart
for i in range(len(class_bbox_ratio)):
plt.text(
positions[i],
median(class_bbox_ratio[i]),
f'{"%.2f" % median(class_bbox_ratio[i])}',
ha='center',
fontsize=fig_set['fontsize'])
plt.text(
positions[i],
max(class_bbox_ratio[i]),
f'{"%.2f" % max(class_bbox_ratio[i])}',
ha='center',
fontsize=fig_set['fontsize'])
plt.text(
positions[i],
min(class_bbox_ratio[i]),
f'{"%.2f" % min(class_bbox_ratio[i])}',
ha='center',
fontsize=fig_set['fontsize'])
# Set the position of the map and label on the x-axis
plt.setp(ax, xticks=positions, xticklabels=class_name)
# Save figure
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_name = fig_set['out_name']
fig.savefig(
f'{out_dir}/{out_name}_bbox_ratio.jpg',
bbox_inches='tight',
pad_inches=0.1) # Save Image
plt.close()
print(f'End and save in {out_dir}/{out_name}_bbox_ratio.jpg')
def show_bbox_area(out_dir, fig_set, area_rule, class_name, bbox_area_num):
"""Display the distribution map of category and bbox instance area based on
the rules of large, medium and small objects."""
print('\n\nDrawing bbox_area figure:')
    # Set the spacing between class labels and the width of each bar
# Set the required labels and colors
positions = np.arange(0, 2 * len(class_name), 2)
width = 0.4
    labels = ['Small', 'Medium', 'Large', 'Huge']
colors = ['#438675', '#F7B469', '#6BA6DA', '#913221']
# Draw designs
fig = plt.figure(
figsize=(fig_set['figsize'][0], fig_set['figsize'][1]), dpi=300)
for i in range(len(area_rule) - 1):
area_num = [bbox_area_num[idx][i] for idx in range(len(class_name))]
plt.bar(
positions + width * i,
area_num,
width,
label=labels[i],
color=colors[i])
        for x, y in zip(positions.tolist(), area_num):
plt.text(
x + width * i,
y,
y,
ha='center',
fontsize=fig_set['fontsize'] - 1)
# Draw titles, labels and so on
plt.xticks(rotation=fig_set['xticks_angle'])
plt.xticks(positions + width * ((len(area_rule) - 2) / 2), class_name)
plt.ylabel('Class Area')
plt.xlabel('Class Name')
plt.title(
'Area and number of large, medium and small objects of each class')
# Set and Draw Legend
patches = [
mpatches.Patch(color=colors[i], label=f'{labels[i]:s}')
for i in range(len(area_rule) - 1)
]
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height * 0.8])
ax.legend(loc='upper center', handles=patches, ncol=len(area_rule) - 1)
# Save figure
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_name = fig_set['out_name']
fig.savefig(
f'{out_dir}/{out_name}_bbox_area.jpg',
bbox_inches='tight',
pad_inches=0.1) # Save Image
plt.close()
print(f'End and save in {out_dir}/{out_name}_bbox_area.jpg')
def show_class_list(classes, class_num):
"""Print the data of the class obtained by the current run."""
print('\n\nThe information obtained is as follows:')
class_info = PrettyTable()
class_info.title = 'Information of dataset class'
# List Print Settings
# If the quantity is too large, 25 rows will be displayed in each column
    if len(classes) <= 25:
class_info.add_column('Class name', classes)
class_info.add_column('Bbox num', class_num)
    else:
        # ceil division so class counts divisible by 25 are not skipped
        col_num = (len(classes) + 24) // 25
class_nums = class_num.tolist()
class_name_list = list(classes)
for i in range(0, (col_num * 25) - len(classes)):
class_name_list.append('')
class_nums.append('')
for i in range(0, len(class_name_list), 25):
class_info.add_column('Class name', class_name_list[i:i + 25])
class_info.add_column('Bbox num', class_nums[i:i + 25])
# Align display data to the left
class_info.align['Class name'] = 'l'
class_info.align['Bbox num'] = 'l'
print(class_info)
def show_data_list(args, area_rule):
"""Print run setup information."""
print('\n\nPrint current running information:')
data_info = PrettyTable()
data_info.title = 'Dataset information'
# Print the corresponding information according to the settings
if args.val_dataset is False:
data_info.add_column('Dataset type', ['train_dataset'])
elif args.val_dataset is True:
data_info.add_column('Dataset type', ['val_dataset'])
if args.class_name is None:
data_info.add_column('Class name', ['All classes'])
else:
data_info.add_column('Class name', [args.class_name])
if args.func is None:
data_info.add_column('Function', ['All function'])
else:
data_info.add_column('Function', [args.func])
data_info.add_column('Area rule', [area_rule])
print(data_info)
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
init_default_scope(cfg.get('default_scope', 'mmyolo'))
def replace_pipeline_to_none(cfg):
"""Recursively iterate over all dataset(or datasets) and set their
pipelines to none.Datasets are mean ConcatDataset.
Recursively terminates only when all dataset(or datasets) have been
traversed
"""
if cfg.get('dataset', None) is None and cfg.get('datasets',
None) is None:
return
dataset = cfg.dataset if cfg.get('dataset', None) else cfg.datasets
if isinstance(dataset, list):
for item in dataset:
item.pipeline = None
elif dataset.get('pipeline', None):
dataset.pipeline = None
else:
replace_pipeline_to_none(dataset)
# 1.Build Dataset
if args.val_dataset is False:
replace_pipeline_to_none(cfg.train_dataloader)
dataset = DATASETS.build(cfg.train_dataloader.dataset)
else:
replace_pipeline_to_none(cfg.val_dataloader)
dataset = DATASETS.build(cfg.val_dataloader.dataset)
# 2.Prepare data
# Drawing settings
fig_all_set = {
'figsize': [35, 18],
'fontsize': int(10 - 0.08 * len(dataset.metainfo['classes'])),
'xticks_angle': 70,
'out_name': cfg.dataset_type
}
fig_one_set = {
'figsize': [15, 10],
'fontsize': 10,
'xticks_angle': 0,
'out_name': args.class_name
}
# Call the category name and save address
if args.class_name is None:
classes = dataset.metainfo['classes']
classes_idx = [i for i in range(len(classes))]
fig_set = fig_all_set
elif args.class_name in dataset.metainfo['classes']:
classes = [args.class_name]
classes_idx = [dataset.metainfo['classes'].index(args.class_name)]
fig_set = fig_one_set
else:
data_classes = dataset.metainfo['classes']
show_data_classes(data_classes)
        raise RuntimeError(f'Expected args.class_name to be one of the '
                           f'dataset classes, but got "{args.class_name}"')
# Building Area Rules
if args.area_rule is None:
area_rule = [0, 32, 96, 1e5]
elif args.area_rule and len(args.area_rule) <= 3:
area_rules = [0] + args.area_rule + [1e5]
area_rule = sorted(area_rules)
else:
raise RuntimeError(
            f'Expected area_rule (got "{args.area_rule}") to be at most '
            'three numbers, e.g. 30 60 120.')
# Build arrays or lists to store data for each category
class_num = np.zeros((len(classes), ), dtype=np.int64)
class_bbox = [[] for _ in classes]
class_name = []
class_bbox_w = []
class_bbox_h = []
class_bbox_ratio = []
bbox_area_num = []
show_data_list(args, area_rule)
# Get the quantity and bbox data corresponding to each category
print('\nRead the information of each picture in the dataset:')
progress_bar = ProgressBar(len(dataset))
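    # When --class-name is given, only one class is tracked, so its counts
    # and boxes are accumulated in slot 0; otherwise each bbox_label indexes
    # its own slot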
for index in range(len(dataset)):
for instance in dataset[index]['instances']:
if instance[
'bbox_label'] in classes_idx and args.class_name is None:
class_num[instance['bbox_label']] += 1
class_bbox[instance['bbox_label']].append(instance['bbox'])
elif instance['bbox_label'] in classes_idx and args.class_name:
class_num[0] += 1
class_bbox[0].append(instance['bbox'])
progress_bar.update()
show_class_list(classes, class_num)
# Get the width, height and area of bbox corresponding to each category
print('\nRead bbox information in each class:')
progress_bar_classes = ProgressBar(len(classes))
    # iterate without shadowing the outer `classes`/`classes_idx` names
    for idx, cls_name in enumerate(classes):
bbox = np.array(class_bbox[idx])
bbox_area_nums = np.zeros((len(area_rule) - 1, ), dtype=np.int64)
if len(bbox) > 0:
bbox_wh = bbox[:, 2:4] - bbox[:, 0:2]
bbox_ratio = bbox_wh[:, 0] / bbox_wh[:, 1]
bbox_area = bbox_wh[:, 0] * bbox_wh[:, 1]
class_bbox_w.append(bbox_wh[:, 0].tolist())
class_bbox_h.append(bbox_wh[:, 1].tolist())
class_bbox_ratio.append(bbox_ratio.tolist())
            # area_rule holds edge lengths; squaring them yields COCO-style
            # area intervals, and each bbox falls into exactly one of them
for i in range(len(area_rule) - 1):
bbox_area_nums[i] = np.logical_and(
bbox_area >= area_rule[i]**2,
bbox_area < area_rule[i + 1]**2).sum()
elif len(bbox) == 0:
class_bbox_w.append([0])
class_bbox_h.append([0])
class_bbox_ratio.append([0])
        class_name.append(cls_name)
bbox_area_num.append(bbox_area_nums.tolist())
progress_bar_classes.update()
# 3.draw Dataset Information
if args.func is None:
show_bbox_num(cfg, args.out_dir, fig_set, class_name, class_num)
show_bbox_wh(args.out_dir, fig_set, class_bbox_w, class_bbox_h,
class_name)
show_bbox_wh_ratio(args.out_dir, fig_set, class_name, class_bbox_ratio)
show_bbox_area(args.out_dir, fig_set, area_rule, class_name,
bbox_area_num)
elif args.func == 'show_bbox_num':
show_bbox_num(cfg, args.out_dir, fig_set, class_name, class_num)
elif args.func == 'show_bbox_wh':
show_bbox_wh(args.out_dir, fig_set, class_bbox_w, class_bbox_h,
class_name)
elif args.func == 'show_bbox_wh_ratio':
show_bbox_wh_ratio(args.out_dir, fig_set, class_name, class_bbox_ratio)
elif args.func == 'show_bbox_area':
show_bbox_area(args.out_dir, fig_set, area_rule, class_name,
bbox_area_num)
else:
raise RuntimeError(
'Please enter the correct func name, e.g., show_bbox_num')
if __name__ == '__main__':
main()
| 18,087
| 35.248497
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/analysis_tools/get_flops.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from pathlib import Path
import torch
from mmdet.registry import MODELS
from mmengine.analysis import get_model_complexity_info
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger
from mmengine.model import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmyolo.utils import switch_to_deploy
def parse_args():
parser = argparse.ArgumentParser(description='Get a detector flops')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[640, 640],
help='input image size')
parser.add_argument(
'--show-arch',
action='store_true',
        help='whether to return the statistics in the form of network layers')
parser.add_argument(
'--not-show-table',
action='store_true',
        help='whether to return the statistics in the form of a table')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
return parser.parse_args()
def inference(args, logger):
config_name = Path(args.config)
if not config_name.exists():
logger.error(f'{config_name} not found.')
cfg = Config.fromfile(args.config)
cfg.work_dir = tempfile.TemporaryDirectory().name
cfg.log_level = 'WARN'
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmyolo'))
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
# model
model = MODELS.build(cfg.model)
if torch.cuda.is_available():
model.cuda()
model = revert_sync_batchnorm(model)
model.eval()
switch_to_deploy(model)
# input tensor
    # automatically generate an input tensor with the given input shape.
data_batch = {'inputs': [torch.rand(3, h, w)], 'batch_samples': [None]}
data = model.data_preprocessor(data_batch)
result = {'ori_shape': (h, w), 'pad_shape': data['inputs'].shape[-2:]}
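    # pad_shape may differ from ori_shape when the data preprocessor pads
    # the input to a multiple of the size divisor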
outputs = get_model_complexity_info(
model,
input_shape=None,
inputs=data['inputs'], # the input tensor of the model
show_table=not args.not_show_table, # show the complexity table
show_arch=args.show_arch) # show the complexity arch
result['flops'] = outputs['flops_str']
result['params'] = outputs['params_str']
result['out_table'] = outputs['out_table']
result['out_arch'] = outputs['out_arch']
return result
def main():
args = parse_args()
logger = MMLogger.get_instance(name='MMLogger')
result = inference(args, logger)
split_line = '=' * 30
ori_shape = result['ori_shape']
pad_shape = result['pad_shape']
flops = result['flops']
params = result['params']
print(result['out_table']) # print related information by table
print(result['out_arch']) # print related information by network layers
if pad_shape != ori_shape:
print(f'{split_line}\nUse size divisor set input shape '
f'from {ori_shape} to {pad_shape}')
print(f'{split_line}\n'
f'Input shape: {pad_shape}\nModel Flops: {flops}\n'
f'Model Parameters: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify '
'that the flops computation is correct.')
if __name__ == '__main__':
main()
| 4,085
| 31.951613
| 78
|
py
|
mmyolo
|
mmyolo-main/tools/analysis_tools/browse_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import sys
from typing import Tuple
import cv2
import mmcv
import numpy as np
from mmdet.models.utils import mask2ndarray
from mmdet.structures.bbox import BaseBoxes
from mmengine.config import Config, DictAction
from mmengine.dataset import Compose
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from mmengine.visualization import Visualizer
from mmyolo.registry import DATASETS, VISUALIZERS
# TODO: Support for printing the change in key of results
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--phase',
'-p',
default='train',
type=str,
choices=['train', 'test', 'val'],
        help='phase of dataset to visualize, accepts "train", "test" and '
        '"val". Defaults to "train".')
parser.add_argument(
'--mode',
'-m',
default='transformed',
type=str,
choices=['original', 'transformed', 'pipeline'],
        help='display mode; show original images, transformed images, or '
        'comparison images. "original" means show images loaded from disk; '
        '"transformed" means show images after the transforms; "pipeline" '
        'means show all the intermediate images. Defaults to "transformed".')
parser.add_argument(
'--out-dir',
default='output',
type=str,
        help='Directory to save visualized images when there is no display '
        'interface.')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-number',
'-n',
type=int,
default=sys.maxsize,
        help='number of images selected to visualize; must be bigger than '
        '0. If the number is bigger than the length of the dataset, show '
        'all the images in the dataset; defaults to "sys.maxsize", i.e. '
        'show every image in the dataset')
parser.add_argument(
'--show-interval',
'-i',
type=float,
default=3,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def _get_adaptive_scale(img_shape: Tuple[int, int],
min_scale: float = 0.3,
max_scale: float = 3.0) -> float:
"""Get adaptive scale according to image shape.
The target scale depends on the the short edge length of the image. If the
short edge length equals 224, the output is 1.0. And output linear
scales according the short edge length. You can also specify the minimum
scale and the maximum scale to limit the linear scale.
Args:
img_shape (Tuple[int, int]): The shape of the canvas image.
min_scale (int): The minimum scale. Defaults to 0.3.
max_scale (int): The maximum scale. Defaults to 3.0.
Returns:
int: The adaptive scale.
"""
short_edge_length = min(img_shape)
scale = short_edge_length / 224.
return min(max(scale, min_scale), max_scale)
def make_grid(imgs, names):
"""Concat list of pictures into a single big picture, align height here."""
visualizer = Visualizer.get_current_instance()
ori_shapes = [img.shape[:2] for img in imgs]
max_height = int(max(img.shape[0] for img in imgs) * 1.1)
min_width = min(img.shape[1] for img in imgs)
horizontal_gap = min_width // 10
img_scale = _get_adaptive_scale((max_height, min_width))
texts = []
text_positions = []
start_x = 0
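    # pad every image vertically so all share max_height (plus room below
    # for the caption text) and separate neighbours by half the horizontal
    # gap on each side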
for i, img in enumerate(imgs):
pad_height = (max_height - img.shape[0]) // 2
pad_width = horizontal_gap // 2
# make border
imgs[i] = cv2.copyMakeBorder(
img,
pad_height,
max_height - img.shape[0] - pad_height + int(img_scale * 30 * 2),
pad_width,
pad_width,
cv2.BORDER_CONSTANT,
value=(255, 255, 255))
texts.append(f'{"execution: "}{i}\n{names[i]}\n{ori_shapes[i]}')
text_positions.append(
[start_x + img.shape[1] // 2 + pad_width, max_height])
start_x += img.shape[1] + horizontal_gap
display_img = np.concatenate(imgs, axis=1)
visualizer.set_image(display_img)
img_scale = _get_adaptive_scale(display_img.shape[:2])
visualizer.draw_texts(
texts,
positions=np.array(text_positions),
font_sizes=img_scale * 7,
colors='black',
horizontal_alignments='center',
font_families='monospace')
return visualizer.get_image()
class InspectCompose(Compose):
"""Compose multiple transforms sequentially.
And record "img" field of all results in one list.
"""
def __init__(self, transforms, intermediate_imgs):
super().__init__(transforms=transforms)
self.intermediate_imgs = intermediate_imgs
def __call__(self, data):
if 'img' in data:
self.intermediate_imgs.append({
'name': 'original',
'img': data['img'].copy()
})
self.ptransforms = [
self.transforms[i] for i in range(len(self.transforms) - 1)
]
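        # run every transform except the final PackDetInputs here; the last
        # transform is re-applied after each step so that a visualizable
        # data sample is packed for every intermediate image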
for t in self.ptransforms:
data = t(data)
# Keep the same meta_keys in the PackDetInputs
self.transforms[-1].meta_keys = [key for key in data]
data_sample = self.transforms[-1](data)
if data is None:
return None
if 'img' in data:
self.intermediate_imgs.append({
'name':
t.__class__.__name__,
'dataset_sample':
data_sample['data_samples']
})
return data
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmyolo'))
dataset_cfg = cfg.get(args.phase + '_dataloader').get('dataset')
dataset = DATASETS.build(dataset_cfg)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
intermediate_imgs = []
if not hasattr(dataset, 'pipeline'):
# for dataset_wrapper
dataset = dataset.dataset
# TODO: The dataset wrapper occasion is not considered here
dataset.pipeline = InspectCompose(dataset.pipeline.transforms,
intermediate_imgs)
# init visualization image number
assert args.show_number > 0
display_number = min(args.show_number, len(dataset))
progress_bar = ProgressBar(display_number)
for i, item in zip(range(display_number), dataset):
image_i = []
result_i = [result['dataset_sample'] for result in intermediate_imgs]
for k, datasample in enumerate(result_i):
image = datasample.img
gt_instances = datasample.gt_instances
image = image[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
gt_instances.masks = masks.astype(bool)
datasample.gt_instances = gt_instances
# get filename from dataset or just use index as filename
visualizer.add_datasample(
'result',
image,
datasample,
draw_pred=False,
draw_gt=True,
show=False)
image_show = visualizer.get_image()
image_i.append(image_show)
if args.mode == 'original':
image = image_i[0]
elif args.mode == 'transformed':
image = image_i[-1]
else:
image = make_grid([result for result in image_i],
[result['name'] for result in intermediate_imgs])
if hasattr(datasample, 'img_path'):
filename = osp.basename(datasample.img_path)
else:
            # some datasets do not have an image path
filename = f'{i}.jpg'
out_file = osp.join(args.out_dir,
filename) if args.out_dir is not None else None
if out_file is not None:
mmcv.imwrite(image[..., ::-1], out_file)
if not args.not_show:
visualizer.show(
image, win_name=filename, wait_time=args.show_interval)
intermediate_imgs.clear()
progress_bar.update()
if __name__ == '__main__':
main()
| 9,343
| 34.664122
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/analysis_tools/confusion_matrix.py
|
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator
from mmcv.ops import nms
from mmdet.evaluation import bbox_overlaps
from mmdet.utils import replace_cfg_vals, update_data_root
from mmengine import Config, DictAction
from mmengine.fileio import load
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from mmyolo.registry import DATASETS
def parse_args():
parser = argparse.ArgumentParser(
description='Generate confusion matrix from detection results')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'prediction_path', help='prediction path where test .pkl result')
parser.add_argument(
'save_dir', help='directory where confusion matrix will be saved')
parser.add_argument(
'--show', action='store_true', help='show confusion matrix')
parser.add_argument(
'--color-theme',
default='plasma',
help='theme of the matrix color map')
parser.add_argument(
'--score-thr',
type=float,
default=0.3,
help='score threshold to filter detection bboxes')
parser.add_argument(
'--tp-iou-thr',
type=float,
default=0.5,
help='IoU threshold to be considered as matched')
parser.add_argument(
'--nms-iou-thr',
type=float,
default=None,
help='nms IoU threshold, only applied when users want to change the'
'nms IoU threshold.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def calculate_confusion_matrix(dataset,
results,
score_thr=0,
nms_iou_thr=None,
tp_iou_thr=0.5):
"""Calculate the confusion matrix.
Args:
dataset (Dataset): Test or val dataset.
results (list[ndarray]): A list of detection results in each image.
score_thr (float|optional): Score threshold to filter bboxes.
Default: 0.
nms_iou_thr (float|optional): nms IoU threshold, the detection results
have done nms in the detector, only applied when users want to
change the nms IoU threshold. Default: None.
tp_iou_thr (float|optional): IoU threshold to be considered as matched.
Default: 0.5.
"""
num_classes = len(dataset.metainfo['classes'])
confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1])
assert len(dataset) == len(results)
prog_bar = ProgressBar(len(results))
for idx, per_img_res in enumerate(results):
res_bboxes = per_img_res['pred_instances']
gts = dataset.get_data_info(idx)['instances']
analyze_per_img_dets(confusion_matrix, gts, res_bboxes, score_thr,
tp_iou_thr, nms_iou_thr)
prog_bar.update()
return confusion_matrix
def analyze_per_img_dets(confusion_matrix,
gts,
result,
score_thr=0,
tp_iou_thr=0.5,
nms_iou_thr=None):
"""Analyze detection results on each image.
Args:
confusion_matrix (ndarray): The confusion matrix,
has shape (num_classes + 1, num_classes + 1).
        gts (list[dict]): Ground truth instances of one image; each dict
            contains the keys ``bbox`` and ``bbox_label``.
        result (dict): Detection results of one image; contains the keys
            ``bboxes``, ``scores`` and ``labels``.
score_thr (float): Score threshold to filter bboxes.
Default: 0.
tp_iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
nms_iou_thr (float|optional): nms IoU threshold, the detection results
have done nms in the detector, only applied when users want to
change the nms IoU threshold. Default: None.
"""
true_positives = np.zeros(len(gts))
gt_bboxes = []
gt_labels = []
for gt in gts:
gt_bboxes.append(gt['bbox'])
gt_labels.append(gt['bbox_label'])
gt_bboxes = np.array(gt_bboxes)
gt_labels = np.array(gt_labels)
unique_label = np.unique(result['labels'].numpy())
for det_label in unique_label:
mask = (result['labels'] == det_label)
det_bboxes = result['bboxes'][mask].numpy()
det_scores = result['scores'][mask].numpy()
if nms_iou_thr:
det_bboxes, _ = nms(
det_bboxes, det_scores, nms_iou_thr, score_threshold=score_thr)
ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes)
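        # for each detection above the score threshold, one count is added
        # to confusion_matrix[gt_label, det_label] for every GT it overlaps
        # with IoU >= tp_iou_thr; detections matching no GT are counted as
        # background false positives in the last row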
for i, score in enumerate(det_scores):
det_match = 0
if score >= score_thr:
for j, gt_label in enumerate(gt_labels):
if ious[i, j] >= tp_iou_thr:
det_match += 1
if gt_label == det_label:
true_positives[j] += 1 # TP
confusion_matrix[gt_label, det_label] += 1
if det_match == 0: # BG FP
confusion_matrix[-1, det_label] += 1
for num_tp, gt_label in zip(true_positives, gt_labels):
if num_tp == 0: # FN
confusion_matrix[gt_label, -1] += 1
def plot_confusion_matrix(confusion_matrix,
labels,
save_dir=None,
show=True,
title='Normalized Confusion Matrix',
color_theme='plasma'):
"""Draw confusion matrix with matplotlib.
Args:
confusion_matrix (ndarray): The confusion matrix.
labels (list[str]): List of class names.
save_dir (str|optional): If set, save the confusion matrix plot to the
given path. Default: None.
show (bool): Whether to show the plot. Default: True.
title (str): Title of the plot. Default: `Normalized Confusion Matrix`.
color_theme (str): Theme of the matrix color map. Default: `plasma`.
"""
# normalize the confusion matrix
per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis]
confusion_matrix = \
confusion_matrix.astype(np.float32) / per_label_sums * 100
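    # after normalization each row sums to 100, i.e. percentages over all
    # predictions for that ground-truth class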
num_classes = len(labels)
fig, ax = plt.subplots(
figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180)
cmap = plt.get_cmap(color_theme)
im = ax.imshow(confusion_matrix, cmap=cmap)
plt.colorbar(mappable=im, ax=ax)
title_font = {'weight': 'bold', 'size': 12}
ax.set_title(title, fontdict=title_font)
label_font = {'size': 10}
plt.ylabel('Ground Truth Label', fontdict=label_font)
plt.xlabel('Prediction Label', fontdict=label_font)
# draw locator
xmajor_locator = MultipleLocator(1)
xminor_locator = MultipleLocator(0.5)
ax.xaxis.set_major_locator(xmajor_locator)
ax.xaxis.set_minor_locator(xminor_locator)
ymajor_locator = MultipleLocator(1)
yminor_locator = MultipleLocator(0.5)
ax.yaxis.set_major_locator(ymajor_locator)
ax.yaxis.set_minor_locator(yminor_locator)
# draw grid
ax.grid(True, which='minor', linestyle='-')
# draw label
ax.set_xticks(np.arange(num_classes))
ax.set_yticks(np.arange(num_classes))
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
ax.tick_params(
axis='x', bottom=False, top=True, labelbottom=False, labeltop=True)
plt.setp(
ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')
    # draw confusion matrix values
for i in range(num_classes):
for j in range(num_classes):
ax.text(
j,
i,
'{}%'.format(
int(confusion_matrix[
i,
j]) if not np.isnan(confusion_matrix[i, j]) else -1),
ha='center',
va='center',
color='w',
size=7)
ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1
fig.tight_layout()
if save_dir is not None:
plt.savefig(
os.path.join(save_dir, 'confusion_matrix.png'), format='png')
if show:
plt.show()
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMYOLO_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmyolo'))
results = load(args.prediction_path)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
dataset = DATASETS.build(cfg.test_dataloader.dataset)
confusion_matrix = calculate_confusion_matrix(dataset, results,
args.score_thr,
args.nms_iou_thr,
args.tp_iou_thr)
plot_confusion_matrix(
confusion_matrix,
dataset.metainfo['classes'] + ('background', ),
save_dir=args.save_dir,
show=args.show,
color_theme=args.color_theme)
if __name__ == '__main__':
main()
| 9,903
| 35.145985
| 79
|
py
|
mmyolo
|
mmyolo-main/tools/analysis_tools/browse_coco_json.py
|
import argparse
import os.path as osp
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from pycocotools.coco import COCO
def show_coco_json(args):
if args.data_root is not None:
coco = COCO(osp.join(args.data_root, args.ann_file))
else:
coco = COCO(args.ann_file)
    print(f'Total number of images: {len(coco.getImgIds())}')
categories = coco.loadCats(coco.getCatIds())
category_names = [category['name'] for category in categories]
    print(f'Total number of categories: {len(category_names)}')
print('Categories: \n{}\n'.format(' '.join(category_names)))
if args.category_names is None:
category_ids = []
else:
        # >= allows the user to request every category at once
        assert set(category_names) >= set(args.category_names)
category_ids = coco.getCatIds(args.category_names)
image_ids = coco.getImgIds(catIds=category_ids)
if args.shuffle:
np.random.shuffle(image_ids)
for i in range(len(image_ids)):
image_data = coco.loadImgs(image_ids[i])[0]
if args.data_root is not None:
image_path = osp.join(args.data_root, args.img_dir,
image_data['file_name'])
else:
image_path = osp.join(args.img_dir, image_data['file_name'])
annotation_ids = coco.getAnnIds(
imgIds=image_data['id'], catIds=category_ids, iscrowd=0)
annotations = coco.loadAnns(annotation_ids)
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.figure()
plt.imshow(image)
if args.disp_all:
coco.showAnns(annotations)
else:
show_bbox_only(coco, annotations)
if args.wait_time == 0:
plt.show()
else:
plt.show(block=False)
plt.pause(args.wait_time)
plt.close()
def show_bbox_only(coco, anns, show_label_bbox=True, is_filling=True):
"""Show bounding box of annotations Only."""
if len(anns) == 0:
return
ax = plt.gca()
ax.set_autoscale_on(False)
image2color = dict()
for cat in coco.getCatIds():
image2color[cat] = (np.random.random((1, 3)) * 0.7 + 0.3).tolist()[0]
polygons = []
colors = []
for ann in anns:
color = image2color[ann['category_id']]
bbox_x, bbox_y, bbox_w, bbox_h = ann['bbox']
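        # COCO bboxes are stored as [x, y, w, h]; build the four corner
        # points of the rectangle for the patch collection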
poly = [[bbox_x, bbox_y], [bbox_x, bbox_y + bbox_h],
[bbox_x + bbox_w, bbox_y + bbox_h], [bbox_x + bbox_w, bbox_y]]
polygons.append(Polygon(np.array(poly).reshape((4, 2))))
colors.append(color)
if show_label_bbox:
label_bbox = dict(facecolor=color)
else:
label_bbox = None
ax.text(
bbox_x,
bbox_y,
'%s' % (coco.loadCats(ann['category_id'])[0]['name']),
color='white',
bbox=label_bbox)
if is_filling:
p = PatchCollection(
polygons, facecolor=colors, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(
polygons, facecolor='none', edgecolors=colors, linewidths=2)
ax.add_collection(p)
def parse_args():
parser = argparse.ArgumentParser(description='Show coco json file')
parser.add_argument('--data-root', default=None, help='dataset root')
parser.add_argument(
'--img-dir', default='data/coco/train2017', help='image folder path')
parser.add_argument(
'--ann-file',
default='data/coco/annotations/instances_train2017.json',
help='ann file path')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--disp-all',
action='store_true',
help='Whether to display all types of data, '
'such as bbox and mask.'
' Default is to display only bbox')
parser.add_argument(
'--category-names',
type=str,
default=None,
nargs='+',
help='Display category-specific data, e.g., "bicycle", "person"')
parser.add_argument(
'--shuffle',
action='store_true',
        help='Whether to display images in random order')
args = parser.parse_args()
return args
def main():
args = parse_args()
show_coco_json(args)
if __name__ == '__main__':
main()
| 4,424
| 28.898649
| 78
|
py
|
mmyolo
|
mmyolo-main/tools/analysis_tools/vis_scheduler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Hyper-parameter Scheduler Visualization.
This tool helps the user check the hyper-parameter scheduler
of the optimizer (without training); it supports the
"learning rate", "momentum" and "weight_decay" parameters.
Example:
```shell
python tools/analysis_tools/vis_scheduler.py \
configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py \
--dataset-size 118287 \
--ngpus 8 \
--out-dir ./output
```
Modified from: https://github.com/open-mmlab/mmclassification/blob/1.x/tools/visualizations/vis_scheduler.py # noqa
"""
import argparse
import json
import os.path as osp
import re
from pathlib import Path
from unittest.mock import MagicMock
import matplotlib.pyplot as plt
import rich
import torch.nn as nn
from mmengine.config import Config, DictAction
from mmengine.hooks import Hook
from mmengine.model import BaseModel
from mmengine.registry import init_default_scope
from mmengine.runner import Runner
from mmengine.utils.path import mkdir_or_exist
from mmengine.visualization import Visualizer
from rich.progress import BarColumn, MofNCompleteColumn, Progress, TextColumn
def parse_args():
parser = argparse.ArgumentParser(
description='Visualize a hyper-parameter scheduler')
parser.add_argument('config', help='config file path')
parser.add_argument(
'-p',
'--parameter',
type=str,
default='lr',
choices=['lr', 'momentum', 'wd'],
        help='The parameter whose change curve to visualize; choose from '
        '"lr", "wd" and "momentum". Defaults to "lr".')
parser.add_argument(
'-d',
'--dataset-size',
type=int,
        help='The size of the dataset. If specified, `DATASETS.build` will '
        'be skipped and this size will be used as the dataset size.')
parser.add_argument(
'-n',
'--ngpus',
type=int,
default=1,
help='The number of GPUs used in training.')
parser.add_argument(
'-o', '--out-dir', type=Path, help='Path to output file')
parser.add_argument(
'--log-level',
default='WARNING',
help='The log level of the handler and logger. Defaults to '
'WARNING.')
parser.add_argument('--title', type=str, help='title of figure')
parser.add_argument(
'--style', type=str, default='whitegrid', help='style of plt')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--window-size',
default='12*7',
help='Size of the window to display images, in format of "$W*$H".')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.window_size != '':
assert re.match(r'\d+\*\d+', args.window_size), \
"'window-size' must be in format 'W*H'."
return args
class SimpleModel(BaseModel):
"""simple model that do nothing in train_step."""
def __init__(self):
super().__init__()
self.data_preprocessor = nn.Identity()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, inputs, data_samples, mode='tensor'):
pass
def train_step(self, data, optim_wrapper):
pass
class ParamRecordHook(Hook):
def __init__(self, by_epoch):
super().__init__()
self.by_epoch = by_epoch
self.lr_list = []
self.momentum_list = []
self.wd_list = []
self.task_id = 0
self.progress = Progress(BarColumn(), MofNCompleteColumn(),
TextColumn('{task.description}'))
def before_train(self, runner):
if self.by_epoch:
total = runner.train_loop.max_epochs
self.task_id = self.progress.add_task(
'epochs', start=True, total=total)
else:
total = runner.train_loop.max_iters
self.task_id = self.progress.add_task(
'iters', start=True, total=total)
self.progress.start()
def after_train_epoch(self, runner):
if self.by_epoch:
self.progress.update(self.task_id, advance=1)
# TODO: Support multiple schedulers
def after_train_iter(self, runner, batch_idx, data_batch, outputs):
if not self.by_epoch:
self.progress.update(self.task_id, advance=1)
self.lr_list.append(runner.optim_wrapper.get_lr()['lr'][0])
self.momentum_list.append(
runner.optim_wrapper.get_momentum()['momentum'][0])
self.wd_list.append(
runner.optim_wrapper.param_groups[0]['weight_decay'])
def after_train(self, runner):
self.progress.stop()
def plot_curve(lr_list, args, param_name, iters_per_epoch, by_epoch=True):
"""Plot learning rate vs iter graph."""
try:
import seaborn as sns
sns.set_style(args.style)
except ImportError:
pass
wind_w, wind_h = args.window_size.split('*')
wind_w, wind_h = int(wind_w), int(wind_h)
plt.figure(figsize=(wind_w, wind_h))
ax: plt.Axes = plt.subplot()
ax.plot(lr_list, linewidth=1)
if by_epoch:
ax.xaxis.tick_top()
ax.set_xlabel('Iters')
ax.xaxis.set_label_position('top')
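        # the secondary x-axis maps the iteration index shown on top to
        # epochs on the bottom; `functions` is the (forward, inverse) pair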
sec_ax = ax.secondary_xaxis(
'bottom',
functions=(lambda x: x / iters_per_epoch,
lambda y: y * iters_per_epoch))
sec_ax.set_xlabel('Epochs')
else:
plt.xlabel('Iters')
plt.ylabel(param_name)
if args.title is None:
plt.title(f'{osp.basename(args.config)} {param_name} curve')
else:
plt.title(args.title)
def simulate_train(data_loader, cfg, by_epoch):
model = SimpleModel()
param_record_hook = ParamRecordHook(by_epoch=by_epoch)
default_hooks = dict(
param_scheduler=cfg.default_hooks['param_scheduler'],
runtime_info=None,
timer=None,
logger=None,
checkpoint=None,
sampler_seed=None,
param_record=param_record_hook)
runner = Runner(
model=model,
work_dir=cfg.work_dir,
train_dataloader=data_loader,
train_cfg=cfg.train_cfg,
log_level=cfg.log_level,
optim_wrapper=cfg.optim_wrapper,
param_scheduler=cfg.param_scheduler,
default_scope=cfg.default_scope,
default_hooks=default_hooks,
visualizer=MagicMock(spec=Visualizer),
custom_hooks=cfg.get('custom_hooks', None))
runner.train()
param_dict = dict(
lr=param_record_hook.lr_list,
momentum=param_record_hook.momentum_list,
wd=param_record_hook.wd_list)
return param_dict
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
if cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.log_level = args.log_level
init_default_scope(cfg.get('default_scope', 'mmyolo'))
    # print the parameter scheduler configuration
    print('Param_scheduler:')
rich.print_json(json.dumps(cfg.param_scheduler))
# prepare data loader
batch_size = cfg.train_dataloader.batch_size * args.ngpus
if 'by_epoch' in cfg.train_cfg:
by_epoch = cfg.train_cfg.get('by_epoch')
elif 'type' in cfg.train_cfg:
by_epoch = cfg.train_cfg.get('type') == 'EpochBasedTrainLoop'
else:
raise ValueError('please set `train_cfg`.')
if args.dataset_size is None and by_epoch:
from mmyolo.registry import DATASETS
dataset_size = len(DATASETS.build(cfg.train_dataloader.dataset))
else:
dataset_size = args.dataset_size or batch_size
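    # a list of length iters-per-epoch is enough here: the simulated train
    # loop only needs len(dataloader) and a `dataset.metainfo` attribute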
class FakeDataloader(list):
dataset = MagicMock(metainfo=None)
data_loader = FakeDataloader(range(dataset_size // batch_size))
dataset_info = (
f'\nDataset infos:'
f'\n - Dataset size: {dataset_size}'
f'\n - Batch size per GPU: {cfg.train_dataloader.batch_size}'
f'\n - Number of GPUs: {args.ngpus}'
f'\n - Total batch size: {batch_size}')
if by_epoch:
dataset_info += f'\n - Iterations per epoch: {len(data_loader)}'
rich.print(dataset_info + '\n')
# simulation training process
param_dict = simulate_train(data_loader, cfg, by_epoch)
param_list = param_dict[args.parameter]
if args.parameter == 'lr':
param_name = 'Learning Rate'
elif args.parameter == 'momentum':
param_name = 'Momentum'
else:
param_name = 'Weight Decay'
plot_curve(param_list, args, param_name, len(data_loader), by_epoch)
if args.out_dir:
# make dir for output
mkdir_or_exist(args.out_dir)
# save the graph
out_file = osp.join(
args.out_dir, f'{osp.basename(args.config)}-{args.parameter}.jpg')
plt.savefig(out_file)
print(f'\nThe {param_name} graph is saved at {out_file}')
if not args.not_show:
plt.show()
if __name__ == '__main__':
main()
| 9,574
| 31.347973
| 115
|
py
|
mmyolo
|
mmyolo-main/projects/example_project/dummy/dummy_yolov5cspdarknet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmyolo.models import YOLOv5CSPDarknet
from mmyolo.registry import MODELS
@MODELS.register_module()
class DummyYOLOv5CSPDarknet(YOLOv5CSPDarknet):
"""Implements a dummy YOLOv5CSPDarknet wrapper for demonstration purpose.
Args:
**kwargs: All the arguments are passed to the parent class.
"""
def __init__(self, **kwargs) -> None:
print('Hello world!')
super().__init__(**kwargs)
| 474
| 26.941176
| 77
|
py
|
mmyolo
|
mmyolo-main/projects/example_project/dummy/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dummy_yolov5cspdarknet import DummyYOLOv5CSPDarknet
__all__ = ['DummyYOLOv5CSPDarknet']
| 143
| 27.8
| 57
|
py
|
mmyolo
|
mmyolo-main/projects/example_project/configs/yolov5_s_dummy-backbone_v61_syncbn_8xb16-300e_coco.py
|
_base_ = '../../../configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py'
custom_imports = dict(imports=['projects.example_project.dummy'])
_base_.model.backbone.type = 'DummyYOLOv5CSPDarknet'
| 195
| 31.666667
| 73
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/assigner_visualization.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import sys
import warnings
import mmcv
import numpy as np
import torch
from mmengine import ProgressBar
from mmengine.config import Config, DictAction
from mmengine.dataset import COLLATE_FUNCTIONS
from mmengine.runner.checkpoint import load_checkpoint
from numpy import random
from mmyolo.registry import DATASETS, MODELS
from mmyolo.utils import register_all_modules
from projects.assigner_visualization.dense_heads import (RTMHeadAssigner,
YOLOv5HeadAssigner,
YOLOv7HeadAssigner,
YOLOv8HeadAssigner)
from projects.assigner_visualization.visualization import \
YOLOAssignerVisualizer
def parse_args():
parser = argparse.ArgumentParser(
        description='MMYOLO script for showing the positive sample'
        ' assigning results.')
parser.add_argument('config', help='config file path')
parser.add_argument('--checkpoint', '-c', type=str, help='checkpoint file')
parser.add_argument(
'--show-number',
'-n',
type=int,
default=sys.maxsize,
        help='number of images selected to save; must be bigger than 0. '
        'If the number is bigger than the length of the dataset, save all '
        'the images in the dataset; defaults to "sys.maxsize", i.e. save '
        'every image in the dataset')
parser.add_argument(
'--output-dir',
default='assigned_results',
type=str,
help='The name of the folder where the image is saved.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference.')
parser.add_argument(
'--show-prior',
default=False,
action='store_true',
help='Whether to show prior on image.')
parser.add_argument(
'--not-show-label',
default=False,
action='store_true',
        help='Do not show GT labels on the image.')
parser.add_argument('--seed', default=-1, type=int, help='random seed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
register_all_modules()
# set random seed
seed = int(args.seed)
if seed != -1:
print(f'Set the global seed: {seed}')
random.seed(int(args.seed))
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# build model
model = MODELS.build(cfg.model)
if args.checkpoint is not None:
load_checkpoint(model, args.checkpoint)
    elif isinstance(
            model.bbox_head,
            (YOLOv7HeadAssigner, YOLOv8HeadAssigner, RTMHeadAssigner)):
warnings.warn(
            'If you use dynamic assignment methods such as the YOLOv7, '
            'YOLOv8 or RTMDet assigner, please load a checkpoint.')
assert isinstance(model.bbox_head, (YOLOv5HeadAssigner,
YOLOv7HeadAssigner,
YOLOv8HeadAssigner,
RTMHeadAssigner)), \
        'Now, this script only supports YOLOv5, YOLOv7, YOLOv8 and RTMDet, ' \
        'and bbox_head must use ' \
        '`YOLOv5HeadAssigner or YOLOv7HeadAssigner or YOLOv8HeadAssigner ' \
        'or RTMHeadAssigner`. Please use `' \
'yolov5_s-v61_syncbn_fast_8xb16-300e_coco_assignervisualization.py' \
'or yolov7_tiny_syncbn_fast_8x16b-300e_coco_assignervisualization.py' \
'or yolov8_s_syncbn_fast_8xb16-500e_coco_assignervisualization.py' \
'or rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py' \
"""` as config file."""
model.eval()
model.to(args.device)
# build dataset
dataset_cfg = cfg.get('train_dataloader').get('dataset')
dataset = DATASETS.build(dataset_cfg)
# get collate_fn
collate_fn_cfg = cfg.get('train_dataloader').pop(
'collate_fn', dict(type='pseudo_collate'))
collate_fn_type = collate_fn_cfg.pop('type')
collate_fn = COLLATE_FUNCTIONS.get(collate_fn_type)
# init visualizer
visualizer = YOLOAssignerVisualizer(
vis_backends=[{
'type': 'LocalVisBackend'
}], name='visualizer')
visualizer.dataset_meta = dataset.metainfo
# need priors size to draw priors
if hasattr(model.bbox_head.prior_generator, 'base_anchors'):
visualizer.priors_size = model.bbox_head.prior_generator.base_anchors
# make output dir
os.makedirs(args.output_dir, exist_ok=True)
print('Results will save to ', args.output_dir)
# init visualization image number
assert args.show_number > 0
display_number = min(args.show_number, len(dataset))
progress_bar = ProgressBar(display_number)
for ind_img in range(display_number):
data = dataset.prepare_data(ind_img)
if data is None:
print('Unable to visualize {} due to strong data augmentations'.
format(dataset[ind_img]['data_samples'].img_path))
continue
# convert data to batch format
batch_data = collate_fn([data])
with torch.no_grad():
assign_results = model.assign(batch_data)
img = data['inputs'].cpu().numpy().astype(np.uint8).transpose(
(1, 2, 0))
# bgr2rgb
img = mmcv.bgr2rgb(img)
gt_instances = data['data_samples'].gt_instances
img_show = visualizer.draw_assign(img, assign_results, gt_instances,
args.show_prior, args.not_show_label)
if hasattr(data['data_samples'], 'img_path'):
filename = osp.basename(data['data_samples'].img_path)
else:
            # some datasets do not have an image path
filename = f'{ind_img}.jpg'
out_file = osp.join(args.output_dir, filename)
# convert rgb 2 bgr and save img
mmcv.imwrite(mmcv.rgb2bgr(img_show), out_file)
progress_bar.update()
if __name__ == '__main__':
main()
| 6,558
| 35.848315
| 79
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/detectors/yolo_detector_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
from mmyolo.models import YOLODetector
from mmyolo.registry import MODELS
from projects.assigner_visualization.dense_heads import (RTMHeadAssigner,
YOLOv7HeadAssigner,
YOLOv8HeadAssigner)
@MODELS.register_module()
class YOLODetectorAssigner(YOLODetector):
def assign(self, data: dict) -> Union[dict, list]:
"""Calculate assigning results from a batch of inputs and data
samples.This function is provided to the `assigner_visualization.py`
script.
Args:
data (dict or tuple or list): Data sampled from dataset.
Returns:
dict: A dictionary of assigning components.
"""
assert isinstance(data, dict)
assert len(data['inputs']) == 1, 'Only support batchsize == 1'
data = self.data_preprocessor(data, True)
available_assigners = (YOLOv7HeadAssigner, YOLOv8HeadAssigner,
RTMHeadAssigner)
if isinstance(self.bbox_head, available_assigners):
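            # dynamic assigners compute the assignment from the FPN
            # features, so attach them to the data samples first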
data['data_samples']['feats'] = self.extract_feat(data['inputs'])
inputs_hw = data['inputs'].shape[-2:]
assign_results = self.bbox_head.assign(data['data_samples'], inputs_hw)
return assign_results
| 1,408
| 39.257143
| 79
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/detectors/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from projects.assigner_visualization.detectors.yolo_detector_assigner import \
YOLODetectorAssigner
__all__ = ['YOLODetectorAssigner']
| 188
| 30.5
| 78
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/configs/rtmdet_s_syncbn_fast_8xb32-300e_coco_assignervisualization.py
|
_base_ = ['../../../configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py']
custom_imports = dict(imports=[
'projects.assigner_visualization.detectors',
'projects.assigner_visualization.dense_heads'
])
model = dict(
type='YOLODetectorAssigner', bbox_head=dict(type='RTMHeadAssigner'))
| 300
| 29.1
| 76
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/configs/yolov7_tiny_syncbn_fast_8xb16-300e_coco_assignervisualization.py
|
_base_ = ['../../../configs/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py']
custom_imports = dict(imports=[
'projects.assigner_visualization.detectors',
'projects.assigner_visualization.dense_heads'
])
model = dict(
type='YOLODetectorAssigner', bbox_head=dict(type='YOLOv7HeadAssigner'))
| 306
| 29.7
| 79
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/configs/yolov8_s_syncbn_fast_8xb16-500e_coco_assignervisualization.py
|
_base_ = ['../../../configs/yolov8/yolov8_s_syncbn_fast_8xb16-500e_coco.py']
custom_imports = dict(imports=[
'projects.assigner_visualization.detectors',
'projects.assigner_visualization.dense_heads'
])
model = dict(
type='YOLODetectorAssigner', bbox_head=dict(type='YOLOv8HeadAssigner'))
| 303
| 29.4
| 76
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/configs/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_assignervisualization.py
|
_base_ = [
'../../../configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
]
custom_imports = dict(imports=[
'projects.assigner_visualization.detectors',
'projects.assigner_visualization.dense_heads'
])
model = dict(
type='YOLODetectorAssigner', bbox_head=dict(type='YOLOv5HeadAssigner'))
| 313
| 25.166667
| 75
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/visualization/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigner_visualizer import YOLOAssignerVisualizer
__all__ = ['YOLOAssignerVisualizer']
| 142
| 27.6
| 55
|
py
|
mmyolo
|
mmyolo-main/projects/assigner_visualization/visualization/assigner_visualizer.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import List, Union
import mmcv
import numpy as np
import torch
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.visualization import DetLocalVisualizer
from mmdet.visualization.palette import _get_adaptive_scales, get_palette
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import VISUALIZERS
@VISUALIZERS.register_module()
class YOLOAssignerVisualizer(DetLocalVisualizer):
"""MMYOLO Detection Assigner Visualizer.
This class is provided to the `assigner_visualization.py` script.
Args:
name (str): Name of the instance. Defaults to 'visualizer'.
"""
def __init__(self, name: str = 'visualizer', *args, **kwargs):
super().__init__(name=name, *args, **kwargs)
# need priors_size from config
self.priors_size = None
def draw_grid(self,
stride: int = 8,
line_styles: Union[str, List[str]] = ':',
colors: Union[str, tuple, List[str],
List[tuple]] = (180, 180, 180),
line_widths: Union[Union[int, float],
List[Union[int, float]]] = 1):
"""Draw grids on image.
Args:
stride (int): Downsample factor of feature map.
line_styles (Union[str, List[str]]): The linestyle
of lines. ``line_styles`` can have the same length with
texts or just single value. If ``line_styles`` is single
value, all the lines will have the same linestyle.
Reference to
https://matplotlib.org/stable/api/collections_api.html?highlight=collection#matplotlib.collections.AsteriskPolygonCollection.set_linestyle
for more details. Defaults to ':'.
colors (Union[str, tuple, List[str], List[tuple]]): The colors of
lines. ``colors`` can have the same length with lines or just
single value. If ``colors`` is single value, all the lines
will have the same colors. Reference to
https://matplotlib.org/stable/gallery/color/named_colors.html
for more details. Defaults to (180, 180, 180).
line_widths (Union[Union[int, float], List[Union[int, float]]]):
The linewidth of lines. ``line_widths`` can have
the same length with lines or just single value.
If ``line_widths`` is single value, all the lines will
have the same linewidth. Defaults to 1.
"""
assert self._image is not None, 'Please set image using `set_image`'
# draw vertical lines
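        # each line is described by its two endpoints: x is a stride
        # multiple repeated twice, y spans the full image height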
x_datas_vertical = ((np.arange(self.width // stride - 1) + 1) *
stride).reshape((-1, 1)).repeat(
2, axis=1)
y_datas_vertical = np.array([[0, self.height - 1]]).repeat(
self.width // stride - 1, axis=0)
self.draw_lines(
x_datas_vertical,
y_datas_vertical,
colors=colors,
line_styles=line_styles,
line_widths=line_widths)
# draw horizontal lines
x_datas_horizontal = np.array([[0, self.width - 1]]).repeat(
self.height // stride - 1, axis=0)
y_datas_horizontal = ((np.arange(self.height // stride - 1) + 1) *
stride).reshape((-1, 1)).repeat(
2, axis=1)
self.draw_lines(
x_datas_horizontal,
y_datas_horizontal,
colors=colors,
line_styles=line_styles,
line_widths=line_widths)
def draw_instances_assign(self,
instances: InstanceData,
retained_gt_inds: Tensor,
not_show_label: bool = False):
"""Draw instances of GT.
Args:
instances (:obj:`InstanceData`): gt_instance. It usually
includes ``bboxes`` and ``labels`` attributes.
retained_gt_inds (Tensor): The gt indexes assigned as the
positive sample in the current prior.
not_show_label (bool): Whether to show gt labels on images.
"""
assert self.dataset_meta is not None
classes = self.dataset_meta['classes']
palette = self.dataset_meta['palette']
if len(retained_gt_inds) == 0:
return self.get_image()
draw_gt_inds = torch.from_numpy(
np.array(
list(set(retained_gt_inds.cpu().numpy())), dtype=np.int64))
bboxes = instances.bboxes[draw_gt_inds]
labels = instances.labels[draw_gt_inds]
if not isinstance(bboxes, Tensor):
bboxes = bboxes.tensor
edge_colors = [palette[i] for i in labels]
max_label = int(max(labels) if len(labels) > 0 else 0)
text_palette = get_palette(self.text_color, max_label + 1)
text_colors = [text_palette[label] for label in labels]
self.draw_bboxes(
bboxes,
edge_colors=edge_colors,
alpha=self.alpha,
line_widths=self.line_width)
if not not_show_label:
positions = bboxes[:, :2] + self.line_width
areas = (bboxes[:, 3] - bboxes[:, 1]) * (
bboxes[:, 2] - bboxes[:, 0])
scales = _get_adaptive_scales(areas)
for i, (pos, label) in enumerate(zip(positions, labels)):
label_text = classes[
label] if classes is not None else f'class {label}'
self.draw_texts(
label_text,
pos,
colors=text_colors[i],
font_sizes=int(13 * scales[i]),
bboxes=[{
'facecolor': 'black',
'alpha': 0.8,
'pad': 0.7,
'edgecolor': 'none'
}])
def draw_positive_assign(self,
grid_x_inds: Tensor,
grid_y_inds: Tensor,
class_inds: Tensor,
stride: int,
bboxes: Union[Tensor, HorizontalBoxes],
retained_gt_inds: Tensor,
offset: float = 0.5):
"""
Args:
grid_x_inds (Tensor): The X-axis indexes of the positive sample
in current prior.
grid_y_inds (Tensor): The Y-axis indexes of the positive sample
in current prior.
class_inds (Tensor): The classes indexes of the positive sample
in current prior.
stride (int): Downsample factor of feature map.
bboxes (Union[Tensor, HorizontalBoxes]): Bounding boxes of GT.
retained_gt_inds (Tensor): The gt indexes assigned as the
positive sample in the current prior.
offset (float): The offset of points, the value is normalized
with corresponding stride. Defaults to 0.5.
"""
if not isinstance(bboxes, Tensor):
# Convert HorizontalBoxes to Tensor
bboxes = bboxes.tensor
# The PALETTE in the dataset_meta is required
assert self.dataset_meta is not None
palette = self.dataset_meta['palette']
x = ((grid_x_inds + offset) * stride).long()
y = ((grid_y_inds + offset) * stride).long()
center = torch.stack((x, y), dim=-1)
retained_bboxes = bboxes[retained_gt_inds]
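        # scale the marker radius with the area of the assigned GT box so
        # that small objects get proportionally small dots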
bbox_wh = retained_bboxes[:, 2:] - retained_bboxes[:, :2]
bbox_area = bbox_wh[:, 0] * bbox_wh[:, 1]
radius = _get_adaptive_scales(bbox_area) * 4
colors = [palette[i] for i in class_inds]
self.draw_circles(
center,
radius,
colors,
line_widths=0,
face_colors=colors,
alpha=1.0)
def draw_prior(self,
grid_x_inds: Tensor,
grid_y_inds: Tensor,
class_inds: Tensor,
stride: int,
feat_ind: int,
prior_ind: int,
offset: float = 0.5):
"""Draw priors on image.
Args:
grid_x_inds (Tensor): The X-axis indexes of the positive sample
in current prior.
grid_y_inds (Tensor): The Y-axis indexes of the positive sample
in current prior.
class_inds (Tensor): The classes indexes of the positive sample
in current prior.
stride (int): Downsample factor of feature map.
feat_ind (int): Index of featmap.
prior_ind (int): Index of prior in current featmap.
offset (float): The offset of points, the value is normalized
with corresponding stride. Defaults to 0.5.
"""
palette = self.dataset_meta['palette']
center_x = ((grid_x_inds + offset) * stride)
center_y = ((grid_y_inds + offset) * stride)
xyxy = torch.stack((center_x, center_y, center_x, center_y), dim=1)
device = xyxy.device
if self.priors_size is not None:
xyxy += self.priors_size[feat_ind][prior_ind].to(device)
else:
xyxy += torch.tensor(
[[-stride / 2, -stride / 2, stride / 2, stride / 2]],
device=device)
colors = [palette[i] for i in class_inds]
self.draw_bboxes(
xyxy,
edge_colors=colors,
alpha=self.alpha,
line_styles='--',
line_widths=math.ceil(self.line_width * 0.3))
def draw_assign(self,
image: np.ndarray,
assign_results: List[List[dict]],
gt_instances: InstanceData,
show_prior: bool = False,
not_show_label: bool = False) -> np.ndarray:
"""Draw assigning results.
Args:
image (np.ndarray): The image to draw.
assign_results (list): The assigning results.
gt_instances (:obj:`InstanceData`): Data structure for
instance-level annotations or predictions.
show_prior (bool): Whether to show prior on image.
not_show_label (bool): Whether to show gt labels on images.
Returns:
np.ndarray: the drawn image which channel is RGB.
"""
img_show_list = []
for feat_ind, assign_results_feat in enumerate(assign_results):
img_show_list_feat = []
for prior_ind, assign_results_prior in enumerate(
assign_results_feat):
self.set_image(image)
h, w = image.shape[:2]
# draw grid
stride = assign_results_prior['stride']
self.draw_grid(stride)
# draw prior on matched gt
grid_x_inds = assign_results_prior['grid_x_inds']
grid_y_inds = assign_results_prior['grid_y_inds']
class_inds = assign_results_prior['class_inds']
prior_ind = assign_results_prior['prior_ind']
offset = assign_results_prior.get('offset', 0.5)
if show_prior:
self.draw_prior(grid_x_inds, grid_y_inds, class_inds,
stride, feat_ind, prior_ind, offset)
# draw matched gt
retained_gt_inds = assign_results_prior['retained_gt_inds']
self.draw_instances_assign(gt_instances, retained_gt_inds,
not_show_label)
# draw positive
self.draw_positive_assign(grid_x_inds, grid_y_inds, class_inds,
stride, gt_instances.bboxes,
retained_gt_inds, offset)
# draw title
if self.priors_size is not None:
base_prior = self.priors_size[feat_ind][prior_ind]
else:
base_prior = [stride, stride, stride * 2, stride * 2]
prior_size = (base_prior[2] - base_prior[0],
base_prior[3] - base_prior[1])
pos = np.array((20, 20))
text = f'feat_ind: {feat_ind} ' \
f'prior_ind: {prior_ind} ' \
f'prior_size: ({prior_size[0]}, {prior_size[1]})'
scales = _get_adaptive_scales(np.array([h * w / 16]))
font_sizes = int(13 * scales)
self.draw_texts(
text,
pos,
colors=self.text_color,
font_sizes=font_sizes,
bboxes=[{
'facecolor': 'black',
'alpha': 0.8,
'pad': 0.7,
'edgecolor': 'none'
}])
img_show = self.get_image()
img_show = mmcv.impad(img_show, padding=(5, 5, 5, 5))
img_show_list_feat.append(img_show)
img_show_list.append(np.concatenate(img_show_list_feat, axis=1))
# Merge all images into one image
# setting axis is to beautify the merged image
axis = 0 if len(assign_results[0]) > 1 else 1
return np.concatenate(img_show_list, axis=axis)
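
A minimal sketch (not part of the repo) of the grid-to-pixel mapping that
`draw_positive_assign` above relies on: a positive sample at feature-map cell
(gx, gy) with stride s is drawn at pixel ((gx + offset) * s, (gy + offset) * s).
The cell indices, stride and offset below are made up for illustration.

import torch

grid_x_inds = torch.tensor([3, 10])  # hypothetical positive cells
grid_y_inds = torch.tensor([5, 2])
stride, offset = 8, 0.5

x = ((grid_x_inds + offset) * stride).long()
y = ((grid_y_inds + offset) * stride).long()
centers = torch.stack((x, y), dim=-1)
print(centers)  # tensor([[28, 44], [84, 20]]) -> pixel centers of the cells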

# File: mmyolo-main/projects/assigner_visualization/dense_heads/yolov5_head_assigner.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence, Union
import torch
from mmdet.models.utils import unpack_gt_instances
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.models import YOLOv5Head
from mmyolo.registry import MODELS
@MODELS.register_module()
class YOLOv5HeadAssigner(YOLOv5Head):
def assign_by_gt_and_feat(
self,
batch_gt_instances: Sequence[InstanceData],
batch_img_metas: Sequence[dict],
inputs_hw: Union[Tensor, tuple] = (640, 640)
) -> dict:
"""Calculate the assigning results based on the gt and features
extracted by the detection head.
Args:
batch_gt_instances (Sequence[InstanceData]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (Sequence[dict]): Meta information of each image,
e.g., image size, scaling factor, etc.
inputs_hw (Union[Tensor, tuple]): Height and width of inputs size.
Returns:
dict[str, Tensor]: A dictionary of assigning results.
"""
# 1. Convert gt to norm format
batch_targets_normed = self._convert_gt_to_norm_format(
batch_gt_instances, batch_img_metas)
device = batch_targets_normed.device
scaled_factor = torch.ones(7, device=device)
gt_inds = torch.arange(
batch_targets_normed.shape[1],
dtype=torch.long,
device=device,
requires_grad=False).unsqueeze(0).repeat((self.num_base_priors, 1))
assign_results = []
for i in range(self.num_levels):
assign_results_feat = []
h = inputs_hw[0] // self.featmap_strides[i]
w = inputs_hw[1] // self.featmap_strides[i]
# empty gt bboxes
if batch_targets_normed.shape[1] == 0:
for k in range(self.num_base_priors):
assign_results_feat.append({
'stride':
self.featmap_strides[i],
'grid_x_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'grid_y_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'img_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'class_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'retained_gt_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'prior_ind':
k
})
assign_results.append(assign_results_feat)
continue
priors_base_sizes_i = self.priors_base_sizes[i]
# feature map scale whwh
scaled_factor[2:6] = torch.tensor([w, h, w, h])
# Scale batch_targets from range 0-1 to range 0-features_maps size.
# (num_base_priors, num_bboxes, 7)
batch_targets_scaled = batch_targets_normed * scaled_factor
# 2. Shape match
wh_ratio = batch_targets_scaled[...,
4:6] / priors_base_sizes_i[:, None]
match_inds = torch.max(
wh_ratio, 1 / wh_ratio).max(2)[0] < self.prior_match_thr
batch_targets_scaled = batch_targets_scaled[match_inds]
match_gt_inds = gt_inds[match_inds]
# no gt bbox matches anchor
if batch_targets_scaled.shape[0] == 0:
for k in range(self.num_base_priors):
assign_results_feat.append({
'stride':
self.featmap_strides[i],
'grid_x_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'grid_y_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'img_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'class_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'retained_gt_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'prior_ind':
k
})
assign_results.append(assign_results_feat)
continue
# 3. Positive samples with additional neighbors
# check the left, up, right, bottom sides of the
            # targets grid, and determine whether to assign
            # them as positive samples as well.
batch_targets_cxcy = batch_targets_scaled[:, 2:4]
grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy
left, up = ((batch_targets_cxcy % 1 < self.near_neighbor_thr) &
(batch_targets_cxcy > 1)).T
right, bottom = ((grid_xy % 1 < self.near_neighbor_thr) &
(grid_xy > 1)).T
offset_inds = torch.stack(
(torch.ones_like(left), left, up, right, bottom))
batch_targets_scaled = batch_targets_scaled.repeat(
(5, 1, 1))[offset_inds]
retained_gt_inds = match_gt_inds.repeat((5, 1))[offset_inds]
retained_offsets = self.grid_offset.repeat(1, offset_inds.shape[1],
1)[offset_inds]
# prepare pred results and positive sample indexes to
            # calculate class loss and bbox loss
_chunk_targets = batch_targets_scaled.chunk(4, 1)
img_class_inds, grid_xy, grid_wh, priors_inds = _chunk_targets
priors_inds, (img_inds, class_inds) = priors_inds.long().view(
-1), img_class_inds.long().T
grid_xy_long = (grid_xy -
retained_offsets * self.near_neighbor_thr).long()
grid_x_inds, grid_y_inds = grid_xy_long.T
for k in range(self.num_base_priors):
retained_inds = priors_inds == k
assign_results_prior = {
'stride': self.featmap_strides[i],
'grid_x_inds': grid_x_inds[retained_inds],
'grid_y_inds': grid_y_inds[retained_inds],
'img_inds': img_inds[retained_inds],
'class_inds': class_inds[retained_inds],
'retained_gt_inds': retained_gt_inds[retained_inds],
'prior_ind': k
}
assign_results_feat.append(assign_results_prior)
assign_results.append(assign_results_feat)
return assign_results
def assign(self, batch_data_samples: Union[list, dict],
inputs_hw: Union[tuple, torch.Size]) -> dict:
"""Calculate assigning results. This function is provided to the
`assigner_visualization.py` script.
Args:
batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
inputs_hw: Height and width of inputs size
Returns:
dict: A dictionary of assigning components.
"""
if isinstance(batch_data_samples, list):
outputs = unpack_gt_instances(batch_data_samples)
(batch_gt_instances, batch_gt_instances_ignore,
batch_img_metas) = outputs
assign_inputs = (batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore, inputs_hw)
else:
# Fast version
assign_inputs = (batch_data_samples['bboxes_labels'],
batch_data_samples['img_metas'], inputs_hw)
assign_results = self.assign_by_gt_and_feat(*assign_inputs)
return assign_results
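
A toy check (not part of the repo) of the rule in step "2. Shape match"
above: a gt box matches an anchor only when the larger of (gt/prior,
prior/gt) over both width and height stays below `prior_match_thr` (4.0 in
the default YOLOv5 configs). The box and prior sizes below are made up.

import torch

prior_match_thr = 4.0
gt_wh = torch.tensor([[32., 48.], [200., 20.]])  # hypothetical gt w/h
prior_wh = torch.tensor([[30., 61.]])            # one hypothetical base prior

wh_ratio = gt_wh[None] / prior_wh[:, None]       # (num_priors, num_gts, 2)
match = torch.max(wh_ratio, 1 / wh_ratio).max(2)[0] < prior_match_thr
print(match)  # tensor([[ True, False]]): the elongated box fails the test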

# File: mmyolo-main/projects/assigner_visualization/dense_heads/yolov8_head_assigner.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
from mmdet.utils import InstanceList
from torch import Tensor
from mmyolo.models import YOLOv8Head
from mmyolo.models.utils import gt_instances_preprocess
from mmyolo.registry import MODELS
@MODELS.register_module()
class YOLOv8HeadAssigner(YOLOv8Head):
def assign_by_gt_and_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
inputs_hw: Union[Tensor, tuple] = (640, 640)
) -> dict:
"""Calculate the assigning results based on the gt and features
extracted by the detection head.
Args:
cls_scores (Sequence[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_priors * num_classes.
bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_priors * 4.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
inputs_hw (Union[Tensor, tuple]): Height and width of inputs size.
Returns:
dict[str, Tensor]: A dictionary of assigning results.
"""
num_imgs = len(batch_img_metas)
device = cls_scores[0].device
current_featmap_sizes = [
cls_score.shape[2:] for cls_score in cls_scores
]
# If the shape does not equal, generate new one
if current_featmap_sizes != self.featmap_sizes_train:
self.featmap_sizes_train = current_featmap_sizes
mlvl_priors_with_stride = self.prior_generator.grid_priors(
self.featmap_sizes_train,
dtype=cls_scores[0].dtype,
device=device,
with_stride=True)
self.num_level_priors = [len(n) for n in mlvl_priors_with_stride]
self.flatten_priors_train = torch.cat(
mlvl_priors_with_stride, dim=0)
self.stride_tensor = self.flatten_priors_train[..., [2]]
# gt info
gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
gt_labels = gt_info[:, :, :1]
gt_bboxes = gt_info[:, :, 1:] # xyxy
pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()
# pred info
flatten_cls_preds = [
cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.num_classes)
for cls_pred in cls_scores
]
flatten_pred_bboxes = [
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
]
# (bs, n, 4 * reg_max)
flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)
flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1)
flatten_pred_bboxes = self.bbox_coder.decode(
self.flatten_priors_train[..., :2], flatten_pred_bboxes,
self.stride_tensor[..., 0])
assigned_result = self.assigner(
(flatten_pred_bboxes.detach()).type(gt_bboxes.dtype),
flatten_cls_preds.detach().sigmoid(), self.flatten_priors_train,
gt_labels, gt_bboxes, pad_bbox_flag)
labels = assigned_result['assigned_labels'].reshape(-1)
bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 4)
fg_mask_pre_prior = assigned_result['fg_mask_pre_prior'].squeeze(0)
pos_inds = fg_mask_pre_prior.nonzero().squeeze(1)
targets = bbox_targets[pos_inds]
gt_bboxes = gt_bboxes.squeeze(0)
matched_gt_inds = torch.tensor(
[((t == gt_bboxes).sum(dim=1) == t.shape[0]).nonzero()[0]
for t in targets],
device=device)
level_inds = torch.zeros_like(labels)
img_inds = torch.zeros_like(labels)
level_nums = [0] + self.num_level_priors
for i in range(len(level_nums) - 1):
level_nums[i + 1] = level_nums[i] + level_nums[i + 1]
level_inds[level_nums[i]:level_nums[i + 1]] = i
level_inds_pos = level_inds[pos_inds]
img_inds = img_inds[pos_inds]
labels = labels[pos_inds]
assign_results = []
for i in range(self.num_levels):
retained_inds = level_inds_pos == i
if not retained_inds.any():
assign_results_prior = {
'stride':
self.featmap_strides[i],
'grid_x_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'grid_y_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'img_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'class_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'retained_gt_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'prior_ind':
0
}
else:
w = inputs_hw[1] // self.featmap_strides[i]
retained_pos_inds = pos_inds[retained_inds] - level_nums[i]
grid_y_inds = retained_pos_inds // w
grid_x_inds = retained_pos_inds - retained_pos_inds // w * w
assign_results_prior = {
'stride': self.featmap_strides[i],
'grid_x_inds': grid_x_inds,
'grid_y_inds': grid_y_inds,
'img_inds': img_inds[retained_inds],
'class_inds': labels[retained_inds],
'retained_gt_inds': matched_gt_inds[retained_inds],
'prior_ind': 0
}
assign_results.append([assign_results_prior])
return assign_results
def assign(self, batch_data_samples: Union[list, dict],
inputs_hw: Union[tuple, torch.Size]) -> dict:
"""Calculate assigning results.
This function is provided to the
`assigner_visualization.py` script.
Args:
batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
inputs_hw: Height and width of inputs size
Returns:
dict: A dictionary of assigning components.
"""
if isinstance(batch_data_samples, list):
raise NotImplementedError(
'assigning results_list is not implemented')
else:
# Fast version
cls_scores, bbox_preds = self(batch_data_samples['feats'])
assign_inputs = (cls_scores, bbox_preds,
batch_data_samples['bboxes_labels'],
batch_data_samples['img_metas'], inputs_hw)
assign_results = self.assign_by_gt_and_feat(*assign_inputs)
return assign_results
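
A small sketch (not part of the repo) of the flattened-index decoding used in
the per-level loop above: given the feature-map width, a flattened prior
index splits into row (grid_y) and column (grid_x). Values are illustrative.

import torch

w = 20                                 # hypothetical feature-map width
pos_inds = torch.tensor([0, 21, 399])  # hypothetical flattened positives

grid_y_inds = pos_inds // w
grid_x_inds = pos_inds - grid_y_inds * w  # equivalent to pos_inds % w
print(grid_x_inds.tolist(), grid_y_inds.tolist())  # [0, 1, 19] [0, 1, 19]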

# File: mmyolo-main/projects/assigner_visualization/dense_heads/yolov7_head_assigner.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
from mmdet.utils import InstanceList
from torch import Tensor
from mmyolo.models import YOLOv7Head
from mmyolo.registry import MODELS
@MODELS.register_module()
class YOLOv7HeadAssigner(YOLOv7Head):
def assign_by_gt_and_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
objectnesses: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
inputs_hw: Union[Tensor, tuple],
) -> dict:
"""Calculate the assigning results based on the gt and features
extracted by the detection head.
Args:
cls_scores (Sequence[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_priors * num_classes.
bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_priors * 4.
objectnesses (Sequence[Tensor]): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W)
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
inputs_hw (Union[Tensor, tuple]): Height and width of inputs size.
Returns:
dict[str, Tensor]: A dictionary of assigning results.
"""
device = cls_scores[0][0].device
head_preds = self._merge_predict_results(bbox_preds, objectnesses,
cls_scores)
batch_targets_normed = self._convert_gt_to_norm_format(
batch_gt_instances, batch_img_metas)
# yolov5_assign and simota_assign
assigner_results = self.assigner(
head_preds,
batch_targets_normed,
batch_img_metas[0]['batch_input_shape'],
self.priors_base_sizes,
self.grid_offset,
near_neighbor_thr=self.near_neighbor_thr)
# multi-level positive sample position.
mlvl_positive_infos = assigner_results['mlvl_positive_infos']
# assigned results with label and bboxes information.
mlvl_targets_normed = assigner_results['mlvl_targets_normed']
assign_results = []
for i in range(self.num_levels):
assign_results_feat = []
# no gt bbox matches anchor
if mlvl_positive_infos[i].shape[0] == 0:
for k in range(self.num_base_priors):
assign_results_feat.append({
'stride':
self.featmap_strides[i],
'grid_x_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'grid_y_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'img_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'class_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'retained_gt_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'prior_ind':
k
})
assign_results.append(assign_results_feat)
continue
# (batch_idx, prior_idx, x_scaled, y_scaled)
positive_info = mlvl_positive_infos[i]
targets_normed = mlvl_targets_normed[i]
priors_inds = positive_info[:, 1]
grid_x_inds = positive_info[:, 2]
grid_y_inds = positive_info[:, 3]
img_inds = targets_normed[:, 0]
class_inds = targets_normed[:, 1].long()
retained_gt_inds = self.get_gt_inds(
targets_normed, batch_targets_normed[0]).long()
for k in range(self.num_base_priors):
retained_inds = priors_inds == k
assign_results_prior = {
'stride': self.featmap_strides[i],
'grid_x_inds': grid_x_inds[retained_inds],
'grid_y_inds': grid_y_inds[retained_inds],
'img_inds': img_inds[retained_inds],
'class_inds': class_inds[retained_inds],
'retained_gt_inds': retained_gt_inds[retained_inds],
'prior_ind': k
}
assign_results_feat.append(assign_results_prior)
assign_results.append(assign_results_feat)
return assign_results
def get_gt_inds(self, assigned_target, gt_instance):
"""Judging which one gt_ind is assigned by comparing assign_target and
origin target.
Args:
assigned_target (Tensor(assign_nums,7)): YOLOv7 assigning results.
gt_instance (Tensor(gt_nums,7)): Normalized gt_instance, It
usually includes ``bboxes`` and ``labels`` attributes.
Returns:
gt_inds (Tensor): the index which one gt is assigned.
"""
gt_inds = torch.zeros(assigned_target.shape[0])
for i in range(assigned_target.shape[0]):
gt_inds[i] = ((assigned_target[i] == gt_instance).sum(
dim=1) == 7).nonzero().squeeze()
return gt_inds
def assign(self, batch_data_samples: Union[list, dict],
inputs_hw: Union[tuple, torch.Size]) -> dict:
"""Calculate assigning results.
This function is provided to the
`assigner_visualization.py` script.
Args:
batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
inputs_hw: Height and width of inputs size
Returns:
dict: A dictionary of assigning components.
"""
if isinstance(batch_data_samples, list):
raise NotImplementedError(
'assigning results_list is not implemented')
else:
# Fast version
cls_scores, bbox_preds, objectnesses = self(
batch_data_samples['feats'])
assign_inputs = (cls_scores, bbox_preds, objectnesses,
batch_data_samples['bboxes_labels'],
batch_data_samples['img_metas'], inputs_hw)
assign_results = self.assign_by_gt_and_feat(*assign_inputs)
return assign_results
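
A toy demonstration (not part of the repo) of the row-matching trick inside
`get_gt_inds` above: an assigned target row is traced back to its gt by
requiring all 7 columns to be equal. The rows below are made up.

import torch

gt_instance = torch.tensor([[0., 1., .1, .2, .3, .4, 0.],
                            [0., 2., .5, .5, .2, .2, 1.]])
assigned = gt_instance[1]  # pretend this row was assigned by the assigner

gt_ind = ((assigned == gt_instance).sum(dim=1) == 7).nonzero().squeeze()
print(int(gt_ind))  # 1: the assigned target came from gt row 1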

# File: mmyolo-main/projects/assigner_visualization/dense_heads/rtmdet_head_assigner.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
from mmdet.structures.bbox import distance2bbox
from mmdet.utils import InstanceList
from torch import Tensor
from mmyolo.models import RTMDetHead
from mmyolo.models.utils import gt_instances_preprocess
from mmyolo.registry import MODELS
@MODELS.register_module()
class RTMHeadAssigner(RTMDetHead):
def assign_by_gt_and_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
inputs_hw: Union[Tensor, tuple] = (640, 640)
) -> dict:
"""Calculate the assigning results based on the gt and features
extracted by the detection head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Decoded box for each scale
level with shape (N, num_anchors * 4, H, W) in
[tl_x, tl_y, br_x, br_y] format.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
inputs_hw (Union[Tensor, tuple]): Height and width of inputs size.
Returns:
dict[str, Tensor]: A dictionary of assigning results.
"""
num_imgs = len(batch_img_metas)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
# rtmdet's prior offset differs from others
prior_offset = self.prior_generator.offset
gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
gt_labels = gt_info[:, :, :1]
gt_bboxes = gt_info[:, :, 1:] # xyxy
pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()
device = cls_scores[0].device
# If the shape does not equal, generate new one
if featmap_sizes != self.featmap_sizes_train:
self.featmap_sizes_train = featmap_sizes
mlvl_priors_with_stride = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
self.flatten_priors_train = torch.cat(
mlvl_priors_with_stride, dim=0)
flatten_cls_scores = torch.cat([
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.cls_out_channels)
for cls_score in cls_scores
], 1).contiguous()
flatten_bboxes = torch.cat([
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
], 1)
flatten_bboxes = flatten_bboxes * self.flatten_priors_train[..., -1,
None]
flatten_bboxes = distance2bbox(self.flatten_priors_train[..., :2],
flatten_bboxes)
assigned_result = self.assigner(flatten_bboxes.detach(),
flatten_cls_scores.detach(),
self.flatten_priors_train, gt_labels,
gt_bboxes, pad_bbox_flag)
labels = assigned_result['assigned_labels'].reshape(-1)
bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 4)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
targets = bbox_targets[pos_inds]
gt_bboxes = gt_bboxes.squeeze(0)
matched_gt_inds = torch.tensor(
[((t == gt_bboxes).sum(dim=1) == t.shape[0]).nonzero()[0]
for t in targets],
device=device)
level_inds = torch.zeros_like(labels)
img_inds = torch.zeros_like(labels)
level_nums = [0] + [f[0] * f[1] for f in featmap_sizes]
for i in range(len(level_nums) - 1):
level_nums[i + 1] = level_nums[i] + level_nums[i + 1]
level_inds[level_nums[i]:level_nums[i + 1]] = i
level_inds_pos = level_inds[pos_inds]
img_inds = img_inds[pos_inds]
labels = labels[pos_inds]
inputs_hw = batch_img_metas[0]['batch_input_shape']
assign_results = []
for i in range(self.num_levels):
retained_inds = level_inds_pos == i
if not retained_inds.any():
assign_results_prior = {
'stride':
self.featmap_strides[i],
'grid_x_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'grid_y_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'img_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'class_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'retained_gt_inds':
torch.zeros([0], dtype=torch.int64).to(device),
'prior_ind':
0,
'offset':
prior_offset
}
else:
w = inputs_hw[1] // self.featmap_strides[i]
retained_pos_inds = pos_inds[retained_inds] - level_nums[i]
grid_y_inds = retained_pos_inds // w
grid_x_inds = retained_pos_inds - retained_pos_inds // w * w
assign_results_prior = {
'stride': self.featmap_strides[i],
'grid_x_inds': grid_x_inds,
'grid_y_inds': grid_y_inds,
'img_inds': img_inds[retained_inds],
'class_inds': labels[retained_inds],
'retained_gt_inds': matched_gt_inds[retained_inds],
'prior_ind': 0,
'offset': prior_offset
}
assign_results.append([assign_results_prior])
return assign_results
def assign(self, batch_data_samples: Union[list, dict],
inputs_hw: Union[tuple, torch.Size]) -> dict:
"""Calculate assigning results. This function is provided to the
`assigner_visualization.py` script.
Args:
batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
inputs_hw: Height and width of inputs size
Returns:
dict: A dictionary of assigning components.
"""
if isinstance(batch_data_samples, list):
raise NotImplementedError(
'assigning results_list is not implemented')
else:
# Fast version
cls_scores, bbox_preds = self(batch_data_samples['feats'])
assign_inputs = (cls_scores, bbox_preds,
batch_data_samples['bboxes_labels'],
batch_data_samples['img_metas'], inputs_hw)
assign_results = self.assign_by_gt_and_feat(*assign_inputs)
return assign_results
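
A numeric sketch (not part of the repo) of the decode step above: RTMDet
predicts per-side distances, which are scaled by the stride and turned into
a box around the prior point by mmdet's `distance2bbox`. The numbers below
are illustrative.

import torch
from mmdet.structures.bbox import distance2bbox

point = torch.tensor([[64., 64.]])       # prior center (x, y)
dist = torch.tensor([[1., 2., 3., 4.]])  # predicted (l, t, r, b), pre-scale
stride = 8.

box = distance2bbox(point, dist * stride)
print(box)  # tensor([[56., 48., 88., 96.]]) = (x-l*s, y-t*s, x+r*s, y+b*s)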

# File: mmyolo-main/projects/assigner_visualization/dense_heads/__init__.py

# Copyright (c) OpenMMLab. All rights reserved.
from .rtmdet_head_assigner import RTMHeadAssigner
from .yolov5_head_assigner import YOLOv5HeadAssigner
from .yolov7_head_assigner import YOLOv7HeadAssigner
from .yolov8_head_assigner import YOLOv8HeadAssigner
__all__ = [
'YOLOv5HeadAssigner', 'YOLOv7HeadAssigner', 'YOLOv8HeadAssigner',
'RTMHeadAssigner'
]

# File: mmyolo-main/projects/easydeploy/backbone/common.py

import torch
import torch.nn as nn
from torch import Tensor
class DeployC2f(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x: Tensor) -> Tensor:
x_main = self.main_conv(x)
x_main = [x_main, x_main[:, self.mid_channels:, ...]]
x_main.extend(blocks(x_main[-1]) for blocks in self.blocks)
x_main.pop(1)
return self.final_conv(torch.cat(x_main, 1))
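
A sketch (not part of the repo) of why the rewrite in DeployC2f above is
equivalent to the original C2f: keeping the full tensor and popping the
duplicated second-half slice yields the same concatenation as the
chunk-based split, while avoiding the export-unfriendly `chunk` op.

import torch

x_main = torch.randn(1, 8, 4, 4)
mid = 4  # assumed: half the channels, as in CSPLayerWithTwoConv

a, b = x_main.chunk(2, dim=1)  # original (train-time) split
block_out = b + 1.0            # stand-in for one bottleneck block output

orig = torch.cat([a, b, block_out], 1)
deploy_list = [x_main, x_main[:, mid:, ...], block_out]
deploy_list.pop(1)             # drop the duplicated second half
deploy = torch.cat(deploy_list, 1)
assert torch.equal(orig, deploy)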

# File: mmyolo-main/projects/easydeploy/backbone/__init__.py

# Copyright (c) OpenMMLab. All rights reserved.
from .common import DeployC2f
from .focus import DeployFocus, GConvFocus, NcnnFocus
__all__ = ['DeployFocus', 'NcnnFocus', 'GConvFocus', 'DeployC2f']

# File: mmyolo-main/projects/easydeploy/backbone/focus.py

# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class DeployFocus(nn.Module):
def __init__(self, orin_Focus: nn.Module):
super().__init__()
self.__dict__.update(orin_Focus.__dict__)
def forward(self, x: Tensor) -> Tensor:
batch_size, channel, height, width = x.shape
x = x.reshape(batch_size, channel, -1, 2, width)
x = x.reshape(batch_size, channel, x.shape[2], 2, -1, 2)
half_h = x.shape[2]
half_w = x.shape[4]
x = x.permute(0, 5, 3, 1, 2, 4)
x = x.reshape(batch_size, channel * 4, half_h, half_w)
return self.conv(x)
class NcnnFocus(nn.Module):
def __init__(self, orin_Focus: nn.Module):
super().__init__()
self.__dict__.update(orin_Focus.__dict__)
def forward(self, x: Tensor) -> Tensor:
batch_size, c, h, w = x.shape
        assert h % 2 == 0 and w % 2 == 0, \
            f'focus for yolox needs even feature height and width, ' \
            f'got {(h, w)}.'
x = x.reshape(batch_size, c * h, 1, w)
_b, _c, _h, _w = x.shape
g = _c // 2
# fuse to ncnn's shufflechannel
x = x.view(_b, g, 2, _h, _w)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(_b, -1, _h, _w)
x = x.reshape(_b, c * h * w, 1, 1)
_b, _c, _h, _w = x.shape
g = _c // 2
# fuse to ncnn's shufflechannel
x = x.view(_b, g, 2, _h, _w)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(_b, -1, _h, _w)
x = x.reshape(_b, c * 4, h // 2, w // 2)
return self.conv(x)
class GConvFocus(nn.Module):
def __init__(self, orin_Focus: nn.Module):
super().__init__()
device = next(orin_Focus.parameters()).device
self.weight1 = torch.tensor([[1., 0], [0, 0]]).expand(3, 1, 2,
2).to(device)
self.weight2 = torch.tensor([[0, 0], [1., 0]]).expand(3, 1, 2,
2).to(device)
self.weight3 = torch.tensor([[0, 1.], [0, 0]]).expand(3, 1, 2,
2).to(device)
self.weight4 = torch.tensor([[0, 0], [0, 1.]]).expand(3, 1, 2,
2).to(device)
self.__dict__.update(orin_Focus.__dict__)
def forward(self, x: Tensor) -> Tensor:
conv1 = F.conv2d(x, self.weight1, stride=2, groups=3)
conv2 = F.conv2d(x, self.weight2, stride=2, groups=3)
conv3 = F.conv2d(x, self.weight3, stride=2, groups=3)
conv4 = F.conv2d(x, self.weight4, stride=2, groups=3)
return self.conv(torch.cat([conv1, conv2, conv3, conv4], dim=1))
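
A quick equivalence check (not part of the repo): the reshape/permute in
DeployFocus above implements the same space-to-depth rearrangement as the
strided slicing used by the original Focus layer in mmdet.

import torch

b, c, h, w = 1, 3, 4, 6
x = torch.arange(b * c * h * w, dtype=torch.float32).reshape(b, c, h, w)

# reshape/permute path, as in DeployFocus.forward
y = x.reshape(b, c, h // 2, 2, w // 2, 2)
y = y.permute(0, 5, 3, 1, 2, 4).reshape(b, c * 4, h // 2, w // 2)

# slice path, as in mmdet's Focus
ref = torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2],
                 x[..., ::2, 1::2], x[..., 1::2, 1::2]), dim=1)
assert torch.equal(y, ref)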

# File: mmyolo-main/projects/easydeploy/tools/build_engine.py

import argparse
import ast
from pathlib import Path
from typing import List, Optional, Tuple, Union
try:
import tensorrt as trt
except Exception:
trt = None
import warnings
import numpy as np
import torch
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
class EngineBuilder:
def __init__(
self,
checkpoint: Union[str, Path],
opt_shape: Union[Tuple, List] = (1, 3, 640, 640),
device: Optional[Union[str, int, torch.device]] = None) -> None:
checkpoint = Path(checkpoint) if isinstance(checkpoint,
str) else checkpoint
assert checkpoint.exists() and checkpoint.suffix == '.onnx'
if isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device(f'cuda:{device}')
self.checkpoint = checkpoint
self.opt_shape = np.array(opt_shape, dtype=np.float32)
self.device = device
def __build_engine(self,
scale: Optional[List[List]] = None,
fp16: bool = True,
with_profiling: bool = True) -> None:
logger = trt.Logger(trt.Logger.WARNING)
trt.init_libnvinfer_plugins(logger, namespace='')
builder = trt.Builder(logger)
config = builder.create_builder_config()
config.max_workspace_size = torch.cuda.get_device_properties(
self.device).total_memory
flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
network = builder.create_network(flag)
parser = trt.OnnxParser(network, logger)
if not parser.parse_from_file(str(self.checkpoint)):
raise RuntimeError(
f'failed to load ONNX file: {str(self.checkpoint)}')
inputs = [network.get_input(i) for i in range(network.num_inputs)]
outputs = [network.get_output(i) for i in range(network.num_outputs)]
profile = None
dshape = -1 in network.get_input(0).shape
if dshape:
profile = builder.create_optimization_profile()
if scale is None:
scale = np.array(
[[1, 1, 0.5, 0.5], [1, 1, 1, 1], [4, 1, 1.5, 1.5]],
dtype=np.float32)
scale = (self.opt_shape * scale).astype(np.int32)
elif isinstance(scale, List):
scale = np.array(scale, dtype=np.int32)
assert scale.shape[0] == 3, 'Input a wrong scale list'
else:
raise NotImplementedError
for inp in inputs:
logger.log(
trt.Logger.WARNING,
f'input "{inp.name}" with shape{inp.shape} {inp.dtype}')
if dshape:
profile.set_shape(inp.name, *scale)
for out in outputs:
logger.log(
trt.Logger.WARNING,
f'output "{out.name}" with shape{out.shape} {out.dtype}')
if fp16 and builder.platform_has_fast_fp16:
config.set_flag(trt.BuilderFlag.FP16)
self.weight = self.checkpoint.with_suffix('.engine')
if dshape:
config.add_optimization_profile(profile)
if with_profiling:
config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED
with builder.build_engine(network, config) as engine:
self.weight.write_bytes(engine.serialize())
logger.log(
            trt.Logger.WARNING, 'Build TensorRT engine finished.\n'
            f'Saved to {str(self.weight.absolute())}')
def build(self,
scale: Optional[List[List]] = None,
fp16: bool = True,
with_profiling=True):
self.__build_engine(scale, fp16, with_profiling)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--img-size',
nargs='+',
type=int,
default=[640, 640],
help='Image size of height and width')
parser.add_argument(
'--device', type=str, default='cuda:0', help='TensorRT builder device')
parser.add_argument(
'--scales',
type=str,
default='[[1,3,640,640],[1,3,640,640],[1,3,640,640]]',
help='Input scales for build dynamic input shape engine')
parser.add_argument(
'--fp16', action='store_true', help='Build model with fp16 mode')
args = parser.parse_args()
args.img_size *= 2 if len(args.img_size) == 1 else 1
return args
def main(args):
img_size = (1, 3, *args.img_size)
try:
        # Parse the scales string as a Python literal (safer than eval)
        scales = ast.literal_eval(args.scales)
    except Exception:
        print('Input scales is not a valid Python literal')
        print('Setting scales to default None')
scales = None
builder = EngineBuilder(args.checkpoint, img_size, args.device)
builder.build(scales, fp16=args.fp16)
if __name__ == '__main__':
args = parse_args()
main(args)
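
A hypothetical usage sketch of EngineBuilder above (requires TensorRT and a
CUDA device; 'end2end.onnx' is a placeholder path, and the three shape rows
are the min/opt/max profile for a dynamic-shape model):

builder = EngineBuilder(
    'end2end.onnx', opt_shape=(1, 3, 640, 640), device='cuda:0')
builder.build(
    scale=[[1, 3, 320, 320], [1, 3, 640, 640], [4, 3, 960, 960]],
    fp16=True)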

# File: mmyolo-main/projects/easydeploy/tools/export.py

import argparse
import os
import warnings
from io import BytesIO
import onnx
import torch
from mmdet.apis import init_detector
from mmengine.config import ConfigDict
from mmengine.utils.path import mkdir_or_exist
from mmyolo.utils import register_all_modules
from projects.easydeploy.model import DeployModel
warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)
warnings.filterwarnings(action='ignore', category=torch.jit.ScriptWarning)
warnings.filterwarnings(action='ignore', category=UserWarning)
warnings.filterwarnings(action='ignore', category=FutureWarning)
warnings.filterwarnings(action='ignore', category=ResourceWarning)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--model-only', action='store_true', help='Export model only')
parser.add_argument(
'--work-dir', default='./work_dir', help='Path to save export model')
parser.add_argument(
'--img-size',
nargs='+',
type=int,
default=[640, 640],
help='Image size of height and width')
parser.add_argument('--batch-size', type=int, default=1, help='Batch size')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--simplify',
action='store_true',
help='Simplify onnx model by onnx-sim')
parser.add_argument(
'--opset', type=int, default=11, help='ONNX opset version')
parser.add_argument(
'--backend', type=int, default=1, help='Backend for export onnx')
parser.add_argument(
'--pre-topk',
type=int,
default=1000,
help='Postprocess pre topk bboxes feed into NMS')
parser.add_argument(
'--keep-topk',
type=int,
default=100,
help='Postprocess keep topk bboxes out of NMS')
parser.add_argument(
'--iou-threshold',
type=float,
default=0.65,
help='IoU threshold for NMS')
parser.add_argument(
'--score-threshold',
type=float,
default=0.25,
help='Score threshold for NMS')
args = parser.parse_args()
args.img_size *= 2 if len(args.img_size) == 1 else 1
return args
def build_model_from_cfg(config_path, checkpoint_path, device):
model = init_detector(config_path, checkpoint_path, device=device)
model.eval()
return model
def main():
args = parse_args()
register_all_modules()
mkdir_or_exist(args.work_dir)
if args.model_only:
postprocess_cfg = None
output_names = None
else:
postprocess_cfg = ConfigDict(
pre_top_k=args.pre_topk,
keep_top_k=args.keep_topk,
iou_threshold=args.iou_threshold,
score_threshold=args.score_threshold,
backend=args.backend)
output_names = ['num_dets', 'boxes', 'scores', 'labels']
baseModel = build_model_from_cfg(args.config, args.checkpoint, args.device)
deploy_model = DeployModel(
baseModel=baseModel, postprocess_cfg=postprocess_cfg)
deploy_model.eval()
fake_input = torch.randn(args.batch_size, 3,
*args.img_size).to(args.device)
# dry run
deploy_model(fake_input)
save_onnx_path = os.path.join(args.work_dir, 'end2end.onnx')
# export onnx
with BytesIO() as f:
torch.onnx.export(
deploy_model,
fake_input,
f,
input_names=['images'],
output_names=output_names,
opset_version=args.opset)
f.seek(0)
onnx_model = onnx.load(f)
onnx.checker.check_model(onnx_model)
    # Fix TensorRT ONNX output shapes; this is only for readability
    # when viewing the exported graph
if args.backend in (2, 3):
shapes = [
args.batch_size, 1, args.batch_size, args.keep_topk, 4,
args.batch_size, args.keep_topk, args.batch_size,
args.keep_topk
]
for i in onnx_model.graph.output:
for j in i.type.tensor_type.shape.dim:
j.dim_param = str(shapes.pop(0))
if args.simplify:
try:
import onnxsim
onnx_model, check = onnxsim.simplify(onnx_model)
            assert check, 'Simplified ONNX model could not be validated'
except Exception as e:
print(f'Simplify failure: {e}')
onnx.save(onnx_model, save_onnx_path)
    print(f'ONNX export success, saved to {save_onnx_path}')
if __name__ == '__main__':
main()
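
A self-contained sketch (not part of the repo) of the export pattern used in
main() above: trace to an in-memory buffer, then load and validate the graph.
A toy Conv2d stands in for the detector so this runs without mmyolo weights.

from io import BytesIO

import onnx
import torch

toy = torch.nn.Conv2d(3, 8, 3)
with BytesIO() as f:
    torch.onnx.export(toy, torch.randn(1, 3, 64, 64), f, opset_version=11)
    f.seek(0)
    onnx_model = onnx.load(f)
onnx.checker.check_model(onnx_model)  # raises if the exported graph is invalid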

# File: mmyolo-main/projects/easydeploy/tools/image-demo.py

# Copyright (c) OpenMMLab. All rights reserved.
from projects.easydeploy.model import ORTWrapper, TRTWrapper # isort:skip
import os
import random
from argparse import ArgumentParser
import cv2
import mmcv
import numpy as np
import torch
from mmcv.transforms import Compose
from mmdet.utils import get_test_pipeline_cfg
from mmengine.config import Config, ConfigDict
from mmengine.utils import ProgressBar, path
from mmyolo.utils import register_all_modules
from mmyolo.utils.misc import get_file_list
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'img', help='Image path, include image file, dir and URL.')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--out-dir', default='./output', help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--show', action='store_true', help='Show the detection results')
args = parser.parse_args()
return args
def preprocess(config):
data_preprocess = config.get('model', {}).get('data_preprocessor', {})
mean = data_preprocess.get('mean', [0., 0., 0.])
std = data_preprocess.get('std', [1., 1., 1.])
mean = torch.tensor(mean, dtype=torch.float32).reshape(1, 3, 1, 1)
std = torch.tensor(std, dtype=torch.float32).reshape(1, 3, 1, 1)
class PreProcess(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = x[None].float()
x -= mean.to(x.device)
x /= std.to(x.device)
return x
return PreProcess().eval()
def main():
args = parse_args()
# register all modules in mmdet into the registries
register_all_modules()
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(1000)]
# build the model from a config file and a checkpoint file
if args.checkpoint.endswith('.onnx'):
model = ORTWrapper(args.checkpoint, args.device)
elif args.checkpoint.endswith('.engine') or args.checkpoint.endswith(
'.plan'):
model = TRTWrapper(args.checkpoint, args.device)
else:
raise NotImplementedError
model.to(args.device)
cfg = Config.fromfile(args.config)
class_names = cfg.get('class_name')
test_pipeline = get_test_pipeline_cfg(cfg)
test_pipeline[0] = ConfigDict({'type': 'mmdet.LoadImageFromNDArray'})
test_pipeline = Compose(test_pipeline)
pre_pipeline = preprocess(cfg)
if not args.show:
path.mkdir_or_exist(args.out_dir)
# get file list
files, source_type = get_file_list(args.img)
# start detector inference
progress_bar = ProgressBar(len(files))
for i, file in enumerate(files):
bgr = mmcv.imread(file)
rgb = mmcv.imconvert(bgr, 'bgr', 'rgb')
data, samples = test_pipeline(dict(img=rgb, img_id=i)).values()
pad_param = samples.get('pad_param',
np.array([0, 0, 0, 0], dtype=np.float32))
h, w = samples.get('ori_shape', rgb.shape[:2])
pad_param = torch.asarray(
[pad_param[2], pad_param[0], pad_param[2], pad_param[0]],
device=args.device)
scale_factor = samples.get('scale_factor', [1., 1])
scale_factor = torch.asarray(scale_factor * 2, device=args.device)
data = pre_pipeline(data).to(args.device)
result = model(data)
if source_type['is_dir']:
filename = os.path.relpath(file, args.img).replace('/', '_')
else:
filename = os.path.basename(file)
out_file = None if args.show else os.path.join(args.out_dir, filename)
# Get candidate predict info by num_dets
num_dets, bboxes, scores, labels = result
scores = scores[0, :num_dets]
bboxes = bboxes[0, :num_dets]
labels = labels[0, :num_dets]
bboxes -= pad_param
bboxes /= scale_factor
bboxes[:, 0::2].clamp_(0, w)
bboxes[:, 1::2].clamp_(0, h)
bboxes = bboxes.round().int()
for (bbox, score, label) in zip(bboxes, scores, labels):
bbox = bbox.tolist()
color = colors[label]
if class_names is not None:
label_name = class_names[label]
name = f'cls:{label_name}_score:{score:0.4f}'
else:
name = f'cls:{label}_score:{score:0.4f}'
cv2.rectangle(bgr, bbox[:2], bbox[2:], color, 2)
cv2.putText(
bgr,
name, (bbox[0], bbox[1] - 2),
cv2.FONT_HERSHEY_SIMPLEX,
2.0, [225, 255, 255],
thickness=3)
if args.show:
mmcv.imshow(bgr, 'result', 0)
else:
mmcv.imwrite(bgr, out_file)
progress_bar.update()
if __name__ == '__main__':
main()
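
A toy numeric check (not part of the repo) of the box back-mapping above:
subtract the letterbox padding, then divide by the resize scale to return to
original-image coordinates. The pads and scale below are made up; the real
script builds them from `pad_param` and `scale_factor` in the sample meta.

import torch

bboxes = torch.tensor([[170., 140., 490., 460.]])  # box in padded space
pad = torch.tensor([10., 20., 10., 20.])           # (left, top, left, top)
scale = torch.tensor([0.5, 0.5, 0.5, 0.5])         # resize ratio (w, h, w, h)

bboxes = (bboxes - pad) / scale
print(bboxes)  # tensor([[320., 240., 960., 880.]]) in original-image pixels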

# File: mmyolo-main/projects/easydeploy/bbox_code/bbox_coder.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from torch import Tensor
def yolov5_bbox_decoder(priors: Tensor, bbox_preds: Tensor,
stride: Tensor) -> Tensor:
bbox_preds = bbox_preds.sigmoid()
x_center = (priors[..., 0] + priors[..., 2]) * 0.5
y_center = (priors[..., 1] + priors[..., 3]) * 0.5
w = priors[..., 2] - priors[..., 0]
h = priors[..., 3] - priors[..., 1]
x_center_pred = (bbox_preds[..., 0] - 0.5) * 2 * stride + x_center
y_center_pred = (bbox_preds[..., 1] - 0.5) * 2 * stride + y_center
w_pred = (bbox_preds[..., 2] * 2)**2 * w
h_pred = (bbox_preds[..., 3] * 2)**2 * h
decoded_bboxes = torch.stack(
[x_center_pred, y_center_pred, w_pred, h_pred], dim=-1)
return decoded_bboxes
def rtmdet_bbox_decoder(priors: Tensor, bbox_preds: Tensor,
stride: Optional[Tensor]) -> Tensor:
stride = stride[None, :, None]
bbox_preds *= stride
tl_x = (priors[..., 0] - bbox_preds[..., 0])
tl_y = (priors[..., 1] - bbox_preds[..., 1])
br_x = (priors[..., 0] + bbox_preds[..., 2])
br_y = (priors[..., 1] + bbox_preds[..., 3])
decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)
return decoded_bboxes
def yolox_bbox_decoder(priors: Tensor, bbox_preds: Tensor,
stride: Optional[Tensor]) -> Tensor:
stride = stride[None, :, None]
xys = (bbox_preds[..., :2] * stride) + priors
whs = bbox_preds[..., 2:].exp() * stride
decoded_bboxes = torch.cat([xys, whs], -1)
return decoded_bboxes
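
A numeric check (not part of the repo) of `yolov5_bbox_decoder` above: zero
logits must reproduce the prior box, since sigmoid(0) = 0.5 is the neutral
value of the offset and scale terms. The output is in (cx, cy, w, h) form;
the NMS wrappers later convert it with `box_coding=1`.

import torch

priors = torch.tensor([[0., 0., 16., 16.]])  # one anchor in xyxy
bbox_preds = torch.zeros(1, 4)               # raw (pre-sigmoid) predictions
stride = torch.tensor([8.])

decoded = yolov5_bbox_decoder(priors, bbox_preds, stride)
print(decoded)  # tensor([[ 8.,  8., 16., 16.]]) -> (cx, cy, w, h) of the prior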

# File: mmyolo-main/projects/easydeploy/bbox_code/__init__.py

# Copyright (c) OpenMMLab. All rights reserved.
from .bbox_coder import (rtmdet_bbox_decoder, yolov5_bbox_decoder,
yolox_bbox_decoder)
__all__ = ['yolov5_bbox_decoder', 'rtmdet_bbox_decoder', 'yolox_bbox_decoder']

# File: mmyolo-main/projects/easydeploy/model/backendwrapper.py

import warnings
from collections import namedtuple
from functools import partial
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import onnxruntime
try:
import tensorrt as trt
except Exception:
trt = None
import torch
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
class TRTWrapper(torch.nn.Module):
dtype_mapping = {}
def __init__(self, weight: Union[str, Path],
device: Optional[torch.device]):
super().__init__()
weight = Path(weight) if isinstance(weight, str) else weight
assert weight.exists() and weight.suffix in ('.engine', '.plan')
if isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device(f'cuda:{device}')
self.weight = weight
self.device = device
self.stream = torch.cuda.Stream(device=device)
self.__update_mapping()
self.__init_engine()
self.__init_bindings()
def __update_mapping(self):
self.dtype_mapping.update({
trt.bool: torch.bool,
trt.int8: torch.int8,
trt.int32: torch.int32,
trt.float16: torch.float16,
trt.float32: torch.float32
})
def __init_engine(self):
logger = trt.Logger(trt.Logger.ERROR)
self.log = partial(logger.log, trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, namespace='')
self.logger = logger
with trt.Runtime(logger) as runtime:
model = runtime.deserialize_cuda_engine(self.weight.read_bytes())
context = model.create_execution_context()
names = [model.get_binding_name(i) for i in range(model.num_bindings)]
num_inputs, num_outputs = 0, 0
for i in range(model.num_bindings):
if model.binding_is_input(i):
num_inputs += 1
else:
num_outputs += 1
self.is_dynamic = -1 in model.get_binding_shape(0)
self.model = model
self.context = context
self.input_names = names[:num_inputs]
self.output_names = names[num_inputs:]
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.num_bindings = num_inputs + num_outputs
self.bindings: List[int] = [0] * self.num_bindings
def __init_bindings(self):
Binding = namedtuple('Binding', ('name', 'dtype', 'shape'))
inputs_info = []
outputs_info = []
for i, name in enumerate(self.input_names):
assert self.model.get_binding_name(i) == name
dtype = self.dtype_mapping[self.model.get_binding_dtype(i)]
shape = tuple(self.model.get_binding_shape(i))
inputs_info.append(Binding(name, dtype, shape))
for i, name in enumerate(self.output_names):
i += self.num_inputs
assert self.model.get_binding_name(i) == name
dtype = self.dtype_mapping[self.model.get_binding_dtype(i)]
shape = tuple(self.model.get_binding_shape(i))
outputs_info.append(Binding(name, dtype, shape))
self.inputs_info = inputs_info
self.outputs_info = outputs_info
if not self.is_dynamic:
self.output_tensor = [
torch.empty(o.shape, dtype=o.dtype, device=self.device)
for o in outputs_info
]
def forward(self, *inputs):
assert len(inputs) == self.num_inputs
contiguous_inputs: List[torch.Tensor] = [
i.contiguous() for i in inputs
]
for i in range(self.num_inputs):
self.bindings[i] = contiguous_inputs[i].data_ptr()
if self.is_dynamic:
self.context.set_binding_shape(
i, tuple(contiguous_inputs[i].shape))
# create output tensors
outputs: List[torch.Tensor] = []
for i in range(self.num_outputs):
j = i + self.num_inputs
if self.is_dynamic:
shape = tuple(self.context.get_binding_shape(j))
                # `output_dtypes` was never defined on this class; use the
                # dtype recorded in `outputs_info` during `__init_bindings`.
                output = torch.empty(
                    size=shape,
                    dtype=self.outputs_info[i].dtype,
                    device=self.device)
else:
output = self.output_tensor[i]
outputs.append(output)
self.bindings[j] = output.data_ptr()
self.context.execute_async_v2(self.bindings, self.stream.cuda_stream)
self.stream.synchronize()
return tuple(outputs)
class ORTWrapper(torch.nn.Module):
def __init__(self, weight: Union[str, Path],
device: Optional[torch.device]):
super().__init__()
weight = Path(weight) if isinstance(weight, str) else weight
assert weight.exists() and weight.suffix == '.onnx'
if isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device(f'cuda:{device}')
self.weight = weight
self.device = device
self.__init_session()
self.__init_bindings()
def __init_session(self):
providers = ['CPUExecutionProvider']
if 'cuda' in self.device.type:
providers.insert(0, 'CUDAExecutionProvider')
session = onnxruntime.InferenceSession(
str(self.weight), providers=providers)
self.session = session
def __init_bindings(self):
Binding = namedtuple('Binding', ('name', 'dtype', 'shape'))
inputs_info = []
outputs_info = []
self.is_dynamic = False
for i, tensor in enumerate(self.session.get_inputs()):
if any(not isinstance(i, int) for i in tensor.shape):
self.is_dynamic = True
inputs_info.append(
Binding(tensor.name, tensor.type, tuple(tensor.shape)))
for i, tensor in enumerate(self.session.get_outputs()):
outputs_info.append(
Binding(tensor.name, tensor.type, tuple(tensor.shape)))
self.inputs_info = inputs_info
self.outputs_info = outputs_info
self.num_inputs = len(inputs_info)
def forward(self, *inputs):
assert len(inputs) == self.num_inputs
contiguous_inputs: List[np.ndarray] = [
i.contiguous().cpu().numpy() for i in inputs
]
if not self.is_dynamic:
# make sure input shape is right for static input shape
for i in range(self.num_inputs):
assert contiguous_inputs[i].shape == self.inputs_info[i].shape
outputs = self.session.run([o.name for o in self.outputs_info], {
j.name: contiguous_inputs[i]
for i, j in enumerate(self.inputs_info)
})
return tuple(torch.from_numpy(o).to(self.device) for o in outputs)
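
A hypothetical usage sketch of ORTWrapper above ('end2end.onnx' is a
placeholder path, and the four-output unpacking assumes the model was
exported end-to-end with the NMS postprocessing attached):

import torch

model = ORTWrapper('end2end.onnx', device=torch.device('cpu'))
num_dets, boxes, scores, labels = model(torch.randn(1, 3, 640, 640))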

# File: mmyolo-main/projects/easydeploy/model/model.py

# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import List, Optional
import torch
import torch.nn as nn
from mmdet.models.backbones.csp_darknet import Focus
from mmengine.config import ConfigDict
from torch import Tensor
from mmyolo.models import RepVGGBlock
from mmyolo.models.dense_heads import (RTMDetHead, YOLOv5Head, YOLOv7Head,
YOLOXHead)
from mmyolo.models.layers import CSPLayerWithTwoConv
from ..backbone import DeployC2f, DeployFocus, GConvFocus, NcnnFocus
from ..bbox_code import (rtmdet_bbox_decoder, yolov5_bbox_decoder,
yolox_bbox_decoder)
from ..nms import batched_nms, efficient_nms, onnx_nms
class DeployModel(nn.Module):
def __init__(self,
baseModel: nn.Module,
postprocess_cfg: Optional[ConfigDict] = None):
super().__init__()
self.baseModel = baseModel
if postprocess_cfg is None:
self.with_postprocess = False
else:
self.with_postprocess = True
self.baseHead = baseModel.bbox_head
self.__init_sub_attributes()
self.detector_type = type(self.baseHead)
self.pre_top_k = postprocess_cfg.get('pre_top_k', 1000)
self.keep_top_k = postprocess_cfg.get('keep_top_k', 100)
self.iou_threshold = postprocess_cfg.get('iou_threshold', 0.65)
self.score_threshold = postprocess_cfg.get('score_threshold', 0.25)
self.backend = postprocess_cfg.get('backend', 1)
self.__switch_deploy()
def __init_sub_attributes(self):
self.bbox_decoder = self.baseHead.bbox_coder.decode
self.prior_generate = self.baseHead.prior_generator.grid_priors
self.num_base_priors = self.baseHead.num_base_priors
self.featmap_strides = self.baseHead.featmap_strides
self.num_classes = self.baseHead.num_classes
def __switch_deploy(self):
for layer in self.baseModel.modules():
if isinstance(layer, RepVGGBlock):
layer.switch_to_deploy()
elif isinstance(layer, Focus):
                # backend 1: onnxruntime, 2: tensorrt8, 3: tensorrt7
if self.backend in (1, 2, 3):
self.baseModel.backbone.stem = DeployFocus(layer)
# ncnn
elif self.backend == 4:
self.baseModel.backbone.stem = NcnnFocus(layer)
# switch focus to group conv
else:
self.baseModel.backbone.stem = GConvFocus(layer)
elif isinstance(layer, CSPLayerWithTwoConv):
setattr(layer, '__class__', DeployC2f)
def pred_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
objectnesses: Optional[List[Tensor]] = None,
**kwargs):
assert len(cls_scores) == len(bbox_preds)
dtype = cls_scores[0].dtype
device = cls_scores[0].device
nms_func = self.select_nms()
if self.detector_type in (YOLOv5Head, YOLOv7Head):
bbox_decoder = yolov5_bbox_decoder
elif self.detector_type is RTMDetHead:
bbox_decoder = rtmdet_bbox_decoder
elif self.detector_type is YOLOXHead:
bbox_decoder = yolox_bbox_decoder
else:
bbox_decoder = self.bbox_decoder
num_imgs = cls_scores[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generate(
featmap_sizes, dtype=dtype, device=device)
flatten_priors = torch.cat(mlvl_priors)
mlvl_strides = [
flatten_priors.new_full(
(featmap_size[0] * featmap_size[1] * self.num_base_priors, ),
stride) for featmap_size, stride in zip(
featmap_sizes, self.featmap_strides)
]
flatten_stride = torch.cat(mlvl_strides)
# flatten cls_scores, bbox_preds and objectness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.num_classes)
for cls_score in cls_scores
]
cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
]
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
if objectnesses is not None:
flatten_objectness = [
objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
for objectness in objectnesses
]
flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()
cls_scores = cls_scores * (flatten_objectness.unsqueeze(-1))
scores = cls_scores
bboxes = bbox_decoder(flatten_priors[None], flatten_bbox_preds,
flatten_stride)
return nms_func(bboxes, scores, self.keep_top_k, self.iou_threshold,
self.score_threshold, self.pre_top_k, self.keep_top_k)
def select_nms(self):
if self.backend == 1:
nms_func = onnx_nms
elif self.backend == 2:
nms_func = efficient_nms
elif self.backend == 3:
nms_func = batched_nms
else:
raise NotImplementedError
if type(self.baseHead) in (YOLOv5Head, YOLOv7Head, YOLOXHead):
nms_func = partial(nms_func, box_coding=1)
return nms_func
def forward(self, inputs: Tensor):
neck_outputs = self.baseModel(inputs)
if self.with_postprocess:
return self.pred_by_feat(*neck_outputs)
else:
return neck_outputs

# File: mmyolo-main/projects/easydeploy/model/__init__.py

# Copyright (c) OpenMMLab. All rights reserved.
from .backendwrapper import ORTWrapper, TRTWrapper
from .model import DeployModel
__all__ = ['DeployModel', 'TRTWrapper', 'ORTWrapper']

# File: mmyolo-main/projects/easydeploy/nms/ort_nms.py

# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import Tensor
_XYWH2XYXY = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0],
[-0.5, 0.0, 0.5, 0.0], [0.0, -0.5, 0.0, 0.5]],
dtype=torch.float32)
def select_nms_index(scores: Tensor,
boxes: Tensor,
nms_index: Tensor,
batch_size: int,
keep_top_k: int = -1):
batch_inds, cls_inds = nms_index[:, 0], nms_index[:, 1]
box_inds = nms_index[:, 2]
scores = scores[batch_inds, cls_inds, box_inds].unsqueeze(1)
boxes = boxes[batch_inds, box_inds, ...]
dets = torch.cat([boxes, scores], dim=1)
batched_dets = dets.unsqueeze(0).repeat(batch_size, 1, 1)
batch_template = torch.arange(
0, batch_size, dtype=batch_inds.dtype, device=batch_inds.device)
batched_dets = batched_dets.where(
(batch_inds == batch_template.unsqueeze(1)).unsqueeze(-1),
batched_dets.new_zeros(1))
batched_labels = cls_inds.unsqueeze(0).repeat(batch_size, 1)
batched_labels = batched_labels.where(
(batch_inds == batch_template.unsqueeze(1)),
batched_labels.new_ones(1) * -1)
N = batched_dets.shape[0]
batched_dets = torch.cat((batched_dets, batched_dets.new_zeros((N, 1, 5))),
1)
batched_labels = torch.cat((batched_labels, -batched_labels.new_ones(
(N, 1))), 1)
_, topk_inds = batched_dets[:, :, -1].sort(dim=1, descending=True)
topk_batch_inds = torch.arange(
batch_size, dtype=topk_inds.dtype,
device=topk_inds.device).view(-1, 1)
batched_dets = batched_dets[topk_batch_inds, topk_inds, ...]
batched_labels = batched_labels[topk_batch_inds, topk_inds, ...]
batched_dets, batched_scores = batched_dets.split([4, 1], 2)
batched_scores = batched_scores.squeeze(-1)
num_dets = (batched_scores > 0).sum(1, keepdim=True)
return num_dets, batched_dets, batched_scores, batched_labels
class ONNXNMSop(torch.autograd.Function):
@staticmethod
def forward(
ctx,
boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: Tensor = torch.tensor([100]),
iou_threshold: Tensor = torch.tensor([0.5]),
score_threshold: Tensor = torch.tensor([0.05])
) -> Tensor:
device = boxes.device
batch = scores.shape[0]
num_det = 20
batches = torch.randint(0, batch, (num_det, )).sort()[0].to(device)
idxs = torch.arange(100, 100 + num_det).to(device)
zeros = torch.zeros((num_det, ), dtype=torch.int64).to(device)
selected_indices = torch.cat([batches[None], zeros[None], idxs[None]],
0).T.contiguous()
selected_indices = selected_indices.to(torch.int64)
return selected_indices
@staticmethod
def symbolic(
g,
boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: Tensor = torch.tensor([100]),
iou_threshold: Tensor = torch.tensor([0.5]),
score_threshold: Tensor = torch.tensor([0.05]),
):
return g.op(
'NonMaxSuppression',
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
outputs=1)
def onnx_nms(
boxes: torch.Tensor,
scores: torch.Tensor,
max_output_boxes_per_class: int = 100,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = 100,
box_coding: int = 0,
):
max_output_boxes_per_class = torch.tensor([max_output_boxes_per_class])
iou_threshold = torch.tensor([iou_threshold])
score_threshold = torch.tensor([score_threshold])
batch_size, _, _ = scores.shape
if box_coding == 1:
boxes = boxes @ (_XYWH2XYXY.to(boxes.device))
scores = scores.transpose(1, 2).contiguous()
selected_indices = ONNXNMSop.apply(boxes, scores,
max_output_boxes_per_class,
iou_threshold, score_threshold)
num_dets, batched_dets, batched_scores, batched_labels = select_nms_index(
scores, boxes, selected_indices, batch_size, keep_top_k=keep_top_k)
return num_dets, batched_dets, batched_scores, batched_labels.to(
torch.int32)
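# --- Minimal eager-mode shape check (illustrative; names and shapes are
# assumptions, not part of the original module). `ONNXNMSop.forward` is a
# tracing stub that fabricates box indices in [100, 120), so the demo input
# must contain at least 120 boxes for eager execution to succeed. ---
if __name__ == '__main__':
    demo_boxes = torch.rand(1, 200, 4)  # [N, num_boxes, 4], xyxy format
    demo_scores = torch.rand(1, 200, 80)  # [N, num_boxes, num_classes]
    demo_outputs = onnx_nms(demo_boxes, demo_scores)
    # expected: num_dets [1, 1], boxes [1, 21, 4], scores [1, 21],
    # labels [1, 21], given the 20 fabricated indices plus one padding slot
    print([tuple(o.shape) for o in demo_outputs])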
| 4,445
| 35.146341
| 79
|
py
|
mmyolo
|
mmyolo-main/projects/easydeploy/nms/trt_nms.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import Tensor
_XYWH2XYXY = torch.tensor([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0],
[-0.5, 0.0, 0.5, 0.0], [0.0, -0.5, 0.0, 0.5]],
dtype=torch.float32)
class TRTEfficientNMSop(torch.autograd.Function):
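    # `forward` returns randomly filled tensors with the correct shapes and
    # dtypes purely so ONNX export can infer output signatures; the
    # TRT::EfficientNMS_TRT plugin bound in `symbolic` does the real NMS
    # when the engine runs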
@staticmethod
def forward(
ctx,
boxes: Tensor,
scores: Tensor,
background_class: int = -1,
box_coding: int = 0,
iou_threshold: float = 0.45,
max_output_boxes: int = 100,
plugin_version: str = '1',
score_activation: int = 0,
score_threshold: float = 0.25,
):
batch_size, _, num_classes = scores.shape
num_det = torch.randint(
0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
det_boxes = torch.randn(batch_size, max_output_boxes, 4)
det_scores = torch.randn(batch_size, max_output_boxes)
det_classes = torch.randint(
0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
return num_det, det_boxes, det_scores, det_classes
@staticmethod
def symbolic(g,
boxes: Tensor,
scores: Tensor,
background_class: int = -1,
box_coding: int = 0,
iou_threshold: float = 0.45,
max_output_boxes: int = 100,
plugin_version: str = '1',
score_activation: int = 0,
score_threshold: float = 0.25):
out = g.op(
'TRT::EfficientNMS_TRT',
boxes,
scores,
background_class_i=background_class,
box_coding_i=box_coding,
iou_threshold_f=iou_threshold,
max_output_boxes_i=max_output_boxes,
plugin_version_s=plugin_version,
score_activation_i=score_activation,
score_threshold_f=score_threshold,
outputs=4)
num_det, det_boxes, det_scores, det_classes = out
return num_det, det_boxes, det_scores, det_classes
class TRTbatchedNMSop(torch.autograd.Function):
"""TensorRT NMS operation."""
@staticmethod
def forward(
ctx,
boxes: Tensor,
scores: Tensor,
plugin_version: str = '1',
shareLocation: int = 1,
backgroundLabelId: int = -1,
numClasses: int = 80,
topK: int = 1000,
keepTopK: int = 100,
scoreThreshold: float = 0.25,
iouThreshold: float = 0.45,
isNormalized: int = 0,
clipBoxes: int = 0,
scoreBits: int = 16,
caffeSemantics: int = 1,
):
batch_size, _, numClasses = scores.shape
num_det = torch.randint(
0, keepTopK, (batch_size, 1), dtype=torch.int32)
det_boxes = torch.randn(batch_size, keepTopK, 4)
det_scores = torch.randn(batch_size, keepTopK)
det_classes = torch.randint(0, numClasses,
(batch_size, keepTopK)).float()
return num_det, det_boxes, det_scores, det_classes
@staticmethod
def symbolic(
g,
boxes: Tensor,
scores: Tensor,
plugin_version: str = '1',
shareLocation: int = 1,
backgroundLabelId: int = -1,
numClasses: int = 80,
topK: int = 1000,
keepTopK: int = 100,
scoreThreshold: float = 0.25,
iouThreshold: float = 0.45,
isNormalized: int = 0,
clipBoxes: int = 0,
scoreBits: int = 16,
caffeSemantics: int = 1,
):
out = g.op(
'TRT::BatchedNMSDynamic_TRT',
boxes,
scores,
shareLocation_i=shareLocation,
plugin_version_s=plugin_version,
backgroundLabelId_i=backgroundLabelId,
numClasses_i=numClasses,
topK_i=topK,
keepTopK_i=keepTopK,
scoreThreshold_f=scoreThreshold,
iouThreshold_f=iouThreshold,
isNormalized_i=isNormalized,
clipBoxes_i=clipBoxes,
scoreBits_i=scoreBits,
caffeSemantics_i=caffeSemantics,
outputs=4)
num_det, det_boxes, det_scores, det_classes = out
return num_det, det_boxes, det_scores, det_classes
def _efficient_nms(
boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = 100,
box_coding: int = 0,
):
"""Wrapper for `efficient_nms` with TensorRT.
Args:
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
max_output_boxes_per_class (int): Maximum number of output
boxes per class of nms. Defaults to 1000.
iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
score_threshold (float): score threshold of nms.
Defaults to 0.05.
pre_top_k (int): Number of top K boxes to keep before nms.
Defaults to -1.
keep_top_k (int): Number of top K boxes to keep after nms.
            Defaults to 100.
box_coding (int): Bounding boxes format for nms.
Defaults to 0 means [x1, y1 ,x2, y2].
Set to 1 means [x, y, w, h].
Returns:
tuple[Tensor, Tensor, Tensor, Tensor]:
(num_det, det_boxes, det_scores, det_classes),
`num_det` of shape [N, 1]
`det_boxes` of shape [N, num_det, 4]
`det_scores` of shape [N, num_det]
`det_classes` of shape [N, num_det]
"""
num_det, det_boxes, det_scores, det_classes = TRTEfficientNMSop.apply(
boxes, scores, -1, box_coding, iou_threshold, keep_top_k, '1', 0,
score_threshold)
return num_det, det_boxes, det_scores, det_classes
def _batched_nms(
boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = 100,
box_coding: int = 0,
):
"""Wrapper for `efficient_nms` with TensorRT.
Args:
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
max_output_boxes_per_class (int): Maximum number of output
boxes per class of nms. Defaults to 1000.
iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
score_threshold (float): score threshold of nms.
Defaults to 0.05.
pre_top_k (int): Number of top K boxes to keep before nms.
Defaults to -1.
keep_top_k (int): Number of top K boxes to keep after nms.
            Defaults to 100.
box_coding (int): Bounding boxes format for nms.
Defaults to 0 means [x1, y1 ,x2, y2].
Set to 1 means [x, y, w, h].
Returns:
tuple[Tensor, Tensor, Tensor, Tensor]:
(num_det, det_boxes, det_scores, det_classes),
`num_det` of shape [N, 1]
`det_boxes` of shape [N, num_det, 4]
`det_scores` of shape [N, num_det]
`det_classes` of shape [N, num_det]
"""
if box_coding == 1:
boxes = boxes @ (_XYWH2XYXY.to(boxes.device))
boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2)
_, _, numClasses = scores.shape
num_det, det_boxes, det_scores, det_classes = TRTbatchedNMSop.apply(
boxes, scores, '1', 1, -1, int(numClasses), min(pre_top_k, 4096),
keep_top_k, score_threshold, iou_threshold, 0, 0, 16, 1)
det_classes = det_classes.int()
return num_det, det_boxes, det_scores, det_classes
def efficient_nms(*args, **kwargs):
"""Wrapper function for `_efficient_nms`."""
return _efficient_nms(*args, **kwargs)
def batched_nms(*args, **kwargs):
"""Wrapper function for `_batched_nms`."""
return _batched_nms(*args, **kwargs)
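# note: `efficient_nms` and `batched_nms` are thin public aliases of the
# private implementations above; calling them eagerly (outside ONNX export)
# only exercises the dummy `forward` stubs, while the real suppression runs
# inside the TensorRT plugins once the exported model is built into an engine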
| 8,045
| 34.444934
| 78
|
py
|
mmyolo
|
mmyolo-main/projects/easydeploy/nms/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ort_nms import onnx_nms
from .trt_nms import batched_nms, efficient_nms
__all__ = ['efficient_nms', 'batched_nms', 'onnx_nms']
| 182
| 29.5
| 54
|
py
|
mmyolo
|
mmyolo-main/projects/misc/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py
|
_base_ = '../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
max_epochs = 100
data_root = './data/cat/'
# data_root = '/root/workspace/mmyolo/data/cat/' # Docker
work_dir = './work_dirs/yolov5_s-v61_syncbn_fast_1xb32-100e_cat'
load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa
train_batch_size_per_gpu = 32
train_num_workers = 4
save_epoch_intervals = 2
# base_lr_default * (your_bs / default_bs)
base_lr = _base_.base_lr / 4
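# worked out: the base config trains with 8 GPU x 16 imgs = 128 total batch
# size, while this config uses 1 GPU x 32 imgs = 32, so 32 / 128 = 1 / 4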
anchors = [
[(68, 69), (154, 91), (143, 162)], # P3/8
[(242, 160), (189, 287), (391, 207)], # P4/16
[(353, 337), (539, 341), (443, 432)] # P5/32
]
class_name = ('cat', )
num_classes = len(class_name)
metainfo = dict(classes=class_name, palette=[(220, 20, 60)])
train_cfg = dict(
max_epochs=max_epochs, val_begin=20, val_interval=save_epoch_intervals)
model = dict(
bbox_head=dict(
head_module=dict(num_classes=num_classes),
prior_generator=dict(base_sizes=anchors),
loss_cls=dict(loss_weight=0.5 *
(num_classes / 80 * 3 / _base_.num_det_layers))))
train_dataloader = dict(
batch_size=train_batch_size_per_gpu,
num_workers=train_num_workers,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=_base_.dataset_type,
data_root=data_root,
metainfo=metainfo,
ann_file='annotations/trainval.json',
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=_base_.train_pipeline)))
val_dataloader = dict(
dataset=dict(
metainfo=metainfo,
data_root=data_root,
ann_file='annotations/trainval.json',
data_prefix=dict(img='images/')))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json')
test_evaluator = val_evaluator
optim_wrapper = dict(optimizer=dict(lr=base_lr))
default_hooks = dict(
checkpoint=dict(
type='CheckpointHook',
interval=save_epoch_intervals,
max_keep_ckpts=5,
save_best='auto'),
param_scheduler=dict(max_epochs=max_epochs),
logger=dict(type='LoggerHook', interval=10))
| 2,347
| 29.493506
| 180
|
py
|
mmyolo
|
mmyolo-main/projects/misc/custom_dataset/yolov6_s_syncbn_fast_1xb32-100e_cat.py
|
_base_ = '../yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py'
max_epochs = 100
data_root = './data/cat/'
work_dir = './work_dirs/yolov6_s_syncbn_fast_1xb32-100e_cat'
load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco/yolov6_s_syncbn_fast_8xb32-400e_coco_20221102_203035-932e1d91.pth' # noqa
train_batch_size_per_gpu = 32
train_num_workers = 4 # train_num_workers = nGPU x 4
save_epoch_intervals = 2
# base_lr_default * (your_bs / default_bs)
base_lr = _base_.base_lr / 8
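# worked out: the base config trains with 8 GPU x 32 imgs = 256 total batch
# size, while this config uses 1 GPU x 32 imgs = 32, so 32 / 256 = 1 / 8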
class_name = ('cat', )
num_classes = len(class_name)
metainfo = dict(classes=class_name, palette=[(220, 20, 60)])
train_cfg = dict(
max_epochs=max_epochs,
val_begin=20,
val_interval=save_epoch_intervals,
dynamic_intervals=[(max_epochs - _base_.num_last_epochs, 1)])
model = dict(
bbox_head=dict(head_module=dict(num_classes=num_classes)),
train_cfg=dict(
initial_assigner=dict(num_classes=num_classes),
assigner=dict(num_classes=num_classes)))
train_dataloader = dict(
batch_size=train_batch_size_per_gpu,
num_workers=train_num_workers,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=_base_.dataset_type,
data_root=data_root,
metainfo=metainfo,
ann_file='annotations/trainval.json',
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=_base_.train_pipeline)))
val_dataloader = dict(
dataset=dict(
metainfo=metainfo,
data_root=data_root,
ann_file='annotations/trainval.json',
data_prefix=dict(img='images/')))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json')
test_evaluator = val_evaluator
optim_wrapper = dict(optimizer=dict(lr=base_lr))
default_hooks = dict(
checkpoint=dict(
type='CheckpointHook',
interval=save_epoch_intervals,
max_keep_ckpts=5,
save_best='auto'),
param_scheduler=dict(max_epochs=max_epochs),
logger=dict(type='LoggerHook', interval=10))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0001,
update_buffers=True,
strict_load=False,
priority=49),
dict(
type='mmdet.PipelineSwitchHook',
switch_epoch=max_epochs - _base_.num_last_epochs,
switch_pipeline=_base_.train_pipeline_stage2)
]
| 2,525
| 28.372093
| 172
|
py
|
mmyolo
|
mmyolo-main/projects/misc/custom_dataset/yolov7_tiny_syncbn_fast_1xb32-100e_cat.py
|
_base_ = '../yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py'
max_epochs = 100
data_root = './data/cat/'
work_dir = './work_dirs/yolov7_tiny_syncbn_fast_1xb32-100e_cat'
load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco/yolov7_tiny_syncbn_fast_8x16b-300e_coco_20221126_102719-0ee5bbdf.pth' # noqa
train_batch_size_per_gpu = 32
train_num_workers = 4 # train_num_workers = nGPU x 4
save_epoch_intervals = 2
# base_lr_default * (your_bs / default_bs)
base_lr = 0.01 / 4
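# worked out: the base config trains with 8 GPU x 16 imgs = 128 total batch
# size, while this config uses 1 GPU x 32 imgs = 32, so 0.01 * 32 / 128 =
# 0.01 / 4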
anchors = [
[(68, 69), (154, 91), (143, 162)], # P3/8
[(242, 160), (189, 287), (391, 207)], # P4/16
[(353, 337), (539, 341), (443, 432)] # P5/32
]
class_name = ('cat', )
num_classes = len(class_name)
metainfo = dict(classes=class_name, palette=[(220, 20, 60)])
train_cfg = dict(
max_epochs=max_epochs,
val_begin=20,
val_interval=save_epoch_intervals,
dynamic_intervals=[(max_epochs - 10, 1)])
model = dict(
bbox_head=dict(
head_module=dict(num_classes=num_classes),
prior_generator=dict(base_sizes=anchors),
loss_cls=dict(loss_weight=0.5 *
(num_classes / 80 * 3 / _base_.num_det_layers))))
train_dataloader = dict(
batch_size=train_batch_size_per_gpu,
num_workers=train_num_workers,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=_base_.dataset_type,
data_root=data_root,
metainfo=metainfo,
ann_file='annotations/trainval.json',
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=_base_.train_pipeline)))
val_dataloader = dict(
dataset=dict(
metainfo=metainfo,
data_root=data_root,
ann_file='annotations/trainval.json',
data_prefix=dict(img='images/')))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json')
test_evaluator = val_evaluator
optim_wrapper = dict(optimizer=dict(lr=base_lr))
default_hooks = dict(
checkpoint=dict(
type='CheckpointHook',
interval=save_epoch_intervals,
max_keep_ckpts=2,
save_best='auto'),
param_scheduler=dict(max_epochs=max_epochs),
logger=dict(type='LoggerHook', interval=10))
| 2,360
| 28.886076
| 178
|
py
|
mmyolo
|
mmyolo-main/.dev_scripts/gather_models.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os
import os.path as osp
import shutil
import subprocess
import time
from collections import OrderedDict
import torch
import yaml
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist, scandir
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds)
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
if 'message_hub' in checkpoint:
del checkpoint['message_hub']
if 'ema_state_dict' in checkpoint:
del checkpoint['ema_state_dict']
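    # the keys stripped below are fixed buffers (anchor priors, grid
    # offsets) that the dense head rebuilds from the config at load time,
    # so removing them shrinks the published file without losing
    # information (assumed rationale for this cleanup)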
for key in list(checkpoint['state_dict']):
if key.startswith('data_preprocessor'):
checkpoint['state_dict'].pop(key)
elif 'priors_base_sizes' in key:
checkpoint['state_dict'].pop(key)
elif 'grid_offset' in key:
checkpoint['state_dict'].pop(key)
elif 'prior_inds' in key:
checkpoint['state_dict'].pop(key)
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
    # lexicographic string comparison breaks for versions such as '1.10';
    # compare the parsed (major, minor) tuple instead
    torch_version = tuple(
        int(v) for v in torch.__version__.split('+')[0].split('.')[:2])
    if torch_version >= (1, 6):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # str.rstrip strips a character set, not a suffix; slice '.pth' off safely
    out_stem = out_file[:-4] if out_file.endswith('.pth') else out_file
    final_file = out_stem + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
return final_file
def is_by_epoch(config):
cfg = Config.fromfile('./configs/' + config)
return cfg.train_cfg.type == 'EpochBasedTrainLoop'
def get_final_epoch_or_iter(config):
cfg = Config.fromfile('./configs/' + config)
if cfg.train_cfg.type == 'EpochBasedTrainLoop':
return cfg.train_cfg.max_epochs
else:
return cfg.train_cfg.max_iters
def get_best_epoch_or_iter(exp_dir):
best_epoch_iter_full_path = list(
sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1]
best_epoch_or_iter_model_path = best_epoch_iter_full_path.split('/')[-1]
best_epoch_or_iter = best_epoch_or_iter_model_path. \
split('_')[-1].split('.')[0]
return best_epoch_or_iter_model_path, int(best_epoch_or_iter)
def get_real_epoch_or_iter(config):
cfg = Config.fromfile('./configs/' + config)
if cfg.train_cfg.type == 'EpochBasedTrainLoop':
epoch = cfg.train_cfg.max_epochs
return epoch
else:
return cfg.runner.max_iters
def get_final_results(log_json_path,
epoch_or_iter,
results_lut='coco/bbox_mAP',
by_epoch=True):
result_dict = dict()
with open(log_json_path) as f:
r = f.readlines()[-1]
last_metric = r.split(',')[0].split(': ')[-1].strip()
result_dict[results_lut] = last_metric
return result_dict
def get_dataset_name(config):
    # If there are more datasets, add them here.
name_map = dict(
CityscapesDataset='Cityscapes',
CocoDataset='COCO',
YOLOv5CocoDataset='COCO',
CocoPanopticDataset='COCO',
YOLOv5DOTADataset='DOTA 1.0',
DeepFashionDataset='Deep Fashion',
LVISV05Dataset='LVIS v0.5',
LVISV1Dataset='LVIS v1',
VOCDataset='Pascal VOC',
YOLOv5VOCDataset='Pascal VOC',
WIDERFaceDataset='WIDER Face',
OpenImagesDataset='OpenImagesDataset',
OpenImagesChallengeDataset='OpenImagesChallengeDataset')
cfg = Config.fromfile('./configs/' + config)
return name_map[cfg.dataset_type]
def find_last_dir(model_dir):
dst_times = []
for time_stamp in os.scandir(model_dir):
if osp.isdir(time_stamp):
dst_time = time.mktime(
time.strptime(time_stamp.name, '%Y%m%d_%H%M%S'))
dst_times.append([dst_time, time_stamp.name])
return max(dst_times, key=lambda x: x[0])[1]
def convert_model_info_to_pwc(model_infos):
pwc_files = {}
for model in model_infos:
cfg_folder_name = osp.split(model['config'])[-2]
pwc_model_info = OrderedDict()
pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
pwc_model_info['In Collection'] = 'Please fill in Collection name'
pwc_model_info['Config'] = osp.join('configs', model['config'])
# get metadata
meta_data = OrderedDict()
if 'epochs' in model:
meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])
else:
meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])
pwc_model_info['Metadata'] = meta_data
# get dataset name
dataset_name = get_dataset_name(model['config'])
# get results
results = []
        # if there are more metrics, add them here.
if 'bbox_mAP' in model['results']:
metric = round(model['results']['bbox_mAP'] * 100, 1)
results.append(
OrderedDict(
Task='Object Detection',
Dataset=dataset_name,
Metrics={'box AP': metric}))
if 'segm_mAP' in model['results']:
metric = round(model['results']['segm_mAP'] * 100, 1)
results.append(
OrderedDict(
Task='Instance Segmentation',
Dataset=dataset_name,
Metrics={'mask AP': metric}))
if 'PQ' in model['results']:
metric = round(model['results']['PQ'], 1)
results.append(
OrderedDict(
Task='Panoptic Segmentation',
Dataset=dataset_name,
Metrics={'PQ': metric}))
pwc_model_info['Results'] = results
link_string = 'https://download.openmmlab.com/mmyolo/v0/'
        # configs gathered by `scandir` always end with '.py'; slice the
        # suffix off (str.rstrip strips characters, not a suffix)
        link_string += '{}/{}'.format(model['config'][:-3],
                                      osp.split(model['model_path'])[-1])
pwc_model_info['Weights'] = link_string
if cfg_folder_name in pwc_files:
pwc_files[cfg_folder_name].append(pwc_model_info)
else:
pwc_files[cfg_folder_name] = [pwc_model_info]
return pwc_files
def parse_args():
parser = argparse.ArgumentParser(description='Gather benchmarked models')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'out', type=str, help='output path of gathered models to be stored')
parser.add_argument(
'--best',
action='store_true',
help='whether to gather the best model.')
args = parser.parse_args()
return args
# TODO: Refine
def main():
args = parse_args()
models_root = args.root
models_out = args.out
mkdir_or_exist(models_out)
# find all models in the root directory to be gathered
raw_configs = list(scandir('./configs', '.py', recursive=True))
    # filter out configs that were not trained in the experiments dir
used_configs = []
for raw_config in raw_configs:
if osp.exists(osp.join(models_root, raw_config)):
used_configs.append(raw_config)
    print(f'Found {len(used_configs)} models to be gathered')
    # find the final checkpoint and log file for each trained config
    # and parse the best performance
model_infos = []
for used_config in used_configs:
exp_dir = osp.join(models_root, used_config)
by_epoch = is_by_epoch(used_config)
        # check whether the experiment has finished
if args.best is True:
final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)
else:
final_epoch_or_iter = get_final_epoch_or_iter(used_config)
final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
final_epoch_or_iter)
model_path = osp.join(exp_dir, final_model)
# skip if the model is still training
if not osp.exists(model_path):
continue
# get the latest logs
latest_exp_name = find_last_dir(exp_dir)
latest_exp_json = osp.join(exp_dir, latest_exp_name, 'vis_data',
latest_exp_name + '.json')
model_performance = get_final_results(
latest_exp_json, final_epoch_or_iter, by_epoch=by_epoch)
if model_performance is None:
continue
model_info = dict(
config=used_config,
results=model_performance,
final_model=final_model,
latest_exp_json=latest_exp_json,
latest_exp_name=latest_exp_name)
model_info['epochs' if by_epoch else 'iterations'] = \
final_epoch_or_iter
model_infos.append(model_info)
# publish model for each checkpoint
publish_model_infos = []
for model in model_infos:
        # slice the '.py' suffix off (str.rstrip would strip characters)
        model_publish_dir = osp.join(models_out, model['config'][:-3])
mkdir_or_exist(model_publish_dir)
model_name = osp.split(model['config'])[-1].split('.')[0]
model_name += '_' + model['latest_exp_name']
publish_model_path = osp.join(model_publish_dir, model_name)
trained_model_path = osp.join(models_root, model['config'],
model['final_model'])
# convert model
final_model_path = process_checkpoint(trained_model_path,
publish_model_path)
# copy log
shutil.copy(model['latest_exp_json'],
osp.join(model_publish_dir, f'{model_name}.log.json'))
# copy config to guarantee reproducibility
config_path = model['config']
config_path = osp.join(
'configs',
config_path) if 'configs' not in config_path else config_path
target_config_path = osp.split(config_path)[-1]
shutil.copy(config_path, osp.join(model_publish_dir,
target_config_path))
model['model_path'] = final_model_path
publish_model_infos.append(model)
models = dict(models=publish_model_infos)
    print(f'Gathered {len(publish_model_infos)} models in total')
dump(models, osp.join(models_out, 'model_info.json'))
pwc_files = convert_model_info_to_pwc(publish_model_infos)
for name in pwc_files:
with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
if __name__ == '__main__':
main()
| 11,017
| 34.314103
| 79
|
py
|
mmyolo
|
mmyolo-main/.dev_scripts/print_registers.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import importlib
import os
import os.path as osp
import pkgutil
import sys
import tempfile
from multiprocessing import Pool
from pathlib import Path
import numpy as np
import pandas as pd
# host_addr = 'https://gitee.com/open-mmlab'
host_addr = 'https://github.com/open-mmlab'
tools_list = ['tools', '.dev_scripts']
proxy_names = {
'mmdet': 'mmdetection',
'mmseg': 'mmsegmentation',
'mmcls': 'mmclassification'
}
merge_module_keys = {'mmcv': ['mmengine']}
# exclude_prefix = {'mmcv': ['<class \'mmengine.model.']}
exclude_prefix = {}
markdown_title = '# Registry tables of the MM-series open-source libraries\n'
markdown_title += ('(Note: this document is auto-generated by the '
                   '.dev_scripts/print_registers.py script)')
def capitalize(repo_name):
lower = repo_name.lower()
if lower == 'mmcv':
return repo_name.upper()
elif lower.startswith('mm'):
return 'MM' + repo_name[2:]
return repo_name.capitalize()
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, mode=mode, exist_ok=True)
def parse_repo_name(repo_name):
proxy_names_rev = dict(zip(proxy_names.values(), proxy_names.keys()))
repo_name = proxy_names.get(repo_name, repo_name)
module_name = proxy_names_rev.get(repo_name, repo_name)
return repo_name, module_name
def git_pull_branch(repo_name, branch_name='', pulldir='.'):
mkdir_or_exist(pulldir)
exec_str = f'cd {pulldir};git init;git pull '
exec_str += f'{host_addr}/{repo_name}.git'
if branch_name:
exec_str += f' {branch_name}'
returncode = os.system(exec_str)
if returncode:
raise RuntimeError(
f'failed to get the remote repo, code: {returncode}')
def load_modules_from_dir(module_name, module_root, throw_error=False):
print(f'loading the {module_name} modules...')
# # install the dependencies
# if osp.exists(osp.join(pkg_dir, 'requirements.txt')):
# os.system('pip install -r requirements.txt')
# get all module list
module_list = []
error_dict = {}
module_root = osp.join(module_root, module_name)
assert osp.exists(module_root), \
f'cannot find the module root: {module_root}'
for _root, _dirs, _files in os.walk(module_root):
if (('__init__.py' not in _files)
and (osp.split(_root)[1] != '__pycache__')):
# add __init__.py file to the package
with open(osp.join(_root, '__init__.py'), 'w') as _:
pass
def _onerror(*args, **kwargs):
pass
for _finder, _name, _ispkg in pkgutil.walk_packages([module_root],
prefix=module_name +
'.',
onerror=_onerror):
try:
module = importlib.import_module(_name)
module_list.append(module)
except Exception as e:
if throw_error:
raise e
_error_msg = f'{type(e)}: {e}.'
print(f'cannot import the module: {_name} ({_error_msg})')
assert (_name not in error_dict), \
f'duplicate error name was found: {_name}'
error_dict[_name] = _error_msg
for module in module_list:
assert module.__file__.startswith(module_root), \
f'the importing path of package was wrong: {module.__file__}'
print('modules were loaded...')
return module_list, error_dict
def get_registries_from_modules(module_list):
registries = {}
objects_set = set()
    # import the Registry class lazily here: importing it at module load
    # time is not allowed, because it must come from the temporarily
    # pulled package that is only added to sys.path later
from mmengine.registry import Registry
# only get the specific registries in module list
for module in module_list:
for obj_name in dir(module):
_obj = getattr(module, obj_name)
if isinstance(_obj, Registry):
objects_set.add(_obj)
for _obj in objects_set:
if _obj.scope not in registries:
registries[_obj.scope] = {}
registries_scope = registries[_obj.scope]
        assert _obj.name not in registries_scope, \
            f'multiple definitions of {_obj.name} in registries'
registries_scope[_obj.name] = {
key: str(val)
for key, val in _obj.module_dict.items()
}
    print('registries collected...')
return registries
def merge_registries(src_dict, dst_dict):
assert type(src_dict) == type(dst_dict), \
(f'merge type is not supported: '
f'{type(dst_dict)} and {type(src_dict)}')
if isinstance(src_dict, str):
return
for _k, _v in dst_dict.items():
if (_k not in src_dict):
src_dict.update({_k: _v})
else:
assert isinstance(_v, (dict, str)) and \
isinstance(src_dict[_k], (dict, str)), \
'merge type is not supported: ' \
f'{type(_v)} and {type(src_dict[_k])}'
merge_registries(src_dict[_k], _v)
def exclude_registries(registries, exclude_key):
for _k in list(registries.keys()):
_v = registries[_k]
if isinstance(_v, str) and _v.startswith(exclude_key):
registries.pop(_k)
elif isinstance(_v, dict):
exclude_registries(_v, exclude_key)
def get_scripts_from_dir(root):
def _recurse(_dict, _chain):
if len(_chain) <= 1:
_dict[_chain[0]] = None
return
_key, *_chain = _chain
if _key not in _dict:
_dict[_key] = {}
_recurse(_dict[_key], _chain)
    # find all scripts in the root directory (any extension, not just
    # '.py' / '.sh'). mmengine's scandir cannot be used to scan the dir,
    # because importing mmengine is not allowed before the git pull finishes
scripts = {}
for _subroot, _dirs, _files in os.walk(root):
for _file in _files:
_script = osp.join(osp.relpath(_subroot, root), _file)
_recurse(scripts, Path(_script).parts)
return scripts
def get_version_from_module_name(module_name, branch):
branch_str = str(branch) if branch is not None else ''
version_str = ''
try:
exec(f'import {module_name}')
_module = eval(f'{module_name}')
if hasattr(_module, '__version__'):
version_str = str(_module.__version__)
else:
version_str = branch_str
version_str = f' ({version_str})' if version_str else version_str
except (ImportError, AttributeError) as e:
print(f'can not get the version of module {module_name}: {e}')
return version_str
def print_tree(print_dict):
    # recursively print the dict tree
def _recurse(_dict, _connector='', n=0):
assert isinstance(_dict, dict), 'recursive type must be dict'
tree = ''
for idx, (_key, _val) in enumerate(_dict.items()):
sub_tree = ''
_last = (idx == (len(_dict) - 1))
if isinstance(_val, str):
_key += f' ({_val})'
elif isinstance(_val, dict):
sub_tree = _recurse(_val,
_connector + (' ' if _last else '│ '),
n + 1)
else:
assert (_val is None), f'unknown print type {_val}'
tree += ' ' + _connector + \
('└─' if _last else '├─') + f'({n}) {_key}' + '\n'
tree += sub_tree
return tree
for _pname, _pdict in print_dict.items():
print('-' * 100)
print(f'{_pname}\n' + _recurse(_pdict))
def divide_list_into_groups(_array, _maxsize_per_group):
if not _array:
return _array
_groups = np.asarray(len(_array) / _maxsize_per_group)
if len(_array) % _maxsize_per_group:
_groups = np.floor(_groups) + 1
_groups = _groups.astype(int)
return np.array_split(_array, _groups)
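# worked example: divide_list_into_groups(list(range(7)), 3) makes
# ceil(7 / 3) = 3 groups, and np.array_split balances them as 3 / 2 / 2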
def registries_to_html(registries, title=''):
max_col_per_row = 5
max_size_per_cell = 20
html = ''
table_data = []
# save repository registries
for registry_name, registry_dict in registries.items():
# filter the empty registries
if not registry_dict:
continue
registry_strings = []
if isinstance(registry_dict, dict):
registry_dict = list(registry_dict.keys())
elif isinstance(registry_dict, list):
pass
else:
raise TypeError(
f'unknown type of registry_dict {type(registry_dict)}')
for _k in registry_dict:
registry_strings.append(f'<li>{_k}</li>')
table_data.append((registry_name, registry_strings))
# sort the data list
table_data = sorted(table_data, key=lambda x: len(x[1]))
# split multi parts
table_data_multi_parts = []
for (registry_name, registry_strings) in table_data:
multi_parts = False
if len(registry_strings) > max_size_per_cell:
multi_parts = True
for cell_idx, registry_cell in enumerate(
divide_list_into_groups(registry_strings, max_size_per_cell)):
registry_str = ''.join(registry_cell.tolist())
registry_str = f'<ul>{registry_str}</ul>'
table_data_multi_parts.append([
registry_name if not multi_parts else
f'{registry_name} (part {cell_idx + 1})', registry_str
])
for table_data in divide_list_into_groups(table_data_multi_parts,
max_col_per_row):
table_data = list(zip(*table_data.tolist()))
html += dataframe_to_html(
pd.DataFrame([table_data[1]], columns=table_data[0]))
if html:
html = f'<div align=\'center\'><b>{title}</b></div>\n{html}'
html = f'<details open>{html}</details>\n'
return html
def tools_to_html(tools_dict, repo_name=''):
def _recurse(_dict, _connector, _result):
assert isinstance(_dict, dict), \
f'unknown recurse type: {_dict} ({type(_dict)})'
for _k, _v in _dict.items():
if _v is None:
if _connector not in _result:
_result[_connector] = []
_result[_connector].append(_k)
else:
_recurse(_v, osp.join(_connector, _k), _result)
table_data = {}
title = f'{capitalize(repo_name)} Tools'
_recurse(tools_dict, '', table_data)
return registries_to_html(table_data, title)
def dataframe_to_html(dataframe):
styler = dataframe.style
styler = styler.hide(axis='index')
styler = styler.format(na_rep='-')
styler = styler.set_properties(**{
'text-align': 'left',
'align': 'center',
'vertical-align': 'top'
})
styler = styler.set_table_styles([{
'selector':
'thead th',
'props':
'align:center;text-align:center;vertical-align:bottom'
}])
html = styler.to_html()
html = f'<div align=\'center\'>\n{html}</div>'
return html
def generate_markdown_by_repository(repo_name,
module_name,
branch,
pulldir,
throw_error=False):
# add the pull dir to the system path so that it can be found
if pulldir not in sys.path:
sys.path.insert(0, pulldir)
module_list, error_dict = load_modules_from_dir(
module_name, pulldir, throw_error=throw_error)
registries_tree = get_registries_from_modules(module_list)
if error_dict:
error_dict_name = 'error_modules'
assert (error_dict_name not in registries_tree), \
f'duplicate module name was found: {error_dict_name}'
registries_tree.update({error_dict_name: error_dict})
# get the tools files
for tools_name in tools_list:
assert (tools_name not in registries_tree), \
f'duplicate tools name was found: {tools_name}'
tools_tree = osp.join(pulldir, tools_name)
tools_tree = get_scripts_from_dir(tools_tree)
registries_tree.update({tools_name: tools_tree})
# print_tree(registries_tree)
# get registries markdown string
module_registries = registries_tree.get(module_name, {})
for merge_key in merge_module_keys.get(module_name, []):
merge_dict = registries_tree.get(merge_key, {})
merge_registries(module_registries, merge_dict)
for exclude_key in exclude_prefix.get(module_name, []):
exclude_registries(module_registries, exclude_key)
markdown_str = registries_to_html(
module_registries, title=f'{capitalize(repo_name)} Module Components')
# get tools markdown string
tools_registries = {}
for tools_name in tools_list:
tools_registries.update(
{tools_name: registries_tree.get(tools_name, {})})
markdown_str += tools_to_html(tools_registries, repo_name=repo_name)
version_str = get_version_from_module_name(module_name, branch)
title_str = f'\n\n## {capitalize(repo_name)}{version_str}\n'
# remove the pull dir from system path
if pulldir in sys.path:
sys.path.remove(pulldir)
return f'{title_str}{markdown_str}'
def parse_args():
parser = argparse.ArgumentParser(
description='print registries in openmmlab repositories')
parser.add_argument(
'-r',
'--repositories',
nargs='+',
default=['mmdet', 'mmcls', 'mmseg', 'mmengine', 'mmcv'],
type=str,
help='git repositories name in OpenMMLab')
parser.add_argument(
'-b',
'--branches',
nargs='+',
default=['3.x', '1.x', '1.x', 'main', '2.x'],
type=str,
help='the branch names of git repositories, the length of branches '
'must be same as the length of repositories')
parser.add_argument(
'-o', '--out', type=str, default='.', help='output path of the file')
parser.add_argument(
'--throw-error',
action='store_true',
default=False,
help='whether to throw error when trying to import modules')
args = parser.parse_args()
return args
# TODO: Refine
def main():
args = parse_args()
repositories = args.repositories
branches = args.branches
assert isinstance(repositories, list), \
'Type of repositories must be list'
if branches is None:
branches = [None] * len(repositories)
assert isinstance(branches, list) and \
len(branches) == len(repositories), \
'The length of branches must be same as ' \
'that of repositories'
assert isinstance(args.out, str), \
'The type of output path must be string'
# save path of file
mkdir_or_exist(args.out)
save_path = osp.join(args.out, 'registries_info.md')
with tempfile.TemporaryDirectory() as tmpdir:
# multi process init
pool = Pool(processes=len(repositories))
multi_proc_input_list = []
multi_proc_output_list = []
# get the git repositories
for branch, repository in zip(branches, repositories):
repo_name, module_name = parse_repo_name(repository)
pulldir = osp.join(tmpdir, f'tmp_{repo_name}')
git_pull_branch(
repo_name=repo_name, branch_name=branch, pulldir=pulldir)
multi_proc_input_list.append(
(repo_name, module_name, branch, pulldir, args.throw_error))
print('starting the multi process to get the registries')
for multi_proc_input in multi_proc_input_list:
multi_proc_output_list.append(
pool.apply_async(generate_markdown_by_repository,
multi_proc_input))
pool.close()
pool.join()
with open(save_path, 'w', encoding='utf-8') as fw:
fw.write(f'{markdown_title}\n')
for multi_proc_output in multi_proc_output_list:
markdown_str = multi_proc_output.get()
fw.write(f'{markdown_str}\n')
print(f'saved registries to the path: {save_path}')
if __name__ == '__main__':
main()
| 16,298
| 35.300668
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_engine/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_engine/test_hooks/test_yolox_mode_switch_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.config import Config
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmyolo.engine.hooks import YOLOXModeSwitchHook
from mmyolo.utils import register_all_modules
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
pipeline1 = [
dict(type='mmdet.Resize'),
]
pipeline2 = [
dict(type='mmdet.RandomFlip'),
]
register_all_modules()
class TestYOLOXModeSwitchHook(TestCase):
def test(self):
train_dataloader = dict(
dataset=DummyDataset(),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0)
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.bbox_head.use_bbox_aux = False
runner.cfg.train_dataloader = Config(train_dataloader)
runner.train_dataloader = Runner.build_dataloader(train_dataloader)
runner.train_dataloader.dataset.pipeline = pipeline1
hook = YOLOXModeSwitchHook(
num_last_epochs=15, new_train_pipeline=pipeline2)
# test after change mode
runner.epoch = 284
runner.max_epochs = 300
hook.before_train_epoch(runner)
self.assertTrue(runner.model.bbox_head.use_bbox_aux)
self.assertEqual(runner.train_loop.dataloader.dataset.pipeline,
pipeline2)
| 1,810
| 25.632353
| 75
|
py
|
mmyolo
|
mmyolo-main/tests/test_engine/test_hooks/test_switch_to_deploy_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
from mmyolo.engine.hooks import SwitchToDeployHook
from mmyolo.models import RepVGGBlock
from mmyolo.utils import register_all_modules
register_all_modules()
class TestSwitchToDeployHook(TestCase):
def test(self):
runner = Mock()
runner.model = RepVGGBlock(256, 256)
hook = SwitchToDeployHook()
self.assertFalse(runner.model.deploy)
# test after change mode
hook.before_test_epoch(runner)
self.assertTrue(runner.model.deploy)
| 603
| 23.16
| 50
|
py
|
mmyolo
|
mmyolo-main/tests/test_engine/test_hooks/test_yolov5_param_scheduler_hook.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.config import Config
from mmengine.optim import build_optim_wrapper
from mmengine.runner import Runner
from torch import nn
from torch.utils.data import Dataset
from mmyolo.engine.hooks import YOLOv5ParamSchedulerHook
from mmyolo.utils import register_all_modules
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, inputs, data_samples, mode='tensor'):
labels = torch.stack(data_samples)
inputs = torch.stack(inputs)
outputs = self.linear(inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(
type='SGD',
lr=0.01,
momentum=0.937,
weight_decay=0.0005,
nesterov=True,
batch_size_per_gpu=1),
constructor='YOLOv5OptimizerConstructor')
register_all_modules()
class TestYOLOv5ParamSchedulerHook(TestCase):
def test(self):
model = ToyModel()
train_dataloader = dict(
dataset=DummyDataset(),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0)
runner = Mock()
runner.model = model
runner.optim_wrapper = build_optim_wrapper(model, optim_wrapper)
runner.cfg.train_dataloader = Config(train_dataloader)
runner.train_dataloader = Runner.build_dataloader(train_dataloader)
hook = YOLOv5ParamSchedulerHook(
scheduler_type='linear', lr_factor=0.01, max_epochs=300)
# test before train
runner.epoch = 0
runner.iter = 0
hook.before_train(runner)
for group in runner.optim_wrapper.param_groups:
self.assertEqual(group['lr'], 0.01)
self.assertEqual(group['momentum'], 0.937)
self.assertFalse(hook._warmup_end)
# test after training 10 steps
for i in range(10):
runner.iter += 1
hook.before_train_iter(runner, 0)
for group_idx, group in enumerate(runner.optim_wrapper.param_groups):
if group_idx == 2:
self.assertEqual(round(group['lr'], 5), 0.0991)
self.assertEqual(group['momentum'], 0.80137)
self.assertFalse(hook._warmup_end)
# test after warm up
runner.iter = 1000
hook.before_train_iter(runner, 0)
self.assertFalse(hook._warmup_end)
for group in runner.optim_wrapper.param_groups:
self.assertEqual(group['lr'], 0.01)
self.assertEqual(group['momentum'], 0.937)
runner.iter = 1001
hook.before_train_iter(runner, 0)
self.assertTrue(hook._warmup_end)
# test after train_epoch
hook.after_train_epoch(runner)
for group in runner.optim_wrapper.param_groups:
self.assertEqual(group['lr'], 0.01)
self.assertEqual(group['momentum'], 0.937)
| 3,619
| 27.96
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_engine/test_optimizers/test_yolov5_optim_constructor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.optim import build_optim_wrapper
from mmyolo.engine import YOLOv5OptimizerConstructor
from mmyolo.utils import register_all_modules
register_all_modules()
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.param1 = nn.Parameter(torch.ones(1))
self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
self.bn = nn.BatchNorm2d(2)
class TestYOLOv5OptimizerConstructor(TestCase):
def setUp(self):
self.model = ExampleModel()
self.base_lr = 0.01
self.weight_decay = 0.0001
self.optim_wrapper_cfg = dict(
type='OptimWrapper',
optimizer=dict(
type='SGD',
lr=self.base_lr,
momentum=0.9,
weight_decay=self.weight_decay,
batch_size_per_gpu=16))
def test_init(self):
YOLOv5OptimizerConstructor(copy.deepcopy(self.optim_wrapper_cfg))
YOLOv5OptimizerConstructor(
copy.deepcopy(self.optim_wrapper_cfg),
paramwise_cfg={'base_total_batch_size': 64})
# `paramwise_cfg` must include `base_total_batch_size` if not None.
with self.assertRaises(AssertionError):
YOLOv5OptimizerConstructor(
copy.deepcopy(self.optim_wrapper_cfg), paramwise_cfg={'a': 64})
def test_build(self):
optim_wrapper = YOLOv5OptimizerConstructor(
copy.deepcopy(self.optim_wrapper_cfg))(
self.model)
# test param_groups
assert len(optim_wrapper.optimizer.param_groups) == 3
for i in range(3):
param_groups_i = optim_wrapper.optimizer.param_groups[i]
assert param_groups_i['lr'] == self.base_lr
if i == 0:
assert param_groups_i['weight_decay'] == self.weight_decay
else:
assert param_groups_i['weight_decay'] == 0
# test weight_decay linear scaling
optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg)
optim_wrapper_cfg['optimizer']['batch_size_per_gpu'] = 128
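        # assuming the constructor's default base_total_batch_size of 64
        # (used explicitly in test_init above), the linear scaling factor
        # is 128 / 64 = 2, matching the doubled weight_decay asserted below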
optim_wrapper = YOLOv5OptimizerConstructor(optim_wrapper_cfg)(
self.model)
assert optim_wrapper.optimizer.param_groups[0][
'weight_decay'] == self.weight_decay * 2
# test without batch_size_per_gpu
optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg)
optim_wrapper_cfg['optimizer'].pop('batch_size_per_gpu')
optim_wrapper = dict(
optim_wrapper_cfg, constructor='YOLOv5OptimizerConstructor')
optim_wrapper = build_optim_wrapper(self.model, optim_wrapper)
assert optim_wrapper.optimizer.param_groups[0][
'weight_decay'] == self.weight_decay
| 2,934
| 34.792683
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_engine/test_optimizers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_engine/test_optimizers/test_yolov7_optim_wrapper_constructor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.optim import build_optim_wrapper
from mmyolo.engine import YOLOv7OptimWrapperConstructor
from mmyolo.utils import register_all_modules
register_all_modules()
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.param1 = nn.Parameter(torch.ones(1))
self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
self.bn = nn.BatchNorm2d(2)
class TestYOLOv7OptimWrapperConstructor(TestCase):
def setUp(self):
self.model = ExampleModel()
self.base_lr = 0.01
self.weight_decay = 0.0001
self.optim_wrapper_cfg = dict(
type='OptimWrapper',
optimizer=dict(
type='SGD',
lr=self.base_lr,
momentum=0.9,
weight_decay=self.weight_decay,
batch_size_per_gpu=16))
def test_init(self):
YOLOv7OptimWrapperConstructor(copy.deepcopy(self.optim_wrapper_cfg))
YOLOv7OptimWrapperConstructor(
copy.deepcopy(self.optim_wrapper_cfg),
paramwise_cfg={'base_total_batch_size': 64})
# `paramwise_cfg` must include `base_total_batch_size` if not None.
with self.assertRaises(AssertionError):
YOLOv7OptimWrapperConstructor(
copy.deepcopy(self.optim_wrapper_cfg), paramwise_cfg={'a': 64})
def test_build(self):
optim_wrapper = YOLOv7OptimWrapperConstructor(
copy.deepcopy(self.optim_wrapper_cfg))(
self.model)
# test param_groups
assert len(optim_wrapper.optimizer.param_groups) == 3
for i in range(3):
param_groups_i = optim_wrapper.optimizer.param_groups[i]
assert param_groups_i['lr'] == self.base_lr
if i == 0:
assert param_groups_i['weight_decay'] == self.weight_decay
else:
assert param_groups_i['weight_decay'] == 0
# test weight_decay linear scaling
optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg)
optim_wrapper_cfg['optimizer']['batch_size_per_gpu'] = 128
optim_wrapper = YOLOv7OptimWrapperConstructor(optim_wrapper_cfg)(
self.model)
assert optim_wrapper.optimizer.param_groups[0][
'weight_decay'] == self.weight_decay * 2
# test without batch_size_per_gpu
optim_wrapper_cfg = copy.deepcopy(self.optim_wrapper_cfg)
optim_wrapper_cfg['optimizer'].pop('batch_size_per_gpu')
optim_wrapper = dict(
optim_wrapper_cfg, constructor='YOLOv7OptimWrapperConstructor')
optim_wrapper = build_optim_wrapper(self.model, optim_wrapper)
assert optim_wrapper.optimizer.param_groups[0][
'weight_decay'] == self.weight_decay
| 2,958
| 35.085366
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_deploy/conftest.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
@pytest.fixture(autouse=True)
def init_test():
# init default scope
from mmdet.utils import register_all_modules as register_det
from mmyolo.utils import register_all_modules as register_yolo
register_yolo(True)
register_det(False)
| 318
| 21.785714
| 66
|
py
|
mmyolo
|
mmyolo-main/tests/test_deploy/test_object_detection.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
import numpy as np
import pytest
import torch
from mmengine import Config
try:
import importlib
importlib.import_module('mmdeploy')
except ImportError:
pytest.skip('mmdeploy is not installed.', allow_module_level=True)
import mmdeploy.backend.onnxruntime as ort_apis
from mmdeploy.apis import build_task_processor
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import load_config
from mmdeploy.utils.config_utils import register_codebase
from mmdeploy.utils.test import SwitchBackendWrapper
try:
codebase = register_codebase('mmyolo')
import_codebase(codebase, ['mmyolo.deploy'])
except ImportError:
pytest.skip('mmyolo is not installed.', allow_module_level=True)
model_cfg_path = 'tests/test_deploy/data/model.py'
model_cfg = load_config(model_cfg_path)[0]
model_cfg.test_dataloader.dataset.data_root = 'tests/data'
model_cfg.test_dataloader.dataset.ann_file = 'coco_sample.json'
model_cfg.test_evaluator.ann_file = 'tests/coco_sample.json'
deploy_cfg = Config(
dict(
backend_config=dict(type='onnxruntime'),
codebase_config=dict(
type='mmyolo',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
confidence_threshold=0.005, # for YOLOv3
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
),
module=['mmyolo.deploy']),
onnx_config=dict(
type='onnx',
export_params=True,
keep_initializers_as_inputs=False,
opset_version=11,
input_shape=None,
input_names=['input'],
output_names=['dets', 'labels'])))
onnx_file = NamedTemporaryFile(suffix='.onnx').name
task_processor = None
img_shape = (32, 32)
img = np.random.rand(*img_shape, 3)
@pytest.fixture(autouse=True)
def init_task_processor():
global task_processor
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
@pytest.fixture
def backend_model():
from mmdeploy.backend.onnxruntime import ORTWrapper
ort_apis.__dict__.update({'ORTWrapper': ORTWrapper})
wrapper = SwitchBackendWrapper(ORTWrapper)
wrapper.set(
outputs={
'dets': torch.rand(1, 10, 5).sort(2).values,
'labels': torch.randint(0, 10, (1, 10))
})
yield task_processor.build_backend_model([''])
wrapper.recover()
def test_visualize(backend_model):
img_path = 'tests/data/color.jpg'
input_dict, _ = task_processor.create_input(
img_path, input_shape=img_shape)
results = backend_model.test_step(input_dict)[0]
    with TemporaryDirectory() as tmp_dir:
        filename = os.path.join(tmp_dir, 'tmp.jpg')
task_processor.visualize(img, results, filename, 'window')
assert os.path.exists(filename)
| 3,052
| 30.474227
| 71
|
py
|
mmyolo
|
mmyolo-main/tests/test_deploy/test_mmyolo_models.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import random
import numpy as np
import pytest
import torch
from mmengine import Config
try:
import importlib
importlib.import_module('mmdeploy')
except ImportError:
pytest.skip('mmdeploy is not installed.', allow_module_level=True)
from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend
from mmdeploy.utils.config_utils import register_codebase
from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs,
get_rewrite_outputs)
try:
codebase = register_codebase('mmyolo')
import_codebase(codebase, ['mmyolo.deploy'])
except ImportError:
pytest.skip('mmyolo is not installed.', allow_module_level=True)
def seed_everything(seed=1029):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = False
def get_yolov5_head_model():
"""YOLOv5 Head Config."""
test_cfg = Config(
dict(
multi_label=True,
nms_pre=30000,
score_thr=0.001,
nms=dict(type='nms', iou_threshold=0.65),
max_per_img=300))
from mmyolo.models.dense_heads import YOLOv5Head
head_module = dict(
type='YOLOv5HeadModule',
num_classes=4,
in_channels=[2, 4, 8],
featmap_strides=[8, 16, 32],
num_base_priors=1)
model = YOLOv5Head(head_module, test_cfg=test_cfg)
model.requires_grad_(False)
return model
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_yolov5_head_predict_by_feat(backend_type: Backend):
"""Test predict_by_feat rewrite of YOLOXHead."""
check_backend(backend_type)
yolov5_head = get_yolov5_head_model()
yolov5_head.cpu().eval()
s = 256
batch_img_metas = [{
'scale_factor': (1.0, 1.0),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmyolo',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=20,
pre_top_k=-1,
keep_top_k=10,
background_label_id=-1,
),
module=['mmyolo.deploy'])))
seed_everything(1234)
cls_scores = [
torch.rand(1, yolov5_head.num_classes * yolov5_head.num_base_priors,
4 * pow(2, i), 4 * pow(2, i)) for i in range(3, 0, -1)
]
seed_everything(5678)
bbox_preds = [
torch.rand(1, 4 * yolov5_head.num_base_priors, 4 * pow(2, i),
4 * pow(2, i)) for i in range(3, 0, -1)
]
seed_everything(9101)
objectnesses = [
torch.rand(1, 1 * yolov5_head.num_base_priors, 4 * pow(2, i),
4 * pow(2, i)) for i in range(3, 0, -1)
]
# to get outputs of pytorch model
model_inputs = {
'cls_scores': cls_scores,
'bbox_preds': bbox_preds,
'objectnesses': objectnesses,
'batch_img_metas': batch_img_metas,
'with_nms': True
}
model_outputs = get_model_outputs(yolov5_head, 'predict_by_feat',
model_inputs)
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(
yolov5_head,
'predict_by_feat',
batch_img_metas=batch_img_metas,
with_nms=True)
rewrite_inputs = {
'cls_scores': cls_scores,
'bbox_preds': bbox_preds,
'objectnesses': objectnesses,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
        # hard-code a common shape for comparison: the rewritten and the
        # original code paths apply different NMS strategies
min_shape = min(model_outputs[0].bboxes.shape[0],
rewrite_outputs[0].shape[1], 5)
for i in range(len(model_outputs)):
rewrite_outputs[0][i, :min_shape, 0::2] = \
rewrite_outputs[0][i, :min_shape, 0::2].clamp_(0, s)
rewrite_outputs[0][i, :min_shape, 1::2] = \
rewrite_outputs[0][i, :min_shape, 1::2].clamp_(0, s)
assert np.allclose(
model_outputs[i].bboxes[:min_shape],
rewrite_outputs[0][i, :min_shape, :4],
rtol=1e-03,
atol=1e-05)
assert np.allclose(
model_outputs[i].scores[:min_shape],
rewrite_outputs[0][i, :min_shape, 4],
rtol=1e-03,
atol=1e-05)
assert np.allclose(
model_outputs[i].labels[:min_shape],
rewrite_outputs[1][i, :min_shape],
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
| 5,546
| 32.415663
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_deploy/data/model.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# model settings
default_scope = 'mmyolo'
default_hooks = dict(
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='mmdet.DetVisualizationHook'))
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='mmdet.DetLocalVisualizer',
vis_backends=vis_backends,
name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO'
load_from = None
resume = False
file_client_args = dict(backend='disk')
# dataset settings
data_root = 'data/coco/'
dataset_type = 'YOLOv5CocoDataset'
# parameters that often need to be modified
img_scale = (640, 640) # height, width
deepen_factor = 0.33
widen_factor = 0.5
max_epochs = 300
save_epoch_intervals = 10
train_batch_size_per_gpu = 16
train_num_workers = 8
val_batch_size_per_gpu = 1
val_num_workers = 2
# persistent_workers must be False if num_workers is 0.
persistent_workers = True
# only on Val
batch_shapes_cfg = dict(
type='BatchShapePolicy',
batch_size=val_batch_size_per_gpu,
img_size=img_scale[0],
size_divisor=32,
extra_pad_ratio=0.5)
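# BatchShapePolicy (YOLOv5-style) groups validation images of similar aspect
# ratio into the same batch and pads each batch to a multiple of
# `size_divisor`, which reduces padding overhead versus a fixed square input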
anchors = [[(10, 13), (16, 30), (33, 23)], [(30, 61), (62, 45), (59, 119)],
[(116, 90), (156, 198), (373, 326)]]
strides = [8, 16, 32]
# cudnn_benchmark is recommended for single-scale training, where the fixed
# input size lets cuDNN cache the fastest conv algorithms and speed up
# training (this overrides the env_cfg defined above).
env_cfg = dict(cudnn_benchmark=True)
model = dict(
type='YOLODetector',
data_preprocessor=dict(
type='mmdet.DetDataPreprocessor',
mean=[0., 0., 0.],
std=[255., 255., 255.],
bgr_to_rgb=True),
backbone=dict(
type='YOLOv5CSPDarknet',
deepen_factor=deepen_factor,
widen_factor=widen_factor,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='SiLU', inplace=True)),
neck=dict(
type='YOLOv5PAFPN',
deepen_factor=deepen_factor,
widen_factor=widen_factor,
in_channels=[256, 512, 1024],
out_channels=[256, 512, 1024],
num_csp_blocks=3,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='SiLU', inplace=True)),
bbox_head=dict(
type='YOLOv5Head',
head_module=dict(
type='YOLOv5HeadModule',
num_classes=80,
in_channels=[256, 512, 1024],
widen_factor=widen_factor,
featmap_strides=strides,
num_base_priors=3),
prior_generator=dict(
type='mmdet.YOLOAnchorGenerator',
base_sizes=anchors,
strides=strides),
loss_cls=dict(
type='mmdet.CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=0.5),
loss_bbox=dict(
type='IoULoss',
iou_mode='ciou',
bbox_format='xywh',
eps=1e-7,
reduction='mean',
loss_weight=0.05,
return_iou=True),
loss_obj=dict(
type='mmdet.CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
prior_match_thr=4.,
obj_level_weights=[4., 1., 0.4]),
test_cfg=dict(
multi_label=True,
nms_pre=30000,
score_thr=0.001,
nms=dict(type='nms', iou_threshold=0.65),
max_per_img=300))
albu_train_transforms = [
dict(type='Blur', p=0.01),
dict(type='MedianBlur', p=0.01),
dict(type='ToGray', p=0.01),
dict(type='CLAHE', p=0.01)
]
pre_transform = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True)
]
train_pipeline = [
*pre_transform,
dict(
type='Mosaic',
img_scale=img_scale,
pad_val=114.0,
pre_transform=pre_transform),
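    # Mosaic loads the extra images it mixes in by itself, which is why it
    # needs its own copy of `pre_transform` in addition to the one above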
dict(
type='YOLOv5RandomAffine',
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2),
border_val=(114, 114, 114)),
dict(
type='mmdet.Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
}),
dict(type='YOLOv5HSVRandomAug'),
dict(type='mmdet.RandomFlip', prob=0.5),
dict(
type='mmdet.PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
'flip_direction'))
]
train_dataloader = dict(
batch_size=train_batch_size_per_gpu,
num_workers=train_num_workers,
persistent_workers=persistent_workers,
pin_memory=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=train_pipeline))
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='YOLOv5KeepRatioResize', scale=img_scale),
dict(
type='LetterResize',
scale=img_scale,
allow_scale_up=False,
pad_val=dict(img=114)),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='mmdet.PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'pad_param'))
]
val_dataloader = dict(
batch_size=val_batch_size_per_gpu,
num_workers=val_num_workers,
persistent_workers=persistent_workers,
pin_memory=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
test_mode=True,
data_prefix=dict(img='val2017/'),
ann_file='annotations/instances_val2017.json',
pipeline=test_pipeline,
batch_shapes_cfg=batch_shapes_cfg))
test_dataloader = val_dataloader
param_scheduler = None
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(
type='SGD',
lr=0.01,
momentum=0.937,
weight_decay=0.0005,
nesterov=True,
batch_size_per_gpu=train_batch_size_per_gpu),
constructor='YOLOv5OptimizerConstructor')
default_hooks = dict(
param_scheduler=dict(
type='YOLOv5ParamSchedulerHook',
scheduler_type='linear',
lr_factor=0.01,
max_epochs=max_epochs),
checkpoint=dict(
type='CheckpointHook', interval=save_epoch_intervals,
max_keep_ckpts=3))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0001,
update_buffers=True,
priority=49)
]
val_evaluator = dict(
type='mmdet.CocoMetric',
proposal_nums=(100, 1, 10),
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
train_cfg = dict(
type='EpochBasedTrainLoop',
max_epochs=max_epochs,
val_interval=save_epoch_intervals)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
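# A minimal sketch (assuming an mmengine + mmyolo environment; the config
# path below is hypothetical) of how a config file like this is consumed:
#
#     from mmengine.config import Config
#     from mmyolo.registry import MODELS
#     from mmyolo.utils import register_all_modules
#
#     register_all_modules()                # register mmyolo components
#     cfg = Config.fromfile('tests/test_deploy/data/model.py')
#     model = MODELS.build(cfg.model)       # builds the YOLODetector above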
| 7,658
| 28.011364
| 75
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_layers/test_ema.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
import math
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.testing import assert_allclose
from mmyolo.models.layers import ExpMomentumEMA
class TestEMA(TestCase):
def test_exp_momentum_ema(self):
model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))
# Test invalid gamma
with self.assertRaisesRegex(AssertionError,
'gamma must be greater than 0'):
ExpMomentumEMA(model, gamma=-1)
# Test EMA
model = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10))
momentum = 0.1
gamma = 4
ema_model = ExpMomentumEMA(model, momentum=momentum, gamma=gamma)
averaged_params = [
torch.zeros_like(param) for param in model.parameters()
]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
for p, p_avg in zip(model.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
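                    # annealed momentum: large at first (the average tracks
                    # the raw weights closely), decaying towards the
                    # configured `momentum`, mirroring ExpMomentumEMA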
m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum
updated_averaged_params.append(
(p_avg * (1 - m) + p * m).clone())
ema_model.update_parameters(model)
averaged_params = updated_averaged_params
for p_target, p_ema in zip(averaged_params, ema_model.parameters()):
assert_allclose(p_target, p_ema)
def test_exp_momentum_ema_update_buffer(self):
model = nn.Sequential(
nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),
nn.Linear(5, 10))
# Test invalid gamma
with self.assertRaisesRegex(AssertionError,
'gamma must be greater than 0'):
ExpMomentumEMA(model, gamma=-1)
# Test EMA with momentum annealing.
momentum = 0.1
gamma = 4
ema_model = ExpMomentumEMA(
model, gamma=gamma, momentum=momentum, update_buffers=True)
averaged_params = [
torch.zeros_like(param)
for param in itertools.chain(model.parameters(), model.buffers())
if param.size() != torch.Size([])
]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
params = [
param for param in itertools.chain(model.parameters(),
model.buffers())
if param.size() != torch.Size([])
]
for p, p_avg in zip(params, averaged_params):
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum
updated_averaged_params.append(
(p_avg * (1 - m) + p * m).clone())
ema_model.update_parameters(model)
averaged_params = updated_averaged_params
ema_params = [
param for param in itertools.chain(ema_model.module.parameters(),
ema_model.module.buffers())
if param.size() != torch.Size([])
]
for p_target, p_ema in zip(averaged_params, ema_params):
assert_allclose(p_target, p_ema)
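# For reference, the annealed update that both tests re-implement is
#
#     m_i = (1 - m) * exp(-(i + 1) / gamma) + m
#     ema_i = (1 - m_i) * ema_{i - 1} + m_i * theta_i
#
# where m is the configured `momentum`, gamma controls how fast the
# annealing decays and theta_i are the raw model weights at step i.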
| 3,634
| 37.263158
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_layers/test_yolo_bricks.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.layers import SPPFBottleneck
from mmyolo.utils import register_all_modules
register_all_modules()
class TestSPPFBottleneck(TestCase):
def test_forward(self):
input_tensor = torch.randn((1, 3, 20, 20))
bottleneck = SPPFBottleneck(3, 16)
out_tensor = bottleneck(input_tensor)
self.assertEqual(out_tensor.shape, (1, 16, 20, 20))
bottleneck = SPPFBottleneck(3, 16, kernel_sizes=[3, 5, 7])
out_tensor = bottleneck(input_tensor)
self.assertEqual(out_tensor.shape, (1, 16, 20, 20))
# set len(kernel_sizes)=4
bottleneck = SPPFBottleneck(3, 16, kernel_sizes=[3, 5, 7, 9])
out_tensor = bottleneck(input_tensor)
self.assertEqual(out_tensor.shape, (1, 16, 20, 20))
# set use_conv_first=False
bottleneck = SPPFBottleneck(
3, 16, use_conv_first=False, kernel_sizes=[3, 5, 7, 9])
out_tensor = bottleneck(input_tensor)
self.assertEqual(out_tensor.shape, (1, 16, 20, 20))
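# A minimal sketch (assuming the SPPFBottleneck API exercised above): an
# int `kernel_sizes` selects the serial SPPF-style pooling, while a
# sequence gives one pooling branch per kernel as in the original SPP;
# both keep the spatial size, so the output shapes match.
def _sppf_sketch():
    x = torch.randn(1, 3, 20, 20)
    serial = SPPFBottleneck(3, 16, kernel_sizes=5)
    parallel = SPPFBottleneck(3, 16, kernel_sizes=[5, 9, 13])
    assert serial(x).shape == parallel(x).shape == (1, 16, 20, 20)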
| 1,111
| 30.771429
| 69
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_layers/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_backbone/test_csp_resnet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.models import PPYOLOECSPResNet
from mmyolo.utils import register_all_modules
from .utils import check_norm_state, is_norm
register_all_modules()
class TestPPYOLOECSPResNet(TestCase):
def test_init(self):
# out_indices in range(len(arch_setting) + 1)
with pytest.raises(AssertionError):
PPYOLOECSPResNet(out_indices=(6, ))
with pytest.raises(ValueError):
# frozen_stages must in range(-1, len(arch_setting) + 1)
PPYOLOECSPResNet(frozen_stages=6)
def test_forward(self):
# Test PPYOLOECSPResNet with first stage frozen
frozen_stages = 1
model = PPYOLOECSPResNet(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test PPYOLOECSPResNet with norm_eval=True
model = PPYOLOECSPResNet(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test PPYOLOECSPResNet-P5 forward with widen_factor=0.25
model = PPYOLOECSPResNet(
arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
# Test PPYOLOECSPResNet forward with dict(type='ReLU')
model = PPYOLOECSPResNet(
widen_factor=0.125,
act_cfg=dict(type='ReLU'),
out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test PPYOLOECSPResNet with BatchNorm forward
model = PPYOLOECSPResNet(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test PPYOLOECSPResNet with BatchNorm forward
model = PPYOLOECSPResNet(plugins=[
dict(
cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
stages=(False, False, True, True)),
])
assert len(model.stage1) == 1
assert len(model.stage2) == 1
assert len(model.stage3) == 2 # +DropBlock
assert len(model.stage4) == 2 # +DropBlock
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 256, 32, 32))
assert feat[1].shape == torch.Size((1, 512, 16, 16))
assert feat[2].shape == torch.Size((1, 1024, 8, 8))
| 4,162
| 35.517544
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_backbone/test_efficient_rep.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.models.backbones import YOLOv6CSPBep, YOLOv6EfficientRep
from mmyolo.utils import register_all_modules
from .utils import check_norm_state, is_norm
register_all_modules()
class TestYOLOv6EfficientRep(TestCase):
def test_init(self):
# out_indices in range(len(arch_setting) + 1)
with pytest.raises(AssertionError):
YOLOv6EfficientRep(out_indices=(6, ))
with pytest.raises(ValueError):
# frozen_stages must in range(-1, len(arch_setting) + 1)
YOLOv6EfficientRep(frozen_stages=6)
def test_YOLOv6EfficientRep_forward(self):
# Test YOLOv6EfficientRep with first stage frozen
frozen_stages = 1
model = YOLOv6EfficientRep(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test YOLOv6EfficientRep with norm_eval=True
model = YOLOv6EfficientRep(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test YOLOv6EfficientRep-P5 forward with widen_factor=0.25
model = YOLOv6EfficientRep(
arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
# Test YOLOv6EfficientRep forward with dict(type='ReLU')
model = YOLOv6EfficientRep(
widen_factor=0.125,
act_cfg=dict(type='ReLU'),
out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test YOLOv6EfficientRep with BatchNorm forward
model = YOLOv6EfficientRep(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test YOLOv6EfficientRep with BatchNorm forward
model = YOLOv6EfficientRep(plugins=[
dict(
cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
stages=(False, False, True, True)),
])
assert len(model.stage1) == 1
assert len(model.stage2) == 1
assert len(model.stage3) == 2 # +DropBlock
assert len(model.stage4) == 3 # +SPPF+DropBlock
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 256, 32, 32))
assert feat[1].shape == torch.Size((1, 512, 16, 16))
assert feat[2].shape == torch.Size((1, 1024, 8, 8))
def test_YOLOv6CSPBep_forward(self):
# Test YOLOv6CSPBep with first stage frozen
frozen_stages = 1
model = YOLOv6CSPBep(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test YOLOv6CSPBep with norm_eval=True
model = YOLOv6CSPBep(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test YOLOv6CSPBep forward with widen_factor=0.25
model = YOLOv6CSPBep(
arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
# Test YOLOv6CSPBep forward with dict(type='ReLU')
model = YOLOv6CSPBep(
widen_factor=0.125,
act_cfg=dict(type='ReLU'),
out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test YOLOv6CSPBep with BatchNorm forward
model = YOLOv6CSPBep(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test YOLOv6CSPBep with BatchNorm forward
model = YOLOv6CSPBep(plugins=[
dict(
cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
stages=(False, False, True, True)),
])
assert len(model.stage1) == 1
assert len(model.stage2) == 1
assert len(model.stage3) == 2 # +DropBlock
assert len(model.stage4) == 3 # +SPPF+DropBlock
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 256, 32, 32))
assert feat[1].shape == torch.Size((1, 512, 16, 16))
assert feat[2].shape == torch.Size((1, 1024, 8, 8))
| 7,688
| 36.876847
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_backbone/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.layers import SimplifiedBasicBlock
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
SimplifiedBasicBlock)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
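# A minimal sketch (hypothetical two-layer net) of how the helpers above
# are used by the backbone tests:
def _check_norm_sketch():
    import torch.nn as nn
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    net.eval()  # puts every submodule, including the norm, in eval mode
    assert is_norm(net[1])
    assert check_norm_state(net.modules(), train_state=False)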
| 1,026
| 31.09375
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_backbone/test_yolov7_backbone.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.models.backbones import YOLOv7Backbone
from mmyolo.utils import register_all_modules
from .utils import check_norm_state
register_all_modules()
class TestYOLOv7Backbone(TestCase):
def test_init(self):
# out_indices in range(len(arch_setting) + 1)
with pytest.raises(AssertionError):
YOLOv7Backbone(out_indices=(6, ))
with pytest.raises(ValueError):
# frozen_stages must in range(-1, len(arch_setting) + 1)
YOLOv7Backbone(frozen_stages=6)
def test_forward(self):
# Test YOLOv7Backbone-L with first stage frozen
frozen_stages = 1
model = YOLOv7Backbone(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test YOLOv7Backbone-L with norm_eval=True
model = YOLOv7Backbone(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test YOLOv7Backbone-L forward with widen_factor=0.25
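        # widen_factor scales every stage's channel width, so the channel
        # dimensions asserted below are a quarter of the full-width model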
model = YOLOv7Backbone(
widen_factor=0.25, out_indices=tuple(range(0, 5)))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 64, 16, 16))
assert feat[2].shape == torch.Size((1, 128, 8, 8))
assert feat[3].shape == torch.Size((1, 256, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
# Test YOLOv7Backbone-L with plugins
model = YOLOv7Backbone(
widen_factor=0.25,
plugins=[
dict(
cfg=dict(
type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
stages=(False, False, True, True)),
])
assert len(model.stage1) == 2
assert len(model.stage2) == 2
assert len(model.stage3) == 3 # +DropBlock
assert len(model.stage4) == 3 # +DropBlock
model.train()
imgs = torch.randn(1, 3, 128, 128)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 128, 16, 16))
assert feat[1].shape == torch.Size((1, 256, 8, 8))
assert feat[2].shape == torch.Size((1, 256, 4, 4))
# Test YOLOv7Backbone-X forward with widen_factor=0.25
model = YOLOv7Backbone(arch='X', widen_factor=0.25)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 160, 8, 8))
assert feat[1].shape == torch.Size((1, 320, 4, 4))
assert feat[2].shape == torch.Size((1, 320, 2, 2))
# Test YOLOv7Backbone-tiny forward with widen_factor=0.25
model = YOLOv7Backbone(arch='Tiny', widen_factor=0.25)
model.train()
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 32, 8, 8))
assert feat[1].shape == torch.Size((1, 64, 4, 4))
assert feat[2].shape == torch.Size((1, 128, 2, 2))
# Test YOLOv7Backbone-w forward with widen_factor=0.25
model = YOLOv7Backbone(
arch='W', widen_factor=0.25, out_indices=(2, 3, 4, 5))
model.train()
imgs = torch.randn(1, 3, 128, 128)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 64, 16, 16))
assert feat[1].shape == torch.Size((1, 128, 8, 8))
assert feat[2].shape == torch.Size((1, 192, 4, 4))
assert feat[3].shape == torch.Size((1, 256, 2, 2))
        # Test YOLOv7Backbone-D forward with widen_factor=0.25
model = YOLOv7Backbone(
arch='D', widen_factor=0.25, out_indices=(2, 3, 4, 5))
model.train()
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 96, 16, 16))
assert feat[1].shape == torch.Size((1, 192, 8, 8))
assert feat[2].shape == torch.Size((1, 288, 4, 4))
assert feat[3].shape == torch.Size((1, 384, 2, 2))
        # Test YOLOv7Backbone-E forward with widen_factor=0.25
model = YOLOv7Backbone(
arch='E', widen_factor=0.25, out_indices=(2, 3, 4, 5))
model.train()
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 80, 16, 16))
assert feat[1].shape == torch.Size((1, 160, 8, 8))
assert feat[2].shape == torch.Size((1, 240, 4, 4))
assert feat[3].shape == torch.Size((1, 320, 2, 2))
        # Test YOLOv7Backbone-E2E forward with widen_factor=0.25
model = YOLOv7Backbone(
arch='E2E', widen_factor=0.25, out_indices=(2, 3, 4, 5))
model.train()
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 80, 16, 16))
assert feat[1].shape == torch.Size((1, 160, 8, 8))
assert feat[2].shape == torch.Size((1, 240, 4, 4))
assert feat[3].shape == torch.Size((1, 320, 2, 2))
| 5,705
| 35.812903
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_backbone/test_csp_darknet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from parameterized import parameterized
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.models.backbones import (YOLOv5CSPDarknet, YOLOv8CSPDarknet,
YOLOXCSPDarknet)
from mmyolo.utils import register_all_modules
from .utils import check_norm_state, is_norm
register_all_modules()
class TestCSPDarknet(TestCase):
@parameterized.expand([(YOLOv5CSPDarknet, ), (YOLOXCSPDarknet, ),
(YOLOv8CSPDarknet, )])
def test_init(self, module_class):
# out_indices in range(len(arch_setting) + 1)
with pytest.raises(AssertionError):
module_class(out_indices=(6, ))
with pytest.raises(ValueError):
# frozen_stages must in range(-1, len(arch_setting) + 1)
module_class(frozen_stages=6)
@parameterized.expand([(YOLOv5CSPDarknet, ), (YOLOXCSPDarknet, ),
(YOLOv8CSPDarknet, )])
def test_forward(self, module_class):
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = module_class(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test CSPDarknet with norm_eval=True
model = module_class(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test CSPDarknet-P5 forward with widen_factor=0.25
model = module_class(
arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
# Test CSPDarknet forward with dict(type='ReLU')
model = module_class(
widen_factor=0.125,
act_cfg=dict(type='ReLU'),
out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with BatchNorm forward
model = module_class(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with Dropout Block
model = module_class(plugins=[
dict(
cfg=dict(type='mmdet.DropBlock', drop_prob=0.1, block_size=3),
stages=(False, False, True, True)),
])
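        # `stages=(False, False, True, True)` attaches the plugin to stage3
        # and stage4 only, hence the extra layer counted in each below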
assert len(model.stage1) == 2
assert len(model.stage2) == 2
assert len(model.stage3) == 3 # +DropBlock
assert len(model.stage4) == 4 # +SPPF+DropBlock
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 256, 32, 32))
assert feat[1].shape == torch.Size((1, 512, 16, 16))
assert feat[2].shape == torch.Size((1, 1024, 8, 8))
| 4,481
| 36.35
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_backbone/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_data_preprocessor/test_data_preprocessor.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.structures import DetDataSample
from mmengine import MessageHub
from mmyolo.models import PPYOLOEBatchRandomResize, PPYOLOEDetDataPreprocessor
from mmyolo.models.data_preprocessors import (YOLOv5DetDataPreprocessor,
YOLOXBatchSyncRandomResize)
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv5DetDataPreprocessor(TestCase):
def test_forward(self):
processor = YOLOv5DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])
data = {
'inputs': [torch.randint(0, 256, (3, 11, 10))],
'data_samples': [DetDataSample()]
}
out_data = processor(data, training=False)
batch_inputs, batch_data_samples = out_data['inputs'], out_data[
'data_samples']
self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))
self.assertEqual(len(batch_data_samples), 1)
# test channel_conversion
processor = YOLOv5DetDataPreprocessor(
mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)
out_data = processor(data, training=False)
batch_inputs, batch_data_samples = out_data['inputs'], out_data[
'data_samples']
self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))
self.assertEqual(len(batch_data_samples), 1)
# test padding, training=False
data = {
'inputs': [
torch.randint(0, 256, (3, 10, 11)),
torch.randint(0, 256, (3, 9, 14))
]
}
processor = YOLOv5DetDataPreprocessor(
mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)
out_data = processor(data, training=False)
batch_inputs, batch_data_samples = out_data['inputs'], out_data[
'data_samples']
self.assertEqual(batch_inputs.shape, (2, 3, 10, 14))
self.assertIsNone(batch_data_samples)
# test training
data = {
'inputs': torch.randint(0, 256, (2, 3, 10, 11)),
'data_samples': {
'bboxes_labels': torch.randint(0, 11, (18, 6))
},
}
out_data = processor(data, training=True)
batch_inputs, batch_data_samples = out_data['inputs'], out_data[
'data_samples']
self.assertIn('img_metas', batch_data_samples)
self.assertIn('bboxes_labels', batch_data_samples)
self.assertEqual(batch_inputs.shape, (2, 3, 10, 11))
self.assertIsInstance(batch_data_samples['bboxes_labels'],
torch.Tensor)
self.assertIsInstance(batch_data_samples['img_metas'], list)
data = {
'inputs': [torch.randint(0, 256, (3, 11, 10))],
'data_samples': [DetDataSample()]
}
# data_samples must be dict
with self.assertRaises(AssertionError):
processor(data, training=True)
class TestPPYOLOEDetDataPreprocessor(TestCase):
def test_batch_random_resize(self):
processor = PPYOLOEDetDataPreprocessor(
pad_size_divisor=32,
batch_augments=[
dict(
type='PPYOLOEBatchRandomResize',
random_size_range=(320, 480),
interval=1,
size_divisor=32,
random_interp=True,
keep_ratio=False)
],
mean=[0., 0., 0.],
std=[255., 255., 255.],
bgr_to_rgb=True)
self.assertTrue(
isinstance(processor.batch_augments[0], PPYOLOEBatchRandomResize))
message_hub = MessageHub.get_instance('test_batch_random_resize')
message_hub.update_info('iter', 0)
# test training
data = {
'inputs': [
torch.randint(0, 256, (3, 10, 11)),
torch.randint(0, 256, (3, 10, 11))
],
'data_samples': {
'bboxes_labels': torch.randint(0, 11, (18, 6)).float()
},
}
out_data = processor(data, training=True)
batch_data_samples = out_data['data_samples']
self.assertIn('img_metas', batch_data_samples)
self.assertIn('bboxes_labels', batch_data_samples)
self.assertIsInstance(batch_data_samples['bboxes_labels'],
torch.Tensor)
self.assertIsInstance(batch_data_samples['img_metas'], list)
data = {
'inputs': [torch.randint(0, 256, (3, 11, 10))],
'data_samples': DetDataSample()
}
# data_samples must be list
with self.assertRaises(AssertionError):
processor(data, training=True)
class TestYOLOXDetDataPreprocessor(TestCase):
def test_batch_sync_random_size(self):
processor = YOLOXBatchSyncRandomResize(
random_size_range=(480, 800), size_divisor=32, interval=1)
self.assertTrue(isinstance(processor, YOLOXBatchSyncRandomResize))
message_hub = MessageHub.get_instance(
'test_yolox_batch_sync_random_resize')
message_hub.update_info('iter', 0)
# test training
inputs = torch.randint(0, 256, (4, 3, 10, 11))
data_samples = {'bboxes_labels': torch.randint(0, 11, (18, 6)).float()}
inputs, data_samples = processor(inputs, data_samples)
self.assertIn('bboxes_labels', data_samples)
self.assertIsInstance(data_samples['bboxes_labels'], torch.Tensor)
self.assertIsInstance(inputs, torch.Tensor)
inputs = torch.randint(0, 256, (4, 3, 10, 11))
data_samples = DetDataSample()
# data_samples must be dict
with self.assertRaises(AssertionError):
processor(inputs, data_samples)
| 5,829
| 36.133758
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_data_preprocessor/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_utils/test_misc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.models.utils import gt_instances_preprocess
from mmyolo.utils import register_all_modules
register_all_modules()
class TestGtInstancesPreprocess:
@pytest.mark.parametrize('box_dim', [4, 5])
def test(self, box_dim):
gt_instances = InstanceData(
bboxes=torch.empty((0, box_dim)), labels=torch.LongTensor([]))
batch_size = 1
batch_instance = gt_instances_preprocess([gt_instances], batch_size)
assert isinstance(batch_instance, Tensor)
        assert len(batch_instance.shape) == 3, 'the result must be 3-dim.'
assert batch_instance.size(-1) == box_dim + 1
@pytest.mark.parametrize('box_dim', [4, 5])
def test_fast_version(self, box_dim: int):
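        # fast path: ground truths arrive already packed as a tensor whose
        # rows are [batch_idx, label, *bbox] with `box_dim` bbox entries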
gt_instances = torch.from_numpy(
np.array([[0., 1., *(0., ) * box_dim]], dtype=np.float32))
batch_size = 1
batch_instance = gt_instances_preprocess(gt_instances, batch_size)
assert isinstance(batch_instance, Tensor)
        assert len(batch_instance.shape) == 3, 'the result must be 3-dim.'
assert batch_instance.shape[1] == 1
assert batch_instance.shape[2] == box_dim + 1
| 1,330
| 35.972222
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_utils/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_necks/test_ppyoloe_csppan.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models import PPYOLOECSPPAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestPPYOLOECSPPAFPN(TestCase):
def test_forward(self):
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = [8, 16, 32]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = PPYOLOECSPPAFPN(
in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i]
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_drop_block(self):
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = [8, 16, 32]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = PPYOLOECSPPAFPN(
in_channels=in_channels,
out_channels=out_channels,
drop_block_cfg=dict(
type='mmdet.DropBlock',
drop_prob=0.1,
block_size=3,
warm_iters=0))
neck.train()
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i]
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 1,741
| 31.259259
| 71
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_necks/test_cspnext_pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.necks import CSPNeXtPAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestCSPNeXtPAFPN(TestCase):
def test_forward(self):
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = 24
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = CSPNeXtPAFPN(in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
# test depth-wise
neck = CSPNeXtPAFPN(
in_channels=in_channels,
out_channels=out_channels,
use_depthwise=True)
from mmcv.cnn.bricks import DepthwiseSeparableConvModule
        self.assertIs(neck.conv, DepthwiseSeparableConvModule)
| 1,159
| 29.526316
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_necks/test_yolov8_pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models import YOLOv8PAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv8PAFPN(TestCase):
def test_YOLOv8PAFPN_forward(self):
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = [8, 16, 32]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOv8PAFPN(in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i]
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 879
| 29.344828
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_necks/test_yolox_pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.necks import YOLOXPAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOXPAFPN(TestCase):
def test_forward(self):
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = 24
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 858
| 28.62069
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_necks/test_yolov7_pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmcv.cnn import ConvModule
from mmyolo.models.necks import YOLOv7PAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv7PAFPN(TestCase):
def test_forward(self):
# test P5
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = [8, 16, 32]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOv7PAFPN(in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i] * 2
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
# test is_tiny_version
neck = YOLOv7PAFPN(
in_channels=in_channels,
out_channels=out_channels,
is_tiny_version=True)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i] * 2
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
# test use_in_channels_in_downsample
neck = YOLOv7PAFPN(
in_channels=in_channels,
out_channels=out_channels,
use_in_channels_in_downsample=True)
        outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i] * 2
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
# test use_repconv_outs is False
neck = YOLOv7PAFPN(
in_channels=in_channels,
out_channels=out_channels,
use_repconv_outs=False)
self.assertIsInstance(neck.out_layers[0], ConvModule)
# test P6
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)]
out_channels = [8, 16, 32, 64]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOv7PAFPN(in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i]
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 2,718
| 32.9875
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_necks/test_yolov6_pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.necks import YOLOv6CSPRepPAFPN, YOLOv6RepPAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv6PAFPN(TestCase):
def test_YOLOv6RepPAFP_forward(self):
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = [8, 16, 32]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOv6RepPAFPN(
in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i]
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_YOLOv6CSPRepPAFPN_forward(self):
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = [8, 16, 32]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOv6CSPRepPAFPN(
in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i]
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 1,593
| 32.914894
| 71
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_necks/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_necks/test_yolov5_pafpn.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.necks import YOLOv5PAFPN
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv5PAFPN(TestCase):
def test_forward(self):
s = 64
in_channels = [8, 16, 32]
        feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
out_channels = [8, 16, 32]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOv5PAFPN(in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels[i]
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
| 873
| 29.137931
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/test_ppyoloe_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import ConfigDict, MessageHub
from mmengine.config import Config
from mmengine.model import bias_init_with_prob
from mmengine.testing import assert_allclose
from mmyolo.models import PPYOLOEHead
from mmyolo.utils import register_all_modules
register_all_modules()
class TestPPYOLOEHead(TestCase):
def setUp(self):
self.head_module = dict(
type='PPYOLOEHeadModule',
num_classes=4,
in_channels=[32, 64, 128],
featmap_strides=(8, 16, 32))
def test_init_weights(self):
head = PPYOLOEHead(head_module=self.head_module)
head.head_module.init_weights()
bias_init = bias_init_with_prob(0.01)
for conv_cls, conv_reg in zip(head.head_module.cls_preds,
head.head_module.reg_preds):
assert_allclose(conv_cls.weight.data,
torch.zeros_like(conv_cls.weight.data))
assert_allclose(conv_reg.weight.data,
torch.zeros_like(conv_reg.weight.data))
assert_allclose(conv_cls.bias.data,
torch.ones_like(conv_cls.bias.data) * bias_init)
assert_allclose(conv_reg.bias.data,
torch.ones_like(conv_reg.bias.data))
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
}]
test_cfg = Config(
dict(
multi_label=True,
nms_pre=1000,
score_thr=0.01,
nms=dict(type='nms', iou_threshold=0.7),
max_per_img=300))
head = PPYOLOEHead(head_module=self.head_module, test_cfg=test_cfg)
head.eval()
feat = [
torch.rand(1, in_channels, s // feat_size, s // feat_size)
for in_channels, feat_size in [[32, 8], [64, 16], [128, 32]]
]
cls_scores, bbox_preds = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
None,
img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
head.predict_by_feat(
cls_scores,
bbox_preds,
None,
img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
def test_loss_by_feat(self):
message_hub = MessageHub.get_instance('test_ppyoloe_loss_by_feat')
message_hub.update_info('epoch', 1)
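        # epoch 1 < initial_epoch (31), so the warm-up BatchATSSAssigner
        # configured below is the one exercised by this loss computation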
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'batch_input_shape': (s, s),
'scale_factor': 1,
}]
head = PPYOLOEHead(
head_module=self.head_module,
train_cfg=ConfigDict(
initial_epoch=31,
initial_assigner=dict(
type='BatchATSSAssigner',
num_classes=4,
topk=9,
iou_calculator=dict(type='mmdet.BboxOverlaps2D')),
assigner=dict(
type='BatchTaskAlignedAssigner',
num_classes=4,
topk=13,
alpha=1,
beta=6)))
head.train()
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = torch.empty((0, 6), dtype=torch.float32)
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
bbox_dist_preds, gt_instances,
img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box or dfl loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_dfl_loss = empty_gt_losses['loss_dfl'].sum()
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_dfl_loss.item(), 0,
            'there should be no dfl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
head = PPYOLOEHead(
head_module=self.head_module,
train_cfg=ConfigDict(
initial_epoch=31,
initial_assigner=dict(
type='BatchATSSAssigner',
num_classes=4,
topk=9,
iou_calculator=dict(type='mmdet.BboxOverlaps2D')),
assigner=dict(
type='BatchTaskAlignedAssigner',
num_classes=4,
topk=13,
alpha=1,
beta=6)))
head.train()
gt_instances = torch.Tensor(
[[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]])
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
bbox_dist_preds, gt_instances,
img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_loss_dfl.item(), 0,
                           'dfl loss should be non-zero')
# test num_class = 1
self.head_module['num_classes'] = 1
head = PPYOLOEHead(
head_module=self.head_module,
train_cfg=ConfigDict(
initial_epoch=31,
initial_assigner=dict(
type='BatchATSSAssigner',
num_classes=1,
topk=9,
iou_calculator=dict(type='mmdet.BboxOverlaps2D')),
assigner=dict(
type='BatchTaskAlignedAssigner',
num_classes=1,
topk=13,
alpha=1,
beta=6)))
head.train()
gt_instances = torch.Tensor(
[[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]])
cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
bbox_dist_preds, gt_instances,
img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_loss_dfl.item(), 0,
                           'dfl loss should be non-zero')
| 7,851
| 37.116505
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/test_yolov7_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmyolo.models.dense_heads import YOLOv7Head
from mmyolo.utils import register_all_modules
register_all_modules()
# TODO: Test YOLOv7p6HeadModule
class TestYOLOv7Head(TestCase):
def setUp(self):
self.head_module = dict(
type='YOLOv7HeadModule',
num_classes=2,
in_channels=[32, 64, 128],
featmap_strides=[8, 16, 32],
num_base_priors=3)
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
}]
test_cfg = Config(
dict(
multi_label=True,
max_per_img=300,
score_thr=0.01,
nms=dict(type='nms', iou_threshold=0.65)))
head = YOLOv7Head(head_module=self.head_module, test_cfg=test_cfg)
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds, objectnesses = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
objectnesses,
img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
head.predict_by_feat(
cls_scores,
bbox_preds,
objectnesses,
img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
def test_loss_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'batch_input_shape': (s, s),
'scale_factor': 1,
}]
head = YOLOv7Head(head_module=self.head_module)
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds, objectnesses = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData(
bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
objectnesses, [gt_instances],
img_metas)
        # When there is no truth, there should be no cls or box loss, and
        # only the objectness loss should be non-zero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertGreater(empty_obj_loss.item(), 0,
'objectness loss should be non-zero')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
head = YOLOv7Head(head_module=self.head_module)
gt_instances = InstanceData(
bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
labels=torch.LongTensor([1]))
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_obj_loss.item(), 0,
'obj loss should be non-zero')
# test num_class = 1
self.head_module['num_classes'] = 1
head = YOLOv7Head(head_module=self.head_module)
gt_instances = InstanceData(
bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
labels=torch.LongTensor([0]))
cls_scores, bbox_preds, objectnesses = head.forward(feat)
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertEqual(onegt_cls_loss.item(), 0,
                         'cls loss should be zero when num_classes == 1')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_obj_loss.item(), 0,
'obj loss should be non-zero')
| 5,521
| 36.821918
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/test_yolov5_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmyolo.models.dense_heads import YOLOv5Head
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv5Head(TestCase):
def setUp(self):
self.head_module = dict(
type='YOLOv5HeadModule',
num_classes=2,
in_channels=[32, 64, 128],
featmap_strides=[8, 16, 32],
num_base_priors=3)
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
}]
test_cfg = Config(
dict(
multi_label=True,
max_per_img=300,
score_thr=0.01,
nms=dict(type='nms', iou_threshold=0.65)))
head = YOLOv5Head(head_module=self.head_module, test_cfg=test_cfg)
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds, objectnesses = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
objectnesses,
img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
head.predict_by_feat(
cls_scores,
bbox_preds,
objectnesses,
img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
def test_loss_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'batch_input_shape': (s, s),
'scale_factor': 1,
}]
head = YOLOv5Head(head_module=self.head_module)
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds, objectnesses = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData(
bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
objectnesses, [gt_instances],
img_metas)
        # When there is no truth, there should be no cls or box loss, and
        # only the objectness loss should be non-zero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertGreater(empty_obj_loss.item(), 0,
'objectness loss should be non-zero')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
head = YOLOv5Head(head_module=self.head_module)
gt_instances = InstanceData(
bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
labels=torch.LongTensor([1]))
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_obj_loss.item(), 0,
'obj loss should be non-zero')
# test num_class = 1
self.head_module['num_classes'] = 1
head = YOLOv5Head(head_module=self.head_module)
gt_instances = InstanceData(
bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
labels=torch.LongTensor([0]))
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertEqual(onegt_cls_loss.item(), 0,
                         'cls loss should be zero when num_classes == 1')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_obj_loss.item(), 0,
'obj loss should be non-zero')
def test_loss_by_feat_with_ignore(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'batch_input_shape': (s, s),
'scale_factor': 1,
}]
head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8)
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds, objectnesses = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData(
bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
# ignore boxes
gt_instances_ignore = torch.tensor(
[[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32)
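        # Each ignore row is assumed to follow the packed GT layout
        # (batch_idx, label, x1, y1, x2, y2); with ignore_iof_thr=0.8,
        # predictions overlapping an ignore box above that IoF threshold are
        # assumed to be dropped from the objectness target rather than
        # treated as negatives.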
empty_gt_losses = head._loss_by_feat_with_ignore(
cls_scores, bbox_preds, objectnesses, [gt_instances], img_metas,
gt_instances_ignore)
        # When there is no truth, YOLOv5 produces zero cls and box losses,
        # while the objectness loss should still be non-zero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertGreater(empty_obj_loss.item(), 0,
'objectness loss should be non-zero')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8)
gt_instances = InstanceData(
bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
labels=torch.LongTensor([1]))
gt_instances_ignore = torch.tensor(
[[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32)
one_gt_losses = head._loss_by_feat_with_ignore(cls_scores, bbox_preds,
objectnesses,
[gt_instances],
img_metas,
gt_instances_ignore)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_obj_loss.item(), 0,
'obj loss should be non-zero')
# test num_class = 1
self.head_module['num_classes'] = 1
head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8)
gt_instances = InstanceData(
bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
labels=torch.LongTensor([0]))
gt_instances_ignore = torch.tensor(
[[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32)
one_gt_losses = head._loss_by_feat_with_ignore(cls_scores, bbox_preds,
objectnesses,
[gt_instances],
img_metas,
gt_instances_ignore)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertEqual(onegt_cls_loss.item(), 0,
                         'cls loss should be zero when num_classes == 1')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_obj_loss.item(), 0,
'obj loss should be non-zero')
| 9,871
| 40.654008
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/test_yolox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.model import bias_init_with_prob
from mmengine.testing import assert_allclose
from mmyolo.models.dense_heads import YOLOXHead
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOXHead(TestCase):
def setUp(self):
self.head_module = dict(
type='YOLOXHeadModule',
num_classes=4,
in_channels=1,
stacked_convs=1,
)
def test_init_weights(self):
head = YOLOXHead(head_module=self.head_module)
head.head_module.init_weights()
bias_init = bias_init_with_prob(0.01)
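        # bias_init_with_prob(p) returns -log((1 - p) / p); with p = 0.01 the
        # cls/obj conv biases start near -4.59, so initial sigmoid scores are
        # roughly 0.01 (the usual rare-foreground prior), which the loop
        # below verifies.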
for conv_cls, conv_obj in zip(head.head_module.multi_level_conv_cls,
head.head_module.multi_level_conv_obj):
assert_allclose(conv_cls.bias.data,
torch.ones_like(conv_cls.bias.data) * bias_init)
assert_allclose(conv_obj.bias.data,
torch.ones_like(conv_obj.bias.data) * bias_init)
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
}]
test_cfg = Config(
dict(
multi_label=True,
max_per_img=300,
score_thr=0.01,
nms=dict(type='nms', iou_threshold=0.65)))
head = YOLOXHead(head_module=self.head_module, test_cfg=test_cfg)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, objectnesses = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
objectnesses,
img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
head.predict_by_feat(
cls_scores,
bbox_preds,
objectnesses,
img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
def test_loss_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
train_cfg = Config(
dict(
assigner=dict(
type='mmdet.SimOTAAssigner',
iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
center_radius=2.5,
candidate_topk=10,
iou_weight=3.0,
cls_weight=1.0)))
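        # SimOTA is assumed to rank candidates inside center_radius by
        # cost = cls_weight * cls_cost + iou_weight * iou_cost and keep a
        # dynamic-k (at most candidate_topk) set of positives per GT box.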
head = YOLOXHead(head_module=self.head_module, train_cfg=train_cfg)
assert not head.use_bbox_aux
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, objectnesses = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = torch.empty((0, 6))
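        # The fast-training GT tensor packs one instance per row, assumed to
        # be (batch_idx, class_label, x1, y1, x2, y2), hence the 6 columns.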
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
objectnesses, gt_instances,
img_metas)
        # When there is no truth, cls and box losses should be zero,
        # while the objectness loss should still be non-zero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertGreater(empty_obj_loss.item(), 0,
'objectness loss should be non-zero')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
head = YOLOXHead(head_module=self.head_module, train_cfg=train_cfg)
head.use_bbox_aux = True
gt_instances = torch.Tensor(
[[0, 2, 23.6667, 23.8757, 238.6326, 151.8874]])
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
gt_instances, img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
onegt_l1_loss = one_gt_losses['loss_bbox_aux'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_obj_loss.item(), 0,
'obj loss should be non-zero')
self.assertGreater(onegt_l1_loss.item(), 0,
'l1 loss should be non-zero')
        # Test ground truth out of bound
gt_instances = torch.Tensor(
[[0, 2, s * 4, s * 4, s * 4 + 10, s * 4 + 10]])
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
objectnesses, gt_instances,
img_metas)
# When gt_bboxes out of bound, the assign results should be empty,
# so the cls and bbox loss should be zero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
self.assertEqual(
empty_cls_loss.item(), 0,
'there should be no cls loss when gt_bboxes out of bound')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when gt_bboxes out of bound')
self.assertGreater(empty_obj_loss.item(), 0,
'objectness loss should be non-zero')
| 6,200
| 37.75625
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/test_yolov6_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmyolo.models.dense_heads import YOLOv6Head
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv6Head(TestCase):
def setUp(self):
self.head_module = dict(
type='YOLOv6HeadModule',
num_classes=2,
in_channels=[32, 64, 128],
featmap_strides=[8, 16, 32])
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
}]
test_cfg = Config(
dict(
multi_label=True,
max_per_img=300,
score_thr=0.01,
nms=dict(type='nms', iou_threshold=0.65)))
head = YOLOv6Head(head_module=self.head_module, test_cfg=test_cfg)
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
None,
img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
head.predict_by_feat(
cls_scores,
bbox_preds,
None,
img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
| 1,713
| 26.645161
| 74
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/test_rotated_rtmdet_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmyolo.models.dense_heads import RTMDetRotatedHead
from mmyolo.utils import register_all_modules
register_all_modules()
class TestRTMDetRotatedHead(TestCase):
def setUp(self):
self.head_module = dict(
type='RTMDetRotatedSepBNHeadModule',
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=64,
featmap_strides=[4, 8, 16])
def test_init_weights(self):
head = RTMDetRotatedHead(head_module=self.head_module)
head.head_module.init_weights()
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
}]
test_cfg = dict(
multi_label=True,
decode_with_angle=True,
nms_pre=2000,
score_thr=0.01,
nms=dict(type='nms_rotated', iou_threshold=0.1),
max_per_img=300)
test_cfg = Config(test_cfg)
head = RTMDetRotatedHead(
head_module=self.head_module, test_cfg=test_cfg)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, angle_preds = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
angle_preds,
batch_img_metas=img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
head.predict_by_feat(
cls_scores,
bbox_preds,
angle_preds,
batch_img_metas=img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
def test_loss_by_feat(self):
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'batch_input_shape': (s, s),
'scale_factor': 1,
}]
train_cfg = dict(
assigner=dict(
type='BatchDynamicSoftLabelAssigner',
num_classes=80,
topk=13,
iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'),
batch_iou=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
train_cfg = Config(train_cfg)
head = RTMDetRotatedHead(
head_module=self.head_module, train_cfg=train_cfg).cuda()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, angle_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData(
bboxes=torch.empty((0, 5)).cuda(),
labels=torch.LongTensor([]).cuda())
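        # Rotated GT boxes carry 5 values per row, assumed to be
        # (cx, cy, w, h, angle_in_radians) under the head's angle convention.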
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
angle_preds, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
self.assertGreater(empty_cls_loss.item(), 0,
'classification loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
head = RTMDetRotatedHead(
head_module=self.head_module, train_cfg=train_cfg).cuda()
gt_instances = InstanceData(
bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874,
0.2]]).cuda(),
labels=torch.LongTensor([1]).cuda())
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
# test num_class = 1
self.head_module['num_classes'] = 1
head = RTMDetRotatedHead(
head_module=self.head_module, train_cfg=train_cfg).cuda()
gt_instances = InstanceData(
bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874,
0.2]]).cuda(),
labels=torch.LongTensor([0]).cuda())
cls_scores, bbox_preds, angle_preds = head.forward(feat)
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
def test_hbb_loss_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'batch_input_shape': (s, s),
'scale_factor': 1,
}]
train_cfg = dict(
assigner=dict(
type='BatchDynamicSoftLabelAssigner',
num_classes=80,
topk=13,
iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'),
batch_iou=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
train_cfg = Config(train_cfg)
hbb_cfg = dict(
bbox_coder=dict(
type='DistanceAnglePointCoder', angle_version='le90'),
loss_bbox=dict(type='mmdet.GIoULoss', loss_weight=2.0),
angle_coder=dict(
type='mmrotate.CSLCoder',
angle_version='le90',
omega=1,
window='gaussian',
radius=1),
loss_angle=dict(
type='mmrotate.SmoothFocalLoss',
gamma=2.0,
alpha=0.25,
loss_weight=0.2),
use_hbbox_loss=True,
)
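        # With use_hbbox_loss=True the regression branch is assumed to be
        # supervised as horizontal boxes via GIoU, while orientation comes
        # from a separate CSL angle branch (smoothed classification over
        # discretized angles), hence the extra loss_angle checked below.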
head = RTMDetRotatedHead(
head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, angle_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData(
bboxes=torch.empty((0, 5)), labels=torch.LongTensor([]))
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
angle_preds, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_angle_loss = empty_gt_losses['loss_angle'].sum()
self.assertGreater(empty_cls_loss.item(), 0,
'classification loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_angle_loss.item(), 0,
'there should be no angle loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
head = RTMDetRotatedHead(
head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg)
gt_instances = InstanceData(
bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874, 0.2]]),
labels=torch.LongTensor([1]))
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_angle_loss = one_gt_losses['loss_angle'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_angle_loss.item(), 0,
'angle loss should be non-zero')
# test num_class = 1
self.head_module['num_classes'] = 1
head = RTMDetRotatedHead(
head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg)
gt_instances = InstanceData(
bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874, 0.2]]),
labels=torch.LongTensor([0]))
cls_scores, bbox_preds, angle_preds = head.forward(feat)
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_angle_loss = one_gt_losses['loss_angle'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_angle_loss.item(), 0,
'angle loss should be non-zero')
| 10,370
| 38.135849
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/test_yolov8_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import ConfigDict
from mmengine.config import Config
from mmyolo.models import YOLOv8Head
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv8Head(TestCase):
def setUp(self):
self.head_module = dict(
type='YOLOv8HeadModule',
num_classes=4,
in_channels=[32, 64, 128],
featmap_strides=[8, 16, 32])
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
}]
test_cfg = Config(
dict(
multi_label=True,
max_per_img=300,
score_thr=0.01,
nms=dict(type='nms', iou_threshold=0.65)))
head = YOLOv8Head(head_module=self.head_module, test_cfg=test_cfg)
head.eval()
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
None,
img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
head.predict_by_feat(
cls_scores,
bbox_preds,
None,
img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
def test_loss_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'batch_input_shape': (s, s),
'scale_factor': 1,
}]
head = YOLOv8Head(
head_module=self.head_module,
train_cfg=ConfigDict(
assigner=dict(
type='BatchTaskAlignedAssigner',
num_classes=4,
topk=10,
alpha=0.5,
beta=6)))
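        # BatchTaskAlignedAssigner is assumed to score anchors with the
        # task-aligned metric t = cls_score**alpha * IoU**beta (alpha=0.5,
        # beta=6 here) and keep the topk candidates per ground-truth box.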
head.train()
feat = []
for i in range(len(self.head_module['in_channels'])):
in_channel = self.head_module['in_channels'][i]
feat_size = self.head_module['featmap_strides'][i]
feat.append(
torch.rand(1, in_channel, s // feat_size, s // feat_size))
cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = torch.empty((0, 6), dtype=torch.float32)
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
bbox_dist_preds, gt_instances,
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_dfl_loss = empty_gt_losses['loss_dfl'].sum()
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_dfl_loss.item(), 0,
            'there should be no dfl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = torch.Tensor(
[[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]])
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
bbox_dist_preds, gt_instances,
img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
        self.assertGreater(onegt_loss_dfl.item(), 0,
                           'dfl loss should be non-zero')
# test num_class = 1
self.head_module['num_classes'] = 1
head = YOLOv8Head(
head_module=self.head_module,
train_cfg=ConfigDict(
assigner=dict(
type='BatchTaskAlignedAssigner',
num_classes=1,
topk=10,
alpha=0.5,
beta=6)))
head.train()
gt_instances = torch.Tensor(
[[0., 0., 23.6667, 23.8757, 238.6326, 151.8874],
[1., 0., 24.6667, 27.8757, 28.6326, 51.8874]])
cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
bbox_dist_preds, gt_instances,
img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
        self.assertGreater(onegt_loss_dfl.item(), 0,
                           'dfl loss should be non-zero')
| 5,914
| 35.512346
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_dense_heads/test_rtmdet_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmyolo.models import RTMDetInsSepBNHead
from mmyolo.models.dense_heads import RTMDetHead
from mmyolo.utils import register_all_modules
register_all_modules()
class TestRTMDetHead(TestCase):
def setUp(self):
self.head_module = dict(
type='RTMDetSepBNHeadModule',
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=64,
featmap_strides=[4, 8, 16])
def test_init_weights(self):
head = RTMDetHead(head_module=self.head_module)
head.head_module.init_weights()
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
}]
test_cfg = dict(
multi_label=True,
nms_pre=30000,
score_thr=0.001,
nms=dict(type='nms', iou_threshold=0.65),
max_per_img=300)
test_cfg = Config(test_cfg)
head = RTMDetHead(head_module=self.head_module, test_cfg=test_cfg)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
batch_img_metas=img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
head.predict_by_feat(
cls_scores,
bbox_preds,
batch_img_metas=img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
def test_loss_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'batch_input_shape': (s, s),
'scale_factor': 1,
}]
train_cfg = dict(
assigner=dict(
num_classes=80,
type='BatchDynamicSoftLabelAssigner',
topk=13,
iou_calculator=dict(type='mmdet.BboxOverlaps2D')),
allowed_border=-1,
pos_weight=-1,
debug=False)
train_cfg = Config(train_cfg)
head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData(
bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
self.assertGreater(empty_cls_loss.item(), 0,
'classification loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg)
gt_instances = InstanceData(
bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
labels=torch.LongTensor([1]))
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
# test num_class = 1
self.head_module['num_classes'] = 1
head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg)
gt_instances = InstanceData(
bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
labels=torch.LongTensor([0]))
cls_scores, bbox_preds = head.forward(feat)
one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
class TestRTMDetInsHead(TestCase):
def setUp(self):
self.head_module = dict(
type='RTMDetInsSepBNHeadModule',
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=64,
featmap_strides=[4, 8, 16],
num_prototypes=8,
dyconv_channels=8,
num_dyconvs=3,
share_conv=True,
use_sigmoid_cls=True)
def test_init_weights(self):
head = RTMDetInsSepBNHead(head_module=self.head_module)
head.head_module.init_weights()
def test_predict_by_feat(self):
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0),
'pad_param': np.array([0., 0., 0., 0.])
}]
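        # pad_param is assumed to store the letterbox padding as
        # (top, bottom, left, right); predict_by_feat strips it before
        # rescaling boxes and masks back to the original image.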
test_cfg = dict(
multi_label=False,
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100,
mask_thr_binary=0.5)
test_cfg = Config(test_cfg)
head = RTMDetInsSepBNHead(
head_module=self.head_module, test_cfg=test_cfg)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, kernel_preds, mask_feat = head.forward(feat)
head.predict_by_feat(
cls_scores,
bbox_preds,
kernel_preds,
mask_feat,
batch_img_metas=img_metas,
cfg=test_cfg,
rescale=True,
with_nms=True)
img_metas_without_pad_param = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': (1.0, 1.0)
}]
head.predict_by_feat(
cls_scores,
bbox_preds,
kernel_preds,
mask_feat,
batch_img_metas=img_metas_without_pad_param,
cfg=test_cfg,
rescale=True,
with_nms=True)
with self.assertRaises(AssertionError):
head.predict_by_feat(
cls_scores,
bbox_preds,
kernel_preds,
mask_feat,
batch_img_metas=img_metas,
cfg=test_cfg,
rescale=False,
with_nms=False)
| 7,546
| 32.691964
| 78
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_detectors/test_yolo_detector.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import unittest
from unittest import TestCase
import torch
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs
from mmengine.logging import MessageHub
from parameterized import parameterized
from mmyolo.testing import get_detector_cfg
from mmyolo.utils import register_all_modules
class TestSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand([
'yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py',
'yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py',
'yolox/yolox_tiny_fast_8xb8-300e_coco.py',
'rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py',
'yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py',
'yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmyolo.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([
('yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py', ('cuda', 'cpu')),
('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')),
('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')),
('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu'))
])
def test_forward_loss_mode(self, cfg_file, devices):
message_hub = MessageHub.get_instance(
f'test_single_stage_forward_loss_mode-{time.time()}')
message_hub.update_info('iter', 0)
message_hub.update_info('epoch', 0)
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
if 'fast' in cfg_file:
model.data_preprocessor = dict(
type='mmdet.DetDataPreprocessor',
mean=[0., 0., 0.],
std=[255., 255., 255.],
bgr_to_rgb=True)
from mmyolo.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]])
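            # demo_mm_inputs fabricates a packed batch: 2 images with CHW
            # shapes (3, 320, 128) and (3, 125, 320) plus random GT instances.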
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([
('yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py', ('cuda',
'cpu')),
('yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py', ('cuda', 'cpu')),
('yolox/yolox_tiny_fast_8xb8-300e_coco.py', ('cuda', 'cpu')),
('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')),
('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')),
('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu'))
])
def test_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmyolo.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([
('yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py', ('cuda',
'cpu')),
('yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py', ('cuda', 'cpu')),
('yolox/yolox_tiny_fast_8xb8-300e_coco.py', ('cuda', 'cpu')),
('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')),
('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')),
('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu'))
])
def test_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmyolo.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
self.assertIsInstance(batch_results, tuple)
| 5,695
| 40.275362
| 79
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_task_modules/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_task_modules/test_coders/test_distance_point_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.task_modules.coders import DistancePointBBoxCoder
class TestDistancePointBBoxCoder(TestCase):
def test_decoder(self):
coder = DistancePointBBoxCoder()
points = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.],
[29., 170.]])
pred_bboxes = torch.Tensor([[0, -1, 3, 3], [-1, -7, -4.8, 9],
[-23, -1, 12, 1], [14.5, -13, 10, 18.3]])
expected_distance = torch.Tensor([[74, 63, 80, 67],
[-25, 134, -48.2, 142],
[276, 67, 210, 67],
[-58, 248, 89, 279.8]])
strides = torch.Tensor([2, 4, 6, 6])
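        # Worked example of the assumed decode rule
        # (x1, y1, x2, y2) = (x - l*s, y - t*s, x + r*s, y + b*s), row 0:
        # (74 - 0*2, 61 - (-1)*2, 74 + 3*2, 61 + 3*2) = (74, 63, 80, 67).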
out_distance = coder.decode(points, pred_bboxes, strides)
assert expected_distance.allclose(out_distance)
batch_priors = points.unsqueeze(0).repeat(2, 1, 1)
batch_pred_bboxes = pred_bboxes.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_priors, batch_pred_bboxes, strides)[0]
assert out_distance.allclose(batch_out)
| 1,218
| 39.633333
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_task_modules/test_coders/test_yolov5_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.task_modules.coders import YOLOv5BBoxCoder
class TestYOLOv5Coder(TestCase):
def test_decoder(self):
coder = YOLOv5BBoxCoder()
priors = torch.Tensor([[10., 10., 20., 20.], [10., 8., 10., 10.],
[15., 8., 20., 3.], [2., 5., 5., 8.]])
pred_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[1.0000, 5.0000, 9.0000, 5.0000]])
strides = torch.Tensor([2, 4, 8, 8])
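        # YOLOv5 decoding is assumed to sigmoid the raw predictions, then
        # use center = (pred_xy - 0.5) * 2 * stride + prior_center and
        # wh = (pred_wh * 2) ** 2 * prior_wh; e.g. row 0 keeps its center
        # at (15, 15) and grows to width (sigmoid(1) * 2) ** 2 * 10 ~ 21.38.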
expected_decode_bboxes = torch.Tensor(
[[4.3111, 4.3111, 25.6889, 25.6889],
[10.2813, 5.7033, 10.2813, 12.8594],
[7.7949, 11.1710, 27.2051, 2.3369],
[1.1984, 8.4730, 13.1955, 20.3129]])
out = coder.decode(priors, pred_bboxes, strides)
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_priors = priors.unsqueeze(0).repeat(2, 1, 1)
batch_pred_bboxes = pred_bboxes.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_priors, batch_pred_bboxes, strides)[0]
assert out.allclose(batch_out)
| 1,336
| 39.515152
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_task_modules/test_coders/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
| 48
| 23.5
| 47
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_task_modules/test_coders/test_yolox_bbox_coder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.task_modules.coders import YOLOXBBoxCoder
class TestYOLOv5Coder(TestCase):
def test_decoder(self):
coder = YOLOXBBoxCoder()
priors = torch.Tensor([[10., 10.], [8., 8.], [15., 8.], [2., 5.]])
pred_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.0409, 0.1409, 0.8591, 0.8591],
[0.0000, 0.3161, 0.1945, 0.6839],
[1.0000, 5.0000, 0.2000, 0.6000]])
strides = torch.Tensor([2, 4, 6, 6])
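        # YOLOX decoding is assumed to place box centers at
        # prior_xy + pred_xy * stride with wh = exp(pred_wh) * stride;
        # e.g. row 0: center (10, 10), wh = e * 2 ~ 5.44 -> x1 ~ 7.28.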
expected_decode_bboxes = torch.Tensor(
[[7.2817, 7.2817, 12.7183, 12.7183],
[3.4415, 3.8415, 12.8857, 13.2857],
[11.3559, 3.9518, 18.6441, 15.8414],
[4.3358, 29.5336, 11.6642, 40.4664]])
out = coder.decode(priors, pred_bboxes, strides)
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_priors = priors.unsqueeze(0).repeat(2, 1, 1)
batch_pred_bboxes = pred_bboxes.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_priors, batch_pred_bboxes, strides)[0]
assert out.allclose(batch_out)
| 1,266
| 38.59375
| 77
|
py
|
mmyolo
|
mmyolo-main/tests/test_models/test_task_modules/test_assigners/test_batch_dsl_assigner.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmyolo.models.task_modules.assigners import BatchDynamicSoftLabelAssigner
class TestBatchDynamicSoftLabelAssigner(TestCase):
def test_assign(self):
num_classes = 2
batch_size = 2
assigner = BatchDynamicSoftLabelAssigner(
num_classes=num_classes,
soft_center_radius=3.0,
topk=1,
iou_weight=3.0)
pred_bboxes = torch.FloatTensor([
[23, 23, 43, 43],
[4, 5, 6, 7],
]).unsqueeze(0).repeat(batch_size, 10, 1)
pred_scores = torch.FloatTensor([
[0.2],
[0.8],
]).unsqueeze(0).repeat(batch_size, 10, 1)
priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6,
7]]).repeat(10, 1)
gt_bboxes = torch.FloatTensor([[23, 23, 43, 43]]).unsqueeze(0).repeat(
batch_size, 1, 1)
gt_labels = torch.LongTensor([[0]
]).unsqueeze(0).repeat(batch_size, 1, 1)
pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat(
batch_size, 1, 1)
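        # The 20 candidates come from 2 distinct priors tiled 10 times;
        # pad_bbox_flag is assumed to mark which padded GT rows are real,
        # so a single valid GT per image drives the assignment below.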
assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
gt_labels, gt_bboxes, pad_bbox_flag)
assigned_labels = assign_result['assigned_labels']
assigned_labels_weights = assign_result['assigned_labels_weights']
assigned_bboxes = assign_result['assigned_bboxes']
assign_metrics = assign_result['assign_metrics']
self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20]))
self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20,
4]))
self.assertEqual(assigned_labels_weights.shape,
torch.Size([batch_size, 20]))
self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20]))
def test_assign_with_empty_gt(self):
num_classes = 2
batch_size = 2
assigner = BatchDynamicSoftLabelAssigner(
num_classes=num_classes,
soft_center_radius=3.0,
topk=1,
iou_weight=3.0)
pred_bboxes = torch.FloatTensor([
[23, 23, 43, 43],
[4, 5, 6, 7],
]).unsqueeze(0).repeat(batch_size, 10, 1)
pred_scores = torch.FloatTensor([
[0.2],
[0.8],
]).unsqueeze(0).repeat(batch_size, 10, 1)
priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6,
7]]).repeat(10, 1)
gt_bboxes = torch.zeros(batch_size, 0, 4)
gt_labels = torch.zeros(batch_size, 0, 1)
pad_bbox_flag = torch.zeros(batch_size, 0, 1)
assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
gt_labels, gt_bboxes, pad_bbox_flag)
assigned_labels = assign_result['assigned_labels']
assigned_labels_weights = assign_result['assigned_labels_weights']
assigned_bboxes = assign_result['assigned_bboxes']
assign_metrics = assign_result['assign_metrics']
self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20]))
self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20,
4]))
self.assertEqual(assigned_labels_weights.shape,
torch.Size([batch_size, 20]))
self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20]))
def test_assign_with_empty_boxs(self):
num_classes = 2
batch_size = 2
assigner = BatchDynamicSoftLabelAssigner(
num_classes=num_classes,
soft_center_radius=3.0,
topk=1,
iou_weight=3.0)
pred_bboxes = torch.zeros(batch_size, 0, 4)
pred_scores = torch.zeros(batch_size, 0, 4)
priors = torch.zeros(0, 4)
gt_bboxes = torch.FloatTensor([[23, 23, 43, 43]]).unsqueeze(0).repeat(
batch_size, 1, 1)
gt_labels = torch.LongTensor([[0]
]).unsqueeze(0).repeat(batch_size, 1, 1)
pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat(
batch_size, 1, 1)
assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
gt_labels, gt_bboxes, pad_bbox_flag)
assigned_labels = assign_result['assigned_labels']
assigned_labels_weights = assign_result['assigned_labels_weights']
assigned_bboxes = assign_result['assigned_bboxes']
assign_metrics = assign_result['assign_metrics']
self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 0]))
self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 0, 4]))
self.assertEqual(assigned_labels_weights.shape,
torch.Size([batch_size, 0]))
self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 0]))
def test_assign_rotate_box(self):
try:
import importlib
importlib.import_module('mmrotate')
except ImportError:
pytest.skip('mmrotate is not installed.', allow_module_level=True)
num_classes = 2
batch_size = 2
assigner = BatchDynamicSoftLabelAssigner(
num_classes=num_classes,
soft_center_radius=3.0,
topk=1,
iou_weight=3.0,
iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'),
# RBboxOverlaps2D doesn't support batch input, use loop instead.
batch_iou=False,
)
pred_bboxes = torch.FloatTensor([
[23, 23, 20, 20, 0.078],
[4, 5, 2, 2, 0.078],
]).unsqueeze(0).repeat(batch_size, 10, 1)
pred_scores = torch.FloatTensor([
[0.2],
[0.8],
]).unsqueeze(0).repeat(batch_size, 10, 1)
priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6,
7]]).repeat(10, 1)
gt_bboxes = torch.FloatTensor([[23, 23, 20, 20,
0.078]]).unsqueeze(0).repeat(
batch_size, 1, 1)
gt_labels = torch.LongTensor([[0]
]).unsqueeze(0).repeat(batch_size, 1, 1)
pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat(
batch_size, 1, 1)
assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
gt_labels, gt_bboxes, pad_bbox_flag)
assigned_labels = assign_result['assigned_labels']
assigned_labels_weights = assign_result['assigned_labels_weights']
assigned_bboxes = assign_result['assigned_bboxes']
assign_metrics = assign_result['assign_metrics']
self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20]))
self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20,
5]))
self.assertEqual(assigned_labels_weights.shape,
torch.Size([batch_size, 20]))
self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20]))
| 7,466
| 37.689119
| 79
|
py
|