repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ERD | ERD-main/.dev_scripts/benchmark_filter.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
    """Parse CLI flags selecting which groups of configs to benchmark."""
    parser = argparse.ArgumentParser(description='Filter configs to train')
    add = parser.add_argument
    add('--basic-arch',
        action='store_true',
        help='to train models in basic arch')
    add('--datasets', action='store_true', help='to train models in dataset')
    add('--data-pipeline',
        action='store_true',
        help='to train models related to data pipeline, e.g. augmentations')
    add('--nn-module',
        action='store_true',
        help='to train models related to neural network modules')
    add('--model-options',
        nargs='+',
        help='custom options to special model benchmark')
    add('--out',
        type=str,
        default='batch_train_list.txt',
        help='output path of gathered metrics to be stored')
    return parser.parse_args()
# Config sub-directory names grouped by what they exercise. Each entry is
# joined with 'configs/' and scanned; only files also listed in
# `benchmark_pool` are selected for training.

# Detector architecture folders (one per method family).
basic_arch_root = [
    'atss', 'autoassign', 'cascade_rcnn', 'cascade_rpn', 'centripetalnet',
    'cornernet', 'detectors', 'deformable_detr', 'detr', 'double_heads',
    'dynamic_rcnn', 'faster_rcnn', 'fcos', 'foveabox', 'fp16', 'free_anchor',
    'fsaf', 'gfl', 'ghm', 'grid_rcnn', 'guided_anchoring', 'htc', 'ld',
    'libra_rcnn', 'mask_rcnn', 'ms_rcnn', 'nas_fcos', 'paa', 'pisa',
    'point_rend', 'reppoints', 'retinanet', 'rpn', 'sabl', 'ssd', 'tridentnet',
    'vfnet', 'yolact', 'yolo', 'sparse_rcnn', 'scnet', 'yolof', 'centernet'
]

# Non-COCO dataset folders.
datasets_root = [
    'wider_face', 'pascal_voc', 'cityscapes', 'lvis', 'deepfashion'
]

# Data pipeline / augmentation related folders.
data_pipeline_root = ['albu_example', 'instaboost']

# Neural-network module (backbone/neck/plugin) folders.
nn_module_root = [
    'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn', 'gn+ws', 'hrnet',
    'pafpn', 'nas_fpn', 'regnet', 'resnest', 'res2net', 'groie'
]
# Whitelist of config paths eligible for the training benchmark. A config
# discovered under one of the selected root folders is only kept if its
# exact path appears here.
benchmark_pool = [
    'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
    'configs/atss/atss_r50_fpn_1x_coco.py',
    'configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py',
    'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
    'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/centernet/centernet_resnet18_dcnv2_140e_coco.py',
    'configs/centripetalnet/'
    'centripetalnet_hourglass104_mstest_16x6_210e_coco.py',
    'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
    'configs/cornernet/'
    'cornernet_hourglass104_mstest_8x6_210e_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
    'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py',
    'configs/detectors/detectors_htc_r50_1x_coco.py',
    'configs/detr/detr_r50_8x2_150e_coco.py',
    'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py',
    'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py',  # noqa
    'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py',
    'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
    'configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py',
    'configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py',
    'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
    'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
    'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
    'configs/gfl/gfl_r50_fpn_1x_coco.py',
    'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
    'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
    'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py',
    'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
    'configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py',
    'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
    'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
    'configs/htc/htc_r50_fpn_1x_coco.py',
    'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
    'configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py',
    'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py',
    'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
    'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
    'configs/paa/paa_r50_fpn_1x_coco.py',
    'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
    'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py',
    'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
    'configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py',
    'configs/resnest/'
    'mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py',
    'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
    'configs/rpn/rpn_r50_fpn_1x_coco.py',
    'configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py',
    'configs/ssd/ssd300_coco.py',
    'configs/tridentnet/tridentnet_r50_caffe_1x_coco.py',
    'configs/vfnet/vfnet_r50_fpn_1x_coco.py',
    'configs/yolact/yolact_r50_1x8_coco.py',
    'configs/yolo/yolov3_d53_320_273e_coco.py',
    'configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py',
    'configs/scnet/scnet_r50_fpn_1x_coco.py',
    'configs/yolof/yolof_r50_c5_8x8_1x_coco.py',
]
def main():
    """Collect the selected benchmark config paths and write them out."""
    args = parse_args()

    # Gather the config sub-directories requested on the command line.
    selected_roots = []
    for enabled, roots in ((args.basic_arch, basic_arch_root),
                           (args.datasets, datasets_root),
                           (args.data_pipeline, data_pipeline_root),
                           (args.nn_module, nn_module_root)):
        if enabled:
            selected_roots += roots
    if args.model_options is not None:
        selected_roots += args.model_options

    # Keep every scanned config that belongs to the benchmark pool,
    # preserving first-seen order and skipping duplicates.
    found_configs = []
    for root in selected_roots:
        cfg_dir = osp.join('configs/', root)
        for entry in os.scandir(cfg_dir):
            candidate = osp.join(cfg_dir, entry.name)
            if candidate in benchmark_pool and candidate not in found_configs:
                found_configs.append(candidate)

    print(f'Totally found {len(found_configs)} configs to benchmark')
    with open(args.out, 'w') as f:
        f.writelines(cfg + '\n' for cfg in found_configs)


if __name__ == '__main__':
    main()
| 7,106 | 41.303571 | 92 | py |
ERD | ERD-main/.dev_scripts/gather_models.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict
import torch
import yaml
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist, scandir
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """Dump ``data`` to YAML while preserving ``OrderedDict`` key order.

    A throwaway subclass of ``Dumper`` is taught to emit ``OrderedDict``
    as a plain mapping in insertion order.
    """

    class _OrderedDumper(Dumper):
        pass

    _OrderedDumper.add_representer(
        OrderedDict,
        lambda dumper, value: dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, value.items()))
    return yaml.dump(data, stream, _OrderedDumper, **kwds)
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Removes the optimizer state and EMA weights, re-saves the slimmed
    checkpoint, then renames it to ``<out_file stem>-<sha256[:8]>.pth``.

    Args:
        in_file (str): Path of the raw training checkpoint.
        out_file (str): Target path; the first 8 hex digits of the file's
            sha256 are inserted before the ``.pth`` extension.

    Returns:
        str: Path of the published checkpoint (with hash suffix).
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # remove ema state_dict
    for key in list(checkpoint['state_dict']):
        if key.startswith('ema_'):
            checkpoint['state_dict'].pop(key)
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # BUGFIX: the original compared version *strings* ('1.13' < '1.6'
    # lexicographically), silently skipping the legacy serialization that
    # keeps published models loadable by old torch. Compare numerically.
    try:
        major_minor = tuple(
            int(v) for v in torch.__version__.split('.')[:2])
        use_legacy = major_minor >= (1, 6)
    except ValueError:  # exotic version strings: fall back to old behavior
        use_legacy = torch.__version__ >= '1.6'
    if use_legacy:
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # BUGFIX: str.rstrip('.pth') strips *characters* ('.', 'p', 't', 'h'),
    # corrupting stems such as 'latest' -> 'lates'; remove the suffix instead.
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = stem + '-{}.pth'.format(sha[:8])
    # BUGFIX: rename synchronously; the original fire-and-forget
    # subprocess.Popen(['mv', ...]) could race with callers using the result.
    shutil.move(out_file, final_file)
    return final_file
def is_by_epoch(config):
    """Return True if the given config trains with an epoch-based runner."""
    runner_type = Config.fromfile('./configs/' + config).runner.type
    return runner_type == 'EpochBasedRunner'
def get_final_epoch_or_iter(config):
    """Return the final epoch (epoch-based runner) or iteration count."""
    cfg = Config.fromfile('./configs/' + config)
    by_epoch = cfg.runner.type == 'EpochBasedRunner'
    return cfg.runner.max_epochs if by_epoch else cfg.runner.max_iters
def get_best_epoch_or_iter(exp_dir):
    """Locate the lexicographically last ``best_*.pth`` checkpoint.

    Args:
        exp_dir (str): Experiment directory containing ``best_*.pth`` files.

    Returns:
        tuple[str, int]: The checkpoint file name and the epoch/iteration
        number parsed from its trailing ``_<number>.pth`` suffix.
    """
    candidates = sorted(glob.glob(osp.join(exp_dir, 'best_*.pth')))
    model_name = candidates[-1].split('/')[-1]
    number = model_name.split('_')[-1].split('.')[0]
    return model_name, int(number)
def get_real_epoch_or_iter(config):
    """Return the effective training length of a config.

    For epoch-based runners the epoch count is multiplied by the repeat
    factor when ``RepeatDataset`` is used; otherwise ``max_iters`` is
    returned unchanged.
    """
    cfg = Config.fromfile('./configs/' + config)
    if cfg.runner.type != 'EpochBasedRunner':
        return cfg.runner.max_iters
    epochs = cfg.runner.max_epochs
    if cfg.data.train.type == 'RepeatDataset':
        epochs *= cfg.data.train.times
    return epochs
def get_final_results(log_json_path,
                      epoch_or_iter,
                      results_lut,
                      by_epoch=True):
    """Extract memory usage and final metrics from a ``*.log.json`` file.

    Args:
        log_json_path (str): Path to the json training log.
        epoch_or_iter (int): Final epoch (or iteration) to look up.
        results_lut (list[str]): Metric keys to collect from the val line.
        by_epoch (bool): Whether the log comes from an epoch-based runner.

    Returns:
        dict: ``memory`` plus the requested metrics. In epoch mode the
        result is returned as soon as the matching val line is seen; in
        iter mode the last train/val line pair is used.
    """
    result_dict = dict()
    last_val_line = None
    last_train_line = None
    last_val_line_idx = -1
    last_train_line_idx = -1
    with open(log_json_path, 'r') as f:
        for i, raw in enumerate(f.readlines()):
            entry = json.loads(raw)
            if 'mode' not in entry:
                continue
            mode = entry['mode']
            if by_epoch:
                if mode == 'train' and entry['epoch'] == epoch_or_iter:
                    result_dict['memory'] = entry['memory']
                if mode == 'val' and entry['epoch'] == epoch_or_iter:
                    result_dict.update(
                        {k: entry[k]
                         for k in results_lut if k in entry})
                    return result_dict
            else:
                if mode == 'train':
                    last_train_line_idx = i
                    last_train_line = entry
                if mode == 'val':
                    last_val_line_idx = i
                    last_val_line = entry
        # known quirk upstream: max_iters = 768, last_train_line['iter'] = 750
        # The last val line must directly follow the last train line,
        # otherwise the run did not finish cleanly.
        assert last_val_line_idx == last_train_line_idx + 1, \
            'Log file is incomplete'
        result_dict['memory'] = last_train_line['memory']
        result_dict.update(
            {k: last_val_line[k]
             for k in results_lut if k in last_val_line})
        return result_dict
def get_dataset_name(config):
    """Map the config's ``dataset_type`` to a human readable dataset name."""
    # If there are more dataset, add here.
    name_map = {
        'CityscapesDataset': 'Cityscapes',
        'CocoDataset': 'COCO',
        'CocoPanopticDataset': 'COCO',
        'DeepFashionDataset': 'Deep Fashion',
        'LVISV05Dataset': 'LVIS v0.5',
        'LVISV1Dataset': 'LVIS v1',
        'VOCDataset': 'Pascal VOC',
        'WIDERFaceDataset': 'WIDER Face',
        'OpenImagesDataset': 'OpenImagesDataset',
        'OpenImagesChallengeDataset': 'OpenImagesChallengeDataset',
        'Objects365V1Dataset': 'Objects365 v1',
        'Objects365V2Dataset': 'Objects365 v2',
    }
    cfg = Config.fromfile('./configs/' + config)
    return name_map[cfg.dataset_type]
def convert_model_info_to_pwc(model_infos):
    """Convert gathered model infos into paperswithcode metafile entries.

    Args:
        model_infos (list[dict]): Each item holds ``config``, ``results``,
            ``model_path`` and an ``epochs``/``iterations`` entry.

    Returns:
        dict[str, list[OrderedDict]]: Metafile entries grouped by the
        config's folder name.
    """
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])

        # get metadata
        memory = round(model['results']['memory'] / 1024, 1)
        meta_data = OrderedDict()
        meta_data['Training Memory (GB)'] = memory
        if 'epochs' in model:
            meta_data['Epochs'] = get_real_epoch_or_iter(model['config'])
        else:
            meta_data['Iterations'] = get_real_epoch_or_iter(model['config'])
        pwc_model_info['Metadata'] = meta_data

        # get dataset name
        dataset_name = get_dataset_name(model['config'])

        # get results
        results = []
        # if there are more metrics, add here.
        if 'bbox_mAP' in model['results']:
            metric = round(model['results']['bbox_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        if 'segm_mAP' in model['results']:
            metric = round(model['results']['segm_mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Instance Segmentation',
                    Dataset=dataset_name,
                    Metrics={'mask AP': metric}))
        if 'PQ' in model['results']:
            metric = round(model['results']['PQ'], 1)
            results.append(
                OrderedDict(
                    Task='Panoptic Segmentation',
                    Dataset=dataset_name,
                    Metrics={'PQ': metric}))
        pwc_model_info['Results'] = results

        # BUGFIX: str.rstrip('.py') strips any trailing '.', 'p' or 'y'
        # characters and corrupts config names; remove the suffix instead.
        config_stem = model['config']
        if config_stem.endswith('.py'):
            config_stem = config_stem[:-len('.py')]
        link_string = 'https://download.openmmlab.com/mmdetection/v2.0/'
        link_string += '{}/{}'.format(config_stem,
                                      osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        pwc_files.setdefault(cfg_folder_name, []).append(pwc_model_info)
    return pwc_files
def parse_args():
    """Parse command line options for gathering benchmarked models."""
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    positional = (
        ('root', 'root path of benchmarked models to be gathered'),
        ('out', 'output path of gathered models to be stored'),
    )
    for arg_name, help_text in positional:
        parser.add_argument(arg_name, type=str, help=help_text)
    parser.add_argument(
        '--best',
        action='store_true',
        help='whether to gather the best model.')
    return parser.parse_args()
def main():
    """Gather trained checkpoints, publish them and emit metafiles.

    Pipeline: scan ``./configs`` for configs that have a matching
    experiment directory under ``args.root``, pick each run's final (or
    best) checkpoint, parse its metrics from the json log, then publish
    the slimmed checkpoint, logs, config and paperswithcode metafiles
    into ``args.out``.
    """
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mkdir_or_exist(models_out)

    # find all models in the root directory to be gathered
    raw_configs = list(scandir('./configs', '.py', recursive=True))

    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')

    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        by_epoch = is_by_epoch(used_config)
        # check whether the exps is finished
        if args.best is True:
            final_model, final_epoch_or_iter = get_best_epoch_or_iter(exp_dir)
        else:
            final_epoch_or_iter = get_final_epoch_or_iter(used_config)
            final_model = '{}_{}.pth'.format('epoch' if by_epoch else 'iter',
                                             final_epoch_or_iter)

        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get the latest logs (lexicographically last = newest timestamp)
        log_json_path = list(
            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
        cfg = Config.fromfile('./configs/' + used_config)
        results_lut = cfg.evaluation.metric
        if not isinstance(results_lut, list):
            results_lut = [results_lut]
        # case when using VOC, the evaluation key is only 'mAP'
        # when using Panoptic Dataset, the evaluation key is 'PQ'.
        for i, key in enumerate(results_lut):
            if 'mAP' not in key and 'PQ' not in key:
                results_lut[i] = key + '_mAP'
        model_performance = get_final_results(log_json_path,
                                              final_epoch_or_iter, results_lut,
                                              by_epoch)

        if model_performance is None:
            continue

        # log file name without extension doubles as the run timestamp
        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_info = dict(
            config=used_config,
            results=model_performance,
            model_time=model_time,
            final_model=final_model,
            log_json_path=osp.split(log_json_path)[-1])
        model_info['epochs' if by_epoch else 'iterations'] =\
            final_epoch_or_iter
        model_infos.append(model_info)

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        # NOTE(review): rstrip('.py') strips *characters* ('.', 'p', 'y'),
        # not the suffix — config names whose stem ends in one of those
        # characters get a mangled publish directory. Same issue below
        # with rstrip('.json').
        model_publish_dir = osp.join(models_out, model['config'].rstrip('.py'))
        mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]
        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])

        # convert model (strip optimizer/EMA state, add sha256 suffix)
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        shutil.copy(
            osp.join(models_root, model['config'],
                     model['log_json_path'].rstrip('.json')),
            osp.join(model_publish_dir, f'{model_name}.log'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path, osp.join(model_publish_dir,
                                          target_config_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    dump(models, osp.join(models_out, 'model_info.json'))

    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')


if __name__ == '__main__':
    main()
| 12,549 | 35.376812 | 79 | py |
ERD | ERD-main/.dev_scripts/benchmark_inference_fps.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.fileio import dump
from mmengine.utils import mkdir_or_exist
from terminaltables import GithubFlavoredMarkdownTable
from tools.analysis_tools.benchmark import repeat_measure_inference_speed
def parse_args():
    """Parse CLI options for the FPS benchmark entry point."""
    parser = argparse.ArgumentParser(
        description='MMDet benchmark a model of FPS')
    add = parser.add_argument
    add('config', help='test config file path')
    add('checkpoint_root', help='Checkpoint file root path')
    add('--round-num',
        type=int,
        default=1,
        help='round a number to a given precision in decimal digits')
    add('--repeat-num',
        type=int,
        default=1,
        help='number of repeat times of measurement for averaging the results')
    add('--out', type=str, help='output path of gathered fps to be stored')
    add('--max-iter', type=int, default=2000, help='num of max iter')
    add('--log-interval', type=int, default=50, help='interval of logging')
    add('--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    add('--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    add('--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    add('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Propagate the launcher's local rank for distributed initialization.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def results2markdown(result_dict):
    """Print the collected FPS results as a GitHub flavored markdown table."""
    rows = []
    has_repeats = False
    for cfg_name, value in result_dict.items():
        name = cfg_name.replace('configs/', '')
        fps = value['fps']
        ms_times_pre_image = value['ms_times_pre_image']
        if isinstance(fps, list):
            # Repeated measurements: show every run plus the mean.
            has_repeats = True
            rows.append([
                name,
                ','.join(str(v) for v in fps),
                value['mean_fps'],
                ','.join(str(v) for v in ms_times_pre_image),
                value['mean_times_pre_image'],
            ])
        else:
            rows.append([name, fps, ms_times_pre_image])

    if has_repeats:
        header = [
            'model', 'fps', 'mean_fps', 'times_pre_image(ms)',
            'mean_times_pre_image(ms)'
        ]
    else:
        header = ['model', 'fps', 'times_pre_image(ms)']
    rows.insert(0, header)

    table = GithubFlavoredMarkdownTable(rows)
    print(table.table, flush=True)
if __name__ == '__main__':
    # Benchmark inference FPS for every model listed in the given config.
    # Only distributed launch is supported; results are printed per model
    # and optionally dumped to json + markdown under --out.
    args = parse_args()
    assert args.round_num >= 0
    assert args.repeat_num >= 1

    config = Config.fromfile(args.config)

    if args.launcher == 'none':
        raise NotImplementedError('Only supports distributed mode')
    else:
        init_dist(args.launcher)

    result_dict = {}
    for model_key in config:
        model_infos = config[model_key]
        # a key may map to a single model entry or a list of them
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            # NOTE(review): record_metrics is read but never used below.
            record_metrics = model_info['metric']
            cfg_path = model_info['config'].strip()
            cfg = Config.fromfile(cfg_path)
            checkpoint = osp.join(args.checkpoint_root,
                                  model_info['checkpoint'].strip())
            try:
                fps = repeat_measure_inference_speed(cfg, checkpoint,
                                                     args.max_iter,
                                                     args.log_interval,
                                                     args.fuse_conv_bn,
                                                     args.repeat_num)
                if args.repeat_num > 1:
                    # report every repeat plus the mean of fps and ms/img
                    fps_list = [round(fps_, args.round_num) for fps_ in fps]
                    times_pre_image_list = [
                        round(1000 / fps_, args.round_num) for fps_ in fps
                    ]
                    mean_fps = round(
                        sum(fps_list) / len(fps_list), args.round_num)
                    mean_times_pre_image = round(
                        sum(times_pre_image_list) / len(times_pre_image_list),
                        args.round_num)
                    print(
                        f'{cfg_path} '
                        f'Overall fps: {fps_list}[{mean_fps}] img / s, '
                        f'times per image: '
                        f'{times_pre_image_list}[{mean_times_pre_image}] '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=fps_list,
                        mean_fps=mean_fps,
                        ms_times_pre_image=times_pre_image_list,
                        mean_times_pre_image=mean_times_pre_image)
                else:
                    print(
                        f'{cfg_path} fps : {fps:.{args.round_num}f} img / s, '
                        f'times per image: {1000 / fps:.{args.round_num}f} '
                        f'ms / img',
                        flush=True)
                    result_dict[cfg_path] = dict(
                        fps=round(fps, args.round_num),
                        ms_times_pre_image=round(1000 / fps, args.round_num))
            except Exception as e:
                # keep the batch running; record zeros for the failed model
                print(f'{cfg_path} error: {repr(e)}')
                if args.repeat_num > 1:
                    result_dict[cfg_path] = dict(
                        fps=[0],
                        mean_fps=0,
                        ms_times_pre_image=[0],
                        mean_times_pre_image=0)
                else:
                    result_dict[cfg_path] = dict(fps=0, ms_times_pre_image=0)

    if args.out:
        mkdir_or_exist(args.out)
        dump(result_dict, osp.join(args.out, 'batch_inference_fps.json'))
    results2markdown(result_dict)
| 6,830 | 38.715116 | 79 | py |
ERD | ERD-main/.dev_scripts/batch_test_list.py | # Copyright (c) OpenMMLab. All rights reserved.
# missing wider_face/timm_example/strong_baselines/simple_copy_paste/
# selfsup_pretrain/seesaw_loss/pascal_voc/openimages/lvis/ld/lad/cityscapes/deepfashion
# yapf: disable
# Regression-test entries. Each entry records a config path, a released
# checkpoint (file name + download URL), the evaluation metric type(s)
# and the expected mAP values used to detect regressions.
atss = dict(
    config='configs/atss/atss_r50_fpn_1x_coco.py',
    checkpoint='atss_r50_fpn_1x_coco_20200209-985f7bd0.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=39.4),
)
autoassign = dict(
    config='configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py',
    checkpoint='auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
carafe = dict(
    config='configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.6),
)
# cascade_rcnn has both a bbox-only and a mask variant.
cascade_rcnn = [
    dict(
        config='configs/cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py',
        checkpoint='cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth',
        eval='bbox',
        url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth', # noqa
        metric=dict(bbox_mAP=40.3),
    ),
    dict(
        config='configs/cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py',
        checkpoint='cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth',
        url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth', # noqa
        eval=['bbox', 'segm'],
        metric=dict(bbox_mAP=41.2, segm_mAP=35.9),
    ),
]
cascade_rpn = dict(
    config='configs/cascade_rpn/cascade-rpn_faster-rcnn_r50-caffe_fpn_1x_coco.py', # noqa
    checkpoint='crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=40.4),
)
centernet = dict(
    config='configs/centernet/centernet_r18-dcnv2_8xb16-crop512-140e_coco.py',
    checkpoint='centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=29.5),
)
centripetalnet = dict(
    config='configs/centripetalnet/centripetalnet_hourglass104_16xb6-crop511-210e-mstest_coco.py', # noqa
    checkpoint='centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=44.7),
)
# Regression-test entries (continued): convnext .. double_heads.
convnext = dict(
    config='configs/convnext/cascade-mask-rcnn_convnext-s-p4-w7_fpn_4conv1fc-giou_amp-ms-crop-3x_coco.py', # noqa
    checkpoint='cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth', # noqa
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=51.8, segm_mAP=44.8),
)
cornernet = dict(
    config='configs/cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py',
    checkpoint='cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=41.2),
)
dcn = dict(
    config='configs/dcn/faster-rcnn_r50-dconv-c3-c5_fpn_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=41.3),
)
dcnv2 = dict(
    config='configs/dcnv2/faster-rcnn_r50_fpn_mdpool_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.7),
)
ddod = dict(
    config='configs/ddod/ddod_r50_fpn_1x_coco.py',
    checkpoint='ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=41.7),
)
deformable_detr = dict(
    config='configs/deformable_detr/deformable-detr_r50_16xb2-50e_coco.py',
    checkpoint='deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=44.5),
)
detectors = dict(
    config='configs/detectors/detectors_htc-r50_1x_coco.py',
    checkpoint='detectors_htc_r50_1x_coco-329b1453.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth', # noqa
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=49.1, segm_mAP=42.6),
)
detr = dict(
    config='configs/detr/detr_r50_8xb2-150e_coco.py',
    checkpoint='detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=40.1),
)
double_heads = dict(
    config='configs/double_heads/dh-faster-rcnn_r50_fpn_1x_coco.py',
    checkpoint='dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=40.0),
)
# Regression-test entries (continued): dyhead .. fpg.
dyhead = dict(
    config='configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py',
    checkpoint='atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=43.3),
)
dynamic_rcnn = dict(
    config='configs/dynamic_rcnn/dynamic-rcnn_r50_fpn_1x_coco.py',
    checkpoint='dynamic_rcnn_r50_fpn_1x-62a3f276.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.9),
)
efficientnet = dict(
    config='configs/efficientnet/retinanet_effb3_fpn_8xb4-crop896-1x_coco.py',
    checkpoint='retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=40.5),
)
empirical_attention = dict(
    config='configs/empirical_attention/faster-rcnn_r50-attn1111_fpn_1x_coco.py', # noqa
    checkpoint='faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=40.0),
)
faster_rcnn = dict(
    config='configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py',
    checkpoint='faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=37.4),
)
fcos = dict(
    config='configs/fcos/fcos_r50-caffe_fpn_gn-head-center-normbbox-centeronreg-giou_1x_coco.py', # noqa
    checkpoint='fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=38.7),
)
foveabox = dict(
    config='configs/foveabox/fovea_r50_fpn_gn-head-align_4xb4-2x_coco.py',
    checkpoint='fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth',
    url='https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth', # noqa
    eval='bbox',
    metric=dict(bbox_mAP=37.9),
)
fpg = dict(
    config='configs/fpg/mask-rcnn_r50_fpg_crop640-50e_coco.py',
    checkpoint='mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth', # noqa
    url='https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth', # noqa
    eval=['bbox', 'segm'],
    metric=dict(bbox_mAP=43.0, segm_mAP=38.1),
)
)
free_anchor = dict(
config='configs/free_anchor/freeanchor_r50_fpn_1x_coco.py',
checkpoint='retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.7),
)
fsaf = dict(
config='configs/fsaf/fsaf_r50_fpn_1x_coco.py',
checkpoint='fsaf_r50_fpn_1x_coco-94ccc51f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.4),
)
gcnet = dict(
config='configs/gcnet/mask-rcnn_r50-syncbn-gcb-r16-c3-c5_fpn_1x_coco.py', # noqa
checkpoint='mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.2),
)
gfl = dict(
config='configs/gfl/gfl_r50_fpn_1x_coco.py',
checkpoint='gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.2),
)
ghm = dict(
config='configs/ghm/retinanet_r50_fpn_ghm-1x_coco.py',
checkpoint='retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.0),
)
gn = dict(
config='configs/gn/mask-rcnn_r50_fpn_gn-all_2x_coco.py',
checkpoint='mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.1, segm_mAP=36.4),
)
gn_ws = dict(
config='configs/gn+ws/faster-rcnn_r50_fpn_gn-ws-all_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.7),
)
grid_rcnn = dict(
config='configs/grid_rcnn/grid-rcnn_r50_fpn_gn-head_2x_coco.py',
checkpoint='grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
groie = dict(
config='configs/groie/faste-rcnn_r50_fpn_groie_1x_coco.py',
checkpoint='faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
guided_anchoring = dict(
config='configs/guided_anchoring/ga-retinanet_r50-caffe_fpn_1x_coco.py', # noqa
checkpoint='ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=36.9),
)
hrnet = dict(
config='configs/hrnet/faster-rcnn_hrnetv2p-w18-1x_coco.py',
checkpoint='faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=36.9),
)
htc = dict(
config='configs/htc/htc_r50_fpn_1x_coco.py',
checkpoint='htc_r50_fpn_1x_coco_20200317-7332cf16.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.3, segm_mAP=37.4),
)
instaboost = dict(
config='configs/instaboost/mask-rcnn_r50_fpn_instaboost-4x_coco.py',
checkpoint='mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.6, segm_mAP=36.6),
)
libra_rcnn = dict(
config='configs/libra_rcnn/libra-faster-rcnn_r50_fpn_1x_coco.py',
checkpoint='libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.3),
)
mask2former = dict(
config='configs/mask2former/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py',
checkpoint='mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth', # noqa
eval=['bbox', 'segm', 'PQ'],
metric=dict(PQ=51.9, bbox_mAP=44.8, segm_mAP=41.9),
)
mask_rcnn = dict(
config='configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py',
checkpoint='mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
)
maskformer = dict(
config='configs/maskformer/maskformer_r50_ms-16xb1-75e_coco.py',
checkpoint='maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth', # noqa
eval='PQ',
metric=dict(PQ=46.9),
)
ms_rcnn = dict(
config='configs/ms_rcnn/ms-rcnn_r50-caffe_fpn_1x_coco.py',
checkpoint='ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.2, segm_mAP=36.0),
)
nas_fcos = dict(
config='configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py', # noqa
checkpoint='nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.4),
)
nas_fpn = dict(
config='configs/nas_fpn/retinanet_r50_nasfpn_crop640-50e_coco.py',
checkpoint='retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.5),
)
paa = dict(
config='configs/paa/paa_r50_fpn_1x_coco.py',
checkpoint='paa_r50_fpn_1x_coco_20200821-936edec3.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
pafpn = dict(
config='configs/pafpn/faster-rcnn_r50_pafpn_1x_coco.py',
checkpoint='faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
panoptic_fpn = dict(
config='configs/panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py',
checkpoint='panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth', # noqa
eval='PQ',
metric=dict(PQ=40.2),
)
pisa = dict(
config='configs/pisa/faster-rcnn_r50_fpn_pisa_1x_coco.py',
checkpoint='pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=38.4),
)
point_rend = dict(
config='configs/point_rend/point-rend_r50-caffe_fpn_ms-1x_coco.py',
checkpoint='point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=38.4, segm_mAP=36.3),
)
pvt = dict(
config='configs/pvt/retinanet_pvt-s_fpn_1x_coco.py',
checkpoint='retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=40.4),
)
queryinst = dict(
config='configs/queryinst/queryinst_r50_fpn_1x_coco.py',
checkpoint='queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.0, segm_mAP=37.5),
)
regnet = dict(
config='configs/regnet/mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py',
checkpoint='mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=40.4, segm_mAP=36.7),
)
reppoints = dict(
config='configs/reppoints/reppoints-moment_r50_fpn_1x_coco.py',
checkpoint='reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.0),
)
res2net = dict(
config='configs/res2net/faster-rcnn_res2net-101_fpn_2x_coco.py',
checkpoint='faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=43.0),
)
resnest = dict(
config='configs/resnest/faster-rcnn_s50_fpn_syncbn-backbone+head_ms-range-1x_coco.py', # noqa
checkpoint='faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=42.0),
)
resnet_strikes_back = dict(
config='configs/resnet_strikes_back/mask-rcnn_r50-rsb-pre_fpn_1x_coco.py', # noqa
checkpoint='mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=38.2),
)
retinanet = dict(
config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
checkpoint='retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=36.5),
)
rpn = dict(
config='configs/rpn/rpn_r50_fpn_1x_coco.py',
checkpoint='rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth', # noqa
eval='proposal_fast',
metric=dict(AR_1000=58.2),
)
sabl = [
dict(
config='configs/sabl/sabl-retinanet_r50_fpn_1x_coco.py',
checkpoint='sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.7),
),
dict(
config='configs/sabl/sabl-faster-rcnn_r50_fpn_1x_coco.py',
checkpoint='sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=39.9),
),
]
scnet = dict(
config='configs/scnet/scnet_r50_fpn_1x_coco.py',
checkpoint='scnet_r50_fpn_1x_coco-c3f09857.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=43.5),
)
scratch = dict(
config='configs/scratch/mask-rcnn_r50-scratch_fpn_gn-all_6x_coco.py',
checkpoint='scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=41.2, segm_mAP=37.4),
)
solo = dict(
config='configs/solo/decoupled-solo_r50_fpn_1x_coco.py',
checkpoint='decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth', # noqa
eval='segm',
metric=dict(segm_mAP=33.9),
)
solov2 = dict(
config='configs/solov2/solov2_r50_fpn_1x_coco.py',
checkpoint='solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth', # noqa
eval='segm',
metric=dict(segm_mAP=34.8),
)
sparse_rcnn = dict(
config='configs/sparse_rcnn/sparse-rcnn_r50_fpn_1x_coco.py',
checkpoint='sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.9),
)
ssd = [
dict(
config='configs/ssd/ssd300_coco.py',
checkpoint='ssd300_coco_20210803_015428-d231a06e.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=25.5),
),
dict(
config='configs/ssd/ssdlite_mobilenetv2-scratch_8xb24-600e_coco.py',
checkpoint='ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=21.3),
),
]
swin = dict(
config='configs/swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py',
checkpoint='mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=42.7, segm_mAP=39.3),
)
tood = dict(
config='configs/tood/tood_r50_fpn_1x_coco.py',
checkpoint='tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=42.4),
)
tridentnet = dict(
config='configs/tridentnet/tridentnet_r50-caffe_1x_coco.py',
checkpoint='tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.6),
)
vfnet = dict(
config='configs/vfnet/vfnet_r50_fpn_1x_coco.py',
checkpoint='vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=41.6),
)
yolact = dict(
config='configs/yolact/yolact_r50_1xb8-55e_coco.py',
checkpoint='yolact_r50_1x8_coco_20200908-f38d58df.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth', # noqa
eval=['bbox', 'segm'],
metric=dict(bbox_mAP=31.2, segm_mAP=29.0),
)
yolo = dict(
config='configs/yolo/yolov3_d53_8xb8-320-273e_coco.py',
checkpoint='yolov3_d53_320_273e_coco-421362b6.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=27.9),
)
yolof = dict(
config='configs/yolof/yolof_r50-c5_8xb8-1x_coco.py',
checkpoint='yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth',
url='https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=37.5),
)
yolox = dict(
config='configs/yolox/yolox_tiny_8xb8-300e_coco.py',
checkpoint='yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', # noqa
url='https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth', # noqa
eval='bbox',
metric=dict(bbox_mAP=31.8),
)
# yapf: enable
| 29,576 | 53.17033 | 249 | py |
ERD | ERD-main/.dev_scripts/benchmark_train.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
import os.path as osp
from argparse import ArgumentParser
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger, print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.testing import replace_to_ceph
from mmdet.utils import register_all_modules, replace_cfg_vals
def parse_args():
    """Parse command line arguments for the benchmark training script.

    Also ensures the ``LOCAL_RANK`` environment variable is set, which
    distributed launchers rely on.

    Returns:
        argparse.Namespace: Parsed command line arguments.
    """
    parser = ArgumentParser()
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--ceph', action='store_true')
    parser.add_argument('--save-ckpt', action='store_true')
    parser.add_argument(
        '--amp',
        action='store_true',
        default=False,
        help='enable automatic-mixed-precision training')
    parser.add_argument(
        '--auto-scale-lr',
        action='store_true',
        help='enable automatically scaling LR.')
    parser.add_argument(
        '--resume',
        action='store_true',
        help='resume from the latest checkpoint in the work_dir automatically')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # Fix: the arguments were previously parsed a second time here; a single
    # parse is sufficient and the duplicate call was redundant work.
    return args
# TODO: Need to refactor train.py so that it can be reused.
# TODO: Need to refactor train.py so that it can be reused.
def fast_train_model(config_name, args, logger=None):
    """Run a shortened training of one config to smoke-test the train loop.

    A ``FastStopTrainingHook`` is injected into ``custom_hooks`` so training
    stops after only a few iterations/epochs instead of the full schedule.

    Args:
        config_name (str): Path of the model config file to train.
        args (argparse.Namespace): Parsed CLI options from :func:`parse_args`.
        logger: Unused in this function; kept for call-site compatibility.
    """
    cfg = Config.fromfile(config_name)
    cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = osp.join(args.work_dir,
                                osp.splitext(osp.basename(config_name))[0])
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(config_name))[0])
    # Configure the early-stop hook; when checkpoints are requested, also
    # tighten the checkpoint interval so at least one checkpoint is written
    # before training stops.
    ckpt_hook = cfg.default_hooks.checkpoint
    by_epoch = ckpt_hook.get('by_epoch', True)
    fast_stop_hook = dict(type='FastStopTrainingHook')
    fast_stop_hook['by_epoch'] = by_epoch
    if args.save_ckpt:
        if by_epoch:
            interval = 1
            stop_iter_or_epoch = 2
        else:
            interval = 4
            stop_iter_or_epoch = 10
        fast_stop_hook['stop_iter_or_epoch'] = stop_iter_or_epoch
        fast_stop_hook['save_ckpt'] = True
        ckpt_hook.interval = interval
    if 'custom_hooks' in cfg:
        cfg.custom_hooks.append(fast_stop_hook)
    else:
        custom_hooks = [fast_stop_hook]
        cfg.custom_hooks = custom_hooks
    # TODO: temporary plan
    # Drop the visualizer name so repeated runs in one process do not clash.
    if 'visualizer' in cfg:
        if 'name' in cfg.visualizer:
            del cfg.visualizer.name
    # enable automatic-mixed-precision training
    if args.amp is True:
        optim_wrapper = cfg.optim_wrapper.type
        if optim_wrapper == 'AmpOptimWrapper':
            print_log(
                'AMP training is already enabled in your config.',
                logger='current',
                level=logging.WARNING)
        else:
            assert optim_wrapper == 'OptimWrapper', (
                '`--amp` is only supported when the optimizer wrapper type is '
                f'`OptimWrapper` but got {optim_wrapper}.')
            cfg.optim_wrapper.type = 'AmpOptimWrapper'
            cfg.optim_wrapper.loss_scale = 'dynamic'
    # enable automatically scaling LR
    if args.auto_scale_lr:
        if 'auto_scale_lr' in cfg and \
                'enable' in cfg.auto_scale_lr and \
                'base_batch_size' in cfg.auto_scale_lr:
            cfg.auto_scale_lr.enable = True
        else:
            raise RuntimeError('Can not find "auto_scale_lr" or '
                               '"auto_scale_lr.enable" or '
                               '"auto_scale_lr.base_batch_size" in your'
                               ' configuration file.')
    if args.ceph:
        replace_to_ceph(cfg)
    cfg.resume = args.resume
    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
    runner.train()
# Sample test whether the train code is correct
# Sample test whether the train code is correct
def main(args):
    """Iterate over all models in the benchmark config and smoke-train each.

    RuntimeErrors carrying the 'quick exit' marker are the expected early-stop
    signal and are not logged as failures; everything else is recorded in
    ``benchmark_train.log``.
    """
    # register all modules in mmdet into the registries
    register_all_modules(init_default_scope=False)
    benchmark_cfg = Config.fromfile(args.config)
    # test all model
    logger = MMLogger.get_instance(
        name='MMLogger',
        log_file='benchmark_train.log',
        log_level=logging.ERROR)
    for key in benchmark_cfg:
        infos = benchmark_cfg[key]
        infos = infos if isinstance(infos, list) else [infos]
        for info in infos:
            print('processing: ', info['config'], flush=True)
            config_name = info['config'].strip()
            try:
                fast_train_model(config_name, args, logger)
            except RuntimeError as err:
                # quick exit is the normal exit message
                if 'quick exit' not in repr(err):
                    logger.error(f'{config_name} " : {repr(err)}')
            except Exception as err:
                logger.error(f'{config_name} " : {repr(err)}')
if __name__ == '__main__':
    # Entry point: parse CLI options, then launch the benchmark training loop.
    args = parse_args()
    main(args)
| 6,412 | 34.826816 | 79 | py |
ERD | ERD-main/.dev_scripts/benchmark_valid_flops.py | import logging
import re
import tempfile
from argparse import ArgumentParser
from collections import OrderedDict
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from mmengine import Config, DictAction
from mmengine.analysis import get_model_complexity_info
from mmengine.analysis.print_helper import _format_size
from mmengine.fileio import FileClient
from mmengine.logging import MMLogger
from mmengine.model import revert_sync_batchnorm
from mmengine.runner import Runner
from modelindex.load_model_index import load
from rich.console import Console
from rich.table import Table
from rich.text import Text
from tqdm import tqdm
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules
# Rich console used to pretty-print the summary table.
console = Console()
# Absolute path of the mmdetection repo root (two levels above this script).
MMDET_ROOT = Path(__file__).absolute().parents[1]
def parse_args():
    """Build and parse the CLI options for the model validation script."""
    arg_parser = ArgumentParser(
        description='Valid all models in model-index.yml')
    arg_parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 800],
        help='input image size')
    arg_parser.add_argument(
        '--checkpoint_root',
        help='Checkpoint file root path. If set, load checkpoint before test.')
    arg_parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
    arg_parser.add_argument(
        '--models', nargs='+', help='models name to inference')
    arg_parser.add_argument(
        '--batch-size',
        type=int,
        default=1,
        help='The batch size during the inference.')
    arg_parser.add_argument(
        '--flops', action='store_true', help='Get Flops and Params of models')
    arg_parser.add_argument(
        '--flops-str',
        action='store_true',
        help='Output FLOPs and params counts in a string form.')
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    arg_parser.add_argument(
        '--size_divisor',
        type=int,
        default=32,
        help='Pad the input image, the minimum size that is divisible '
        'by size_divisor, -1 means do not pad the image.')
    return arg_parser.parse_args()
def inference(config_file, checkpoint, work_dir, args, exp_name):
    """Compute complexity statistics (FLOPs/params) for one model config.

    A direct complexity pass on a random tensor is attempted first; if the
    model needs real data samples, the function falls back to building the
    validation dataloader and running the analysis with a genuine batch.

    Args:
        config_file (Path): Path to the model config file.
        checkpoint (str | None): Optional checkpoint path to record in cfg.
        work_dir (str): Working directory for the run.
        args (argparse.Namespace): Parsed CLI options from :func:`parse_args`.
        exp_name (str): Unique experiment name for the config/logger.

    Returns:
        dict: Record containing the model name and, when ``args.flops`` is
        set, the padded input resolution, flops, params and which of the two
        strategies ('direct' or 'dataloader') produced them.
    """
    logger = MMLogger.get_instance(name='MMLogger')
    logger.warning('if you want test flops, please make sure torch>=1.12')
    cfg = Config.fromfile(config_file)
    cfg.work_dir = work_dir
    cfg.load_from = checkpoint
    cfg.log_level = 'WARN'
    cfg.experiment_name = exp_name
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # forward the model
    result = {'model': config_file.stem}
    if args.flops:
        if len(args.shape) == 1:
            h = w = args.shape[0]
        elif len(args.shape) == 2:
            h, w = args.shape
        else:
            raise ValueError('invalid input shape')
        divisor = args.size_divisor
        if divisor > 0:
            # pad height/width up to the nearest multiple of size_divisor
            h = int(np.ceil(h / divisor)) * divisor
            w = int(np.ceil(w / divisor)) * divisor
        input_shape = (3, h, w)
        result['resolution'] = input_shape
        try:
            # Reload a clean config for the complexity pass.
            cfg = Config.fromfile(config_file)
            if hasattr(cfg, 'head_norm_cfg'):
                cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
                cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
                    type='SyncBN', requires_grad=True)
                cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
                    type='SyncBN', requires_grad=True)
            if args.cfg_options is not None:
                cfg.merge_from_dict(args.cfg_options)
            model = MODELS.build(cfg.model)
            input = torch.rand(1, *input_shape)
            if torch.cuda.is_available():
                model.cuda()
                input = input.cuda()
            model = revert_sync_batchnorm(model)
            inputs = (input, )
            model.eval()
            outputs = get_model_complexity_info(
                model, input_shape, inputs, show_table=False, show_arch=False)
            flops = outputs['flops']
            params = outputs['params']
            activations = outputs['activations']
            result['Get Types'] = 'direct'
        except Exception:
            # Fix: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception so only
            # genuine model/build errors trigger the dataloader fallback.
            logger = MMLogger.get_instance(name='MMLogger')
            logger.warning(
                'Direct get flops failed, try to get flops with data')
            cfg = Config.fromfile(config_file)
            if hasattr(cfg, 'head_norm_cfg'):
                cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
                cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
                    type='SyncBN', requires_grad=True)
                cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
                    type='SyncBN', requires_grad=True)
            # NOTE(review): args.cfg_options is not merged in this fallback
            # branch, unlike the direct path above — confirm intentional.
            data_loader = Runner.build_dataloader(cfg.val_dataloader)
            data_batch = next(iter(data_loader))
            model = MODELS.build(cfg.model)
            if torch.cuda.is_available():
                model = model.cuda()
            model = revert_sync_batchnorm(model)
            model.eval()
            _forward = model.forward
            data = model.data_preprocessor(data_batch)
            del data_loader
            model.forward = partial(
                _forward, data_samples=data['data_samples'])
            outputs = get_model_complexity_info(
                model,
                input_shape,
                data['inputs'],
                show_table=False,
                show_arch=False)
            flops = outputs['flops']
            params = outputs['params']
            activations = outputs['activations']
            result['Get Types'] = 'dataloader'
        if args.flops_str:
            flops = _format_size(flops)
            params = _format_size(params)
            activations = _format_size(activations)
        # `activations` is computed/formatted but not added to `result`.
        result['flops'] = flops
        result['params'] = params
    return result
def show_summary(summary_data, args):
    """Render the benchmark summary as a rich table and dump it to CSV."""
    table = Table(title='Validation Benchmark Regression Summary')
    table.add_column('Model')
    table.add_column('Validation')
    table.add_column('Resolution (c, h, w)')
    if args.flops:
        table.add_column('Flops', justify='right', width=11)
        table.add_column('Params', justify='right')
    for name, info in summary_data.items():
        status = info['valid']
        status_color = 'green' if status == 'PASS' else 'red'
        cells = [name, f'[{status_color}]{status}[/{status_color}]']
        if status == 'PASS':
            cells.append(str(info['resolution']))
            if args.flops:
                cells.extend([str(info['flops']), str(info['params'])])
        table.add_row(*cells)
    console.print(table)
    # Strip rich markup before exporting the table to CSV.
    plain_columns = {
        column.header: [Text.from_markup(cell).plain for cell in column.cells]
        for column in table.columns
    }
    pd.DataFrame(plain_columns).to_csv('./mmdetection_flops.csv')
# Sample test whether the inference code is correct
# Sample test whether the inference code is correct
def main(args):
    """Validate (build + FLOPs measurement) every model in model-index.yml."""
    register_all_modules()
    model_index_file = MMDET_ROOT / 'model-index.yml'
    model_index = load(str(model_index_file))
    model_index.build_models_with_collections()
    models = OrderedDict({model.name: model for model in model_index.models})
    logger = MMLogger(
        'validation',
        logger_name='validation',
        log_file='benchmark_test_image.log',
        log_level=logging.INFO)
    if args.models:
        # Filter the model zoo by the user-supplied regex patterns
        # ('+' is normalized to '_' on both sides before matching).
        patterns = [
            re.compile(pattern.replace('+', '_')) for pattern in args.models
        ]
        filter_models = {}
        for k, v in models.items():
            k = k.replace('+', '_')
            if any([re.match(pattern, k) for pattern in patterns]):
                filter_models[k] = v
        if len(filter_models) == 0:
            print('No model found, please specify models in:')
            print('\n'.join(models.keys()))
            return
        models = filter_models
    summary_data = {}
    tmpdir = tempfile.TemporaryDirectory()
    for model_name, model_info in tqdm(models.items()):
        if model_info.config is None:
            continue
        model_info.config = model_info.config.replace('%2B', '+')
        config = Path(model_info.config)
        # Fix: previously `config.exists()` was wrapped in try/except with its
        # boolean result discarded, so missing configs were never skipped
        # (Path.exists() returns False rather than raising). Check the path
        # relative to the repo root — the same path used by inference() below.
        if not (MMDET_ROOT / config).exists():
            logger.error(f'{model_name}: {config} not found.')
            continue
        logger.info(f'Processing: {model_name}')
        http_prefix = 'https://download.openmmlab.com/mmdetection/'
        if args.checkpoint_root is not None:
            root = args.checkpoint_root
            if 's3://' in args.checkpoint_root:
                from petrel_client.common.exception import AccessDeniedError
                file_client = FileClient.infer_client(uri=root)
                checkpoint = file_client.join_path(
                    root, model_info.weights[len(http_prefix):])
                try:
                    exists = file_client.exists(checkpoint)
                except AccessDeniedError:
                    exists = False
            else:
                checkpoint = Path(root) / model_info.weights[len(http_prefix):]
                exists = checkpoint.exists()
            if exists:
                checkpoint = str(checkpoint)
            else:
                print(f'WARNING: {model_name}: {checkpoint} not found.')
                checkpoint = None
        else:
            checkpoint = None
        try:
            # build the model from a config file and a checkpoint file
            result = inference(MMDET_ROOT / config, checkpoint, tmpdir.name,
                               args, model_name)
            result['valid'] = 'PASS'
        except Exception:  # noqa 722
            import traceback
            logger.error(f'"{config}" :\n{traceback.format_exc()}')
            result = {'valid': 'FAIL'}
        summary_data[model_name] = result
    tmpdir.cleanup()
    show_summary(summary_data, args)
if __name__ == '__main__':
    # Entry point: parse CLI options, then validate all selected models.
    args = parse_args()
    main(args)
| 10,539 | 34.608108 | 79 | py |
ERD | ERD-main/.dev_scripts/benchmark_test.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
import os.path as osp
from argparse import ArgumentParser
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.testing import replace_to_ceph
from mmdet.utils import register_all_modules, replace_cfg_vals
def parse_args():
    """Parse CLI arguments for the benchmark test script.

    Returns:
        argparse.Namespace: Parsed options. As a side effect, ``LOCAL_RANK``
        is exported to the environment when it is not already set so that
        distributed launchers can pick it up.
    """
    parser = ArgumentParser()
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint_root', help='Checkpoint file root path')
    parser.add_argument('--work-dir', help='the dir to save logs')
    parser.add_argument('--ceph', action='store_true')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    # Parse exactly once; the original parsed a second time after exporting
    # LOCAL_RANK, which was redundant work with no effect on the result.
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
# TODO: Need to refactor test.py so that it can be reused.
def fast_test_model(config_name, checkpoint, args, logger=None):
    """Build a Runner from ``config_name`` and run a single ``test()`` pass.

    Args:
        config_name (str): Path to the model config file.
        checkpoint (str): Checkpoint path assigned to ``cfg.load_from``.
        args (argparse.Namespace): CLI options providing ``launcher``,
            ``cfg_options``, ``work_dir`` and ``ceph``.
        logger: Unused inside this function; kept for call-site
            compatibility with ``main``.
    """
    cfg = Config.fromfile(config_name)
    cfg = replace_cfg_vals(cfg)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = osp.join(args.work_dir,
                                osp.splitext(osp.basename(config_name))[0])
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(config_name))[0])
    if args.ceph:
        # Rewrite data backends in the config to read from ceph storage.
        replace_to_ceph(cfg)
    cfg.load_from = checkpoint
    # TODO: temporary plan -- drop the visualizer name so repeated Runner
    # creations in one process do not clash on the visualizer instance name.
    if 'visualizer' in cfg:
        if 'name' in cfg.visualizer:
            del cfg.visualizer.name
    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
    runner.test()
# Sample test whether the inference code is correct
def main(args):
    """Iterate over all model entries listed in ``args.config`` and run a
    fast test on each; failures are logged instead of aborting the sweep."""
    # register all modules in mmdet into the registries
    register_all_modules(init_default_scope=False)
    benchmark_cfg = Config.fromfile(args.config)
    # Errors for individual models are collected in benchmark_test.log.
    logger = MMLogger.get_instance(
        name='MMLogger',
        log_file='benchmark_test.log',
        log_level=logging.ERROR)
    for model_key in benchmark_cfg:
        entries = benchmark_cfg[model_key]
        if not isinstance(entries, list):
            entries = [entries]
        for entry in entries:
            print('processing: ', entry['config'], flush=True)
            config_name = entry['config'].strip()
            checkpoint = osp.join(args.checkpoint_root,
                                  entry['checkpoint'].strip())
            try:
                fast_test_model(config_name, checkpoint, args, logger)
            except Exception as e:
                logger.error(f'{config_name} " : {repr(e)}')
# Entry point: parse CLI options and run the benchmark test sweep.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 4,055 | 33.965517 | 79 | py |
ERD | ERD-main/tests/test_engine/test_schedulers/test_quadratic_warmup.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
import torch.nn.functional as F
import torch.optim as optim
from mmengine.optim.scheduler import _ParamScheduler
from mmengine.testing import assert_allclose
from mmdet.engine.schedulers import (QuadraticWarmupLR,
QuadraticWarmupMomentum,
QuadraticWarmupParamScheduler)
class ToyModel(torch.nn.Module):
    """Two stacked 1x1 convolutions; supplies parameters for the scheduler
    tests' SGD optimizer."""

    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(1, 1, 1)
        self.conv2 = torch.nn.Conv2d(1, 1, 1)

    def forward(self, x):
        hidden = self.conv1(x)
        activated = F.relu(hidden)
        return self.conv2(activated)
class TestQuadraticWarmupScheduler(TestCase):
    """Unit tests for mmdet's quadratic warmup parameter schedulers."""

    def setUp(self):
        """Setup the model and optimizer which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.model = ToyModel()
        self.optimizer = optim.SGD(
            self.model.parameters(), lr=0.05, momentum=0.01, weight_decay=5e-4)

    def _test_scheduler_value(self,
                              schedulers,
                              targets,
                              epochs=10,
                              param_name='lr'):
        # Step the scheduler(s) ``epochs`` times, comparing the optimizer's
        # param groups against the expected ``targets`` table at each step.
        # NOTE(review): zip() truncates to the shorter sequence -- the SGD
        # optimizer above has a single param group, so only targets[0] is
        # actually checked even though two target lists are passed in.
        if isinstance(schedulers, _ParamScheduler):
            schedulers = [schedulers]
        for epoch in range(epochs):
            for param_group, target in zip(self.optimizer.param_groups,
                                           targets):
                print(param_group[param_name])
                assert_allclose(
                    target[epoch],
                    param_group[param_name],
                    msg='{} is wrong in epoch {}: expected {}, got {}'.format(
                        param_name, epoch, target[epoch],
                        param_group[param_name]),
                    atol=1e-5,
                    rtol=0)
            [scheduler.step() for scheduler in schedulers]

    def test_quadratic_warmup_scheduler(self):
        # Constructing without a warmup ``end`` raises ValueError.
        with self.assertRaises(ValueError):
            QuadraticWarmupParamScheduler(self.optimizer, param_name='lr')
        epochs = 10
        iters = 5
        # Quadratic ramp ((i + 1) / iters) ** 2, reaching 1.0 at ``iters``.
        warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]
        single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (
            epochs - iters)
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = QuadraticWarmupParamScheduler(
            self.optimizer, param_name='lr', end=iters)
        self._test_scheduler_value(scheduler, targets, epochs)

    def test_quadratic_warmup_scheduler_convert_iterbased(self):
        # ``build_iter_from_epoch`` converts the epoch-based ``end`` into
        # iterations using ``epoch_length``.
        epochs = 10
        end = 5
        epoch_length = 11
        iters = end * epoch_length
        warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]
        single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (
            epochs * epoch_length - iters)
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = QuadraticWarmupParamScheduler.build_iter_from_epoch(
            self.optimizer,
            param_name='lr',
            end=end,
            epoch_length=epoch_length)
        self._test_scheduler_value(scheduler, targets, epochs * epoch_length)

    def test_quadratic_warmup_lr(self):
        # Convenience subclass pre-bound to the 'lr' parameter (base 0.05).
        epochs = 10
        iters = 5
        warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]
        single_targets = [x * 0.05 for x in warmup_factor] + [0.05] * (
            epochs - iters)
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = QuadraticWarmupLR(self.optimizer, end=iters)
        self._test_scheduler_value(scheduler, targets, epochs)

    def test_quadratic_warmup_momentum(self):
        # Convenience subclass pre-bound to 'momentum' (base 0.01).
        epochs = 10
        iters = 5
        warmup_factor = [pow((i + 1) / float(iters), 2) for i in range(iters)]
        single_targets = [x * 0.01 for x in warmup_factor] + [0.01] * (
            epochs - iters)
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = QuadraticWarmupMomentum(self.optimizer, end=iters)
        self._test_scheduler_value(
            scheduler, targets, epochs, param_name='momentum')
| 4,323 | 38.669725 | 79 | py |
ERD | ERD-main/tests/test_engine/test_hooks/test_num_class_check_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from unittest import TestCase
from unittest.mock import Mock
from mmcv.cnn import VGG
from mmengine.dataset import BaseDataset
from torch import nn
from mmdet.engine.hooks import NumClassCheckHook
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
class TestNumClassCheckHook(TestCase):
    """Tests that NumClassCheckHook validates model head ``num_classes``
    against the dataloader dataset's ``metainfo['classes']``."""

    def setUp(self):
        # Setup NumClassCheckHook
        hook = NumClassCheckHook()
        self.hook = hook
        # Setup runner mock
        runner = Mock()
        runner.model = Mock()
        runner.logger = Mock()
        runner.logger.warning = Mock()
        runner.train_dataloader = Mock()
        runner.val_dataloader = Mock()
        self.runner = runner
        # Setup datasets: classes=None (warn), classes=str (assert),
        # classes=tuple of 2 names (the valid case).
        metainfo = dict(classes=None)
        self.none_classmeta_dataset = BaseDataset(
            metainfo=metainfo, lazy_init=True)
        metainfo = dict(classes='class_name')
        self.str_classmeta_dataset = BaseDataset(
            metainfo=metainfo, lazy_init=True)
        metainfo = dict(classes=('bus', 'car'))
        self.normal_classmeta_dataset = BaseDataset(
            metainfo=metainfo, lazy_init=True)
        # Setup valid model: bbox_head.num_classes == 2 matches the
        # two-class dataset above (rpn/semantic heads are exempt).
        valid_model = nn.Module()
        valid_model.add_module('backbone', VGG(depth=11))
        fused_semantic_head = FusedSemanticHead(
            num_ins=1,
            fusion_level=0,
            num_convs=1,
            in_channels=1,
            conv_out_channels=1)
        valid_model.add_module('semantic_head', fused_semantic_head)
        rpn_head = nn.Module()
        rpn_head.num_classes = 1
        valid_model.add_module('rpn_head', rpn_head)
        bbox_head = nn.Module()
        bbox_head.num_classes = 2
        valid_model.add_module('bbox_head', bbox_head)
        self.valid_model = valid_model
        # Setup invalid model: bbox_head.num_classes == 4 mismatches.
        invalid_model = nn.Module()
        bbox_head = nn.Module()
        bbox_head.num_classes = 4
        invalid_model.add_module('bbox_head', bbox_head)
        self.invalid_model = invalid_model

    # NOTE(review): method name has a typo ('epch'), but it still matches
    # unittest's 'test_*' discovery pattern, so it is kept as-is.
    def test_before_train_epch(self):
        runner = deepcopy(self.runner)
        # Test when dataset.metainfo['classes'] is None
        runner.train_dataloader.dataset = self.none_classmeta_dataset
        self.hook.before_train_epoch(runner)
        runner.logger.warning.assert_called_once()
        # Test when dataset.metainfo['classes'] is a str
        runner.train_dataloader.dataset = self.str_classmeta_dataset
        with self.assertRaises(AssertionError):
            self.hook.before_train_epoch(runner)
        runner.train_dataloader.dataset = self.normal_classmeta_dataset
        # Test `num_classes` of model is compatible with dataset
        runner.model = self.valid_model
        self.hook.before_train_epoch(runner)
        # Test `num_classes` of model is not compatible with dataset
        runner.model = self.invalid_model
        with self.assertRaises(AssertionError):
            self.hook.before_train_epoch(runner)

    def test_before_val_epoch(self):
        runner = deepcopy(self.runner)
        # Test when dataset.metainfo['classes'] is None
        runner.val_dataloader.dataset = self.none_classmeta_dataset
        self.hook.before_val_epoch(runner)
        runner.logger.warning.assert_called_once()
        # Test when dataset.metainfo['classes'] is a str
        runner.val_dataloader.dataset = self.str_classmeta_dataset
        with self.assertRaises(AssertionError):
            self.hook.before_val_epoch(runner)
        runner.val_dataloader.dataset = self.normal_classmeta_dataset
        # Test `num_classes` of model is compatible with dataset
        runner.model = self.valid_model
        self.hook.before_val_epoch(runner)
        # Test `num_classes` of model is not compatible with dataset
        runner.model = self.invalid_model
        with self.assertRaises(AssertionError):
            self.hook.before_val_epoch(runner)
| 3,988 | 36.280374 | 71 | py |
ERD | ERD-main/tests/test_engine/test_hooks/test_sync_norm_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
import torch.nn as nn
from mmdet.engine.hooks import SyncNormHook
class TestSyncNormHook(TestCase):
    """Smoke tests for ``SyncNormHook`` with ``get_dist_info`` mocked out."""

    @patch(
        'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 1))
    def test_before_val_epoch_non_dist(self, mock):
        # Single-process world size: hook runs without error.
        net = nn.Sequential(
            nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),
            nn.Linear(5, 10))
        fake_runner = Mock()
        fake_runner.model = net
        SyncNormHook().before_val_epoch(fake_runner)

    @patch(
        'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 2))
    def test_before_val_epoch_dist(self, mock):
        # Two-process world size with a BatchNorm layer present.
        net = nn.Sequential(
            nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),
            nn.Linear(5, 10))
        fake_runner = Mock()
        fake_runner.model = net
        SyncNormHook().before_val_epoch(fake_runner)

    @patch(
        'mmdet.engine.hooks.sync_norm_hook.get_dist_info', return_value=(0, 2))
    def test_before_val_epoch_dist_no_norm(self, mock):
        # Two-process world size but the model has no norm layers at all.
        net = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))
        fake_runner = Mock()
        fake_runner.model = net
        SyncNormHook().before_val_epoch(fake_runner)
| 1,374 | 31.738095 | 79 | py |
ERD | ERD-main/tests/test_engine/test_hooks/test_mean_teacher_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.evaluator import BaseMetric
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODEL_WRAPPERS
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
# Populate mmdet's registries so the string-typed components used below
# (e.g. dict(type='MeanTeacherHook')) can be resolved by the Runner.
register_all_modules()
class ToyModel(nn.Module):
    """Single linear layer exposing the loss/tensor/predict forward modes
    expected from a detector-style model."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 1)

    def forward(self, inputs, data_samples, mode='tensor'):
        batch_labels = torch.stack(data_samples)
        batch_inputs = torch.stack(inputs)
        preds = self.linear(batch_inputs)
        if mode == 'loss':
            # Toy objective: signed residual sum between labels and outputs.
            return dict(loss=(batch_labels - preds).sum())
        # Both 'tensor' and any other mode (e.g. 'predict') return the raw
        # outputs unchanged.
        return preds
class ToyModel1(BaseModel, ToyModel):
    """``ToyModel`` adapted to the mmengine ``BaseModel`` interface."""

    def __init__(self):
        super().__init__()

    def forward(self, *args, **kwargs):
        # super(BaseModel, self) skips BaseModel in the MRO so the call is
        # handled directly by ToyModel.forward.
        return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
    """Teacher/student model pair used to exercise ``MeanTeacherHook``."""

    def __init__(self):
        super().__init__()
        self.teacher = ToyModel1()
        self.student = ToyModel1()

    def forward(self, *args, **kwargs):
        # Only the student runs forward here; the teacher is presumably
        # updated by MeanTeacherHook during training -- verify against the
        # hook implementation.
        return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
    """In-memory dataset of 12 random 2-d inputs with constant label 1."""

    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    @property
    def metainfo(self):
        # Empty meta info, exposed for mmengine dataset compatibility.
        return self.METAINFO

    def __len__(self):
        return self.data.size(0)

    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_samples=self.label[index])
class ToyMetric1(BaseMetric):
    """Dummy metric that always reports ``acc=1``."""

    def __init__(self, collect_device='cpu', dummy_metrics=None):
        super().__init__(collect_device=collect_device)
        self.dummy_metrics = dummy_metrics

    def process(self, data_batch, predictions):
        # Record a constant per-batch result; ``predictions`` is ignored.
        result = {'acc': 1}
        self.results.append(result)

    def compute_metrics(self, results):
        return dict(acc=1)
class TestMeanTeacherHook(TestCase):
    """End-to-end train/test runs with ``MeanTeacherHook`` attached, for a
    plain model and for a model behind a registered model wrapper."""

    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_mean_teacher_hook(self):
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        model = ToyModel2().to(device)
        # Train for 2 epochs with the hook enabled; a checkpoint should be
        # written at the end.
        runner = Runner(
            model=model,
            train_dataloader=dict(
                dataset=DummyDataset(),
                sampler=dict(type='DefaultSampler', shuffle=True),
                batch_size=3,
                num_workers=0),
            val_dataloader=dict(
                dataset=DummyDataset(),
                sampler=dict(type='DefaultSampler', shuffle=False),
                batch_size=3,
                num_workers=0),
            val_evaluator=[ToyMetric1()],
            work_dir=self.temp_dir.name,
            default_scope='mmdet',
            optim_wrapper=OptimWrapper(
                torch.optim.Adam(ToyModel().parameters())),
            train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
            val_cfg=dict(),
            default_hooks=dict(logger=None),
            custom_hooks=[dict(type='MeanTeacherHook')],
            experiment_name='test1')
        runner.train()
        self.assertTrue(
            osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
        # checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))

        # load and testing: reload the trained checkpoint and run test().
        runner = Runner(
            model=model,
            test_dataloader=dict(
                dataset=DummyDataset(),
                sampler=dict(type='DefaultSampler', shuffle=True),
                batch_size=3,
                num_workers=0),
            test_evaluator=[ToyMetric1()],
            test_cfg=dict(),
            work_dir=self.temp_dir.name,
            default_scope='mmdet',
            load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
            default_hooks=dict(logger=None),
            custom_hooks=[dict(type='MeanTeacherHook')],
            experiment_name='test2')
        runner.test()

        @MODEL_WRAPPERS.register_module()
        class DummyWrapper(BaseModel):
            # Minimal wrapper exposing the wrapped model via ``.module``.

            def __init__(self, model):
                super().__init__()
                self.module = model

            def forward(self, *args, **kwargs):
                return self.module(*args, **kwargs)

        # with model wrapper: the hook must also work through ``.module``.
        runner = Runner(
            model=DummyWrapper(ToyModel2()),
            test_dataloader=dict(
                dataset=DummyDataset(),
                sampler=dict(type='DefaultSampler', shuffle=True),
                batch_size=3,
                num_workers=0),
            test_evaluator=[ToyMetric1()],
            test_cfg=dict(),
            work_dir=self.temp_dir.name,
            default_scope='mmdet',
            load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
            default_hooks=dict(logger=None),
            custom_hooks=[dict(type='MeanTeacherHook')],
            experiment_name='test3')
        runner.test()
| 5,299 | 29.113636 | 78 | py |
ERD | ERD-main/tests/test_engine/test_hooks/test_visualization_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import shutil
import time
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmengine.structures import InstanceData
from mmdet.engine.hooks import DetVisualizationHook
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
class TestVisualizationHook(TestCase):
    """Tests DetVisualizationHook's val/test iteration callbacks and its
    optional dumping of drawn images to ``test_out_dir``."""

    def setUp(self) -> None:
        DetLocalVisualizer.get_instance('current_visualizer')
        # Build two identical fake prediction samples pointing at a real
        # image on disk.
        pred_instances = InstanceData()
        pred_instances.bboxes = _rand_bboxes(5, 10, 12)
        pred_instances.labels = torch.randint(0, 2, (5, ))
        pred_instances.scores = torch.rand((5, ))
        pred_det_data_sample = DetDataSample()
        pred_det_data_sample.set_metainfo({
            'img_path':
            osp.join(osp.dirname(__file__), '../../data/color.jpg')
        })
        pred_det_data_sample.pred_instances = pred_instances
        self.outputs = [pred_det_data_sample] * 2

    def test_after_val_iter(self):
        runner = Mock()
        runner.iter = 1
        hook = DetVisualizationHook()
        hook.after_val_iter(runner, 1, {}, self.outputs)

    def test_after_test_iter(self):
        runner = Mock()
        runner.iter = 1
        hook = DetVisualizationHook(draw=True)
        hook.after_test_iter(runner, 1, {}, self.outputs)
        # The hook counts every visualized test sample.
        self.assertEqual(hook._test_index, 2)
        # test: with draw=False nothing is written; with draw=True the
        # output directory {work_dir}/{timestamp}/{test_out_dir} appears.
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        test_out_dir = timestamp + '1'
        runner.work_dir = timestamp
        runner.timestamp = '1'
        hook = DetVisualizationHook(draw=False, test_out_dir=test_out_dir)
        hook.after_test_iter(runner, 1, {}, self.outputs)
        self.assertTrue(not osp.exists(f'{timestamp}/1/{test_out_dir}'))
        hook = DetVisualizationHook(draw=True, test_out_dir=test_out_dir)
        hook.after_test_iter(runner, 1, {}, self.outputs)
        self.assertTrue(osp.exists(f'{timestamp}/1/{test_out_dir}'))
        shutil.rmtree(f'{timestamp}')
| 2,420 | 33.098592 | 74 | py |
ERD | ERD-main/tests/test_engine/test_hooks/test_checkloss_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch
from mmdet.engine.hooks import CheckInvalidLossHook
class TestCheckInvalidLossHook(TestCase):
    """CheckInvalidLossHook only validates the loss every n-th iteration."""

    def test_after_train_iter(self):
        interval = 50
        hook = CheckInvalidLossHook(interval)
        runner = Mock()
        runner.logger = Mock()
        runner.logger.info = Mock()
        # Off-interval iterations never raise, whatever the loss value is.
        runner.iter = 10
        for loss_value in (torch.LongTensor([2]),
                           torch.tensor(float('nan')),
                           torch.tensor(float('inf'))):
            hook.after_train_iter(runner, 10, outputs=dict(loss=loss_value))
        # At the check interval a finite loss passes ...
        runner.iter = interval - 1
        hook.after_train_iter(
            runner, interval - 1, outputs=dict(loss=torch.LongTensor([2])))
        # ... while NaN/Inf losses trigger the assertion.
        for bad in (float('nan'), float('inf')):
            with self.assertRaises(AssertionError):
                hook.after_train_iter(
                    runner, interval - 1, outputs=dict(loss=torch.tensor(bad)))
| 1,372 | 35.131579 | 66 | py |
ERD | ERD-main/tests/test_engine/test_runner/test_loops.py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.evaluator import Evaluator
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.registry import DATASETS
from mmdet.utils import register_all_modules
# Populate mmdet's registries so string-typed configs below (e.g.
# dict(type='DummyDataset'), 'TeacherStudentValLoop') can be resolved.
register_all_modules()
class ToyModel(nn.Module):
    """Single linear layer supporting the loss/tensor/predict call modes."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 1)

    def forward(self, inputs, data_samples, mode='tensor'):
        labels = torch.stack(data_samples)
        outputs = self.linear(torch.stack(inputs))
        if mode == 'loss':
            # Toy objective: signed residual sum of labels minus outputs.
            return dict(loss=(labels - outputs).sum())
        # 'tensor' and any other mode (e.g. 'predict') return raw outputs.
        return outputs
class ToyModel1(BaseModel, ToyModel):
    """``ToyModel`` adapted to the mmengine ``BaseModel`` interface."""

    def __init__(self):
        super().__init__()

    def forward(self, *args, **kwargs):
        # super(BaseModel, self) skips BaseModel in the MRO so the call is
        # handled directly by ToyModel.forward.
        return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
    """Teacher/student model pair for the TeacherStudentValLoop test."""

    def __init__(self):
        super().__init__()
        self.teacher = ToyModel1()
        self.student = ToyModel1()
        # presumably consumed by TeacherStudentValLoop to pick which
        # sub-model predicts -- verify against the loop implementation.
        self.semi_test_cfg = dict(predict_on='teacher')

    def forward(self, *args, **kwargs):
        return self.student(*args, **kwargs)
@DATASETS.register_module(force=True)
class DummyDataset(Dataset):
    """In-memory dataset of 12 random 2-d inputs with constant label 1."""

    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    @property
    def metainfo(self):
        # Empty meta info, exposed for mmengine dataset compatibility.
        return self.METAINFO

    def __len__(self):
        return self.data.size(0)

    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_samples=self.label[index])
class TestTeacherStudentValLoop(TestCase):
    """Runs a short training with ``val_cfg=TeacherStudentValLoop`` against
    a teacher/student model to smoke-test the custom validation loop."""

    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_teacher_student_val_loop(self):
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        model = ToyModel2().to(device)
        # The evaluator is mocked; only the loop wiring is under test.
        evaluator = Mock()
        evaluator.evaluate = Mock(return_value=dict(acc=0.5))
        evaluator.__class__ = Evaluator
        runner = Runner(
            model=model,
            train_dataloader=dict(
                dataset=dict(type='DummyDataset'),
                sampler=dict(type='DefaultSampler', shuffle=True),
                batch_size=3,
                num_workers=0),
            val_dataloader=dict(
                dataset=dict(type='DummyDataset'),
                sampler=dict(type='DefaultSampler', shuffle=False),
                batch_size=3,
                num_workers=0),
            val_evaluator=evaluator,
            work_dir=self.temp_dir.name,
            default_scope='mmdet',
            optim_wrapper=OptimWrapper(
                torch.optim.Adam(ToyModel().parameters())),
            train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
            val_cfg=dict(type='TeacherStudentValLoop'),
            default_hooks=dict(logger=dict(type='LoggerHook', interval=1)),
            experiment_name='test1')
        runner.train()
| 3,296 | 27.921053 | 76 | py |
ERD | ERD-main/tests/test_engine/test_optimizers/test_layer_decay_optimizer_constructor.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.engine import LearningRateDecayOptimizerConstructor
# Hyper-parameters shared by the expected-value tables below.
base_lr = 1
decay_rate = 2
base_wd = 0.05
weight_decay = 0.05
# Expected per-parameter-group (weight_decay, lr_scale) pairs for the toy
# ConvNeXt model when using stage-wise learning-rate decay (num_layers=6,
# decay_rate=2). Groups with weight_decay 0.0 correspond to parameters
# exempt from decay (norms/biases/tokens).
expected_stage_wise_lr_wd_convnext = [{
    'weight_decay': 0.0,
    'lr_scale': 128
}, {
    'weight_decay': 0.0,
    'lr_scale': 1
}, {
    'weight_decay': 0.05,
    'lr_scale': 64
}, {
    'weight_decay': 0.0,
    'lr_scale': 64
}, {
    'weight_decay': 0.05,
    'lr_scale': 32
}, {
    'weight_decay': 0.0,
    'lr_scale': 32
}, {
    'weight_decay': 0.05,
    'lr_scale': 16
}, {
    'weight_decay': 0.0,
    'lr_scale': 16
}, {
    'weight_decay': 0.05,
    'lr_scale': 8
}, {
    'weight_decay': 0.0,
    'lr_scale': 8
}, {
    'weight_decay': 0.05,
    'lr_scale': 128
}, {
    'weight_decay': 0.05,
    'lr_scale': 1
}]
# Same model with layer-wise decay; only the last two stage entries differ
# (lr_scale 2 instead of 8) because layers are counted individually.
expected_layer_wise_lr_wd_convnext = [{
    'weight_decay': 0.0,
    'lr_scale': 128
}, {
    'weight_decay': 0.0,
    'lr_scale': 1
}, {
    'weight_decay': 0.05,
    'lr_scale': 64
}, {
    'weight_decay': 0.0,
    'lr_scale': 64
}, {
    'weight_decay': 0.05,
    'lr_scale': 32
}, {
    'weight_decay': 0.0,
    'lr_scale': 32
}, {
    'weight_decay': 0.05,
    'lr_scale': 16
}, {
    'weight_decay': 0.0,
    'lr_scale': 16
}, {
    'weight_decay': 0.05,
    'lr_scale': 2
}, {
    'weight_decay': 0.0,
    'lr_scale': 2
}, {
    'weight_decay': 0.05,
    'lr_scale': 128
}, {
    'weight_decay': 0.05,
    'lr_scale': 1
}]
class ToyConvNeXt(nn.Module):
    """Skeleton backbone with ConvNeXt-style attribute names, used to
    exercise ``LearningRateDecayOptimizerConstructor``."""

    def __init__(self):
        super().__init__()
        self.stages = nn.ModuleList()
        for i in range(4):
            stage = nn.Sequential(ConvModule(3, 4, kernel_size=1, bias=True))
            self.stages.append(stage)
        self.norm0 = nn.BatchNorm2d(2)
        # add some variables to meet unit test coverage rate
        self.cls_token = nn.Parameter(torch.ones(1))
        self.mask_token = nn.Parameter(torch.ones(1))
        self.pos_embed = nn.Parameter(torch.ones(1))
        self.stem_norm = nn.Parameter(torch.ones(1))
        self.downsample_norm0 = nn.BatchNorm2d(2)
        self.downsample_norm1 = nn.BatchNorm2d(2)
        self.downsample_norm2 = nn.BatchNorm2d(2)
        # Frozen parameter (requires_grad=False).
        self.lin = nn.Parameter(torch.ones(1))
        self.lin.requires_grad = False
        self.downsample_layers = nn.ModuleList()
        for _ in range(4):
            stage = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=True))
            self.downsample_layers.append(stage)
class ToyDetector(nn.Module):
    """Minimal detector: a backbone plus a grouped 1x1 conv head."""

    def __init__(self, backbone):
        super().__init__()
        self.backbone = backbone
        self.head = nn.Conv2d(2, 2, kernel_size=1, groups=2)
class PseudoDataParallel(nn.Module):
    """Wraps a model under ``.module`` to imitate DataParallel wrapping."""

    def __init__(self, model):
        super().__init__()
        self.module = model
def check_optimizer_lr_wd(optimizer, gt_lr_wd):
    """Assert ``optimizer`` matches the expected per-group lr/wd table."""
    assert isinstance(optimizer, torch.optim.AdamW)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['weight_decay'] == base_wd
    param_groups = optimizer.param_groups
    print(param_groups)
    assert len(param_groups) == len(gt_lr_wd)
    for group, expected in zip(param_groups, gt_lr_wd):
        assert group['weight_decay'] == expected['weight_decay']
        assert group['lr_scale'] == expected['lr_scale']
        # The constructor also writes the scaled value into ``lr`` itself.
        assert group['lr_scale'] == group['lr']
def test_learning_rate_decay_optimizer_constructor():
    """Check stage-wise and layer-wise decay on a ConvNeXt-like model."""
    # Test lr wd for ConvNeXT
    backbone = ToyConvNeXt()
    model = PseudoDataParallel(ToyDetector(backbone))
    optim_wrapper_cfg = dict(
        type='OptimWrapper',
        optimizer=dict(
            type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05))
    # stagewise decay: groups decay by stage index
    stagewise_paramwise_cfg = dict(
        decay_rate=decay_rate, decay_type='stage_wise', num_layers=6)
    optim_constructor = LearningRateDecayOptimizerConstructor(
        optim_wrapper_cfg, stagewise_paramwise_cfg)
    optim_wrapper = optim_constructor(model)
    check_optimizer_lr_wd(optim_wrapper.optimizer,
                          expected_stage_wise_lr_wd_convnext)
    # layerwise decay: groups decay by individual layer index
    layerwise_paramwise_cfg = dict(
        decay_rate=decay_rate, decay_type='layer_wise', num_layers=6)
    optim_constructor = LearningRateDecayOptimizerConstructor(
        optim_wrapper_cfg, layerwise_paramwise_cfg)
    optim_wrapper = optim_constructor(model)
    check_optimizer_lr_wd(optim_wrapper.optimizer,
                          expected_layer_wise_lr_wd_convnext)
| 4,532 | 25.822485 | 77 | py |
ERD | ERD-main/tests/test_structures/test_det_data_sample.py | from unittest import TestCase
import numpy as np
import pytest
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.structures import DetDataSample
def _equal(a, b):
if isinstance(a, (torch.Tensor, np.ndarray)):
return (a == b).all()
else:
return a == b
class TestDetDataSample(TestCase):
    """Tests DetDataSample's metainfo init plus the typed setters/deleters
    for its instance- and pixel-level fields."""

    def test_init(self):
        # Metainfo keys become readable attributes on the sample.
        meta_info = dict(
            img_size=[256, 256],
            scale_factor=np.array([1.5, 1.5]),
            img_shape=torch.rand(4))
        det_data_sample = DetDataSample(metainfo=meta_info)
        assert 'img_size' in det_data_sample
        assert det_data_sample.img_size == [256, 256]
        assert det_data_sample.get('img_size') == [256, 256]

    def test_setter(self):
        # Each field accepts only its declared container type and stores
        # the assigned data unchanged.
        det_data_sample = DetDataSample()
        # test gt_instances
        gt_instances_data = dict(
            bboxes=torch.rand(4, 4),
            labels=torch.rand(4),
            masks=np.random.rand(4, 2, 2))
        gt_instances = InstanceData(**gt_instances_data)
        det_data_sample.gt_instances = gt_instances
        assert 'gt_instances' in det_data_sample
        assert _equal(det_data_sample.gt_instances.bboxes,
                      gt_instances_data['bboxes'])
        assert _equal(det_data_sample.gt_instances.labels,
                      gt_instances_data['labels'])
        assert _equal(det_data_sample.gt_instances.masks,
                      gt_instances_data['masks'])
        # test pred_instances
        pred_instances_data = dict(
            bboxes=torch.rand(2, 4),
            labels=torch.rand(2),
            masks=np.random.rand(2, 2, 2))
        pred_instances = InstanceData(**pred_instances_data)
        det_data_sample.pred_instances = pred_instances
        assert 'pred_instances' in det_data_sample
        assert _equal(det_data_sample.pred_instances.bboxes,
                      pred_instances_data['bboxes'])
        assert _equal(det_data_sample.pred_instances.labels,
                      pred_instances_data['labels'])
        assert _equal(det_data_sample.pred_instances.masks,
                      pred_instances_data['masks'])
        # test proposals
        proposals_data = dict(bboxes=torch.rand(4, 4), labels=torch.rand(4))
        proposals = InstanceData(**proposals_data)
        det_data_sample.proposals = proposals
        assert 'proposals' in det_data_sample
        assert _equal(det_data_sample.proposals.bboxes,
                      proposals_data['bboxes'])
        assert _equal(det_data_sample.proposals.labels,
                      proposals_data['labels'])
        # test ignored_instances
        ignored_instances_data = dict(
            bboxes=torch.rand(4, 4), labels=torch.rand(4))
        ignored_instances = InstanceData(**ignored_instances_data)
        det_data_sample.ignored_instances = ignored_instances
        assert 'ignored_instances' in det_data_sample
        assert _equal(det_data_sample.ignored_instances.bboxes,
                      ignored_instances_data['bboxes'])
        assert _equal(det_data_sample.ignored_instances.labels,
                      ignored_instances_data['labels'])
        # test gt_panoptic_seg
        gt_panoptic_seg_data = dict(panoptic_seg=torch.rand(5, 4))
        gt_panoptic_seg = PixelData(**gt_panoptic_seg_data)
        det_data_sample.gt_panoptic_seg = gt_panoptic_seg
        assert 'gt_panoptic_seg' in det_data_sample
        assert _equal(det_data_sample.gt_panoptic_seg.panoptic_seg,
                      gt_panoptic_seg_data['panoptic_seg'])
        # test pred_panoptic_seg
        pred_panoptic_seg_data = dict(panoptic_seg=torch.rand(5, 4))
        pred_panoptic_seg = PixelData(**pred_panoptic_seg_data)
        det_data_sample.pred_panoptic_seg = pred_panoptic_seg
        assert 'pred_panoptic_seg' in det_data_sample
        assert _equal(det_data_sample.pred_panoptic_seg.panoptic_seg,
                      pred_panoptic_seg_data['panoptic_seg'])
        # test gt_sem_seg
        gt_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))
        gt_segm_seg = PixelData(**gt_segm_seg_data)
        det_data_sample.gt_segm_seg = gt_segm_seg
        assert 'gt_segm_seg' in det_data_sample
        assert _equal(det_data_sample.gt_segm_seg.segm_seg,
                      gt_segm_seg_data['segm_seg'])
        # test pred_segm_seg
        pred_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))
        pred_segm_seg = PixelData(**pred_segm_seg_data)
        det_data_sample.pred_segm_seg = pred_segm_seg
        assert 'pred_segm_seg' in det_data_sample
        assert _equal(det_data_sample.pred_segm_seg.segm_seg,
                      pred_segm_seg_data['segm_seg'])
        # test type error: assigning a raw tensor to a typed field fails
        with pytest.raises(AssertionError):
            det_data_sample.pred_instances = torch.rand(2, 4)
        with pytest.raises(AssertionError):
            det_data_sample.pred_panoptic_seg = torch.rand(2, 4)
        with pytest.raises(AssertionError):
            det_data_sample.pred_sem_seg = torch.rand(2, 4)

    def test_deleter(self):
        # ``del`` removes a previously-set field from the sample.
        gt_instances_data = dict(
            bboxes=torch.rand(4, 4),
            labels=torch.rand(4),
            masks=np.random.rand(4, 2, 2))
        det_data_sample = DetDataSample()
        gt_instances = InstanceData(data=gt_instances_data)
        det_data_sample.gt_instances = gt_instances
        assert 'gt_instances' in det_data_sample
        del det_data_sample.gt_instances
        assert 'gt_instances' not in det_data_sample
        pred_panoptic_seg_data = torch.rand(5, 4)
        pred_panoptic_seg = PixelData(data=pred_panoptic_seg_data)
        det_data_sample.pred_panoptic_seg = pred_panoptic_seg
        assert 'pred_panoptic_seg' in det_data_sample
        del det_data_sample.pred_panoptic_seg
        assert 'pred_panoptic_seg' not in det_data_sample
        pred_segm_seg_data = dict(segm_seg=torch.rand(5, 4, 2))
        pred_segm_seg = PixelData(**pred_segm_seg_data)
        det_data_sample.pred_segm_seg = pred_segm_seg
        assert 'pred_segm_seg' in det_data_sample
        del det_data_sample.pred_segm_seg
        assert 'pred_segm_seg' not in det_data_sample
| 6,181 | 39.671053 | 76 | py |
ERD | ERD-main/tests/test_structures/test_bbox/test_horizontal_boxes.py | import random
from math import sqrt
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.testing import assert_allclose
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
class TestHorizontalBoxes(TestCase):
    def test_init(self):
        """Init accepts xyxy (default and explicit) and cxcywh inputs and
        rejects unknown ``in_mode`` values."""
        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
        th_boxes_cxcywh = torch.Tensor([15, 15, 10, 10]).reshape(1, 1, 4)
        boxes = HorizontalBoxes(th_boxes)
        assert_allclose(boxes.tensor, th_boxes)
        boxes = HorizontalBoxes(th_boxes, in_mode='xyxy')
        assert_allclose(boxes.tensor, th_boxes)
        # cxcywh inputs are converted to xyxy internally.
        boxes = HorizontalBoxes(th_boxes_cxcywh, in_mode='cxcywh')
        assert_allclose(boxes.tensor, th_boxes)
        with self.assertRaises(ValueError):
            boxes = HorizontalBoxes(th_boxes, in_mode='invalid')
    def test_cxcywh(self):
        """xyxy <-> cxcywh conversions round-trip; the ``cxcywh`` property
        matches the static conversion."""
        th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
        th_boxes_cxcywh = torch.Tensor([15, 15, 10, 10]).reshape(1, 1, 4)
        boxes = HorizontalBoxes(th_boxes)
        assert_allclose(
            HorizontalBoxes.xyxy_to_cxcywh(th_boxes), th_boxes_cxcywh)
        assert_allclose(th_boxes,
                        HorizontalBoxes.cxcywh_to_xyxy(th_boxes_cxcywh))
        assert_allclose(boxes.cxcywh, th_boxes_cxcywh)
def test_propoerty(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
# Centers
centers = torch.Tensor([15, 15]).reshape(1, 1, 2)
assert_allclose(boxes.centers, centers)
# Areas
areas = torch.Tensor([100]).reshape(1, 1)
assert_allclose(boxes.areas, areas)
# widths
widths = torch.Tensor([10]).reshape(1, 1)
assert_allclose(boxes.widths, widths)
# heights
heights = torch.Tensor([10]).reshape(1, 1)
assert_allclose(boxes.heights, heights)
def test_flip(self):
img_shape = [50, 85]
# horizontal flip
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
flipped_boxes_th = torch.Tensor([65, 10, 75, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.flip_(img_shape, direction='horizontal')
assert_allclose(boxes.tensor, flipped_boxes_th)
# vertical flip
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
flipped_boxes_th = torch.Tensor([10, 30, 20, 40]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.flip_(img_shape, direction='vertical')
assert_allclose(boxes.tensor, flipped_boxes_th)
# diagonal flip
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
flipped_boxes_th = torch.Tensor([65, 30, 75, 40]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.flip_(img_shape, direction='diagonal')
assert_allclose(boxes.tensor, flipped_boxes_th)
def test_translate(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.translate_([23, 46])
translated_boxes_th = torch.Tensor([33, 56, 43, 66]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, translated_boxes_th)
def test_clip(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
img_shape = [13, 14]
boxes = HorizontalBoxes(th_boxes)
boxes.clip_(img_shape)
cliped_boxes_th = torch.Tensor([10, 10, 14, 13]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, cliped_boxes_th)
def test_rotate(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
center = (15, 15)
angle = -45
boxes = HorizontalBoxes(th_boxes)
boxes.rotate_(center, angle)
rotated_boxes_th = torch.Tensor([
15 - 5 * sqrt(2), 15 - 5 * sqrt(2), 15 + 5 * sqrt(2),
15 + 5 * sqrt(2)
]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, rotated_boxes_th)
def test_project(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes1 = HorizontalBoxes(th_boxes)
boxes2 = boxes1.clone()
matrix = np.zeros((3, 3), dtype=np.float32)
center = [random.random() * 80, random.random() * 80]
angle = random.random() * 180
matrix[:2, :3] = cv2.getRotationMatrix2D(center, angle, 1)
x_translate = random.random() * 40
y_translate = random.random() * 40
matrix[0, 2] = matrix[0, 2] + x_translate
matrix[1, 2] = matrix[1, 2] + y_translate
scale_factor = random.random() * 2
matrix[2, 2] = 1 / scale_factor
boxes1.project_(matrix)
boxes2.rotate_(center, -angle)
boxes2.translate_([x_translate, y_translate])
boxes2.rescale_([scale_factor, scale_factor])
assert_allclose(boxes1.tensor, boxes2.tensor)
# test empty boxes
empty_boxes = HorizontalBoxes(torch.zeros((0, 4)))
empty_boxes.project_(matrix)
def test_rescale(self):
scale_factor = [0.4, 0.8]
# rescale
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.rescale_(scale_factor)
rescaled_boxes_th = torch.Tensor([4, 8, 8, 16]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, rescaled_boxes_th)
def test_resize(self):
scale_factor = [0.4, 0.8]
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 1, 4)
boxes = HorizontalBoxes(th_boxes)
boxes.resize_(scale_factor)
resized_boxes_th = torch.Tensor([13, 11, 17, 19]).reshape(1, 1, 4)
assert_allclose(boxes.tensor, resized_boxes_th)
def test_is_inside(self):
th_boxes = torch.Tensor([[10, 10, 20, 20], [-5, -5, 15, 15],
[45, 45, 55, 55]]).reshape(1, 3, 4)
img_shape = [30, 30]
boxes = HorizontalBoxes(th_boxes)
index = boxes.is_inside(img_shape)
index_th = torch.BoolTensor([True, True, False]).reshape(1, 3)
assert_allclose(index, index_th)
def test_find_inside_points(self):
th_boxes = torch.Tensor([10, 10, 20, 20]).reshape(1, 4)
boxes = HorizontalBoxes(th_boxes)
points = torch.Tensor([[0, 0], [0, 15], [15, 0], [15, 15]])
index = boxes.find_inside_points(points)
index_th = torch.BoolTensor([False, False, False, True]).reshape(4, 1)
assert_allclose(index, index_th)
# is_aligned
boxes = boxes.expand(4, 4)
index = boxes.find_inside_points(points, is_aligned=True)
index_th = torch.BoolTensor([False, False, False, True])
assert_allclose(index, index_th)
def test_from_instance_masks(self):
bitmap_masks = BitmapMasks.random()
boxes = HorizontalBoxes.from_instance_masks(bitmap_masks)
self.assertIsInstance(boxes, HorizontalBoxes)
self.assertEqual(len(boxes), len(bitmap_masks))
polygon_masks = PolygonMasks.random()
boxes = HorizontalBoxes.from_instance_masks(polygon_masks)
self.assertIsInstance(boxes, HorizontalBoxes)
self.assertEqual(len(boxes), len(bitmap_masks))
# zero length masks
bitmap_masks = BitmapMasks.random(num_masks=0)
boxes = HorizontalBoxes.from_instance_masks(bitmap_masks)
self.assertIsInstance(boxes, HorizontalBoxes)
self.assertEqual(len(boxes), 0)
polygon_masks = PolygonMasks.random(num_masks=0)
boxes = HorizontalBoxes.from_instance_masks(polygon_masks)
self.assertIsInstance(boxes, HorizontalBoxes)
self.assertEqual(len(boxes), 0)
| 7,719 | 40.06383 | 78 | py |
ERD | ERD-main/tests/test_structures/test_bbox/test_base_boxes.py | from unittest import TestCase
import numpy as np
import torch
from mmengine.testing import assert_allclose
from .utils import ToyBaseBoxes
class TestBaseBoxes(TestCase):
    """Tests for the tensor-like interface of ``BaseBoxes`` via the toy
    4-value ``ToyBaseBoxes`` subclass."""

    def test_init(self):
        """Boxes require a trailing dimension of exactly 4 values."""
        box_tensor = torch.rand((3, 4, 4))
        boxes = ToyBaseBoxes(box_tensor)
        boxes = ToyBaseBoxes(box_tensor, dtype=torch.float64)
        self.assertEqual(boxes.tensor.dtype, torch.float64)

        if torch.cuda.is_available():
            boxes = ToyBaseBoxes(box_tensor, device='cuda')
            self.assertTrue(boxes.tensor.is_cuda)

        with self.assertRaises(AssertionError):
            box_tensor = torch.rand((4, ))
            boxes = ToyBaseBoxes(box_tensor)
        with self.assertRaises(AssertionError):
            box_tensor = torch.rand((3, 4, 3))
            boxes = ToyBaseBoxes(box_tensor)

    def test_getitem(self):
        """Indexing returns boxes and must not touch the box dimension."""
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))

        # test single dimension index
        # int
        new_boxes = boxes[0]
        self.assertIsInstance(new_boxes, ToyBaseBoxes)
        self.assertEqual(new_boxes.tensor.shape, (4, 4))
        # list
        new_boxes = boxes[[0, 2]]
        self.assertIsInstance(new_boxes, ToyBaseBoxes)
        self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))
        # slice
        new_boxes = boxes[0:2]
        self.assertIsInstance(new_boxes, ToyBaseBoxes)
        self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))
        # torch.LongTensor
        new_boxes = boxes[torch.LongTensor([0, 1])]
        self.assertIsInstance(new_boxes, ToyBaseBoxes)
        self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))
        # torch.BoolTensor
        new_boxes = boxes[torch.BoolTensor([True, False, True])]
        self.assertIsInstance(new_boxes, ToyBaseBoxes)
        self.assertEqual(new_boxes.tensor.shape, (2, 4, 4))
        with self.assertRaises(AssertionError):
            index = torch.rand((2, 4, 4)) > 0
            new_boxes = boxes[index]

        # test multiple dimension index
        # select single box
        new_boxes = boxes[1, 2]
        self.assertIsInstance(new_boxes, ToyBaseBoxes)
        self.assertEqual(new_boxes.tensor.shape, (1, 4))
        # select the last dimension
        with self.assertRaises(AssertionError):
            new_boxes = boxes[1, 2, 1]
        # has Ellipsis
        new_boxes = boxes[None, ...]
        self.assertIsInstance(new_boxes, ToyBaseBoxes)
        self.assertEqual(new_boxes.tensor.shape, (1, 3, 4, 4))
        with self.assertRaises(AssertionError):
            new_boxes = boxes[..., None]

    def test_setitem(self):
        """Assignment accepts only BaseBoxes values, never raw tensors."""
        values = ToyBaseBoxes(torch.rand(3, 4, 4))
        tensor = torch.rand(3, 4, 4)

        # only support BaseBoxes type
        with self.assertRaises(AssertionError):
            boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
            boxes[0:2] = tensor[0:2]

        # test single dimension index
        # int
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        boxes[1] = values[1]
        assert_allclose(boxes.tensor[1], values.tensor[1])
        # list
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        boxes[[1, 2]] = values[[1, 2]]
        assert_allclose(boxes.tensor[[1, 2]], values.tensor[[1, 2]])
        # slice
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        boxes[0:2] = values[0:2]
        assert_allclose(boxes.tensor[0:2], values.tensor[0:2])
        # torch.BoolTensor
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        index = torch.rand(3, 4) > 0.5
        boxes[index] = values[index]
        assert_allclose(boxes.tensor[index], values.tensor[index])

        # multiple dimension index
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        boxes[0:2, 0:2] = values[0:2, 0:2]
        assert_allclose(boxes.tensor[0:2, 0:2], values.tensor[0:2, 0:2])
        # select single box
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        boxes[1, 1] = values[1, 1]
        assert_allclose(boxes.tensor[1, 1], values.tensor[1, 1])
        # select the last dimension
        with self.assertRaises(AssertionError):
            boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
            boxes[1, 1, 1] = values[1, 1, 1]
        # has Ellipsis
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        boxes[0:2, ...] = values[0:2, ...]
        assert_allclose(boxes.tensor[0:2, ...], values.tensor[0:2, ...])

    def test_tensor_like_functions(self):
        """Tensor-mimicking methods preserve the trailing box dimension."""
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        # new_tensor
        boxes.new_tensor([1, 2, 3])
        # new_full
        boxes.new_full((3, 4), 0)
        # new_empty
        boxes.new_empty((3, 4))
        # new_ones
        boxes.new_ones((3, 4))
        # new_zeros
        boxes.new_zeros((3, 4))
        # size
        self.assertEqual(boxes.size(0), 3)
        self.assertEqual(tuple(boxes.size()), (3, 4, 4))
        # dim
        self.assertEqual(boxes.dim(), 3)
        # device
        self.assertIsInstance(boxes.device, torch.device)
        # dtype
        self.assertIsInstance(boxes.dtype, torch.dtype)
        # numpy
        np_boxes = boxes.numpy()
        self.assertIsInstance(np_boxes, np.ndarray)
        # Fixed: the original compared np_boxes with itself, which is
        # always true; compare against the source tensor instead.
        self.assertTrue((np_boxes == boxes.tensor.numpy()).all())
        # to
        new_boxes = boxes.to(torch.uint8)
        self.assertEqual(new_boxes.tensor.dtype, torch.uint8)
        if torch.cuda.is_available():
            new_boxes = boxes.to(device='cuda')
            self.assertTrue(new_boxes.tensor.is_cuda)
        # cpu
        if torch.cuda.is_available():
            new_boxes = boxes.to(device='cuda')
            new_boxes = new_boxes.cpu()
            self.assertFalse(new_boxes.tensor.is_cuda)
        # cuda
        if torch.cuda.is_available():
            new_boxes = boxes.cuda()
            self.assertTrue(new_boxes.tensor.is_cuda)
        # clone
        boxes.clone()
        # detach
        boxes.detach()
        # view
        new_boxes = boxes.view(12, 4)
        self.assertEqual(tuple(new_boxes.size()), (12, 4))
        new_boxes = boxes.view(-1, 4)
        self.assertEqual(tuple(new_boxes.size()), (12, 4))
        with self.assertRaises(AssertionError):
            new_boxes = boxes.view(-1)
        # reshape
        new_boxes = boxes.reshape(12, 4)
        self.assertEqual(tuple(new_boxes.size()), (12, 4))
        new_boxes = boxes.reshape(-1, 4)
        self.assertEqual(tuple(new_boxes.size()), (12, 4))
        with self.assertRaises(AssertionError):
            new_boxes = boxes.reshape(-1)
        # expand
        new_boxes = boxes[None, ...].expand(4, -1, -1, -1)
        self.assertEqual(tuple(new_boxes.size()), (4, 3, 4, 4))
        # repeat
        new_boxes = boxes.repeat(2, 2, 1)
        self.assertEqual(tuple(new_boxes.size()), (6, 8, 4))
        with self.assertRaises(AssertionError):
            new_boxes = boxes.repeat(2, 2, 2)
        # transpose
        new_boxes = boxes.transpose(0, 1)
        self.assertEqual(tuple(new_boxes.size()), (4, 3, 4))
        with self.assertRaises(AssertionError):
            new_boxes = boxes.transpose(1, 2)
        # permute
        new_boxes = boxes.permute(1, 0, 2)
        self.assertEqual(tuple(new_boxes.size()), (4, 3, 4))
        with self.assertRaises(AssertionError):
            new_boxes = boxes.permute(2, 1, 0)
        # split
        boxes_list = boxes.split(1, dim=0)
        for box in boxes_list:
            self.assertIsInstance(box, ToyBaseBoxes)
            self.assertEqual(tuple(box.size()), (1, 4, 4))
        boxes_list = boxes.split([1, 2], dim=0)
        with self.assertRaises(AssertionError):
            boxes_list = boxes.split(1, dim=2)
        # chunk
        # Fixed: this section previously called ``split`` although the
        # comment (and intent) is to cover ``chunk``.
        boxes_list = boxes.chunk(3, dim=1)
        self.assertEqual(len(boxes_list), 2)
        for box in boxes_list:
            self.assertIsInstance(box, ToyBaseBoxes)
        with self.assertRaises(AssertionError):
            boxes_list = boxes.chunk(3, dim=2)
        # unbind
        boxes_list = boxes.unbind(dim=1)
        self.assertEqual(len(boxes_list), 4)
        for box in boxes_list:
            self.assertIsInstance(box, ToyBaseBoxes)
            self.assertEqual(tuple(box.size()), (3, 4))
        with self.assertRaises(AssertionError):
            boxes_list = boxes.unbind(dim=2)
        # flatten
        new_boxes = boxes.flatten()
        self.assertEqual(tuple(new_boxes.size()), (12, 4))
        with self.assertRaises(AssertionError):
            new_boxes = boxes.flatten(end_dim=2)
        # squeeze
        boxes = ToyBaseBoxes(torch.rand(1, 3, 1, 4, 4))
        new_boxes = boxes.squeeze()
        self.assertEqual(tuple(new_boxes.size()), (3, 4, 4))
        new_boxes = boxes.squeeze(dim=2)
        self.assertEqual(tuple(new_boxes.size()), (1, 3, 4, 4))
        # unsqueeze
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        new_boxes = boxes.unsqueeze(0)
        self.assertEqual(tuple(new_boxes.size()), (1, 3, 4, 4))
        with self.assertRaises(AssertionError):
            new_boxes = boxes.unsqueeze(3)
        # cat
        with self.assertRaises(ValueError):
            ToyBaseBoxes.cat([])
        box_list = []
        box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))
        box_list.append(ToyBaseBoxes(torch.rand(1, 4, 4)))
        with self.assertRaises(AssertionError):
            ToyBaseBoxes.cat(box_list, dim=2)
        cat_boxes = ToyBaseBoxes.cat(box_list, dim=0)
        self.assertIsInstance(cat_boxes, ToyBaseBoxes)
        self.assertEqual(tuple(cat_boxes.size()), (4, 4, 4))
        # stack
        with self.assertRaises(ValueError):
            ToyBaseBoxes.stack([])
        box_list = []
        box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))
        box_list.append(ToyBaseBoxes(torch.rand(3, 4, 4)))
        with self.assertRaises(AssertionError):
            ToyBaseBoxes.stack(box_list, dim=3)
        stack_boxes = ToyBaseBoxes.stack(box_list, dim=1)
        self.assertIsInstance(stack_boxes, ToyBaseBoxes)
        self.assertEqual(tuple(stack_boxes.size()), (3, 2, 4, 4))

    def test_misc(self):
        """len/repr and the ``fake_boxes`` factory."""
        boxes = ToyBaseBoxes(torch.rand(3, 4, 4))
        # __len__
        self.assertEqual(len(boxes), 3)
        # __repr__
        repr(boxes)
        # fake_boxes
        new_boxes = boxes.fake_boxes((3, 4, 4), 1)
        self.assertEqual(tuple(new_boxes.size()), (3, 4, 4))
        self.assertEqual(boxes.dtype, new_boxes.dtype)
        self.assertEqual(boxes.device, new_boxes.device)
        self.assertTrue((new_boxes.tensor == 1).all())
        with self.assertRaises(AssertionError):
            new_boxes = boxes.fake_boxes((3, 4, 1))
        new_boxes = boxes.fake_boxes((3, 4, 4), dtype=torch.uint8)
        self.assertEqual(new_boxes.dtype, torch.uint8)
        if torch.cuda.is_available():
            new_boxes = boxes.fake_boxes((3, 4, 4), device='cuda')
            self.assertTrue(new_boxes.tensor.is_cuda)
| 10,821 | 38.068592 | 72 | py |
ERD | ERD-main/tests/test_structures/test_bbox/test_box_type.py | from unittest import TestCase
from unittest.mock import MagicMock
import torch
from mmdet.structures.bbox.box_type import (_box_type_to_name, box_converters,
box_types, convert_box_type,
get_box_type, register_box,
register_box_converter)
from .utils import ToyBaseBoxes
class TestBoxType(TestCase):
    """Tests for the box-type registry and conversion helpers."""

    def setUp(self):
        # Snapshot global registry state so tests can mutate it freely.
        self.box_types = box_types.copy()
        self.box_converters = box_converters.copy()
        self._box_type_to_name = _box_type_to_name.copy()

    def tearDown(self):
        # Clear registered items
        box_types.clear()
        box_converters.clear()
        _box_type_to_name.clear()
        # Restore original items
        box_types.update(self.box_types)
        box_converters.update(self.box_converters)
        _box_type_to_name.update(self._box_type_to_name)

    def test_register_box(self):
        """Registration via decorator and plain call, plus force mode."""

        # test usage of decorator
        @register_box('A')
        class A(ToyBaseBoxes):
            pass

        # test usage of normal function
        class B(ToyBaseBoxes):
            pass

        register_box('B', B)

        # register class without inheriting from BaseBoxes
        with self.assertRaises(AssertionError):

            @register_box('C')
            class C:
                pass

        # test register registered class
        with self.assertRaises(KeyError):

            @register_box('A')
            class AA(ToyBaseBoxes):
                pass

        with self.assertRaises(KeyError):
            register_box('BB', B)

        @register_box('A', force=True)
        class AAA(ToyBaseBoxes):
            pass

        self.assertIs(box_types['a'], AAA)
        self.assertEqual(_box_type_to_name[AAA], 'a')
        register_box('BB', B, force=True)
        self.assertIs(box_types['bb'], B)
        self.assertEqual(_box_type_to_name[B], 'bb')
        # Names and classes must stay a bijection.
        self.assertEqual(len(box_types), len(_box_type_to_name))

    def test_register_box_converter(self):
        """Converter registration and its validity checks."""

        @register_box('A')
        class A(ToyBaseBoxes):
            pass

        @register_box('B')
        class B(ToyBaseBoxes):
            pass

        @register_box('C')
        class C(ToyBaseBoxes):
            pass

        # test usage of decorator
        @register_box_converter('A', 'B')
        def converter_A(bboxes):
            return bboxes

        # test usage of normal function
        def converter_B(bboxes):
            return bboxes

        # Fixed: the two mode names were accidentally fused into one
        # string ('B' 'A' -> 'BA') by implicit concatenation, which
        # passed the converter function as the dst_type argument.
        register_box_converter('B', 'A', converter_B)

        # register uncallable object
        with self.assertRaises(AssertionError):
            register_box_converter('A', 'C', 'uncallable str')

        # test register unregistered bbox mode
        with self.assertRaises(AssertionError):

            @register_box_converter('A', 'D')
            def converter_C(bboxes):
                return bboxes

        # test register registered converter
        with self.assertRaises(KeyError):

            @register_box_converter('A', 'B')
            def converter_D(bboxes):
                return bboxes

        @register_box_converter('A', 'B', force=True)
        def converter_E(bboxes):
            return bboxes

        self.assertIs(box_converters['a2b'], converter_E)

    def test_get_box_type(self):
        """Lookup by name string or by class must agree."""

        @register_box('A')
        class A(ToyBaseBoxes):
            pass

        mode_name, mode_cls = get_box_type('A')
        self.assertEqual(mode_name, 'a')
        self.assertIs(mode_cls, A)
        mode_name, mode_cls = get_box_type(A)
        self.assertEqual(mode_name, 'a')
        self.assertIs(mode_cls, A)

        # get unregistered mode
        class B(ToyBaseBoxes):
            pass

        with self.assertRaises(AssertionError):
            mode_name, mode_cls = get_box_type('B')
        with self.assertRaises(AssertionError):
            mode_name, mode_cls = get_box_type(B)

    def test_convert_box_type(self):
        """Conversion for box objects, tensors, and ndarrays."""

        @register_box('A')
        class A(ToyBaseBoxes):
            pass

        @register_box('B')
        class B(ToyBaseBoxes):
            pass

        @register_box('C')
        class C(ToyBaseBoxes):
            pass

        converter = MagicMock()
        converter.return_value = torch.rand(3, 4, 4)
        register_box_converter('A', 'B', converter)

        bboxes_a = A(torch.rand(3, 4, 4))
        th_bboxes_a = bboxes_a.tensor
        np_bboxes_a = th_bboxes_a.numpy()

        # test convert to mode
        convert_box_type(bboxes_a, dst_type='B')
        self.assertTrue(converter.called)
        # Same-type conversion is a no-op returning the input object.
        converted_bboxes = convert_box_type(bboxes_a, dst_type='A')
        self.assertIs(converted_bboxes, bboxes_a)
        # test convert to unregistered mode
        with self.assertRaises(AssertionError):
            convert_box_type(bboxes_a, dst_type='C')

        # test convert tensor and ndarray
        # without specific src_type
        with self.assertRaises(AssertionError):
            convert_box_type(th_bboxes_a, dst_type='B')
        with self.assertRaises(AssertionError):
            convert_box_type(np_bboxes_a, dst_type='B')
        # test np.ndarray
        convert_box_type(np_bboxes_a, src_type='A', dst_type='B')
        converted_bboxes = convert_box_type(
            np_bboxes_a, src_type='A', dst_type='A')
        self.assertIs(converted_bboxes, np_bboxes_a)
        # test tensor
        convert_box_type(th_bboxes_a, src_type='A', dst_type='B')
        converted_bboxes = convert_box_type(
            th_bboxes_a, src_type='A', dst_type='A')
        self.assertIs(converted_bboxes, th_bboxes_a)
        # test other type
        with self.assertRaises(TypeError):
            convert_box_type([[1, 2, 3, 4]], src_type='A', dst_type='B')
| 5,813 | 29.28125 | 78 | py |
ERD | ERD-main/tests/test_evaluation/test_metrics/test_coco_panoptic_metric.py | import os
import os.path as osp
import tempfile
import unittest
from copy import deepcopy
import mmcv
import numpy as np
import torch
from mmengine.fileio import dump
from mmdet.evaluation import INSTANCE_OFFSET, CocoPanopticMetric
try:
import panopticapi
except ImportError:
panopticapi = None
class TestCocoPanopticMetric(unittest.TestCase):
    """Tests for ``CocoPanopticMetric`` on a tiny synthetic panoptic
    fixture (one 80x60 image, 3 thing segments plus background)."""

    def _create_panoptic_gt_annotations(self, ann_file, seg_map_dir):
        """Write a one-image panoptic GT json to ``ann_file`` and the
        matching RGB-encoded segment-id map into ``seg_map_dir``.

        Returns the GT json dict.
        """
        categories = [{
            'id': 0,
            'name': 'person',
            'supercategory': 'person',
            'isthing': 1
        }, {
            'id': 1,
            'name': 'cat',
            'supercategory': 'cat',
            'isthing': 1
        }, {
            'id': 2,
            'name': 'dog',
            'supercategory': 'dog',
            'isthing': 1
        }, {
            'id': 3,
            'name': 'wall',
            'supercategory': 'wall',
            'isthing': 0
        }]

        images = [{
            'id': 0,
            'width': 80,
            'height': 60,
            'file_name': 'fake_name1.jpg',
        }]

        annotations = [{
            'segments_info': [{
                'id': 1,
                'category_id': 0,
                'area': 400,
                'bbox': [10, 10, 10, 40],
                'iscrowd': 0
            }, {
                'id': 2,
                'category_id': 0,
                'area': 400,
                'bbox': [30, 10, 10, 40],
                'iscrowd': 0
            }, {
                'id': 3,
                'category_id': 2,
                'iscrowd': 0,
                'bbox': [50, 10, 10, 5],
                'area': 50
            }, {
                'id': 4,
                'category_id': 3,
                'iscrowd': 0,
                'bbox': [0, 0, 80, 60],
                'area': 3950
            }],
            'file_name':
            'fake_name1.png',
            'image_id':
            0
        }]

        gt_json = {
            'images': images,
            'annotations': annotations,
            'categories': categories
        }
        # 4 is the id of the background class annotation.
        gt = np.zeros((60, 80), dtype=np.int64) + 4
        gt_bboxes = np.array(
            [[10, 10, 10, 40], [30, 10, 10, 40], [50, 10, 10, 5]],
            dtype=np.int64)
        for i in range(3):
            x, y, w, h = gt_bboxes[i]
            gt[y:y + h, x:x + w] = i + 1  # id starts from 1

        # Pack each segment id into 24-bit RGB (id = R + G*256 + B*256^2).
        rgb_gt_seg_map = np.zeros(gt.shape + (3, ), dtype=np.uint8)
        rgb_gt_seg_map[:, :, 2] = gt // (256 * 256)
        rgb_gt_seg_map[:, :, 1] = gt % (256 * 256) // 256
        rgb_gt_seg_map[:, :, 0] = gt % 256
        img_path = osp.join(seg_map_dir, 'fake_name1.png')
        # Channels reversed because mmcv.imwrite expects BGR input.
        mmcv.imwrite(rgb_gt_seg_map[:, :, ::-1], img_path)

        dump(gt_json, ann_file)
        return gt_json

    def _create_panoptic_data_samples(self):
        """Build one prediction data sample aligned with the GT fixture."""
        # predictions
        # TP for background class, IoU=3576/4324=0.827
        # 2 the category id of the background class
        pred = np.zeros((60, 80), dtype=np.int64) + 2
        pred_bboxes = np.array(
            [
                [11, 11, 10, 40],  # TP IoU=351/449=0.78
                [38, 10, 10, 40],  # FP
                [51, 10, 10, 5]  # TP IoU=45/55=0.818
            ],
            dtype=np.int64)
        pred_labels = np.array([0, 0, 1], dtype=np.int64)
        for i in range(3):
            x, y, w, h = pred_bboxes[i]
            # Panoptic encoding: instance_id * INSTANCE_OFFSET + label.
            pred[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + pred_labels[i]

        data_samples = [{
            'img_id':
            0,
            'ori_shape': (60, 80),
            'img_path':
            'xxx/fake_name1.jpg',
            'segments_info': [{
                'id': 1,
                'category': 0,
                'is_thing': 1
            }, {
                'id': 2,
                'category': 0,
                'is_thing': 1
            }, {
                'id': 3,
                'category': 1,
                'is_thing': 1
            }, {
                'id': 4,
                'category': 2,
                'is_thing': 0
            }],
            'seg_map_path':
            osp.join(self.gt_seg_dir, 'fake_name1.png'),
            'pred_panoptic_seg': {
                'sem_seg': torch.from_numpy(pred).unsqueeze(0)
            },
        }]
        return data_samples

    def setUp(self):
        """Create the GT fixture in a scratch dir and the expected scores."""
        self.tmp_dir = tempfile.TemporaryDirectory()
        self.gt_json_path = osp.join(self.tmp_dir.name, 'gt.json')
        self.gt_seg_dir = osp.join(self.tmp_dir.name, 'gt_seg')
        os.mkdir(self.gt_seg_dir)
        self._create_panoptic_gt_annotations(self.gt_json_path,
                                             self.gt_seg_dir)

        self.dataset_meta = {
            'classes': ('person', 'dog', 'wall'),
            'thing_classes': ('person', 'dog'),
            'stuff_classes': ('wall', )
        }
        # Expected PQ/SQ/RQ values for the fixture above (overall,
        # things-only, stuff-only).
        self.target = {
            'coco_panoptic/PQ': 67.86874803219071,
            'coco_panoptic/SQ': 80.89770126158936,
            'coco_panoptic/RQ': 83.33333333333334,
            'coco_panoptic/PQ_th': 60.45252075318891,
            'coco_panoptic/SQ_th': 79.9959505972869,
            'coco_panoptic/RQ_th': 75.0,
            'coco_panoptic/PQ_st': 82.70120259019427,
            'coco_panoptic/SQ_st': 82.70120259019427,
            'coco_panoptic/RQ_st': 100.0
        }
        self.data_samples = self._create_panoptic_data_samples()

    def tearDown(self):
        # Remove the scratch directory created in setUp.
        self.tmp_dir.cleanup()

    @unittest.skipIf(panopticapi is not None, 'panopticapi is installed')
    def test_init(self):
        # Without panopticapi the metric must refuse to construct.
        with self.assertRaises(RuntimeError):
            CocoPanopticMetric()

    @unittest.skipIf(panopticapi is None, 'panopticapi is not installed')
    def test_evaluate_without_json(self):
        """GT is read from seg maps directly when no ann json is given."""
        # with tmpfile, without json
        metric = CocoPanopticMetric(
            ann_file=None,
            seg_prefix=self.gt_seg_dir,
            classwise=False,
            nproc=1,
            outfile_prefix=None)
        metric.dataset_meta = self.dataset_meta
        metric.process({}, deepcopy(self.data_samples))
        eval_results = metric.evaluate(size=1)
        self.assertDictEqual(eval_results, self.target)

        # without tmpfile and json
        outfile_prefix = f'{self.tmp_dir.name}/test'
        metric = CocoPanopticMetric(
            ann_file=None,
            seg_prefix=self.gt_seg_dir,
            classwise=False,
            nproc=1,
            outfile_prefix=outfile_prefix)
        metric.dataset_meta = self.dataset_meta
        metric.process({}, deepcopy(self.data_samples))
        eval_results = metric.evaluate(size=1)
        self.assertDictEqual(eval_results, self.target)

    @unittest.skipIf(panopticapi is None, 'panopticapi is not installed')
    def test_evaluate_with_json(self):
        """Same scores whether classwise or not, with or without tmpfile."""
        # with tmpfile and json
        metric = CocoPanopticMetric(
            ann_file=self.gt_json_path,
            seg_prefix=self.gt_seg_dir,
            classwise=False,
            nproc=1,
            outfile_prefix=None)
        metric.dataset_meta = self.dataset_meta
        metric.process({}, deepcopy(self.data_samples))
        eval_results = metric.evaluate(size=1)
        self.assertDictEqual(eval_results, self.target)

        # classwise
        metric = CocoPanopticMetric(
            ann_file=self.gt_json_path,
            seg_prefix=self.gt_seg_dir,
            classwise=True,
            nproc=1,
            outfile_prefix=None)
        metric.dataset_meta = self.dataset_meta
        metric.process({}, deepcopy(self.data_samples))
        eval_results = metric.evaluate(size=1)
        self.assertDictEqual(eval_results, self.target)

        # without tmpfile, with json
        outfile_prefix = f'{self.tmp_dir.name}/test1'
        metric = CocoPanopticMetric(
            ann_file=self.gt_json_path,
            seg_prefix=self.gt_seg_dir,
            classwise=False,
            nproc=1,
            outfile_prefix=outfile_prefix)
        metric.dataset_meta = self.dataset_meta
        metric.process({}, deepcopy(self.data_samples))
        eval_results = metric.evaluate(size=1)
        self.assertDictEqual(eval_results, self.target)

    @unittest.skipIf(panopticapi is None, 'panopticapi is not installed')
    def test_format_only(self):
        """format_only requires an outfile_prefix and writes result files
        without computing scores."""
        with self.assertRaises(AssertionError):
            metric = CocoPanopticMetric(
                ann_file=self.gt_json_path,
                seg_prefix=self.gt_seg_dir,
                classwise=False,
                nproc=1,
                format_only=True,
                outfile_prefix=None)

        outfile_prefix = f'{self.tmp_dir.name}/test'
        metric = CocoPanopticMetric(
            ann_file=self.gt_json_path,
            seg_prefix=self.gt_seg_dir,
            classwise=False,
            nproc=1,
            format_only=True,
            outfile_prefix=outfile_prefix)
        metric.dataset_meta = self.dataset_meta
        metric.process({}, deepcopy(self.data_samples))
        eval_results = metric.evaluate(size=1)
        self.assertDictEqual(eval_results, dict())
        self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.panoptic'))
        self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.panoptic.json'))
| 9,287 | 31.475524 | 79 | py |
ERD | ERD-main/tests/test_evaluation/test_metrics/test_crowdhuman_metric.py | import os.path as osp
import tempfile
from unittest import TestCase
import numpy as np
import torch
from mmdet.evaluation import CrowdHumanMetric
class TestCrowdHumanMetric(TestCase):
    """Tests for ``CrowdHumanMetric`` on the bundled toy annotation file."""

    def _create_dummy_results(self):
        """Return fake predictions converted from xywh to xyxy with
        unit confidence scores."""
        bboxes = np.array([[1330, 317, 418, 1338], [792, 24, 723, 2017],
                           [693, 291, 307, 894], [522, 290, 285, 826],
                           [728, 336, 175, 602], [92, 337, 267, 681]])
        bboxes[:, 2:4] += bboxes[:, 0:2]
        scores = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        return dict(
            bboxes=torch.from_numpy(bboxes), scores=torch.from_numpy(scores))

    def setUp(self):
        self.tmp_dir = tempfile.TemporaryDirectory()
        # Fixed: a stray trailing comma previously made this a 1-tuple,
        # forcing every use site to unwrap it with ``[0]``.
        self.ann_file_path = \
            'tests/data/crowdhuman_dataset/test_annotation_train.odgt'

    def tearDown(self):
        self.tmp_dir.cleanup()

    def test_init(self):
        """An unknown metric name must raise ``KeyError``."""
        with self.assertRaisesRegex(KeyError, 'metric should be one of'):
            CrowdHumanMetric(ann_file=self.ann_file_path, metric='unknown')

    def test_evaluate(self):
        """Perfect predictions yield mAP=0.8333, mMR=0 and JI=1."""
        # create dummy data
        dummy_pred = self._create_dummy_results()

        crowdhuman_metric = CrowdHumanMetric(
            ann_file=self.ann_file_path,
            outfile_prefix=f'{self.tmp_dir.name}/test')
        crowdhuman_metric.process({}, [
            dict(
                pred_instances=dummy_pred,
                img_id='283554,35288000868e92d4',
                ori_shape=(1640, 1640))
        ])
        eval_results = crowdhuman_metric.evaluate(size=1)
        target = {
            'crowd_human/mAP': 0.8333,
            'crowd_human/mMR': 0.0,
            'crowd_human/JI': 1.0
        }
        self.assertDictEqual(eval_results, target)
        self.assertTrue(osp.isfile(osp.join(self.tmp_dir.name, 'test.json')))
| 1,834 | 32.363636 | 78 | py |
ERD | ERD-main/tests/test_evaluation/test_metrics/test_coco_metric.py | import os.path as osp
import tempfile
from unittest import TestCase
import numpy as np
import pycocotools.mask as mask_util
import torch
from mmengine.fileio import dump
from mmdet.evaluation import CocoMetric
class TestCocoMetric(TestCase):
    def _create_dummy_coco_json(self, json_name):
        """Write a minimal COCO-format json to ``json_name``: one 640x640
        image, four annotations and two categories (car, bicycle)."""
        # Fortran order: pycocotools' RLE encoder expects a column-major
        # uint8 array.
        dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)
        dummy_mask[:5, :5] = 1
        rle_mask = mask_util.encode(dummy_mask)
        # Decode counts so the RLE is json-serializable.
        rle_mask['counts'] = rle_mask['counts'].decode('utf-8')
        image = {
            'id': 0,
            'width': 640,
            'height': 640,
            'file_name': 'fake_name.jpg',
        }

        annotation_1 = {
            'id': 1,
            'image_id': 0,
            'category_id': 0,
            'area': 400,
            'bbox': [50, 60, 20, 20],
            'iscrowd': 0,
            'segmentation': rle_mask,
        }

        annotation_2 = {
            'id': 2,
            'image_id': 0,
            'category_id': 0,
            'area': 900,
            'bbox': [100, 120, 30, 30],
            'iscrowd': 0,
            'segmentation': rle_mask,
        }

        annotation_3 = {
            'id': 3,
            'image_id': 0,
            'category_id': 1,
            'area': 1600,
            'bbox': [150, 160, 40, 40],
            'iscrowd': 0,
            'segmentation': rle_mask,
        }

        annotation_4 = {
            'id': 4,
            'image_id': 0,
            'category_id': 0,
            'area': 10000,
            'bbox': [250, 260, 100, 100],
            'iscrowd': 0,
            'segmentation': rle_mask,
        }

        categories = [
            {
                'id': 0,
                'name': 'car',
                'supercategory': 'car',
            },
            {
                'id': 1,
                'name': 'bicycle',
                'supercategory': 'bicycle',
            },
        ]

        fake_json = {
            'images': [image],
            'annotations':
            [annotation_1, annotation_2, annotation_3, annotation_4],
            'categories': categories
        }

        dump(fake_json, json_name)
def _create_dummy_results(self):
bboxes = np.array([[50, 60, 70, 80], [100, 120, 130, 150],
[150, 160, 190, 200], [250, 260, 350, 360]])
scores = np.array([1.0, 0.98, 0.96, 0.95])
labels = np.array([0, 0, 1, 0])
dummy_mask = np.zeros((4, 10, 10), dtype=np.uint8)
dummy_mask[:, :5, :5] = 1
return dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels),
masks=torch.from_numpy(dummy_mask))
    def setUp(self):
        # Scratch directory for the generated json files; removed in
        # tearDown.
        self.tmp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        # Delete the scratch directory and everything written into it.
        self.tmp_dir.cleanup()
    def test_init(self):
        """An unknown metric name must raise ``KeyError``."""
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_coco_json(fake_json_file)
        with self.assertRaisesRegex(KeyError, 'metric should be one of'):
            CocoMetric(ann_file=fake_json_file, metric='unknown')
    def test_evaluate(self):
        """Perfect predictions yield mAP=1.0 for bbox and segm metrics;
        also covers invalid and custom ``metric_items``."""
        # create dummy data
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_coco_json(fake_json_file)
        dummy_pred = self._create_dummy_results()

        # test single coco dataset evaluation
        coco_metric = CocoMetric(
            ann_file=fake_json_file,
            classwise=False,
            outfile_prefix=f'{self.tmp_dir.name}/test')
        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
        coco_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = coco_metric.evaluate(size=1)
        target = {
            'coco/bbox_mAP': 1.0,
            'coco/bbox_mAP_50': 1.0,
            'coco/bbox_mAP_75': 1.0,
            'coco/bbox_mAP_s': 1.0,
            'coco/bbox_mAP_m': 1.0,
            'coco/bbox_mAP_l': 1.0,
        }
        self.assertDictEqual(eval_results, target)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))

        # test box and segm coco dataset evaluation
        coco_metric = CocoMetric(
            ann_file=fake_json_file,
            metric=['bbox', 'segm'],
            classwise=False,
            outfile_prefix=f'{self.tmp_dir.name}/test')
        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
        coco_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = coco_metric.evaluate(size=1)
        target = {
            'coco/bbox_mAP': 1.0,
            'coco/bbox_mAP_50': 1.0,
            'coco/bbox_mAP_75': 1.0,
            'coco/bbox_mAP_s': 1.0,
            'coco/bbox_mAP_m': 1.0,
            'coco/bbox_mAP_l': 1.0,
            'coco/segm_mAP': 1.0,
            'coco/segm_mAP_50': 1.0,
            'coco/segm_mAP_75': 1.0,
            'coco/segm_mAP_s': 1.0,
            'coco/segm_mAP_m': 1.0,
            'coco/segm_mAP_l': 1.0,
        }
        self.assertDictEqual(eval_results, target)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))

        # test invalid custom metric_items
        with self.assertRaisesRegex(KeyError,
                                    'metric item "invalid" is not supported'):
            coco_metric = CocoMetric(
                ann_file=fake_json_file, metric_items=['invalid'])
            coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
            coco_metric.process({}, [
                dict(
                    pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))
            ])
            coco_metric.evaluate(size=1)

        # test custom metric_items
        coco_metric = CocoMetric(
            ann_file=fake_json_file, metric_items=['mAP_m'])
        coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
        coco_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = coco_metric.evaluate(size=1)
        target = {
            'coco/bbox_mAP_m': 1.0,
        }
        self.assertDictEqual(eval_results, target)
def test_classwise_evaluate(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
# test single coco dataset evaluation
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='bbox', classwise=True)
# coco_metric1 = CocoMetric(
# ann_file=fake_json_file, metric='bbox', classwise=True)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
target = {
'coco/bbox_mAP': 1.0,
'coco/bbox_mAP_50': 1.0,
'coco/bbox_mAP_75': 1.0,
'coco/bbox_mAP_s': 1.0,
'coco/bbox_mAP_m': 1.0,
'coco/bbox_mAP_l': 1.0,
'coco/car_precision': 1.0,
'coco/bicycle_precision': 1.0,
}
self.assertDictEqual(eval_results, target)
def test_manually_set_iou_thrs(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
# test single coco dataset evaluation
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='bbox', iou_thrs=[0.3, 0.6])
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
self.assertEqual(coco_metric.iou_thrs, [0.3, 0.6])
def test_fast_eval_recall(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
# test default proposal nums
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='proposal_fast')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
target = {'coco/AR@100': 1.0, 'coco/AR@300': 1.0, 'coco/AR@1000': 1.0}
self.assertDictEqual(eval_results, target)
# test manually set proposal nums
coco_metric = CocoMetric(
ann_file=fake_json_file,
metric='proposal_fast',
proposal_nums=(2, 4))
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
target = {'coco/AR@2': 0.5, 'coco/AR@4': 1.0}
self.assertDictEqual(eval_results, target)
def test_evaluate_proposal(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
coco_metric = CocoMetric(ann_file=fake_json_file, metric='proposal')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
print(eval_results)
target = {
'coco/AR@100': 1,
'coco/AR@300': 1.0,
'coco/AR@1000': 1.0,
'coco/AR_s@1000': 1.0,
'coco/AR_m@1000': 1.0,
'coco/AR_l@1000': 1.0
}
self.assertDictEqual(eval_results, target)
def test_empty_results(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
coco_metric = CocoMetric(ann_file=fake_json_file, metric='bbox')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
bboxes = np.zeros((0, 4))
labels = np.array([])
scores = np.array([])
dummy_mask = np.zeros((0, 10, 10), dtype=np.uint8)
empty_pred = dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels),
masks=torch.from_numpy(dummy_mask))
coco_metric.process(
{},
[dict(pred_instances=empty_pred, img_id=0, ori_shape=(640, 640))])
# coco api Index error will be caught
coco_metric.evaluate(size=1)
def test_evaluate_without_json(self):
dummy_pred = self._create_dummy_results()
dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)
dummy_mask[:5, :5] = 1
rle_mask = mask_util.encode(dummy_mask)
rle_mask['counts'] = rle_mask['counts'].decode('utf-8')
instances = [{
'bbox_label': 0,
'bbox': [50, 60, 70, 80],
'ignore_flag': 0,
'mask': rle_mask,
}, {
'bbox_label': 0,
'bbox': [100, 120, 130, 150],
'ignore_flag': 0,
'mask': rle_mask,
}, {
'bbox_label': 1,
'bbox': [150, 160, 190, 200],
'ignore_flag': 0,
'mask': rle_mask,
}, {
'bbox_label': 0,
'bbox': [250, 260, 350, 360],
'ignore_flag': 0,
'mask': rle_mask,
}]
coco_metric = CocoMetric(
ann_file=None,
metric=['bbox', 'segm'],
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process({}, [
dict(
pred_instances=dummy_pred,
img_id=0,
ori_shape=(640, 640),
instances=instances)
])
eval_results = coco_metric.evaluate(size=1)
print(eval_results)
target = {
'coco/bbox_mAP': 1.0,
'coco/bbox_mAP_50': 1.0,
'coco/bbox_mAP_75': 1.0,
'coco/bbox_mAP_s': 1.0,
'coco/bbox_mAP_m': 1.0,
'coco/bbox_mAP_l': 1.0,
'coco/segm_mAP': 1.0,
'coco/segm_mAP_50': 1.0,
'coco/segm_mAP_75': 1.0,
'coco/segm_mAP_s': 1.0,
'coco/segm_mAP_m': 1.0,
'coco/segm_mAP_l': 1.0,
}
self.assertDictEqual(eval_results, target)
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.gt.json')))
def test_format_only(self):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()
with self.assertRaises(AssertionError):
CocoMetric(
ann_file=fake_json_file,
classwise=False,
format_only=True,
outfile_prefix=None)
coco_metric = CocoMetric(
ann_file=fake_json_file,
metric='bbox',
classwise=False,
format_only=True,
outfile_prefix=f'{self.tmp_dir.name}/test')
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
[dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
eval_results = coco_metric.evaluate(size=1)
self.assertDictEqual(eval_results, dict())
self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.bbox.json'))
| 14,349 | 34.696517 | 78 | py |
ERD | ERD-main/tests/test_evaluation/test_metrics/test_cityscapes_metric.py | import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
    """Unit tests for ``CityScapesMetric``."""

    def setUp(self):
        # fresh temporary workspace for every test
        self.tmp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.tmp_dir.cleanup()

    @unittest.skipIf(cityscapesscripts is None,
                     'cityscapesscripts is not installed.')
    def test_init(self):
        # test with outfile_prefix = None
        with self.assertRaises(AssertionError):
            CityScapesMetric(outfile_prefix=None)

    @unittest.skipIf(cityscapesscripts is None,
                     'cityscapesscripts is not installed.')
    def test_evaluate(self):
        """Evaluate two dummy predictions against generated GT images."""
        dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
        dummy_mask1[:, :10, :10] = 1
        dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
        dummy_mask2[:, :10, :10] = 1
        self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
        self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
        city = 'lindau'
        sequenceNb = '000000'
        frameNb = '000019'
        img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
        img_path1 = osp.join(self.seg_prefix, city, img_name1)
        frameNb = '000020'
        img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
        img_path2 = osp.join(self.seg_prefix, city, img_name2)
        os.makedirs(osp.join(self.seg_prefix, city))
        # instance id convention of cityscapes: class_id * 1000 + index
        masks1 = np.zeros((20, 20), dtype=np.int32)
        masks1[:10, :10] = 24 * 1000
        Image.fromarray(masks1).save(img_path1)
        masks2 = np.zeros((20, 20), dtype=np.int32)
        masks2[:10, :10] = 24 * 1000 + 1
        Image.fromarray(masks2).save(img_path2)
        data_samples = [{
            'img_path': img_path1,
            'pred_instances': {
                'scores': torch.from_numpy(np.array([1.0])),
                'labels': torch.from_numpy(np.array([0])),
                'masks': torch.from_numpy(dummy_mask1)
            }
        }, {
            'img_path': img_path2,
            'pred_instances': {
                'scores': torch.from_numpy(np.array([0.98])),
                'labels': torch.from_numpy(np.array([1])),
                'masks': torch.from_numpy(dummy_mask2)
            }
        }]
        target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
        metric = CityScapesMetric(
            seg_prefix=self.seg_prefix,
            format_only=False,
            outfile_prefix=self.outfile_prefix)
        metric.dataset_meta = dict(
            classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
                     'motorcycle', 'bicycle'))
        metric.process({}, data_samples)
        results = metric.evaluate(size=2)
        self.assertDictEqual(results, target)
        del metric
        # deleting the metric must clean up its temporary result files.
        # NOTE: the original assertion used a plain string literal
        # '{self.outfile_prefix}.results' (missing f-prefix), so it could
        # never fail; fixed to interpolate the real path.
        self.assertFalse(osp.exists(f'{self.outfile_prefix}.results'))
        # test format_only
        metric = CityScapesMetric(
            seg_prefix=self.seg_prefix,
            format_only=True,
            outfile_prefix=self.outfile_prefix)
        metric.dataset_meta = dict(
            classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
                     'motorcycle', 'bicycle'))
        metric.process({}, data_samples)
        results = metric.evaluate(size=2)
        # format-only mode computes no metric values
        self.assertDictEqual(results, dict())
| 3,528 | 33.262136 | 78 | py |
ERD | ERD-main/tests/test_evaluation/test_metrics/test_dump_det_results.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
import torch
from mmengine.fileio import load
from torch import Tensor
from mmdet.evaluation import DumpDetResults
from mmdet.structures.mask import encode_mask_results
class TestDumpResults(TestCase):
    """Tests for the ``DumpDetResults`` metric."""

    def test_init(self):
        # only .pkl output files are accepted
        with self.assertRaisesRegex(ValueError,
                                    'The output file must be a pkl file.'):
            DumpDetResults(out_file_path='./results.json')

    def test_process(self):
        dumper = DumpDetResults(out_file_path='./results.pkl')
        samples = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]
        dumper.process(None, samples)
        self.assertEqual(len(dumper.results), 1)
        # tensors are moved to CPU before being stored
        self.assertEqual(dumper.results[0]['data'][0].device,
                         torch.device('cpu'))
        dumper = DumpDetResults(out_file_path='./results.pkl')
        masks = torch.zeros(10, 10, 4)
        samples = [dict(pred_instances=dict(masks=masks), gt_instances=[])]
        dumper.process(None, samples)
        self.assertEqual(len(dumper.results), 1)
        # masks are stored RLE-encoded and GT annotations are dropped
        self.assertEqual(dumper.results[0]['pred_instances']['masks'],
                         encode_mask_results(masks.numpy()))
        self.assertNotIn('gt_instances', dumper.results[0])

    def test_compute_metrics(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            out_path = osp.join(tmp_dir, 'results.pkl')
            dumper = DumpDetResults(out_file_path=out_path)
            samples = [dict(data=(Tensor([1, 2, 3]), Tensor([4, 5, 6])))]
            dumper.process(None, samples)
            dumper.compute_metrics(dumper.results)
            self.assertTrue(osp.isfile(out_path))
            loaded = load(out_path)
            self.assertEqual(len(loaded), 1)
            self.assertEqual(loaded[0]['data'][0].device, torch.device('cpu'))
| 1,963 | 35.37037 | 75 | py |
ERD | ERD-main/tests/test_evaluation/test_metrics/test_openimages_metric.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import numpy as np
import torch
from mmdet.datasets import OpenImagesDataset
from mmdet.evaluation import OpenImagesMetric
from mmdet.utils import register_all_modules
class TestOpenImagesMetric(unittest.TestCase):
    """Unit tests for ``OpenImagesMetric``."""

    def _create_dummy_results(self):
        """Build a fake ``pred_instances`` dict with four detections."""
        bboxes = np.array([[23.2172, 31.7541, 987.3413, 357.8443],
                           [100, 120, 130, 150], [150, 160, 190, 200],
                           [250, 260, 350, 360]])
        scores = np.array([1.0, 0.98, 0.96, 0.95])
        labels = np.array([0, 0, 0, 0])
        return dict(
            bboxes=torch.from_numpy(bboxes),
            scores=torch.from_numpy(scores),
            labels=torch.from_numpy(labels))

    def test_init(self):
        """Invalid threshold configurations must raise ``AssertionError``."""
        # test invalid iou_thrs
        with self.assertRaises(AssertionError):
            OpenImagesMetric(iou_thrs={'a', 0.5}, ioa_thrs={'b', 0.5})
        # test ioa and iou_thrs length not equal
        with self.assertRaises(AssertionError):
            OpenImagesMetric(iou_thrs=[0.5, 0.75], ioa_thrs=[0.5])
        # a scalar threshold is normalized into a single-element list
        metric = OpenImagesMetric(iou_thrs=0.6)
        self.assertEqual(metric.iou_thrs, [0.6])

    def test_eval(self):
        """End-to-end evaluation on the bundled OpenImages test fixtures."""
        register_all_modules()
        # relies on the small fixture dataset shipped under tests/data
        dataset = OpenImagesDataset(
            data_root='tests/data/OpenImages/',
            ann_file='annotations/oidv6-train-annotations-bbox.csv',
            data_prefix=dict(img='OpenImages/train/'),
            label_file='annotations/class-descriptions-boxable.csv',
            hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
            meta_file='annotations/image-metas.pkl',
            pipeline=[
                dict(type='LoadAnnotations', with_bbox=True),
                dict(
                    type='PackDetInputs',
                    meta_keys=('img_id', 'img_path', 'instances'))
            ])
        dataset.full_init()
        data_sample = dataset[0]['data_samples'].to_dict()
        data_sample['pred_instances'] = self._create_dummy_results()
        metric = OpenImagesMetric()
        metric.dataset_meta = dataset.metainfo
        metric.process({}, [data_sample])
        results = metric.evaluate(size=len(dataset))
        targets = {'openimages/AP50': 1.0, 'openimages/mAP': 1.0}
        self.assertDictEqual(results, targets)
        # test multi-threshold
        metric = OpenImagesMetric(iou_thrs=[0.1, 0.5], ioa_thrs=[0.1, 0.5])
        metric.dataset_meta = dataset.metainfo
        metric.process({}, [data_sample])
        results = metric.evaluate(size=len(dataset))
        targets = {
            'openimages/AP10': 1.0,
            'openimages/AP50': 1.0,
            'openimages/mAP': 1.0
        }
        self.assertDictEqual(results, targets)
| 2,768 | 36.931507 | 75 | py |
ERD | ERD-main/tests/test_evaluation/test_metrics/test_lvis_metric.py | import os.path as osp
import tempfile
import unittest
import numpy as np
import pycocotools.mask as mask_util
import torch
from mmdet.evaluation.metrics import LVISMetric
try:
import lvis
except ImportError:
lvis = None
from mmengine.fileio import dump
class TestLVISMetric(unittest.TestCase):
    """Unit tests for ``LVISMetric`` based on a tiny generated dataset."""

    def _create_dummy_lvis_json(self, json_name):
        """Write a minimal LVIS-format annotation file to ``json_name``."""
        # one shared 10x10 RLE mask reused by every annotation
        dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)
        dummy_mask[:5, :5] = 1
        rle_mask = mask_util.encode(dummy_mask)
        rle_mask['counts'] = rle_mask['counts'].decode('utf-8')
        image = {
            'id': 0,
            'width': 640,
            'height': 640,
            'neg_category_ids': [],
            'not_exhaustive_category_ids': [],
            'coco_url': 'http://images.cocodataset.org/val2017/0.jpg',
        }
        annotation_1 = {
            'id': 1,
            'image_id': 0,
            'category_id': 1,
            'area': 400,
            'bbox': [50, 60, 20, 20],
            'segmentation': rle_mask,
        }
        annotation_2 = {
            'id': 2,
            'image_id': 0,
            'category_id': 1,
            'area': 900,
            'bbox': [100, 120, 30, 30],
            'segmentation': rle_mask,
        }
        annotation_3 = {
            'id': 3,
            'image_id': 0,
            'category_id': 2,
            'area': 1600,
            'bbox': [150, 160, 40, 40],
            'segmentation': rle_mask,
        }
        annotation_4 = {
            'id': 4,
            'image_id': 0,
            'category_id': 1,
            'area': 10000,
            'bbox': [250, 260, 100, 100],
            'segmentation': rle_mask,
        }
        # 'frequency' drives the APr/APc/APf split in LVIS
        categories = [
            {
                'id': 1,
                'name': 'aerosol_can',
                'frequency': 'c',
                'image_count': 64
            },
            {
                'id': 2,
                'name': 'air_conditioner',
                'frequency': 'f',
                'image_count': 364
            },
        ]
        fake_json = {
            'images': [image],
            'annotations':
            [annotation_1, annotation_2, annotation_3, annotation_4],
            'categories': categories
        }
        dump(fake_json, json_name)

    def _create_dummy_results(self):
        """Build fake predictions matching the dummy annotations above."""
        bboxes = np.array([[50, 60, 70, 80], [100, 120, 130, 150],
                           [150, 160, 190, 200], [250, 260, 350, 360]])
        scores = np.array([1.0, 0.98, 0.96, 0.95])
        labels = np.array([0, 0, 1, 0])
        dummy_mask = np.zeros((4, 10, 10), dtype=np.uint8)
        dummy_mask[:, :5, :5] = 1
        return dict(
            bboxes=torch.from_numpy(bboxes),
            scores=torch.from_numpy(scores),
            labels=torch.from_numpy(labels),
            masks=torch.from_numpy(dummy_mask))

    def setUp(self):
        # fresh temporary directory for the generated annotation files
        self.tmp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.tmp_dir.cleanup()

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_init(self):
        """An unknown metric name must raise ``KeyError``."""
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_lvis_json(fake_json_file)
        with self.assertRaisesRegex(KeyError, 'metric should be one of'):
            LVISMetric(ann_file=fake_json_file, metric='unknown')

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_evaluate(self):
        """Test bbox/segm evaluation and the metric_items filter."""
        # create dummy data
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_lvis_json(fake_json_file)
        dummy_pred = self._create_dummy_results()
        # test single lvis dataset evaluation
        lvis_metric = LVISMetric(
            ann_file=fake_json_file,
            classwise=False,
            outfile_prefix=f'{self.tmp_dir.name}/test')
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        lvis_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = lvis_metric.evaluate(size=1)
        # APr is -1 because the dummy dataset has no 'rare' category
        target = {
            'lvis/bbox_AP': 1.0,
            'lvis/bbox_AP50': 1.0,
            'lvis/bbox_AP75': 1.0,
            'lvis/bbox_APs': 1.0,
            'lvis/bbox_APm': 1.0,
            'lvis/bbox_APl': 1.0,
            'lvis/bbox_APr': -1.0,
            'lvis/bbox_APc': 1.0,
            'lvis/bbox_APf': 1.0
        }
        self.assertDictEqual(eval_results, target)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
        # test box and segm lvis dataset evaluation
        lvis_metric = LVISMetric(
            ann_file=fake_json_file,
            metric=['bbox', 'segm'],
            classwise=False,
            outfile_prefix=f'{self.tmp_dir.name}/test')
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        lvis_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = lvis_metric.evaluate(size=1)
        target = {
            'lvis/bbox_AP': 1.0,
            'lvis/bbox_AP50': 1.0,
            'lvis/bbox_AP75': 1.0,
            'lvis/bbox_APs': 1.0,
            'lvis/bbox_APm': 1.0,
            'lvis/bbox_APl': 1.0,
            'lvis/bbox_APr': -1.0,
            'lvis/bbox_APc': 1.0,
            'lvis/bbox_APf': 1.0,
            'lvis/segm_AP': 1.0,
            'lvis/segm_AP50': 1.0,
            'lvis/segm_AP75': 1.0,
            'lvis/segm_APs': 1.0,
            'lvis/segm_APm': 1.0,
            'lvis/segm_APl': 1.0,
            'lvis/segm_APr': -1.0,
            'lvis/segm_APc': 1.0,
            'lvis/segm_APf': 1.0
        }
        self.assertDictEqual(eval_results, target)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.bbox.json')))
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.segm.json')))
        # test invalid custom metric_items
        with self.assertRaisesRegex(
                KeyError,
                "metric should be one of 'bbox', 'segm', 'proposal', "
                "'proposal_fast', but got invalid."):
            lvis_metric = LVISMetric(
                ann_file=fake_json_file, metric=['invalid'])
            lvis_metric.evaluate(size=1)
        # test custom metric_items
        lvis_metric = LVISMetric(ann_file=fake_json_file, metric_items=['APm'])
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        lvis_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = lvis_metric.evaluate(size=1)
        target = {
            'lvis/bbox_APm': 1.0,
        }
        self.assertDictEqual(eval_results, target)

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_classwise_evaluate(self):
        """``classwise=True`` must add per-category precision entries."""
        # create dummy data
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_lvis_json(fake_json_file)
        dummy_pred = self._create_dummy_results()
        # test single lvis dataset evaluation
        lvis_metric = LVISMetric(
            ann_file=fake_json_file, metric='bbox', classwise=True)
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        lvis_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = lvis_metric.evaluate(size=1)
        target = {
            'lvis/bbox_AP': 1.0,
            'lvis/bbox_AP50': 1.0,
            'lvis/bbox_AP75': 1.0,
            'lvis/bbox_APs': 1.0,
            'lvis/bbox_APm': 1.0,
            'lvis/bbox_APl': 1.0,
            'lvis/bbox_APr': -1.0,
            'lvis/bbox_APc': 1.0,
            'lvis/bbox_APf': 1.0,
            'lvis/aerosol_can_precision': 1.0,
            'lvis/air_conditioner_precision': 1.0,
        }
        self.assertDictEqual(eval_results, target)

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_manually_set_iou_thrs(self):
        """User-supplied IoU thresholds must be stored unchanged."""
        # create dummy data
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_lvis_json(fake_json_file)
        # test single lvis dataset evaluation
        lvis_metric = LVISMetric(
            ann_file=fake_json_file, metric='bbox', iou_thrs=[0.3, 0.6])
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        self.assertEqual(lvis_metric.iou_thrs, [0.3, 0.6])

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_fast_eval_recall(self):
        """Verify average-recall values of the ``proposal_fast`` metric."""
        # create dummy data
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_lvis_json(fake_json_file)
        dummy_pred = self._create_dummy_results()
        # test default proposal nums
        lvis_metric = LVISMetric(
            ann_file=fake_json_file, metric='proposal_fast')
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        lvis_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = lvis_metric.evaluate(size=1)
        target = {'lvis/AR@100': 1.0, 'lvis/AR@300': 1.0, 'lvis/AR@1000': 1.0}
        self.assertDictEqual(eval_results, target)
        # test manually set proposal nums
        lvis_metric = LVISMetric(
            ann_file=fake_json_file,
            metric='proposal_fast',
            proposal_nums=(2, 4))
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        lvis_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = lvis_metric.evaluate(size=1)
        target = {'lvis/AR@2': 0.5, 'lvis/AR@4': 1.0}
        self.assertDictEqual(eval_results, target)

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_evaluate_proposal(self):
        """Test class-agnostic ``proposal`` metric evaluation."""
        # create dummy data
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_lvis_json(fake_json_file)
        dummy_pred = self._create_dummy_results()
        lvis_metric = LVISMetric(ann_file=fake_json_file, metric='proposal')
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        lvis_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = lvis_metric.evaluate(size=1)
        target = {
            'lvis/AR@300': 1.0,
            'lvis/ARs@300': 1.0,
            'lvis/ARm@300': 1.0,
            'lvis/ARl@300': 1.0
        }
        self.assertDictEqual(eval_results, target)

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_empty_results(self):
        """An empty prediction set must not crash the evaluation."""
        # create dummy data
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_lvis_json(fake_json_file)
        lvis_metric = LVISMetric(ann_file=fake_json_file, metric='bbox')
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        bboxes = np.zeros((0, 4))
        labels = np.array([])
        scores = np.array([])
        dummy_mask = np.zeros((0, 10, 10), dtype=np.uint8)
        empty_pred = dict(
            bboxes=torch.from_numpy(bboxes),
            scores=torch.from_numpy(scores),
            labels=torch.from_numpy(labels),
            masks=torch.from_numpy(dummy_mask))
        lvis_metric.process(
            {},
            [dict(pred_instances=empty_pred, img_id=0, ori_shape=(640, 640))])
        # lvis api Index error will be caught
        lvis_metric.evaluate(size=1)

    @unittest.skipIf(lvis is None, 'lvis is not installed.')
    def test_format_only(self):
        """``format_only=True`` dumps results and returns an empty dict."""
        # create dummy data
        fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
        self._create_dummy_lvis_json(fake_json_file)
        dummy_pred = self._create_dummy_results()
        # an output prefix is mandatory in format-only mode
        with self.assertRaises(AssertionError):
            LVISMetric(
                ann_file=fake_json_file,
                classwise=False,
                format_only=True,
                outfile_prefix=None)
        lvis_metric = LVISMetric(
            ann_file=fake_json_file,
            metric='bbox',
            classwise=False,
            format_only=True,
            outfile_prefix=f'{self.tmp_dir.name}/test')
        lvis_metric.dataset_meta = dict(
            classes=['aerosol_can', 'air_conditioner'])
        lvis_metric.process(
            {},
            [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])
        eval_results = lvis_metric.evaluate(size=1)
        self.assertDictEqual(eval_results, dict())
        self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.bbox.json'))
| 13,025 | 34.785714 | 79 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_pvt.py | import pytest
import torch
from mmdet.models.backbones.pvt import (PVTEncoderLayer,
PyramidVisionTransformer,
PyramidVisionTransformerV2)
def test_pvt_block():
    """A PVTEncoderLayer must keep the token tensor shape unchanged."""
    layer = PVTEncoderLayer(
        embed_dims=64, num_heads=4, feedforward_channels=256)
    # structure checks
    assert layer.ffn.embed_dims == 64
    assert layer.attn.num_heads == 4
    assert layer.ffn.feedforward_channels == 256
    # forward pass on a 56x56 token grid
    tokens = torch.randn(1, 56 * 56, 64)
    out = layer(tokens, (56, 56))
    assert out.shape == torch.Size([1, 56 * 56, 64])
def test_pvt():
    """Test the PVT backbone: argument checks and output shapes."""
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        PyramidVisionTransformer(pretrained=123)
    with pytest.raises(AssertionError):
        # pretrain_img_size must be an int or a 2-tuple
        PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))
    # absolute position embedding branch
    model = PyramidVisionTransformer(
        pretrain_img_size=224, use_abs_pos_embed=True)
    model.init_weights()
    model(torch.randn((1, 3, 224, 224)))
    # output shapes for normal and non-divisible input resolutions
    cases = [
        ((1, 3, 32, 32), [(1, 64, 8, 8), (1, 128, 4, 4), (1, 320, 2, 2),
                          (1, 512, 1, 1)]),
        ((1, 3, 33, 33), [(1, 64, 8, 8), (1, 128, 4, 4), (1, 320, 2, 2),
                          (1, 512, 1, 1)]),
        ((1, 3, 112, 137), [(1, 64, 28, 34), (1, 128, 14, 17),
                            (1, 320, 7, 8), (1, 512, 3, 4)]),
    ]
    for in_shape, out_shapes in cases:
        model = PyramidVisionTransformer()
        outs = model(torch.randn(in_shape))
        assert [tuple(out.shape) for out in outs] == out_shapes
def test_pvtv2():
    """Test the PVTv2 backbone: argument checks and output shapes."""
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        PyramidVisionTransformerV2(pretrained=123)
    with pytest.raises(AssertionError):
        # pretrain_img_size must be an int or a 2-tuple
        PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224))
    # output shapes for normal and non-divisible input resolutions
    cases = [
        ((1, 3, 32, 32), [(1, 64, 8, 8), (1, 128, 4, 4), (1, 320, 2, 2),
                          (1, 512, 1, 1)]),
        ((1, 3, 31, 31), [(1, 64, 8, 8), (1, 128, 4, 4), (1, 320, 2, 2),
                          (1, 512, 1, 1)]),
        ((1, 3, 112, 137), [(1, 64, 28, 35), (1, 128, 14, 18),
                            (1, 320, 7, 9), (1, 512, 4, 5)]),
    ]
    for in_shape, out_shapes in cases:
        model = PyramidVisionTransformerV2()
        outs = model(torch.randn(in_shape))
        assert [tuple(out.shape) for out in outs] == out_shapes
| 3,332 | 31.048077 | 69 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_hourglass.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
    """Test HourglassNet argument validation and forward shapes."""
    with pytest.raises(AssertionError):
        # num_stacks must be positive
        HourglassNet(num_stacks=0)
    with pytest.raises(AssertionError):
        # stage_channels and stage_blocks must have equal length
        HourglassNet(
            stage_channels=[256, 256, 384, 384, 384],
            stage_blocks=[2, 2, 2, 2, 2, 4])
    with pytest.raises(AssertionError):
        # stage_channels must be longer than downsample_times
        HourglassNet(
            downsample_times=5,
            stage_channels=[256, 256, 384, 384, 384],
            stage_blocks=[2, 2, 2, 2, 2])
    # HourglassNet-52 (one stack) and HourglassNet-104 (two stacks)
    for num_stacks in (1, 2):
        model = HourglassNet(
            num_stacks=num_stacks,
            stage_channels=(64, 64, 96, 96, 96, 128),
            feat_channel=64)
        model.train()
        feats = model(torch.randn(1, 3, 256, 256))
        assert len(feats) == num_stacks
        for feat in feats:
            assert feat.shape == torch.Size([1, 64, 64, 64])
| 1,464 | 28.3 | 65 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_res2net.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block
def test_res2net_bottle2neck():
    """Test the Bottle2neck block of Res2Net."""
    with pytest.raises(AssertionError):
        # only 'pytorch' and 'caffe' styles are supported
        Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')
    with pytest.raises(AssertionError):
        # scales must be larger than 1
        Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')
    # structure check
    block = Bottle2neck(
        64, 64, base_width=26, stride=2, scales=4, style='pytorch')
    assert block.scales == 4
    # DCN is incompatible with an explicit conv_cfg
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        Bottle2neck(
            64,
            64,
            base_width=26,
            scales=4,
            dcn=dcn,
            conv_cfg=dict(type='Conv'))
    Bottle2neck(64, 64, dcn=dcn)
    # forward pass preserves the spatial resolution
    block = Bottle2neck(64, 16, base_width=26, scales=4)
    out = block(torch.randn(1, 64, 56, 56))
    assert out.shape == torch.Size([1, 64, 56, 56])
def test_res2net_backbone():
    """Test the Res2Net backbone."""
    with pytest.raises(KeyError):
        # only depths 50, 101 and 152 are defined
        Res2Net(depth=18)
    model = Res2Net(depth=50, scales=4, base_width=26)
    # every Bottle2neck must carry the configured number of scales
    for module in model.modules():
        if is_block(module):
            assert module.scales == 4
    model.train()
    outs = model(torch.randn(1, 3, 32, 32))
    expected = [(1, 256, 8, 8), (1, 512, 4, 4), (1, 1024, 2, 2),
                (1, 2048, 1, 1)]
    assert len(outs) == 4
    for out, shape in zip(outs, expected):
        assert out.shape == torch.Size(shape)
| 1,976 | 30.380952 | 72 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_swin.py | import pytest
import torch
from mmdet.models.backbones.swin import SwinBlock, SwinTransformer
def test_swin_block():
    """Test SwinBlock structure and forward."""
    tokens = torch.randn(1, 56 * 56, 64)
    # plain block: structure and shape-preserving forward
    block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256)
    assert block.ffn.embed_dims == 64
    assert block.attn.w_msa.num_heads == 4
    assert block.ffn.feedforward_channels == 256
    assert block(tokens, (56, 56)).shape == torch.Size([1, 56 * 56, 64])
    # block with gradient checkpointing enabled
    block = SwinBlock(
        embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
    assert block.with_cp
    assert block(tokens, (56, 56)).shape == torch.Size([1, 56 * 56, 64])
def test_swin_transformer():
    """Test the Swin Transformer backbone."""
    with pytest.raises(TypeError):
        # Pretrained arg must be str or None.
        SwinTransformer(pretrained=123)
    with pytest.raises(AssertionError):
        # Swin uses non-overlapping patch embedding, so the stride of the
        # patch embedding must equal the patch size.
        SwinTransformer(strides=(2, 2, 2, 2), patch_size=4)
    with pytest.raises(AssertionError):
        # pretrain_img_size must be an int or a 2-tuple
        SwinTransformer(pretrain_img_size=(224, 224, 224))
    inputs = torch.randn((1, 3, 224, 224))
    # absolute position embedding branch
    model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True)
    model.init_weights()
    model(inputs)
    # patch norm disabled
    model = SwinTransformer(patch_norm=False)
    model(inputs)
    # output shapes for normal and non-divisible input resolutions
    cases = [
        ((1, 3, 32, 32), [(1, 96, 8, 8), (1, 192, 4, 4), (1, 384, 2, 2),
                          (1, 768, 1, 1)]),
        ((1, 3, 31, 31), [(1, 96, 8, 8), (1, 192, 4, 4), (1, 384, 2, 2),
                          (1, 768, 1, 1)]),
        ((1, 3, 112, 137), [(1, 96, 28, 35), (1, 192, 14, 18),
                            (1, 384, 7, 9), (1, 768, 4, 5)]),
    ]
    for in_shape, out_shapes in cases:
        model = SwinTransformer()
        outs = model(torch.randn(in_shape))
        assert [tuple(out.shape) for out in outs] == out_shapes
    # all parameters are frozen when frozen_stages == 4
    model = SwinTransformer(frozen_stages=4)
    model.train()
    for param in model.parameters():
        assert not param.requires_grad
| 2,648 | 30.915663 | 79 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_efficientnet.py | import pytest
import torch
from mmdet.models.backbones import EfficientNet
def test_efficientnet_backbone():
    """Test the EfficientNet backbone."""
    with pytest.raises(AssertionError):
        # the arch name must be a key of EfficientNet.arch_settings
        EfficientNet(arch='c3')
    model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6))
    model.train()
    feats = model(torch.randn(2, 3, 32, 32))
    expected = [(2, 32, 16, 16), (2, 16, 16, 16), (2, 24, 8, 8),
                (2, 40, 4, 4), (2, 112, 2, 2), (2, 320, 1, 1),
                (2, 1280, 1, 1)]
    assert len(feats) == 7
    for feat, shape in zip(feats, expected):
        assert feat.shape == torch.Size(shape)
| 859 | 32.076923 | 73 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.ops import DeformConv2dPack
from torch.nn.modules import AvgPool2d, GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones import ResNet, ResNetV1d
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.layers import ResLayer, SimplifiedBasicBlock
from .utils import check_norm_state, is_block, is_norm
def assert_params_all_zeros(module) -> bool:
    """Check if the parameters of the module is all zeros.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether the parameters of the module is all zeros.
    """
    # Weight must be elementwise close to a same-shaped zero tensor.
    weight = module.weight.data
    weight_is_zero = weight.allclose(weight.new_zeros(weight.size()))
    # A missing or disabled bias counts as "all zeros".
    bias = getattr(module, 'bias', None)
    if bias is None:
        bias_is_zero = True
    else:
        bias_data = bias.data
        bias_is_zero = bias_data.allclose(
            bias_data.new_zeros(bias_data.size()))
    return weight_is_zero and bias_is_zero
def test_resnet_basic_block():
    """Test BasicBlock: unsupported options are rejected, and both the plain
    and checkpointed forward passes preserve the input shape."""
    with pytest.raises(AssertionError):
        # Not implemented yet.
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        BasicBlock(64, 64, dcn=dcn)
    with pytest.raises(AssertionError):
        # Not implemented yet.
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        BasicBlock(64, 64, plugins=plugins)
    with pytest.raises(AssertionError):
        # Not implemented yet
        plugins = [
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]
        BasicBlock(64, 64, plugins=plugins)
    # test BasicBlock structure and forward
    block = BasicBlock(64, 64)
    assert block.conv1.in_channels == 64
    assert block.conv1.out_channels == 64
    assert block.conv1.kernel_size == (3, 3)
    assert block.conv2.in_channels == 64
    assert block.conv2.out_channels == 64
    assert block.conv2.kernel_size == (3, 3)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    # With stride 1 and equal in/out channels the spatial size is unchanged.
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test BasicBlock with checkpoint forward
    block = BasicBlock(64, 64, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_bottleneck():
    """Test Bottleneck: invalid configs raise, style/DCN options place the
    stride and conv type correctly, and plugin attachment wires the expected
    channel counts at each position."""
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        Bottleneck(64, 64, style='tensorflow')
    with pytest.raises(AssertionError):
        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv4')
        ]
        Bottleneck(64, 16, plugins=plugins)
    with pytest.raises(AssertionError):
        # Need to specify different postfix to avoid duplicate plugin name
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3'),
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        Bottleneck(64, 16, plugins=plugins)
    with pytest.raises(KeyError):
        # Plugin type is not supported
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        Bottleneck(64, 16, plugins=plugins)
    # Test Bottleneck with checkpoint forward
    block = Bottleneck(64, 16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck style: 'pytorch' puts the stride on conv2,
    # 'caffe' puts it on conv1.
    block = Bottleneck(64, 64, stride=2, style='pytorch')
    assert block.conv1.stride == (1, 1)
    assert block.conv2.stride == (2, 2)
    block = Bottleneck(64, 64, stride=2, style='caffe')
    assert block.conv1.stride == (2, 2)
    assert block.conv2.stride == (1, 1)
    # Test Bottleneck DCN: conv2 becomes a deformable conv; a custom
    # conv_cfg is incompatible with dcn.
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
    block = Bottleneck(64, 64, dcn=dcn)
    assert isinstance(block.conv2, DeformConv2dPack)
    # Test Bottleneck forward
    block = Bottleneck(64, 16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    # After conv3 the channel count is planes * expansion (16 * 4 = 64).
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    # After conv2 the channel count equals planes (16).
    assert block.gen_attention_block.in_channels == 16
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    assert block.nonlocal_block.in_channels == 16
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    # The numeric postfix distinguishes the duplicate plugin attributes.
    assert block.context_block1.in_channels == 16
    assert block.context_block2.in_channels == 64
    assert block.context_block3.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_simplied_basic_block():
    """Test SimplifiedBasicBlock: DCN, plugins and checkpointing are rejected;
    forward works both with and without normalization layers.

    NOTE(review): "simplied" looks like a typo for "simplified"; renaming
    would change the collected test id, so the name is kept as-is.
    """
    with pytest.raises(AssertionError):
        # Not implemented yet.
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        SimplifiedBasicBlock(64, 64, dcn=dcn)
    with pytest.raises(AssertionError):
        # Not implemented yet.
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        SimplifiedBasicBlock(64, 64, plugins=plugins)
    with pytest.raises(AssertionError):
        # Not implemented yet
        plugins = [
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]
        SimplifiedBasicBlock(64, 64, plugins=plugins)
    with pytest.raises(AssertionError):
        # Not implemented yet
        SimplifiedBasicBlock(64, 64, with_cp=True)
    # test SimplifiedBasicBlock structure and forward
    block = SimplifiedBasicBlock(64, 64)
    assert block.conv1.in_channels == 64
    assert block.conv1.out_channels == 64
    assert block.conv1.kernel_size == (3, 3)
    assert block.conv2.in_channels == 64
    assert block.conv2.out_channels == 64
    assert block.conv2.kernel_size == (3, 3)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # test SimplifiedBasicBlock without norm
    block = SimplifiedBasicBlock(64, 64, norm_cfg=None)
    assert block.norm1 is None
    assert block.norm2 is None
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_res_layer():
    """Test ResLayer: downsample placement, stride handling, avg_down and
    downsample_first options, and the resulting output shapes."""
    # Test ResLayer of 3 Bottleneck w/o downsample
    layer = ResLayer(Bottleneck, 64, 16, 3)
    assert len(layer) == 3
    assert layer[0].conv1.in_channels == 64
    assert layer[0].conv1.out_channels == 16
    for i in range(1, len(layer)):
        assert layer[i].conv1.in_channels == 64
        assert layer[i].conv1.out_channels == 16
    for i in range(len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
    # Test ResLayer of 3 Bottleneck with downsample
    # (in_channels != planes * expansion, so the first block downsamples).
    layer = ResLayer(Bottleneck, 64, 64, 3)
    assert layer[0].downsample[0].out_channels == 256
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 56, 56])
    # Test ResLayer of 3 Bottleneck with stride=2
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
    assert layer[0].downsample[0].out_channels == 256
    assert layer[0].downsample[0].stride == (2, 2)
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 28, 28])
    # Test ResLayer of 3 Bottleneck with stride=2 and average downsample
    # (avg_down moves the striding into an AvgPool2d before a 1x1 conv).
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
    assert isinstance(layer[0].downsample[0], AvgPool2d)
    assert layer[0].downsample[1].out_channels == 256
    assert layer[0].downsample[1].stride == (1, 1)
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 28, 28])
    # Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False
    # (the LAST block carries the downsample instead of the first).
    layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)
    assert layer[2].downsample[0].out_channels == 64
    assert layer[2].downsample[0].stride == (2, 2)
    for i in range(len(layer) - 1):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 28, 28])
def test_resnest_stem():
    """Test stem channel configuration of ResNet and ResNetV1d.

    NOTE(review): despite the name, this exercises ``ResNet``/``ResNetV1d``
    (not ResNeSt); renaming would change the collected test id.
    """
    # Test default stem_channels
    model = ResNet(50)
    assert model.stem_channels == 64
    assert model.conv1.out_channels == 64
    assert model.norm1.num_features == 64
    # Test default stem_channels, with base_channels=3
    # (stem_channels defaults to base_channels when unset).
    model = ResNet(50, base_channels=3)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3
    # Test stem_channels=3
    model = ResNet(50, stem_channels=3)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3
    # Test stem_channels=3, with base_channels=2
    # (an explicit stem_channels overrides base_channels).
    model = ResNet(50, stem_channels=3, base_channels=2)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3
    # Test V1d stem_channels: the deep stem uses three convs, the first two
    # with stem_channels // 2 outputs, the last with stem_channels.
    model = ResNetV1d(depth=50, stem_channels=6)
    model.train()
    assert model.stem[0].out_channels == 3
    assert model.stem[1].num_features == 3
    assert model.stem[3].out_channels == 3
    assert model.stem[4].num_features == 3
    assert model.stem[6].out_channels == 6
    assert model.stem[7].num_features == 6
    assert model.layer1[0].conv1.in_channels == 6
def test_resnet_backbone():
    """Test resnet backbone.

    Covers: invalid-config errors, norm_eval, stage freezing, forward output
    shapes for several depths/base_channels, GN/BN norm layers, per-stage
    plugins, zero-init of residual norms, and the ResNetV1d variant.
    """
    with pytest.raises(KeyError):
        # ResNet depth should be in [18, 34, 50, 101, 152]
        ResNet(20)
    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=0)
    with pytest.raises(AssertionError):
        # len(stage_with_dcn) == num_stages
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        ResNet(50, dcn=dcn, stage_with_dcn=(True, ))
    with pytest.raises(AssertionError):
        # len(stage_with_plugin) == num_stages
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True),
                position='after_conv3')
        ]
        ResNet(50, plugins=plugins)
    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=5)
    with pytest.raises(AssertionError):
        # len(strides) == len(dilations) == num_stages
        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = ResNet(50, pretrained=0)
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        ResNet(50, style='tensorflow')
    # Test ResNet50 norm_eval=True: BN layers stay in eval mode during train()
    model = ResNet(50, norm_eval=True, base_channels=1)
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test ResNet50 with torchvision pretrained weight
    model = ResNet(
        depth=50, norm_eval=True, pretrained='torchvision://resnet50')
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test ResNet50 with first stage frozen: stem and layer1 must have
    # requires_grad=False and frozen BN statistics.
    frozen_stages = 1
    model = ResNet(50, frozen_stages=frozen_stages, base_channels=1)
    model.train()
    assert model.norm1.training is False
    for layer in [model.conv1, model.norm1]:
        for param in layer.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
    # Test ResNet50V1d with first stage frozen
    model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)
    assert len(model.stem) == 9
    model.train()
    assert check_norm_state(model.stem, False)
    for param in model.stem.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
    # Test ResNet18 forward
    model = ResNet(18)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 64, 8, 8])
    assert feat[1].shape == torch.Size([1, 128, 4, 4])
    assert feat[2].shape == torch.Size([1, 256, 2, 2])
    assert feat[3].shape == torch.Size([1, 512, 1, 1])
    # Test ResNet18 with checkpoint forward
    model = ResNet(18, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    # Test ResNet50 with BatchNorm forward
    model = ResNet(50, base_channels=1)
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])
    # Test ResNet50 with layers 1, 2, 3 out forward
    model = ResNet(50, out_indices=(0, 1, 2), base_channels=1)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    # Test ResNet50 with checkpoint forward
    model = ResNet(50, with_cp=True, base_channels=1)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])
    # Test ResNet50 with GroupNorm forward
    model = ResNet(
        50,
        base_channels=4,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 16, 8, 8])
    assert feat[1].shape == torch.Size([1, 32, 4, 4])
    assert feat[2].shape == torch.Size([1, 64, 2, 2])
    assert feat[3].shape == torch.Size([1, 128, 1, 1])
    # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, True, True, True),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins, base_channels=8)
    # Verify per-stage plugin presence and channel counts: the 'stages'
    # tuple controls which layers receive each plugin.
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'gen_attention_block')
            assert m.nonlocal_block.in_channels == 8
    for m in model.layer2.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 16
            assert m.gen_attention_block.in_channels == 16
            assert m.context_block.in_channels == 64
    for m in model.layer3.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 32
            assert m.gen_attention_block.in_channels == 32
            assert m.context_block.in_channels == 128
    for m in model.layer4.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 64
            assert m.gen_attention_block.in_channels == 64
            assert not hasattr(m, 'context_block')
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 32, 8, 8])
    assert feat[1].shape == torch.Size([1, 64, 4, 4])
    assert feat[2].shape == torch.Size([1, 128, 2, 2])
    assert feat[3].shape == torch.Size([1, 256, 1, 1])
    # Test ResNet50 with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3 in layers 2, 3, 4
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            stages=(False, True, True, False),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins, base_channels=8)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    for m in model.layer2.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 64
            assert m.context_block2.in_channels == 64
    for m in model.layer3.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 128
            assert m.context_block2.in_channels == 128
    for m in model.layer4.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 32, 8, 8])
    assert feat[1].shape == torch.Size([1, 64, 4, 4])
    assert feat[2].shape == torch.Size([1, 128, 2, 2])
    assert feat[3].shape == torch.Size([1, 256, 1, 1])
    # Test ResNet50 zero initialization of residual: the last norm layer of
    # each block must be zero-initialized so each residual branch starts as
    # an identity mapping.
    model = ResNet(50, zero_init_residual=True, base_channels=1)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert assert_params_all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert assert_params_all_zeros(m.norm2)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 4, 8, 8])
    assert feat[1].shape == torch.Size([1, 8, 4, 4])
    assert feat[2].shape == torch.Size([1, 16, 2, 2])
    assert feat[3].shape == torch.Size([1, 32, 1, 1])
    # Test ResNetV1d forward
    model = ResNetV1d(depth=50, base_channels=2)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 8, 8, 8])
    assert feat[1].shape == torch.Size([1, 16, 4, 4])
    assert feat[2].shape == torch.Size([1, 32, 2, 2])
    assert feat[3].shape == torch.Size([1, 64, 1, 1])
| 23,003 | 34.120611 | 78 | py |
ERD | ERD-main/tests/test_models/test_backbones/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.layers import SimplifiedBasicBlock
def is_block(modules):
    """Check if is ResNet building block."""
    block_types = (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
                   SimplifiedBasicBlock)
    return isinstance(modules, block_types)
def is_norm(modules):
    """Check if is one of the norms."""
    return isinstance(modules, (GroupNorm, _BatchNorm))
def check_norm_state(modules, train_state):
    """Check if norm layer is in correct train state."""
    # Only batch-norm layers are inspected; every one of them must match
    # the requested training flag.
    return all(mod.training == train_state for mod in modules
               if isinstance(mod, _BatchNorm))
| 1,027 | 30.151515 | 77 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_mobilenet_v2.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.mobilenet_v2 import MobileNetV2
from .utils import check_norm_state, is_block, is_norm
def test_mobilenetv2_backbone():
    """Test MobileNetV2 backbone: config validation, stage freezing,
    norm_eval, several widen_factor settings, ReLU activation, BN/GN norm
    layers, custom out_indices and checkpointing."""
    with pytest.raises(ValueError):
        # frozen_stages must in range(-1, 8)
        MobileNetV2(frozen_stages=8)
    with pytest.raises(ValueError):
        # out_indices in range(-1, 8)
        MobileNetV2(out_indices=[8])
    # Test MobileNetV2 with first stage frozen
    frozen_stages = 1
    model = MobileNetV2(frozen_stages=frozen_stages)
    model.train()
    for mod in model.conv1.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
    # Test MobileNetV2 with norm_eval=True
    model = MobileNetV2(norm_eval=True)
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test MobileNetV2 forward with widen_factor=1.0
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))
    model.train()
    assert check_norm_state(model.modules(), True)
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 8
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    assert feat[7].shape == torch.Size((1, 1280, 7, 7))
    # Test MobileNetV2 forward with widen_factor=0.5
    # (channel counts scale with the widen factor)
    model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 8, 112, 112))
    assert feat[1].shape == torch.Size((1, 16, 56, 56))
    assert feat[2].shape == torch.Size((1, 16, 28, 28))
    assert feat[3].shape == torch.Size((1, 32, 14, 14))
    assert feat[4].shape == torch.Size((1, 48, 14, 14))
    assert feat[5].shape == torch.Size((1, 80, 7, 7))
    assert feat[6].shape == torch.Size((1, 160, 7, 7))
    # Test MobileNetV2 forward with widen_factor=2.0
    model = MobileNetV2(widen_factor=2.0, out_indices=range(0, 8))
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert feat[0].shape == torch.Size((1, 32, 112, 112))
    assert feat[1].shape == torch.Size((1, 48, 56, 56))
    assert feat[2].shape == torch.Size((1, 64, 28, 28))
    assert feat[3].shape == torch.Size((1, 128, 14, 14))
    assert feat[4].shape == torch.Size((1, 192, 14, 14))
    assert feat[5].shape == torch.Size((1, 320, 7, 7))
    assert feat[6].shape == torch.Size((1, 640, 7, 7))
    assert feat[7].shape == torch.Size((1, 2560, 7, 7))
    # Test MobileNetV2 forward with dict(type='ReLU')
    model = MobileNetV2(
        widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    # Test MobileNetV2 with BatchNorm forward
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    # Test MobileNetV2 with GroupNorm forward
    model = MobileNetV2(
        widen_factor=1.0,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
        out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    # Test MobileNetV2 with layers 1, 3, 5 out forward
    model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 32, 28, 28))
    assert feat[2].shape == torch.Size((1, 96, 14, 14))
    # Test MobileNetV2 with checkpoint forward
    model = MobileNetV2(
        widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.train()
    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
| 6,546 | 36.626437 | 77 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_hrnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hrnet import HRModule, HRNet
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
@pytest.mark.parametrize('block', [BasicBlock, Bottleneck])
def test_hrmodule(block):
    """Test HRModule with both block types: multiscale output keeps one
    feature map per branch, single-scale output keeps only the first."""
    # Test multiscale forward
    num_channles = (32, 64)
    # Input channels must account for the block's channel expansion factor.
    in_channels = [c * block.expansion for c in num_channles]
    hrmodule = HRModule(
        num_branches=2,
        blocks=block,
        in_channels=in_channels,
        num_blocks=(4, 4),
        num_channels=num_channles,
    )
    feats = [
        torch.randn(1, in_channels[0], 64, 64),
        torch.randn(1, in_channels[1], 32, 32)
    ]
    feats = hrmodule(feats)
    assert len(feats) == 2
    assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
    assert feats[1].shape == torch.Size([1, in_channels[1], 32, 32])
    # Test single scale forward (multiscale_output=False keeps only the
    # highest-resolution branch)
    num_channles = (32, 64)
    in_channels = [c * block.expansion for c in num_channles]
    hrmodule = HRModule(
        num_branches=2,
        blocks=block,
        in_channels=in_channels,
        num_blocks=(4, 4),
        num_channels=num_channles,
        multiscale_output=False,
    )
    feats = [
        torch.randn(1, in_channels[0], 64, 64),
        torch.randn(1, in_channels[1], 32, 32)
    ]
    feats = hrmodule(feats)
    assert len(feats) == 1
    assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
def test_hrnet_backbone():
    """Test HRNet backbone: invalid stage configs raise, and the hrnetv2p_w32
    config produces the expected multiscale / single-scale outputs."""
    # only have 3 stages
    extra = dict(
        stage1=dict(
            num_modules=1,
            num_branches=1,
            block='BOTTLENECK',
            num_blocks=(4, ),
            num_channels=(64, )),
        stage2=dict(
            num_modules=1,
            num_branches=2,
            block='BASIC',
            num_blocks=(4, 4),
            num_channels=(32, 64)),
        stage3=dict(
            num_modules=4,
            num_branches=3,
            block='BASIC',
            num_blocks=(4, 4, 4),
            num_channels=(32, 64, 128)))
    with pytest.raises(AssertionError):
        # HRNet now only support 4 stages
        HRNet(extra=extra)
    extra['stage4'] = dict(
        num_modules=3,
        num_branches=3,  # should be 4
        block='BASIC',
        num_blocks=(4, 4, 4, 4),
        num_channels=(32, 64, 128, 256))
    with pytest.raises(AssertionError):
        # len(num_blocks) should equal num_branches
        HRNet(extra=extra)
    extra['stage4']['num_branches'] = 4
    # Test hrnetv2p_w32
    model = HRNet(extra=extra)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 256, 256)
    feats = model(imgs)
    # One feature map per branch; resolution halves at each branch.
    assert len(feats) == 4
    assert feats[0].shape == torch.Size([1, 32, 64, 64])
    assert feats[3].shape == torch.Size([1, 256, 8, 8])
    # Test single scale output
    model = HRNet(extra=extra, multiscale_output=False)
    model.init_weights()
    model.train()
    imgs = torch.randn(1, 3, 256, 256)
    feats = model(imgs)
    assert len(feats) == 1
    assert feats[0].shape == torch.Size([1, 32, 64, 64])
| 3,089 | 26.589286 | 68 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_csp_darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
    """Test CSPDarknet backbone: config validation, stage freezing,
    norm_eval, P5/P6 variants, ReLU activation, BN layers and a custom
    architecture override."""
    with pytest.raises(ValueError):
        # frozen_stages must in range(-1, len(arch_setting) + 1)
        CSPDarknet(frozen_stages=6)
    with pytest.raises(AssertionError):
        # out_indices in range(len(arch_setting) + 1)
        CSPDarknet(out_indices=[6])
    # Test CSPDarknet with first stage frozen
    frozen_stages = 1
    model = CSPDarknet(frozen_stages=frozen_stages)
    model.train()
    for mod in model.stem.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'stage{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False
    # Test CSPDarknet with norm_eval=True
    model = CSPDarknet(norm_eval=True)
    model.train()
    assert check_norm_state(model.modules(), False)
    # Test CSPDarknet-P5 forward with widen_factor=0.25
    model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 16, 32, 32))
    assert feat[1].shape == torch.Size((1, 32, 16, 16))
    assert feat[2].shape == torch.Size((1, 64, 8, 8))
    assert feat[3].shape == torch.Size((1, 128, 4, 4))
    assert feat[4].shape == torch.Size((1, 256, 2, 2))
    # Test CSPDarknet-P6 forward with widen_factor=0.25
    model = CSPDarknet(
        arch='P6',
        widen_factor=0.25,
        out_indices=range(0, 6),
        spp_kernal_sizes=(3, 5, 7))
    model.train()
    imgs = torch.randn(1, 3, 128, 128)
    feat = model(imgs)
    assert feat[0].shape == torch.Size((1, 16, 64, 64))
    assert feat[1].shape == torch.Size((1, 32, 32, 32))
    assert feat[2].shape == torch.Size((1, 64, 16, 16))
    assert feat[3].shape == torch.Size((1, 128, 8, 8))
    assert feat[4].shape == torch.Size((1, 192, 4, 4))
    assert feat[5].shape == torch.Size((1, 256, 2, 2))
    # Test CSPDarknet forward with dict(type='ReLU')
    model = CSPDarknet(
        widen_factor=0.125, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 8, 32, 32))
    assert feat[1].shape == torch.Size((1, 16, 16, 16))
    assert feat[2].shape == torch.Size((1, 32, 8, 8))
    assert feat[3].shape == torch.Size((1, 64, 4, 4))
    assert feat[4].shape == torch.Size((1, 128, 2, 2))
    # Test CSPDarknet with BatchNorm forward
    model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.train()
    imgs = torch.randn(1, 3, 64, 64)
    feat = model(imgs)
    assert len(feat) == 5
    assert feat[0].shape == torch.Size((1, 8, 32, 32))
    assert feat[1].shape == torch.Size((1, 16, 16, 16))
    assert feat[2].shape == torch.Size((1, 32, 8, 8))
    assert feat[3].shape == torch.Size((1, 64, 4, 4))
    assert feat[4].shape == torch.Size((1, 128, 2, 2))
    # Test CSPDarknet with custom arch forward
    # (NOTE: 'arch_ovewrite' is the upstream parameter name, typo included.)
    arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
                     [224, 512, 1, True, False]]
    model = CSPDarknet(
        arch_ovewrite=arch_ovewrite,
        widen_factor=0.25,
        out_indices=(0, 1, 2, 3))
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size((1, 8, 16, 16))
    assert feat[1].shape == torch.Size((1, 14, 8, 8))
    assert feat[2].shape == torch.Size((1, 56, 4, 4))
    assert feat[3].shape == torch.Size((1, 128, 2, 2))
| 4,117 | 34.196581 | 79 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_renext.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
    """Unit tests for the ResNeXt bottleneck block (validation, structure,
    forward)."""
    # An unsupported style name must be rejected.
    with pytest.raises(AssertionError):
        BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')

    # Structural checks: in 'pytorch' style the stride sits on conv2, and
    # the grouped conv carries the requested cardinality.
    bottleneck = BottleneckX(
        64, 64, groups=32, base_width=4, stride=2, style='pytorch')
    assert bottleneck.conv2.stride == (2, 2)
    assert bottleneck.conv2.groups == 32
    assert bottleneck.conv2.out_channels == 128

    # DCN configuration: conv_cfg and dcn are mutually exclusive.
    dcn_cfg = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        BottleneckX(
            64,
            64,
            groups=32,
            base_width=4,
            dcn=dcn_cfg,
            conv_cfg=dict(type='Conv'))
    # Building with dcn alone is fine.
    BottleneckX(64, 64, dcn=dcn_cfg)

    # A plain forward pass keeps the spatial shape.
    bottleneck = BottleneckX(64, 16, groups=32, base_width=4)
    inp = torch.randn(1, 64, 56, 56)
    assert bottleneck(inp).shape == (1, 64, 56, 56)

    # Forward with a GeneralizedAttention plugin placed after conv2.
    attn_plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, False, True, True),
            position='after_conv2')
    ]
    bottleneck = BottleneckX(
        64, 16, groups=32, base_width=4, plugins=attn_plugins)
    inp = torch.randn(1, 64, 56, 56)
    assert bottleneck(inp).shape == (1, 64, 56, 56)
def test_resnext_backbone():
    """ResNeXt backbone: invalid depth rejection and a grouped forward pass."""
    # Only depths 50/101/152 have registered arch settings.
    with pytest.raises(KeyError):
        ResNeXt(depth=18)

    # groups=32 / base_width=4 must propagate into every bottleneck block.
    model = ResNeXt(depth=50, groups=32, base_width=4)
    assert all(m.conv2.groups == 32 for m in model.modules() if is_block(m))

    model.train()
    feats = model(torch.randn(1, 3, 32, 32))
    assert len(feats) == 4
    expected_shapes = [(1, 256, 8, 8), (1, 512, 4, 4), (1, 1024, 2, 2),
                       (1, 2048, 1, 1)]
    for feat, shape in zip(feats, expected_shapes):
        assert feat.shape == torch.Size(shape)
# Each entry: (registered arch name, generation parameters, expected
# per-stage output channels).
# NOTE(review): this table is byte-identical to the one defined in
# test_regnet.py and does not appear to be referenced by any test in this
# file — presumably a copy-paste leftover; confirm before removing.
regnet_test_data = [
    ('regnetx_400mf',
     dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
          bot_mul=1.0), [32, 64, 160, 384]),
    ('regnetx_800mf',
     dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
          bot_mul=1.0), [64, 128, 288, 672]),
    ('regnetx_1.6gf',
     dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
          bot_mul=1.0), [72, 168, 408, 912]),
    ('regnetx_3.2gf',
     dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
          bot_mul=1.0), [96, 192, 432, 1008]),
    ('regnetx_4.0gf',
     dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
          bot_mul=1.0), [80, 240, 560, 1360]),
    ('regnetx_6.4gf',
     dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
          bot_mul=1.0), [168, 392, 784, 1624]),
    ('regnetx_8.0gf',
     dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
          bot_mul=1.0), [80, 240, 720, 1920]),
    ('regnetx_12gf',
     dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
          bot_mul=1.0), [224, 448, 896, 2240]),
]
| 3,528 | 32.292453 | 73 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_trident_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import TridentResNet
from mmdet.models.backbones.trident_resnet import TridentBottleneck
def test_trident_resnet_bottleneck():
    """Tests for :class:`TridentBottleneck`.

    Covers constructor validation (style / plugin positions / duplicate
    plugin names / unknown plugin types), checkpointed forward, both conv
    styles, and forward passes with various plugin combinations. All forward
    outputs have ``block.num_branch`` as the leading dimension because the
    trident block replicates the input across its branches.
    """
    # Shared positional config: (trident_dilations, test_branch_idx,
    # concat_output), expanded with * into every TridentBottleneck below.
    trident_dilations = (1, 2, 3)
    test_branch_idx = 1
    concat_output = True
    trident_build_config = (trident_dilations, test_branch_idx, concat_output)
    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=64, style='tensorflow')
    with pytest.raises(AssertionError):
        # Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv4')
        ]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    with pytest.raises(AssertionError):
        # Need to specify different postfix to avoid duplicate plugin name
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3'),
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    with pytest.raises(KeyError):
        # Plugin type is not supported
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        TridentBottleneck(
            *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    # Test Bottleneck with checkpoint forward
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck style: 'pytorch' strides on conv2, 'caffe' on conv1.
    block = TridentBottleneck(
        *trident_build_config,
        inplanes=64,
        planes=64,
        stride=2,
        style='pytorch')
    assert block.conv1.stride == (1, 1)
    assert block.conv2.stride == (2, 2)
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')
    assert block.conv1.stride == (2, 2)
    assert block.conv2.stride == (1, 1)
    # Test Bottleneck forward
    block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    # after_conv3 plugins see the expanded channel count (planes * 4 = 64).
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    # after_conv2 plugins see the bottleneck width (planes = 16).
    assert block.gen_attention_block.in_channels == 16
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
    # after conv2, 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    assert block.nonlocal_block.in_channels == 16
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
    # Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
    # conv3 — distinct postfixes make the attribute names unique.
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
            position='after_conv3')
    ]
    block = TridentBottleneck(
        *trident_build_config, inplanes=64, planes=16, plugins=plugins)
    assert block.context_block1.in_channels == 16
    assert block.context_block2.in_channels == 64
    assert block.context_block3.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
def test_trident_resnet_backbone():
    """Test the TridentResNet backbone: depth/stage validation and forward.

    Fix: the original placed the docstring *after* the first statement,
    making it a discarded bare-string expression instead of the function
    docstring; it is now the actual docstring.
    """
    tridentresnet_config = dict(
        num_branch=3,
        test_branch_idx=1,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        trident_dilations=(1, 2, 3),
        out_indices=(2, ),
    )
    with pytest.raises(AssertionError):
        # TridentResNet depth should be in [50, 101, 152]
        TridentResNet(18, **tridentresnet_config)
    with pytest.raises(AssertionError):
        # In TridentResNet: num_stages == 3
        TridentResNet(50, num_stages=4, **tridentresnet_config)

    model = TridentResNet(50, num_stages=3, **tridentresnet_config)
    model.train()
    imgs = torch.randn(1, 3, 32, 32)
    feat = model(imgs)
    # One output stage (out_indices=(2,)); the leading dim is the number of
    # trident branches (3), not the batch size.
    assert len(feat) == 1
    assert feat[0].shape == torch.Size([3, 1024, 2, 2])
| 6,372 | 34.209945 | 79 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_resnest.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeSt
from mmdet.models.backbones.resnest import Bottleneck as BottleneckS
def test_resnest_bottleneck():
    """ResNeSt bottleneck: invalid style, structure, and forward shape."""
    # 'tensorflow' is not an accepted style.
    with pytest.raises(AssertionError):
        BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')

    # With stride=2 the downsampling goes through the avd (average
    # downsampling) layer and conv2 keeps the requested width.
    bottleneck = BottleneckS(
        2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')
    assert bottleneck.avd_layer.stride == 2
    assert bottleneck.conv2.channels == 4

    # Forward keeps the input shape.
    bottleneck = BottleneckS(16, 4, radix=2, reduction_factor=4)
    out = bottleneck(torch.randn(2, 16, 56, 56))
    assert out.shape == (2, 16, 56, 56)
def test_resnest_backbone():
    """ResNeSt backbone: invalid depth and output shapes of all four stages."""
    # Only depths 50/101/152/200 have registered arch settings.
    with pytest.raises(KeyError):
        ResNeSt(depth=18)

    model = ResNeSt(
        depth=50,
        base_channels=4,
        radix=2,
        reduction_factor=4,
        out_indices=(0, 1, 2, 3))
    model.train()
    feats = model(torch.randn(2, 3, 32, 32))
    assert len(feats) == 4
    expected_shapes = [(2, 16, 8, 8), (2, 32, 4, 4), (2, 64, 2, 2),
                       (2, 128, 1, 1)]
    for feat, shape in zip(feats, expected_shapes):
        assert feat.shape == torch.Size(shape)
| 1,473 | 29.708333 | 76 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_regnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
# Parametrization table consumed by test_regnet_backbone below. Each entry:
# (registered arch name, generation parameters, expected per-stage output
# channels for a 32x32 input).
regnet_test_data = [
    ('regnetx_400mf',
     dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
          bot_mul=1.0), [32, 64, 160, 384]),
    ('regnetx_800mf',
     dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
          bot_mul=1.0), [64, 128, 288, 672]),
    ('regnetx_1.6gf',
     dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
          bot_mul=1.0), [72, 168, 408, 912]),
    ('regnetx_3.2gf',
     dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
          bot_mul=1.0), [96, 192, 432, 1008]),
    ('regnetx_4.0gf',
     dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
          bot_mul=1.0), [80, 240, 560, 1360]),
    ('regnetx_6.4gf',
     dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
          bot_mul=1.0), [168, 392, 784, 1624]),
    ('regnetx_8.0gf',
     dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
          bot_mul=1.0), [80, 240, 720, 1920]),
    ('regnetx_12gf',
     dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
          bot_mul=1.0), [224, 448, 896, 2240]),
]
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
    """Check RegNet built both from an arch name and from an arch dict.

    Args:
        arch_name (str): Registered architecture name, e.g. 'regnetx_400mf'.
        arch (dict): Generation parameters describing the same architecture.
        out_channels (list[int]): Expected channels of the 4 output stages.
    """
    with pytest.raises(AssertionError):
        # An unregistered arch name should be rejected.
        RegNet(arch_name + '233')

    def _check_forward(model):
        # Run a forward pass and verify all four stage output shapes.
        model.train()
        imgs = torch.randn(1, 3, 32, 32)
        feat = model(imgs)
        assert len(feat) == 4
        assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
        assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
        assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
        assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])

    # Test RegNet built from the registered arch name.
    _check_forward(RegNet(arch_name))

    # Test RegNet built from the raw arch dict.
    # BUG FIX: the original re-asserted the features of the name-built model
    # without ever running this one; the dict-built model is now exercised.
    _check_forward(RegNet(arch))
| 2,177 | 35.915254 | 73 | py |
ERD | ERD-main/tests/test_models/test_backbones/test_detectors_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.models.backbones import DetectoRS_ResNet
def test_detectorrs_resnet_backbone():
    """DetectoRS_ResNet: validation of the pretrained / init_cfg arguments."""
    backbone_cfg = dict(
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True)

    # `pretrained` and `init_cfg` are mutually exclusive.
    with pytest.raises(AssertionError):
        DetectoRS_ResNet(
            **backbone_cfg, pretrained='Pretrained', init_cfg='Pretrained')
    # `init_cfg` must be a dict, not a list.
    with pytest.raises(AssertionError):
        DetectoRS_ResNet(
            **backbone_cfg, pretrained=None, init_cfg=['Pretrained'])
    # `init_cfg` must carry a `type` key.
    with pytest.raises(KeyError):
        DetectoRS_ResNet(
            **backbone_cfg,
            pretrained=None,
            init_cfg=dict(checkpoint='Pretrained'))
    # Only the 'Pretrained' init type is supported.
    with pytest.raises(AssertionError):
        DetectoRS_ResNet(
            **backbone_cfg, pretrained=None, init_cfg=dict(type='Trained'))
    # `pretrained` must be a str or None.
    with pytest.raises(TypeError):
        model = DetectoRS_ResNet(
            **backbone_cfg, pretrained=['Pretrained'], init_cfg=None)
        model.init_weights()
| 1,611 | 32.583333 | 77 | py |
ERD | ERD-main/tests/test_models/test_layers/test_position_encoding.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.layers import (LearnedPositionalEncoding,
SinePositionalEncoding)
def test_sine_positional_encoding(num_feats=16, batch_size=2):
    """Sine positional encoding: scale validation and output shapes."""
    # `scale` must be a number, not a tuple.
    with pytest.raises(AssertionError):
        SinePositionalEncoding(num_feats, scale=(3., ), normalize=True)

    h, w = 10, 6
    mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)

    # Without normalization.
    encoder = SinePositionalEncoding(num_feats)
    assert not encoder.normalize
    assert encoder(mask).shape == (batch_size, num_feats * 2, h, w)

    # With normalization.
    encoder = SinePositionalEncoding(num_feats, normalize=True)
    assert encoder.normalize
    assert encoder(mask).shape == (batch_size, num_feats * 2, h, w)
def test_learned_positional_encoding(num_feats=16,
                                     row_num_embed=10,
                                     col_num_embed=10,
                                     batch_size=2):
    """Learned positional encoding: embedding table sizes and output shape."""
    encoder = LearnedPositionalEncoding(num_feats, row_num_embed,
                                        col_num_embed)
    # One learned vector per row / column index.
    assert encoder.row_embed.weight.shape == (row_num_embed, num_feats)
    assert encoder.col_embed.weight.shape == (col_num_embed, num_feats)

    h, w = 10, 6
    mask = torch.rand(batch_size, h, w) > 0.5
    assert encoder(mask).shape == (batch_size, num_feats * 2, h, w)
| 1,439 | 35 | 79 | py |
ERD | ERD-main/tests/test_models/test_layers/test_ema.py | # Copyright (c) OpenMMLab. All rights reserved.
import itertools
import math
from unittest import TestCase
import torch
import torch.nn as nn
from mmengine.testing import assert_allclose
from mmdet.models.layers import ExpMomentumEMA
class TestEMA(TestCase):
    """Tests for :class:`ExpMomentumEMA` — exponential moving average with
    exponentially annealed momentum — against a hand-rolled reference."""

    def test_exp_momentum_ema(self):
        """EMA over *parameters* must match the reference implementation."""
        model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10))
        # Test invalid gamma
        with self.assertRaisesRegex(AssertionError,
                                    'gamma must be greater than 0'):
            ExpMomentumEMA(model, gamma=-1)
        # Test EMA
        model = torch.nn.Sequential(
            torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10))
        momentum = 0.1
        gamma = 4
        ema_model = ExpMomentumEMA(model, momentum=momentum, gamma=gamma)
        # Reference accumulator, updated in lockstep with ema_model.
        averaged_params = [
            torch.zeros_like(param) for param in model.parameters()
        ]
        n_updates = 10
        for i in range(n_updates):
            updated_averaged_params = []
            for p, p_avg in zip(model.parameters(), averaged_params):
                # Perturb the raw model as if one optimizer step ran.
                p.detach().add_(torch.randn_like(p))
                if i == 0:
                    # First update: the average is a plain copy.
                    updated_averaged_params.append(p.clone())
                else:
                    # Annealed momentum: approaches `momentum` as the step
                    # index i grows (exp term decays to 0).
                    m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum
                    updated_averaged_params.append(
                        (p_avg * (1 - m) + p * m).clone())
            ema_model.update_parameters(model)
            averaged_params = updated_averaged_params
        for p_target, p_ema in zip(averaged_params, ema_model.parameters()):
            assert_allclose(p_target, p_ema)

    def test_exp_momentum_ema_update_buffer(self):
        """With ``update_buffers=True`` the EMA must also track module
        buffers (here: BatchNorm running stats), not just parameters."""
        model = nn.Sequential(
            nn.Conv2d(1, 5, kernel_size=3), nn.BatchNorm2d(5, momentum=0.3),
            nn.Linear(5, 10))
        # Test invalid gamma
        with self.assertRaisesRegex(AssertionError,
                                    'gamma must be greater than 0'):
            ExpMomentumEMA(model, gamma=-1)
        # Test EMA with momentum annealing.
        momentum = 0.1
        gamma = 4
        ema_model = ExpMomentumEMA(
            model, gamma=gamma, momentum=momentum, update_buffers=True)
        # Scalar (0-dim) buffers such as num_batches_tracked are excluded
        # by the size() != torch.Size([]) filter.
        averaged_params = [
            torch.zeros_like(param)
            for param in itertools.chain(model.parameters(), model.buffers())
            if param.size() != torch.Size([])
        ]
        n_updates = 10
        for i in range(n_updates):
            updated_averaged_params = []
            params = [
                param for param in itertools.chain(model.parameters(),
                                                   model.buffers())
                if param.size() != torch.Size([])
            ]
            for p, p_avg in zip(params, averaged_params):
                p.detach().add_(torch.randn_like(p))
                if i == 0:
                    updated_averaged_params.append(p.clone())
                else:
                    # Same annealing schedule as in test_exp_momentum_ema.
                    m = (1 - momentum) * math.exp(-(1 + i) / gamma) + momentum
                    updated_averaged_params.append(
                        (p_avg * (1 - m) + p * m).clone())
            ema_model.update_parameters(model)
            averaged_params = updated_averaged_params
        ema_params = [
            param for param in itertools.chain(ema_model.module.parameters(),
                                               ema_model.module.buffers())
            if param.size() != torch.Size([])
        ]
        for p_target, p_ema in zip(averaged_params, ema_params):
            assert_allclose(p_target, p_ema)
| 3,633 | 37.252632 | 79 | py |
ERD | ERD-main/tests/test_models/test_layers/test_se_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmengine.model import constant_init
from mmdet.models.layers import DyReLU, SELayer
def test_se_layer():
    """SELayer: act_cfg validation and forward shape preservation."""
    # act_cfg must be a sequence of exactly two entries...
    with pytest.raises(AssertionError):
        SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
    # ...and a tuple of dicts (a list is rejected).
    with pytest.raises(AssertionError):
        SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])

    layer = SELayer(channels=32)
    layer.init_weights()
    layer.train()
    inp = torch.randn((1, 32, 10, 10))
    assert layer(inp).shape == (1, 32, 10, 10)
def test_dyrelu():
    """DyReLU: act_cfg validation, forward shape, and static-ReLU fallback."""
    # act_cfg must be a sequence of exactly two entries...
    with pytest.raises(AssertionError):
        DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
    # ...and a tuple of dicts (a list is rejected).
    with pytest.raises(AssertionError):
        DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])

    layer = DyReLU(channels=32)
    layer.init_weights()
    layer.train()
    inp = torch.randn((1, 32, 10, 10))
    assert layer(inp).shape == (1, 32, 10, 10)

    # With the second conv of the SE-like branch zeroed out, DyReLU must
    # degenerate to a plain (static) ReLU.
    layer = DyReLU(channels=32)
    constant_init(layer.conv2.conv, 0)
    layer.train()
    inp = torch.randn((1, 32, 10, 10))
    assert torch.equal(layer(inp), F.relu(inp))
| 1,623 | 28.527273 | 76 | py |
ERD | ERD-main/tests/test_models/test_layers/test_plugins.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import pytest
import torch
from mmengine.config import ConfigDict
from mmdet.models.layers import DropBlock
from mmdet.registry import MODELS
from mmdet.utils import register_all_modules
# Populate the mmdet registries so that `MODELS.build(...)` in the tests
# below can resolve decoder types by name.
register_all_modules()
def test_dropblock():
    """DropBlock: full-drop behavior, shape preservation, arg validation."""
    feat = torch.rand(1, 1, 11, 11)

    # drop_prob == 1 with a block covering the whole map zeroes everything.
    dropblock = DropBlock(drop_prob=1.0, block_size=11, warmup_iters=0)
    out = dropblock(feat)
    assert (out == 0).all() and out.shape == feat.shape

    # A partial drop keeps the feature-map shape.
    dropblock = DropBlock(drop_prob=0.5, block_size=5, warmup_iters=0)
    assert dropblock(feat).shape == feat.shape

    # drop_prob must lie in (0, 1].
    with pytest.raises(AssertionError):
        DropBlock(1.5, 3)
    # block_size must be odd.
    with pytest.raises(AssertionError):
        DropBlock(0.5, 2)
    # warmup_iters must be non-negative.
    with pytest.raises(AssertionError):
        DropBlock(0.5, 3, -1)
class TestPixelDecoder(unittest.TestCase):
    """Forward test for the PixelDecoder built from a registry config."""

    def test_forward(self):
        base_channels = 64
        decoder_cfg = ConfigDict(
            dict(
                type='PixelDecoder',
                in_channels=[base_channels * 2**i for i in range(4)],
                feat_channels=base_channels,
                out_channels=base_channels,
                norm_cfg=dict(type='GN', num_groups=32),
                act_cfg=dict(type='ReLU')))
        decoder = MODELS.build(decoder_cfg)
        decoder.init_weights()

        img_metas = [{}, {}]
        # Four feature levels: channels double and spatial size halves as
        # the level index grows.
        feats = [
            torch.rand(
                (2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
            for i in range(4)
        ]
        mask_feature, memory = decoder(feats, img_metas)
        # `memory` is the coarsest input level passed through untouched;
        # the mask feature is at the finest level's resolution.
        assert (memory == feats[-1]).all()
        assert mask_feature.shape == feats[0].shape
class TestTransformerEncoderPixelDecoder(unittest.TestCase):
    """Forward test for TransformerEncoderPixelDecoder (pixel decoder whose
    coarsest level is refined by a DETR-style transformer encoder)."""

    def test_forward(self):
        base_channels = 64
        pixel_decoder_cfg = ConfigDict(
            dict(
                type='TransformerEncoderPixelDecoder',
                in_channels=[base_channels * 2**i for i in range(4)],
                feat_channels=base_channels,
                out_channels=base_channels,
                norm_cfg=dict(type='GN', num_groups=32),
                act_cfg=dict(type='ReLU'),
                encoder=dict(  # DetrTransformerEncoder
                    num_layers=6,
                    layer_cfg=dict(  # DetrTransformerEncoderLayer
                        self_attn_cfg=dict(  # MultiheadAttention
                            embed_dims=base_channels,
                            num_heads=8,
                            attn_drop=0.1,
                            proj_drop=0.1,
                            dropout_layer=None,
                            batch_first=True),
                        ffn_cfg=dict(
                            embed_dims=base_channels,
                            feedforward_channels=base_channels * 8,
                            num_fcs=2,
                            act_cfg=dict(type='ReLU', inplace=True),
                            ffn_drop=0.1,
                            dropout_layer=None,
                            add_identity=True),
                        norm_cfg=dict(type='LN'),
                        init_cfg=None),
                    init_cfg=None),
                positional_encoding=dict(
                    num_feats=base_channels // 2, normalize=True)))
        # NOTE(review): `self` is rebound to the built decoder here; the
        # calls below go through the decoder, not the TestCase instance.
        self = MODELS.build(pixel_decoder_cfg)
        self.init_weights()
        # img_shape differs from batch_input_shape to exercise the padding
        # mask handling.
        img_metas = [{
            'batch_input_shape': (128, 160),
            'img_shape': (120, 160),
        }, {
            'batch_input_shape': (128, 160),
            'img_shape': (125, 160),
        }]
        feats = [
            torch.rand(
                (2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
            for i in range(4)
        ]
        mask_feature, memory = self(feats, img_metas)
        # The encoder output keeps the coarsest level's spatial size; the
        # mask feature is at the finest level's resolution.
        assert memory.shape[-2:] == feats[-1].shape[-2:]
        assert mask_feature.shape == feats[0].shape
class TestMSDeformAttnPixelDecoder(unittest.TestCase):
    """Forward test for MSDeformAttnPixelDecoder (pixel decoder refined by a
    multi-scale deformable-attention encoder over 3 levels)."""

    def test_forward(self):
        base_channels = 64
        pixel_decoder_cfg = ConfigDict(
            dict(
                type='MSDeformAttnPixelDecoder',
                in_channels=[base_channels * 2**i for i in range(4)],
                strides=[4, 8, 16, 32],
                feat_channels=base_channels,
                out_channels=base_channels,
                num_outs=3,
                norm_cfg=dict(type='GN', num_groups=32),
                act_cfg=dict(type='ReLU'),
                encoder=dict(  # DeformableDetrTransformerEncoder
                    num_layers=6,
                    layer_cfg=dict(  # DeformableDetrTransformerEncoderLayer
                        self_attn_cfg=dict(  # MultiScaleDeformableAttention
                            embed_dims=base_channels,
                            num_heads=8,
                            num_levels=3,
                            num_points=4,
                            im2col_step=64,
                            dropout=0.0,
                            batch_first=True,
                            norm_cfg=None,
                            init_cfg=None),
                        ffn_cfg=dict(
                            embed_dims=base_channels,
                            feedforward_channels=base_channels * 4,
                            num_fcs=2,
                            ffn_drop=0.0,
                            act_cfg=dict(type='ReLU', inplace=True))),
                    init_cfg=None),
                positional_encoding=dict(
                    num_feats=base_channels // 2, normalize=True),
                init_cfg=None))
        # NOTE(review): `self` is rebound to the built decoder here; the
        # calls below go through the decoder, not the TestCase instance.
        self = MODELS.build(pixel_decoder_cfg)
        self.init_weights()
        feats = [
            torch.rand(
                (2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
            for i in range(4)
        ]
        mask_feature, multi_scale_features = self(feats)
        assert mask_feature.shape == feats[0].shape
        assert len(multi_scale_features) == 3
        # Reverse so that index i lines up with input level i + 1 (the three
        # coarsest inputs) for the spatial-size comparison.
        multi_scale_features = multi_scale_features[::-1]
        for i in range(3):
            assert multi_scale_features[i].shape[-2:] == feats[i +
                                                              1].shape[-2:]
ERD | ERD-main/tests/test_models/test_layers/test_inverted_residual.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn import is_norm
from torch.nn.modules import GroupNorm
from mmdet.models.layers import InvertedResidual, SELayer
def test_inverted_residual():
    """Tests for :class:`InvertedResidual`.

    Covers constructor validation, residual-shortcut selection by stride,
    the optional SE layer, skipping the expand conv, alternate norm/act
    configs, and checkpointed forward.
    """
    with pytest.raises(AssertionError):
        # stride must be in [1, 2]
        InvertedResidual(16, 16, 32, stride=3)
    with pytest.raises(AssertionError):
        # se_cfg must be None or dict
        InvertedResidual(16, 16, 32, se_cfg=list())
    with pytest.raises(AssertionError):
        # in_channeld and mid_channels must be the same if
        # with_expand_conv is False
        InvertedResidual(16, 16, 32, with_expand_conv=False)
    # Test InvertedResidual forward, stride=1: residual shortcut is active
    # and the spatial size is preserved.
    block = InvertedResidual(16, 16, 32, stride=1)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert getattr(block, 'se', None) is None
    assert block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward, stride=2: no shortcut, spatial halved.
    block = InvertedResidual(16, 16, 32, stride=2)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert not block.with_res_shortcut
    assert x_out.shape == torch.Size((1, 16, 28, 28))
    # Test InvertedResidual forward with se layer
    se_cfg = dict(channels=32)
    block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert isinstance(block.se, SELayer)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward, with_expand_conv=False (requires
    # in_channels == mid_channels, hence 32/16/32 here).
    block = InvertedResidual(32, 16, 32, with_expand_conv=False)
    x = torch.randn(1, 32, 56, 56)
    x_out = block(x)
    assert getattr(block, 'expand_conv', None) is None
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward with GroupNorm: every norm layer in the
    # block must then be a GroupNorm instance.
    block = InvertedResidual(
        16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    for m in block.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward with HSigmoid
    block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size((1, 16, 56, 56))
    # Test InvertedResidual forward with checkpoint
    block = InvertedResidual(16, 16, 32, with_cp=True)
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert block.with_cp
    assert x_out.shape == torch.Size((1, 16, 56, 56))
ERD | ERD-main/tests/test_models/test_layers/test_conv_upsample.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.layers import ConvUpsample
@pytest.mark.parametrize('num_layers', [0, 1, 2])
def test_conv_upsample(num_layers):
    """ConvUpsample must upscale the spatial size by ``2 ** num_upsample``."""
    # num_layers == 0 still needs one conv layer, but performs no
    # upsampling; otherwise one upsample per layer.
    num_upsample = max(num_layers, 0)
    effective_layers = max(num_layers, 1)
    layer = ConvUpsample(
        10,
        5,
        num_layers=effective_layers,
        num_upsample=num_upsample,
        conv_cfg=None,
        norm_cfg=None)
    in_size = 5
    out = layer(torch.randn((1, 10, in_size, in_size)))
    expected = in_size * 2**num_upsample
    assert out.shape[-2:] == (expected, expected)
| 629 | 24.2 | 54 | py |
ERD | ERD-main/tests/test_models/test_layers/test_transformer.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmengine.config import ConfigDict
from mmdet.models.layers.transformer import (AdaptivePadding,
DetrTransformerDecoder,
DetrTransformerEncoder,
PatchEmbed, PatchMerging)
def test_adaptive_padding():
    """Tests for :class:`AdaptivePadding` in both 'same' and 'corner' modes.

    AdaptivePadding pads an input so that a sliding window with the given
    kernel/stride/dilation covers it exactly; the expected padded sizes
    below are computed case by case.
    """
    for padding in ('same', 'corner'):
        kernel_size = 16
        stride = 16
        dilation = 1
        input = torch.rand(1, 1, 15, 17)
        pool = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        out = pool(input)
        # padding to divisible by 16
        assert (out.shape[2], out.shape[3]) == (16, 32)
        input = torch.rand(1, 1, 16, 17)
        out = pool(input)
        # padding to divisible by 16
        assert (out.shape[2], out.shape[3]) == (16, 32)
        kernel_size = (2, 2)
        stride = (2, 2)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        # padding to divisible by 2
        assert (out.shape[2], out.shape[3]) == (12, 14)
        kernel_size = (2, 2)
        stride = (10, 10)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 10, 13)
        out = adap_pad(input)
        # no padding
        assert (out.shape[2], out.shape[3]) == (10, 13)
        kernel_size = (11, 11)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        # all padding
        assert (out.shape[2], out.shape[3]) == (21, 21)
        # test padding as kernel is (7,9)
        input = torch.rand(1, 1, 11, 13)
        stride = (3, 4)
        kernel_size = (4, 5)
        dilation = (2, 2)
        # actually (7, 9): dilation 2 makes the effective kernel
        # (k - 1) * d + 1 = (7, 9)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        dilation_out = adap_pad(input)
        assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21)
        kernel_size = (7, 9)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        kernel79_out = adap_pad(input)
        assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21)
        # Dilated (4,5)x2 and plain (7,9) kernels are equivalent in the
        # amount of padding they require.
        assert kernel79_out.shape == dilation_out.shape
    # assert only support "same" "corner"
    with pytest.raises(AssertionError):
        AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=1)
def test_patch_embed():
    """Tests for :class:`PatchEmbed`.

    Checks output token shapes, the returned spatial out_size (and the
    L == out_h * out_w invariant), the precomputed ``init_out_size``, and
    adaptive ('same' / 'corner') padding behavior.
    """
    B = 2
    H = 3
    W = 4
    C = 3
    embed_dims = 10
    kernel_size = 3
    stride = 1
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_1 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=1,
        norm_cfg=None)
    x1, shape = patch_merge_1(dummy_input)
    # test out shape
    assert x1.shape == (2, 2, 10)
    # test outsize is correct
    assert shape == (1, 2)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x1.shape[1]
    B = 2
    H = 10
    W = 10
    C = 3
    embed_dims = 10
    kernel_size = 5
    stride = 2
    dummy_input = torch.rand(B, C, H, W)
    # test dilation
    patch_merge_2 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=None,
    )
    x2, shape = patch_merge_2(dummy_input)
    # test out shape
    assert x2.shape == (2, 1, 10)
    # test outsize is correct
    assert shape == (1, 1)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x2.shape[1]
    stride = 2
    input_size = (10, 10)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    x3, shape = patch_merge_3(dummy_input)
    # test out shape
    assert x3.shape == (2, 1, 10)
    # test outsize is correct
    assert shape == (1, 1)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x3.shape[1]
    # test the init_out_size with nn.Unfold
    # (2 * 4 is the dilated-kernel extent: dilation * (kernel_size - 1))
    assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
                                              1) // 2 + 1
    assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
                                              1) // 2 + 1
    H = 11
    W = 12
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    _, shape = patch_merge_3(dummy_input)
    # when input_size equal to real input
    # the out_size should be equal to `init_out_size`
    assert shape == patch_merge_3.init_out_size
    # NOTE(review): the stanza below duplicates the previous one verbatim
    # (same config, same input, same assertion) — presumably a copy-paste
    # leftover; confirm before removing.
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    _, shape = patch_merge_3(dummy_input)
    # when input_size equal to real input
    # the out_size should be equal to `init_out_size`
    assert shape == patch_merge_3.init_out_size
    # test adap padding
    for padding in ('same', 'corner'):
        in_c = 2
        embed_dims = 3
        B = 2
        # test stride is 1
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 25, 3)
        assert out_size == (5, 5)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test kernel_size == stride
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 1, 3)
        assert out_size == (1, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test kernel_size == stride
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 2, 3)
        assert out_size == (2, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test different kernel_size with different stride
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 3, 3)
        assert out_size == (1, 3)
        assert x_out.size(1) == out_size[0] * out_size[1]
def test_patch_merging():
    """Check ``PatchMerging`` output shapes for int padding and for the
    adaptive 'same'/'corner' padding modes over several kernel/stride
    combinations."""
    # --- int padding, kernel 3 / stride 3 / pad 1 ---
    patch_merge = PatchMerging(
        in_channels=3,
        out_channels=4,
        kernel_size=3,
        stride=3,
        padding=1,
        dilation=1,
        bias=False)
    x = torch.rand(1, 100, 3)
    x_out, out_size = patch_merge(x, (10, 10))
    assert x_out.size() == (1, 16, 4)
    assert out_size == (4, 4)
    # out size must be consistent with the real output
    assert x_out.size(1) == out_size[0] * out_size[1]
    # --- int padding, kernel 6 / stride 3 / pad 2 / dilation 2 ---
    patch_merge = PatchMerging(
        in_channels=4,
        out_channels=5,
        kernel_size=6,
        stride=3,
        padding=2,
        dilation=2,
        bias=False)
    x = torch.rand(1, 100, 4)
    x_out, out_size = patch_merge(x, (10, 10))
    assert x_out.size() == (1, 4, 5)
    assert out_size == (2, 2)
    assert x_out.size(1) == out_size[0] * out_size[1]
    # --- adaptive padding ---
    # (input_size, kernel_size, stride, expected_len, expected_out_size)
    adaptive_cases = [
        ((5, 5), (5, 5), (1, 1), 25, (5, 5)),  # stride 1
        ((5, 5), (5, 5), (5, 5), 1, (1, 1)),   # kernel_size == stride
        ((6, 5), (5, 5), (5, 5), 2, (2, 1)),   # non-square input
        ((6, 5), (6, 2), (6, 2), 3, (1, 3)),   # different kernel/stride
    ]
    batch = 2
    for padding in ('same', 'corner'):
        for input_size, kernel_size, stride, exp_len, exp_size in \
                adaptive_cases:
            patch_merge = PatchMerging(
                in_channels=2,
                out_channels=3,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=1,
                bias=False)
            x = torch.rand(batch, input_size[0] * input_size[1], 2)
            x_out, out_size = patch_merge(x, input_size)
            assert x_out.size() == (batch, exp_len, 3)
            assert out_size == exp_size
            assert x_out.size(1) == out_size[0] * out_size[1]
def test_detr_transformer_encoder_decoder():
    """Smoke-test DETR transformer encoder/decoder construction.

    Builds each module once from a 6-layer config and checks both that
    the module is truthy and that the requested number of stacked layers
    was created.  The original built each module twice (once per assert),
    doubling the non-trivial construction cost for no extra coverage.
    """
    decoder_cfg = ConfigDict(
        num_layers=6,
        layer_cfg=dict(  # DetrTransformerDecoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1),
            cross_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1),
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,
                num_fcs=2,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True))))
    decoder = DetrTransformerDecoder(**decoder_cfg)
    assert decoder
    assert len(decoder.layers) == 6
    encoder_cfg = ConfigDict(
        dict(
            num_layers=6,
            layer_cfg=dict(  # DetrTransformerEncoderLayer
                self_attn_cfg=dict(  # MultiheadAttention
                    embed_dims=256,
                    num_heads=8,
                    dropout=0.1),
                ffn_cfg=dict(
                    embed_dims=256,
                    feedforward_channels=2048,
                    num_fcs=2,
                    ffn_drop=0.1,
                    act_cfg=dict(type='ReLU', inplace=True)))))
    encoder = DetrTransformerEncoder(**encoder_cfg)
    assert encoder
    assert len(encoder.layers) == 6
| 14,590 | 27.893069 | 73 | py |
ERD | ERD-main/tests/test_models/test_layers/test_brick_wrappers.py | from unittest.mock import patch
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.layers import AdaptiveAvgPool2d, adaptive_avg_pool2d
# Version string patched into `torch.__version__` by the decorated tests
# below: keep 'parrots' as-is, otherwise pretend to be PyTorch 1.7.
torch_version = 'parrots' if torch.__version__ == 'parrots' else '1.7'
@patch('torch.__version__', torch_version)
def test_adaptive_avg_pool2d():
    """The functional wrapper must handle empty-batch input and match
    ``F.adaptive_avg_pool2d`` on non-empty input."""
    # Test the empty batch dimension
    # Test the two input conditions
    x_empty = torch.randn(0, 3, 4, 5)
    # 1. tuple[int, int]
    wrapper_out = adaptive_avg_pool2d(x_empty, (2, 2))
    assert wrapper_out.shape == (0, 3, 2, 2)
    # 2. int
    wrapper_out = adaptive_avg_pool2d(x_empty, 2)
    assert wrapper_out.shape == (0, 3, 2, 2)
    # wrapper op with a regular (non-empty, 4-dim NCHW) input must agree
    # with the reference torch functional
    x_normal = torch.randn(3, 3, 4, 5)
    wrapper_out = adaptive_avg_pool2d(x_normal, (2, 2))
    ref_out = F.adaptive_avg_pool2d(x_normal, (2, 2))
    assert wrapper_out.shape == (3, 3, 2, 2)
    assert torch.equal(wrapper_out, ref_out)
    wrapper_out = adaptive_avg_pool2d(x_normal, 2)
    ref_out = F.adaptive_avg_pool2d(x_normal, 2)
    assert wrapper_out.shape == (3, 3, 2, 2)
    assert torch.equal(wrapper_out, ref_out)
@patch('torch.__version__', torch_version)
def test_AdaptiveAvgPool2d():
    """The module wrapper must handle empty-batch input for all four
    output-size spellings and match ``nn.AdaptiveAvgPool2d`` otherwise."""
    # Test the empty batch dimension
    x_empty = torch.randn(0, 3, 4, 5)
    # Test the four input conditions
    # 1. tuple[int, int]
    wrapper = AdaptiveAvgPool2d((2, 2))
    wrapper_out = wrapper(x_empty)
    assert wrapper_out.shape == (0, 3, 2, 2)
    # 2. int
    wrapper = AdaptiveAvgPool2d(2)
    wrapper_out = wrapper(x_empty)
    assert wrapper_out.shape == (0, 3, 2, 2)
    # 3. tuple[None, int]
    wrapper = AdaptiveAvgPool2d((None, 2))
    wrapper_out = wrapper(x_empty)
    assert wrapper_out.shape == (0, 3, 4, 2)
    # 4. tuple[int, None]
    wrapper = AdaptiveAvgPool2d((2, None))
    wrapper_out = wrapper(x_empty)
    assert wrapper_out.shape == (0, 3, 2, 5)
    # Test the normal batch dimension
    x_normal = torch.randn(3, 3, 4, 5)
    wrapper = AdaptiveAvgPool2d((2, 2))
    ref = nn.AdaptiveAvgPool2d((2, 2))
    wrapper_out = wrapper(x_normal)
    ref_out = ref(x_normal)
    assert wrapper_out.shape == (3, 3, 2, 2)
    assert torch.equal(wrapper_out, ref_out)
    wrapper = AdaptiveAvgPool2d(2)
    ref = nn.AdaptiveAvgPool2d(2)
    wrapper_out = wrapper(x_normal)
    ref_out = ref(x_normal)
    assert wrapper_out.shape == (3, 3, 2, 2)
    assert torch.equal(wrapper_out, ref_out)
    wrapper = AdaptiveAvgPool2d((None, 2))
    ref = nn.AdaptiveAvgPool2d((None, 2))
    wrapper_out = wrapper(x_normal)
    ref_out = ref(x_normal)
    assert wrapper_out.shape == (3, 3, 4, 2)
    assert torch.equal(wrapper_out, ref_out)
    wrapper = AdaptiveAvgPool2d((2, None))
    ref = nn.AdaptiveAvgPool2d((2, None))
    wrapper_out = wrapper(x_normal)
    ref_out = ref(x_normal)
    assert wrapper_out.shape == (3, 3, 2, 5)
    assert torch.equal(wrapper_out, ref_out)
| 2,932 | 30.202128 | 70 | py |
ERD | ERD-main/tests/test_models/test_seg_heads/test_panoptic_fpn_head.py | import unittest
import torch
from mmengine.structures import PixelData
from mmengine.testing import assert_allclose
from mmdet.models.seg_heads import PanopticFPNHead
from mmdet.structures import DetDataSample
class TestPanopticFPNHead(unittest.TestCase):
    """Unit tests for ``PanopticFPNHead``: weight init, loss, predict."""
    def test_init_weights(self):
        """``init_weights`` must zero-initialise the conv_logits bias."""
        head = PanopticFPNHead(
            num_things_classes=2,
            num_stuff_classes=2,
            in_channels=32,
            inner_channels=32)
        head.init_weights()
        assert_allclose(head.conv_logits.bias.data,
                        torch.zeros_like(head.conv_logits.bias.data))
    def test_loss(self):
        """``loss`` on two feature levels + semantic GT returns a dict."""
        head = PanopticFPNHead(
            num_things_classes=2,
            num_stuff_classes=2,
            in_channels=32,
            inner_channels=32,
            start_level=0,
            end_level=1)
        # two FPN levels for a batch of 2
        x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
        data_sample1 = DetDataSample()
        data_sample1.gt_sem_seg = PixelData(
            sem_seg=torch.randint(0, 4, (1, 7, 8)))
        data_sample2 = DetDataSample()
        data_sample2.gt_sem_seg = PixelData(
            sem_seg=torch.randint(0, 4, (1, 7, 8)))
        batch_data_samples = [data_sample1, data_sample2]
        results = head.loss(x, batch_data_samples)
        self.assertIsInstance(results, dict)
    def test_predict(self):
        """``predict`` resizes to ``ori_shape`` only when ``rescale=True``;
        otherwise outputs stay at ``batch_input_shape``."""
        head = PanopticFPNHead(
            num_things_classes=2,
            num_stuff_classes=2,
            in_channels=32,
            inner_channels=32,
            start_level=0,
            end_level=1)
        x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
        img_meta1 = {
            'batch_input_shape': (16, 16),
            'img_shape': (14, 14),
            'ori_shape': (12, 12),
        }
        img_meta2 = {
            'batch_input_shape': (16, 16),
            'img_shape': (16, 16),
            'ori_shape': (16, 16),
        }
        batch_img_metas = [img_meta1, img_meta2]
        head.eval()
        with torch.no_grad():
            seg_preds = head.predict(x, batch_img_metas, rescale=False)
            self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
            self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
            seg_preds = head.predict(x, batch_img_metas, rescale=True)
            self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
            self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
ERD | ERD-main/tests/test_models/test_seg_heads/test_heuristic_fusion_head.py | import unittest
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead
class TestHeuristicFusionHead(unittest.TestCase):
    """Unit tests for ``HeuristicFusionHead`` (loss stub and predict)."""
    def test_loss(self):
        """The head is loss-free: ``loss()`` must return an empty dict."""
        head = HeuristicFusionHead(num_things_classes=2, num_stuff_classes=2)
        result = head.loss()
        self.assertTrue(not head.with_loss)
        self.assertDictEqual(result, dict())
    def test_predict(self):
        """``predict`` fuses instance masks and semantic logits into a
        panoptic map, both with and without thing instances."""
        test_cfg = Config(dict(mask_overlap=0.5, stuff_area_limit=1))
        head = HeuristicFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        mask_results = InstanceData()
        mask_results.bboxes = torch.tensor([[0, 0, 1, 1], [1, 1, 2, 2]])
        mask_results.labels = torch.tensor([0, 1])
        mask_results.scores = torch.tensor([0.8, 0.7])
        mask_results.masks = torch.tensor([[[1, 0], [0, 0]], [[0, 0],
                                                              [0, 1]]]).bool()
        seg_preds_list = [
            torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
                          [[0.6, 0.1], [0.1, 0.8]]])
        ]
        # expected panoptic ids: class + instance_id * INSTANCE_OFFSET
        target_list = [
            torch.tensor([[0 + 1 * INSTANCE_OFFSET, 2],
                          [3, 1 + 2 * INSTANCE_OFFSET]])
        ]
        results_list = head.predict([mask_results], seg_preds_list)
        for target, result in zip(target_list, results_list):
            assert_allclose(result.sem_seg[0], target)
        # test with no thing
        head = HeuristicFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        mask_results = InstanceData()
        mask_results.bboxes = torch.zeros((0, 4))
        mask_results.labels = torch.zeros((0, )).long()
        mask_results.scores = torch.zeros((0, ))
        mask_results.masks = torch.zeros((0, 2, 2), dtype=torch.bool)
        seg_preds_list = [
            torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
                          [[0.6, 0.1], [0.1, 0.8]]])
        ]
        target_list = [torch.tensor([[4, 2], [3, 4]])]
        results_list = head.predict([mask_results], seg_preds_list)
        for target, result in zip(target_list, results_list):
            assert_allclose(result.sem_seg[0], target)
ERD | ERD-main/tests/test_models/test_seg_heads/test_maskformer_fusion_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.config import Config
from mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead
from mmdet.structures import DetDataSample
class TestMaskFormerFusionHead(unittest.TestCase):
    """Unit tests for ``MaskFormerFusionHead`` (loss stub and predict)."""
    def test_loss(self):
        """The head is loss-free: ``loss()`` must return an empty dict."""
        head = MaskFormerFusionHead(num_things_classes=2, num_stuff_classes=2)
        result = head.loss()
        self.assertTrue(not head.with_loss)
        self.assertDictEqual(result, dict())
    def test_predict(self):
        """``predict`` honours the panoptic/instance/semantic switches and
        the ``rescale`` flag; semantic output is not implemented."""
        mask_cls_results = torch.rand((2, 10, 5))
        mask_pred_results = torch.rand((2, 10, 32, 32))
        batch_data_samples = [
            DetDataSample(
                metainfo={
                    'batch_input_shape': (32, 32),
                    'img_shape': (32, 30),
                    'ori_shape': (30, 30)
                }),
            DetDataSample(
                metainfo={
                    'batch_input_shape': (32, 32),
                    'img_shape': (32, 30),
                    'ori_shape': (29, 30)
                })
        ]
        # get panoptic and instance segmentation results
        test_cfg = Config(
            dict(
                panoptic_on=True,
                semantic_on=False,
                instance_on=True,
                max_per_image=10,
                object_mask_thr=0.3,
                iou_thr=0.3,
                filter_low_score=False))
        head = MaskFormerFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        results = head.predict(
            mask_cls_results,
            mask_pred_results,
            batch_data_samples,
            rescale=False)
        # rescale=False: outputs keep img_shape
        for i in range(len(results)):
            self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],
                             batch_data_samples[i].img_shape)
            self.assertEqual(results[i]['ins_results'].masks.shape[-2:],
                             batch_data_samples[i].img_shape)
        results = head.predict(
            mask_cls_results,
            mask_pred_results,
            batch_data_samples,
            rescale=True)
        # rescale=True: outputs are resized back to ori_shape
        for i in range(len(results)):
            self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],
                             batch_data_samples[i].ori_shape)
            self.assertEqual(results[i]['ins_results'].masks.shape[-2:],
                             batch_data_samples[i].ori_shape)
        # get empty results
        test_cfg = Config(
            dict(
                panoptic_on=False,
                semantic_on=False,
                instance_on=False,
                max_per_image=10,
                object_mask_thr=0.3,
                iou_thr=0.3,
                filter_low_score=False))
        head = MaskFormerFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        results = head.predict(
            mask_cls_results,
            mask_pred_results,
            batch_data_samples,
            rescale=True)
        for i in range(len(results)):
            self.assertEqual(results[i], dict())
        # semantic segmentation is not supported
        test_cfg = Config(
            dict(
                panoptic_on=False,
                semantic_on=True,
                instance_on=False,
                max_per_image=10,
                object_mask_thr=0.3,
                iou_thr=0.3,
                filter_low_score=False))
        head = MaskFormerFusionHead(
            num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
        with self.assertRaises(AssertionError):
            results = head.predict(
                mask_cls_results,
                mask_pred_results,
                batch_data_samples,
                rescale=True)
ERD | ERD-main/tests/test_models/test_losses/test_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmengine.utils import digit_version
from mmdet.models.losses import (BalancedL1Loss, CrossEntropyLoss, DiceLoss,
DistributionFocalLoss, FocalLoss,
GaussianFocalLoss,
KnowledgeDistillationKLDivLoss, L1Loss,
MSELoss, QualityFocalLoss, SeesawLoss,
SmoothL1Loss, VarifocalLoss)
from mmdet.models.losses.ghm_loss import GHMC, GHMR
from mmdet.models.losses.iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss,
EIoULoss, GIoULoss, IoULoss)
@pytest.mark.parametrize(
    'loss_class',
    [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, EIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
    """IoU-family losses must reduce to exactly zero when every sample
    weight is zero."""
    boxes_pred = torch.rand((10, 4))
    boxes_gt = torch.rand((10, 4))
    zero_weight = torch.zeros(10)
    assert loss_class()(boxes_pred, boxes_gt, zero_weight) == 0.
@pytest.mark.parametrize('loss_class', [
    BalancedL1Loss, BoundedIoULoss, CIoULoss, CrossEntropyLoss, DIoULoss,
    EIoULoss, FocalLoss, DistributionFocalLoss, MSELoss, SeesawLoss,
    GaussianFocalLoss, GIoULoss, QualityFocalLoss, IoULoss, L1Loss,
    VarifocalLoss, GHMR, GHMC, SmoothL1Loss, KnowledgeDistillationKLDivLoss,
    DiceLoss
])
def test_loss_with_reduction_override(loss_class):
    """An illegal ``reduction_override`` must raise ``AssertionError``.

    Only ``None``, ``'none'``, ``'mean'`` and ``'sum'`` are valid.
    """
    pred = torch.rand((10, 4))
    # Fixed: the original wrote `torch.rand((10, 4)),` with a stray
    # trailing comma, making `target` a 1-tuple. It went unnoticed only
    # because the loss asserts on `reduction_override` before using it.
    target = torch.rand((10, 4))
    weight = None
    with pytest.raises(AssertionError):
        # only reduction_override from [None, 'none', 'mean', 'sum']
        # is not allowed
        reduction_override = True
        loss_class()(
            pred, target, weight, reduction_override=reduction_override)
@pytest.mark.parametrize('loss_class', [QualityFocalLoss])
@pytest.mark.parametrize('activated', [False, True])
def test_QualityFocalLoss_Loss(loss_class, activated):
    """The tuple target ``(label, score)`` and the equivalent dense
    ``one_hot * score`` target must yield identical QFL values."""
    num_samples, num_classes = 4, 5
    pred = torch.rand(num_samples, num_classes)
    label = torch.Tensor([0, 1, 2, 0]).long()
    quality = torch.rand(num_samples)
    loss_fn = loss_class(activated=activated)
    loss_from_tuple = loss_fn(pred, (label, quality))
    assert isinstance(loss_from_tuple, torch.Tensor)
    dense_target = torch.nn.functional.one_hot(
        label, num_classes) * quality.reshape(num_samples, 1)
    loss_from_dense = loss_fn(pred, dense_target)
    assert isinstance(loss_from_dense, torch.Tensor)
    assert loss_from_dense == loss_from_tuple
@pytest.mark.parametrize('loss_class', [
    IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, EIoULoss, MSELoss,
    L1Loss, SmoothL1Loss, BalancedL1Loss
])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_regression_losses(loss_class, input_shape):
    """Smoke-test regression losses on normal and empty batches, covering
    weights, reduction overrides and ``avg_factor`` handling."""
    pred = torch.rand(input_shape)
    target = torch.rand(input_shape)
    weight = torch.rand(input_shape)
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with weight
    loss = loss_class()(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        reduction_override = 'sum'
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [CrossEntropyLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)])
def test_classification_losses(loss_class, input_shape):
    """Smoke-test classification losses on normal and empty batches,
    covering reduction overrides and ``avg_factor`` handling."""
    if input_shape[0] == 0 and digit_version(
            torch.__version__) < digit_version('1.5.0'):
        pytest.skip(
            f'CELoss in PyTorch {torch.__version__} does not support empty'
            f'tensor.')
    pred = torch.rand(input_shape)
    target = torch.randint(0, 5, (input_shape[0], ))
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        reduction_override = 'sum'
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [FocalLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (3, 5, 40, 40)])
def test_FocalLoss_loss(loss_class, input_shape):
    """Smoke-test FocalLoss on both 2-D logits and 4-D (NCHW) logits with
    a matching one-hot target."""
    pred = torch.rand(input_shape)
    target = torch.randint(0, 5, (input_shape[0], ))
    if len(input_shape) == 4:
        # build an NCHW one-hot target for the dense-prediction case
        B, N, W, H = input_shape
        target = F.one_hot(torch.randint(0, 5, (B * W * H, )),
                           5).reshape(B, W, H, N).permute(0, 3, 1, 2)
    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        reduction_override = 'sum'
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [GHMR])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_GHMR_loss(loss_class, input_shape):
    """GHMR must run on both regular and empty batches and return a
    tensor."""
    pred, target, weight = (torch.rand(input_shape) for _ in range(3))
    result = loss_class()(pred, target, weight)
    assert isinstance(result, torch.Tensor)
@pytest.mark.parametrize('use_sigmoid', [True, False])
@pytest.mark.parametrize('reduction', ['sum', 'mean', None])
@pytest.mark.parametrize('avg_non_ignore', [True, False])
def test_loss_with_ignore_index(use_sigmoid, reduction, avg_non_ignore):
    """CrossEntropyLoss must skip ``ignore_index`` targets, whether the
    index is set at init or per forward call, and with/without averaging
    over the non-ignored elements only."""
    # Test cross_entropy loss
    loss_class = CrossEntropyLoss(
        use_sigmoid=use_sigmoid,
        use_mask=False,
        ignore_index=255,
        avg_non_ignore=avg_non_ignore)
    pred = torch.rand((10, 5))
    target = torch.randint(0, 5, (10, ))
    # mark two random samples as ignored
    ignored_indices = torch.randint(0, 10, (2, ), dtype=torch.long)
    target[ignored_indices] = 255
    # Test loss forward with default ignore
    loss_with_ignore = loss_class(pred, target, reduction_override=reduction)
    assert isinstance(loss_with_ignore, torch.Tensor)
    # Test loss forward with forward ignore
    target[ignored_indices] = 255
    loss_with_forward_ignore = loss_class(
        pred, target, ignore_index=255, reduction_override=reduction)
    assert isinstance(loss_with_forward_ignore, torch.Tensor)
    # Verify correctness
    if avg_non_ignore:
        # manually remove the ignored elements
        not_ignored_indices = (target != 255)
        pred = pred[not_ignored_indices]
        target = target[not_ignored_indices]
    loss = loss_class(pred, target, reduction_override=reduction)
    assert torch.allclose(loss, loss_with_ignore)
    assert torch.allclose(loss, loss_with_forward_ignore)
    # test ignore all target
    pred = torch.rand((10, 5))
    target = torch.ones((10, ), dtype=torch.long) * 255
    loss = loss_class(pred, target, reduction_override=reduction)
    assert loss == 0
@pytest.mark.parametrize('naive_dice', [True, False])
def test_dice_loss(naive_dice):
    """Smoke-test DiceLoss (naive and standard variants): forward paths,
    reductions, ``avg_factor``, and the invalid-argument error cases."""
    loss_class = DiceLoss
    pred = torch.rand((10, 4, 4))
    target = torch.rand((10, 4, 4))
    weight = torch.rand((10))
    # Test loss forward
    loss = loss_class(naive_dice=naive_dice)(pred, target)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with weight
    loss = loss_class(naive_dice=naive_dice)(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with reduction_override
    loss = loss_class(naive_dice=naive_dice)(
        pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    # Test loss forward with avg_factor
    loss = loss_class(naive_dice=naive_dice)(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)
    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        reduction_override = 'sum'
        loss_class(naive_dice=naive_dice)(
            pred, target, avg_factor=10, reduction_override=reduction_override)
    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        loss_class(naive_dice=naive_dice)(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
    # Test loss forward with has_acted=False and use_sigmoid=False
    with pytest.raises(NotImplementedError):
        loss_class(
            use_sigmoid=False, activate=True, naive_dice=naive_dice)(pred,
                                                                     target)
    # Test loss forward with weight.ndim != loss.ndim
    with pytest.raises(AssertionError):
        weight = torch.rand((2, 8))
        loss_class(naive_dice=naive_dice)(pred, target, weight)
    # Test loss forward with len(weight) != len(pred)
    with pytest.raises(AssertionError):
        weight = torch.rand((8))
        loss_class(naive_dice=naive_dice)(pred, target, weight)
ERD | ERD-main/tests/test_models/test_losses/test_gaussian_focal_loss.py | import unittest
import torch
from mmdet.models.losses import GaussianFocalLoss
class TestGaussianFocalLoss(unittest.TestCase):
    """Unit tests for ``GaussianFocalLoss``."""
    def test_forward(self):
        """Check forward paths: default, avg_factor, reductions, and the
        sparse ``pos_inds``/``pos_labels`` variant."""
        pred = torch.rand((10, 4))
        target = torch.rand((10, 4))
        gaussian_focal_loss = GaussianFocalLoss()
        loss1 = gaussian_focal_loss(pred, target)
        self.assertIsInstance(loss1, torch.Tensor)
        loss2 = gaussian_focal_loss(pred, target, avg_factor=0.5)
        self.assertIsInstance(loss2, torch.Tensor)
        # test reduction
        gaussian_focal_loss = GaussianFocalLoss(reduction='none')
        loss = gaussian_focal_loss(pred, target)
        self.assertTrue(loss.shape == (10, 4))
        # test reduction_override
        loss = gaussian_focal_loss(pred, target, reduction_override='mean')
        self.assertTrue(loss.ndim == 0)
        # Only supports None, 'none', 'mean', 'sum'
        with self.assertRaises(AssertionError):
            gaussian_focal_loss(pred, target, reduction_override='max')
        # test pos_inds
        pos_inds = (torch.rand(5) * 8).long()
        pos_labels = (torch.rand(5) * 2).long()
        gaussian_focal_loss = GaussianFocalLoss()
        loss = gaussian_focal_loss(pred, target, pos_inds, pos_labels)
        self.assertIsInstance(loss, torch.Tensor)
ERD | ERD-main/tests/test_models/test_data_preprocessors/test_data_preprocessor.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.logging import MessageHub
from mmdet.models.data_preprocessors import (BatchFixedSizePad,
BatchSyncRandomResize,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs
class TestDetDataPreprocessor(TestCase):
    """Unit tests for ``DetDataPreprocessor``: init validation, forward
    (normalize / channel conversion / padding), and batch augments."""
    def test_init(self):
        """Constructor validation: mean/std pairing and the mutually
        exclusive channel-conversion flags."""
        # test mean is None
        processor = DetDataPreprocessor()
        self.assertTrue(not hasattr(processor, 'mean'))
        self.assertTrue(processor._enable_normalize is False)
        # test mean is not None
        processor = DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])
        self.assertTrue(hasattr(processor, 'mean'))
        self.assertTrue(hasattr(processor, 'std'))
        self.assertTrue(processor._enable_normalize)
        # please specify both mean and std
        with self.assertRaises(AssertionError):
            DetDataPreprocessor(mean=[0, 0, 0])
        # bgr2rgb and rgb2bgr cannot be set to True at the same time
        with self.assertRaises(AssertionError):
            DetDataPreprocessor(bgr_to_rgb=True, rgb_to_bgr=True)
    def test_forward(self):
        """Forward must stack, channel-convert, pad to the batch max
        (optionally to ``pad_size_divisor``), and pad masks/seg maps."""
        processor = DetDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])
        data = {
            'inputs': [torch.randint(0, 256, (3, 11, 10))],
            'data_samples': [DetDataSample()]
        }
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))
        self.assertEqual(len(batch_data_samples), 1)
        # test channel_conversion
        processor = DetDataPreprocessor(
            mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (1, 3, 11, 10))
        self.assertEqual(len(batch_data_samples), 1)
        # test padding
        data = {
            'inputs': [
                torch.randint(0, 256, (3, 10, 11)),
                torch.randint(0, 256, (3, 9, 14))
            ]
        }
        processor = DetDataPreprocessor(
            mean=[0., 0., 0.], std=[1., 1., 1.], bgr_to_rgb=True)
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (2, 3, 10, 14))
        self.assertIsNone(batch_data_samples)
        # test pad_size_divisor
        data = {
            'inputs': [
                torch.randint(0, 256, (3, 10, 11)),
                torch.randint(0, 256, (3, 9, 24))
            ],
            'data_samples': [DetDataSample()] * 2
        }
        processor = DetDataPreprocessor(
            mean=[0., 0., 0.], std=[1., 1., 1.], pad_size_divisor=5)
        out_data = processor(data)
        batch_inputs, batch_data_samples = out_data['inputs'], out_data[
            'data_samples']
        self.assertEqual(batch_inputs.shape, (2, 3, 10, 25))
        self.assertEqual(len(batch_data_samples), 2)
        for data_samples, expected_shape in zip(batch_data_samples,
                                                [(10, 15), (10, 25)]):
            self.assertEqual(data_samples.pad_shape, expected_shape)
        # test pad_mask=True and pad_seg=True
        processor = DetDataPreprocessor(
            pad_mask=True, mask_pad_value=0, pad_seg=True, seg_pad_value=0)
        packed_inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]],
            with_mask=True,
            with_semantic=True,
            use_box_type=True)
        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 10, 11))
        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 9, 24))
        # pad value is 0, so padding must not change the sums
        mask_pad_sums = [
            x.gt_instances.masks.masks.sum()
            for x in packed_inputs['data_samples']
        ]
        seg_pad_sums = [
            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']
        ]
        batch_data_samples = processor(
            packed_inputs, training=True)['data_samples']
        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(
                batch_data_samples, [(10, 24), (10, 24)], mask_pad_sums,
                seg_pad_sums):
            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),
                             mask_pad_sum)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),
                             seg_pad_sum)
    def test_batch_sync_random_resize(self):
        """``BatchSyncRandomResize`` resizes only during training and only
        after the configured iteration interval."""
        processor = DetDataPreprocessor(batch_augments=[
            dict(
                type='BatchSyncRandomResize',
                random_size_range=(320, 320),
                size_divisor=32,
                interval=1)
        ])
        self.assertTrue(
            isinstance(processor.batch_augments[0], BatchSyncRandomResize))
        message_hub = MessageHub.get_instance('test_batch_sync_random_resize')
        message_hub.update_info('iter', 0)
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)
        batch_inputs = processor(packed_inputs, training=True)['inputs']
        self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))
        # resize after one iter
        message_hub.update_info('iter', 1)
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)
        batch_inputs = processor(packed_inputs, training=True)['inputs']
        self.assertEqual(batch_inputs.shape, (2, 3, 320, 320))
        packed_inputs = demo_mm_inputs(
            2, [[3, 128, 128], [3, 128, 128]], use_box_type=True)
        batch_inputs = processor(packed_inputs, training=False)['inputs']
        self.assertEqual(batch_inputs.shape, (2, 3, 128, 128))
    def test_batch_fixed_size_pad(self):
        """``BatchFixedSizePad`` pads images/masks/seg maps to a fixed
        size; no padding happens when the target size is smaller than
        the batch shape."""
        # test pad_mask=False and pad_seg=False
        processor = DetDataPreprocessor(
            pad_mask=False,
            pad_seg=False,
            batch_augments=[
                dict(
                    type='BatchFixedSizePad',
                    size=(32, 32),
                    img_pad_value=0,
                    pad_mask=True,
                    mask_pad_value=0,
                    pad_seg=True,
                    seg_pad_value=0)
            ])
        self.assertTrue(
            isinstance(processor.batch_augments[0], BatchFixedSizePad))
        packed_inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]],
            with_mask=True,
            with_semantic=True,
            use_box_type=True)
        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 10, 11))
        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 9, 24))
        mask_pad_sums = [
            x.gt_instances.masks.masks.sum()
            for x in packed_inputs['data_samples']
        ]
        seg_pad_sums = [
            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']
        ]
        data = processor(packed_inputs, training=True)
        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']
        self.assertEqual(batch_inputs.shape[-2:], (32, 32))
        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(
                batch_data_samples, [(32, 32), (32, 32)], mask_pad_sums,
                seg_pad_sums):
            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),
                             mask_pad_sum)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),
                             seg_pad_sum)
        # test pad_mask=True and pad_seg=True
        processor = DetDataPreprocessor(
            pad_mask=True,
            pad_seg=True,
            seg_pad_value=0,
            mask_pad_value=0,
            batch_augments=[
                dict(
                    type='BatchFixedSizePad',
                    size=(32, 32),
                    img_pad_value=0,
                    pad_mask=True,
                    mask_pad_value=0,
                    pad_seg=True,
                    seg_pad_value=0)
            ])
        self.assertTrue(
            isinstance(processor.batch_augments[0], BatchFixedSizePad))
        packed_inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]],
            with_mask=True,
            with_semantic=True,
            use_box_type=True)
        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 10, 11))
        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 9, 24))
        mask_pad_sums = [
            x.gt_instances.masks.masks.sum()
            for x in packed_inputs['data_samples']
        ]
        seg_pad_sums = [
            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']
        ]
        data = processor(packed_inputs, training=True)
        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']
        self.assertEqual(batch_inputs.shape[-2:], (32, 32))
        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(
                batch_data_samples, [(32, 32), (32, 32)], mask_pad_sums,
                seg_pad_sums):
            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),
                             mask_pad_sum)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),
                             seg_pad_sum)
        # test negative pad/no pad
        processor = DetDataPreprocessor(
            pad_mask=True,
            pad_seg=True,
            seg_pad_value=0,
            mask_pad_value=0,
            batch_augments=[
                dict(
                    type='BatchFixedSizePad',
                    size=(5, 5),
                    img_pad_value=0,
                    pad_mask=True,
                    mask_pad_value=1,
                    pad_seg=True,
                    seg_pad_value=1)
            ])
        self.assertTrue(
            isinstance(processor.batch_augments[0], BatchFixedSizePad))
        packed_inputs = demo_mm_inputs(
            2, [[3, 10, 11], [3, 9, 24]],
            with_mask=True,
            with_semantic=True,
            use_box_type=True)
        packed_inputs['data_samples'][0].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 10, 11))
        packed_inputs['data_samples'][1].gt_sem_seg.sem_seg = torch.randint(
            0, 256, (1, 9, 24))
        mask_pad_sums = [
            x.gt_instances.masks.masks.sum()
            for x in packed_inputs['data_samples']
        ]
        seg_pad_sums = [
            x.gt_sem_seg.sem_seg.sum() for x in packed_inputs['data_samples']
        ]
        data = processor(packed_inputs, training=True)
        batch_inputs, batch_data_samples = data['inputs'], data['data_samples']
        self.assertEqual(batch_inputs.shape[-2:], (10, 24))
        for data_samples, expected_shape, mask_pad_sum, seg_pad_sum in zip(
                batch_data_samples, [(10, 24), (10, 24)], mask_pad_sums,
                seg_pad_sums):
            self.assertEqual(data_samples.gt_instances.masks.masks.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.shape[-2:],
                             expected_shape)
            self.assertEqual(data_samples.gt_instances.masks.masks.sum(),
                             mask_pad_sum)
            self.assertEqual(data_samples.gt_sem_seg.sem_seg.sum(),
                             seg_pad_sum)
class TestMultiBranchDataPreprocessor(TestCase):

    def setUp(self):
        """Build the preprocessor config plus multi-branch and single-branch
        sample data used by every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.data_preprocessor = dict(
            type='DetDataPreprocessor',
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375],
            bgr_to_rgb=True,
            pad_size_divisor=32)
        # Semi-supervised style input: one supervised image plus two
        # unsupervised teacher/student images of differing sizes.
        sup_imgs = [torch.randint(0, 256, (3, 224, 224))]
        teacher_imgs = [
            torch.randint(0, 256, (3, 400, 600)),
            torch.randint(0, 256, (3, 600, 400))
        ]
        student_imgs = [
            torch.randint(0, 256, (3, 700, 500)),
            torch.randint(0, 256, (3, 500, 700))
        ]
        self.multi_data = {
            'inputs': {
                'sup': sup_imgs,
                'unsup_teacher': teacher_imgs,
                'unsup_student': student_imgs
            },
            'data_samples': {
                'sup': [DetDataSample()],
                'unsup_teacher': [DetDataSample(), DetDataSample()],
                'unsup_student': [DetDataSample(), DetDataSample()],
            }
        }
        # Plain single-branch data used for the testing-mode path.
        self.data = {
            'inputs': [torch.randint(0, 256, (3, 224, 224))],
            'data_samples': [DetDataSample()]
        }

    def test_multi_data_preprocessor(self):
        processor = MultiBranchDataPreprocessor(self.data_preprocessor)
        # Training mode: every branch is batched and padded separately
        # (pad_size_divisor=32 rounds each branch up to a multiple of 32).
        multi_out = processor(self.multi_data, training=True)
        self.assertEqual(multi_out['inputs']['sup'].shape, (1, 3, 224, 224))
        self.assertEqual(multi_out['inputs']['unsup_teacher'].shape,
                         (2, 3, 608, 608))
        self.assertEqual(multi_out['inputs']['unsup_student'].shape,
                         (2, 3, 704, 704))
        for branch, expected_num in (('sup', 1), ('unsup_teacher', 2),
                                     ('unsup_student', 2)):
            self.assertEqual(
                len(multi_out['data_samples'][branch]), expected_num)
        # Testing mode: plain single-branch data is processed as usual.
        out = processor(self.data)
        self.assertEqual(out['inputs'].shape, (1, 3, 224, 224))
        self.assertEqual(len(out['data_samples']), 1)
| 15,083 | 41.134078 | 79 | py |
ERD | ERD-main/tests/test_models/test_data_preprocessors/test_boxinst_preprocessor.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.models.data_preprocessors import BoxInstDataPreprocessor
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs
class TestBoxInstDataPreprocessor(TestCase):

    def test_forward(self):
        processor = BoxInstDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])

        # Evaluation mode: inputs are batched and samples pass through.
        eval_data = {
            'inputs': [torch.randint(0, 256, (3, 256, 256))],
            'data_samples': [DetDataSample()]
        }
        out_data = processor(eval_data)
        self.assertEqual(out_data['inputs'].shape, (1, 3, 256, 256))
        self.assertEqual(len(out_data['data_samples']), 1)

        # Training mode without gt bboxes: mask / pairwise-mask fields
        # are created but stay empty.
        packed_inputs = demo_mm_inputs(
            2, [[3, 256, 256], [3, 128, 128]], num_items=[0, 0])
        out_data = processor(packed_inputs, training=True)
        batch_samples = out_data['data_samples']
        self.assertEqual(out_data['inputs'].shape, (2, 3, 256, 256))
        self.assertEqual(len(batch_samples), 2)
        for sample in batch_samples:
            self.assertEqual(len(sample.gt_instances.masks), 0)
            self.assertEqual(len(sample.gt_instances.pairwise_masks), 0)

        # Training mode with gt bboxes: one mask and one pairwise mask
        # per ground-truth instance.
        packed_inputs = demo_mm_inputs(
            2, [[3, 256, 256], [3, 128, 128]], num_items=[2, 1])
        out_data = processor(packed_inputs, training=True)
        batch_samples = out_data['data_samples']
        self.assertEqual(out_data['inputs'].shape, (2, 3, 256, 256))
        self.assertEqual(len(batch_samples), 2)
        for sample, num_items in zip(batch_samples, (2, 1)):
            self.assertEqual(len(sample.gt_instances.masks), num_items)
            self.assertEqual(
                len(sample.gt_instances.pairwise_masks), num_items)
| 2,360 | 38.35 | 74 | py |
ERD | ERD-main/tests/test_models/test_utils/test_model_misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.autograd import gradcheck
from mmdet.models.utils import interpolate_as, sigmoid_geometric_mean
def test_interpolate_as():
    """Interpolating a source tensor to the spatial size of a target."""
    source = torch.rand((1, 5, 4, 4))
    target = torch.rand((1, 1, 16, 16))
    # 4D source with 4D target.
    assert interpolate_as(source, target).shape == torch.Size((1, 5, 16, 16))
    # A 3D target is accepted as well.
    out = interpolate_as(source, target.squeeze(0))
    assert out.shape == torch.Size((1, 5, 16, 16))
    # A 3D source yields a 3D result.
    out = interpolate_as(source.squeeze(0), target)
    assert out.shape == torch.Size((5, 16, 16))
    # The target may also be a numpy array.
    np_target = np.random.rand(16, 16)
    out = interpolate_as(source.squeeze(0), np_target)
    assert out.shape == torch.Size((5, 16, 16))
def test_sigmoid_geometric_mean():
    """Gradient-check the custom autograd function."""
    # gradcheck requires double-precision inputs with gradients enabled.
    inputs = tuple(
        torch.randn(20, 20, dtype=torch.double, requires_grad=True)
        for _ in range(2))
    assert gradcheck(sigmoid_geometric_mean, inputs, eps=1e-6, atol=1e-4)
| 1,149 | 30.081081 | 73 | py |
ERD | ERD-main/tests/test_models/test_utils/test_misc.py | import copy
import pytest
import torch
from mmengine.structures import InstanceData
from mmdet.models.utils import (empty_instances, filter_gt_instances,
rename_loss_dict, reweight_loss_dict,
unpack_gt_instances)
from mmdet.testing import demo_mm_inputs
def test_parse_gt_instance_info():
    """Unpacked gt lists must stay aligned with the input samples."""
    packed_inputs = demo_mm_inputs()['data_samples']
    unpacked = unpack_gt_instances(packed_inputs)
    batch_gt_instances, batch_gt_instances_ignore, batch_img_metas = unpacked
    for part in (batch_gt_instances, batch_gt_instances_ignore,
                 batch_img_metas):
        assert len(part) == len(packed_inputs)
def test_process_empty_roi():
    """Empty instance results for bbox and mask tasks."""
    num_imgs = 2
    batch_img_metas = [{'ori_shape': (10, 12)}] * num_imgs
    device = torch.device('cpu')

    # Empty bbox results: one empty InstanceData with (0, 4) boxes per image.
    results_list = empty_instances(batch_img_metas, device, task_type='bbox')
    assert len(results_list) == num_imgs
    for results in results_list:
        assert isinstance(results, InstanceData)
        assert len(results) == 0
        assert torch.allclose(results.bboxes, torch.zeros(0, 4, device=device))

    # Empty mask results are attached to the existing instance results and
    # sized after each image's ``ori_shape``.
    results_list = empty_instances(
        batch_img_metas,
        device,
        task_type='mask',
        instance_results=results_list,
        mask_thr_binary=0.5)
    assert len(results_list) == num_imgs
    for results in results_list:
        assert isinstance(results, InstanceData)
        assert len(results) == 0
        assert results.masks.shape == (0, 10, 12)

    # batch_img_metas and instance_results must have the same length.
    with pytest.raises(AssertionError):
        empty_instances(
            batch_img_metas,
            device,
            task_type='mask',
            instance_results=[results_list[0]] * 3)
def test_filter_gt_instances():
    """Test filtering gt instances, first by score then by box size."""
    packed_inputs = demo_mm_inputs()['data_samples']
    score_thr = 0.7
    # Score-based filtering raises here — presumably because the demo
    # samples carry no ``scores`` field yet (it is added just below).
    with pytest.raises(AssertionError):
        filter_gt_instances(packed_inputs, score_thr=score_thr)
    # filter no instances by score: every score is 1.0 >= score_thr
    for inputs in packed_inputs:
        inputs.gt_instances.scores = torch.ones_like(
            inputs.gt_instances.labels).float()
    filtered_packed_inputs = filter_gt_instances(
        copy.deepcopy(packed_inputs), score_thr=score_thr)
    for filtered_inputs, inputs in zip(filtered_packed_inputs, packed_inputs):
        assert len(filtered_inputs.gt_instances) == len(inputs.gt_instances)
    # filter all instances: every score is 0.0 < score_thr
    for inputs in packed_inputs:
        inputs.gt_instances.scores = torch.zeros_like(
            inputs.gt_instances.labels).float()
    filtered_packed_inputs = filter_gt_instances(
        copy.deepcopy(packed_inputs), score_thr=score_thr)
    for filtered_inputs in filtered_packed_inputs:
        assert len(filtered_inputs.gt_instances) == 0
    # fresh samples (without scores) for the size-based filtering below
    packed_inputs = demo_mm_inputs()['data_samples']
    # filter no instances by size: a (0, 0) threshold keeps every box
    wh_thr = (0, 0)
    filtered_packed_inputs = filter_gt_instances(
        copy.deepcopy(packed_inputs), wh_thr=wh_thr)
    for filtered_inputs, inputs in zip(filtered_packed_inputs, packed_inputs):
        assert len(filtered_inputs.gt_instances) == len(inputs.gt_instances)
    # filter all instances by size: raise the threshold to at least the
    # largest image size so that no box can pass
    for inputs in packed_inputs:
        img_shape = inputs.img_shape
        wh_thr = (max(wh_thr[0], img_shape[0]), max(wh_thr[1], img_shape[1]))
    filtered_packed_inputs = filter_gt_instances(
        copy.deepcopy(packed_inputs), wh_thr=wh_thr)
    for filtered_inputs in filtered_packed_inputs:
        assert len(filtered_inputs.gt_instances) == 0
def test_rename_loss_dict():
    """Renaming prepends the prefix while keeping every loss value."""
    prefix = 'sup_'
    losses = {'cls_loss': torch.tensor(2.), 'reg_loss': torch.tensor(1.)}
    renamed = rename_loss_dict(prefix, losses)
    for name, value in losses.items():
        assert renamed[prefix + name] == value
def test_reweight_loss_dict():
    """Reweighting multiplies every loss value by the given weight."""
    weight = 4
    losses = {'cls_loss': torch.tensor(2.), 'reg_loss': torch.tensor(1.)}
    reweighted = reweight_loss_dict(copy.deepcopy(losses), weight)
    for name, value in losses.items():
        assert reweighted[name] == value * weight
| 4,139 | 36.297297 | 79 | py |
ERD | ERD-main/tests/test_models/test_tta/test_det_tta.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import ConfigDict
from mmdet.models import DetTTAModel
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDetTTAModel(TestCase):

    def setUp(self):
        register_all_modules()

    def test_det_tta_model(self):
        """Smoke-test one TTA step over a batch of augmented views."""
        detector_cfg = get_detector_cfg(
            'retinanet/retinanet_r18_fpn_1x_coco.py')
        cfg = ConfigDict(
            type='DetTTAModel',
            module=detector_cfg,
            tta_cfg=dict(
                nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
        model: DetTTAModel = MODELS.build(cfg)

        # Twelve augmented views of one 100x100 image, with growing scales
        # and alternating flip flags / flip directions.
        directions = ['horizontal', 'vertical']
        imgs = []
        data_samples = []
        for idx in range(12):
            size = 100 + 10 * idx
            flip_direction = directions[0] if idx % 3 == 0 else directions[1]
            imgs.append(torch.randn(1, 3, size, size))
            data_samples.append([
                DetDataSample(
                    metainfo=dict(
                        ori_shape=(100, 100),
                        img_shape=(size, size),
                        scale_factor=(size / 100, size / 100),
                        flip=(idx % 2 == 0),
                        flip_direction=flip_direction), )
            ])
        model.test_step(dict(inputs=imgs, data_samples=data_samples))
| 1,572 | 31.102041 | 75 | py |
ERD | ERD-main/tests/test_models/test_necks/test_ct_resnet_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmdet.models.necks import CTResNetNeck
class TestCTResNetNeck(unittest.TestCase):

    def test_init(self):
        # num_deconv_filters and num_deconv_kernels must have equal length.
        with self.assertRaises(AssertionError):
            CTResNetNeck(
                in_channels=10,
                num_deconv_filters=(10, 10),
                num_deconv_kernels=(4, ))
        neck = CTResNetNeck(
            in_channels=16,
            num_deconv_filters=(8, 8),
            num_deconv_kernels=(4, 4),
            use_dcn=False)
        neck.init_weights()

    def test_forward(self):
        in_channels = 16
        num_filters = (8, 8)
        num_kernels = (4, 4)
        feat = torch.rand(1, 16, 4, 4)
        neck = CTResNetNeck(
            in_channels=in_channels,
            num_deconv_filters=num_filters,
            num_deconv_kernels=num_kernels,
            use_dcn=False)
        # The neck only accepts a list/tuple of feature maps.
        with self.assertRaises(AssertionError):
            neck(feat)
        # Two deconv stages upsample 4x4 to 16x16.
        out_feat = neck([feat])[0]
        self.assertEqual(out_feat.shape, (1, num_filters[-1], 16, 16))

        if torch.cuda.is_available():
            # The DCN variant (default use_dcn=True) requires CUDA.
            dcn_neck = CTResNetNeck(
                in_channels=in_channels,
                num_deconv_filters=num_filters,
                num_deconv_kernels=num_kernels).cuda()
            out_feat = dcn_neck([feat.cuda()])[0]
            self.assertEqual(out_feat.shape, (1, num_filters[-1], 16, 16))
| 1,678 | 30.092593 | 74 | py |
ERD | ERD-main/tests/test_models/test_necks/test_necks.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.necks import (FPG, FPN, FPN_CARAFE, NASFCOS_FPN, NASFPN, SSH,
YOLOXPAFPN, ChannelMapper, DilatedEncoder,
DyHead, SSDNeck, YOLOV3Neck)
def test_fpn():
    """Tests FPN config validation and forward output shapes.

    Fixes a latent defect in the original test: the per-level shape checks
    were bare comparisons (``outs[i].shape[1] == out_channels``) whose
    results were discarded, so they verified nothing. They are now real
    asserts with the correct expected strides.
    """
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8

    def _assert_outs_shape(fpn_model, outs):
        # All forwarded models below use start_level=1, so output level i
        # sits at stride 2**(i + 1) relative to the s x s input.
        assert len(outs) == fpn_model.num_outs
        for i in range(fpn_model.num_outs):
            assert outs[i].shape[1] == out_channels
            assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))

    # end_level=-1 is equal to end_level=3
    FPN(in_channels=in_channels,
        out_channels=out_channels,
        start_level=0,
        end_level=-1,
        num_outs=5)
    FPN(in_channels=in_channels,
        out_channels=out_channels,
        start_level=0,
        end_level=3,
        num_outs=5)
    # `num_outs` is not equal to end_level - start_level + 1
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=2,
            num_outs=3)
    # `num_outs` is not equal to len(in_channels) - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            num_outs=2)
    # `end_level` is larger than len(in_channels) - 1
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=4,
            num_outs=2)
    # `num_outs` is not equal to end_level - start_level
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            end_level=3,
            num_outs=1)
    # Invalid `add_extra_convs` option
    with pytest.raises(AssertionError):
        FPN(in_channels=in_channels,
            out_channels=out_channels,
            start_level=1,
            add_extra_convs='on_xxx',
            num_outs=5)

    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        num_outs=5)
    # FPN expects multiple levels of features per image
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    outs = fpn_model(feats)
    # `add_extra_convs=True` is normalized to the 'on_input' source
    assert fpn_model.add_extra_convs == 'on_input'
    _assert_outs_shape(fpn_model, outs)

    # Tests for fpn with no extra convs (pooling is used instead)
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=False,
        num_outs=5)
    outs = fpn_model(feats)
    assert not fpn_model.add_extra_convs
    _assert_outs_shape(fpn_model, outs)

    # Tests for fpn with lateral bns
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        no_norm_on_lateral=False,
        norm_cfg=dict(type='BN', requires_grad=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _assert_outs_shape(fpn_model, outs)
    # at least one BatchNorm layer must have been created
    assert any(isinstance(m, _BatchNorm) for m in fpn_model.modules())

    # Bilinear upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(mode='bilinear', align_corners=True),
        num_outs=5)
    outs = fpn_model(feats)
    assert fpn_model.add_extra_convs == 'on_input'
    _assert_outs_shape(fpn_model, outs)

    # Scale factor instead of fixed upsample size upsample
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        start_level=1,
        add_extra_convs=True,
        upsample_cfg=dict(scale_factor=2),
        num_outs=5)
    outs = fpn_model(feats)
    _assert_outs_shape(fpn_model, outs)

    # Extra convs source is 'inputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_input',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_input'
    outs = fpn_model(feats)
    _assert_outs_shape(fpn_model, outs)

    # Extra convs source is 'laterals'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_lateral',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_lateral'
    outs = fpn_model(feats)
    _assert_outs_shape(fpn_model, outs)

    # Extra convs source is 'outputs'
    fpn_model = FPN(
        in_channels=in_channels,
        out_channels=out_channels,
        add_extra_convs='on_output',
        start_level=1,
        num_outs=5)
    assert fpn_model.add_extra_convs == 'on_output'
    outs = fpn_model(feats)
    _assert_outs_shape(fpn_model, outs)
def test_channel_mapper():
    """Tests ChannelMapper.

    Fixes a latent defect in the original test: the per-level shape checks
    were bare comparisons whose results were discarded, so they verified
    nothing. They are now real asserts.
    """
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 8
    kernel_size = 3
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]
    # in_channels must be a list
    with pytest.raises(AssertionError):
        channel_mapper = ChannelMapper(
            in_channels=10, out_channels=out_channels, kernel_size=kernel_size)
    # the length of channel_mapper's inputs must be equal to the length of
    # in_channels
    with pytest.raises(AssertionError):
        channel_mapper = ChannelMapper(
            in_channels=in_channels[:-1],
            out_channels=out_channels,
            kernel_size=kernel_size)
        channel_mapper(feats)
    channel_mapper = ChannelMapper(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size)
    outs = channel_mapper(feats)
    assert len(outs) == len(feats)
    for i in range(len(feats)):
        # ChannelMapper only remaps channels; spatial sizes are preserved.
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_dilated_encoder():
    """The encoder remaps channels while keeping the spatial size."""
    num_in, num_out, spatial = 16, 32, 34
    encoder = DilatedEncoder(num_in, num_out, 16, 2, [2, 4, 6, 8])
    inputs = [torch.rand(1, num_in, spatial, spatial)]
    result = encoder(inputs)[0]
    assert result.shape == (1, num_out, spatial, spatial)
def test_yolov3_neck():
    """YOLOv3 neck config validation and forward output shapes."""
    # num_scales, in_channels and out_channels must share the same length.
    with pytest.raises(AssertionError):
        YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4])
    # The number of input feature maps must equal num_scales.
    with pytest.raises(AssertionError):
        neck = YOLOV3Neck(
            num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2])
        feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16))
        neck(feats)

    def _run_neck(in_channels, out_channels, s=32):
        # Build feats from coarse to fine exactly like the original test
        # (reversed index order) and check every output level's shape.
        num_levels = len(in_channels)
        feat_sizes = [s // 2**i for i in range(num_levels - 1, -1, -1)]
        feats = [
            torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
            for i in range(num_levels - 1, -1, -1)
        ]
        neck = YOLOV3Neck(
            num_scales=num_levels,
            in_channels=in_channels,
            out_channels=out_channels)
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i, out in enumerate(outs):
            assert out.shape == \
                   (1, out_channels[i], feat_sizes[i], feat_sizes[i])

    # test normal channels
    _run_neck([16, 8, 4], [8, 4, 2])
    # test more flexible setting
    _run_neck([32, 8, 16], [19, 21, 5])
def test_ssd_neck():
    """Test SSDNeck config validation, forward shapes and SSD-Lite variant."""
    # level_strides/level_paddings must be same length
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[8, 16, 32],
            level_strides=[2],
            level_paddings=[2, 1])
    # length of out_channels must larger than in_channels
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[8],
            level_strides=[2],
            level_paddings=[2])
    # len(out_channels) - len(in_channels) must equal to len(level_strides)
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[4, 16, 64],
            level_strides=[2, 2],
            level_paddings=[2, 2])
    # in_channels must be same with out_channels[:len(in_channels)]
    with pytest.raises(AssertionError):
        SSDNeck(
            in_channels=[8, 16],
            out_channels=[4, 16, 64],
            level_strides=[2],
            level_paddings=[2])
    # valid neck: the input level is kept and two extra levels are appended
    ssd_neck = SSDNeck(
        in_channels=[4],
        out_channels=[4, 8, 16],
        level_strides=[2, 1],
        level_paddings=[1, 0])
    feats = (torch.rand(1, 4, 16, 16), )
    outs = ssd_neck(feats)
    assert outs[0].shape == (1, 4, 16, 16)
    # stride 2, padding 1: the 16x16 input is halved
    assert outs[1].shape == (1, 8, 8, 8)
    # stride 1, padding 0: 8x8 shrinks to 6x6
    assert outs[2].shape == (1, 16, 6, 6)
    # test SSD-Lite Neck
    ssd_neck = SSDNeck(
        in_channels=[4, 8],
        out_channels=[4, 8, 16],
        level_strides=[1],
        level_paddings=[1],
        l2_norm_scale=None,
        use_depthwise=True,
        norm_cfg=dict(type='BN'),
        act_cfg=dict(type='ReLU6'))
    # without l2_norm_scale no L2-norm layer is created
    assert not hasattr(ssd_neck, 'l2_norm')
    from mmcv.cnn.bricks import DepthwiseSeparableConvModule
    # use_depthwise=True builds depthwise-separable extra layers
    assert isinstance(ssd_neck.extra_layers[0][-1],
                      DepthwiseSeparableConvModule)
    feats = (torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8))
    outs = ssd_neck(feats)
    assert outs[0].shape == (1, 4, 8, 8)
    assert outs[1].shape == (1, 8, 8, 8)
    # stride 1, padding 1 keeps the 8x8 size for the extra level
    assert outs[2].shape == (1, 16, 8, 8)
def test_yolox_pafpn():
    """YOLOX PAFPN forward shapes, with and without depthwise convs."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = 24
    feats = [
        torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
        for i in range(len(in_channels))
    ]

    def _check_outputs(neck):
        # Each level keeps its spatial size and gains out_channels channels.
        outs = neck(feats)
        assert len(outs) == len(feats)
        for i, out in enumerate(outs):
            assert out.shape[1] == out_channels
            assert out.shape[2] == out.shape[3] == s // (2**i)

    _check_outputs(
        YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels))

    # test depth-wise
    dw_neck = YOLOXPAFPN(
        in_channels=in_channels, out_channels=out_channels, use_depthwise=True)
    from mmcv.cnn.bricks import DepthwiseSeparableConvModule
    assert isinstance(dw_neck.downsamples[0], DepthwiseSeparableConvModule)
    _check_outputs(dw_neck)
def test_dyhead():
    """DyHead forward shapes and input-type validation."""
    s = 64
    in_channels = 8
    out_channels = 16
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    feats = [
        torch.rand(1, in_channels, size, size) for size in feat_sizes
    ]
    neck = DyHead(
        in_channels=in_channels, out_channels=out_channels, num_blocks=3)
    outs = neck(feats)
    assert len(outs) == len(feats)
    for i, out in enumerate(outs):
        # channels are remapped; spatial sizes are preserved per level
        assert out.shape[1] == out_channels
        assert out.shape[2] == out.shape[3] == s // (2**i)
    # a bare tensor is rejected: the input must be a tuple or list
    with pytest.raises(AssertionError):
        neck(torch.rand(1, 8, 4, 4))
def test_fpg():
    """Test FPG construction for valid and invalid level configurations."""
    # end_level=-1 is equal to end_level=3
    norm_cfg = dict(type='BN', requires_grad=True)
    FPG(in_channels=[8, 16, 32, 64],
        out_channels=8,
        inter_channels=8,
        num_outs=5,
        add_extra_convs=True,
        start_level=1,
        end_level=-1,
        stack_times=9,
        paths=['bu'] * 9,
        same_down_trans=None,
        same_up_trans=dict(
            type='conv',
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_lateral_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_down_trans=dict(
            type='interpolation_conv',
            mode='nearest',
            kernel_size=3,
            norm_cfg=norm_cfg,
            order=('act', 'conv', 'norm'),
            inplace=False),
        across_up_trans=None,
        across_skip_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        output_trans=dict(
            type='last_conv',
            kernel_size=3,
            order=('act', 'conv', 'norm'),
            inplace=False),
        norm_cfg=norm_cfg,
        skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
    # same config but with the explicit end_level=3 spelling
    FPG(in_channels=[8, 16, 32, 64],
        out_channels=8,
        inter_channels=8,
        num_outs=5,
        add_extra_convs=True,
        start_level=1,
        end_level=3,
        stack_times=9,
        paths=['bu'] * 9,
        same_down_trans=None,
        same_up_trans=dict(
            type='conv',
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_lateral_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        across_down_trans=dict(
            type='interpolation_conv',
            mode='nearest',
            kernel_size=3,
            norm_cfg=norm_cfg,
            order=('act', 'conv', 'norm'),
            inplace=False),
        across_up_trans=None,
        across_skip_trans=dict(
            type='conv',
            kernel_size=1,
            norm_cfg=norm_cfg,
            inplace=False,
            order=('act', 'conv', 'norm')),
        output_trans=dict(
            type='last_conv',
            kernel_size=3,
            order=('act', 'conv', 'norm'),
            inplace=False),
        norm_cfg=norm_cfg,
        skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
    # `end_level` is larger than len(in_channels) - 1
    with pytest.raises(AssertionError):
        FPG(in_channels=[8, 16, 32, 64],
            out_channels=8,
            stack_times=9,
            paths=['bu'] * 9,
            start_level=1,
            end_level=4,
            num_outs=2,
            skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
    # `num_outs` is not equal to end_level - start_level + 1
    with pytest.raises(AssertionError):
        FPG(in_channels=[8, 16, 32, 64],
            out_channels=8,
            stack_times=9,
            paths=['bu'] * 9,
            start_level=1,
            end_level=2,
            num_outs=3,
            skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
def test_fpn_carafe():
    """FPN_CARAFE level-range validation."""
    common = dict(in_channels=[8, 16, 32, 64], out_channels=8)
    # end_level=-1 is equivalent to end_level=3 (the last input index).
    FPN_CARAFE(start_level=0, end_level=3, num_outs=4, **common)
    FPN_CARAFE(start_level=0, end_level=-1, num_outs=4, **common)
    # `end_level` larger than len(in_channels) - 1 is rejected.
    with pytest.raises(AssertionError):
        FPN_CARAFE(start_level=1, end_level=4, num_outs=2, **common)
    # `num_outs` must equal end_level - start_level + 1.
    with pytest.raises(AssertionError):
        FPN_CARAFE(start_level=1, end_level=2, num_outs=3, **common)
def test_nas_fpn():
    """NAS-FPN level-range validation."""
    common = dict(in_channels=[8, 16, 32, 64], out_channels=8, stack_times=9)
    # end_level=-1 is equivalent to end_level=3 (the last input index).
    NASFPN(start_level=0, end_level=3, num_outs=4, **common)
    NASFPN(start_level=0, end_level=-1, num_outs=4, **common)
    # `end_level` larger than len(in_channels) - 1 is rejected.
    with pytest.raises(AssertionError):
        NASFPN(start_level=1, end_level=4, num_outs=2, **common)
    # `num_outs` must equal end_level - start_level + 1.
    with pytest.raises(AssertionError):
        NASFPN(start_level=1, end_level=2, num_outs=3, **common)
def test_nasfcos_fpn():
    """NAS-FCOS FPN level-range validation."""
    common = dict(in_channels=[8, 16, 32, 64], out_channels=8)
    # end_level=-1 is equivalent to end_level=3 (the last input index).
    NASFCOS_FPN(start_level=0, end_level=3, num_outs=4, **common)
    NASFCOS_FPN(start_level=0, end_level=-1, num_outs=4, **common)
    # `end_level` larger than len(in_channels) - 1 is rejected.
    with pytest.raises(AssertionError):
        NASFCOS_FPN(start_level=1, end_level=4, num_outs=2, **common)
    # `num_outs` must equal end_level - start_level + 1.
    with pytest.raises(AssertionError):
        NASFCOS_FPN(start_level=1, end_level=2, num_outs=3, **common)
def test_ssh_neck():
    """SSH widens channels per level while keeping spatial sizes."""
    s = 64
    in_channels = [8, 16, 32, 64]
    feat_sizes = [s // 2**i for i in range(4)]  # [64, 32, 16, 8]
    out_channels = [16, 32, 64, 128]
    ssh_model = SSH(
        num_scales=4, in_channels=in_channels, out_channels=out_channels)
    feats = [
        torch.rand(1, channels, size, size)
        for channels, size in zip(in_channels, feat_sizes)
    ]
    outs = ssh_model(feats)
    assert len(outs) == len(feats)
    for out, channels, size in zip(outs, out_channels, feat_sizes):
        assert out.shape == (1, channels, size, size)
| 20,260 | 30.075153 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_reppoints_head.py | import unittest
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized
from mmdet.models.dense_heads import RepPointsHead
from mmdet.structures import DetDataSample
class TestRepPointsHead(unittest.TestCase):
    """Tests RepPointsHead loss/predict for each point transform method.

    Fixes a copy-paste defect in the original test: the second
    non-zero-level check re-iterated ``loss_pts_init`` although its comment
    said ``loss_pts_refine``, so the refine-stage loss was never tested.
    """

    @parameterized.expand(['moment', 'minmax', 'partial_minmax'])
    def test_head_loss(self, transform_method='moment'):
        """Tests head loss when truth is empty and non-empty."""
        cfg = ConfigDict(
            dict(
                num_classes=2,
                in_channels=32,
                point_feat_channels=10,
                num_points=9,
                gradient_mul=0.1,
                point_strides=[8, 16, 32, 64, 128],
                point_base_scale=4,
                loss_cls=dict(
                    type='FocalLoss',
                    use_sigmoid=True,
                    gamma=2.0,
                    alpha=0.25,
                    loss_weight=1.0),
                loss_bbox_init=dict(
                    type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
                loss_bbox_refine=dict(
                    type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
                use_grid_points=False,
                center_init=True,
                transform_method=transform_method,
                moment_mul=0.01,
                init_cfg=dict(
                    type='Normal',
                    layer='Conv2d',
                    std=0.01,
                    override=dict(
                        type='Normal',
                        name='reppoints_cls_out',
                        std=0.01,
                        bias_prob=0.01)),
                train_cfg=dict(
                    init=dict(
                        assigner=dict(
                            type='PointAssigner', scale=4, pos_num=1),
                        allowed_border=-1,
                        pos_weight=-1,
                        debug=False),
                    refine=dict(
                        assigner=dict(
                            type='MaxIoUAssigner',
                            pos_iou_thr=0.5,
                            neg_iou_thr=0.4,
                            min_pos_iou=0,
                            ignore_iof_thr=-1),
                        allowed_border=-1,
                        pos_weight=-1,
                        debug=False)),
                test_cfg=dict(
                    nms_pre=1000,
                    min_bbox_size=0,
                    score_thr=0.05,
                    nms=dict(type='nms', iou_threshold=0.5),
                    max_per_img=100)))
        reppoints_head = RepPointsHead(**cfg)
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'scale_factor': (1, 1),
            'pad_shape': (s, s),
            'batch_input_shape': (s, s)
        }]
        # Five feature levels matching point_strides 8/16/32/64/128.
        x = [
            torch.rand(1, 32, s // 2**(i + 2), s // 2**(i + 2))
            for i in range(5)
        ]
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        reppoints_head.train()
        forward_outputs = reppoints_head.forward(x)
        empty_gt_losses = reppoints_head.loss_by_feat(*forward_outputs,
                                                      [gt_instances],
                                                      img_metas,
                                                      gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no pts loss.
        for key, losses in empty_gt_losses.items():
            for loss in losses:
                if 'cls' in key:
                    self.assertGreater(loss.item(), 0,
                                       'cls loss should be non-zero')
                elif 'pts' in key:
                    self.assertEqual(
                        loss.item(), 0,
                        'there should be no reg loss when no ground true boxes'
                    )
        # When truth is non-empty then both cls and pts loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = reppoints_head.loss_by_feat(*forward_outputs,
                                                    [gt_instances], img_metas,
                                                    gt_bboxes_ignore)
        # loss_cls should all be non-zero
        self.assertTrue(
            all([loss.item() > 0 for loss in one_gt_losses['loss_cls']]))
        # only one level loss_pts_init is non-zero
        cnt_non_zero = 0
        for loss in one_gt_losses['loss_pts_init']:
            if loss.item() != 0:
                cnt_non_zero += 1
        self.assertEqual(cnt_non_zero, 1)
        # only one level loss_pts_refine is non-zero
        # (fixed: the original accidentally checked loss_pts_init again)
        cnt_non_zero = 0
        for loss in one_gt_losses['loss_pts_refine']:
            if loss.item() != 0:
                cnt_non_zero += 1
        self.assertEqual(cnt_non_zero, 1)
        # test loss
        samples = DetDataSample()
        samples.set_metainfo(img_metas[0])
        samples.gt_instances = gt_instances
        reppoints_head.loss(x, [samples])
        # test only predict
        reppoints_head.eval()
        reppoints_head.predict(x, [samples], rescale=True)
| 5,581 | 37.763889 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_rpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import RPNHead
class TestRPNHead(TestCase):
    """Unit tests for ``RPNHead``: construction, loss computation on empty and
    non-empty ground truth, and bbox post-processing on empty results."""
    def test_init(self):
        """Test init rpn head."""
        rpn_head = RPNHead(num_classes=1, in_channels=1)
        self.assertTrue(rpn_head.rpn_conv)
        self.assertTrue(rpn_head.rpn_cls)
        self.assertTrue(rpn_head.rpn_reg)
        # rpn_head.num_convs > 1
        rpn_head = RPNHead(num_classes=1, in_channels=1, num_convs=2)
        self.assertTrue(rpn_head.rpn_conv)
        self.assertTrue(rpn_head.rpn_cls)
        self.assertTrue(rpn_head.rpn_reg)
    def test_rpn_head_loss(self):
        """Tests rpn head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.3,
                    min_pos_iou=0.3,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=0,
                pos_weight=-1,
                debug=False))
        rpn_head = RPNHead(num_classes=1, in_channels=1, train_cfg=cfg)
        # Anchor head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
            for i in range(len(rpn_head.prior_generator.strides)))
        cls_scores, bbox_preds = rpn_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = rpn_head.loss_by_feat(cls_scores, bbox_preds,
                                                [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_rpn_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_rpn_bbox'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'rpn cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([0])
        one_gt_losses = rpn_head.loss_by_feat(cls_scores, bbox_preds,
                                              [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_rpn_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_rpn_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'rpn cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'rpn box loss should be non-zero')
        # When there is no valid anchor, the loss will be None,
        # and this will raise a ValueError.
        # An 8x8 image is too small for the 256-based feature maps above, so
        # every anchor falls outside the allowed border.
        img_metas = [{
            'img_shape': (8, 8, 3),
            'pad_shape': (8, 8, 3),
            'scale_factor': 1,
        }]
        with pytest.raises(ValueError):
            rpn_head.loss_by_feat(cls_scores, bbox_preds, [gt_instances],
                                  img_metas)
    def test_bbox_post_process(self):
        """Test the length of detection instance results is 0."""
        from mmengine.config import ConfigDict
        cfg = ConfigDict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)
        rpn_head = RPNHead(num_classes=1, in_channels=1)
        results = InstanceData(metainfo=dict())
        results.bboxes = torch.zeros((0, 4))
        results.scores = torch.zeros(0)
        results = rpn_head._bbox_post_process(results, cfg, img_meta=dict())
        self.assertEqual(len(results), 0)
        self.assertEqual(results.bboxes.size(), (0, 4))
        self.assertEqual(results.scores.size(), (0, ))
        self.assertEqual(results.labels.size(), (0, ))
| 4,805 | 37.448 | 76 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_lad_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import LADHead, lad_head
from mmdet.models.dense_heads.lad_head import levels_to_images
class TestLADHead(TestCase):
    """Unit tests for ``LADHead`` (Label Assignment Distillation)."""

    def test_lad_head_loss(self):
        """Tests lad head loss when truth is empty and non-empty.

        A teacher head computes the label assignment, which is then used to
        supervise the student head's (``lad``) own predictions.
        """

        class mock_skm:
            """Stand-in for ``sklearn.mixture`` so the test does not need
            sklearn installed; mimics the GaussianMixture API used by the
            head."""

            def GaussianMixture(self, *args, **kwargs):
                return self

            def fit(self, loss):
                pass

            def predict(self, loss):
                # `np.long` was removed in NumPy 1.24; `np.int64` matches the
                # integer component labels GaussianMixture.predict returns.
                components = np.zeros_like(loss, dtype=np.int64)
                return components.reshape(-1)

            def score_samples(self, loss):
                scores = np.random.random(len(loss))
                return scores

        # Monkey-patch the module-level sklearn handle used by the head.
        lad_head.skm = mock_skm()
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        train_cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.1,
                    neg_iou_thr=0.1,
                    min_pos_iou=0,
                    ignore_iof_thr=-1),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # CrossEntropyLoss is used since Focal Loss is not supported on CPU.
        lad = LADHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
            loss_centerness=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        teacher_model = LADHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
            loss_centerness=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
        lad.init_weights()
        teacher_model.init_weights()

        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        batch_gt_instances_ignore = None

        outs_teacher = teacher_model(feat)
        label_assignment_results = teacher_model.get_label_assignment(
            *outs_teacher, [gt_instances], img_metas,
            batch_gt_instances_ignore)

        # The student head's own predictions must be scored against the
        # teacher-provided assignment (previously the teacher outputs were
        # mistakenly reused here).
        outs = lad(feat)
        empty_gt_losses = lad.loss_by_feat(*outs, [gt_instances], img_metas,
                                           batch_gt_instances_ignore,
                                           label_assignment_results)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        empty_iou_loss = empty_gt_losses['loss_iou']
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_iou_loss.item(), 0,
            'there should be no box loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        batch_gt_instances_ignore = None

        label_assignment_results = teacher_model.get_label_assignment(
            *outs_teacher, [gt_instances], img_metas,
            batch_gt_instances_ignore)
        one_gt_losses = lad.loss_by_feat(*outs, [gt_instances], img_metas,
                                         batch_gt_instances_ignore,
                                         label_assignment_results)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        onegt_iou_loss = one_gt_losses['loss_iou']
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_iou_loss.item(), 0,
                           'box loss should be non-zero')

        # levels_to_images should flatten per-level maps into per-image rows.
        n, c, h, w = 10, 4, 20, 20
        mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
        results = levels_to_images(mlvl_tensor)
        self.assertEqual(len(results), n)
        self.assertEqual(results[0].size(), (h * w * 5, c))
        self.assertTrue(lad.with_score_voting)

        # Smoke-test predict_by_feat with a single-level anchor generator.
        lad = LADHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
            loss_centerness=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        cls_scores = [torch.ones(2, 4, 5, 5)]
        bbox_preds = [torch.ones(2, 4, 5, 5)]
        iou_preds = [torch.ones(2, 1, 5, 5)]
        cfg = Config(
            dict(
                nms_pre=1000,
                min_bbox_size=0,
                score_thr=0.05,
                nms=dict(type='nms', iou_threshold=0.6),
                max_per_img=100))
        rescale = False
        lad.predict_by_feat(
            cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
| 6,520 | 37.585799 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_guided_anchor_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import GuidedAnchorHead
# Minimal GuidedAnchorHead config shared by the tests below: small channel
# counts keep the forward pass cheap; both an approx (dense) and a square
# anchor generator are required by the guided-anchoring scheme.
guided_anchor_head_config = ConfigDict(
    dict(
        num_classes=4,
        in_channels=4,
        feat_channels=4,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0),
        train_cfg=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.4,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            debug=False),
        test_cfg=dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
class TestGuidedAnchorHead(TestCase):
    """Unit tests for ``GuidedAnchorHead``: loss on empty/non-empty ground
    truth and a smoke test of ``predict_by_feat``."""
    def test_guided_anchor_head_loss(self):
        """Tests guided anchor loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': (1, 1)
        }]
        guided_anchor_head = GuidedAnchorHead(**guided_anchor_head_config)
        # One feature map per stride level of the square anchor generator.
        feats = (
            torch.rand(1, 4, s // stride[1], s // stride[0])
            for stride in guided_anchor_head.square_anchor_generator.strides)
        outs = guided_anchor_head(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = guided_anchor_head.loss_by_feat(
            *outs, [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # box shape and location loss should be zero
        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
        empty_box_loss = sum(empty_gt_losses['loss_bbox']).item()
        empty_shape_loss = sum(empty_gt_losses['loss_shape']).item()
        empty_loc_loss = sum(empty_gt_losses['loss_loc']).item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(empty_loc_loss, 0,
                           'location loss should be non-zero')
        self.assertEqual(
            empty_box_loss, 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_shape_loss, 0,
            'there should be no shape loss when there are no true boxes')
        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = guided_anchor_head.loss_by_feat(
            *outs, [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls']).item()
        onegt_box_loss = sum(one_gt_losses['loss_bbox']).item()
        onegt_shape_loss = sum(one_gt_losses['loss_shape']).item()
        onegt_loc_loss = sum(one_gt_losses['loss_loc']).item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(onegt_shape_loss, 0,
                           'shape loss should be non-zero')
        self.assertGreater(onegt_loc_loss, 0,
                           'location loss should be non-zero')
    def test_guided_anchor_head_predict_by_feat(self):
        """Smoke test: predict_by_feat runs end-to-end with rescaling."""
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': (1, 1)
        }]
        guided_anchor_head = GuidedAnchorHead(**guided_anchor_head_config)
        feats = (
            torch.rand(1, 4, s // stride[1], s // stride[0])
            for stride in guided_anchor_head.square_anchor_generator.strides)
        outs = guided_anchor_head(feats)
        guided_anchor_head.predict_by_feat(
            *outs, batch_img_metas=img_metas, rescale=True)
| 5,905 | 36.379747 | 77 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_ssd_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SSDHead
class TestSSDHead(TestCase):
    """Unit tests for ``SSDHead`` loss computation."""
    def test_ssd_head_loss(self):
        """Tests ssd head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.,
                    ignore_iof_thr=-1,
                    gt_max_assign_all=False),
                sampler=dict(type='PseudoSampler'),
                smoothl1_beta=1.,
                allowed_border=-1,
                pos_weight=-1,
                neg_pos_ratio=3,
                debug=False))
        ssd_head = SSDHead(
            num_classes=4,
            in_channels=(1, 1, 1, 1, 1, 1),
            stacked_convs=1,
            feat_channels=1,
            use_depthwise=True,
            anchor_generator=dict(
                type='SSDAnchorGenerator',
                scale_major=False,
                input_size=s,
                basesize_ratio_range=(0.15, 0.9),
                strides=[8, 16, 32, 64, 100, 300],
                ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
            train_cfg=cfg)
        # SSD head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
            for stride in ssd_head.prior_generator.strides)
        cls_scores, bbox_preds = ssd_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,
                                                [gt_instances], img_metas)
        # When there is no truth, cls_loss and box_loss should all be zero.
        # NOTE(review): unlike the other heads, SSD expects zero cls loss here
        # too — presumably due to hard negative mining; confirm against the
        # head implementation.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = ssd_head.loss_by_feat(cls_scores, bbox_preds,
                                              [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
| 3,494 | 36.98913 | 75 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_boxinst_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine import MessageHub
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import BoxInstBboxHead, BoxInstMaskHead
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
    """Build deterministic pseudo-random binary masks, one per bbox, each
    confined to its (integer-truncated) box region."""
    rng = np.random.RandomState(0)
    canvas = np.zeros((num_items, img_h, img_w), dtype=np.float32)
    for idx, box in enumerate(bboxes):
        x1, y1, x2, y2 = box.astype(np.int32)
        # ~70% of the pixels inside the box are foreground.
        patch = (rng.rand(1, y2 - y1, x2 - x1) > 0.3).astype(np.int64)
        canvas[idx:idx + 1, y1:y2, x1:x2] = patch
    return BitmapMasks(canvas, height=img_h, width=img_w)
def _fake_mask_feature_head():
    """Return a minimal mask-feature-head config for the BoxInst tests."""
    cfg = dict(
        in_channels=1,
        feat_channels=1,
        start_level=0,
        end_level=2,
        out_channels=8,
        mask_stride=8,
        num_stacked_convs=4,
        norm_cfg=dict(type='BN', requires_grad=True))
    return ConfigDict(cfg)
class TestBoxInstHead(TestCase):
    """Unit tests for the BoxInst bbox + mask head pair."""
    def test_boxinst_maskhead_loss(self):
        """Tests boxinst maskhead loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        boxinst_bboxhead = BoxInstBboxHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        mask_feature_head = _fake_mask_feature_head()
        boxinst_maskhead = BoxInstMaskHead(
            mask_feature_head=mask_feature_head,
            loss_mask=dict(
                type='DiceLoss',
                use_sigmoid=True,
                activate=True,
                eps=5e-6,
                loss_weight=1.0))
        # Fcos head expects a multiple levels of features per image
        feats = []
        for i in range(len(boxinst_bboxhead.strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))
        feats = tuple(feats)
        cls_scores, bbox_preds, centernesses, param_preds =\
            boxinst_bboxhead.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
        gt_instances.pairwise_masks = _rand_masks(
            0, gt_instances.bboxes.numpy(), s // 4, s // 4).to_tensor(
                dtype=torch.float32,
                device='cpu').unsqueeze(1).repeat(1, 8, 1, 1)
        # The head reads the current iteration from the runtime MessageHub.
        message_hub = MessageHub.get_instance('runtime_info')
        message_hub.update_info('iter', 1)
        # NOTE(review): bbox-head loss is run first — presumably it populates
        # the cached positives returned by get_positive_infos(); confirm.
        _ = boxinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                          param_preds, [gt_instances],
                                          img_metas)
        # When truth is empty then all mask loss
        # should be zero for random inputs
        positive_infos = boxinst_bboxhead.get_positive_infos()
        mask_outs = boxinst_maskhead.forward(feats, positive_infos)
        empty_gt_mask_losses = boxinst_maskhead.loss_by_feat(
            *mask_outs, [gt_instances], img_metas, positive_infos)
        loss_mask_project = empty_gt_mask_losses['loss_mask_project']
        loss_mask_pairwise = empty_gt_mask_losses['loss_mask_pairwise']
        self.assertEqual(loss_mask_project, 0,
                         'mask project loss should be zero')
        self.assertEqual(loss_mask_pairwise, 0,
                         'mask pairwise loss should be zero')
        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor([[0.111, 0.222, 25.6667, 29.8757]])
        gt_instances.labels = torch.LongTensor([2])
        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
        gt_instances.pairwise_masks = _rand_masks(
            1, gt_instances.bboxes.numpy(), s // 4, s // 4).to_tensor(
                dtype=torch.float32,
                device='cpu').unsqueeze(1).repeat(1, 8, 1, 1)
        _ = boxinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                          param_preds, [gt_instances],
                                          img_metas)
        positive_infos = boxinst_bboxhead.get_positive_infos()
        mask_outs = boxinst_maskhead.forward(feats, positive_infos)
        one_gt_mask_losses = boxinst_maskhead.loss_by_feat(
            *mask_outs, [gt_instances], img_metas, positive_infos)
        loss_mask_project = one_gt_mask_losses['loss_mask_project']
        loss_mask_pairwise = one_gt_mask_losses['loss_mask_pairwise']
        self.assertGreater(loss_mask_project, 0,
                           'mask project loss should be nonzero')
        self.assertGreater(loss_mask_pairwise, 0,
                           'mask pairwise loss should be nonzero')
| 5,300 | 41.071429 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_anchor_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import AnchorHead
class TestAnchorHead(TestCase):
    """Unit tests for ``AnchorHead`` loss computation."""

    def test_anchor_head_loss(self):
        """Tests anchor head loss when truth is empty and non-empty.

        Uses ``unittest`` assertion methods rather than bare ``assert``
        statements so the checks survive ``python -O`` and stay consistent
        with the sibling head tests.
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.3,
                    min_pos_iou=0.3,
                    match_low_quality=True,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=0,
                pos_weight=-1,
                debug=False))
        anchor_head = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)

        # Anchor head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
            for i in range(len(anchor_head.prior_generator.strides)))
        cls_scores, bbox_preds = anchor_head.forward(feats)

        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
                                                   [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
| 3,035 | 37.923077 | 77 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_cascade_rpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CascadeRPNHead
from mmdet.structures import DetDataSample
# Weight applied to both RPN stage losses below.
rpn_weight = 0.7
# Two-stage Cascade RPN config used by all tests in this class: stage 1
# refines regions without a cls branch (dilated adaption, bridged features),
# stage 2 adds classification with offset-based adaption.
cascade_rpn_config = ConfigDict(
    dict(
        num_stages=2,
        num_classes=1,
        stages=[
            dict(
                type='StageCascadeRPNHead',
                in_channels=1,
                feat_channels=1,
                anchor_generator=dict(
                    type='AnchorGenerator',
                    scales=[8],
                    ratios=[1.0],
                    strides=[4, 8, 16, 32, 64]),
                adapt_cfg=dict(type='dilation', dilation=3),
                bridged_feature=True,
                with_cls=False,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.1, 0.1, 0.5, 0.5)),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight)),
            dict(
                type='StageCascadeRPNHead',
                in_channels=1,
                feat_channels=1,
                adapt_cfg=dict(type='offset'),
                bridged_feature=False,
                with_cls=True,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.05, 0.05, 0.1, 0.1)),
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=True,
                    loss_weight=1.0 * rpn_weight),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight))
        ],
        train_cfg=[
            dict(
                assigner=dict(
                    type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
                allowed_border=-1,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.3,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=-1,
                pos_weight=-1,
                debug=False)
        ],
        test_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.8))))
class TestStageCascadeRPNHead(TestCase):
    """Unit tests for ``CascadeRPNHead``: loss, loss_and_predict and
    predict entry points."""
    def test_cascade_rpn_head_loss(self):
        """Tests cascade rpn head loss when truth is empty and non-empty."""
        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)
        s = 256
        feats = [
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in cascade_rpn_head.stages[0].prior_generator.strides
        ]
        img_metas = {
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': 1,
        }
        sample = DetDataSample()
        sample.set_metainfo(img_metas)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        sample.gt_instances = gt_instances
        empty_gt_losses = cascade_rpn_head.loss(feats, [sample])
        for key, loss in empty_gt_losses.items():
            loss = sum(loss)
            if 'cls' in key:
                self.assertGreater(loss.item(), 0,
                                   'cls loss should be non-zero')
            elif 'reg' in key:
                self.assertEqual(
                    loss.item(), 0,
                    'there should be no reg loss when no ground true boxes')
        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([0])
        sample.gt_instances = gt_instances
        one_gt_losses = cascade_rpn_head.loss(feats, [sample])
        for loss in one_gt_losses.values():
            loss = sum(loss)
            self.assertGreater(
                loss.item(), 0,
                'cls loss, or box loss, or iou loss should be non-zero')
    def test_cascade_rpn_head_loss_and_predict(self):
        """Tests cascade rpn head loss and predict function."""
        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)
        s = 256
        feats = [
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in cascade_rpn_head.stages[0].prior_generator.strides
        ]
        img_metas = {
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': 1,
        }
        sample = DetDataSample()
        sample.set_metainfo(img_metas)
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        sample.gt_instances = gt_instances
        proposal_cfg = ConfigDict(
            dict(max_per_img=300, nms=dict(iou_threshold=0.8)))
        cascade_rpn_head.loss_and_predict(feats, [sample], proposal_cfg)
    def test_cascade_rpn_head_predict(self):
        """Tests cascade rpn head predict function."""
        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)
        s = 256
        feats = [
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in cascade_rpn_head.stages[0].prior_generator.strides
        ]
        img_metas = {
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': 1,
        }
        sample = DetDataSample()
        sample.set_metainfo(img_metas)
        cascade_rpn_head.predict(feats, [sample])
| 6,383 | 34.466667 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_centernet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CenterNetHead
class TestCenterNetHead(TestCase):
    """Unit tests for ``CenterNetHead``: loss, target generation, and
    heatmap decoding (``predict_by_feat``)."""
    def test_center_head_loss(self):
        """Tests center head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{'batch_input_shape': (s, s, 3)}]
        # NOTE(review): key is 'topK' here but 'topk' in the other tests of
        # this class — verify which spelling the head reads (test_cfg is
        # presumably unused by the loss path; confirm).
        test_cfg = dict(topK=100, max_per_img=100)
        centernet_head = CenterNetHead(
            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)
        feat = [torch.rand(1, 1, s, s)]
        center_out, wh_out, offset_out = centernet_head.forward(feat)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = centernet_head.loss_by_feat(center_out, wh_out,
                                                      offset_out,
                                                      [gt_instances],
                                                      img_metas)
        loss_center = empty_gt_losses['loss_center_heatmap']
        loss_wh = empty_gt_losses['loss_wh']
        loss_offset = empty_gt_losses['loss_offset']
        assert loss_center.item() > 0, 'loss_center should be non-zero'
        assert loss_wh.item() == 0, (
            'there should be no loss_wh when there are no true boxes')
        assert loss_offset.item() == 0, (
            'there should be no loss_offset when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = centernet_head.loss_by_feat(center_out, wh_out,
                                                    offset_out, [gt_instances],
                                                    img_metas)
        loss_center = one_gt_losses['loss_center_heatmap']
        loss_wh = one_gt_losses['loss_wh']
        loss_offset = one_gt_losses['loss_offset']
        assert loss_center.item() > 0, 'loss_center should be non-zero'
        assert loss_wh.item() > 0, 'loss_wh should be non-zero'
        assert loss_offset.item() > 0, 'loss_offset should be non-zero'
    def test_centernet_head_get_targets(self):
        """Tests center head generating and decoding the heatmap."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
        }]
        test_cfg = ConfigDict(
            dict(topk=100, local_maximum_kernel=3, max_per_img=100))
        gt_bboxes = [
            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                          [10, 20, 100, 240]])
        ]
        gt_labels = [torch.LongTensor([1, 1, 2])]
        centernet_head = CenterNetHead(
            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)
        # Feature map is at 1/4 of the input resolution.
        self.feat_shape = (1, 1, s // 4, s // 4)
        targets, _ = centernet_head.get_targets(gt_bboxes, gt_labels,
                                                self.feat_shape,
                                                img_metas[0]['img_shape'])
        center_target = targets['center_heatmap_target']
        wh_target = targets['wh_target']
        offset_target = targets['offset_target']
        # make sure assign target right
        for i in range(len(gt_bboxes[0])):
            # Work in feature-map coordinates (boxes scaled down by 4).
            bbox, label = gt_bboxes[0][i] / 4, gt_labels[0][i]
            ctx, cty = sum(bbox[0::2]) / 2, sum(bbox[1::2]) / 2
            int_ctx, int_cty = int(sum(bbox[0::2]) / 2), int(
                sum(bbox[1::2]) / 2)
            w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
            x_off = ctx - int(ctx)
            y_off = cty - int(cty)
            # Heatmap peak at the box center; wh/offset stored at that cell.
            assert center_target[0, label, int_cty, int_ctx] == 1
            assert wh_target[0, 0, int_cty, int_ctx] == w
            assert wh_target[0, 1, int_cty, int_ctx] == h
            assert offset_target[0, 0, int_cty, int_ctx] == x_off
            assert offset_target[0, 1, int_cty, int_ctx] == y_off
    def test_centernet_head_get_results(self):
        """Tests center head generating and decoding the heatmap."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'border': (0, 0, 0, 0),
        }]
        test_cfg = ConfigDict(
            dict(
                topk=100,
                local_maximum_kernel=3,
                max_per_img=100,
                nms=dict(type='nms', iou_threshold=0.5)))
        gt_bboxes = [
            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                          [10, 20, 100, 240]])
        ]
        gt_labels = [torch.LongTensor([1, 1, 2])]
        centernet_head = CenterNetHead(
            num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)
        self.feat_shape = (1, 1, s // 4, s // 4)
        targets, _ = centernet_head.get_targets(gt_bboxes, gt_labels,
                                                self.feat_shape,
                                                img_metas[0]['img_shape'])
        center_target = targets['center_heatmap_target']
        wh_target = targets['wh_target']
        offset_target = targets['offset_target']
        # make sure get_bboxes is right
        # Decoding the ground-truth-built targets should reproduce the
        # ground-truth boxes among the top detections.
        detections = centernet_head.predict_by_feat([center_target],
                                                    [wh_target],
                                                    [offset_target],
                                                    img_metas,
                                                    rescale=True,
                                                    with_nms=False)
        pred_instances = detections[0]
        out_bboxes = pred_instances.bboxes[:3]
        out_clses = pred_instances.labels[:3]
        for bbox, cls in zip(out_bboxes, out_clses):
            flag = False
            for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):
                if (bbox[:4] == gt_bbox[:4]).all():
                    flag = True
            assert flag, 'get_bboxes is wrong'
        # Same check with NMS enabled.
        detections = centernet_head.predict_by_feat([center_target],
                                                    [wh_target],
                                                    [offset_target],
                                                    img_metas,
                                                    rescale=True,
                                                    with_nms=True)
        pred_instances = detections[0]
        out_bboxes = pred_instances.bboxes[:3]
        out_clses = pred_instances.labels[:3]
        for bbox, cls in zip(out_bboxes, out_clses):
            flag = False
            for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):
                if (bbox[:4] == gt_bbox[:4]).all():
                    flag = True
            assert flag, 'get_bboxes is wrong'
| 7,238 | 44.528302 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_nasfcos_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import NASFCOSHead
class TestNASFCOSHead(TestCase):

    def test_nasfcos_head_loss(self):
        """NAS-FCOS head: with background-only targets the box and
        centerness losses must be zero; with one real box every loss
        term must be positive."""
        size = 256
        batch_img_metas = [{
            'img_shape': (size, size, 3),
            'pad_shape': (size, size, 3),
            'scale_factor': 1,
        }]
        head = NASFCOSHead(
            num_classes=4,
            in_channels=2,  # the same as `deform_groups` in dconv3x3_config
            feat_channels=2,
            norm_cfg=None)

        # One random feature map per FPN level, sized by the prior strides.
        level_feats = (
            torch.rand(1, 2, size // stride[1], size // stride[0]).float()
            for stride in head.prior_generator.strides)
        cls_scores, bbox_preds, centernesses = head.forward(level_feats)

        # Case 1: empty ground truth -> only the classification loss fires.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        losses = head.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                   [empty_gt], batch_img_metas)
        self.assertGreater(losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            losses['loss_bbox'].item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            losses['loss_centerness'].item(), 0,
            'there should be no centerness loss when there are no true boxes')

        # Case 2: one ground-truth box -> all three losses are positive.
        single_gt = InstanceData()
        single_gt.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        single_gt.labels = torch.LongTensor([2])
        losses = head.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                   [single_gt], batch_img_metas)
        for key, msg in (('loss_cls', 'cls loss should be non-zero'),
                         ('loss_bbox', 'box loss should be non-zero'),
                         ('loss_centerness',
                          'centerness loss should be non-zero')):
            self.assertGreater(losses[key].item(), 0, msg)
| 3,065 | 42.183099 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_centripetal_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CentripetalHead
class TestCentripetalHead(TestCase):

    def test_centripetal_head_loss(self):
        """Tests corner head loss when truth is empty and non-empty.

        With no ground truth only the detection loss may be non-zero;
        with two ground-truth boxes every loss term (detection, guiding,
        centripetal, offset) must become positive.
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }]
        centripetal_head = CentripetalHead(
            num_classes=4, in_channels=1, corner_emb_channels=0)
        # Corner head expects a multiple levels of features per image
        feat = [
            torch.rand(1, 1, s // 4, s // 4)
            for _ in range(centripetal_head.num_feat_levels)
        ]
        forward_outputs = centripetal_head.forward(feat)

        # Test that empty ground truth encourages the network
        # to predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = centripetal_head.loss_by_feat(
            *forward_outputs, [gt_instances], img_metas, gt_bboxes_ignore)
        empty_det_loss = sum(empty_gt_losses['det_loss'])
        empty_guiding_loss = sum(empty_gt_losses['guiding_loss'])
        empty_centripetal_loss = sum(empty_gt_losses['centripetal_loss'])
        empty_off_loss = sum(empty_gt_losses['off_loss'])
        # Use assertGreater/assertEqual (not assertTrue / bare assert) so a
        # failure reports the offending value, consistent with sibling tests.
        self.assertGreater(empty_det_loss.item(), 0,
                           'det loss should be non-zero')
        self.assertEqual(
            empty_guiding_loss.item(), 0,
            'there should be no guiding loss when there are no true boxes')
        self.assertEqual(
            empty_centripetal_loss.item(), 0,
            'there should be no centripetal loss when there are no true boxes')
        self.assertEqual(
            empty_off_loss.item(), 0,
            'there should be no box loss when there are no true boxes')

        # When truth is non-empty then every loss term should be positive
        # for random inputs.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874],
             [123.6667, 123.8757, 138.6326, 251.8874]])
        gt_instances.labels = torch.LongTensor([2, 3])
        two_gt_losses = centripetal_head.loss_by_feat(*forward_outputs,
                                                      [gt_instances],
                                                      img_metas,
                                                      gt_bboxes_ignore)
        twogt_det_loss = sum(two_gt_losses['det_loss'])
        twogt_guiding_loss = sum(two_gt_losses['guiding_loss'])
        twogt_centripetal_loss = sum(two_gt_losses['centripetal_loss'])
        twogt_off_loss = sum(two_gt_losses['off_loss'])
        self.assertGreater(twogt_det_loss.item(), 0,
                           'det loss should be non-zero')
        self.assertGreater(twogt_guiding_loss.item(), 0,
                           'push loss should be non-zero')
        self.assertGreater(twogt_centripetal_loss.item(), 0,
                           'pull loss should be non-zero')
        self.assertGreater(twogt_off_loss.item(), 0,
                           'off loss should be non-zero')
| 3,158 | 41.12 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_solov2_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SOLOV2Head
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
    """Build ``num_items`` deterministic random binary masks, one per bbox.

    Each mask is non-zero only inside its (integer-truncated) box and the
    stack is wrapped into a :class:`BitmapMasks` of size img_h x img_w.
    """
    rng = np.random.RandomState(0)  # fixed seed keeps the test deterministic
    canvas = np.zeros((num_items, img_h, img_w))
    for idx, box in enumerate(bboxes):
        x1, y1, x2, y2 = box.astype(np.int32)
        patch = (rng.rand(1, y2 - y1, x2 - x1) > 0.3).astype(np.int64)
        canvas[idx:idx + 1, y1:y2, x1:x2] = patch
    return BitmapMasks(canvas, height=img_h, width=img_w)
def _fake_mask_feature_head():
    """Return a minimal mask-feature-head config for the SOLOv2 tests."""
    return ConfigDict(
        feat_channels=128,
        start_level=0,
        end_level=3,
        out_channels=256,
        mask_stride=4,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
class TestSOLOv2Head(TestCase):

    def test_solov2_head_loss(self):
        """Tests mask head loss when truth is empty and non-empty."""
        size = 256
        batch_img_metas = [{
            'img_shape': (size, size, 3),
            'ori_shape': (size, size, 3),
            'scale_factor': 1,
            'batch_input_shape': (size, size, 3)
        }]
        head = SOLOV2Head(
            num_classes=4,
            in_channels=1,
            mask_feature_head=_fake_mask_feature_head())
        # One random feature map per stride level of the SOLO head.
        feats = tuple(
            torch.rand(1, 1, size // (2**(lvl + 2)), size // (2**(lvl + 2)))
            for lvl in range(len(head.strides)))
        mask_outs = head.forward(feats)

        # Empty ground truth: only the classification loss may fire.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty(0, 4)
        empty_gt.labels = torch.LongTensor([])
        empty_gt.masks = _rand_masks(0, empty_gt.bboxes.numpy(), size, size)
        losses = head.loss_by_feat(
            *mask_outs,
            batch_gt_instances=[empty_gt],
            batch_img_metas=batch_img_metas)
        self.assertGreater(losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            losses['loss_mask'].item(), 0,
            'there should be no mask loss when there are no true mask')

        # One ground-truth instance: both losses must be positive.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([2])
        one_gt.masks = _rand_masks(1, one_gt.bboxes.numpy(), size, size)
        losses = head.loss_by_feat(
            *mask_outs,
            batch_gt_instances=[one_gt],
            batch_img_metas=batch_img_metas)
        self.assertGreater(losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(losses['loss_mask'].item(), 0,
                           'mask loss should be non-zero')

    def test_solov2_head_empty_result(self):
        """Empty predictions must decode to an empty InstanceData."""
        size = 256
        img_meta = {
            'img_shape': (size, size, 3),
            'ori_shape': (size, size, 3),
            'scale_factor': 1,
            'batch_input_shape': (size, size, 3)
        }
        head = SOLOV2Head(
            num_classes=4,
            in_channels=1,
            mask_feature_head=_fake_mask_feature_head())
        results = head._predict_by_feat_single(
            kernel_preds=torch.empty(0, 128),
            cls_scores=torch.empty(0, 80),
            mask_feats=torch.empty(0, 16, 16),
            img_meta=img_meta,
            cfg=ConfigDict(score_thr=0.1, mask_thr=0.5))
        self.assertIsInstance(results, InstanceData)
        self.assertEqual(len(results), 0)
| 4,598 | 34.10687 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_yolof_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import YOLOFHead
class TestYOLOFHead(TestCase):

    def test_yolof_head_loss(self):
        """Tests yolof head loss when truth is empty and non-empty.

        With an empty ground truth only the classification loss should be
        positive; with one real box both the classification loss and the
        box loss should be positive.
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'pad_shape': (s, s, 3)
        }]
        # UniformAssigner with pos/neg ignore thresholds, as used by YOLOF.
        train_cfg = Config(
            dict(
                assigner=dict(
                    type='UniformAssigner',
                    pos_ignore_thr=0.15,
                    neg_ignore_thr=0.7),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        yolof_head = YOLOFHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            # NOTE(review): reg_decoded_bbox=True is the usual pairing with
            # the IoU-based GIoULoss below.
            reg_decoded_bbox=True,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                scales=[1, 2, 4, 8, 16],
                strides=[32]),
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[.0, .0, .0, .0],
                target_stds=[1., 1., 1., 1.],
                add_ctr_clamp=True,
                ctr_clamp=32),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
        # YOLOF is single-level: one stride-32 feature map.
        feat = [torch.rand(1, 1, s // 32, s // 32)]
        cls_scores, bbox_preds = yolof_head.forward(feat)

        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = yolof_head.loss_by_feat(cls_scores, bbox_preds,
                                                  [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = yolof_head.loss_by_feat(cls_scores, bbox_preds,
                                                [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
| 3,396 | 37.168539 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_vfnet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import VFNetHead
class TestVFNetHead(TestCase):
    """Loss tests for ``VFNetHead`` with and without the ATSS branch.

    The two public tests previously duplicated ~50 lines that differed
    only in ``use_atss``; the shared body now lives in ``_run_loss_test``.
    """

    def _run_loss_test(self, use_atss):
        """Check vfnet losses for empty and non-empty ground truth.

        Args:
            use_atss (bool): Forwarded to ``VFNetHead(use_atss=...)``.
                Both code paths must satisfy the same loss invariants.
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'pad_shape': (s, s, 3)
        }]
        train_cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # since VarFocal Loss is not supported on CPU
        vfnet_head = VFNetHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            use_atss=use_atss,
            loss_cls=dict(
                type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
        # One random feature map per FPN stride.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds, bbox_preds_refine = vfnet_head.forward(feat)

        # Empty ground truth: cls loss positive, box loss exactly zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = vfnet_head.loss_by_feat(cls_scores, bbox_preds,
                                                  bbox_preds_refine,
                                                  [gt_instances], img_metas)
        self.assertGreater(empty_gt_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_gt_losses['loss_bbox'].item(), 0,
            'there should be no box loss when there are no true boxes')

        # Non-empty ground truth: both losses must be positive.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = vfnet_head.loss_by_feat(cls_scores, bbox_preds,
                                                bbox_preds_refine,
                                                [gt_instances], img_metas)
        self.assertGreater(one_gt_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(one_gt_losses['loss_bbox'].item(), 0,
                           'box loss should be non-zero')

    def test_vfnet_head_loss(self):
        """Tests vfnet head loss when truth is empty and non-empty."""
        # use_atss=True matches the head's default used by the original test.
        self._run_loss_test(use_atss=True)

    def test_vfnet_head_loss_without_atss(self):
        """Tests vfnet head loss with the ATSS branch disabled."""
        self._run_loss_test(use_atss=False)
| 5,715 | 41.029412 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_free_anchor_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import FreeAnchorRetinaHead
class TestFreeAnchorRetinaHead(TestCase):

    def test_free_anchor_head_loss(self):
        """FreeAnchor head: with no truth only the negative-bag loss fires;
        with one ground-truth box both bag losses are positive."""
        size = 256
        batch_img_metas = [{
            'img_shape': (size, size, 3),
            'pad_shape': (size, size, 3),
            'scale_factor': 1,
        }]
        head = FreeAnchorRetinaHead(num_classes=1, in_channels=1)
        # One random feature map per prior-generator level.
        feats = (
            torch.rand(1, 1, size // (2**(lvl + 2)), size // (2**(lvl + 2)))
            for lvl in range(len(head.prior_generator.strides)))
        cls_scores, bbox_preds = head.forward(feats)

        # Empty ground truth.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        losses = head.loss_by_feat(cls_scores, bbox_preds, [empty_gt],
                                   batch_img_metas)
        self.assertGreater(losses['negative_bag_loss'].item(), 0,
                           'negative_bag loss should be non-zero')
        self.assertEqual(
            losses['positive_bag_loss'].item(), 0,
            'there should be no positive_bag loss when there are no true boxes'
        )

        # One ground-truth box.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([0])
        losses = head.loss_by_feat(cls_scores, bbox_preds, [one_gt],
                                   batch_img_metas)
        self.assertGreater(losses['positive_bag_loss'].item(), 0,
                           'positive bag loss should be non-zero')
        self.assertGreater(losses['negative_bag_loss'].item(), 0,
                           'negative bag loss should be non-zero')
| 2,605 | 39.71875 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_embedding_rpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import EmbeddingRPNHead
from mmdet.structures import DetDataSample
class TestEmbeddingRPNHead(TestCase):

    def test_init(self):
        """Test init rpn head."""
        head = EmbeddingRPNHead(
            num_proposals=100, proposal_feature_channel=256)
        head.init_weights()
        self.assertTrue(head.init_proposal_bboxes)
        self.assertTrue(head.init_proposal_features)

    def test_loss_and_predict(self):
        """predict()/loss_and_predict() yield InstanceData proposals, while
        loss() is unsupported for this head and must raise."""
        size = 256
        meta = {
            'img_shape': (size, size, 3),
            'pad_shape': (size, size, 3),
            'scale_factor': 1,
        }
        head = EmbeddingRPNHead(
            num_proposals=100, proposal_feature_channel=256)
        feats = [
            torch.rand(2, 1, size // (2**(lvl + 2)), size // (2**(lvl + 2)))
            for lvl in range(5)
        ]
        sample = DetDataSample()
        sample.set_metainfo(meta)

        # test predict
        preds = head.predict(feats, [sample])
        self.assertIsInstance(preds, list)
        self.assertIsInstance(preds[0], InstanceData)

        # test loss_and_predict
        outs = head.loss_and_predict(feats, [sample])
        self.assertIsInstance(outs, tuple)
        self.assertIsInstance(outs[0], dict)
        self.assertEqual(len(outs[0]), 0)
        self.assertIsInstance(outs[1], list)
        self.assertIsInstance(outs[1][0], InstanceData)

        # test loss
        with pytest.raises(NotImplementedError):
            head.loss(feats, [sample])
| 1,802 | 31.196429 | 69 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_pisa_ssd_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import PISASSDHead
class TestPISASSDHead(TestCase):
    """Loss tests for ``PISASSDHead`` with and without ISR/CARL.

    The empty-gt / one-gt loss checks were duplicated verbatim before and
    after enabling ISR and CARL; they are now shared via ``_check_losses``.
    """

    def _check_losses(self, head, cls_scores, bbox_preds, img_metas):
        """Assert the PISA-SSD loss invariants for empty and one-box truth.

        With no ground truth both losses are zero; with one box both must
        be positive.
        """
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            [gt_instances], img_metas)
        # When there is no truth, cls_loss and box_loss should all be zero.
        self.assertEqual(
            sum(empty_gt_losses['loss_cls']).item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            sum(empty_gt_losses['loss_bbox']).item(), 0,
            'there should be no box loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                          [gt_instances], img_metas)
        self.assertGreater(
            sum(one_gt_losses['loss_cls']).item(), 0,
            'cls loss should be non-zero')
        self.assertGreater(
            sum(one_gt_losses['loss_bbox']).item(), 0,
            'box loss should be non-zero')

    def test_pisa_ssd_head_loss(self):
        """Tests pisa ssd head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.,
                    ignore_iof_thr=-1,
                    gt_max_assign_all=False),
                sampler=dict(type='PseudoSampler'),
                smoothl1_beta=1.,
                allowed_border=-1,
                pos_weight=-1,
                neg_pos_ratio=3,
                debug=False))
        pisa_ssd_head = PISASSDHead(
            num_classes=4,
            in_channels=(1, 1, 1, 1, 1, 1),
            anchor_generator=dict(
                type='SSDAnchorGenerator',
                scale_major=False,
                input_size=s,
                basesize_ratio_range=(0.15, 0.9),
                strides=[8, 16, 32, 64, 100, 300],
                ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
            train_cfg=cfg)
        # PISA SSD head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
            for stride in pisa_ssd_head.prior_generator.strides)
        cls_scores, bbox_preds = pisa_ssd_head.forward(feats)

        # First pass: plain PISA-SSD, without ISR and CARL.
        self._check_losses(pisa_ssd_head, cls_scores, bbox_preds, img_metas)

        # Second pass: enable ISR (importance-based sample reweighting) and
        # CARL (classification-aware regression loss), then re-check the
        # same invariants.
        pisa_ssd_head.train_cfg.update(
            dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
        self._check_losses(pisa_ssd_head, cls_scores, bbox_preds, img_metas)
| 5,288 | 40.320313 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_ga_rpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import GARPNHead
# Shared config for building GARPNHead in the tests below.
# FIX: `test_cfg` previously used `ms_post`, a typo for `nms_post`.
# ConfigDict silently stores the unknown key, so the intended post-NMS
# proposal cap was never applied; `nms_post` matches the cfg used in
# TestGARPNHead.test_ga_rpn_head_predict_by_feat.
ga_rpn_config = ConfigDict(
    dict(
        num_classes=1,
        in_channels=4,
        feat_channels=4,
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=8,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[8],
            strides=[4, 8, 16, 32, 64]),
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.14, 0.14]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.07, 0.07, 0.11, 0.11]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
        train_cfg=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            pos_weight=-1,
            debug=False),
        test_cfg=dict(
            nms_pre=1000,
            nms_post=1000,  # fixed: was the unrecognized key `ms_post`
            max_per_img=300,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
class TestGARPNHead(TestCase):

    def test_ga_rpn_head_loss(self):
        """Tests ga rpn head loss."""
        size = 256
        batch_img_metas = [{
            'img_shape': (size, size),
            'pad_shape': (size, size),
            'scale_factor': (1, 1)
        }]
        head = GARPNHead(**ga_rpn_config)
        feats = (
            torch.rand(1, 4, size // stride[1], size // stride[0])
            for stride in head.square_anchor_generator.strides)
        outs = head(feats)

        # A single ground-truth box must make every loss term positive.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([0])
        losses = head.loss_by_feat(*outs, [gt_instances], batch_img_metas)
        for key, msg in (('loss_rpn_cls', 'cls loss should be non-zero'),
                         ('loss_rpn_bbox', 'box loss should be non-zero'),
                         ('loss_anchor_shape',
                          'shape loss should be non-zero'),
                         ('loss_anchor_loc',
                          'location loss should be non-zero')):
            self.assertGreater(sum(losses[key]).item(), 0, msg)

    def test_ga_rpn_head_predict_by_feat(self):
        """Smoke-test decoding raw head outputs into proposals."""
        size = 256
        batch_img_metas = [{
            'img_shape': (size, size),
            'pad_shape': (size, size),
            'scale_factor': (1, 1)
        }]
        head = GARPNHead(**ga_rpn_config)
        feats = (
            torch.rand(1, 4, size // stride[1], size // stride[0])
            for stride in head.square_anchor_generator.strides)
        outs = head(feats)
        cfg = ConfigDict(
            dict(
                nms_pre=2000,
                nms_post=1000,
                max_per_img=300,
                nms=dict(type='nms', iou_threshold=0.7),
                min_bbox_size=0))
        head.predict_by_feat(
            *outs, batch_img_metas=batch_img_metas, cfg=cfg, rescale=True)
| 5,031 | 33.944444 | 76 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_corner_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.evaluation import bbox_overlaps
from mmdet.models.dense_heads import CornerHead
class TestCornerHead(TestCase):

    def test_corner_head_loss(self):
        """Tests corner head loss when truth is empty and non-empty.

        Three scenarios: no ground truth (only the det loss may fire), a
        single box (push loss needs at least two objects, so it stays
        zero), and two boxes (all four loss terms exercised).
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }]
        corner_head = CornerHead(num_classes=4, in_channels=1)
        # Corner head expects a multiple levels of features per image
        feat = [
            torch.rand(1, 1, s // 4, s // 4)
            for _ in range(corner_head.num_feat_levels)
        ]
        forward_outputs = corner_head.forward(feat)

        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = corner_head.loss_by_feat(*forward_outputs,
                                                   [gt_instances], img_metas,
                                                   gt_bboxes_ignore)
        empty_det_loss = sum(empty_gt_losses['det_loss'])
        empty_push_loss = sum(empty_gt_losses['push_loss'])
        empty_pull_loss = sum(empty_gt_losses['pull_loss'])
        empty_off_loss = sum(empty_gt_losses['off_loss'])
        self.assertTrue(empty_det_loss.item() > 0,
                        'det loss should be non-zero')
        self.assertTrue(
            empty_push_loss.item() == 0,
            'there should be no push loss when there are no true boxes')
        self.assertTrue(
            empty_pull_loss.item() == 0,
            'there should be no pull loss when there are no true boxes')
        self.assertTrue(
            empty_off_loss.item() == 0,
            'there should be no box loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = corner_head.loss_by_feat(*forward_outputs,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        onegt_det_loss = sum(one_gt_losses['det_loss'])
        onegt_push_loss = sum(one_gt_losses['push_loss'])
        onegt_pull_loss = sum(one_gt_losses['pull_loss'])
        onegt_off_loss = sum(one_gt_losses['off_loss'])
        self.assertTrue(onegt_det_loss.item() > 0,
                        'det loss should be non-zero')
        # Push loss separates embeddings of *different* objects, so a
        # single box cannot produce any push loss.
        self.assertTrue(
            onegt_push_loss.item() == 0,
            'there should be no push loss when there are only one true box')
        self.assertTrue(onegt_pull_loss.item() > 0,
                        'pull loss should be non-zero')
        self.assertTrue(onegt_off_loss.item() > 0,
                        'off loss should be non-zero')

        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874],
             [123.6667, 123.8757, 138.6326, 251.8874]])
        gt_instances.labels = torch.LongTensor([2, 3])
        two_gt_losses = corner_head.loss_by_feat(*forward_outputs,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        twogt_det_loss = sum(two_gt_losses['det_loss'])
        twogt_push_loss = sum(two_gt_losses['push_loss'])
        twogt_pull_loss = sum(two_gt_losses['pull_loss'])
        twogt_off_loss = sum(two_gt_losses['off_loss'])
        self.assertTrue(twogt_det_loss.item() > 0,
                        'det loss should be non-zero')
        # F.relu limits push loss larger than or equal to 0.
        self.assertTrue(twogt_push_loss.item() >= 0,
                        'push loss should be non-zero')
        self.assertTrue(twogt_pull_loss.item() > 0,
                        'pull loss should be non-zero')
        self.assertTrue(twogt_off_loss.item() > 0,
                        'off loss should be non-zero')

    def test_corner_head_encode_and_decode_heatmap(self):
        """Tests corner head generating and decoding the heatmap.

        Builds ground-truth heatmaps/offsets/embeddings via get_targets,
        feeds them back through _decode_heatmap, and checks that the
        decoded boxes recover all three ground-truth boxes with matching
        classes.
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3),
            'border': (0, 0, 0, 0)
        }]
        gt_bboxes = [
            torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
                          [10, 20, 200, 240]])
        ]
        gt_labels = [torch.LongTensor([1, 1, 2])]

        corner_head = CornerHead(
            num_classes=4, in_channels=1, corner_emb_channels=1)
        feat = [
            torch.rand(1, 1, s // 4, s // 4)
            for _ in range(corner_head.num_feat_levels)
        ]

        targets = corner_head.get_targets(
            gt_bboxes,
            gt_labels,
            feat[0].shape,
            img_metas[0]['batch_input_shape'],
            with_corner_emb=corner_head.with_corner_emb)
        gt_tl_heatmap = targets['topleft_heatmap']
        gt_br_heatmap = targets['bottomright_heatmap']
        gt_tl_offset = targets['topleft_offset']
        gt_br_offset = targets['bottomright_offset']
        embedding = targets['corner_embedding']
        # Use the first object's corner coordinates to build one-hot
        # embedding maps that mark its top-left and bottom-right corners.
        [top, left], [bottom, right] = embedding[0][0]
        gt_tl_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
        gt_br_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
        gt_tl_embedding_heatmap[0, 0, top, left] = 1
        gt_br_embedding_heatmap[0, 0, bottom, right] = 1

        batch_bboxes, batch_scores, batch_clses = corner_head._decode_heatmap(
            tl_heat=gt_tl_heatmap,
            br_heat=gt_br_heatmap,
            tl_off=gt_tl_offset,
            br_off=gt_br_offset,
            tl_emb=gt_tl_embedding_heatmap,
            br_emb=gt_br_embedding_heatmap,
            img_meta=img_metas[0],
            k=100,
            kernel=3,
            distance_threshold=0.5)

        bboxes = batch_bboxes.view(-1, 4)
        scores = batch_scores.view(-1, 1)
        clses = batch_clses.view(-1, 1)

        # Sort detections by descending score before thresholding.
        idx = scores.argsort(dim=0, descending=True)
        bboxes = bboxes[idx].view(-1, 4)
        scores = scores[idx].view(-1)
        clses = clses[idx].view(-1)

        valid_bboxes = bboxes[torch.where(scores > 0.05)]
        valid_labels = clses[torch.where(scores > 0.05)]

        # Offset every box by label * (max_coordinate + 1) so the IoU
        # matrix below can only match boxes of the same class (the same
        # trick class-aware batched NMS uses).
        max_coordinate = valid_bboxes.max()
        offsets = valid_labels.to(valid_bboxes) * (max_coordinate + 1)
        gt_offsets = gt_labels[0].to(gt_bboxes[0]) * (max_coordinate + 1)

        offset_bboxes = valid_bboxes + offsets[:, None]
        offset_gtbboxes = gt_bboxes[0] + gt_offsets[:, None]

        iou_matrix = bbox_overlaps(offset_bboxes.numpy(),
                                   offset_gtbboxes.numpy())
        # Each of the three ground-truth boxes must be recovered exactly
        # (IoU == 1) by some decoded box of the same class.
        self.assertEqual((iou_matrix == 1).sum(), 3)
| 7,299 | 39.782123 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_yolo_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import YOLOV3Head
class TestYOLOV3Head(TestCase):
    """Unit tests for ``YOLOV3Head`` loss computation."""
    def test_yolo_head_loss(self):
        """Tests YOLO head loss when truth is empty and non-empty."""
        s = 256  # square input size; feature sizes are derived from it below
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        # Tiny head: 4 classes, three single-channel levels, grid assignment.
        head = YOLOV3Head(
            num_classes=4,
            in_channels=[1, 1, 1],
            out_channels=[1, 1, 1],
            train_cfg=Config(
                dict(
                    assigner=dict(
                        type='GridAssigner',
                        pos_iou_thr=0.5,
                        neg_iou_thr=0.5,
                        min_pos_iou=0))))
        head.init_weights()
        # YOLO head expects a multiple levels of features per image
        # (indexes stride[1]/stride[0] separately; assumes each stride is a
        # (w, h) pair -- TODO confirm against the prior generator).
        feats = [
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in head.prior_generator.strides
        ]
        predmaps, = head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = head.loss_by_feat(predmaps, [gt_instances],
                                            img_metas)
        # When there is no truth, the conf loss should be nonzero but
        # cls loss and xy&wh loss should be zero
        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
        empty_conf_loss = sum(empty_gt_losses['loss_conf']).item()
        empty_xy_loss = sum(empty_gt_losses['loss_xy']).item()
        empty_wh_loss = sum(empty_gt_losses['loss_wh']).item()
        self.assertGreater(empty_conf_loss, 0, 'conf loss should be non-zero')
        self.assertEqual(
            empty_cls_loss, 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_xy_loss, 0,
            'there should be no xy loss when there are no true boxes')
        self.assertEqual(
            empty_wh_loss, 0,
            'there should be no wh loss when there are no true boxes')
        # When truth is non-empty then all conf, cls loss and xywh loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = head.loss_by_feat(predmaps, [gt_instances], img_metas)
        one_gt_cls_loss = sum(one_gt_losses['loss_cls']).item()
        one_gt_conf_loss = sum(one_gt_losses['loss_conf']).item()
        one_gt_xy_loss = sum(one_gt_losses['loss_xy']).item()
        one_gt_wh_loss = sum(one_gt_losses['loss_wh']).item()
        self.assertGreater(one_gt_conf_loss, 0, 'conf loss should be non-zero')
        self.assertGreater(one_gt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(one_gt_xy_loss, 0, 'xy loss should be non-zero')
        self.assertGreater(one_gt_wh_loss, 0, 'wh loss should be non-zero')
| 3,290 | 39.62963 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_fsaf_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import FSAFHead
class TestFSAFHead(TestCase):
    """Unit tests for ``FSAFHead`` loss computation."""
    def test_fsaf_head_loss(self):
        """Tests fsaf head loss when truth is empty and non-empty."""
        s = 300  # input size; not a power of two, hence ceil() below
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': 1,
        }]
        # Training config with the center-region assigner used by FSAF.
        cfg = Config(
            dict(
                assigner=dict(
                    type='CenterRegionAssigner',
                    pos_scale=0.2,
                    neg_scale=0.2,
                    min_pos_iof=0.01),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # Minimal single-conv head with elementwise ('none'-reduced) losses.
        fsaf_head = FSAFHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=1,
            reg_decoded_bbox=True,
            anchor_generator=dict(
                type='AnchorGenerator',
                octave_base_scale=1,
                scales_per_octave=1,
                ratios=[1.0],
                strides=[8, 16, 32, 64, 128]),
            bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0,
                reduction='none'),
            loss_bbox=dict(
                type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'),
            train_cfg=cfg)
        # FSAF head expects a multiple levels of features per image
        # (a generator is used; forward() consumes it exactly once here).
        feats = (
            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
            for stride in fsaf_head.prior_generator.strides)
        cls_scores, bbox_preds = fsaf_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # box loss should be zero
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = fsaf_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
| 3,536 | 36.62766 | 77 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_tood_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config, MessageHub
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import TOODHead
def _tood_head(anchor_type):
    """Build a small ``TOODHead`` for testing.

    ``anchor_type`` selects the anchor mode (the tests pass 'anchor_free'
    or 'anchor_based'); train/test configs are fixed to the values below.
    """
    train_cfg = Config(
        dict(
            initial_epoch=4,
            initial_assigner=dict(type='ATSSAssigner', topk=9),
            assigner=dict(type='TaskAlignedAssigner', topk=13),
            alpha=1,
            beta=6,
            allowed_border=-1,
            pos_weight=-1,
            debug=False))
    test_cfg = Config(
        dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100))
    tood_head = TOODHead(
        num_classes=80,
        in_channels=1,
        stacked_convs=1,
        feat_channels=8,  # the same as `la_down_rate` in TaskDecomposition
        norm_cfg=None,
        anchor_type=anchor_type,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        initial_loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    return tood_head
class TestTOODHead(TestCase):
    """Unit tests for ``TOODHead`` loss computation in both anchor modes."""
    def test_tood_head_anchor_free_loss(self):
        """Tests tood head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        tood_head = _tood_head('anchor_free')
        tood_head.init_weights()
        # One random single-channel feature map per stride.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [8, 16, 32, 64, 128]
        ]
        cls_scores, bbox_preds = tood_head(feat)
        # The head reads the current epoch from the runtime MessageHub to
        # choose between the initial and the task-aligned assigner.
        message_hub = MessageHub.get_instance('runtime_info')
        message_hub.update_info('epoch', 0)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        self.assertGreater(
            sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertEqual(
            sum(empty_box_loss).item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_bboxes_ignore = None
        one_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas,
                                               gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        self.assertGreater(
            sum(onegt_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertGreater(
            sum(onegt_box_loss).item(), 0, 'box loss should be non-zero')
        # NOTE(review): the empty-GT and single-GT checks below repeat the
        # two checks above verbatim on the same predictions -- presumably to
        # verify loss_by_feat is stateless across calls; confirm intentional.
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        self.assertGreater(
            sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertEqual(
            sum(empty_box_loss).item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_bboxes_ignore = None
        one_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas,
                                               gt_bboxes_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        self.assertGreater(
            sum(onegt_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertGreater(
            sum(onegt_box_loss).item(), 0, 'box loss should be non-zero')
    def test_tood_head_anchor_based_loss(self):
        """Tests tood head loss when truth is empty and non-empty.

        NOTE(review): unlike the anchor-free variant above, only the
        empty-GT case is exercised here.
        """
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        tood_head = _tood_head('anchor_based')
        tood_head.init_weights()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [8, 16, 32, 64, 128]
        ]
        cls_scores, bbox_preds = tood_head(feat)
        message_hub = MessageHub.get_instance('runtime_info')
        message_hub.update_info('epoch', 0)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_bboxes_ignore = None
        empty_gt_losses = tood_head.loss_by_feat(cls_scores, bbox_preds,
                                                 [gt_instances], img_metas,
                                                 gt_bboxes_ignore)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        self.assertGreater(
            sum(empty_cls_loss).item(), 0, 'cls loss should be non-zero')
        self.assertEqual(
            sum(empty_box_loss).item(), 0,
            'there should be no box loss when there are no true boxes')
| 7,843 | 39.43299 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_solo_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.models.dense_heads import (DecoupledSOLOHead,
DecoupledSOLOLightHead, SOLOHead)
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
    """Create a ``BitmapMasks`` of ``num_items`` random binary masks.

    Each mask is zero outside its bbox and a fixed-seed random 0/1 pattern
    (threshold 0.3) inside it.
    """
    rand_state = np.random.RandomState(0)
    canvas = np.zeros((num_items, img_h, img_w))
    for idx, box in enumerate(bboxes):
        x1, y1, x2, y2 = box.astype(np.int32)
        fill = (rand_state.rand(1, y2 - y1, x2 - x1) > 0.3).astype(np.int64)
        canvas[idx:idx + 1, y1:y2, x1:x2] = fill
    return BitmapMasks(canvas, height=img_h, width=img_w)
class TestSOLOHead(TestCase):
    """Unit tests for the SOLO family of mask heads."""
    @parameterized.expand([(SOLOHead, ), (DecoupledSOLOHead, ),
                           (DecoupledSOLOLightHead, )])
    def test_mask_head_loss(self, MaskHead):
        """Tests mask head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }]
        mask_head = MaskHead(num_classes=4, in_channels=1)
        # SOLO head expects a multiple levels of features per image
        # (one map per stride, downsampled by 4, 8, 16, ...).
        feats = []
        for i in range(len(mask_head.strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
        feats = tuple(feats)
        mask_outs = mask_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty(0, 4)
        gt_instances.labels = torch.LongTensor([])
        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
        empty_gt_losses = mask_head.loss_by_feat(
            *mask_outs,
            batch_gt_instances=[gt_instances],
            batch_img_metas=img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # there should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_mask_loss = empty_gt_losses['loss_mask']
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_mask_loss.item(), 0,
            'there should be no mask loss when there are no true mask')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
        one_gt_losses = mask_head.loss_by_feat(
            *mask_outs,
            batch_gt_instances=[gt_instances],
            batch_img_metas=img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_mask_loss = one_gt_losses['loss_mask']
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_mask_loss.item(), 0,
                           'mask loss should be non-zero')
    def test_solo_head_empty_result(self):
        """Predicting from zero candidates yields an empty InstanceData."""
        s = 256
        img_metas = {
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }
        mask_head = SOLOHead(num_classes=4, in_channels=1)
        # Zero-row score/mask tensors simulate "nothing above threshold".
        cls_scores = torch.empty(0, 80)
        mask_preds = torch.empty(0, 16, 16)
        test_cfg = ConfigDict(
            score_thr=0.1,
            mask_thr=0.5,
        )
        results = mask_head._predict_by_feat_single(
            cls_scores=cls_scores,
            mask_preds=mask_preds,
            img_meta=img_metas,
            cfg=test_cfg)
        self.assertIsInstance(results, InstanceData)
        self.assertEqual(len(results), 0)
    def test_decoupled_solo_head_empty_result(self):
        """Same empty-candidate check for the decoupled (x/y) variant."""
        s = 256
        img_metas = {
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': 1,
            'batch_input_shape': (s, s, 3)
        }
        mask_head = DecoupledSOLOHead(num_classes=4, in_channels=1)
        cls_scores = torch.empty(0, 80)
        mask_preds_x = torch.empty(0, 16, 16)
        mask_preds_y = torch.empty(0, 16, 16)
        test_cfg = ConfigDict(
            score_thr=0.1,
            mask_thr=0.5,
        )
        results = mask_head._predict_by_feat_single(
            cls_scores=cls_scores,
            mask_preds_x=mask_preds_x,
            mask_preds_y=mask_preds_y,
            img_meta=img_metas,
            cfg=test_cfg)
        self.assertIsInstance(results, InstanceData)
        self.assertEqual(len(results), 0)
| 5,125 | 34.351724 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_yolox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.config import Config
from mmengine.model import bias_init_with_prob
from mmengine.structures import InstanceData
from mmengine.testing import assert_allclose
from mmdet.models.dense_heads import YOLOXHead
class TestYOLOXHead(TestCase):
    """Unit tests for ``YOLOXHead`` init, prediction and loss."""
    def test_init_weights(self):
        """Cls/obj conv biases must be initialised with prior prob 0.01."""
        head = YOLOXHead(
            num_classes=4, in_channels=1, stacked_convs=1, use_depthwise=False)
        head.init_weights()
        bias_init = bias_init_with_prob(0.01)
        for conv_cls, conv_obj in zip(head.multi_level_conv_cls,
                                      head.multi_level_conv_obj):
            assert_allclose(conv_cls.bias.data,
                            torch.ones_like(conv_cls.bias.data) * bias_init)
            assert_allclose(conv_obj.bias.data,
                            torch.ones_like(conv_obj.bias.data) * bias_init)
    def test_predict_by_feat(self):
        """predict_by_feat must run with and without rescale/NMS."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = Config(
            dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
        head = YOLOXHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            use_depthwise=False,
            test_cfg=test_cfg)
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        # Smoke tests only: no assertions, just both code paths.
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)
    def test_loss_by_feat(self):
        """Loss behaviour for empty, non-empty and out-of-bound GT."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        train_cfg = Config(
            dict(
                assigner=dict(
                    type='SimOTAAssigner',
                    center_radius=2.5,
                    candidate_topk=10,
                    iou_weight=3.0,
                    cls_weight=1.0)))
        head = YOLOXHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            use_depthwise=False,
            train_cfg=train_cfg)
        assert not head.use_l1
        assert isinstance(head.multi_level_cls_convs[0][0], ConvModule)
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData(
            bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            objectnesses, [gt_instances],
                                            img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        # (a depthwise head is built here, and L1 loss is enabled manually).
        head = YOLOXHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            use_depthwise=True,
            train_cfg=train_cfg)
        assert isinstance(head.multi_level_cls_convs[0][0],
                          DepthwiseSeparableConvModule)
        head.use_l1 = True
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([2]))
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        onegt_l1_loss = one_gt_losses['loss_l1'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')
        self.assertGreater(onegt_l1_loss.item(), 0,
                           'l1 loss should be non-zero')
        # Test ground truth out of bound
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[s * 4, s * 4, s * 4 + 10, s * 4 + 10]]),
            labels=torch.LongTensor([2]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            objectnesses, [gt_instances],
                                            img_metas)
        # When gt_bboxes out of bound, the assign results should be empty,
        # so the cls and bbox loss should be zero.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when gt_bboxes out of bound')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when gt_bboxes out of bound')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')
| 6,502 | 38.412121 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_autoassign_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import AutoAssignHead
class TestAutoAssignHead(TestCase):
    """Unit tests for ``AutoAssignHead`` loss computation."""
    def test_autoassign_head_loss(self):
        """Tests autoassign head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        head = AutoAssignHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=1,
            strides=[8, 16, 32, 64, 128],
            loss_bbox=dict(type='GIoULoss', loss_weight=5.0),
            norm_cfg=None)
        # One random single-channel feature map per pyramid level.
        feats = [
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in head.prior_generator.strides
        ]
        cls_scores, bbox_preds, centernesses = head.forward(feats)
        # With no ground truth, only the negative loss may be non-zero;
        # positive and centerness losses must vanish.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            centernesses, [empty_gt],
                                            img_metas)
        self.assertGreater(empty_gt_losses['loss_neg'].item(), 0,
                           'neg loss should be non-zero')
        self.assertEqual(
            empty_gt_losses['loss_pos'].item(), 0,
            'there should be no pos loss when there are no true boxes')
        self.assertEqual(
            empty_gt_losses['loss_center'].item(), 0,
            'there should be no centerness loss when there are no true boxes')
        # With a single ground-truth box all three losses must be non-zero.
        single_gt = InstanceData()
        single_gt.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        single_gt.labels = torch.LongTensor([2])
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, centernesses,
                                          [single_gt], img_metas)
        self.assertGreater(one_gt_losses['loss_pos'].item(), 0,
                           'pos loss should be non-zero')
        self.assertGreater(one_gt_losses['loss_neg'].item(), 0,
                           'neg loss should be non-zero')
        self.assertGreater(one_gt_losses['loss_center'].item(), 0,
                           'center loss should be non-zero')
| 3,186 | 41.493333 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_fovea_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import FoveaHead
class TestFOVEAHead(TestCase):
    """Unit tests for ``FoveaHead`` loss computation."""
    def test_fovea_head_loss(self):
        """Tests anchor head loss when truth is empty and non-empty."""
        size = 256
        img_metas = [{
            'img_shape': (size, size, 3),
            'pad_shape': (size, size, 3),
            'scale_factor': 1,
        }]
        head = FoveaHead(num_classes=4, in_channels=1)
        # One random map per level, downsampled by 4, 8, 16, ...
        num_levels = len(head.prior_generator.strides)
        feats = [
            torch.rand(1, 1, size // 2**(lvl + 2), size // 2**(lvl + 2))
            for lvl in range(num_levels)
        ]
        cls_scores, bbox_preds = head.forward(feats)
        # Empty ground truth: only the classification loss may be non-zero.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        empty_losses = head.loss_by_feat(cls_scores, bbox_preds, [empty_gt],
                                         img_metas)
        self.assertGreater(empty_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_losses['loss_bbox'].item(), 0,
            'there should be no box loss when there are no true boxes')
        # One ground-truth box: both losses must be non-zero.
        single_gt = InstanceData()
        single_gt.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        single_gt.labels = torch.LongTensor([2])
        single_losses = head.loss_by_feat(cls_scores, bbox_preds, [single_gt],
                                          img_metas)
        self.assertGreater(single_losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(single_losses['loss_bbox'].item(), 0,
                           'box loss should be non-zero')
| 2,443 | 38.419355 | 76 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_pisa_retinanet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import PISARetinaHead
class TestPISARetinaHead(TestCase):
    """Unit tests for ``PISARetinaHead`` loss computation (incl. CARL)."""
    def test_pisa_reitnanet_head_loss(self):
        """Tests pisa retinanet head loss when truth is empty and non-empty."""
        s = 300
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': 1,
        }]
        # PISA-specific pieces: ISR reweighting and the CARL loss term.
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.4,
                    min_pos_iou=0,
                    ignore_iof_thr=-1),
                isr=dict(k=2., bias=0.),
                carl=dict(k=1., bias=0.2),
                sampler=dict(type='PseudoSampler'),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        pisa_retinanet_head = PISARetinaHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=256,
            anchor_generator=dict(
                type='AnchorGenerator',
                octave_base_scale=4,
                scales_per_octave=3,
                ratios=[0.5, 1.0, 2.0],
                strides=[8, 16, 32, 64, 128]),
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[.0, .0, .0, .0],
                target_stds=[1.0, 1.0, 1.0, 1.0]),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
            train_cfg=cfg)
        # pisa retina head expects a multiple levels of features per image
        feats = (
            torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
            for stride in pisa_retinanet_head.prior_generator.strides)
        cls_scores, bbox_preds = pisa_retinanet_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = pisa_retinanet_head.loss_by_feat(
            cls_scores, bbox_preds, [gt_instances], img_metas)
        # When there is no truth, cls_loss and box_loss should all be zero.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        empty_carl_loss = empty_gt_losses['loss_carl']
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_carl_loss.item(), 0,
            'there should be no carl loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = pisa_retinanet_head.loss_by_feat(
            cls_scores, bbox_preds, [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        onegt_carl_loss = one_gt_losses['loss_carl']
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_carl_loss.item(), 0,
                           'carl loss should be non-zero')
| 4,122 | 37.896226 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_ld_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import GFLHead, LDHead
class TestLDHead(TestCase):
    """Unit tests for ``LDHead`` (localization distillation) loss."""
    def test_ld_head_loss(self):
        """Tests ld head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        train_cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # Student head with the KL-divergence distillation loss.
        ld_head = LDHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            loss_ld=dict(
                type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
            loss_cls=dict(
                type='QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]))
        # GFL teacher whose bbox predictions serve as soft targets.
        teacher_model = GFLHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            loss_cls=dict(
                type='QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]))
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
        cls_scores, bbox_preds = ld_head.forward(feat)
        # Teacher bbox branch output used as the distillation target.
        rand_soft_target = teacher_model.forward(feat)[1]
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        batch_gt_instances_ignore = None
        empty_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas,
                                               rand_soft_target,
                                               batch_gt_instances_ignore)
        # When there is no truth, the cls loss should be nonzero, ld loss
        # should be non-negative but there should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_ld_loss = sum(empty_gt_losses['loss_ld'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreaterEqual(empty_ld_loss.item(), 0,
                                'ld loss should be non-negative')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        batch_gt_instances_ignore = None
        one_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                             [gt_instances], img_metas,
                                             rand_soft_target,
                                             batch_gt_instances_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        # Reuse the GT box itself as the ignore region.
        batch_gt_instances_ignore = gt_instances
        # When truth is non-empty but ignored then the cls loss should be
        # nonzero, but there should be no box loss.
        ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                                [gt_instances], img_metas,
                                                rand_soft_target,
                                                batch_gt_instances_ignore)
        ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
        ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
        self.assertGreater(ignore_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(ignore_box_loss.item(), 0,
                         'gt bbox ignored loss should be zero')
        # When truth is non-empty and not ignored then both cls and box loss
        # should be nonzero for random inputs
        # (a random box far from the GT, so nothing should be ignored).
        batch_gt_instances_ignore = InstanceData()
        batch_gt_instances_ignore.bboxes = torch.randn(1, 4)
        not_ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                                    [gt_instances], img_metas,
                                                    rand_soft_target,
                                                    batch_gt_instances_ignore)
        not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
        not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
        self.assertGreater(not_ignore_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreaterEqual(not_ignore_box_loss.item(), 0,
                                'gt bbox not ignored loss should be non-zero')
| 6,184 | 40.233333 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_paa_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.utils import levels_to_images
class TestPAAHead(TestCase):
    """Unit tests for PAAHead loss computation and box decoding."""

    def test_paa_head_loss(self):
        """Tests paa head loss when truth is empty and non-empty."""

        class mock_skm:
            """Stand-in for ``sklearn.mixture`` so PAA's GMM-based score
            reassignment runs without a scikit-learn dependency."""

            def GaussianMixture(self, *args, **kwargs):
                return self

            def fit(self, loss):
                pass

            def predict(self, loss):
                # Assign every sample to component 0. Use np.int64
                # explicitly: the deprecated `np.long` alias was removed in
                # NumPy 1.24 and would raise AttributeError here.
                components = np.zeros_like(loss, dtype=np.int64)
                return components.reshape(-1)

            def score_samples(self, loss):
                scores = np.random.random(len(loss))
                return scores

        # Patch the module-level sklearn handle used by PAAHead.
        paa_head.skm = mock_skm()

        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        train_cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.1,
                    neg_iou_thr=0.1,
                    min_pos_iou=0,
                    ignore_iof_thr=-1),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # since Focal Loss is not supported on CPU
        paa = PAAHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
            loss_centerness=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        # One random feature map per FPN level.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
        paa.init_weights()
        cls_scores, bbox_preds, iou_preds = paa(feat)

        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = paa.loss_by_feat(cls_scores, bbox_preds, iou_preds,
                                           [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box or iou loss.
        empty_cls_loss = empty_gt_losses['loss_cls']
        empty_box_loss = empty_gt_losses['loss_bbox']
        empty_iou_loss = empty_gt_losses['loss_iou']
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_iou_loss.item(), 0,
            'there should be no iou loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = paa.loss_by_feat(cls_scores, bbox_preds, iou_preds,
                                         [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls']
        onegt_box_loss = one_gt_losses['loss_bbox']
        onegt_iou_loss = one_gt_losses['loss_iou']
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_iou_loss.item(), 0,
                           'iou loss should be non-zero')

        # levels_to_images should transpose per-level tensors into one
        # (num_points, channels) tensor per image.
        n, c, h, w = 10, 4, 20, 20
        mlvl_tensor = [torch.ones(n, c, h, w) for _ in range(5)]
        results = levels_to_images(mlvl_tensor)
        self.assertEqual(len(results), n)
        self.assertEqual(results[0].size(), (h * w * 5, c))
        self.assertTrue(paa.with_score_voting)

        # Smoke-test predict_by_feat with a single-level head.
        paa = PAAHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
            loss_centerness=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
        cls_scores = [torch.ones(2, 4, 5, 5)]
        bbox_preds = [torch.ones(2, 4, 5, 5)]
        iou_preds = [torch.ones(2, 1, 5, 5)]
        cfg = Config(
            dict(
                nms_pre=1000,
                min_bbox_size=0,
                score_thr=0.05,
                nms=dict(type='nms', iou_threshold=0.6),
                max_per_img=100))
        rescale = False
        paa.predict_by_feat(
            cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
| 5,675 | 37.09396 | 79 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_retina_sepBN_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import RetinaSepBNHead
class TestRetinaSepBNHead(TestCase):
    """Unit tests for RetinaSepBNHead construction and loss behaviour."""

    def test_init(self):
        """Test init RetinaSepBN head."""
        head = RetinaSepBNHead(num_classes=1, num_ins=1, in_channels=1)
        head.init_weights()
        # Every sub-module must exist after initialization.
        for submodule in (head.cls_convs, head.reg_convs, head.retina_cls,
                          head.retina_reg):
            self.assertTrue(submodule)

    def test_retina_sepbn_head_loss(self):
        """Tests RetinaSepBN head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        cfg = Config(
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.4,
                    min_pos_iou=0,
                    ignore_iof_thr=-1),
                # Focal loss should use PseudoSampler.
                sampler=dict(type='PseudoSampler'),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        head = RetinaSepBNHead(
            num_classes=4, num_ins=5, in_channels=1, train_cfg=cfg)
        # One random feature map per anchor-generator level.
        feats = tuple(
            torch.rand(1, 1, s // (2**(lvl + 2)), s // (2**(lvl + 2)))
            for lvl in range(len(head.prior_generator.strides)))
        cls_scores, bbox_preds = head.forward(feats)

        # Empty ground truth: only the classification (background) loss
        # may be non-zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            [gt_instances], img_metas)
        self.assertGreater(
            sum(empty_gt_losses['loss_cls']).item(), 0,
            'cls loss should be non-zero')
        self.assertEqual(
            sum(empty_gt_losses['loss_bbox']).item(), 0,
            'there should be no box loss when there are no true boxes')

        # One ground-truth box: both loss terms must be positive.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                          [gt_instances], img_metas)
        self.assertGreater(
            sum(one_gt_losses['loss_cls']).item(), 0,
            'cls loss should be non-zero')
        self.assertGreater(
            sum(one_gt_losses['loss_bbox']).item(), 0,
            'box loss should be non-zero')
| 3,483 | 38.146067 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_ga_retina_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.models.dense_heads import GARetinaHead
# Shared construction kwargs for GARetinaHead used by the test below.
# Channel widths and conv depth are kept tiny so the CPU forward is fast.
ga_retina_head_config = ConfigDict(
    dict(
        num_classes=4,
        in_channels=4,
        feat_channels=4,
        stacked_convs=1,
        # Dense multi-scale anchors consumed by the guided-anchoring
        # (approx) assigner.
        approx_anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        # One square base anchor per location; its final shape is predicted.
        square_anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            scales=[4],
            strides=[8, 16, 32, 64, 128]),
        # Coder for the predicted anchor shapes.
        anchor_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        # Coder for the final bbox regression branch.
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0),
        train_cfg=dict(
            ga_assigner=dict(
                type='ApproxMaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0.4,
                ignore_iof_thr=-1),
            ga_sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            center_ratio=0.2,
            ignore_ratio=0.5,
            debug=False),
        test_cfg=dict(
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))
class TestGARetinaHead(TestCase):

    def test_ga_retina_head_init_and_forward(self):
        """GARetinaHead inherits its loss and prediction functions from
        GuidedAnchorHead, so only initialization and forward are exercised
        here.
        """
        # Build the head from the shared module-level config.
        head = GARetinaHead(**ga_retina_head_config)

        # Forward one random feature map per square-anchor stride.
        img_size = 256
        feature_maps = (
            torch.rand(1, 4, img_size // stride[1], img_size // stride[0])
            for stride in head.square_anchor_generator.strides)
        head(feature_maps)
| 3,067 | 30.306122 | 74 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_condinst_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CondInstBboxHead, CondInstMaskHead
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
    """Build deterministic pseudo-random binary masks, one per bbox, each
    non-zero only inside its (integer-truncated) box region."""
    rng = np.random.RandomState(0)
    canvas = np.zeros((num_items, img_h, img_w), dtype=np.float32)
    for idx, box in enumerate(bboxes):
        x1, y1, x2, y2 = box.astype(np.int32)
        patch = (rng.rand(1, y2 - y1, x2 - x1) > 0.3).astype(np.int64)
        canvas[idx:idx + 1, y1:y2, x1:x2] = patch
    return BitmapMasks(canvas, height=img_h, width=img_w)
def _fake_mask_feature_head():
    """Return a minimal mask-feature-head config for the CondInst tests."""
    return ConfigDict(
        in_channels=1,
        feat_channels=1,
        start_level=0,
        end_level=2,
        out_channels=8,
        mask_stride=8,
        num_stacked_convs=4,
        norm_cfg=dict(type='BN', requires_grad=True))
class TestCondInstHead(TestCase):
    """Unit tests for the CondInst bbox head and dynamic-filter mask head."""

    def test_condinst_bboxhead_loss(self):
        """Tests condinst bboxhead loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        # Tiny channel widths keep the CPU forward pass fast.
        condinst_bboxhead = CondInstBboxHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        # The FCOS-style bbox head expects one feature map per
        # prior-generator stride; a generator is fine, forward consumes it.
        feats = (
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in condinst_bboxhead.prior_generator.strides)
        cls_scores, bbox_preds, centernesses, param_preds =\
            condinst_bboxhead.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
        empty_gt_losses = condinst_bboxhead.loss_by_feat(
            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],
            img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # box loss and centerness loss should be zero
        empty_cls_loss = empty_gt_losses['loss_cls'].item()
        empty_box_loss = empty_gt_losses['loss_bbox'].item()
        empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss, 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_ctr_loss, 0,
            'there should be no centerness loss when there are no true boxes')

        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
        one_gt_losses = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,
                                                       centernesses,
                                                       param_preds,
                                                       [gt_instances],
                                                       img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].item()
        onegt_box_loss = one_gt_losses['loss_bbox'].item()
        onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(onegt_ctr_loss, 0,
                           'centerness loss should be non-zero')

        # Test the `center_sampling` works fine.
        # NOTE: the flag is mutated in place and the cached predictions are
        # reused, so these sections depend on the order above.
        condinst_bboxhead.center_sampling = True
        ctrsamp_losses = condinst_bboxhead.loss_by_feat(
            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],
            img_metas)
        ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item()
        ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item()
        ctrsamp_ctr_loss = ctrsamp_losses['loss_centerness'].item()
        self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(ctrsamp_ctr_loss, 0,
                           'centerness loss should be non-zero')

        # Test the `norm_on_bbox` works fine.
        condinst_bboxhead.norm_on_bbox = True
        normbox_losses = condinst_bboxhead.loss_by_feat(
            cls_scores, bbox_preds, centernesses, param_preds, [gt_instances],
            img_metas)
        normbox_cls_loss = normbox_losses['loss_cls'].item()
        normbox_box_loss = normbox_losses['loss_bbox'].item()
        normbox_ctr_loss = normbox_losses['loss_centerness'].item()
        self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(normbox_ctr_loss, 0,
                           'centerness loss should be non-zero')

    def test_condinst_maskhead_loss(self):
        """Tests condinst maskhead loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        condinst_bboxhead = CondInstBboxHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)

        mask_feature_head = _fake_mask_feature_head()
        condinst_maskhead = CondInstMaskHead(
            mask_feature_head=mask_feature_head,
            loss_mask=dict(
                type='DiceLoss',
                use_sigmoid=True,
                activate=True,
                eps=5e-6,
                loss_weight=1.0))

        # The bbox head expects one feature map per stride level.
        feats = []
        for i in range(len(condinst_bboxhead.strides)):
            feats.append(
                torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))
        feats = tuple(feats)
        cls_scores, bbox_preds, centernesses, param_preds =\
            condinst_bboxhead.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
        # NOTE(review): the bbox head's loss_by_feat is run first even though
        # its return value is discarded — presumably it caches the sampling
        # state that get_positive_infos() below reads; keep the call order.
        _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,
                                           centernesses, param_preds,
                                           [gt_instances], img_metas)
        # When truth is empty then all mask loss
        # should be zero for random inputs
        positive_infos = condinst_bboxhead.get_positive_infos()
        mask_outs = condinst_maskhead.forward(feats, positive_infos)
        empty_gt_mask_losses = condinst_maskhead.loss_by_feat(
            *mask_outs, [gt_instances], img_metas, positive_infos)
        loss_mask = empty_gt_mask_losses['loss_mask']
        self.assertEqual(loss_mask, 0, 'mask loss should be zero')

        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
        _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,
                                           centernesses, param_preds,
                                           [gt_instances], img_metas)
        positive_infos = condinst_bboxhead.get_positive_infos()
        mask_outs = condinst_maskhead.forward(feats, positive_infos)
        one_gt_mask_losses = condinst_maskhead.loss_by_feat(
            *mask_outs, [gt_instances], img_metas, positive_infos)
        loss_mask = one_gt_mask_losses['loss_mask']
        self.assertGreater(loss_mask, 0, 'mask loss should be nonzero')
| 8,815 | 42.860697 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_fcos_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import FCOSHead
class TestFCOSHead(TestCase):
    """Unit tests for FCOSHead loss computation."""

    def test_fcos_head_loss(self):
        """Tests fcos head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        # Tiny channel widths keep the CPU forward pass fast.
        fcos_head = FCOSHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        # Fcos head expects a multiple levels of features per image
        # (one feature map per prior-generator stride; the generator is
        # consumed once by forward()).
        feats = (
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in fcos_head.prior_generator.strides)
        cls_scores, bbox_preds, centernesses = fcos_head.forward(feats)

        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
                                                 centernesses, [gt_instances],
                                                 img_metas)
        # When there is no truth, the cls loss should be nonzero but
        # box loss and centerness loss should be zero
        empty_cls_loss = empty_gt_losses['loss_cls'].item()
        empty_box_loss = empty_gt_losses['loss_bbox'].item()
        empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss, 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_ctr_loss, 0,
            'there should be no centerness loss when there are no true boxes')

        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
                                               centernesses, [gt_instances],
                                               img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].item()
        onegt_box_loss = one_gt_losses['loss_bbox'].item()
        onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(onegt_ctr_loss, 0,
                           'centerness loss should be non-zero')

        # Test the `center_sampling` works fine.
        # NOTE: the flag is mutated in place and the cached predictions are
        # reused, so this section depends on the state set up above.
        fcos_head.center_sampling = True
        ctrsamp_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
                                                centernesses, [gt_instances],
                                                img_metas)
        ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item()
        ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item()
        ctrsamp_ctr_loss = ctrsamp_losses['loss_centerness'].item()
        self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(ctrsamp_ctr_loss, 0,
                           'centerness loss should be non-zero')

        # Test the `norm_on_bbox` works fine.
        fcos_head.norm_on_bbox = True
        normbox_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
                                                centernesses, [gt_instances],
                                                img_metas)
        normbox_cls_loss = normbox_losses['loss_cls'].item()
        normbox_box_loss = normbox_losses['loss_bbox'].item()
        normbox_ctr_loss = normbox_losses['loss_centerness'].item()
        self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(normbox_ctr_loss, 0,
                           'centerness loss should be non-zero')
| 4,509 | 45.020408 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_centernet_update_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import CenterNetUpdateHead
class TestCenterNetUpdateHead(TestCase):
    """Unit tests for CenterNetUpdateHead loss computation."""

    def test_centernet_update_head_loss(self):
        """Tests fcos head loss when truth is empty and non-empty."""
        img_size = 256
        img_metas = [{
            'img_shape': (img_size, img_size, 3),
            'pad_shape': (img_size, img_size, 3),
            'scale_factor': 1,
        }]
        head = CenterNetUpdateHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            norm_cfg=None)
        # One random feature map per prior-generator stride.
        feature_maps = (
            torch.rand(1, 1, img_size // stride[1], img_size // stride[0])
            for stride in head.prior_generator.strides)
        cls_scores, bbox_preds = head.forward(feature_maps)

        # An image without ground truth should yield only a classification
        # (background) loss; the box loss must stay at zero.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        losses = head.loss_by_feat(cls_scores, bbox_preds, [empty_gt],
                                   img_metas)
        self.assertGreater(losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            losses['loss_bbox'].item(), 0,
            'there should be no box loss when there are no true boxes')

        # With one ground-truth box, both loss terms must be positive.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([2])
        losses = head.loss_by_feat(cls_scores, bbox_preds, [one_gt],
                                   img_metas)
        self.assertGreater(losses['loss_cls'].item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(losses['loss_bbox'].item(), 0,
                           'box loss should be non-zero')
| 2,608 | 39.765625 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_sabl_retina_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import SABLRetinaHead
class TestSABLRetinaHead(TestCase):
    """Unit tests for SABLRetinaHead loss computation and prediction."""

    def test_sabl_retina_head(self):
        """Tests sabl retina head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s),
            'pad_shape': (s, s),
            'scale_factor': [1, 1],
        }]
        train_cfg = ConfigDict(
            dict(
                assigner=dict(
                    type='ApproxMaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.4,
                    min_pos_iou=0.0,
                    ignore_iof_thr=-1),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # Tiny channel widths keep the CPU forward pass fast. SABL splits
        # bbox regression into a bucket-classification branch and a
        # within-bucket regression branch.
        sabl_retina_head = SABLRetinaHead(
            num_classes=4,
            in_channels=1,
            feat_channels=1,
            stacked_convs=1,
            approx_anchor_generator=dict(
                type='AnchorGenerator',
                octave_base_scale=4,
                scales_per_octave=3,
                ratios=[0.5, 1.0, 2.0],
                strides=[8, 16, 32, 64, 128]),
            square_anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                scales=[4],
                strides=[8, 16, 32, 64, 128]),
            bbox_coder=dict(
                type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
            loss_bbox_reg=dict(
                type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5),
            train_cfg=train_cfg)
        # The head expects one feature map per square-anchor stride.
        feats = (
            torch.rand(1, 1, s // stride[1], s // stride[0])
            for stride in sabl_retina_head.square_anchor_generator.strides)
        outs = sabl_retina_head.forward(feats)
        # Test that empty ground truth encourages the network to
        # predict background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = sabl_retina_head.loss_by_feat(
            *outs, [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but both
        # bbox branches (bucket cls and in-bucket regression) must be zero.
        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
        empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls']).item()
        empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg']).item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertEqual(
            empty_box_cls_loss, 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_box_reg_loss, 0,
            'there should be no centerness loss when there are no true boxes')

        # When truth is non-empty then all cls, box loss and centerness loss
        # should be nonzero for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = sabl_retina_head.loss_by_feat(*outs, [gt_instances],
                                                      img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls']).item()
        onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls']).item()
        onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg']).item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_cls_loss, 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_box_reg_loss, 0,
                           'centerness loss should be non-zero')
        test_cfg = ConfigDict(
            dict(
                nms_pre=1000,
                min_bbox_size=0,
                score_thr=0.05,
                nms=dict(type='nms', iou_threshold=0.5),
                max_per_img=100))
        # test predict_by_feat
        sabl_retina_head.predict_by_feat(
            *outs, batch_img_metas=img_metas, cfg=test_cfg, rescale=True)
| 4,672 | 38.940171 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_ddod_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import DDODHead
class TestDDODHead(TestCase):
    """Unit tests for DDODHead loss computation."""

    def test_ddod_head_loss(self):
        """Tests ddod head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        # DDOD uses separate ATSS assigners for the cls and reg branches.
        cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
                reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        atss_head = DDODHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=1,
            use_dcn=False,
            norm_cfg=None,
            train_cfg=cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
            loss_iou=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
        # One random feature map per anchor stride.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [8, 16, 32, 64, 128]
        ]
        cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
                                                 centernesses, [gt_instances],
                                                 img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        # NOTE: despite the `*_centerness_loss` variable names, the third
        # term is DDOD's IoU-prediction loss, stored under 'loss_iou'.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_centerness_loss = sum(empty_gt_losses['loss_iou'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_centerness_loss.item(), 0,
            'there should be no centerness loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
                                               centernesses, [gt_instances],
                                               img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        onegt_centerness_loss = sum(one_gt_losses['loss_iou'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_centerness_loss.item(), 0,
                           'centerness loss should be non-zero')
| 3,979 | 39.20202 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_atss_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import ATSSHead
class TestATSSHead(TestCase):
    """Unit tests for ATSSHead loss computation."""

    def test_atss_head_loss(self):
        """Tests atss head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
        cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        # Tiny channel widths keep the CPU forward pass fast.
        atss_head = ATSSHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=1,
            norm_cfg=None,
            train_cfg=cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                gamma=2.0,
                alpha=0.25,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
        # One random feature map per anchor stride.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [8, 16, 32, 64, 128]
        ]
        cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
                                                 centernesses, [gt_instances],
                                                 img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss (and no centerness loss).
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_centerness_loss.item(), 0,
            'there should be no centerness loss when there are no true boxes')

        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
                                               centernesses, [gt_instances],
                                               img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_centerness_loss.item(), 0,
                           'centerness loss should be non-zero')
| 3,776 | 38.757895 | 78 | py |
ERD | ERD-main/tests/test_models/test_dense_heads/test_gfl_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import GFLHead
class TestGFLHead(TestCase):

    def test_gfl_head_loss(self):
        """Check GFLHead.loss_by_feat for both empty and non-empty GT."""
        img_size = 256
        img_metas = [{
            'img_shape': (img_size, img_size, 3),
            'pad_shape': (img_size, img_size, 3),
            'scale_factor': 1
        }]
        train_cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
        gfl_head = GFLHead(
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            train_cfg=train_cfg,
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]),
            loss_cls=dict(
                type='QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0))

        # One random single-channel feature map per pyramid stride.
        feats = []
        for stride in [4, 8, 16, 32, 64]:
            feats.append(
                torch.rand(1, 1, img_size // stride, img_size // stride))
        cls_scores, bbox_preds = gfl_head.forward(feats)

        # With no ground-truth boxes every location is background, so only
        # the classification branch should incur a loss.
        empty_gt = InstanceData()
        empty_gt.bboxes = torch.empty((0, 4))
        empty_gt.labels = torch.LongTensor([])
        empty_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,
                                                [empty_gt], img_metas)
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_dfl_loss.item(), 0,
            'there should be no dfl loss when there are no true boxes')

        # With one ground-truth box and random predictions, every loss
        # term should be strictly positive.
        one_gt = InstanceData()
        one_gt.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt.labels = torch.LongTensor([2])
        one_gt_losses = gfl_head.loss_by_feat(cls_scores, bbox_preds,
                                              [one_gt], img_metas)
        self.assertGreater(
            sum(one_gt_losses['loss_cls']).item(), 0,
            'cls loss should be non-zero')
        self.assertGreater(
            sum(one_gt_losses['loss_bbox']).item(), 0,
            'box loss should be non-zero')
        self.assertGreater(
            sum(one_gt_losses['loss_dfl']).item(), 0,
            'dfl loss should be non-zero')
| 3,503 | 37.933333 | 78 | py |
ERD | ERD-main/tests/test_models/test_detectors/test_single_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import time
import unittest
from unittest import TestCase
import torch
from mmengine.logging import MessageHub
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestSingleStageDetector(TestCase):
    """Smoke tests for single-stage detectors.

    Each config below is built into a detector and exercised through the
    three forward modes (``loss``, ``predict``, ``tensor``) on random
    inputs, on CPU and — when available — on CUDA.
    """

    # Single source of truth for the configs under test: previously this
    # list was copy-pasted into all four ``@parameterized.expand``
    # decorators and could silently drift between them.
    _CFG_FILES = [
        'retinanet/retinanet_r18_fpn_1x_coco.py',
        'centernet/centernet_r18_8xb16-crop512-140e_coco.py',
        'fsaf/fsaf_r50_fpn_1x_coco.py',
        'yolox/yolox_tiny_8xb8-300e_coco.py',
        'yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py',
        'reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py',
    ]

    def setUp(self):
        # Register all mmdet components so MODELS.build can resolve the
        # string type names used in the configs.
        register_all_modules()

    @parameterized.expand(_CFG_FILES)
    def test_init(self, cfg_file):
        """The detector builds with backbone, neck and bbox_head set."""
        model = get_detector_cfg(cfg_file)
        # Avoid downloading pretrained backbone weights during the test.
        model.backbone.init_cfg = None

        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.bbox_head)

    @parameterized.expand([(cfg, ('cpu', 'cuda')) for cfg in _CFG_FILES])
    def test_single_stage_forward_loss_mode(self, cfg_file, devices):
        """``forward(mode='loss')`` returns a dict of loss terms."""
        # A uniquely named MessageHub keeps runtime info ('iter'/'epoch')
        # isolated from other tests in the same process.
        message_hub = MessageHub.get_instance(
            f'test_single_stage_forward_loss_mode-{time.time()}')
        message_hub.update_info('iter', 0)
        message_hub.update_info('epoch', 0)

        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None

        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])

        for device in devices:
            detector = MODELS.build(model)
            detector.init_weights()

            if device == 'cuda':
                if not torch.cuda.is_available():
                    # NOTE: this used to be ``return unittest.skip(...)``,
                    # which only builds a decorator without applying it and
                    # makes the test return a non-None value; skip the CUDA
                    # pass explicitly instead.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, True)
            losses = detector.forward(**data, mode='loss')
            self.assertIsInstance(losses, dict)

    @parameterized.expand([(cfg, ('cpu', 'cuda')) for cfg in _CFG_FILES])
    def test_single_stage_forward_predict_mode(self, cfg_file, devices):
        """``forward(mode='predict')`` returns one DetDataSample per image."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None

        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])

        for device in devices:
            detector = MODELS.build(model)

            if device == 'cuda':
                if not torch.cuda.is_available():
                    # See the note in test_single_stage_forward_loss_mode:
                    # skip the CUDA pass rather than return a skip decorator.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            # Test forward test
            detector.eval()
            with torch.no_grad():
                batch_results = detector.forward(**data, mode='predict')
                self.assertEqual(len(batch_results), 2)
                self.assertIsInstance(batch_results[0], DetDataSample)

    @parameterized.expand([(cfg, ('cpu', 'cuda')) for cfg in _CFG_FILES])
    def test_single_stage_forward_tensor_mode(self, cfg_file, devices):
        """``forward(mode='tensor')`` returns the raw head outputs tuple."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None

        from mmdet.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])

        for device in devices:
            detector = MODELS.build(model)

            if device == 'cuda':
                if not torch.cuda.is_available():
                    # See the note in test_single_stage_forward_loss_mode:
                    # skip the CUDA pass rather than return a skip decorator.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            batch_results = detector.forward(**data, mode='tensor')
            self.assertIsInstance(batch_results, tuple)
| 5,791 | 41.588235 | 79 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.