code
stringlengths
17
6.64M
def obj2tensor(pyobj, device='cuda'):
    """Serialize a picklable Python object into a byte tensor.

    Args:
        pyobj: Any picklable Python object.
        device (str): Device the resulting tensor is moved to.
            Default: 'cuda'.

    Returns:
        torch.Tensor: A 1-D uint8 tensor holding the pickled bytes.
    """
    raw_bytes = pickle.dumps(pyobj)
    byte_storage = torch.ByteStorage.from_buffer(raw_bytes)
    return torch.ByteTensor(byte_storage).to(device=device)
def tensor2obj(tensor):
    """Deserialize a byte tensor back into the original Python object.

    Inverse of ``obj2tensor``: the tensor is moved to CPU and its raw
    bytes are unpickled.

    Args:
        tensor (torch.Tensor): 1-D uint8 tensor produced by pickling.

    Returns:
        The reconstructed Python object.
    """
    raw_bytes = tensor.cpu().numpy().tobytes()
    return pickle.loads(raw_bytes)
@functools.lru_cache()
def _get_global_gloo_group():
    """Return a process group based on gloo backend, containing all ranks.

    The result is cached, so the extra group is created at most once per
    process.
    """
    # NCCL cannot run CPU-side collectives, so a dedicated gloo group is
    # created in that case; otherwise the default world group already works.
    if dist.get_backend() != 'nccl':
        return dist.group.WORLD
    return dist.new_group(backend='gloo')
def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
    """Apply all reduce function for python dict object.

    The code is modified from https://github.com/Megvii-
    BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.

    NOTE: make sure that py_dict in different ranks has the same keys and
    the values should be in the same shape. Currently only supports
    nccl backend.

    Args:
        py_dict (dict): Dict to be applied all reduce op.
        op (str): Operator, could be 'sum' or 'mean'. Default: 'sum'
        group (:obj:`torch.distributed.group`, optional): Distributed group,
            Default: None.
        to_float (bool): Whether to convert all values of dict to float.
            Default: True.

    Returns:
        OrderedDict: reduced python dict object.
    """
    warnings.warn('group` is deprecated. Currently only supports NCCL backend.')
    (_, world_size) = get_dist_info()
    # Single-process run: nothing to communicate.
    if (world_size == 1):
        return py_dict
    py_key = list(py_dict.keys())
    if (not isinstance(py_dict, OrderedDict)):
        # A plain dict may enumerate its keys in a different order on each
        # rank; broadcast rank 0's key order so the flattened tensors below
        # line up element-wise across ranks.
        py_key_tensor = obj2tensor(py_key)
        dist.broadcast(py_key_tensor, src=0)
        py_key = tensor2obj(py_key_tensor)
    # Remember per-key shapes/sizes so the flat tensor can be split back.
    tensor_shapes = [py_dict[k].shape for k in py_key]
    tensor_numels = [py_dict[k].numel() for k in py_key]
    if to_float:
        warnings.warn('Note: the "to_float" is True, you need to ensure that the behavior is reasonable.')
        flatten_tensor = torch.cat([py_dict[k].flatten().float() for k in py_key])
    else:
        flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])
    # One collective call over the concatenated values instead of one per key.
    dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)
    if (op == 'mean'):
        flatten_tensor /= world_size
    split_tensors = [x.reshape(shape) for (x, shape) in zip(torch.split(flatten_tensor, tensor_numels), tensor_shapes)]
    out_dict = {k: v for (k, v) in zip(py_key, split_tensors)}
    if isinstance(py_dict, OrderedDict):
        out_dict = OrderedDict(out_dict)
    return out_dict
def palette_val(palette):
    """Convert an integer palette to matplotlib-style float colors.

    Args:
        palette List[tuple]: A list of color tuples with 0-255 channels.

    Returns:
        List[tuple[float]]: A list of RGB tuples scaled to [0, 1].
    """
    return [tuple(channel / 255 for channel in color) for color in palette]
def get_palette(palette, num_classes):
    """Get palette from various inputs.

    Args:
        palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.
        num_classes (int): the number of classes.

    Returns:
        list[tuple[int]]: A list of color tuples.
    """
    assert isinstance(num_classes, int)

    if isinstance(palette, list):
        colors = palette
    elif isinstance(palette, tuple):
        colors = [palette] * num_classes
    elif palette == 'random' or palette is None:
        # Draw reproducible random colors without disturbing the caller's
        # global NumPy RNG state.
        state = np.random.get_state()
        np.random.seed(42)
        random_colors = np.random.randint(0, 256, size=(num_classes, 3))
        np.random.set_state(state)
        colors = [tuple(c) for c in random_colors]
    elif palette == 'coco':
        from mmdet.datasets import CocoDataset, CocoPanopticDataset
        colors = CocoDataset.PALETTE
        # Fall back to the larger panoptic palette when needed.
        if len(colors) < num_classes:
            colors = CocoPanopticDataset.PALETTE
    elif palette == 'citys':
        from mmdet.datasets import CityscapesDataset
        colors = CityscapesDataset.PALETTE
    elif palette == 'voc':
        from mmdet.datasets import VOCDataset
        colors = VOCDataset.PALETTE
    elif mmcv.is_str(palette):
        # A single named color, reversed from BGR to RGB order.
        colors = [mmcv.color_val(palette)[::-1]] * num_classes
    else:
        raise TypeError(f'Invalid type for palette: {type(palette)}')

    assert len(colors) >= num_classes, \
        'The length of palette should not be less than `num_classes`.'
    return colors
class COCO(_COCO):
    """This class is almost the same as official pycocotools package.

    It implements some snake case function aliases. So that the COCO class has
    the same interface as LVIS class.
    """

    def __init__(self, annotation_file=None):
        # NOTE(review): this is a plain string comparison, not a version
        # comparison — official pycocotools (e.g. '2.0.x') also compares
        # >= '12.0.2' lexicographically; confirm the intended behavior.
        if (getattr(pycocotools, '__version__', '0') >= '12.0.2'):
            warnings.warn('mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', UserWarning)
        super().__init__(annotation_file=annotation_file)
        # Snake-case aliases for the parent's camelCase lookup tables.
        self.img_ann_map = self.imgToAnns
        self.cat_img_map = self.catToImgs

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        # Alias of getAnnIds with snake_case naming (LVIS-style interface).
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        # Alias of getCatIds.
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        # Alias of getImgIds.
        return self.getImgIds(img_ids, cat_ids)

    def load_anns(self, ids):
        # Alias of loadAnns.
        return self.loadAnns(ids)

    def load_cats(self, ids):
        # Alias of loadCats.
        return self.loadCats(ids)

    def load_imgs(self, ids):
        # Alias of loadImgs.
        return self.loadImgs(ids)
def pq_compute_single_core(proc_id, annotation_set, gt_folder, pred_folder, categories, file_client=None):
    """The single core function to evaluate the metric of Panoptic
    Segmentation.

    Same as the function with the same name in `panopticapi`. Only the function
    to load the images is changed to use the file client.

    Args:
        proc_id (int): The id of the mini process.
        annotation_set (list): Pairs of (gt_ann, pred_ann) handled by this
            worker.
        gt_folder (str): The path of the ground truth images.
        pred_folder (str): The path of the prediction images.
        categories (str): The categories of the dataset.
        file_client (object): The file client of the dataset. If None,
            the backend will be set to `disk`.

    Returns:
        PQStat: Accumulated TP/FP/FN/IoU statistics for this worker's images.
    """
    if (PQStat is None):
        raise RuntimeError('panopticapi is not installed, please install it by: pip install git+https://github.com/cocodataset/panopticapi.git.')
    if (file_client is None):
        file_client_args = dict(backend='disk')
        file_client = mmcv.FileClient(**file_client_args)
    pq_stat = PQStat()
    idx = 0
    for (gt_ann, pred_ann) in annotation_set:
        if ((idx % 100) == 0):
            print('Core: {}, {} from {} images processed'.format(proc_id, idx, len(annotation_set)))
        idx += 1
        # Ground-truth panoptic PNG is read through the file client so
        # non-disk storage backends also work.
        img_bytes = file_client.get(os.path.join(gt_folder, gt_ann['file_name']))
        pan_gt = mmcv.imfrombytes(img_bytes, flag='color', channel_order='rgb')
        pan_gt = rgb2id(pan_gt)
        # Predictions were written locally, a plain imread suffices.
        pan_pred = mmcv.imread(os.path.join(pred_folder, pred_ann['file_name']), flag='color', channel_order='rgb')
        pan_pred = rgb2id(pan_pred)
        gt_segms = {el['id']: el for el in gt_ann['segments_info']}
        pred_segms = {el['id']: el for el in pred_ann['segments_info']}
        # Compute predicted segment areas and validate that PNG segment ids
        # and JSON segment ids agree in both directions.
        pred_labels_set = set((el['id'] for el in pred_ann['segments_info']))
        (labels, labels_cnt) = np.unique(pan_pred, return_counts=True)
        for (label, label_cnt) in zip(labels, labels_cnt):
            if (label not in pred_segms):
                if (label == VOID):
                    continue
                raise KeyError('In the image with ID {} segment with ID {} is presented in PNG and not presented in JSON.'.format(gt_ann['image_id'], label))
            pred_segms[label]['area'] = label_cnt
            pred_labels_set.remove(label)
            if (pred_segms[label]['category_id'] not in categories):
                raise KeyError('In the image with ID {} segment with ID {} has unknown category_id {}.'.format(gt_ann['image_id'], label, pred_segms[label]['category_id']))
        if (len(pred_labels_set) != 0):
            raise KeyError('In the image with ID {} the following segment IDs {} are presented in JSON and not presented in PNG.'.format(gt_ann['image_id'], list(pred_labels_set)))
        # Confusion-matrix trick: encode every (gt_id, pred_id) pixel pair as
        # a single uint64, so one np.unique yields all pairwise intersections.
        pan_gt_pred = ((pan_gt.astype(np.uint64) * OFFSET) + pan_pred.astype(np.uint64))
        gt_pred_map = {}
        (labels, labels_cnt) = np.unique(pan_gt_pred, return_counts=True)
        for (label, intersection) in zip(labels, labels_cnt):
            gt_id = (label // OFFSET)
            pred_id = (label % OFFSET)
            gt_pred_map[(gt_id, pred_id)] = intersection
        # Matched pairs: same category, not crowd, IoU > 0.5 => true positive.
        gt_matched = set()
        pred_matched = set()
        for (label_tuple, intersection) in gt_pred_map.items():
            (gt_label, pred_label) = label_tuple
            if (gt_label not in gt_segms):
                continue
            if (pred_label not in pred_segms):
                continue
            if (gt_segms[gt_label]['iscrowd'] == 1):
                continue
            if (gt_segms[gt_label]['category_id'] != pred_segms[pred_label]['category_id']):
                continue
            # VOID pixels overlapping the prediction are excluded from the
            # union, following the panopticapi convention.
            union = (((pred_segms[pred_label]['area'] + gt_segms[gt_label]['area']) - intersection) - gt_pred_map.get((VOID, pred_label), 0))
            iou = (intersection / union)
            if (iou > 0.5):
                pq_stat[gt_segms[gt_label]['category_id']].tp += 1
                pq_stat[gt_segms[gt_label]['category_id']].iou += iou
                gt_matched.add(gt_label)
                pred_matched.add(pred_label)
        # False negatives: unmatched, non-crowd ground-truth segments. Crowd
        # segments are remembered per category for the FP pass below.
        crowd_labels_dict = {}
        for (gt_label, gt_info) in gt_segms.items():
            if (gt_label in gt_matched):
                continue
            if (gt_info['iscrowd'] == 1):
                crowd_labels_dict[gt_info['category_id']] = gt_label
                continue
            pq_stat[gt_info['category_id']].fn += 1
        # False positives: unmatched predictions, unless mostly covered by
        # VOID pixels or a same-category crowd region.
        for (pred_label, pred_info) in pred_segms.items():
            if (pred_label in pred_matched):
                continue
            intersection = gt_pred_map.get((VOID, pred_label), 0)
            if (pred_info['category_id'] in crowd_labels_dict):
                intersection += gt_pred_map.get((crowd_labels_dict[pred_info['category_id']], pred_label), 0)
            if ((intersection / pred_info['area']) > 0.5):
                continue
            pq_stat[pred_info['category_id']].fp += 1
    print('Core: {}, all {} images processed'.format(proc_id, len(annotation_set)))
    return pq_stat
def pq_compute_multi_core(matched_annotations_list, gt_folder, pred_folder, categories, file_client=None):
    """Evaluate the metrics of Panoptic Segmentation with multithreading.

    Same as the function with the same name in `panopticapi`.

    Args:
        matched_annotations_list (list): The matched annotation list. Each
            element is a tuple of annotations of the same image with the
            format (gt_anns, pred_anns).
        gt_folder (str): The path of the ground truth images.
        pred_folder (str): The path of the prediction images.
        categories (str): The categories of the dataset.
        file_client (object): The file client of the dataset. If None,
            the backend will be set to `disk`.

    Returns:
        PQStat: Panoptic quality statistics aggregated over all workers.

    Raises:
        RuntimeError: If panopticapi is not installed.
    """
    if (PQStat is None):
        raise RuntimeError('panopticapi is not installed, please install it by: pip install git+https://github.com/cocodataset/panopticapi.git.')
    if (file_client is None):
        file_client_args = dict(backend='disk')
        file_client = mmcv.FileClient(**file_client_args)
    cpu_num = multiprocessing.cpu_count()
    annotations_split = np.array_split(matched_annotations_list, cpu_num)
    print('Number of cores: {}, images per core: {}'.format(cpu_num, len(annotations_split[0])))
    workers = multiprocessing.Pool(processes=cpu_num)
    processes = []
    for (proc_id, annotation_set) in enumerate(annotations_split):
        p = workers.apply_async(pq_compute_single_core, (proc_id, annotation_set, gt_folder, pred_folder, categories, file_client))
        processes.append(p)
    # BUGFIX: the pool was never closed, leaking one worker process per CPU
    # on every call. close() forbids new submissions while letting the
    # already queued tasks finish.
    workers.close()
    pq_stat = PQStat()
    for p in processes:
        pq_stat += p.get()
    # Wait for all worker processes to terminate before returning.
    workers.join()
    return pq_stat
def _concat_dataset(cfg, default_args=None):
    """Build one dataset per annotation file and wrap them in ConcatDataset.

    Per-index entries of ``img_prefix``/``seg_prefix``/``proposal_file`` are
    applied when those config values are given as lists or tuples.
    """
    from .dataset_wrappers import ConcatDataset

    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)
    separate_eval = cfg.get('separate_eval', True)

    datasets = []
    for idx, ann_file in enumerate(ann_files):
        data_cfg = copy.deepcopy(cfg)
        # The wrapper consumes this key; the per-dataset configs must not.
        data_cfg.pop('separate_eval', None)
        data_cfg['ann_file'] = ann_file
        if isinstance(img_prefixes, (list, tuple)):
            data_cfg['img_prefix'] = img_prefixes[idx]
        if isinstance(seg_prefixes, (list, tuple)):
            data_cfg['seg_prefix'] = seg_prefixes[idx]
        if isinstance(proposal_files, (list, tuple)):
            data_cfg['proposal_file'] = proposal_files[idx]
        datasets.append(build_dataset(data_cfg, default_args))

    return ConcatDataset(datasets, separate_eval)
def build_dataset(cfg, default_args=None):
    """Build a dataset (possibly wrapped) from a config.

    Handles list configs and the ConcatDataset / RepeatDataset /
    ClassBalancedDataset / MultiImageMixDataset wrapper types recursively;
    everything else is delegated to the DATASETS registry.
    """
    from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
                                   MultiImageMixDataset, RepeatDataset)

    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'ConcatDataset':
        return ConcatDataset(
            [build_dataset(c, default_args) for c in cfg['datasets']],
            cfg.get('separate_eval', True))
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if cfg['type'] == 'ClassBalancedDataset':
        return ClassBalancedDataset(
            build_dataset(cfg['dataset'], default_args),
            cfg['oversample_thr'])
    if cfg['type'] == 'MultiImageMixDataset':
        # Copy before mutating so the caller's config stays intact.
        cp_cfg = copy.deepcopy(cfg)
        cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
        cp_cfg.pop('type')
        return MultiImageMixDataset(**cp_cfg)
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        # Multiple annotation files for one dataset type -> concatenate.
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, runner_type='EpochBasedRunner', persistent_workers=False, **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int, Optional): Seed to be used. Default: None.
        runner_type (str): Type of runner. Default: `EpochBasedRunner`
        persistent_workers (bool): If True, the data loader will not shutdown
            the worker processes after a dataset has been consumed once.
            This allows to maintain the workers `Dataset` instances alive.
            This argument is only valid when PyTorch>=1.7.0. Default: False.
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    (rank, world_size) = get_dist_info()
    if dist:
        # Each process only loads its own shard.
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        batch_size = (num_gpus * samples_per_gpu)
        num_workers = (num_gpus * workers_per_gpu)
    if (runner_type == 'IterBasedRunner'):
        # Iteration-based runners use infinite batch samplers; batching is
        # performed by the batch_sampler itself, so DataLoader's own
        # batch_size must be reset to 1 and sampler left unset.
        if shuffle:
            batch_sampler = InfiniteGroupBatchSampler(dataset, batch_size, world_size, rank, seed=seed)
        else:
            batch_sampler = InfiniteBatchSampler(dataset, batch_size, world_size, rank, seed=seed, shuffle=False)
        batch_size = 1
        sampler = None
    else:
        if dist:
            # Group samplers batch images with similar aspect-ratio flags.
            if shuffle:
                sampler = DistributedGroupSampler(dataset, samples_per_gpu, world_size, rank, seed=seed)
            else:
                sampler = DistributedSampler(dataset, world_size, rank, shuffle=False, seed=seed)
        else:
            sampler = (GroupSampler(dataset, samples_per_gpu) if shuffle else None)
        batch_sampler = None
    # Deterministic per-worker seeding only when an explicit seed is given.
    init_fn = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
    if ((TORCH_VERSION != 'parrots') and (digit_version(TORCH_VERSION) >= digit_version('1.7.0'))):
        kwargs['persistent_workers'] = persistent_workers
    elif (persistent_workers is True):
        warnings.warn('persistent_workers is invalid because your pytorch version is lower than 1.7.0')
    data_loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, batch_sampler=batch_sampler, collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), pin_memory=False, worker_init_fn=init_fn, **kwargs)
    return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed NumPy and ``random`` inside a dataloader worker process.

    The per-worker seed combines the base seed, the process rank and the
    worker id so every worker gets a distinct, reproducible random stream.
    """
    worker_seed = seed + worker_id + rank * num_workers
    np.random.seed(worker_seed)
    random.seed(worker_seed)
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
    """Cityscapes instance segmentation dataset in COCO format."""

    # The eight instance ("thing") classes of the Cityscapes benchmark.
    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')
    PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # Image ids that carry at least one annotation.
        ids_with_ann = set((_['image_id'] for _ in self.coco.anns.values()))
        # Image ids containing at least one instance of a wanted class.
        ids_in_cat = set()
        for (i, class_id) in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for (i, img_info) in enumerate(self.data_infos):
            img_id = img_info['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            all_iscrowd = all([_['iscrowd'] for _ in ann_info])
            # When filtering is enabled, drop images that have no wanted
            # classes or whose annotations are all crowd regions.
            if (self.filter_empty_gt and ((self.img_ids[i] not in ids_in_cat) or all_iscrowd)):
                continue
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Image info of an image.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes,
                bboxes_ignore, labels, masks, seg_map.
                "masks" are already decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Skip degenerate boxes.
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            if (ann['category_id'] not in self.cat_ids):
                continue
            bbox = [x1, y1, (x1 + w), (y1 + h)]
            # Crowd regions are collected separately and ignored in training.
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=img_info['segm_file'])
        return ann

    def results2txt(self, results, outfile_prefix):
        """Dump the detection results to a txt file.

        Args:
            results (list[list | tuple]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files.
                If the prefix is "somepath/xxx",
                the txt files will be named "somepath/xxx.txt".

        Returns:
            list[str]: Result txt files which contains corresponding
                instance segmentation images.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            raise ImportError('Please run "pip install citscapesscripts" to install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.data_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, (basename + '_pred.txt'))
            (bbox_result, segm_result) = result
            bboxes = np.vstack(bbox_result)
            # When masks carry their own scores, segm_result is a tuple of
            # (masks, mask_scores); otherwise reuse the bbox scores.
            if isinstance(segm_result, tuple):
                segms = mmcv.concat_list(segm_result[0])
                mask_score = segm_result[1]
            else:
                segms = mmcv.concat_list(segm_result)
                mask_score = [bbox[(- 1)] for bbox in bboxes]
            labels = [np.full(bbox.shape[0], i, dtype=np.int32) for (i, bbox) in enumerate(bbox_result)]
            labels = np.concatenate(labels)
            assert (len(bboxes) == len(segms) == len(labels))
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                # One PNG per instance plus a txt line referencing it, as
                # required by the cityscapes evaluation protocol.
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    class_id = CSLabels.name2label[classes].id
                    score = mask_score[i]
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(outfile_prefix, (basename + f'_{i}_{classes}.png'))
                    mmcv.imwrite(mask, png_filename)
                    fout.write(f'''{osp.basename(png_filename)} {class_id} {score}
''')
            result_files.append(pred_txt)
        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Format the results to txt (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of txt files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing
                the json filepaths, tmp_dir is the temporal directory created
                for saving txt/png files when txtfile_prefix is not specified.
        """
        # NOTE(review): the two asserts below are duplicated verbatim;
        # harmless, but one pair could be removed.
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        if (txtfile_prefix is None):
            # Caller owns the temp dir and is responsible for cleanup.
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)
        return (result_files, tmp_dir)

    def evaluate(self, results, metric='bbox', logger=None, outfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in Cityscapes/COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            outfile_prefix (str | None): The prefix of output file. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If results are evaluated with COCO protocol,
                it would be the prefix of output json file. For example,
                the metric is 'bbox' and 'segm', then json files would be
                "a/b/prefix.bbox.json" and "a/b/prefix.segm.json".
                If results are evaluated with cityscapes protocol, it would
                be the prefix of output txt/png files. The output files
                would be png images under folder "a/b/prefix/xxx/" and the
                file name of images would be written into a txt file
                "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of
                cityscapes. If not specified, a temp file will be created.
                Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for
                evaluating recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls. If set to a list, the average recall of all IoUs
                will also be computed. Default: 0.5.

        Returns:
            dict[str, float]: COCO style evaluation metric or cityscapes mAP
                and AP@50.
        """
        eval_results = dict()
        # Copy so that removing 'cityscapes' does not mutate the caller's
        # metric list.
        metrics = (metric.copy() if isinstance(metric, list) else [metric])
        if ('cityscapes' in metrics):
            eval_results.update(self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')
        if (len(metrics) > 0):
            # Remaining metrics are evaluated with the COCO protocol on a
            # freshly built CocoDataset over the same annotation file.
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms, None, self.data_root, self.img_prefix, self.seg_prefix, self.proposal_file, self.test_mode, self.filter_empty_gt)
            self_coco.CLASSES = self.CLASSES
            self_coco.data_infos = self_coco.load_annotations(self.ann_file)
            eval_results.update(self_coco.evaluate(results, metrics, logger, outfile_prefix, classwise, proposal_nums, iou_thrs))
        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of output txt file
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: Cityscapes evaluation results, contains 'mAP'
                and 'AP@50'.
        """
        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval
        except ImportError:
            raise ImportError('Please run "pip install citscapesscripts" to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if (logger is None):
            msg = ('\n' + msg)
        print_log(msg, logger=logger)
        (result_files, tmp_dir) = self.format_results(results, txtfile_prefix)
        if (tmp_dir is None):
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')
        eval_results = OrderedDict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
        # Configure the cityscapesscripts evaluator through its global args
        # object (the tool has no functional API for these settings).
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir, 'gtInstances.json')
        CSEval.args.groundTruthSearch = os.path.join(self.img_prefix.replace('leftImg8bit', 'gtFine'), '*/*_gtFine_instanceIds.png')
        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), f'Cannot find ground truth images in {CSEval.args.groundTruthSearch}.'
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
        CSEval_results = CSEval.evaluateImgLists(predictionImgList, groundTruthImgList, CSEval.args)['averages']
        eval_results['mAP'] = CSEval_results['allAp']
        eval_results['AP@50'] = CSEval_results['allAp50%']
        if (tmp_dir is not None):
            tmp_dir.cleanup()
        return eval_results
@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for detection.

    The annotation format is shown as follows. The `ann` field is optional for
    testing.

    .. code-block:: none

        [
            {
                'filename': 'a.jpg',
                'width': 1280,
                'height': 720,
                'ann': {
                    'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
                    'labels': <np.ndarray> (n, ),
                    'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
                    'labels_ignore': <np.ndarray> (k, 4) (optional field)
                }
            },
            ...
        ]

    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        data_root (str, optional): Data root for ``ann_file``,
            ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
        test_mode (bool, optional): If set True, annotation will not be
            loaded.
        filter_empty_gt (bool, optional): If set true, images without
            bounding boxes of the dataset's classes will be filtered out.
            This option only works when `test_mode=False`, i.e., we never
            filter images during tests.
    """

    CLASSES = None
    PALETTE = None

    def __init__(self, ann_file, pipeline, classes=None, data_root=None, img_prefix='', seg_prefix=None, proposal_file=None, test_mode=False, filter_empty_gt=True, file_client_args=dict(backend='disk')):
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = self.get_classes(classes)
        self.file_client = mmcv.FileClient(**file_client_args)
        # Join relative paths with data_root; absolute paths are kept as-is.
        if (self.data_root is not None):
            if (not osp.isabs(self.ann_file)):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if (not ((self.img_prefix is None) or osp.isabs(self.img_prefix))):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if (not ((self.seg_prefix is None) or osp.isabs(self.seg_prefix))):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if (not ((self.proposal_file is None) or osp.isabs(self.proposal_file))):
                self.proposal_file = osp.join(self.data_root, self.proposal_file)
        # load annotations (and proposals); get_local_path (MMCV >= 1.3.16)
        # lets non-disk backends download to a temporary local file first.
        if hasattr(self.file_client, 'get_local_path'):
            with self.file_client.get_local_path(self.ann_file) as local_path:
                self.data_infos = self.load_annotations(local_path)
        else:
            warnings.warn(f'The used MMCV version does not have get_local_path. We treat the {self.ann_file} as local paths and it might cause errors if the path is not a local path. Please use MMCV>= 1.3.16 if you meet errors.')
            self.data_infos = self.load_annotations(self.ann_file)
        if (self.proposal_file is not None):
            if hasattr(self.file_client, 'get_local_path'):
                with self.file_client.get_local_path(self.proposal_file) as local_path:
                    self.proposals = self.load_proposals(local_path)
            else:
                # NOTE(review): this message mentions ann_file although this
                # branch loads proposal_file — looks like a copy/paste slip.
                warnings.warn(f'The used MMCV version does not have get_local_path. We treat the {self.ann_file} as local paths and it might cause errors if the path is not a local path. Please use MMCV>= 1.3.16 if you meet errors.')
                self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None
        # filter images too small and containing no annotations (train only)
        if (not test_mode):
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if (self.proposals is not None):
                self.proposals = [self.proposals[i] for i in valid_inds]
            # set group flag for the group sampler
            self._set_group_flag()
        # processing pipeline
        self.pipeline = Compose(pipeline)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load annotation from annotation file."""
        return mmcv.load(ann_file)

    def load_proposals(self, proposal_file):
        """Load proposal from proposal file."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.data_infos[idx]['ann']

    def get_cat_ids(self, idx):
        """Get category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        # NOTE(review): np.int is deprecated and removed in NumPy >= 1.24;
        # consider np.int64 when upgrading.
        return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Filter images too small."""
        # Empty-gt filtering needs annotation access that the base class
        # does not have; subclasses (e.g. CocoDataset) implement it.
        if self.filter_empty_gt:
            warnings.warn('CustomDataset does not support filtering empty gt images.')
        valid_inds = []
        for (i, img_info) in enumerate(self.data_infos):
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
        return valid_inds

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            img_info = self.data_infos[i]
            if ((img_info['width'] / img_info['height']) > 1):
                self.flag[i] = 1

    def _rand_another(self, idx):
        """Get another random index from the same group as the given index."""
        pool = np.where((self.flag == self.flag[idx]))[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                True).
        """
        if self.test_mode:
            return self.prepare_test_img(idx)
        # If the pipeline rejects a sample (returns None), retry with a
        # random index from the same aspect-ratio group.
        while True:
            data = self.prepare_train_img(idx)
            if (data is None):
                idx = self._rand_another(idx)
                continue
            return data

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """
        img_info = self.data_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        if (self.proposals is not None):
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """
        img_info = self.data_infos[idx]
        results = dict(img_info=img_info)
        if (self.proposals is not None):
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.

        Returns:
            tuple[str] or list[str]: Names of categories of the dataset.
        """
        if (classes is None):
            return cls.CLASSES
        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')
        return class_names

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""

    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for evaluating
                mAP. Default: None.
        """
        if (not isinstance(metric, str)):
            assert (len(metric) == 1)
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if (metric not in allowed_metrics):
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = ([iou_thr] if isinstance(iou_thr, float) else iou_thr)
        if (metric == 'mAP'):
            assert isinstance(iou_thrs, list)
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'''
{('-' * 15)}iou_thr: {iou_thr}{('-' * 15)}''')
                (mean_ap, _) = eval_map(results, annotations, scale_ranges=scale_ranges, iou_thr=iou_thr, dataset=self.CLASSES, logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int((iou_thr * 100)):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = (sum(mean_aps) / len(mean_aps))
        elif (metric == 'recall'):
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for (i, num) in enumerate(proposal_nums):
                for (j, iou) in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou}'] = recalls[(i, j)]
            # Average recall over IoUs only when several thresholds were used.
            if (recalls.shape[1] > 1):
                ar = recalls.mean(axis=1)
                for (i, num) in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results

    def __repr__(self):
        """Print the number of instance number."""
        dataset_type = ('Test' if self.test_mode else 'Train')
        result = f'''
{self.__class__.__name__} {dataset_type} dataset with number of images {len(self)}, and instance counts: 
'''
        if (self.CLASSES is None):
            result += 'Category names are not provided. \n'
            return result
        # One extra slot (index -1) counts images without any instance.
        instance_count = np.zeros((len(self.CLASSES) + 1)).astype(int)
        for idx in range(len(self)):
            label = self.get_ann_info(idx)['labels']
            (unique, counts) = np.unique(label, return_counts=True)
            if (len(unique) > 0):
                instance_count[unique] += counts
            else:
                # background is the last column
                instance_count[(- 1)] += 1
        # Lay out the counts five (category, count) pairs per table row.
        table_data = [(['category', 'count'] * 5)]
        row_data = []
        for (cls, count) in enumerate(instance_count):
            if (cls < len(self.CLASSES)):
                row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}']
            else:
                row_data += ['-1 background', f'{count}']
            if (len(row_data) == 10):
                table_data.append(row_data)
                row_data = []
        if (len(row_data) >= 2):
            if (row_data[(- 1)] == '0'):
                row_data = row_data[:(- 2)]
            if (len(row_data) >= 2):
                table_data.append([])
                table_data.append(row_data)
        table = AsciiTable(table_data)
        result += table.table
        return result
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
        separate_eval (bool): Whether to evaluate the results
            separately if it is used as validation dataset.
            Defaults to True.
    """

    def __init__(self, datasets, separate_eval=True):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        self.PALETTE = getattr(datasets[0], 'PALETTE', None)
        self.separate_eval = separate_eval
        if not separate_eval:
            # Joint evaluation only makes sense for homogeneous,
            # non-COCO datasets: CocoDataset evaluation relies on
            # per-dataset COCO API state that cannot be concatenated.
            if any([isinstance(ds, CocoDataset) for ds in datasets]):
                raise NotImplementedError(
                    'Evaluating concatenated CocoDataset as a whole is not'
                    ' supported! Please set "separate_eval=True"')
            elif len(set([type(ds) for ds in datasets])) != 1:
                raise NotImplementedError(
                    'All the datasets should have same types')

        if hasattr(datasets[0], 'flag'):
            # Concatenate per-dataset aspect-ratio group flags so that
            # group-aware samplers keep working on the combined dataset.
            flags = []
            for i in range(0, len(datasets)):
                flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags)

    def get_cat_ids(self, idx):
        """Get category ids of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    'absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx].get_cat_ids(sample_idx)

    def get_ann_info(self, idx):
        """Get annotation of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    'absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx].get_ann_info(sample_idx)

    def evaluate(self, results, logger=None, **kwargs):
        """Evaluate the results.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: AP results of the total dataset or each separate
            dataset if `self.separate_eval=True`.
        """
        assert len(results) == self.cumulative_sizes[-1], \
            (f'Dataset and results have different sizes: '
             f'{self.cumulative_sizes[-1]} v.s. {len(results)}')

        # Check every wrapped dataset is evaluable before starting.
        for dataset in self.datasets:
            assert hasattr(dataset, 'evaluate'), \
                f'{type(dataset)} does not implement evaluate function'

        if self.separate_eval:
            dataset_idx = -1
            total_eval_results = dict()
            for size, dataset in zip(self.cumulative_sizes, self.datasets):
                start_idx = 0 if dataset_idx == -1 \
                    else self.cumulative_sizes[dataset_idx]
                end_idx = self.cumulative_sizes[dataset_idx + 1]
                results_per_dataset = results[start_idx:end_idx]
                # NOTE: fixed typo in log message ('Evaluateing').
                print_log(
                    f'\nEvaluating {dataset.ann_file} with '
                    f'{len(results_per_dataset)} images now',
                    logger=logger)
                eval_results_per_dataset = dataset.evaluate(
                    results_per_dataset, logger=logger, **kwargs)
                dataset_idx += 1
                # Prefix metric names with the dataset index so metrics
                # from different datasets do not collide.
                for k, v in eval_results_per_dataset.items():
                    total_eval_results.update({f'{dataset_idx}_{k}': v})
            return total_eval_results
        elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
            raise NotImplementedError(
                'Evaluating concatenated CocoDataset as a whole is not'
                ' supported! Please set "separate_eval=True"')
        elif len(set([type(ds) for ds in self.datasets])) != 1:
            raise NotImplementedError(
                'All the datasets should have same types')
        else:
            # Temporarily splice all data_infos into the first dataset,
            # evaluate as one dataset, then restore the original state.
            original_data_infos = self.datasets[0].data_infos
            self.datasets[0].data_infos = sum(
                [dataset.data_infos for dataset in self.datasets], [])
            eval_results = self.datasets[0].evaluate(
                results, logger=logger, **kwargs)
            self.datasets[0].data_infos = original_data_infos
            return eval_results
@DATASETS.register_module() class RepeatDataset(): 'A wrapper of repeated dataset.\n\n The length of repeated dataset will be `times` larger than the original\n dataset. This is useful when the data loading time is long but the dataset\n is small. Using RepeatDataset can reduce the data loading time between\n epochs.\n\n Args:\n dataset (:obj:`Dataset`): The dataset to be repeated.\n times (int): Repeat times.\n ' def __init__(self, dataset, times): self.dataset = dataset self.times = times self.CLASSES = dataset.CLASSES self.PALETTE = getattr(dataset, 'PALETTE', None) if hasattr(self.dataset, 'flag'): self.flag = np.tile(self.dataset.flag, times) self._ori_len = len(self.dataset) def __getitem__(self, idx): return self.dataset[(idx % self._ori_len)] def get_cat_ids(self, idx): 'Get category ids of repeat dataset by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n list[int]: All categories in the image of specified index.\n ' return self.dataset.get_cat_ids((idx % self._ori_len)) def get_ann_info(self, idx): 'Get annotation of repeat dataset by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n ' return self.dataset.get_ann_info((idx % self._ori_len)) def __len__(self): 'Length after repetition.' return (self.times * self._ori_len)
@DATASETS.register_module()
class ClassBalancedDataset:
    """A wrapper of repeated dataset with repeat factor.

    Suitable for training on class imbalanced datasets like LVIS. Following
    the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
    in each epoch, an image may appear multiple times based on its
    "repeat factor". The repeat factor for an image is a function of the
    frequency of the rarest category labeled in that image. The
    "frequency of category c" in [0, 1] is defined by the fraction of images
    in the training set (without repeats) in which category c appears.
    The dataset needs to instantiate :func:`self.get_cat_ids` to support
    ClassBalancedDataset.

    The repeat factor is computed as followed.

    1. For each category c, compute the fraction # of images
       that contain it: :math:`f(c)`
    2. For each category c, compute the category-level repeat factor:
       :math:`r(c) = max(1, sqrt(t/f(c)))`
    3. For each image I, compute the image-level repeat factor:
       :math:`r(I) = max_{c in I} r(c)`

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be repeated.
        oversample_thr (float): frequency threshold below which data is
            repeated. For categories with ``f_c >= oversample_thr``, there is
            no oversampling. For categories with ``f_c < oversample_thr``, the
            degree of oversampling following the square-root inverse frequency
            heuristic above.
        filter_empty_gt (bool, optional): If set true, images without bounding
            boxes will not be oversampled. Otherwise, they will be categorized
            as the pure background class and involved into the oversampling.
            Default: True.
    """

    def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = dataset.CLASSES
        self.PALETTE = getattr(dataset, 'PALETTE', None)

        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
        # Each sample index is duplicated ceil(r(I)) times.
        repeat_indices = []
        for sample_idx, factor in enumerate(repeat_factors):
            repeat_indices.extend([sample_idx] * math.ceil(factor))
        self.repeat_indices = repeat_indices

        flags = []
        if hasattr(self.dataset, 'flag'):
            # Duplicate the aspect-ratio group flags in lockstep with the
            # duplicated indices.
            for flag, factor in zip(self.dataset.flag, repeat_factors):
                flags.extend([flag] * int(math.ceil(factor)))
            assert len(flags) == len(repeat_indices)
        self.flag = np.asarray(flags, dtype=np.uint8)

    def _get_repeat_factors(self, dataset, repeat_thr):
        """Get repeat factor for each images in the dataset.

        Args:
            dataset (:obj:`CustomDataset`): The dataset
            repeat_thr (float): The threshold of frequency. If an image
                contains the categories whose frequency below the threshold,
                it would be repeated.

        Returns:
            list[float]: The repeat factors for each images in the dataset.
        """
        # 1. For each category c, compute the fraction of images that
        #    contain it: f(c).
        category_freq = defaultdict(int)
        num_images = len(dataset)
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            if len(cat_ids) == 0 and not self.filter_empty_gt:
                # Treat empty-gt images as a synthetic "background" category.
                cat_ids = set([len(self.CLASSES)])
            for cat_id in cat_ids:
                category_freq[cat_id] += 1
        for k, v in category_freq.items():
            category_freq[k] = v / num_images

        # 2. Category-level repeat factor: r(c) = max(1, sqrt(t / f(c))).
        category_repeat = {
            cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
            for cat_id, cat_freq in category_freq.items()
        }

        # 3. Image-level repeat factor: r(I) = max_{c in I} r(c).
        repeat_factors = []
        for idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(idx))
            if len(cat_ids) == 0 and not self.filter_empty_gt:
                cat_ids = set([len(self.CLASSES)])
            repeat_factor = 1
            if len(cat_ids) > 0:
                repeat_factor = max(
                    {category_repeat[cat_id] for cat_id in cat_ids})
            repeat_factors.append(repeat_factor)

        return repeat_factors

    def __getitem__(self, idx):
        ori_index = self.repeat_indices[idx]
        return self.dataset[ori_index]

    def get_ann_info(self, idx):
        """Get annotation of dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        ori_index = self.repeat_indices[idx]
        return self.dataset.get_ann_info(ori_index)

    def __len__(self):
        """Length after repetition."""
        return len(self.repeat_indices)
@DATASETS.register_module()
class MultiImageMixDataset:
    """A wrapper of multiple images mixed dataset.

    Suitable for training on multiple images mixed data augmentation like
    mosaic and mixup. For the augmentation pipeline of mixed image data,
    the `get_indexes` method needs to be provided to obtain the image
    indexes, and you can set `skip_type_keys` to change the pipeline running
    process.

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be mixed.
        pipeline (Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        dynamic_scale (tuple[int], optional): The image scale can be changed
            dynamically. Default to None. It is deprecated.
        skip_type_keys (list[str], optional): Sequence of type string to
            be skip pipeline. Default to None.
    """

    def __init__(self,
                 dataset,
                 pipeline,
                 dynamic_scale=None,
                 skip_type_keys=None):
        if dynamic_scale is not None:
            raise RuntimeError('dynamic_scale is deprecated. Please use '
                               'Resize pipeline to achieve similar functions')
        assert isinstance(pipeline, collections.abc.Sequence)
        if skip_type_keys is not None:
            assert all([
                isinstance(skip_type_key, str)
                for skip_type_key in skip_type_keys
            ])
        # Always assign the attribute (possibly None) so that __getitem__
        # and update_skip_type_keys can rely on it existing even when
        # skip_type_keys is not provided at construction time.
        self._skip_type_keys = skip_type_keys

        self.pipeline = []
        self.pipeline_types = []
        for transform in pipeline:
            if isinstance(transform, dict):
                # Remember the config type string so individual transforms
                # can be skipped by name at runtime.
                self.pipeline_types.append(transform['type'])
                transform = build_from_cfg(transform, PIPELINES)
                self.pipeline.append(transform)
            else:
                raise TypeError('pipeline must be a dict')

        self.dataset = dataset
        self.CLASSES = dataset.CLASSES
        self.PALETTE = getattr(dataset, 'PALETTE', None)
        if hasattr(self.dataset, 'flag'):
            self.flag = dataset.flag
        self.num_samples = len(dataset)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        results = copy.deepcopy(self.dataset[idx])
        for (transform, transform_type) in zip(self.pipeline,
                                               self.pipeline_types):
            if self._skip_type_keys is not None and \
                    transform_type in self._skip_type_keys:
                continue

            if hasattr(transform, 'get_indexes'):
                # Mix-style transforms (e.g. Mosaic/MixUp) pick extra images
                # themselves; feed them in under 'mix_results'.
                indexes = transform.get_indexes(self.dataset)
                if not isinstance(indexes, collections.abc.Sequence):
                    indexes = [indexes]
                mix_results = [
                    copy.deepcopy(self.dataset[index]) for index in indexes
                ]
                results['mix_results'] = mix_results

            results = transform(results)

            if 'mix_results' in results:
                results.pop('mix_results')

        return results

    def update_skip_type_keys(self, skip_type_keys):
        """Update skip_type_keys. It is called by an external hook.

        Args:
            skip_type_keys (list[str], optional): Sequence of type
                string to be skip pipeline.
        """
        assert all([
            isinstance(skip_type_key, str)
            for skip_type_key in skip_type_keys
        ])
        self._skip_type_keys = skip_type_keys
@DATASETS.register_module() class DeepFashionDataset(CocoDataset): CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 'skin', 'face') PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64), (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96), (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192), (128, 0, 96), (128, 0, 192), (0, 32, 192)]
@PIPELINES.register_module() class Compose(): 'Compose multiple transforms sequentially.\n\n Args:\n transforms (Sequence[dict | callable]): Sequence of transform object or\n config dict to be composed.\n ' def __init__(self, transforms): assert isinstance(transforms, collections.abc.Sequence) self.transforms = [] for transform in transforms: if isinstance(transform, dict): transform = build_from_cfg(transform, PIPELINES) self.transforms.append(transform) elif callable(transform): self.transforms.append(transform) else: raise TypeError('transform must be callable or a dict') def __call__(self, data): 'Call function to apply transforms sequentially.\n\n Args:\n data (dict): A result dict contains the data to transform.\n\n Returns:\n dict: Transformed data.\n ' for t in self.transforms: data = t(data) if (data is None): return None return data def __repr__(self): format_string = (self.__class__.__name__ + '(') for t in self.transforms: str_ = t.__repr__() if ('Compose(' in str_): str_ = str_.replace('\n', '\n ') format_string += '\n' format_string += f' {str_}' format_string += '\n)' return format_string
def to_tensor(data): 'Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n\n Args:\n data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to\n be converted.\n ' if isinstance(data, torch.Tensor): return data elif isinstance(data, np.ndarray): return torch.from_numpy(data) elif (isinstance(data, Sequence) and (not mmcv.is_str(data))): return torch.tensor(data) elif isinstance(data, int): return torch.LongTensor([data]) elif isinstance(data, float): return torch.FloatTensor([data]) else: raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module() class ToTensor(): 'Convert some results to :obj:`torch.Tensor` by given keys.\n\n Args:\n keys (Sequence[str]): Keys that need to be converted to Tensor.\n ' def __init__(self, keys): self.keys = keys def __call__(self, results): 'Call function to convert data in results to :obj:`torch.Tensor`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted\n to :obj:`torch.Tensor`.\n ' for key in self.keys: results[key] = to_tensor(results[key]) return results def __repr__(self): return (self.__class__.__name__ + f'(keys={self.keys})')
@PIPELINES.register_module() class ImageToTensor(): 'Convert image to :obj:`torch.Tensor` by given keys.\n\n The dimension order of input image is (H, W, C). The pipeline will convert\n it to (C, H, W). If only 2 dimension (H, W) is given, the output would be\n (1, H, W).\n\n Args:\n keys (Sequence[str]): Key of images to be converted to Tensor.\n ' def __init__(self, keys): self.keys = keys def __call__(self, results): 'Call function to convert image in results to :obj:`torch.Tensor` and\n transpose the channel order.\n\n Args:\n results (dict): Result dict contains the image data to convert.\n\n Returns:\n dict: The result dict contains the image converted\n to :obj:`torch.Tensor` and transposed to (C, H, W) order.\n ' for key in self.keys: img = results[key] if (len(img.shape) < 3): img = np.expand_dims(img, (- 1)) results[key] = to_tensor(img.transpose(2, 0, 1)).contiguous() return results def __repr__(self): return (self.__class__.__name__ + f'(keys={self.keys})')
@PIPELINES.register_module() class Transpose(): 'Transpose some results by given keys.\n\n Args:\n keys (Sequence[str]): Keys of results to be transposed.\n order (Sequence[int]): Order of transpose.\n ' def __init__(self, keys, order): self.keys = keys self.order = order def __call__(self, results): 'Call function to transpose the channel order of data in results.\n\n Args:\n results (dict): Result dict contains the data to transpose.\n\n Returns:\n dict: The result dict contains the data transposed to ``self.order``.\n ' for key in self.keys: results[key] = results[key].transpose(self.order) return results def __repr__(self): return (self.__class__.__name__ + f'(keys={self.keys}, order={self.order})')
@PIPELINES.register_module() class ToDataContainer(): "Convert results to :obj:`mmcv.DataContainer` by given fields.\n\n Args:\n fields (Sequence[dict]): Each field is a dict like\n ``dict(key='xxx', **kwargs)``. The ``key`` in result will\n be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.\n Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),\n dict(key='gt_labels'))``.\n " def __init__(self, fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))): self.fields = fields def __call__(self, results): 'Call function to convert data in results to\n :obj:`mmcv.DataContainer`.\n\n Args:\n results (dict): Result dict contains the data to convert.\n\n Returns:\n dict: The result dict contains the data converted to :obj:`mmcv.DataContainer`.\n ' for field in self.fields: field = field.copy() key = field.pop('key') results[key] = DC(results[key], **field) return results def __repr__(self): return (self.__class__.__name__ + f'(fields={self.fields})')
@PIPELINES.register_module()
class DefaultFormatBundle:
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img",
    "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
    These fields are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - proposals: (1)to tensor, (2)to DataContainer
    - gt_bboxes: (1)to tensor, (2)to DataContainer
    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
    - gt_labels: (1)to tensor, (2)to DataContainer
    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
      (3)to DataContainer (stack=True)

    Args:
        img_to_float (bool): Whether to force the image to be converted to
            float type. Default: True.
        pad_val (dict): A dict for padding value in batch collating,
            the default value is `dict(img=0, masks=0, seg=255)`.
            Without this argument, the padding value of "gt_semantic_seg"
            will be set to 0 by default, which should be 255.
    """

    def __init__(self,
                 img_to_float=True,
                 pad_val=dict(img=0, masks=0, seg=255)):
        self.img_to_float = img_to_float
        self.pad_val = pad_val

    def __call__(self, results):
        """Call function to transform and format common fields in results.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            dict: The result dict contains the data that is formatted with
            default bundle.
        """
        if 'img' in results:
            img = results['img']
            if self.img_to_float is True and img.dtype == np.uint8:
                # Float conversion here is much faster than on the
                # normalized float image later in the pipeline.
                img = img.astype(np.float32)
            results = self._add_default_meta_keys(results)
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(
                to_tensor(img), padding_value=self.pad_val['img'], stack=True)
        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
            if key not in results:
                continue
            results[key] = DC(to_tensor(results[key]))
        if 'gt_masks' in results:
            # Masks stay on CPU; they are not stackable tensors.
            results['gt_masks'] = DC(
                results['gt_masks'],
                padding_value=self.pad_val['masks'],
                cpu_only=True)
        if 'gt_semantic_seg' in results:
            results['gt_semantic_seg'] = DC(
                to_tensor(results['gt_semantic_seg'][None, ...]),
                padding_value=self.pad_val['seg'],
                stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including `pad_shape`, `scale_factor` and
        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
        `Pad` are implemented during the whole pipeline.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            results (dict): Updated result dict contains the data to convert.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        # Identity normalization config: zero mean, unit std, no channel swap.
        results.setdefault(
            'img_norm_cfg',
            dict(
                mean=np.zeros(num_channels, dtype=np.float32),
                std=np.ones(num_channels, dtype=np.float32),
                to_rgb=False))
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(img_to_float={self.img_to_float})'
@PIPELINES.register_module() class DefaultFormatBundleFlickr(DefaultFormatBundle): def __call__(self, results): 'Call function to transform and format common fields in results.\n Args:\n results (dict): Result dict contains the data to convert.\n Returns:\n dict: The result dict contains the data that is formatted with default bundle.\n ' if ('img' in results): img = results['img'] if ((self.img_to_float is True) and (img.dtype == np.uint8)): img = img.astype(np.float32) results = self._add_default_meta_keys(results) if (len(img.shape) < 3): img = np.expand_dims(img, (- 1)) img = np.ascontiguousarray(img.transpose(2, 0, 1)) results['img'] = DC(to_tensor(img), padding_value=self.pad_val['img'], stack=True) for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: if (key not in results): continue if (key != 'gt_labels'): results[key] = DC(to_tensor(results[key])) if ('gt_masks' in results): results['gt_masks'] = DC(results['gt_masks'], padding_value=self.pad_val['masks'], cpu_only=True) if ('gt_semantic_seg' in results): results['gt_semantic_seg'] = DC(to_tensor(results['gt_semantic_seg'][(None, ...)]), padding_value=self.pad_val['seg'], stack=True) return results
@PIPELINES.register_module()
class Collect:
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically keys
    is set to some subset of "img", "proposals", "gt_bboxes",
    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".

    The "img_meta" item is always populated. The contents of the "img_meta"
    dictionary depends on "meta_keys". By default this includes:

    - "img_shape": shape of the image input to the network as a tuple
      (h, w, c). Note that images may be zero padded on the bottom/right
      if the batch tensor is larger than this shape.
    - "scale_factor": a float indicating the preprocessing scale
    - "flip": a boolean indicating if image flip transform was used
    - "filename": path to the image file
    - "ori_shape": original shape of the image as a tuple (h, w, c)
    - "pad_shape": image shape after padding
    - "img_norm_cfg": a dict of normalization information:

        - mean - per channel mean subtraction
        - std - per channel std divisor
        - to_rgb - bool indicating if bgr was converted to rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
            'pad_shape', 'scale_factor', 'flip', 'flip_direction',
            'img_norm_cfg')``
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Call function to collect keys in results. The keys in ``meta_keys``
        will be converted to :obj:mmcv.DataContainer.

        Args:
            results (dict): Result dict contains the data to collect.

        Returns:
            dict: The result dict contains the following keys

            - keys in ``self.keys``
            - ``img_metas``
        """
        collected = {}
        # Metadata travels as a cpu_only DataContainer under 'img_metas'.
        meta = {name: results[name] for name in self.meta_keys}
        collected['img_metas'] = DC(meta, cpu_only=True)
        for name in self.keys:
            collected[name] = results[name]
        return collected

    def __repr__(self):
        return (self.__class__.__name__ +
                f'(keys={self.keys}, meta_keys={self.meta_keys})')
@PIPELINES.register_module() class WrapFieldsToLists(): "Wrap fields of the data dictionary into lists for evaluation.\n\n This class can be used as a last step of a test or validation\n pipeline for single image evaluation or inference.\n\n Example:\n >>> test_pipeline = [\n >>> dict(type='LoadImageFromFile'),\n >>> dict(type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n >>> dict(type='Pad', size_divisor=32),\n >>> dict(type='ImageToTensor', keys=['img']),\n >>> dict(type='Collect', keys=['img']),\n >>> dict(type='WrapFieldsToLists')\n >>> ]\n " def __call__(self, results): 'Call function to wrap fields into lists.\n\n Args:\n results (dict): Result dict contains the data to wrap.\n\n Returns:\n dict: The result dict where value of ``self.keys`` are wrapped into list.\n ' for (key, val) in results.items(): results[key] = [val] return results def __repr__(self): return f'{self.__class__.__name__}()'
@PIPELINES.register_module()
class InstaBoost:
    """Data augmentation method in `InstaBoost: Boosting Instance
    Segmentation Via Probability Map Guided Copy-Pasting
    <https://arxiv.org/abs/1908.07801>`_.

    Refer to https://github.com/GothicAi/Instaboost for implementation
    details.

    Args:
        action_candidate (tuple): Action candidates. "normal", "horizontal",
            "vertical", "skip" are supported. Default: ('normal',
            'horizontal', 'skip').
        action_prob (tuple): Corresponding action probabilities. Should be
            the same length as action_candidate. Default: (1, 0, 0).
        scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
        dx (int): The maximum x-axis shift will be (instance width) / dx.
            Default 15.
        dy (int): The maximum y-axis shift will be (instance height) / dy.
            Default 15.
        theta (tuple): (min rotation degree, max rotation degree).
            Default: (-1, 1).
        color_prob (float): Probability of images for color augmentation.
            Default 0.5.
        hflag (bool): Whether to use heatmap guided. Default False.
        aug_ratio (float): Probability of applying this transformation.
            Default 0.5.
    """

    def __init__(self,
                 action_candidate=('normal', 'horizontal', 'skip'),
                 action_prob=(1, 0, 0),
                 scale=(0.8, 1.2),
                 dx=15,
                 dy=15,
                 theta=(-1, 1),
                 color_prob=0.5,
                 hflag=False,
                 aug_ratio=0.5):
        try:
            import instaboostfast as instaboost
        except ImportError:
            raise ImportError(
                'Please run "pip install instaboostfast" to install '
                'instaboostfast first for instaboost augmentation.')
        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
                                               scale, dx, dy, theta,
                                               color_prob, hflag)
        self.aug_ratio = aug_ratio

    def _load_anns(self, results):
        # Convert ann_info (xyxy boxes) into COCO-style dicts (xywh boxes)
        # expected by the instaboost library.
        labels = results['ann_info']['labels']
        masks = results['ann_info']['masks']
        bboxes = results['ann_info']['bboxes']
        anns = []
        for label, bbox, mask in zip(labels, bboxes, masks):
            x1, y1, x2, y2 = bbox
            anns.append({
                'category_id': label,
                'segmentation': mask,
                'bbox': [x1, y1, x2 - x1, y2 - y1]
            })
        return anns

    def _parse_anns(self, results, anns, img):
        # Convert augmented COCO-style anns back into ann_info, dropping
        # degenerate (zero-area) boxes produced by the augmentation.
        gt_bboxes = []
        gt_labels = []
        gt_masks_ann = []
        for ann in anns:
            x1, y1, w, h = ann['bbox']
            if w <= 0 or h <= 0:
                continue
            gt_bboxes.append([x1, y1, x1 + w, y1 + h])
            gt_labels.append(ann['category_id'])
            gt_masks_ann.append(ann['segmentation'])
        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)
        results['ann_info']['labels'] = gt_labels
        results['ann_info']['bboxes'] = gt_bboxes
        results['ann_info']['masks'] = gt_masks_ann
        results['img'] = img
        return results

    def __call__(self, results):
        img = results['img']
        orig_type = img.dtype
        anns = self._load_anns(results)
        # Apply the augmentation with probability aug_ratio.
        if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
            try:
                import instaboostfast as instaboost
            except ImportError:
                raise ImportError('Please run "pip install instaboostfast" '
                                  'to install instaboostfast first.')
            anns, img = instaboost.get_new_data(
                anns, img.astype(np.uint8), self.cfg, background=None)

        results = self._parse_anns(results, anns, img.astype(orig_type))
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
        return repr_str
@PIPELINES.register_module()
class MultiScaleFlipAug:
    """Test-time augmentation with multiple scales and flipping.

    An example configuration is as followed:

    .. code-block::

        img_scale=[(1333, 400), (1333, 800)],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ]

    After MultiScaleFLipAug with above configuration, the results are wrapped
    into lists of the same length as followed:

    .. code-block::

        dict(
            img=[...],
            img_shape=[...],
            scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
            flip=[False, True, False, True]
            ...
        )

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple] | None): Images scales for resizing.
        scale_factor (float | list[float] | None): Scale factors for
            resizing.
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal", "vertical" and "diagonal". If
            flip_direction is a list, multiple flip augmentations will be
            applied. It has no effect when flip == False. Default:
            "horizontal".
    """

    def __init__(self,
                 transforms,
                 img_scale=None,
                 scale_factor=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        # Exactly one of img_scale / scale_factor must be provided.
        assert (img_scale is None) ^ (scale_factor is None), \
            'Must have but only one variable can be set'
        if img_scale is not None:
            self.img_scale = img_scale if isinstance(img_scale,
                                                     list) else [img_scale]
            self.scale_key = 'scale'
            assert mmcv.is_list_of(self.img_scale, tuple)
        else:
            # Scale factors are stored under the same attribute; the
            # downstream Resize transform reads them via scale_key.
            self.img_scale = scale_factor if isinstance(
                scale_factor, list) else [scale_factor]
            self.scale_key = 'scale_factor'

        self.flip = flip
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if self.flip and not any(
                [t['type'] == 'RandomFlip' for t in transforms]):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Call function to apply test time augment transforms on results.

        Args:
            results (dict): Result dict contains the data to transform.

        Returns:
            dict[str: list]: The augmented data, where each value is wrapped
            into a list.
        """
        aug_data = []
        flip_args = [(False, None)]
        if self.flip:
            flip_args += [(True, direction)
                          for direction in self.flip_direction]
        # Cartesian product of scales and flip settings.
        for scale in self.img_scale:
            for flip, direction in flip_args:
                _results = results.copy()
                _results[self.scale_key] = scale
                _results['flip'] = flip
                _results['flip_direction'] = direction
                aug_data.append(self.transforms(_results))

        # Transpose: list of dicts -> dict of lists.
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
        repr_str += f'flip_direction={self.flip_direction})'
        return repr_str
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with a deterministic, epoch-dependent shuffle.

    Extends ``torch.utils.data.DistributedSampler`` with an explicit
    ``seed`` so that every process draws the same permutation for a given
    epoch (the generator is seeded with ``epoch + seed``).
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, seed=0):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # Treat a ``None`` seed as 0 so the generator seed is always an int.
        self.seed = 0 if seed is None else seed

    def __iter__(self):
        """Yield this rank's share of the (possibly shuffled) indices."""
        dataset_len = len(self.dataset)
        if self.shuffle:
            # Same permutation on every rank for a given epoch.
            generator = torch.Generator()
            generator.manual_seed(self.epoch + self.seed)
            indices = torch.randperm(dataset_len, generator=generator).tolist()
        else:
            indices = list(range(dataset_len))
        # Repeat the index list until it can be evenly split across ranks.
        repeats = math.ceil(self.total_size / len(indices))
        indices = (indices * repeats)[:self.total_size]
        assert len(indices) == self.total_size
        # Interleaved assignment: rank r takes every ``num_replicas``-th index.
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)
class GroupSampler(Sampler):
    """Sampler that keeps every mini-batch within one aspect-ratio group.

    ``dataset.flag`` assigns each sample to a group. Indices are shuffled
    inside each group, padded with random duplicates so every group is a
    multiple of ``samples_per_gpu``, chunked into group-pure batches, and
    finally the batches themselves are shuffled.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        # Every group is padded up to a multiple of ``samples_per_gpu``.
        self.num_samples = sum(
            int(np.ceil(size / self.samples_per_gpu)) * self.samples_per_gpu
            for size in self.group_sizes)

    def __iter__(self):
        per_group = []
        for group, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            members = np.where(self.flag == group)[0]
            assert len(members) == size
            np.random.shuffle(members)
            # Pad with random duplicates so the group splits evenly.
            pad = (int(np.ceil(size / self.samples_per_gpu)) *
                   self.samples_per_gpu - len(members))
            members = np.concatenate([members, np.random.choice(members, pad)])
            per_group.append(members)
        flat = np.concatenate(per_group)
        batch = self.samples_per_gpu
        # Shuffle whole batches, never samples across batch boundaries.
        order = np.random.permutation(range(len(flat) // batch))
        flat = np.concatenate([flat[i * batch:(i + 1) * batch] for i in order])
        result = flat.astype(np.int64).tolist()
        assert len(result) == self.num_samples
        return iter(result)

    def __len__(self):
        """Number of samples served per epoch (including padding)."""
        return self.num_samples
class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset while
    keeping every ``samples_per_gpu``-sized chunk within a single group.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling. Must expose a ``flag`` array
            assigning each sample to a group.
        samples_per_gpu (int): Number of consecutive samples that must share
            a group. Default: 1.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        seed (int, optional): random seed used to shuffle the sampler.
            This number should be identical across all processes in the
            distributed group. Default: 0.
    """

    def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None, seed=0):
        # Fall back to the current distributed environment when the caller
        # does not pin ``rank``/``num_replicas`` explicitly.
        (_rank, _num_replicas) = get_dist_info()
        if (num_replicas is None):
            num_replicas = _num_replicas
        if (rank is None):
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Guard against ``seed=None``: the torch generator needs an int.
        self.seed = (seed if (seed is not None) else 0)
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        # Per group, round the per-replica share up to a whole number of
        # ``samples_per_gpu`` chunks; ``j`` is unused because the group size
        # is re-read via ``self.group_sizes[i]``.
        self.num_samples = 0
        for (i, j) in enumerate(self.group_sizes):
            self.num_samples += (int(math.ceil((((self.group_sizes[i] * 1.0) / self.samples_per_gpu) / self.num_replicas))) * self.samples_per_gpu)
        self.total_size = (self.num_samples * self.num_replicas)

    def __iter__(self):
        # Deterministic shuffling: identical on every rank for a given epoch.
        g = torch.Generator()
        g.manual_seed((self.epoch + self.seed))
        indices = []
        for (i, size) in enumerate(self.group_sizes):
            if (size > 0):
                indice = np.where((self.flag == i))[0]
                assert (len(indice) == size)
                # Shuffle within the group.
                indice = indice[list(torch.randperm(int(size), generator=g).numpy())].tolist()
                # Pad the group by repeating its (shuffled) indices until its
                # length is divisible by ``samples_per_gpu * num_replicas``.
                extra = (((int(math.ceil((((size * 1.0) / self.samples_per_gpu) / self.num_replicas))) * self.samples_per_gpu) * self.num_replicas) - len(indice))
                tmp = indice.copy()
                for _ in range((extra // size)):
                    indice.extend(tmp)
                indice.extend(tmp[:(extra % size)])
                indices.extend(indice)
        assert (len(indices) == self.total_size)
        # Shuffle whole ``samples_per_gpu`` chunks so each chunk stays
        # group-pure, then give every rank its contiguous slice.
        indices = [indices[j] for i in list(torch.randperm((len(indices) // self.samples_per_gpu), generator=g)) for j in range((i * self.samples_per_gpu), ((i + 1) * self.samples_per_gpu))]
        offset = (self.num_samples * self.rank)
        indices = indices[offset:(offset + self.num_samples)]
        assert (len(indices) == self.num_samples)
        return iter(indices)

    def __len__(self):
        # Number of samples served to *this* replica, not the whole dataset.
        return self.num_samples

    def set_epoch(self, epoch):
        # Called by the runner so each epoch uses a different permutation.
        self.epoch = epoch
class InfiniteGroupBatchSampler(Sampler):
    """Infinite, group-pure batch sampler for iteration-based runners.

    Yields lists of ``batch_size`` indices drawn from an endless (optionally
    shuffled) index stream, buffering indices per group (``dataset.flag``)
    so that every yielded batch contains samples of a single group only.

    The implementation logic is referred to
    https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py

    Args:
        dataset (object): The dataset.
        batch_size (int): When model is :obj:`DistributedDataParallel`,
            it is the number of training samples on each GPU. When model is
            :obj:`DataParallel`, it is `num_gpus * samples_per_gpu`.
            Default: 1.
        world_size (int, optional): Number of processes participating in
            distributed training. Default: None.
        rank (int, optional): Rank of current process. Default: None.
        seed (int): Random seed. Default: 0.
        shuffle (bool): Whether to shuffle the indices of a dummy `epoch`;
            note that sequential output cannot be guaranteed because all
            indices of a batch must belong to the same group. Default: True.
    """

    def __init__(self, dataset, batch_size=1, world_size=None, rank=None, seed=0, shuffle=True):
        # Resolve distributed context, honoring explicit overrides.
        _rank, _world_size = get_dist_info()
        self.rank = _rank if rank is None else rank
        self.world_size = _world_size if world_size is None else world_size
        self.dataset = dataset
        self.batch_size = batch_size
        self.seed = 0 if seed is None else seed
        self.shuffle = shuffle
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        # One pending-index buffer per group; a batch is emitted as soon as
        # any buffer reaches ``batch_size``.
        self.buffer_per_group = {g: [] for g in range(len(self.group_sizes))}
        self.size = len(dataset)
        self.indices = self._indices_of_rank()

    def _infinite_indices(self):
        """Endlessly yield dataset indices, re-permuted each pass if shuffling."""
        rng = torch.Generator()
        rng.manual_seed(self.seed)
        while True:
            if self.shuffle:
                yield from torch.randperm(self.size, generator=rng).tolist()
            else:
                yield from torch.arange(self.size).tolist()

    def _indices_of_rank(self):
        """Take every ``world_size``-th index, starting at this rank."""
        yield from itertools.islice(self._infinite_indices(), self.rank, None,
                                    self.world_size)

    def __iter__(self):
        for idx in self.indices:
            bucket = self.buffer_per_group[self.flag[idx]]
            bucket.append(idx)
            if len(bucket) == self.batch_size:
                # Yield a copy, then reset the buffer in place.
                yield bucket[:]
                del bucket[:]

    def __len__(self):
        """Length of base dataset (not the number of batches)."""
        return self.size

    def set_epoch(self, epoch):
        """Not supported in `IterationBased` runner."""
        raise NotImplementedError
class InfiniteBatchSampler(Sampler):
    """Infinite batch sampler for iteration-based runners.

    Yields lists of ``batch_size`` indices drawn from an endless (optionally
    shuffled) index stream that is interleaved across ranks.

    The implementation logic is referred to
    https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py

    Args:
        dataset (object): The dataset.
        batch_size (int): When model is :obj:`DistributedDataParallel`,
            it is the number of training samples on each GPU. When model is
            :obj:`DataParallel`, it is `num_gpus * samples_per_gpu`.
            Default: 1.
        world_size (int, optional): Number of processes participating in
            distributed training. Default: None.
        rank (int, optional): Rank of current process. Default: None.
        seed (int): Random seed. Default: 0.
        shuffle (bool): Whether shuffle the dataset or not. Default: True.
    """

    def __init__(self, dataset, batch_size=1, world_size=None, rank=None, seed=0, shuffle=True):
        # Resolve distributed context, honoring explicit overrides.
        _rank, _world_size = get_dist_info()
        self.rank = _rank if rank is None else rank
        self.world_size = _world_size if world_size is None else world_size
        self.dataset = dataset
        self.batch_size = batch_size
        self.seed = 0 if seed is None else seed
        self.shuffle = shuffle
        self.size = len(dataset)
        self.indices = self._indices_of_rank()

    def _infinite_indices(self):
        """Endlessly yield dataset indices, re-permuted each pass if shuffling."""
        rng = torch.Generator()
        rng.manual_seed(self.seed)
        while True:
            if self.shuffle:
                yield from torch.randperm(self.size, generator=rng).tolist()
            else:
                yield from torch.arange(self.size).tolist()

    def _indices_of_rank(self):
        """Take every ``world_size``-th index, starting at this rank."""
        yield from itertools.islice(self._infinite_indices(), self.rank, None,
                                    self.world_size)

    def __iter__(self):
        pending = []
        for idx in self.indices:
            pending.append(idx)
            if len(pending) == self.batch_size:
                yield pending
                pending = []

    def __len__(self):
        """Length of base dataset (not the number of batches)."""
        return self.size

    def set_epoch(self, epoch):
        """Not supported in `IterationBased` runner."""
        raise NotImplementedError
def replace_ImageToTensor(pipelines):
    """Return a deep copy of *pipelines* with every ``ImageToTensor``
    transform replaced by ``DefaultFormatBundle``.

    This is normally useful in batch inference, where the tensors produced
    by ``ImageToTensor`` are not wrapped the way batched collation expects.
    ``MultiScaleFlipAug`` wrappers are handled recursively; the input list
    is never mutated.

    Args:
        pipelines (list[dict]): Data pipeline configs.

    Returns:
        list: The new pipeline list with all ImageToTensor replaced by
        DefaultFormatBundle.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(
        ...        type='MultiScaleFlipAug',
        ...        img_scale=(1333, 800),
        ...        flip=False,
        ...        transforms=[
        ...            dict(type='Resize', keep_ratio=True),
        ...            dict(type='ImageToTensor', keys=['img']),
        ...            dict(type='Collect', keys=['img']),
        ...        ])
        ...    ]
        >>> out = replace_ImageToTensor(pipelines)
        >>> out[1]['transforms'][1] == dict(type='DefaultFormatBundle')
        True
    """
    pipelines = copy.deepcopy(pipelines)
    for idx, transform in enumerate(pipelines):
        kind = transform['type']
        if kind == 'MultiScaleFlipAug':
            # Recurse into the wrapped test-time-augmentation transforms.
            assert 'transforms' in transform
            transform['transforms'] = replace_ImageToTensor(
                transform['transforms'])
        elif kind == 'ImageToTensor':
            warnings.warn('"ImageToTensor" pipeline is replaced by "DefaultFormatBundle" for batch inference. It is recommended to manually replace it in the test data pipeline in your config file.', UserWarning)
            pipelines[idx] = {'type': 'DefaultFormatBundle'}
    return pipelines
def get_loading_pipeline(pipeline):
    """Only keep loading image and annotations related configuration.

    Args:
        pipeline (list[dict]): Data pipeline configs.

    Returns:
        list[dict]: The new pipeline list with only keep
        loading image and annotations related configuration.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True),
        ...    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        ...    dict(type='RandomFlip', flip_ratio=0.5),
        ...    dict(type='DefaultFormatBundle'),
        ...    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
        ...    ]
        >>> expected_pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True)
        ...    ]
        >>> assert expected_pipelines == get_loading_pipeline(pipelines)
    """
    loading_cfgs = []
    # Transform classes that count as "loading" steps.
    loading_types = (LoadImageFromFile, LoadAnnotations,
                     LoadPanopticAnnotations)
    for transform_cfg in pipeline:
        transform_cls = PIPELINES.get(transform_cfg['type'])
        if transform_cls is not None and transform_cls in loading_types:
            loading_cfgs.append(transform_cfg)
    assert len(loading_cfgs) == 2, 'The data pipeline in your config file must include loading image and annotations related pipeline.'
    return loading_cfgs
@HOOKS.register_module()
class NumClassCheckHook(Hook):
    """Hook that verifies the head's ``num_classes`` matches the dataset.

    Before every train and val epoch it compares ``len(dataset.CLASSES)``
    with the ``num_classes`` attribute of each module of the model.
    """

    def _check_head(self, runner):
        """Check whether the `num_classes` in head matches the length of
        `CLASSES` in `dataset`.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        model = runner.model
        dataset = runner.data_loader.dataset
        if dataset.CLASSES is None:
            # Fixed message spacing ("andcheck" -> "and check").
            runner.logger.warning(
                f'Please set `CLASSES` in the {dataset.__class__.__name__} '
                'and check if it is consistent with the `num_classes` of head')
        else:
            # A bare string would silently iterate per character; require a
            # real sequence of class names. (Message spacing/grammar fixed.)
            assert type(dataset.CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} should be a '
                 f'tuple of str. Add comma if number of classes is 1 as '
                 f'CLASSES = ({dataset.CLASSES},)')
            for name, module in model.named_modules():
                # RPN-style heads are class-agnostic and VGG is a backbone,
                # so their `num_classes` must not be compared to the dataset.
                if hasattr(module, 'num_classes') and not isinstance(
                        module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):
                    assert module.num_classes == len(dataset.CLASSES), \
                        (f'The `num_classes` ({module.num_classes}) in '
                         f'{module.__class__.__name__} of '
                         f'{model.__class__.__name__} does not match the '
                         f'length of `CLASSES` ({len(dataset.CLASSES)}) in '
                         f'{dataset.__class__.__name__}')

    def before_train_epoch(self, runner):
        """Check whether the training dataset is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)

    def before_val_epoch(self, runner):
        """Check whether the dataset in val epoch is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)
@DATASETS.register_module()
class VOCDataset(XMLDataset):
    """Pascal VOC dataset (VOC2007 / VOC2012) with XML annotations."""

    # The 20 Pascal VOC object categories.
    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

    # One RGB color per class, used for visualization.
    PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192), (197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255), (153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252), (182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0), (0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # The dataset year selects the mAP protocol in ``evaluate``
        # ('voc07' uses the 11-point interpolated metric).
        if ('VOC2007' in self.img_prefix):
            self.year = 2007
        elif ('VOC2012' in self.img_prefix):
            self.year = 2012
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')

    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """
        # Only a single metric is supported per call.
        if (not isinstance(metric, str)):
            assert (len(metric) == 1)
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if (metric not in allowed_metrics):
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = ([iou_thr] if isinstance(iou_thr, float) else iou_thr)
        if (metric == 'mAP'):
            assert isinstance(iou_thrs, list)
            if (self.year == 2007):
                # VOC2007 uses the 11-point interpolated AP protocol.
                ds_name = 'voc07'
            else:
                ds_name = self.CLASSES
            mean_aps = []
            # NOTE(review): ``scale_ranges`` is accepted but not forwarded to
            # ``eval_map`` (None is passed) — confirm this is intended.
            for iou_thr in iou_thrs:
                print_log(f'''
{('-' * 15)}iou_thr: {iou_thr}{('-' * 15)}''')
                (mean_ap, _) = eval_map(results, annotations, scale_ranges=None, iou_thr=iou_thr, dataset=ds_name, logger=logger, use_legacy_coordinate=True)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int((iou_thr * 100)):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = (sum(mean_aps) / len(mean_aps))
            # Keep 'mAP' as the first key for nicer logging.
            eval_results.move_to_end('mAP', last=False)
        elif (metric == 'recall'):
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thrs, logger=logger, use_legacy_coordinate=True)
            for (i, num) in enumerate(proposal_nums):
                for (j, iou_thr) in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou_thr}'] = recalls[(i, j)]
            # Average recall over IoU thresholds when more than one is given.
            if (recalls.shape[1] > 1):
                ar = recalls.mean(axis=1)
                for (i, num) in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
    """Reader for the WIDER Face dataset in PASCAL VOC format.

    Conversion scripts can be found in
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """

    CLASSES = ('face',)

    PALETTE = [(0, 255, 0)]

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotation from WIDERFace XML style annotation file.

        Args:
            ann_file (str): Path of XML file.

        Returns:
            list[dict]: Annotation info from XML file.
        """
        infos = []
        for img_id in mmcv.list_from_file(ann_file):
            xml_file = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            root = ET.parse(xml_file).getroot()
            # Image size is recorded in the XML; the image itself lives in
            # the per-event <folder> subdirectory.
            size_node = root.find('size')
            infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(root.find('folder').text,
                                      f'{img_id}.jpg'),
                    width=int(size_node.find('width').text),
                    height=int(size_node.find('height').text)))
        return infos
@DATASETS.register_module()
class XMLDataset(CustomDataset):
    """XML dataset for detection (PASCAL VOC style annotations).

    Args:
        min_size (int | float, optional): The minimum size of bounding
            boxes in the images. If the size of a bounding box is less than
            ``min_size``, it would be added to the ignored field.
        img_subdir (str): Subdir where images are stored. Default: JPEGImages.
        ann_subdir (str): Subdir where annotations are. Default: Annotations.
    """

    def __init__(self, min_size=None, img_subdir='JPEGImages', ann_subdir='Annotations', **kwargs):
        assert (self.CLASSES or kwargs.get('classes', None)), 'CLASSES in `XMLDataset` can not be None.'
        self.img_subdir = img_subdir
        self.ann_subdir = ann_subdir
        super(XMLDataset, self).__init__(**kwargs)
        # Map each class name to its integer label.
        self.cat2label = {cat: i for (i, cat) in enumerate(self.CLASSES)}
        self.min_size = min_size

    def load_annotations(self, ann_file):
        """Load annotation from XML style ann_file.

        Args:
            ann_file (str): Path of XML file listing one image id per line.

        Returns:
            list[dict]: Annotation info (id, filename, width, height).
        """
        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = osp.join(self.img_subdir, f'{img_id}.jpg')
            xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            if (size is not None):
                width = int(size.find('width').text)
                height = int(size.find('height').text)
            else:
                # XML lacks a <size> node: read the actual image instead.
                img_path = osp.join(self.img_prefix, filename)
                img = Image.open(img_path)
                (width, height) = img.size
            data_infos.append(dict(id=img_id, filename=filename, width=width, height=height))
        return data_infos

    def _filter_imgs(self, min_size=32):
        """Filter images too small or (when ``filter_empty_gt``) without
        any annotation of a known class."""
        valid_inds = []
        for (i, img_info) in enumerate(self.data_infos):
            if (min(img_info['width'], img_info['height']) < min_size):
                continue
            if self.filter_empty_gt:
                # Keep the image only if at least one object belongs to a
                # known class.
                img_id = img_info['id']
                xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
                tree = ET.parse(xml_path)
                root = tree.getroot()
                for obj in root.findall('object'):
                    name = obj.find('name').text
                    if (name in self.CLASSES):
                        valid_inds.append(i)
                        break
            else:
                valid_inds.append(i)
        return valid_inds

    def get_ann_info(self, idx):
        """Get annotation from XML file by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index with keys ``bboxes``,
                ``labels``, ``bboxes_ignore`` and ``labels_ignore``.
        """
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for obj in root.findall('object'):
            name = obj.find('name').text
            if (name not in self.CLASSES):
                continue
            label = self.cat2label[name]
            difficult = obj.find('difficult')
            difficult = (0 if (difficult is None) else int(difficult.text))
            bnd_box = obj.find('bndbox')
            bbox = [int(float(bnd_box.find('xmin').text)), int(float(bnd_box.find('ymin').text)), int(float(bnd_box.find('xmax').text)), int(float(bnd_box.find('ymax').text))]
            ignore = False
            if self.min_size:
                assert (not self.test_mode)
                w = (bbox[2] - bbox[0])
                h = (bbox[3] - bbox[1])
                if ((w < self.min_size) or (h < self.min_size)):
                    ignore = True
            # Difficult or undersized boxes are kept but moved to the
            # ignore lists instead of being used as positives.
            if (difficult or ignore):
                bboxes_ignore.append(bbox)
                labels_ignore.append(label)
            else:
                bboxes.append(bbox)
                labels.append(label)
        if (not bboxes):
            bboxes = np.zeros((0, 4))
            labels = np.zeros((0,))
        else:
            # NOTE(review): the ``- 1`` presumably shifts 1-based VOC pixel
            # coordinates to 0-based — confirm against the coordinate
            # convention used downstream.
            bboxes = (np.array(bboxes, ndmin=2) - 1)
            labels = np.array(labels)
        if (not bboxes_ignore):
            bboxes_ignore = np.zeros((0, 4))
            labels_ignore = np.zeros((0,))
        else:
            bboxes_ignore = (np.array(bboxes_ignore, ndmin=2) - 1)
            labels_ignore = np.array(labels_ignore)
        ann = dict(bboxes=bboxes.astype(np.float32), labels=labels.astype(np.int64), bboxes_ignore=bboxes_ignore.astype(np.float32), labels_ignore=labels_ignore.astype(np.int64))
        return ann

    def get_cat_ids(self, idx):
        """Get category ids in XML file by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        cat_ids = []
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        for obj in root.findall('object'):
            name = obj.find('name').text
            if (name not in self.CLASSES):
                continue
            label = self.cat2label[name]
            cat_ids.append(label)
        return cat_ids
class ResBlock(BaseModule):
    """Darknet residual block: 1x1 squeeze conv, 3x3 conv, skip-add.

    The first ConvModule halves the channel count with a 1x1 kernel, the
    second restores it with a 3x3 kernel, and the block input is added to
    the result. Each ConvModule is Conv + BN + LeakyReLU.

    Args:
        in_channels (int): The input channels. Must be even.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self, in_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=None):
        super(ResBlock, self).__init__(init_cfg)
        assert in_channels % 2 == 0
        mid_channels = in_channels // 2
        common = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.conv1 = ConvModule(in_channels, mid_channels, 1, **common)
        self.conv2 = ConvModule(mid_channels, in_channels, 3, padding=1, **common)

    def forward(self, x):
        """Apply both convs and add the block input to the result."""
        return self.conv2(self.conv1(x)) + x
@BACKBONES.register_module()
class Darknet(BaseModule):
    """Darknet backbone.

    Args:
        depth (int): Depth of Darknet. Currently only support 53.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters. Default: -1.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import Darknet
        >>> import torch
        >>> self = Darknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """

    # Per depth: (#ResBlocks per stage, (in, out) channels per stage).
    arch_settings = {53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512), (512, 1024)))}

    def __init__(self, depth=53, out_indices=(3, 4, 5), frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), norm_eval=True, pretrained=None, init_cfg=None):
        super(Darknet, self).__init__(init_cfg)
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for darknet')
        self.depth = depth
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        (self.layers, self.channels) = self.arch_settings[depth]
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # Stem conv, followed by one downsampling conv+res stage per entry
        # in ``self.layers``; stage names are collected in ``cr_blocks``.
        self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
        self.cr_blocks = ['conv1']
        for (i, n_layers) in enumerate(self.layers):
            layer_name = f'conv_res_block{(i + 1)}'
            (in_c, out_c) = self.channels[i]
            self.add_module(layer_name, self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
            self.cr_blocks.append(layer_name)
        self.norm_eval = norm_eval
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            # Legacy API: translate ``pretrained`` into an init_cfg entry.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                # Default initialization when neither option is provided.
                self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run stem and stages, collecting outputs at ``out_indices``."""
        outs = []
        for (i, layer_name) in enumerate(self.cr_blocks):
            cr_block = getattr(self, layer_name)
            x = cr_block(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)

    def _freeze_stages(self):
        # Put the first ``frozen_stages`` blocks in eval mode and stop
        # their gradients.
        if (self.frozen_stages >= 0):
            for i in range(self.frozen_stages):
                m = getattr(self, self.cr_blocks[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Switch train/eval mode, re-applying stage freezing; keeps norm
        layers in eval mode when ``norm_eval`` is set."""
        super(Darknet, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()

    @staticmethod
    def make_conv_res_block(in_channels, out_channels, res_repeat, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
        """In Darknet backbone, ConvLayer is usually followed by ResBlock. This
        function will make that. The Conv layers always have 3x3 filters with
        stride=2. The number of the filters in Conv layer is the same as the
        out channels of the ResBlock.

        Args:
            in_channels (int): The number of input channels.
            out_channels (int): The number of output channels.
            res_repeat (int): The number of ResBlocks.
            conv_cfg (dict): Config dict for convolution layer. Default: None.
            norm_cfg (dict): Dictionary to construct and config norm layer.
                Default: dict(type='BN', requires_grad=True)
            act_cfg (dict): Config dict for activation layer.
                Default: dict(type='LeakyReLU', negative_slope=0.1).
        """
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        model = nn.Sequential()
        # Downsampling conv (stride 2) followed by ``res_repeat`` ResBlocks.
        model.add_module('conv', ConvModule(in_channels, out_channels, 3, stride=2, padding=1, **cfg))
        for idx in range(res_repeat):
            model.add_module('res{}'.format(idx), ResBlock(out_channels, **cfg))
        return model
class Bottleneck(_Bottleneck):
    """Bottleneck for the ResNet backbone in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_.

    This bottleneck allows the users to specify whether to use
    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).

    Args:
        inplanes (int): The number of input channels.
        planes (int): The number of output channels before expansion.
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        sac (dict, optional): Dictionary to construct SAC. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    expansion = 4

    def __init__(self, inplanes, planes, rfp_inplanes=None, sac=None, init_cfg=None, **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, init_cfg=init_cfg, **kwargs)
        assert ((sac is None) or isinstance(sac, dict))
        self.sac = sac
        self.with_sac = (sac is not None)
        if self.with_sac:
            # Replace the base class 3x3 conv with a SAC conv.
            self.conv2 = build_conv_layer(self.sac, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)
        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            # 1x1 projection that merges the RFP feature into the output.
            self.rfp_conv = build_conv_layer(None, self.rfp_inplanes, (planes * self.expansion), 1, stride=1, bias=True)
            if (init_cfg is None):
                # Zero-init so the RFP branch starts as an identity mapping.
                self.init_cfg = dict(type='Constant', val=0, override=dict(name='rfp_conv'))

    def rfp_forward(self, x, rfp_feat):
        """The forward function that also takes the RFP features as input."""

        def _inner_forward(x):
            # Standard bottleneck: 1x1 -> 3x3 (possibly SAC) -> 1x1, with
            # optional plugins after each conv and a residual shortcut.
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out

        if (self.with_cp and x.requires_grad):
            # Trade compute for memory via gradient checkpointing.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        if self.rfp_inplanes:
            # Fuse the recursive feature pyramid signal before the final ReLU.
            rfp_feat = self.rfp_conv(rfp_feat)
            out = (out + rfp_feat)
        out = self.relu(out)
        return out
class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone for RPF in detectoRS.

    The difference between this module and base class is that we pass
    ``rfp_inplanes`` to the first block.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 downsample_first=True,
                 rfp_inplanes=None,
                 **kwargs):
        self.block = block
        # DetectoRS only supports the ResNet-style layout where the
        # downsampling block comes first.
        assert downsample_first, f'downsample_first={downsample_first} is not supported in DetectoRS'

        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            # Identity shortcut cannot be used: build a projection shortcut.
            downsample = []
            conv_stride = stride
            if (avg_down and (stride != 1)):
                # ResNet-D style: average-pool for spatial reduction so the
                # 1x1 projection conv can keep stride 1.
                conv_stride = 1
                downsample.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            downsample.extend([
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    (planes * block.expansion),
                    kernel_size=1,
                    stride=conv_stride,
                    bias=False),
                build_norm_layer(norm_cfg, (planes * block.expansion))[1]
            ])
            downsample = nn.Sequential(*downsample)

        layers = []
        # Only the first block gets the stride, the downsample shortcut and
        # the RFP input channels.
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                rfp_inplanes=rfp_inplanes,
                **kwargs))
        inplanes = (planes * block.expansion)
        for _ in range(1, num_blocks):
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
        super(ResLayer, self).__init__(*layers)
@BACKBONES.register_module()
class DetectoRS_ResNet(ResNet):
    """ResNet backbone for DetectoRS.

    Args:
        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
            Convolution). Default: None.
        stage_with_sac (list): Which stage to use sac. Default: (False, False,
            False, False).
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        output_img (bool): If ``True``, the input image will be inserted into
            the starting position of output. Default: False.
        pretrained (str, optional): Model pretrained path. Default: None.
        init_cfg (dict, optional): Initialization config dict; only the
            ``Pretrained`` type is accepted. Default: None.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 sac=None,
                 stage_with_sac=(False, False, False, False),
                 rfp_inplanes=None,
                 output_img=False,
                 pretrained=None,
                 init_cfg=None,
                 **kwargs):
        assert (not (init_cfg and pretrained)), \
            'init_cfg and pretrained cannot be specified at the same time'
        self.pretrained = pretrained
        if (init_cfg is not None):
            # Only pretrained-checkpoint initialization is supported here;
            # the checkpoint path is funneled into ``self.pretrained`` and
            # handled by the custom ``init_weights`` below.
            assert isinstance(init_cfg, dict), \
                f'init_cfg must be a dict, but got {type(init_cfg)}'
            if ('type' in init_cfg):
                assert (init_cfg.get('type') == 'Pretrained'), \
                    'Only can initialize module by loading a pretrained model'
            else:
                raise KeyError('`init_cfg` must contain the key "type"')
            self.pretrained = init_cfg.get('checkpoint')
        self.sac = sac
        self.stage_with_sac = stage_with_sac
        self.rfp_inplanes = rfp_inplanes
        self.output_img = output_img
        super(DetectoRS_ResNet, self).__init__(**kwargs)

        # The base ResNet has already built layer1..layer4; rebuild them here
        # with SAC / RFP support so the new stages overwrite the base ones.
        self.inplanes = self.stem_channels
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            sac = (self.sac if self.stage_with_sac[i] else None)
            if (self.plugins is not None):
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            planes = (self.base_channels * (2 ** i))
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                sac=sac,
                # The first stage never receives RFP features.
                rfp_inplanes=(rfp_inplanes if (i > 0) else None),
                plugins=stage_plugins)
            self.inplanes = (planes * self.block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()

    def init_weights(self):
        """Initialize weights (legacy path, not driven by ``init_cfg``)."""
        if isinstance(self.pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, self.pretrained, strict=False, logger=logger)
        elif (self.pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if (self.dcn is not None):
                # Zero-init DCN offsets so deformable convs start as regular
                # convolutions.
                for m in self.modules():
                    if (isinstance(m, Bottleneck)
                            and hasattr(m.conv2, 'conv_offset')):
                        constant_init(m.conv2.conv_offset, 0)
            if self.zero_init_residual:
                # Zero-init the last norm in each residual block so the block
                # initially behaves as identity.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
        return ResLayer(**kwargs)

    def forward(self, x):
        """Forward function."""
        outs = list(super(DetectoRS_ResNet, self).forward(x))
        if self.output_img:
            # RFP needs the raw image as the first "feature" level.
            outs.insert(0, x)
        return tuple(outs)

    def rfp_forward(self, x, rfp_feats):
        """Forward function for RFP."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            # Stage 0 gets no RFP feature (mirrors construction above).
            rfp_feat = (rfp_feats[i] if (i > 0) else None)
            for layer in res_layer:
                x = layer.rfp_forward(x, rfp_feat)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)
class Bottleneck(_Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs): 'Bottleneck block for ResNeXt.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if\n it is "caffe", the stride-two layer is the first 1x1 conv layer.\n ' super(Bottleneck, self).__init__(inplanes, planes, **kwargs) if (groups == 1): width = self.planes else: width = (math.floor((self.planes * (base_width / base_channels))) * groups) (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, width, postfix=1) (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, width, postfix=2) (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3) self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False self.with_modulated_dcn = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if self.with_sac: self.conv2 = build_conv_layer(self.sac, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) elif ((not self.with_dcn) or fallback_on_stride): self.conv2 = build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) else: assert (self.conv_cfg is None), 'conv_cfg must be None for DCN' self.conv2 = build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer(self.conv_cfg, width, (self.planes * self.expansion), kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3)
@BACKBONES.register_module() class DetectoRS_ResNeXt(DetectoRS_ResNet): 'ResNeXt backbone for DetectoRS.\n\n Args:\n groups (int): The number of groups in ResNeXt.\n base_width (int): The base width of ResNeXt.\n ' arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))} def __init__(self, groups=1, base_width=4, **kwargs): self.groups = groups self.base_width = base_width super(DetectoRS_ResNeXt, self).__init__(**kwargs) def make_res_layer(self, **kwargs): return super().make_res_layer(groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
class HourglassModule(BaseModule): "Hourglass Module for HourglassNet backbone.\n\n Generate module recursively and use BasicBlock as the base unit.\n\n Args:\n depth (int): Depth of current HourglassModule.\n stage_channels (list[int]): Feature channels of sub-modules in current\n and follow-up HourglassModule.\n stage_blocks (list[int]): Number of sub-modules stacked in current and\n follow-up HourglassModule.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None\n upsample_cfg (dict, optional): Config dict for interpolate layer.\n Default: `dict(mode='nearest')`\n " def __init__(self, depth, stage_channels, stage_blocks, norm_cfg=dict(type='BN', requires_grad=True), init_cfg=None, upsample_cfg=dict(mode='nearest')): super(HourglassModule, self).__init__(init_cfg) self.depth = depth cur_block = stage_blocks[0] next_block = stage_blocks[1] cur_channel = stage_channels[0] next_channel = stage_channels[1] self.up1 = ResLayer(BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg) self.low1 = ResLayer(BasicBlock, cur_channel, next_channel, cur_block, stride=2, norm_cfg=norm_cfg) if (self.depth > 1): self.low2 = HourglassModule((depth - 1), stage_channels[1:], stage_blocks[1:]) else: self.low2 = ResLayer(BasicBlock, next_channel, next_channel, next_block, norm_cfg=norm_cfg) self.low3 = ResLayer(BasicBlock, next_channel, cur_channel, cur_block, norm_cfg=norm_cfg, downsample_first=False) self.up2 = F.interpolate self.upsample_cfg = upsample_cfg def forward(self, x): 'Forward function.' up1 = self.up1(x) low1 = self.low1(x) low2 = self.low2(low1) low3 = self.low3(low2) if ('scale_factor' in self.upsample_cfg): up2 = self.up2(low3, **self.upsample_cfg) else: shape = up1.shape[2:] up2 = self.up2(low3, size=shape, **self.upsample_cfg) return (up1 + up2)
@BACKBONES.register_module()
class HourglassNet(BaseModule):
    """HourglassNet backbone.

    Stacked Hourglass Networks for Human Pose Estimation.
    More details can be found in the `paper
    <https://arxiv.org/abs/1603.06937>`_ .

    Args:
        downsample_times (int): Downsample times in a HourglassModule.
        num_stacks (int): Number of HourglassModule modules stacked,
            1 for Hourglass-52, 2 for Hourglass-104.
        stage_channels (list[int]): Feature channel of each sub-module in a
            HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in a
            HourglassModule.
        feat_channel (int): Feature channel of conv after a HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import HourglassNet
        >>> import torch
        >>> self = HourglassNet()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 511, 511)
        >>> level_outputs = self.forward(inputs)
        >>> for level_output in level_outputs:
        ...     print(tuple(level_output.shape))
        (1, 256, 128, 128)
        (1, 256, 128, 128)
    """

    def __init__(self,
                 downsample_times=5,
                 num_stacks=2,
                 stage_channels=(256, 256, 384, 384, 384, 512),
                 stage_blocks=(2, 2, 2, 2, 2, 4),
                 feat_channel=256,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 pretrained=None,
                 init_cfg=None):
        # This backbone relies on its own ``init_weights`` below; an external
        # init_cfg would interfere with that.
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super(HourglassNet, self).__init__(init_cfg)

        self.num_stacks = num_stacks
        assert (self.num_stacks >= 1)
        assert (len(stage_channels) == len(stage_blocks))
        # Each downsample level consumes one stage spec, plus the bottom one.
        assert (len(stage_channels) > downsample_times)

        cur_channel = stage_channels[0]

        # Stem downsamples the input by 4x overall (stride 2 conv + stride 2
        # residual layer).
        self.stem = nn.Sequential(
            ConvModule(
                3, (cur_channel // 2), 7, padding=3, stride=2,
                norm_cfg=norm_cfg),
            ResLayer(
                BasicBlock, (cur_channel // 2), cur_channel, 1, stride=2,
                norm_cfg=norm_cfg))

        self.hourglass_modules = nn.ModuleList([
            HourglassModule(downsample_times, stage_channels, stage_blocks)
            for _ in range(num_stacks)
        ])

        # Intermediate layers between consecutive hourglass stacks.
        self.inters = ResLayer(
            BasicBlock,
            cur_channel,
            cur_channel,
            (num_stacks - 1),
            norm_cfg=norm_cfg)

        self.conv1x1s = nn.ModuleList([
            ConvModule(
                cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range((num_stacks - 1))
        ])

        self.out_convs = nn.ModuleList([
            ConvModule(
                cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
            for _ in range(num_stacks)
        ])

        # Project each stack's output back to the trunk width so it can be
        # added into the next stack's input.
        self.remap_convs = nn.ModuleList([
            ConvModule(
                feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range((num_stacks - 1))
        ])

        self.relu = nn.ReLU(inplace=True)

    def init_weights(self):
        """Init module weights."""
        # Training is sensitive to initialization: reset every Conv2d to the
        # PyTorch default after the base init runs.
        super(HourglassNet, self).init_weights()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.reset_parameters()

    def forward(self, x):
        """Forward function."""
        inter_feat = self.stem(x)
        out_feats = []

        for ind in range(self.num_stacks):
            single_hourglass = self.hourglass_modules[ind]
            out_conv = self.out_convs[ind]

            hourglass_feat = single_hourglass(inter_feat)
            out_feat = out_conv(hourglass_feat)
            out_feats.append(out_feat)

            if (ind < (self.num_stacks - 1)):
                # Feed a mix of the previous trunk feature and this stack's
                # output into the next stack (intermediate supervision path).
                inter_feat = (self.conv1x1s[ind](inter_feat) +
                              self.remap_convs[ind](out_feat))
                inter_feat = self.inters[ind](self.relu(inter_feat))

        return out_feats
class HRModule(BaseModule):
    """High-Resolution Module for HRNet.

    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
    is in this module.
    """

    def __init__(self,
                 num_branches,
                 blocks,
                 num_blocks,
                 in_channels,
                 num_channels,
                 multiscale_output=True,
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 block_init_cfg=None,
                 init_cfg=None):
        super(HRModule, self).__init__(init_cfg)
        self.block_init_cfg = block_init_cfg
        self._check_branches(num_branches, num_blocks, in_channels,
                             num_channels)

        self.in_channels = in_channels
        self.num_branches = num_branches

        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks,
                                            num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels,
                        num_channels):
        # All per-branch config lists must agree with the branch count.
        if (num_branches != len(num_blocks)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_BLOCKS({len(num_blocks)})'
            raise ValueError(error_msg)

        if (num_branches != len(num_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_CHANNELS({len(num_channels)})'
            raise ValueError(error_msg)

        if (num_branches != len(in_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_INCHANNELS({len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        # Build one resolution branch as a stack of residual blocks; only the
        # first block may need a projection shortcut.
        downsample = None
        if ((stride != 1) or
                (self.in_channels[branch_index] !=
                 (num_channels[branch_index] * block.expansion))):
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    self.in_channels[branch_index],
                    (num_channels[branch_index] * block.expansion),
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg,
                                 (num_channels[branch_index] *
                                  block.expansion))[1])

        layers = []
        layers.append(
            block(
                self.in_channels[branch_index],
                num_channels[branch_index],
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg,
                init_cfg=self.block_init_cfg))
        # NOTE: mutates the shared ``in_channels`` list so later fuse layers
        # see the expanded channel count.
        self.in_channels[branch_index] = (num_channels[branch_index] *
                                          block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(
                    self.in_channels[branch_index],
                    num_channels[branch_index],
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg,
                    init_cfg=self.block_init_cfg))

        return Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return ModuleList(branches)

    def _make_fuse_layers(self):
        # Build the cross-resolution exchange: fuse_layers[i][j] maps branch
        # j's feature to branch i's resolution/width (None when i == j).
        if (self.num_branches == 1):
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = (num_branches if self.multiscale_output else 1)
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    # Lower-resolution source: 1x1 conv then upsample.
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels[j],
                                in_channels[i],
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            nn.Upsample(
                                scale_factor=(2 ** (j - i)), mode='nearest')))
                elif (j == i):
                    fuse_layer.append(None)
                else:
                    # Higher-resolution source: chain of stride-2 3x3 convs;
                    # only the last one switches to branch i's channel count
                    # and omits the ReLU.
                    conv_downsamples = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[i],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[j],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Forward function."""
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]

        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        x_fuse = []
        for i in range(len(self.fuse_layers)):
            # Sum branch j's (resampled) feature into output resolution i.
            y = 0
            for j in range(self.num_branches):
                if (i == j):
                    y += x[j]
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
@BACKBONES.register_module()
class HRNet(BaseModule):
    """HRNet backbone.

    `High-Resolution Representations for Labeling Pixels and Regions
    arXiv: <https://arxiv.org/abs/1904.04514>`_.

    Args:
        extra (dict): Detailed configuration for each stage of HRNet.
            There must be 4 stages, the configuration for each stage must have
            5 keys:

            - num_modules(int): The number of HRModule in this stage.
            - num_branches(int): The number of branches in the HRModule.
            - block(str): The type of convolution block.
            - num_blocks(tuple): The number of blocks in each branch.
                The length must be equal to num_branches.
            - num_channels(tuple): The number of channels in each branch.
                The length must be equal to num_branches.
        in_channels (int): Number of input image channels. Default: 3.
        conv_cfg (dict): Dictionary to construct and config conv layer.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: True.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity. Default: False.
        multiscale_output (bool): Whether to output multi-level features
            produced by multiple branches. If False, only the first level
            feature will be output. Default: True.
        pretrained (str, optional): Model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.

    Example:
        >>> from mmdet.models import HRNet
        >>> import torch
        >>> extra = dict(
        >>>     stage1=dict(
        >>>         num_modules=1,
        >>>         num_branches=1,
        >>>         block='BOTTLENECK',
        >>>         num_blocks=(4, ),
        >>>         num_channels=(64, )),
        >>>     stage2=dict(
        >>>         num_modules=1,
        >>>         num_branches=2,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4),
        >>>         num_channels=(32, 64)),
        >>>     stage3=dict(
        >>>         num_modules=4,
        >>>         num_branches=3,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4),
        >>>         num_channels=(32, 64, 128)),
        >>>     stage4=dict(
        >>>         num_modules=3,
        >>>         num_branches=4,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4, 4),
        >>>         num_channels=(32, 64, 128, 256)))
        >>> self = HRNet(extra, in_channels=1)
        >>> self.eval()
        >>> inputs = torch.rand(1, 1, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 32, 8, 8)
        (1, 64, 4, 4)
        (1, 128, 2, 2)
        (1, 256, 1, 1)
    """

    blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}

    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=True,
                 with_cp=False,
                 zero_init_residual=False,
                 multiscale_output=True,
                 pretrained=None,
                 init_cfg=None):
        super(HRNet, self).__init__(init_cfg)

        self.pretrained = pretrained
        assert (not (init_cfg and pretrained)), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

        # Validate the 4-stage configuration before building anything.
        assert (('stage1' in extra) and ('stage2' in extra)
                and ('stage3' in extra) and ('stage4' in extra))
        for i in range(4):
            cfg = extra[f'stage{(i + 1)}']
            assert ((len(cfg['num_blocks']) == cfg['num_branches']) and
                    (len(cfg['num_channels']) == cfg['num_branches']))

        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # Stem: two stride-2 3x3 convs (overall 4x downsampling).
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, 64, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, 64, postfix=2)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            self.conv_cfg,
            64,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)

        # stage 1
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        stage1_out_channels = (num_channels * block.expansion)
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        (self.stage2, pre_stage_channels) = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        (self.stage3, pre_stage_channels) = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [(channel * block.expansion) for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        (self.stage4, pre_stage_channels) = self._make_stage(
            self.stage4_cfg, num_channels, multiscale_output=multiscale_output)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        return getattr(self, self.norm2_name)

    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        # Adapt the previous stage's branch outputs to the next stage's
        # branch widths, creating new (lower-resolution) branches as needed.
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if (i < num_branches_pre):
                if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                    # Channel adaptation at the same resolution.
                    transition_layers.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg,
                                             num_channels_cur_layer[i])[1],
                            nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                # Brand-new branch: downsample from the last existing branch.
                conv_downsamples = []
                for j in range(((i + 1) - num_branches_pre)):
                    in_channels = num_channels_pre_layer[(- 1)]
                    out_channels = (num_channels_cur_layer[i] if
                                    (j == (i - num_branches_pre)) else
                                    in_channels)
                    conv_downsamples.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels,
                                out_channels,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg, out_channels)[1],
                            nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))

        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        # Plain ResNet-style layer used for stage 1.
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    inplanes,
                    (planes * block.expansion),
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, (planes * block.expansion))[1])

        layers = []
        block_init_cfg = None
        # Zero-init residual only applies when not loading pretrained weights
        # and no explicit init_cfg was set on this module.
        if ((self.pretrained is None) and (not hasattr(self, 'init_cfg'))
                and self.zero_init_residual):
            if (block is BasicBlock):
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm2'))
            elif (block is Bottleneck):
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm3'))
        layers.append(
            block(
                inplanes,
                planes,
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg,
                init_cfg=block_init_cfg))
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(
                block(
                    inplanes,
                    planes,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg,
                    init_cfg=block_init_cfg))

        return Sequential(*layers)

    def _make_stage(self, layer_config, in_channels, multiscale_output=True):
        # Stack ``num_modules`` HRModules; only the last one may collapse to
        # single-scale output.
        num_modules = layer_config['num_modules']
        num_branches = layer_config['num_branches']
        num_blocks = layer_config['num_blocks']
        num_channels = layer_config['num_channels']
        block = self.blocks_dict[layer_config['block']]

        hr_modules = []
        block_init_cfg = None
        if ((self.pretrained is None) and (not hasattr(self, 'init_cfg'))
                and self.zero_init_residual):
            if (block is BasicBlock):
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm2'))
            elif (block is Bottleneck):
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm3'))

        for i in range(num_modules):
            # multi_scale_output is only used for the last module
            if ((not multiscale_output) and (i == (num_modules - 1))):
                reset_multiscale_output = False
            else:
                reset_multiscale_output = True

            hr_modules.append(
                HRModule(
                    num_branches,
                    block,
                    num_blocks,
                    in_channels,
                    num_channels,
                    reset_multiscale_output,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg,
                    block_init_cfg=block_init_cfg))

        return (Sequential(*hr_modules), in_channels)

    def forward(self, x):
        """Forward function."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.layer1(x)

        x_list = []
        for i in range(self.stage2_cfg['num_branches']):
            if (self.transition1[i] is not None):
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        x_list = []
        for i in range(self.stage3_cfg['num_branches']):
            if (self.transition2[i] is not None):
                # New branches are derived from the lowest-resolution output.
                x_list.append(self.transition2[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        x_list = []
        for i in range(self.stage4_cfg['num_branches']):
            if (self.transition3[i] is not None):
                x_list.append(self.transition3[i](y_list[(- 1)]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)

        return y_list

    def train(self, mode=True):
        """Convert the model into training mode while keeping the
        normalization layers frozen."""
        super(HRNet, self).train(mode)
        if (mode and self.norm_eval):
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
@BACKBONES.register_module()
class MobileNetV2(BaseModule):
    """MobileNetV2 backbone.

    Args:
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        out_indices (Sequence[int], optional): Output from which stages.
            Default: (1, 2, 4, 7).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    # Parameters to build layers. 4 parameters are needed to construct a
    # layer, from left to right: expand_ratio, channel, num_blocks, stride.
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                     [6, 320, 1, 1]]

    def __init__(self,
                 widen_factor=1.0,
                 out_indices=(1, 2, 4, 7),
                 frozen_stages=(- 1),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 norm_eval=False,
                 with_cp=False,
                 pretrained=None,
                 init_cfg=None):
        super(MobileNetV2, self).__init__(init_cfg)

        self.pretrained = pretrained
        assert (not (init_cfg and pretrained)), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

        self.widen_factor = widen_factor
        # Validate first, then assign once (the original redundantly assigned
        # ``self.out_indices`` both before and after validation).
        if (not set(out_indices).issubset(set(range(0, 8)))):
            raise ValueError(f'out_indices must be a subset of range(0, 8). But received {out_indices}')

        if (frozen_stages not in range((- 1), 8)):
            raise ValueError(f'frozen_stages must be in range(-1, 8). But received {frozen_stages}')
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        self.in_channels = make_divisible((32 * widen_factor), 8)

        self.conv1 = ConvModule(
            in_channels=3,
            out_channels=self.in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        self.layers = []

        for (i, layer_cfg) in enumerate(self.arch_settings):
            (expand_ratio, channel, num_blocks, stride) = layer_cfg
            out_channels = make_divisible((channel * widen_factor), 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                expand_ratio=expand_ratio)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

        # The final 1x1 conv expands to 1280 channels (scaled up but never
        # down by the widen factor).
        if (widen_factor > 1.0):
            self.out_channel = int((1280 * widen_factor))
        else:
            self.out_channel = 1280

        layer = ConvModule(
            in_channels=self.in_channels,
            out_channels=self.out_channel,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.

        Args:
            out_channels (int): out_channels of block.
            num_blocks (int): number of blocks.
            stride (int): stride of the first block. Default: 1
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio. Default: 6.
        """
        layers = []
        for i in range(num_blocks):
            if (i >= 1):
                # Only the first block in a layer may downsample.
                stride = 1
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    mid_channels=int(round((self.in_channels * expand_ratio))),
                    stride=stride,
                    with_expand_conv=(expand_ratio != 1),
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    with_cp=self.with_cp))
            self.in_channels = out_channels

        return nn.Sequential(*layers)

    def _freeze_stages(self):
        # Freeze the stem and the first ``frozen_stages`` layers.
        if (self.frozen_stages >= 0):
            for param in self.conv1.parameters():
                param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            layer = getattr(self, f'layer{i}')
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False

    def forward(self, x):
        """Forward function."""
        x = self.conv1(x)
        outs = []
        for (i, layer_name) in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        frozen."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
@BACKBONES.register_module()
class RegNet(ResNet):
    """RegNet backbone.

    More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_.

    Args:
        arch (dict | str): Parameters of the RegNet, or a key into
            ``arch_settings``. A dict contains:

            - w0 (int): initial width
            - wa (float): slope of width
            - wm (float): quantization parameter to quantize the width
            - depth (int): depth of the backbone
            - group_w (int): width of group
            - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
        in_channels (int): Number of input image channels. Default: 3.
        stem_channels (int): Channels of the stem layer. Default: 32.
        base_channels (int): Base channels after stem layer. Default: 32.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all params fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for the last norm
            layer in resblocks so they behave as identity at init.
        pretrained (str, optional): Model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    # Published RegNetX configurations, keyed by compute regime.
    arch_settings = {
        'regnetx_400mf':
        dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
        'regnetx_800mf':
        dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
        'regnetx_1.6gf':
        dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
        'regnetx_3.2gf':
        dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
        'regnetx_4.0gf':
        dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
        'regnetx_6.4gf':
        dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
        'regnetx_8.0gf':
        dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
        'regnetx_12gf':
        dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
    }

    def __init__(self,
                 arch,
                 in_channels=3,
                 stem_channels=32,
                 base_channels=32,
                 strides=(2, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True,
                 pretrained=None,
                 init_cfg=None):
        # Deliberately call the grandparent constructor: RegNet builds its
        # own stages from generated widths instead of ResNet's arch table.
        super(ResNet, self).__init__(init_cfg)

        # Resolve the architecture description into a parameter dict.
        if isinstance(arch, str):
            assert arch in self.arch_settings, \
                f'"arch": "{arch}" is not one of the arch_settings'
            arch = self.arch_settings[arch]
        elif not isinstance(arch, dict):
            raise ValueError(
                f'Expect "arch" to be either a string or a dict, got {type(arch)}')

        # Generate per-block widths, then group them into stages.
        widths, num_stages = self.generate_regnet(arch['w0'], arch['wa'],
                                                  arch['wm'], arch['depth'])
        stage_widths, stage_blocks = self.get_stages_from_blocks(widths)
        group_widths = [arch['group_w'] for _ in range(num_stages)]
        self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
        # Make widths divisible by the group widths.
        stage_widths, group_widths = self.adjust_width_group(
            stage_widths, self.bottleneck_ratio, group_widths)

        self.stage_widths = stage_widths
        self.group_widths = group_widths
        self.depth = sum(stage_blocks)
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.zero_init_residual = zero_init_residual
        self.block = Bottleneck
        # RegNet uses expansion 1; restore the class attribute afterwards
        # since Bottleneck is shared with other backbones.
        expansion_bak = self.block.expansion
        self.block.expansion = 1

        self.stage_blocks = stage_blocks[:num_stages]
        self._make_stem_layer(in_channels, stem_channels)

        block_init_cfg = None
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(type='Constant', val=1,
                         layer=['_BatchNorm', 'GroupNorm'])
                ]
                if self.zero_init_residual:
                    # Zero-init the last BN of each block -> identity init.
                    block_init_cfg = dict(type='Constant', val=0,
                                          override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')

        self.inplanes = stem_channels
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            group_width = self.group_widths[i]
            width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))
            stage_groups = width // group_width
            dcn = self.dcn if self.stage_with_dcn[i] else None
            if self.plugins is not None:
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None

            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=self.stage_widths[i],
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                groups=stage_groups,
                base_width=group_width,
                base_channels=self.stage_widths[i],
                init_cfg=block_init_cfg)
            self.inplanes = self.stage_widths[i]
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()
        self.feat_dim = stage_widths[-1]
        self.block.expansion = expansion_bak

    def _make_stem_layer(self, in_channels, base_channels):
        """Build the 3x3 stride-2 stem (conv + norm + ReLU)."""
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            base_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, base_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)

    def generate_regnet(self,
                        initial_width,
                        width_slope,
                        width_parameter,
                        depth,
                        divisor=8):
        """Generate per-block widths from RegNet parameters.

        Args:
            initial_width (int): Initial width of the backbone.
            width_slope (float): Slope of the quantized linear function.
            width_parameter (int): Parameter used to quantize the width.
            depth (int): Depth of the backbone.
            divisor (int, optional): The divisor of channels. Defaults to 8.

        Returns:
            tuple(list, int): a list of widths of each stage and the number
            of stages.
        """
        assert width_slope >= 0
        assert initial_width > 0
        assert width_parameter > 1
        assert initial_width % divisor == 0
        # Continuous linear widths, then quantize them onto a log grid.
        widths_cont = np.arange(depth) * width_slope + initial_width
        ks = np.round(
            np.log(widths_cont / initial_width) / np.log(width_parameter))
        widths = initial_width * np.power(width_parameter, ks)
        widths = np.round(np.divide(widths, divisor)) * divisor
        num_stages = len(np.unique(widths))
        widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
        return widths, num_stages

    @staticmethod
    def quantize_float(number, divisor):
        """Convert a float to the closest non-zero int divisible by divisor.

        Args:
            number (int): Original number to be quantized.
            divisor (int): Divisor used to quantize the number.

        Returns:
            int: quantized number that is divisible by divisor.
        """
        return int(round(number / divisor) * divisor)

    def adjust_width_group(self, widths, bottleneck_ratio, groups):
        """Adjust the compatibility of widths and groups.

        Args:
            widths (list[int]): Width of each stage.
            bottleneck_ratio (float): Bottleneck ratio.
            groups (int): number of groups in each stage.

        Returns:
            tuple(list): The adjusted widths and groups of each stage.
        """
        bottleneck_width = [
            int(w * b) for w, b in zip(widths, bottleneck_ratio)
        ]
        # A group can never be wider than the bottleneck itself.
        groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]
        bottleneck_width = [
            self.quantize_float(w_bot, g)
            for w_bot, g in zip(bottleneck_width, groups)
        ]
        widths = [
            int(w_bot / b)
            for w_bot, b in zip(bottleneck_width, bottleneck_ratio)
        ]
        return widths, groups

    def get_stages_from_blocks(self, widths):
        """Get widths/stage_blocks of the network at each stage.

        Args:
            widths (list[int]): Width in each stage.

        Returns:
            tuple(list): width and depth of each stage.
        """
        # A stage boundary is wherever the width changes between neighbours.
        width_diff = [
            width != width_prev
            for width, width_prev in zip(widths + [0], [0] + widths)
        ]
        stage_widths = [
            width for width, diff in zip(widths, width_diff[:-1]) if diff
        ]
        stage_blocks = np.diff([
            depth for depth, diff in zip(range(len(width_diff)), width_diff)
            if diff
        ]).tolist()
        return stage_widths, stage_blocks

    def forward(self, x):
        """Forward function returning features from ``out_indices`` stages."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)

        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
class Bottle2neck(_Bottleneck):
    # Res2Net keeps ResNet's bottleneck expansion.
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 scales=4,
                 base_width=26,
                 base_channels=64,
                 stage_type='normal',
                 **kwargs):
        """Bottle2neck block for Res2Net.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
        assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'

        # Width of each scale branch.
        width = int(math.floor(self.planes * (base_width / base_channels)))

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width * scales, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width * scales,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)

        # First block of a stage downsamples the last split with avg-pool.
        if stage_type == 'stage' and self.conv2_stride != 1:
            self.pool = nn.AvgPool2d(
                kernel_size=3, stride=self.conv2_stride, padding=1)

        convs = []
        bns = []

        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            # Plain 3x3 convs, one per scale branch (last split has none).
            for branch in range(scales - 1):
                convs.append(
                    build_conv_layer(
                        self.conv_cfg,
                        width,
                        width,
                        kernel_size=3,
                        stride=self.conv2_stride,
                        padding=self.dilation,
                        dilation=self.dilation,
                        bias=False))
                bns.append(
                    build_norm_layer(self.norm_cfg, width,
                                     postfix=branch + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        else:
            # Deformable 3x3 convs for this stage.
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            for branch in range(scales - 1):
                convs.append(
                    build_conv_layer(
                        self.dcn,
                        width,
                        width,
                        kernel_size=3,
                        stride=self.conv2_stride,
                        padding=self.dilation,
                        dilation=self.dilation,
                        bias=False))
                bns.append(
                    build_norm_layer(self.norm_cfg, width,
                                     postfix=branch + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width * scales,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.stage_type = stage_type
        self.scales = scales
        self.width = width
        # The parent built a single conv2/norm2; Res2Net replaces them with
        # the multi-branch convs/bns above.
        delattr(self, 'conv2')
        delattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            # Split into `scales` chunks along channels; process hierarchically.
            spx = torch.split(out, self.width, 1)
            sp = self.convs[0](spx[0].contiguous())
            sp = self.relu(self.bns[0](sp))
            out = sp
            for i in range(1, self.scales - 1):
                if self.stage_type == 'stage':
                    sp = spx[i]
                else:
                    # Hierarchical residual: feed previous branch output in.
                    sp = sp + spx[i]
                sp = self.convs[i](sp.contiguous())
                sp = self.relu(self.bns[i](sp))
                out = torch.cat((out, sp), 1)

            # The last split is passed through (pooled if downsampling).
            if self.stage_type == 'normal' or self.conv2_stride == 1:
                out = torch.cat((out, spx[self.scales - 1]), 1)
            elif self.stage_type == 'stage':
                out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity
            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)
        return out
class Res2Layer(Sequential): "Res2Layer to build Res2Net style backbone.\n\n Args:\n block (nn.Module): block used to build ResLayer.\n inplanes (int): inplanes of block.\n planes (int): planes of block.\n num_blocks (int): number of blocks.\n stride (int): stride of the first block. Default: 1\n avg_down (bool): Use AvgPool instead of stride conv when\n downsampling in the bottle2neck. Default: False\n conv_cfg (dict): dictionary to construct and config conv layer.\n Default: None\n norm_cfg (dict): dictionary to construct and config norm layer.\n Default: dict(type='BN')\n scales (int): Scales used in Res2Net. Default: 4\n base_width (int): Basic width of each scale. Default: 26\n " def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=True, conv_cfg=None, norm_cfg=dict(type='BN'), scales=4, base_width=26, **kwargs): self.block = block downsample = None if ((stride != 1) or (inplanes != (planes * block.expansion))): downsample = nn.Sequential(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False), build_conv_layer(conv_cfg, inplanes, (planes * block.expansion), kernel_size=1, stride=1, bias=False), build_norm_layer(norm_cfg, (planes * block.expansion))[1]) layers = [] layers.append(block(inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, stage_type='stage', **kwargs)) inplanes = (planes * block.expansion) for i in range(1, num_blocks): layers.append(block(inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, **kwargs)) super(Res2Layer, self).__init__(*layers)
@BACKBONES.register_module()
class Res2Net(ResNet):
    """Res2Net backbone.

    Args:
        scales (int): Scales used in Res2Net. Default: 4.
        base_width (int): Basic width of each scale. Default: 26.
        depth (int): Depth of res2net, from {50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Res2net stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): ``pytorch`` or ``caffe``.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: effect on Batch Norm
            and its variants only.
        plugins (list[dict]): List of plugins for stages.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for the last
            norm layer in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    arch_settings = {
        50: (Bottle2neck, (3, 4, 6, 3)),
        101: (Bottle2neck, (3, 4, 23, 3)),
        152: (Bottle2neck, (3, 8, 36, 3))
    }

    def __init__(self,
                 scales=4,
                 base_width=26,
                 style='pytorch',
                 deep_stem=True,
                 avg_down=True,
                 pretrained=None,
                 init_cfg=None,
                 **kwargs):
        self.scales = scales
        self.base_width = base_width
        # NOTE(review): the `style`/`deep_stem`/`avg_down` arguments are
        # accepted but the values below are hard-coded — Res2Net appears to
        # require this configuration; confirm before honoring caller values.
        super(Res2Net, self).__init__(
            style='pytorch',
            deep_stem=True,
            avg_down=True,
            pretrained=pretrained,
            init_cfg=init_cfg,
            **kwargs)

    def make_res_layer(self, **kwargs):
        """Build one stage as a ``Res2Layer``."""
        return Res2Layer(
            scales=self.scales,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
class RSoftmax(nn.Module): 'Radix Softmax module in ``SplitAttentionConv2d``.\n\n Args:\n radix (int): Radix of input.\n groups (int): Groups of input.\n ' def __init__(self, radix, groups): super().__init__() self.radix = radix self.groups = groups def forward(self, x): batch = x.size(0) if (self.radix > 1): x = x.view(batch, self.groups, self.radix, (- 1)).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, (- 1)) else: x = torch.sigmoid(x) return x
class SplitAttentionConv2d(BaseModule):
    """Split-Attention Conv2d in ResNeSt.

    Args:
        in_channels (int): Number of channels in the input feature map.
        channels (int): Number of intermediate channels.
        kernel_size (int | tuple[int]): Size of the convolution kernel.
        stride (int | tuple[int]): Stride of the convolution.
        padding (int | tuple[int]): Zero-padding added to both sides.
        dilation (int | tuple[int]): Spacing between kernel elements.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as nn.Conv2d.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
        reduction_factor (int): Reduction factor of inter_channels.
            Default: 4.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        dcn (dict): Config dict for DCN. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 radix=2,
                 reduction_factor=4,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 init_cfg=None):
        super(SplitAttentionConv2d, self).__init__(init_cfg)
        # Attention bottleneck width, floored at 32 channels.
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.groups = groups
        self.channels = channels
        self.with_dcn = dcn is not None
        self.dcn = dcn
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_dcn and not fallback_on_stride:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            conv_cfg = dcn
        # Radix-expanded grouped convolution producing all splits at once.
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            channels * radix,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups * radix,
            bias=False)
        self.norm0_name, norm0 = build_norm_layer(
            norm_cfg, channels * radix, postfix=0)
        self.add_module(self.norm0_name, norm0)
        self.relu = nn.ReLU(inplace=True)
        # Squeeze-and-excite style attention over the pooled features.
        self.fc1 = build_conv_layer(
            None, channels, inter_channels, 1, groups=self.groups)
        self.norm1_name, norm1 = build_norm_layer(
            norm_cfg, inter_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.fc2 = build_conv_layer(
            None, inter_channels, channels * radix, 1, groups=self.groups)
        self.rsoftmax = RSoftmax(radix, groups)

    @property
    def norm0(self):
        """nn.Module: the normalization layer named "norm0"."""
        return getattr(self, self.norm0_name)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1"."""
        return getattr(self, self.norm1_name)

    def forward(self, x):
        """Forward function: split, pool, attend, and re-weight the splits."""
        x = self.conv(x)
        x = self.norm0(x)
        x = self.relu(x)

        # FIX: the original computed `(batch, rchannel) = x.shape[:2]` and
        # then immediately re-assigned `batch = x.size(0)`; `rchannel` was
        # never used, so both redundant statements are replaced by one.
        batch = x.size(0)

        if self.radix > 1:
            splits = x.view(batch, self.radix, -1, *x.shape[2:])
            gap = splits.sum(dim=1)
        else:
            gap = x
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)

        gap = self.norm1(gap)
        gap = self.relu(gap)

        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)

        if self.radix > 1:
            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
            out = torch.sum(attens * splits, dim=1)
        else:
            out = atten * x
        return out.contiguous()
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeSt.

    Args:
        inplanes (int): Input planes of this block.
        planes (int): Middle planes of this block.
        groups (int): Groups of conv2.
        base_width (int): Base of width in terms of base channels.
            Default: 4.
        base_channels (int): Base of channels for calculating width.
            Default: 64.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Key word arguments for base class.
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        """Bottleneck block for ResNeSt."""
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        # Only pool for stride when the block actually downsamples.
        self.avg_down_stride = avg_down_stride and self.conv2_stride > 1

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.with_modulated_dcn = False
        # conv2 is replaced with split attention; stride moves into
        # avd_layer when avg_down_stride is on.
        self.conv2 = SplitAttentionConv2d(
            width,
            width,
            kernel_size=3,
            stride=1 if self.avg_down_stride else self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            radix=radix,
            reduction_factor=reduction_factor,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            dcn=self.dcn)
        # SplitAttentionConv2d normalizes internally; drop the parent norm2.
        delattr(self, self.norm2_name)

        if self.avg_down_stride:
            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

    def forward(self, x):

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)

            if self.avg_down_stride:
                out = self.avd_layer(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity
            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)
        return out
@BACKBONES.register_module() class ResNeSt(ResNetV1d): 'ResNeSt backbone.\n\n Args:\n groups (int): Number of groups of Bottleneck. Default: 1\n base_width (int): Base width of Bottleneck. Default: 4\n radix (int): Radix of SplitAttentionConv2d. Default: 2\n reduction_factor (int): Reduction factor of inter_channels in\n SplitAttentionConv2d. Default: 4.\n avg_down_stride (bool): Whether to use average pool for stride in\n Bottleneck. Default: True.\n kwargs (dict): Keyword arguments for ResNet.\n ' arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)), 200: (Bottleneck, (3, 24, 36, 3))} def __init__(self, groups=1, base_width=4, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs): self.groups = groups self.base_width = base_width self.radix = radix self.reduction_factor = reduction_factor self.avg_down_stride = avg_down_stride super(ResNeSt, self).__init__(**kwargs) def make_res_layer(self, **kwargs): 'Pack all blocks in a stage into a ``ResLayer``.' return ResLayer(groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, radix=self.radix, reduction_factor=self.reduction_factor, avg_down_stride=self.avg_down_stride, **kwargs)
class BasicBlock(BaseModule): expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None): super(BasicBlock, self).__init__(init_cfg) assert (dcn is None), 'Not implemented yet.' assert (plugins is None), 'Not implemented yet.' (self.norm1_name, norm1) = build_norm_layer(norm_cfg, planes, postfix=1) (self.norm2_name, norm2) = build_norm_layer(norm_cfg, planes, postfix=2) self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp @property def norm1(self): 'nn.Module: normalization layer after the first convolution layer' return getattr(self, self.norm1_name) @property def norm2(self): 'nn.Module: normalization layer after the second convolution layer' return getattr(self, self.norm2_name) def forward(self, x): 'Forward function.' def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) if (self.downsample is not None): identity = self.downsample(x) out += identity return out if (self.with_cp and x.requires_grad): out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out
class Bottleneck(BaseModule):
    """Bottleneck block for ResNet.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
    it is "caffe", the stride-two layer is the first 1x1 conv layer.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None,
                 init_cfg=None):
        """Bottleneck block for ResNet.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(init_cfg)
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None

        if self.with_plugins:
            # Collect plugin configs by insertion position.
            self.after_conv1_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv1'
            ]
            self.after_conv2_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv2'
            ]
            self.after_conv3_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv3'
            ]

        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(
                planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """Make plugins for the block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugins cfg to build.

        Returns:
            list[str]: List of the names of plugin.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            plugin = plugin.copy()
            name, layer = build_plugin_layer(
                plugin,
                in_channels=in_channels,
                postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Apply the named plugins to ``x`` in sequence."""
        out = x
        for name in plugin_names:
            # FIX: the original called `getattr(self, name)(x)`, which fed
            # every plugin the block input and discarded all but the last
            # plugin's output when several plugins share a position; feed
            # the running tensor `out` instead so plugins chain.
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity
            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)
        return out
@BACKBONES.register_module()
class ResNet(BaseModule):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        stem_channels (int | None): Number of stem channels. If not
            specified, it will be the same as ``base_channels``.
            Default: None.
        base_channels (int): Number of base channels of res layer.
            Default: 64.
        num_stages (int): Resnet stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the
            stride-two layer is the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 convs.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters.
        conv_cfg (dict | None): Config dict for convolution layers.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        dcn (dict | None): Config for deformable conv, applied per stage
            according to ``stage_with_dcn``.
        stage_with_dcn (Sequence[bool]): Whether to apply ``dcn`` in each
            stage; must match ``num_stages`` when ``dcn`` is set.
        plugins (list[dict]): List of plugins for stages, each dict
            contains ``cfg`` (required), ``position`` (one of
            'after_conv1', 'after_conv2', 'after_conv3') and an optional
            ``stages`` tuple with the same length as ``num_stages``.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for the last
            norm layer in resblocks to let them behave as identity.
        pretrained (str, optional): Deprecated, use ``init_cfg`` instead.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.

    Example:
        >>> from mmdet.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """

    # Mapping: depth -> (block class, number of blocks per stage).
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 in_channels=3,
                 stem_channels=None,
                 base_channels=64,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=(- 1),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True,
                 pretrained=None,
                 init_cfg=None):
        super(ResNet, self).__init__(init_cfg)
        self.zero_init_residual = zero_init_residual
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        block_init_cfg = None
        assert (not (init_cfg and pretrained)), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            # Legacy path: translate the deprecated ``pretrained`` string
            # into an equivalent Pretrained init_cfg.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                # Default initialization when nothing is provided.
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])
                ]
                block = self.arch_settings[depth][0]
                if self.zero_init_residual:
                    # Zero-init the last norm of each residual block so it
                    # starts as an identity mapping.
                    if (block is BasicBlock):
                        block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm2'))
                    elif (block is Bottleneck):
                        block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')
        self.depth = depth
        if (stem_channels is None):
            stem_channels = base_channels
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert ((num_stages >= 1) and (num_stages <= 4))
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.out_indices = out_indices
        assert (max(out_indices) < num_stages)
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if (dcn is not None):
            assert (len(stage_with_dcn) == num_stages)
        self.plugins = plugins
        (self.block, stage_blocks) = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels
        self._make_stem_layer(in_channels, stem_channels)
        # Build each residual stage and register it as ``layer{i+1}``.
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            if (plugins is not None):
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            # Channel count doubles every stage.
            planes = (base_channels * (2 ** i))
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                init_cfg=block_init_cfg)
            self.inplanes = (planes * self.block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        # Channel count of the final stage's output.
        self.feat_dim = ((self.block.expansion * base_channels) * (2 ** (len(self.stage_blocks) - 1)))

    def make_stage_plugins(self, plugins, stage_idx):
        """Select the plugins that apply to the ``stage_idx``-th stage.

        Each plugin dict may carry an optional ``stages`` tuple (same
        length as ``num_stages``); a plugin is kept for this stage when
        ``stages`` is missing or ``stages[stage_idx]`` is truthy. The
        ``stages`` key is popped so the returned dicts are ready to be
        passed to block construction.

        Examples:
            >>> plugins=[
            ...     dict(cfg=dict(type='xxx', arg1='xxx'),
            ...          stages=(False, True, True, True),
            ...          position='after_conv2'),
            ...     dict(cfg=dict(type='yyy'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3'),
            ... ]
            >>> self = ResNet(depth=18)
            >>> stage_plugins = self.make_stage_plugins(plugins, 0)
            >>> assert len(stage_plugins) == 1

        Args:
            plugins (list[dict]): List of plugins cfg to build. The postfix
                is required if multiple same type plugins are inserted.
            stage_idx (int): Index of stage to build.

        Returns:
            list[dict]: Plugins for current stage.
        """
        stage_plugins = []
        for plugin in plugins:
            # Copy before popping so the caller's dicts are not mutated.
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert ((stages is None) or (len(stages) == self.num_stages))
            if ((stages is None) or stages[stage_idx]):
                stage_plugins.append(plugin)
        return stage_plugins

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(**kwargs)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1"."""
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels, stem_channels):
        # Build the input stem: either three 3x3 convs (deep_stem) or the
        # classic 7x7 conv; both are followed by a 3x3 stride-2 max pool.
        if self.deep_stem:
            self.stem = nn.Sequential(
                build_conv_layer(self.conv_cfg, in_channels, (stem_channels // 2), kernel_size=3, stride=2, padding=1, bias=False),
                build_norm_layer(self.norm_cfg, (stem_channels // 2))[1],
                nn.ReLU(inplace=True),
                build_conv_layer(self.conv_cfg, (stem_channels // 2), (stem_channels // 2), kernel_size=3, stride=1, padding=1, bias=False),
                build_norm_layer(self.norm_cfg, (stem_channels // 2))[1],
                nn.ReLU(inplace=True),
                build_conv_layer(self.conv_cfg, (stem_channels // 2), stem_channels, kernel_size=3, stride=1, padding=1, bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False)
            (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        # Freeze the stem (stage 0) and stages 1..frozen_stages: switch
        # them to eval mode and stop their gradients.
        if (self.frozen_stages >= 0):
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x):
        """Forward function.

        Returns:
            tuple[Tensor]: Feature maps of the stages listed in
            ``out_indices``.
        """
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keeping the frozen
        stages and (optionally) all norm layers in eval mode."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            # Keep BN running stats frozen during training.
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    """ResNetV1d variant described in `Bag of Tricks
    <https://arxiv.org/pdf/1812.01187.pdf>`_.

    Compared with default ResNet (ResNetV1b), ResNetV1d replaces the 7x7
    conv in the input stem with three 3x3 convs, and in the downsampling
    block a 2x2 avg_pool with stride 2 is added before the conv, whose
    stride is changed to 1.
    """

    def __init__(self, **kwargs):
        # The V1d tweaks are exactly the deep stem plus avg-pool
        # downsampling options of the parent class.
        super(ResNetV1d, self).__init__(
            deep_stem=True, avg_down=True, **kwargs)
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeXt.

    Rebuilds conv1/conv2/conv3 (and their norm layers) of the parent
    bottleneck with a grouped middle 3x3 convolution of width
    ``floor(planes * base_width / base_channels) * groups``.

    Args:
        inplanes (int): Input channels of the block.
        planes (int): Base channels of the block; output channels are
            ``planes * expansion``.
        groups (int): Number of groups of the 3x3 convolution. Default: 1.
        base_width (int): Base width per group. Default: 4.
        base_channels (int): Base channel count of the network. Default: 64.
        **kwargs: Remaining arguments forwarded to ``_Bottleneck``.
    """

    # Output channels of the block are ``planes * expansion``.
    expansion = 4

    def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        if (groups == 1):
            width = self.planes
        else:
            # Per-group width scaled relative to the network base channels.
            width = (math.floor((self.planes * (base_width / base_channels))) * groups)
        # Rebuild the norm layers for the grouped ``width``; the parent
        # already registered modules under the same names, so add_module
        # below replaces them.
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, width, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, width, postfix=2)
        (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            # NOTE(review): pop() mutates self.dcn, which may be shared
            # with other blocks built from the same config dict -- verify
            # callers pass per-block copies.
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if ((not self.with_dcn) or fallback_on_stride):
            self.conv2 = build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        else:
            assert (self.conv_cfg is None), 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg, width, (self.planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        if self.with_plugins:
            # Drop the plugins the parent built for the old widths and
            # rebuild them for the grouped widths.
            self._del_block_plugins(((self.after_conv1_plugin_names + self.after_conv2_plugin_names) + self.after_conv3_plugin_names))
            self.after_conv1_plugin_names = self.make_block_plugins(width, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(width, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins((self.planes * self.expansion), self.after_conv3_plugins)

    def _del_block_plugins(self, plugin_names):
        """Delete plugins of the block if they exist.

        Args:
            plugin_names (list[str]): List of plugin names to delete.
        """
        assert isinstance(plugin_names, list)
        for plugin_name in plugin_names:
            del self._modules[plugin_name]
@BACKBONES.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone.

    A ResNet whose stages are built with grouped-convolution bottleneck
    blocks (see the local ``Bottleneck``).

    Args:
        depth (int): Depth of resnet, from {50, 101, 152}.
        groups (int): Group count of the grouped 3x3 convolutions.
        base_width (int): Base width per group.
        **kwargs: Remaining arguments accepted by :class:`ResNet`.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # Store the grouping settings *before* calling the parent
        # constructor: ResNet.__init__ invokes make_res_layer(), which
        # reads them.
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
    """VGG Backbone network for single-shot-detection.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            of the model.
        ceil_mode (bool): When True, will use `ceil` instead of `floor`
            to compute the output shape.
        out_indices (Sequence[int]): Output from which stages.
        out_feature_indices (Sequence[int]): Output from which feature map.
        pretrained (str, optional): model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.
        input_size (int, optional): Deprecated argument.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
            L2 normalization layer init scale.

    Example:
        >>> self = SSDVGG(input_size=300, depth=11)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 300, 300)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 1024, 19, 19)
        (1, 512, 10, 10)
        (1, 256, 5, 5)
        (1, 256, 3, 3)
        (1, 256, 1, 1)
    """

    # Extra-layer channel specification per input size.
    # NOTE(review): nothing in this chunk consumes this table; presumably
    # 'S' marks a stride-2 conv per the SSD extra-layer convention --
    # confirm against the consumer (e.g. the SSD neck).
    extra_setting = {
        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128)
    }

    def __init__(self,
                 depth,
                 with_last_pool=False,
                 ceil_mode=True,
                 out_indices=(3, 4),
                 out_feature_indices=(22, 34),
                 pretrained=None,
                 init_cfg=None,
                 input_size=None,
                 l2_norm_scale=None):
        super(SSDVGG, self).__init__(depth, with_last_pool=with_last_pool, ceil_mode=ceil_mode, out_indices=out_indices)
        # Append the SSD extra head layers after the plain VGG features:
        # pool5 (stride 1), fc6 as a dilated conv, and fc7 as a 1x1 conv.
        self.features.add_module(str(len(self.features)), nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
        self.features.add_module(str(len(self.features)), nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
        self.features.add_module(str(len(self.features)), nn.ReLU(inplace=True))
        self.features.add_module(str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
        self.features.add_module(str(len(self.features)), nn.ReLU(inplace=True))
        self.out_feature_indices = out_feature_indices
        assert (not (init_cfg and pretrained)), \
            'init_cfg and pretrained cannot be specified at the same time'
        if (init_cfg is not None):
            self.init_cfg = init_cfg
        elif isinstance(pretrained, str):
            # Legacy path: translate the deprecated ``pretrained`` string.
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            # Default initialization when nothing is provided.
            self.init_cfg = [
                dict(type='Kaiming', layer='Conv2d'),
                dict(type='Constant', val=1, layer='BatchNorm2d'),
                dict(type='Normal', std=0.01, layer='Linear')
            ]
        else:
            raise TypeError('pretrained must be a str or None')
        if (input_size is not None):
            warnings.warn('DeprecationWarning: input_size is deprecated')
        if (l2_norm_scale is not None):
            warnings.warn('DeprecationWarning: l2_norm_scale in VGG is deprecated, it has been moved to SSDNeck.')

    def init_weights(self, pretrained=None):
        # Skip VGG.init_weights in the MRO and delegate straight to the
        # grandparent; ``pretrained`` is accepted for backward
        # compatibility but unused.
        super(VGG, self).init_weights()

    def forward(self, x):
        """Forward function.

        Returns:
            Tensor | tuple[Tensor]: The single feature map if only one
            index is listed in ``out_feature_indices``, otherwise a tuple
            of feature maps.
        """
        outs = []
        for (i, layer) in enumerate(self.features):
            x = layer(x)
            if (i in self.out_feature_indices):
                outs.append(x)
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
    """Deprecated alias of :class:`mmdet.models.necks.ssd_neck.L2Norm`.

    Kept only for backward compatibility; constructing it emits a
    deprecation warning and otherwise behaves exactly like the neck
    implementation.
    """

    def __init__(self, **kwargs):
        super(L2Norm, self).__init__(**kwargs)
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py is deprecated, please use L2Norm in mmdet/models/necks/ssd_neck.py instead')
class TridentConv(BaseModule):
    """Trident Convolution Module.

    Applies one shared convolution weight to several input branches, each
    branch with its own dilation (and matching padding), so the branches
    share parameters but see different receptive fields.

    Args:
        in_channels (int): Number of channels in input.
        out_channels (int): Number of channels in output.
        kernel_size (int): Size of convolution kernel.
        stride (int, optional): Convolution stride. Default: 1.
        trident_dilations (tuple[int, int, int], optional): Dilations of
            different trident branch. Default: (1, 2, 3).
        test_branch_idx (int, optional): In inference, all 3 branches will
            be used if `test_branch_idx==-1`, otherwise only branch with
            index `test_branch_idx` will be used. Default: 1.
        bias (bool, optional): Whether to use bias in convolution or not.
            Default: False.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 trident_dilations=(1, 2, 3),
                 test_branch_idx=1,
                 bias=False,
                 init_cfg=None):
        super(TridentConv, self).__init__(init_cfg)
        self.num_branch = len(trident_dilations)
        self.with_bias = bias
        self.test_branch_idx = test_branch_idx
        self.stride = _pair(stride)
        self.kernel_size = _pair(kernel_size)
        # _pair returns an iterable argument unchanged, so paddings mirror
        # the per-branch dilations (padding == dilation preserves spatial
        # size for a 3x3 kernel at stride 1).
        self.paddings = _pair(trident_dilations)
        self.dilations = trident_dilations
        self.in_channels = in_channels
        self.out_channels = out_channels
        # One weight shared by every branch. (The original also stored the
        # raw ``bias`` bool in ``self.bias`` here, a dead store that was
        # immediately overwritten below; it is removed.)
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.bias = None

    def extra_repr(self):
        """Return the extra information string used by ``repr()``."""
        tmpstr = f'in_channels={self.in_channels}'
        tmpstr += f', out_channels={self.out_channels}'
        tmpstr += f', kernel_size={self.kernel_size}'
        tmpstr += f', num_branch={self.num_branch}'
        tmpstr += f', test_branch_idx={self.test_branch_idx}'
        tmpstr += f', stride={self.stride}'
        tmpstr += f', paddings={self.paddings}'
        tmpstr += f', dilations={self.dilations}'
        tmpstr += f', bias={self.bias}'
        return tmpstr

    def forward(self, inputs):
        """Convolve each active branch with the shared weight.

        Args:
            inputs (list[Tensor]): One input tensor per active branch
                (a single-element list when only the test branch runs).

        Returns:
            list[Tensor]: Convolution output per active branch.
        """
        if self.training or self.test_branch_idx == -1:
            # All branches: zip each input with its branch's dilation and
            # padding. (Loop variable renamed from ``input`` to avoid
            # shadowing the builtin.)
            outputs = [
                F.conv2d(inp, self.weight, self.bias, self.stride, padding,
                         dilation)
                for (inp, dilation, padding) in zip(inputs, self.dilations,
                                                    self.paddings)
            ]
        else:
            # Inference with a fixed branch: only that branch's dilation
            # and padding are used.
            assert len(inputs) == 1
            outputs = [
                F.conv2d(inputs[0], self.weight, self.bias, self.stride,
                         self.paddings[self.test_branch_idx],
                         self.dilations[self.test_branch_idx])
            ]
        return outputs
class TridentBottleneck(Bottleneck):
    """BottleBlock for TridentResNet.

    Runs the bottleneck over a list of parallel branches; conv1/conv3 and
    the norm layers are shared across branches, while conv2 is a
    :class:`TridentConv` applying a different dilation per branch.

    Args:
        trident_dilations (tuple[int, int, int]): Dilations of different
            trident branch.
        test_branch_idx (int): In inference, all 3 branches will be used
            if `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
        concat_output (bool): Whether to concat the output list to a
            Tensor. `True` only in the last Block.
    """

    def __init__(self, trident_dilations, test_branch_idx, concat_output, **kwargs):
        super(TridentBottleneck, self).__init__(**kwargs)
        self.trident_dilations = trident_dilations
        self.num_branch = len(trident_dilations)
        self.concat_output = concat_output
        self.test_branch_idx = test_branch_idx
        # Replace the parent's conv2 with the multi-branch trident conv.
        self.conv2 = TridentConv(self.planes, self.planes, kernel_size=3, stride=self.conv2_stride, bias=False, trident_dilations=self.trident_dilations, test_branch_idx=test_branch_idx, init_cfg=dict(type='Kaiming', distribution='uniform', mode='fan_in', override=dict(name='conv2')))

    def forward(self, x):

        def _inner_forward(x):
            # One branch at test time unless all branches are requested.
            num_branch = (self.num_branch if (self.training or (self.test_branch_idx == (- 1))) else 1)
            identity = x
            if (not isinstance(x, list)):
                # First trident block: replicate the single tensor across
                # branches; the downsample (if any) is applied per branch.
                x = ((x,) * num_branch)
                identity = x
                if (self.downsample is not None):
                    identity = [self.downsample(b) for b in x]
            out = [self.conv1(b) for b in x]
            out = [self.norm1(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv1_plugin_names)
            # conv2 is the TridentConv: it takes/returns the whole branch
            # list at once.
            out = self.conv2(out)
            out = [self.norm2(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv2_plugin_names)
            out = [self.conv3(b) for b in out]
            out = [self.norm3(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv3_plugin_names)
            # Per-branch residual connection.
            out = [(out_b + identity_b) for (out_b, identity_b) in zip(out, identity)]
            return out

        # NOTE(review): when x is already a branch list (every block after
        # the first), ``x.requires_grad`` would raise AttributeError --
        # presumably with_cp is not used with TridentResNet; confirm.
        if (self.with_cp and x.requires_grad):
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = [self.relu(b) for b in out]
        if self.concat_output:
            # Last block of the stage: stack branches along the batch dim.
            out = torch.cat(out, dim=0)
        return out
def make_trident_res_layer(block,
                           inplanes,
                           planes,
                           num_blocks,
                           stride=1,
                           trident_dilations=(1, 2, 3),
                           style='pytorch',
                           with_cp=False,
                           conv_cfg=None,
                           norm_cfg=dict(type='BN'),
                           dcn=None,
                           plugins=None,
                           test_branch_idx=-1):
    """Build Trident Res Layers.

    Stacks ``num_blocks`` trident blocks into an ``nn.Sequential``. Only
    the first block receives the stage stride and the (optional)
    downsample shortcut; only the last block concatenates its branch
    outputs.
    """
    # A projection shortcut is needed whenever the first block changes the
    # spatial or channel dimensions.
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1])

    layers = []
    for idx in range(num_blocks):
        is_first = idx == 0
        is_last = idx == num_blocks - 1
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride if is_first else 1,
                trident_dilations=trident_dilations,
                downsample=downsample if is_first else None,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=plugins,
                test_branch_idx=test_branch_idx,
                concat_output=is_last))
        inplanes = planes * block.expansion
    return nn.Sequential(*layers)
@BACKBONES.register_module()
class TridentResNet(ResNet):
    """TridentResNet backbone.

    The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
    ResNet, while in stage 3, Trident BottleBlock is utilized to replace
    the normal BottleBlock to yield trident output. Different branches
    share the convolution weight but use different dilations to achieve
    multi-scale output.

    .. code-block:: none

                                    / stage3(b0) \
        x - stem - stage1 - stage2 -  stage3(b1)  - output
                                    \ stage3(b2) /

    Args:
        depth (int): Depth of resnet, from {50, 101, 152}.
        num_branch (int): Number of branches in TridentNet.
        test_branch_idx (int): In inference, all 3 branches will be used
            if `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
        trident_dilations (tuple[int]): Dilations of different trident
            branch. len(trident_dilations) should be equal to num_branch.
    """

    def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, **kwargs):
        assert (num_branch == len(trident_dilations))
        assert (depth in (50, 101, 152))
        super(TridentResNet, self).__init__(depth, **kwargs)
        # Trident surgery only applies to a 3-stage ResNet: the last stage
        # is rebuilt below with trident blocks.
        assert (self.num_stages == 3)
        self.test_branch_idx = test_branch_idx
        self.num_branch = num_branch
        last_stage_idx = (self.num_stages - 1)
        stride = self.strides[last_stage_idx]
        dilation = trident_dilations
        dcn = (self.dcn if self.stage_with_dcn[last_stage_idx] else None)
        if (self.plugins is not None):
            stage_plugins = self.make_stage_plugins(self.plugins, last_stage_idx)
        else:
            stage_plugins = None
        planes = (self.base_channels * (2 ** last_stage_idx))
        # Rebuild the last stage with TridentBottleneck blocks. The input
        # channel count is the output of the previous stage.
        res_layer = make_trident_res_layer(TridentBottleneck, inplanes=((self.block.expansion * self.base_channels) * (2 ** (last_stage_idx - 1))), planes=planes, num_blocks=self.stage_blocks[last_stage_idx], stride=stride, trident_dilations=dilation, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, test_branch_idx=self.test_branch_idx)
        layer_name = f'layer{(last_stage_idx + 1)}'
        # Overwrite the module ResNet.__init__ registered for this stage
        # and keep res_layers pointing at the (same) layer name.
        self.__setattr__(layer_name, res_layer)
        self.res_layers.pop(last_stage_idx)
        self.res_layers.insert(last_stage_idx, layer_name)
        # Re-apply freezing since the last stage module was replaced.
        self._freeze_stages()
def build_backbone(cfg):
    """Build a backbone from its config via the ``BACKBONES`` registry.

    Args:
        cfg (dict): Backbone config containing a registered ``type``.

    Returns:
        The backbone instance built by the registry.
    """
    backbone = BACKBONES.build(cfg)
    return backbone
def build_neck(cfg):
    """Build a neck from its config via the ``NECKS`` registry.

    Args:
        cfg (dict): Neck config containing a registered ``type``.

    Returns:
        The neck instance built by the registry.
    """
    neck = NECKS.build(cfg)
    return neck
def build_roi_extractor(cfg):
    """Build a RoI extractor via the ``ROI_EXTRACTORS`` registry.

    Args:
        cfg (dict): RoI extractor config containing a registered ``type``.

    Returns:
        The RoI extractor instance built by the registry.
    """
    roi_extractor = ROI_EXTRACTORS.build(cfg)
    return roi_extractor
def build_shared_head(cfg):
    """Build a shared head via the ``SHARED_HEADS`` registry.

    Args:
        cfg (dict): Shared head config containing a registered ``type``.

    Returns:
        The shared head instance built by the registry.
    """
    shared_head = SHARED_HEADS.build(cfg)
    return shared_head
def build_head(cfg):
    """Build a head from its config via the ``HEADS`` registry.

    Args:
        cfg (dict): Head config containing a registered ``type``.

    Returns:
        The head instance built by the registry.
    """
    head = HEADS.build(cfg)
    return head
def build_loss(cfg):
    """Build a loss module via the ``LOSSES`` registry.

    Args:
        cfg (dict): Loss config containing a registered ``type``.

    Returns:
        The loss instance built by the registry.
    """
    loss = LOSSES.build(cfg)
    return loss
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a detector via the ``DETECTORS`` registry.

    Passing ``train_cfg``/``test_cfg`` here is deprecated; they should
    live inside ``cfg`` itself. Supplying a config in both places is an
    error.

    Args:
        cfg (dict): Detector config containing a registered ``type``.
        train_cfg (dict, optional): Deprecated training config.
        test_cfg (dict, optional): Deprecated testing config.

    Returns:
        The detector instance built by the registry.
    """
    legacy_cfg_passed = train_cfg is not None or test_cfg is not None
    if legacy_cfg_passed:
        warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model', UserWarning)
    # Reject a config specified both at the outer call and inside `cfg`.
    assert cfg.get('train_cfg') is None or train_cfg is None, 'train_cfg specified in both outer field and model field '
    assert cfg.get('test_cfg') is None or test_cfg is None, 'test_cfg specified in both outer field and model field '
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return DETECTORS.build(cfg, default_args=default_args)
@HEADS.register_module() class AnchorFreeHead(BaseDenseHead, BBoxTestMixin): 'Anchor-free head (FCOS, Fovea, RepPoints, etc.).\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n feat_channels (int): Number of hidden channels. Used in child classes.\n stacked_convs (int): Number of stacking convs of the head.\n strides (tuple): Downsample factor of each feature map.\n dcn_on_last_conv (bool): If true, use dcn in the last layer of\n towers. Default: False.\n conv_bias (bool | str): If specified as `auto`, it will be decided by\n the norm_cfg. Bias of conv will be set as True if `norm_cfg` is\n None, otherwise False. Default: "auto".\n loss_cls (dict): Config of classification loss.\n loss_bbox (dict): Config of localization loss.\n bbox_coder (dict): Config of bbox coder. Defaults\n \'DistancePointBBoxCoder\'.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Config dict for normalization layer. 
Default: None.\n train_cfg (dict): Training config of anchor head.\n test_cfg (dict): Testing config of anchor head.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n ' _version = 1 def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=4, strides=(4, 8, 16, 32, 64), dcn_on_last_conv=False, conv_bias='auto', loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), bbox_coder=dict(type='DistancePointBBoxCoder'), conv_cfg=None, norm_cfg=None, train_cfg=None, test_cfg=None, init_cfg=dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='conv_cls', std=0.01, bias_prob=0.01))): super(AnchorFreeHead, self).__init__(init_cfg) self.num_classes = num_classes self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) if self.use_sigmoid_cls: self.cls_out_channels = num_classes else: self.cls_out_channels = (num_classes + 1) self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.dcn_on_last_conv = dcn_on_last_conv assert ((conv_bias == 'auto') or isinstance(conv_bias, bool)) self.conv_bias = conv_bias self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.bbox_coder = build_bbox_coder(bbox_coder) self.prior_generator = MlvlPointGenerator(strides) self.num_base_priors = self.prior_generator.num_base_priors[0] self.train_cfg = train_cfg self.test_cfg = test_cfg self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self._init_layers() def _init_layers(self): 'Initialize layers of the head.' self._init_cls_convs() self._init_reg_convs() self._init_predictor() def _init_cls_convs(self): 'Initialize classification conv layers of the head.' 
self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = (self.in_channels if (i == 0) else self.feat_channels) if (self.dcn_on_last_conv and (i == (self.stacked_convs - 1))): conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.cls_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_reg_convs(self): 'Initialize bbox regression conv layers of the head.' self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = (self.in_channels if (i == 0) else self.feat_channels) if (self.dcn_on_last_conv and (i == (self.stacked_convs - 1))): conv_cfg = dict(type='DCNv2') else: conv_cfg = self.conv_cfg self.reg_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=self.norm_cfg, bias=self.conv_bias)) def _init_predictor(self): 'Initialize predictor layers of the head.' self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1) self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): 'Hack some keys of the model state dict so that can load checkpoints\n of previous version.' 
version = local_metadata.get('version', None) if (version is None): bbox_head_keys = [k for k in state_dict.keys() if k.startswith(prefix)] ori_predictor_keys = [] new_predictor_keys = [] for key in bbox_head_keys: ori_predictor_keys.append(key) key = key.split('.') conv_name = None if key[1].endswith('cls'): conv_name = 'conv_cls' elif key[1].endswith('reg'): conv_name = 'conv_reg' elif key[1].endswith('centerness'): conv_name = 'conv_centerness' else: assert NotImplementedError if (conv_name is not None): key[1] = conv_name new_predictor_keys.append('.'.join(key)) else: ori_predictor_keys.pop((- 1)) for i in range(len(new_predictor_keys)): state_dict[new_predictor_keys[i]] = state_dict.pop(ori_predictor_keys[i]) super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, feats): 'Forward features from the upstream network.\n\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: Usually contain classification scores and bbox predictions.\n cls_scores (list[Tensor]): Box scores for each scale level,\n each is a 4D-tensor, the channel number is\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * 4.\n ' return multi_apply(self.forward_single, feats)[:2] def forward_single(self, x): 'Forward features of a single scale level.\n\n Args:\n x (Tensor): FPN feature maps of the specified stride.\n\n Returns:\n tuple: Scores for each class, bbox predictions, features\n after classification and regression conv layers, some\n models needs these features like FCOS.\n ' cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.conv_cls(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.conv_reg(reg_feat) return (cls_score, bbox_pred, cls_feat, 
                reg_feat)

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
        """
        raise NotImplementedError

    @abstractmethod
    def get_targets(self, points, gt_bboxes_list, gt_labels_list):
        """Compute regression, classification and centerness targets for
        points in multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels_list (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
        """
        raise NotImplementedError

    def _get_points_single(self, featmap_size, stride, dtype, device,
                           flatten=False):
        """Get points of a single scale level.

        This function will be deprecated soon.
        """
        warnings.warn('`_get_points_single` in `AnchorFreeHead` will be deprecated soon, we support a multi level point generator nowyou can get points of a single level feature map with `self.prior_generator.single_level_grid_priors` ')
        (h, w) = featmap_size
        # One coordinate per feature-map cell, cast to the requested dtype.
        x_range = torch.arange(w, device=device).to(dtype)
        y_range = torch.arange(h, device=device).to(dtype)
        (y, x) = torch.meshgrid(y_range, x_range)
        if flatten:
            y = y.flatten()
            x = x.flatten()
        return (y, x)

    def get_points(self, featmap_sizes, dtype, device, flatten=False):
        """Get points according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            dtype (torch.dtype): Type of points.
            device (torch.device): Device of points.

        Returns:
            tuple: points of each image.
        """
        warnings.warn('`get_points` in `AnchorFreeHead` will be deprecated soon, we support a multi level point generator nowyou can get points of all levels with `self.prior_generator.grid_priors` ')
        mlvl_points = []
        # One set of points per FPN level, spaced by that level's stride.
        for i in range(len(featmap_sizes)):
            mlvl_points.append(
                self._get_points_single(featmap_sizes[i], self.strides[i],
                                        dtype, device, flatten))
        return mlvl_points

    def aug_test(self, feats, img_metas, rescale=False):
        """Test function with test time augmentation.

        Args:
            feats (list[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains features for all images in the batch.
            img_metas (list[list[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch. each dict has image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.

        Returns:
            list[ndarray]: bbox results of each class
        """
        # Delegates to the shared TTA bbox aggregation mixin.
        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
    """Base class for mask heads used in One-Stage Instance Segmentation."""

    def __init__(self, init_cfg):
        super(BaseMaskHead, self).__init__(init_cfg)

    @abstractmethod
    def loss(self, **kwargs):
        """Compute the losses of the mask head."""
        pass

    @abstractmethod
    def get_results(self, **kwargs):
        """Get processed :obj:`InstanceData` of multiple images."""
        pass

    def forward_train(self,
                      x,
                      gt_labels,
                      gt_masks,
                      img_metas,
                      gt_bboxes=None,
                      gt_bboxes_ignore=None,
                      positive_infos=None,
                      **kwargs):
        """Run forward and compute the mask losses.

        Args:
            x (list[Tensor] | tuple[Tensor]): Features from FPN, each with a
                shape (B, C, H, W).
            gt_labels (list[Tensor]): Ground truth labels of all images,
                each with a shape (num_gts,).
            gt_masks (list[Tensor]): Masks for each bbox, each with a shape
                (num_gts, h, w).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes (list[Tensor], optional): Ground truth bboxes of the
                image, each with a shape (num_gts, 4).
            gt_bboxes_ignore (list[Tensor], optional): Ground truth bboxes to
                be ignored, each with a shape (num_ignored_gts, 4).
            positive_infos (list[:obj:`InstanceData`], optional): Information
                of positive samples. Supplied when label assignment happens
                outside the mask head (e.g. in the bbox head of YOLACT or
                CondInst); ``None`` when the mask head assigns labels itself
                (e.g. SOLO). All values in it should have shape
                (num_positive_samples, *).

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # Only pass the positive-sample info through when the assignment was
        # produced elsewhere.
        head_outs = self(x) if positive_infos is None else self(
            x, positive_infos)
        assert isinstance(head_outs, tuple), 'Forward results should be a tuple, even if only one item is returned'
        return self.loss(
            *head_outs,
            gt_labels=gt_labels,
            gt_masks=gt_masks,
            img_metas=img_metas,
            gt_bboxes=gt_bboxes,
            gt_bboxes_ignore=gt_bboxes_ignore,
            positive_infos=positive_infos,
            **kwargs)

    def simple_test(self,
                    feats,
                    img_metas,
                    rescale=False,
                    instances_list=None,
                    **kwargs):
        """Test function without test-time augmentation.

        Args:
            feats (tuple[torch.Tensor]): Multi-level features from the
                upstream network, each is a 4D-tensor.
            img_metas (list[dict]): List of image information.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.
            instances_list (list[obj:`InstanceData`], optional): Detection
                results of each image after the post process. Only exists
                when there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.

        Returns:
            list[obj:`InstanceData`]: Instance segmentation results of each
            image after the post process. Each item usually contains:

                - scores (Tensor): Classification scores, has a shape
                  (num_instance,)
                - labels (Tensor): Has a shape (num_instances,).
                - masks (Tensor): Processed mask results, has a shape
                  (num_instances, h, w).
        """
        if instances_list is None:
            head_outs = self(feats)
        else:
            head_outs = self(feats, instances_list=instances_list)
        # ``get_results`` consumes the raw head outputs plus the image metas.
        return self.get_results(
            *(head_outs + (img_metas, )),
            rescale=rescale,
            instances_list=instances_list,
            **kwargs)

    def onnx_export(self, img, img_metas):
        raise NotImplementedError(f'{self.__class__.__name__} does not support ONNX EXPORT')
@HEADS.register_module()
class GARetinaHead(GuidedAnchorHead):
    """Guided-Anchor-based RetinaNet head."""

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 **kwargs):
        # Default init: normal init for all convs, with a higher-prior bias on
        # the anchor-location (``conv_loc``) and classification
        # (``retina_cls``) predictors.
        if (init_cfg is None):
            init_cfg = dict(
                type='Normal',
                layer='Conv2d',
                std=0.01,
                override=[
                    dict(
                        type='Normal',
                        name='conv_loc',
                        std=0.01,
                        bias_prob=0.01),
                    dict(
                        type='Normal',
                        name='retina_cls',
                        std=0.01,
                        bias_prob=0.01)
                ])
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(GARetinaHead, self).__init__(
            num_classes, in_channels, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        # Shared-style stacked conv towers for classification and regression.
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = (self.in_channels if (i == 0) else self.feat_channels)
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        # Guided-anchoring branches: anchor location (1 channel) and anchor
        # shape (2 channels per anchor).
        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
        self.conv_shape = nn.Conv2d(self.feat_channels,
                                    (self.num_anchors * 2), 1)
        # Deformable feature adaption driven by the predicted anchor shapes.
        self.feature_adaption_cls = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        self.feature_adaption_reg = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        # Masked convs allow inference to skip positions filtered out by the
        # location prediction.
        self.retina_cls = MaskedConv2d(
            self.feat_channels, (self.num_base_priors * self.cls_out_channels),
            3,
            padding=1)
        self.retina_reg = MaskedConv2d(
            self.feat_channels, (self.num_base_priors * 4), 3, padding=1)

    def forward_single(self, x):
        """Forward feature map of a single scale level."""
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)
        loc_pred = self.conv_loc(cls_feat)
        shape_pred = self.conv_shape(reg_feat)
        # Adapt both towers' features to the predicted anchor shapes before
        # the final predictors.
        cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
        reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
        # At test time, only evaluate positions whose predicted objectness
        # clears the location filter threshold.
        if (not self.training):
            mask = (loc_pred.sigmoid()[0] >= self.loc_filter_thr)
        else:
            mask = None
        cls_score = self.retina_cls(cls_feat, mask)
        bbox_pred = self.retina_reg(reg_feat, mask)
        return (cls_score, bbox_pred, shape_pred, loc_pred)
@HEADS.register_module()
class LADHead(PAAHead):
    """Label Assignment Head from the paper: `Improving Object Detection by
    Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_"""

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
    def get_label_assignment(self,
                             cls_scores,
                             bbox_preds,
                             iou_preds,
                             gt_bboxes,
                             gt_labels,
                             img_metas,
                             gt_bboxes_ignore=None):
        """Get label assignment (from teacher).

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level.
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            iou_preds (list[Tensor]): iou_preds for each scale
                level with shape (N, num_anchors * 1, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when are computing the loss.

        Returns:
            tuple: Returns a tuple containing label assignment variables.

                - labels (Tensor): Labels of all anchors, each with
                  shape (num_anchors,).
                - labels_weight (Tensor): Label weights of all anchor.
                  each with shape (num_anchors,).
                - bboxes_target (Tensor): BBox targets of all anchors.
                  each with shape (num_anchors, 4).
                - bboxes_weight (Tensor): BBox weights of all anchors.
                  each with shape (num_anchors, 4).
                - pos_inds_flatten (Tensor): Contains all index of positive
                  sample in all anchor.
                - pos_anchors (Tensor): Positive anchors.
                - num_pos (int): Number of positive anchors.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = (self.cls_out_channels if self.use_sigmoid_cls else 1)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels)
        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
         pos_gt_index) = cls_reg_targets
        # Re-arrange predictions from per-level to per-image lists and flatten
        # the spatial dims so they align with the (per-image) targets.
        cls_scores = levels_to_images(cls_scores)
        cls_scores = [
            item.reshape((- 1), self.cls_out_channels) for item in cls_scores
        ]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape((- 1), 4) for item in bbox_preds]
        # Per-anchor loss of the positive samples, used as the PAA score.
        (pos_losses_list, ) = multi_apply(self.get_pos_loss, anchor_list,
                                          cls_scores, bbox_preds, labels,
                                          labels_weight, bboxes_target,
                                          bboxes_weight, pos_inds)
        with torch.no_grad():
            # Probabilistic anchor reassignment (no gradients needed).
            (reassign_labels, reassign_label_weight, reassign_bbox_weights,
             num_pos) = multi_apply(self.paa_reassign, pos_losses_list,
                                    labels, labels_weight, bboxes_weight,
                                    pos_inds, pos_gt_index, anchor_list)
            num_pos = sum(num_pos)
        # Flatten all per-image tensors into single batch-wide tensors.
        labels = torch.cat(reassign_labels, 0).view((- 1))
        flatten_anchors = torch.cat(
            [torch.cat(item, 0) for item in anchor_list])
        labels_weight = torch.cat(reassign_label_weight, 0).view((- 1))
        bboxes_target = torch.cat(bboxes_target,
                                  0).view((- 1), bboxes_target[0].size((- 1)))
        # Positive anchors have a valid foreground class label.
        pos_inds_flatten = ((labels >= 0) &
                            (labels < self.num_classes)).nonzero().reshape(
                                (- 1))
        if num_pos:
            pos_anchors = flatten_anchors[pos_inds_flatten]
        else:
            pos_anchors = None
        label_assignment_results = (labels, labels_weight, bboxes_target,
                                    bboxes_weight, pos_inds_flatten,
                                    pos_anchors, num_pos)
        return label_assignment_results

    def forward_train(self,
                      x,
                      label_assignment_results,
                      img_metas,
                      gt_bboxes,
                      gt_labels=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Forward train with the available label assignment (student receives
        from teacher).

        Args:
            x (list[Tensor]): Features from FPN.
            label_assignment_results (tuple): As the outputs defined in the
                function `self.get_label_assignment`.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes (Tensor): Ground truth bboxes of the image,
                shape (num_gts, 4).
            gt_labels (Tensor): Ground truth labels of each box,
                shape (num_gts,).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).

        Returns:
            losses: (dict[str, Tensor]): A dictionary of loss components.
        """
        outs = self(x)
        if (gt_labels is None):
            loss_inputs = (outs + (gt_bboxes, img_metas))
        else:
            loss_inputs = (outs + (gt_bboxes, gt_labels, img_metas))
        # The student reuses the teacher's label assignment instead of
        # recomputing its own.
        losses = self.loss(
            *loss_inputs,
            gt_bboxes_ignore=gt_bboxes_ignore,
            label_assignment_results=label_assignment_results)
        return losses

    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             iou_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None,
             label_assignment_results=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            iou_preds (list[Tensor]): iou_preds for each scale
                level with shape (N, num_anchors * 1, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when are computing the loss.
            label_assignment_results (tuple): As the outputs defined in the
                function `self.get_label_assignment`.

        Returns:
            dict[str, Tensor]: A dictionary of loss gmm_assignment.
        """
        # Unpack the (teacher-provided) assignment; gt_* args are unused here
        # because targets were already computed during assignment.
        (labels, labels_weight, bboxes_target, bboxes_weight,
         pos_inds_flatten, pos_anchors, num_pos) = label_assignment_results
        cls_scores = levels_to_images(cls_scores)
        cls_scores = [
            item.reshape((- 1), self.cls_out_channels) for item in cls_scores
        ]
        bbox_preds = levels_to_images(bbox_preds)
        bbox_preds = [item.reshape((- 1), 4) for item in bbox_preds]
        iou_preds = levels_to_images(iou_preds)
        iou_preds = [item.reshape((- 1), 1) for item in iou_preds]
        # Collapse the per-image lists into single flattened tensors.
        cls_scores = torch.cat(cls_scores,
                               0).view((- 1), cls_scores[0].size((- 1)))
        bbox_preds = torch.cat(bbox_preds,
                               0).view((- 1), bbox_preds[0].size((- 1)))
        iou_preds = torch.cat(iou_preds, 0).view((- 1), iou_preds[0].size((-
                                                                           1)))
        losses_cls = self.loss_cls(
            cls_scores,
            labels,
            labels_weight,
            avg_factor=max(num_pos, len(img_metas)))
        if num_pos:
            pos_bbox_pred = self.bbox_coder.decode(
                pos_anchors, bbox_preds[pos_inds_flatten])
            pos_bbox_target = bboxes_target[pos_inds_flatten]
            # IoU branch regresses the IoU between decoded preds and targets;
            # detach so it does not backprop into the bbox branch.
            iou_target = bbox_overlaps(
                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
            losses_iou = self.loss_centerness(
                iou_preds[pos_inds_flatten],
                iou_target.unsqueeze((- 1)),
                avg_factor=num_pos)
            losses_bbox = self.loss_bbox(
                pos_bbox_pred, pos_bbox_target, avg_factor=num_pos)
        else:
            # No positives: keep zero-valued losses attached to the graph.
            losses_iou = (iou_preds.sum() * 0)
            losses_bbox = (bbox_preds.sum() * 0)
        return dict(
            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
    """Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.

    It is quite similar with FCOS head, except for the searched structure of
    classification branch and bbox regression branch, where a structure of
    "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
    """

    def __init__(self, *args, init_cfg=None, **kwargs):
        # Default initialization: Caffe2-style Xavier for the towers, normal
        # init for the predictors with a biased-prior classification head.
        if init_cfg is None:
            init_cfg = [
                dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
                dict(
                    type='Normal',
                    std=0.01,
                    override=[
                        dict(name='conv_reg'),
                        dict(name='conv_centerness'),
                        dict(
                            name='conv_cls',
                            type='Normal',
                            std=0.01,
                            bias_prob=0.01)
                    ]),
            ]
        super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        # Searched branch structure: dconv3x3 -> conv3x3 -> dconv3x3 -> conv1x1
        dconv3x3_config = dict(
            type='DCNv2',
            kernel_size=3,
            use_bias=True,
            deform_groups=2,
            padding=1)
        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
        conv1x1_config = dict(type='Conv', kernel_size=1)
        self.arch_config = [
            dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
        ]
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for idx, arch_op in enumerate(self.arch_config):
            # Work on a copy: the pops below must not mutate arch_config.
            op_cfg = copy.deepcopy(arch_op)
            assert isinstance(op_cfg, dict)
            in_chn = self.in_channels if idx == 0 else self.feat_channels
            # Split the op spec into ConvModule kwargs and the conv_cfg rest.
            has_bias = op_cfg.pop('use_bias', False)
            pad = op_cfg.pop('padding', 0)
            kernel = op_cfg.pop('kernel_size')
            layer = ConvModule(
                in_chn,
                self.feat_channels,
                kernel,
                stride=1,
                padding=pad,
                norm_cfg=self.norm_cfg,
                bias=has_bias,
                conv_cfg=op_cfg)
            # Both towers get an independent deep copy of the same layer.
            self.cls_convs.append(copy.deepcopy(layer))
            self.reg_convs.append(copy.deepcopy(layer))

        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        # One learnable scale per FPN stride for the bbox branch.
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
@HEADS.register_module()
class PISARetinaHead(RetinaHead):
    """PISA Retinanet Head.

    The head owns the same structure with Retinanet Head, but differs in two
    aspects:
    1. Importance-based Sample Reweighting Positive (ISR-P) is applied to
       change the positive loss weights.
    2. Classification-aware regression loss is adopted as a third loss.
    """

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj, 4).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprise classification loss, regression loss and
            carl loss.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        label_channels = (self.cls_out_channels if self.use_sigmoid_cls else 1)
        # ``return_sampling_results=True``: ISR-P needs the sampling results.
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            return_sampling_results=True)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
        num_total_samples = ((num_total_pos + num_total_neg)
                             if self.sampling else num_total_pos)
        # Rearrange per-image anchors back into per-level lists.
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(torch.cat(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list,
                                           num_level_anchors)
        num_imgs = len(img_metas)
        # Flatten predictions, targets and anchors to (num_total_anchors, *)
        # so ISR-P / CARL can operate on the whole batch at once.
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, (- 1),
                                                  label_channels)
            for cls_score in cls_scores
        ]
        flatten_cls_scores = torch.cat(
            flatten_cls_scores,
            dim=1).reshape((- 1), flatten_cls_scores[0].size((- 1)))
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, (- 1), 4)
            for bbox_pred in bbox_preds
        ]
        flatten_bbox_preds = torch.cat(
            flatten_bbox_preds,
            dim=1).view((- 1), flatten_bbox_preds[0].size((- 1)))
        flatten_labels = torch.cat(labels_list, dim=1).reshape((- 1))
        flatten_label_weights = torch.cat(
            label_weights_list, dim=1).reshape((- 1))
        flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape((- 1), 4)
        flatten_bbox_targets = torch.cat(
            bbox_targets_list, dim=1).reshape((- 1), 4)
        flatten_bbox_weights = torch.cat(
            bbox_weights_list, dim=1).reshape((- 1), 4)
        # ISR-P: reweight positive samples by their estimated importance.
        isr_cfg = self.train_cfg.get('isr', None)
        if (isr_cfg is not None):
            all_targets = (flatten_labels, flatten_label_weights,
                           flatten_bbox_targets, flatten_bbox_weights)
            with torch.no_grad():
                all_targets = isr_p(
                    flatten_cls_scores,
                    flatten_bbox_preds,
                    all_targets,
                    flatten_anchors,
                    sampling_results_list,
                    bbox_coder=self.bbox_coder,
                    loss_cls=self.loss_cls,
                    num_class=self.num_classes,
                    **self.train_cfg.isr)
            (flatten_labels, flatten_label_weights, flatten_bbox_targets,
             flatten_bbox_weights) = all_targets
        losses_cls = self.loss_cls(
            flatten_cls_scores,
            flatten_labels,
            flatten_label_weights,
            avg_factor=num_total_samples)
        losses_bbox = self.loss_bbox(
            flatten_bbox_preds,
            flatten_bbox_targets,
            flatten_bbox_weights,
            avg_factor=num_total_samples)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        # CARL: classification-aware regression loss (third loss term).
        carl_cfg = self.train_cfg.get('carl', None)
        if (carl_cfg is not None):
            loss_carl = carl_loss(
                flatten_cls_scores,
                flatten_labels,
                flatten_bbox_preds,
                flatten_bbox_targets,
                self.loss_bbox,
                **self.train_cfg.carl,
                avg_factor=num_total_pos,
                sigmoid=True,
                num_class=self.num_classes)
            loss_dict.update(loss_carl)
        return loss_dict
@HEADS.register_module()
class PISASSDHead(SSDHead):
    # PISA variant of SSDHead: adds ISR-P positive reweighting and the CARL
    # loss on top of the plain SSD loss.

    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
                with shape (num_obj, 4).
            gt_labels (list[Tensor]): Ground truth labels of each image
                with shape (num_obj, 4).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
                Default: None.

        Returns:
            dict: Loss dict, comprise classification loss regression loss and
            carl loss.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        # label_channels=1 (softmax classification); sampling results are
        # required by ISR-P below.
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=1,
            unmap_outputs=False,
            return_sampling_results=True)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
        num_images = len(img_metas)
        # Stack predictions/targets to per-image (num_images, num_anchors, *)
        # layouts expected by ``loss_single``.
        all_cls_scores = torch.cat([
            s.permute(0, 2, 3, 1).reshape(num_images, (- 1),
                                          self.cls_out_channels)
            for s in cls_scores
        ], 1)
        all_labels = torch.cat(labels_list, (- 1)).view(num_images, (- 1))
        all_label_weights = torch.cat(label_weights_list,
                                      (- 1)).view(num_images, (- 1))
        all_bbox_preds = torch.cat([
            b.permute(0, 2, 3, 1).reshape(num_images, (- 1), 4)
            for b in bbox_preds
        ], (- 2))
        all_bbox_targets = torch.cat(bbox_targets_list,
                                     (- 2)).view(num_images, (- 1), 4)
        all_bbox_weights = torch.cat(bbox_weights_list,
                                     (- 2)).view(num_images, (- 1), 4)
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))
        isr_cfg = self.train_cfg.get('isr', None)
        all_targets = (all_labels.view((- 1)), all_label_weights.view((- 1)),
                       all_bbox_targets.view((- 1), 4),
                       all_bbox_weights.view((- 1), 4))
        # ISR-P: reweight positive samples by importance before the loss.
        if (isr_cfg is not None):
            all_targets = isr_p(
                all_cls_scores.view((- 1), all_cls_scores.size((- 1))),
                all_bbox_preds.view((- 1), 4),
                all_targets,
                torch.cat(all_anchors),
                sampling_results_list,
                loss_cls=CrossEntropyLoss(),
                bbox_coder=self.bbox_coder,
                **self.train_cfg.isr,
                num_class=self.num_classes)
            (new_labels, new_label_weights, new_bbox_targets,
             new_bbox_weights) = all_targets
            all_labels = new_labels.view(all_labels.shape)
            all_label_weights = new_label_weights.view(all_label_weights.shape)
            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
        # CARL: classification-aware regression loss (uses the possibly
        # ISR-updated flat targets in ``all_targets``).
        carl_loss_cfg = self.train_cfg.get('carl', None)
        if (carl_loss_cfg is not None):
            loss_carl = carl_loss(
                all_cls_scores.view((- 1), all_cls_scores.size((- 1))),
                all_targets[0],
                all_bbox_preds.view((- 1), 4),
                all_targets[2],
                SmoothL1Loss(beta=1.0),
                **self.train_cfg.carl,
                avg_factor=num_total_pos,
                num_class=self.num_classes)
        assert torch.isfinite(all_cls_scores).all().item(), 'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), 'bbox predications become infinite or NaN!'
        (losses_cls, losses_bbox) = multi_apply(
            self.loss_single,
            all_cls_scores,
            all_bbox_preds,
            all_anchors,
            all_labels,
            all_label_weights,
            all_bbox_targets,
            all_bbox_weights,
            num_total_samples=num_total_pos)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        if (carl_loss_cfg is not None):
            loss_dict.update(loss_carl)
        return loss_dict
@HEADS.register_module()
class RetinaHead(AnchorHead):
    """An anchor-based head used in `RetinaNet
    <https://arxiv.org/pdf/1708.02002.pdf>`_.

    The head contains two subnetworks. The first classifies anchor boxes and
    the second regresses deltas for the anchors.

    Example:
        >>> import torch
        >>> self = RetinaHead(11, 7)
        >>> x = torch.rand(1, 7, 32, 32)
        >>> cls_score, bbox_pred = self.forward_single(x)
        >>> # Each anchor predicts a score for each class except background
        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
        >>> assert cls_per_anchor == (self.num_classes)
        >>> assert box_per_anchor == 4
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 anchor_generator=dict(
                     type='AnchorGenerator',
                     octave_base_scale=4,
                     scales_per_octave=3,
                     ratios=[0.5, 1.0, 2.0],
                     strides=[8, 16, 32, 64, 128]),
                 init_cfg=dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='retina_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs):
        # Stash tower hyper-parameters before the base class triggers
        # ``_init_layers`` via its constructor.
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(RetinaHead, self).__init__(
            num_classes,
            in_channels,
            anchor_generator=anchor_generator,
            init_cfg=init_cfg,
            **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        # Two parallel conv towers; the first layer adapts the input width,
        # the rest operate at the hidden feature width.
        tower_in = self.in_channels
        for _ in range(self.stacked_convs):
            self.cls_convs.append(
                ConvModule(
                    tower_in,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(
                    tower_in,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
            tower_in = self.feat_channels
        # Final predictors: per-anchor class scores and 4 box deltas.
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            self.num_base_priors * self.cls_out_channels,
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, self.num_base_priors * 4, 3, padding=1)

    def forward_single(self, x):
        """Forward feature of a single scale level.

        Args:
            x (Tensor): Features of a single scale level.

        Returns:
            tuple:
                cls_score (Tensor): Cls scores for a single scale level
                    the channels number is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level, the channels number is num_anchors * 4.
        """
        cls_feat, reg_feat = x, x
        for conv in self.cls_convs:
            cls_feat = conv(cls_feat)
        for conv in self.reg_convs:
            reg_feat = conv(reg_feat)
        return self.retina_cls(cls_feat), self.retina_reg(reg_feat)
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
    """RetinaHead with separate BN.

    In RetinaHead, conv/norm layers are shared across different FPN levels,
    while in RetinaSepBNHead, conv layers are shared across different FPN
    levels, but BN layers are separated.
    """

    def __init__(self,
                 num_classes,
                 num_ins,
                 in_channels,
                 stacked_convs=4,
                 conv_cfg=None,
                 norm_cfg=None,
                 init_cfg=None,
                 **kwargs):
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Number of input feature levels; one tower copy per level.
        self.num_ins = num_ins
        super(RetinaSepBNHead, self).__init__(
            num_classes, in_channels, init_cfg=init_cfg, **kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        # Build one full conv tower per FPN level (so each level owns its own
        # norm layers).
        for i in range(self.num_ins):
            cls_convs = nn.ModuleList()
            reg_convs = nn.ModuleList()
            for i in range(self.stacked_convs):
                # NOTE: inner loop reuses the name ``i``; harmless because the
                # outer ``i`` is not used again in this iteration.
                chn = (self.in_channels if (i == 0) else self.feat_channels)
                cls_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
                reg_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            self.cls_convs.append(cls_convs)
            self.reg_convs.append(reg_convs)
        # Share the conv weights across levels by aliasing every level's conv
        # to level 0's conv — only the norm layers stay per-level.
        for i in range(self.stacked_convs):
            for j in range(1, self.num_ins):
                self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
                self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
        self.retina_cls = nn.Conv2d(
            self.feat_channels,
            (self.num_base_priors * self.cls_out_channels),
            3,
            padding=1)
        self.retina_reg = nn.Conv2d(
            self.feat_channels, (self.num_base_priors * 4), 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        super(RetinaSepBNHead, self).init_weights()
        # Initializing level 0's convs suffices: the other levels alias the
        # same conv modules (see ``_init_layers``).
        for m in self.cls_convs[0]:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs[0]:
            normal_init(m.conv, std=0.01)
        # Biased prior on the classification predictor (focal-loss style).
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually a tuple of classification scores and bbox
            prediction

                - cls_scores (list[Tensor]): Classification scores for all
                  scale levels, each is a 4D-tensor, the channels number is
                  num_anchors * num_classes.
                - bbox_preds (list[Tensor]): Box energies / deltas for all
                  scale levels, each is a 4D-tensor, the channels number is
                  num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        # Each level runs through its own tower (shared conv weights,
        # per-level norms) but the same final predictors.
        for (i, x) in enumerate(feats):
            cls_feat = feats[i]
            reg_feat = feats[i]
            for cls_conv in self.cls_convs[i]:
                cls_feat = cls_conv(cls_feat)
            for reg_conv in self.reg_convs[i]:
                reg_feat = reg_conv(reg_feat)
            cls_score = self.retina_cls(cls_feat)
            bbox_pred = self.retina_reg(reg_feat)
            cls_scores.append(cls_score)
            bbox_preds.append(bbox_pred)
        return (cls_scores, bbox_preds)
@HEADS.register_module()
class SSDHead(AnchorHead):
    """SSD head used in https://arxiv.org/abs/1512.02325.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (tuple[int]): Number of channels of each input feature
            map.
        stacked_convs (int): Number of conv layers in cls and reg tower.
            Default: 0.
        feat_channels (int): Number of hidden channels when stacked_convs
            > 0. Default: 256.
        use_depthwise (bool): Whether to use DepthwiseSeparableConv.
            Default: False.
        conv_cfg (dict): Dictionary to construct and config conv layer.
            Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: None.
        act_cfg (dict): Dictionary to construct and config activation layer.
            Default: None.
        anchor_generator (dict): Config dict for anchor generator.
        bbox_coder (dict): Config of bounding box coder.
        reg_decoded_bbox (bool): If true, the regression loss would be
            applied directly on decoded bounding boxes, converting both
            the predicted boxes and regression targets to absolute
            coordinates format. Default False. It should be `True` when
            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_classes=80,
                 in_channels=(512, 1024, 512, 256, 256, 256),
                 stacked_convs=0,
                 feat_channels=256,
                 use_depthwise=False,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=None,
                 anchor_generator=dict(
                     type='SSDAnchorGenerator',
                     scale_major=False,
                     input_size=300,
                     strides=[8, 16, 32, 64, 100, 300],
                     ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
                     basesize_ratio_range=(0.1, 0.9)),
                 bbox_coder=dict(
                     type='DeltaXYWHBBoxCoder',
                     clip_border=True,
                     target_means=[0.0, 0.0, 0.0, 0.0],
                     target_stds=[1.0, 1.0, 1.0, 1.0]),
                 reg_decoded_bbox=False,
                 train_cfg=None,
                 test_cfg=None,
                 init_cfg=dict(
                     type='Xavier',
                     layer='Conv2d',
                     distribution='uniform',
                     bias=0)):
        # NOTE: deliberately skips AnchorHead.__init__ (calls the grandparent
        # BaseModule init) because SSDHead builds all of its own state below.
        super(AnchorHead, self).__init__(init_cfg)
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.stacked_convs = stacked_convs
        self.feat_channels = feat_channels
        self.use_depthwise = use_depthwise
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # SSD uses softmax classification with an explicit background class,
        # hence ``num_classes + 1`` output channels per anchor.
        self.cls_out_channels = num_classes + 1
        self.prior_generator = build_prior_generator(anchor_generator)
        # Number of base priors per location for each feature level.
        self.num_base_priors = self.prior_generator.num_base_priors
        self._init_layers()
        self.bbox_coder = build_bbox_coder(bbox_coder)
        self.reg_decoded_bbox = reg_decoded_bbox
        self.use_sigmoid_cls = False
        self.cls_focal_loss = False
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        # SSD relies on online hard negative mining in ``loss_single`` rather
        # than random sampling, so sampling is disabled.
        self.sampling = False
        if self.train_cfg:
            self.assigner = build_assigner(self.train_cfg.assigner)
            sampler_cfg = dict(type='PseudoSampler')
            self.sampler = build_sampler(sampler_cfg, context=self)
        self.fp16_enabled = False

    @property
    def num_anchors(self):
        """
        Returns:
            list[int]: Number of base_anchors on each point of each level.
        """
        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, please use "num_base_priors" instead')
        return self.num_base_priors

    def _init_layers(self):
        """Initialize layers of the head.

        Builds one cls tower and one reg tower per input feature level; each
        tower ends with a plain Conv2d that emits per-anchor predictions.
        """
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        conv = (DepthwiseSeparableConvModule
                if self.use_depthwise else ConvModule)
        for channel, num_base_priors in zip(self.in_channels,
                                            self.num_base_priors):
            cls_layers = []
            reg_layers = []
            in_channel = channel
            for i in range(self.stacked_convs):
                cls_layers.append(
                    conv(
                        in_channel,
                        self.feat_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                reg_layers.append(
                    conv(
                        in_channel,
                        self.feat_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                in_channel = self.feat_channels
            # SSD-Lite style: an extra depthwise conv before the final
            # pointwise prediction layer.
            if self.use_depthwise:
                cls_layers.append(
                    ConvModule(
                        in_channel,
                        in_channel,
                        3,
                        padding=1,
                        groups=in_channel,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                reg_layers.append(
                    ConvModule(
                        in_channel,
                        in_channel,
                        3,
                        padding=1,
                        groups=in_channel,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            cls_layers.append(
                nn.Conv2d(
                    in_channel,
                    num_base_priors * self.cls_out_channels,
                    kernel_size=1 if self.use_depthwise else 3,
                    padding=0 if self.use_depthwise else 1))
            reg_layers.append(
                nn.Conv2d(
                    in_channel,
                    num_base_priors * 4,
                    kernel_size=1 if self.use_depthwise else 3,
                    padding=0 if self.use_depthwise else 1))
            self.cls_convs.append(nn.Sequential(*cls_layers))
            self.reg_convs.append(nn.Sequential(*reg_layers))

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each
                is a 4D-tensor.

        Returns:
            tuple:
                cls_scores (list[Tensor]): Classification scores for all
                    scale levels, each is a 4D-tensor, the channels number
                    is num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all
                    scale levels, each is a 4D-tensor, the channels number
                    is num_anchors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
                                            self.cls_convs):
            cls_scores.append(cls_conv(feat))
            bbox_preds.append(reg_conv(feat))
        return cls_scores, bbox_preds

    def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
                    bbox_targets, bbox_weights, num_total_samples):
        """Compute loss of a single image.

        Args:
            cls_score (Tensor): Box scores for each image,
                has shape (num_total_anchors, num_classes).
            bbox_pred (Tensor): Box energies / deltas for each image
                level with shape (num_total_anchors, 4).
            anchor (Tensor): Box reference for each scale level with shape
                (num_total_anchors, 4).
            labels (Tensor): Labels of each anchors with shape
                (num_total_anchors,).
            label_weights (Tensor): Label weights of each anchor with shape
                (num_total_anchors,).
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (num_total_anchors, 4).
            bbox_weights (Tensor): BBox regression loss weights of each
                anchor with shape (num_total_anchors, 4).
            num_total_samples (int): If sampling, num total samples equal to
                the number of total anchors; Otherwise, it is the number of
                positive anchors.

        Returns:
            tuple[Tensor, Tensor]: Classification loss (shape (1,)) and
            regression loss of the image.
        """
        loss_cls_all = F.cross_entropy(
            cls_score, labels, reduction='none') * label_weights
        # Foreground labels are [0, num_classes - 1]; ``num_classes`` is the
        # background class index.
        pos_inds = ((labels >= 0) &
                    (labels < self.num_classes)).nonzero(
                        as_tuple=False).reshape(-1)
        neg_inds = (labels == self.num_classes).nonzero(
            as_tuple=False).view(-1)
        # Online hard negative mining: keep only the hardest negatives, up to
        # ``neg_pos_ratio`` times the number of positives.
        num_pos_samples = pos_inds.size(0)
        num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
        if num_neg_samples > neg_inds.size(0):
            num_neg_samples = neg_inds.size(0)
        topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
        loss_cls_pos = loss_cls_all[pos_inds].sum()
        loss_cls_neg = topk_loss_cls_neg.sum()
        loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
        if self.reg_decoded_bbox:
            # Decode predictions to absolute coordinates so IoU-based losses
            # can be applied directly.
            bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
        loss_bbox = smooth_l1_loss(
            bbox_pred,
            bbox_targets,
            bbox_weights,
            beta=self.train_cfg.smoothl1_beta,
            avg_factor=num_total_samples)
        # ``[None]`` keeps loss_cls 1-dim so it can be concatenated later.
        return loss_cls[None], loss_bbox

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                has shape (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            gt_bboxes (list[Tensor]): Each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each
                box.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels
        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=1,
            unmap_outputs=False)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
        num_images = len(img_metas)
        # Flatten per-level predictions/targets into per-image tensors so the
        # whole image can be processed by one ``loss_single`` call.
        all_cls_scores = torch.cat([
            s.permute(0, 2, 3, 1).reshape(num_images, -1,
                                          self.cls_out_channels)
            for s in cls_scores
        ], 1)
        all_labels = torch.cat(labels_list, -1).view(num_images, -1)
        all_label_weights = torch.cat(label_weights_list,
                                      -1).view(num_images, -1)
        all_bbox_preds = torch.cat([
            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
            for b in bbox_preds
        ], -2)
        all_bbox_targets = torch.cat(bbox_targets_list,
                                     -2).view(num_images, -1, 4)
        all_bbox_weights = torch.cat(bbox_weights_list,
                                     -2).view(num_images, -1, 4)
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))
        losses_cls, losses_bbox = multi_apply(
            self.loss_single,
            all_cls_scores,
            all_bbox_preds,
            all_anchors,
            all_labels,
            all_label_weights,
            all_bbox_targets,
            all_bbox_weights,
            num_total_samples=num_total_pos)
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
@DETECTORS.register_module() class ATSS(SingleStageDetector): 'Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_.' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
@DETECTORS.register_module() class AutoAssign(SingleStageDetector): 'Implementation of `AutoAssign: Differentiable Label Assignment for Dense\n Object Detection <https://arxiv.org/abs/2007.03496>`_.' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None): super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
class BaseDetector(BaseModule, metaclass=ABCMeta):
    """Base class for detectors.

    Defines the shared train/test entry points, loss parsing, and result
    visualization; concrete detectors implement ``extract_feat``,
    ``simple_test`` and ``aug_test``.
    """

    def __init__(self, init_cfg=None):
        super(BaseDetector, self).__init__(init_cfg)
        self.fp16_enabled = False

    @property
    def with_neck(self):
        """bool: whether the detector has a neck"""
        return hasattr(self, 'neck') and self.neck is not None

    @property
    def with_shared_head(self):
        """bool: whether the detector has a shared head in the RoI Head"""
        return hasattr(self, 'roi_head') and self.roi_head.with_shared_head

    @property
    def with_bbox(self):
        """bool: whether the detector has a bbox head"""
        return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
                or (hasattr(self, 'bbox_head')
                    and self.bbox_head is not None))

    @property
    def with_mask(self):
        """bool: whether the detector has a mask head"""
        return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
                or (hasattr(self, 'mask_head')
                    and self.mask_head is not None))

    @abstractmethod
    def extract_feat(self, imgs):
        """Extract features from images."""
        pass

    def extract_feats(self, imgs):
        """Extract features from multiple images.

        Args:
            imgs (list[torch.Tensor]): A list of images. The images are
                augmented from the same image but in different ways.

        Returns:
            list[torch.Tensor]: Features of different images.
        """
        assert isinstance(imgs, list)
        return [self.extract_feat(img) for img in imgs]

    def forward_train(self, imgs, img_metas, **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also
                contain 'filename', 'ori_shape', 'pad_shape', and
                'img_norm_cfg'. For details on the values of these keys,
                see :class:`mmdet.datasets.pipelines.Collect`.
            kwargs (keyword arguments): Specific to concrete implementation.

        NOTE(review): this base implementation only records the padded batch
        input shape in every ``img_meta``; subclasses extend it via
        ``super().forward_train(...)``.
        """
        batch_input_shape = tuple(imgs[0].size()[-2:])
        for img_meta in img_metas:
            img_meta['batch_input_shape'] = batch_input_shape

    async def async_simple_test(self, img, img_metas, **kwargs):
        # Async inference is opt-in; subclasses may override.
        raise NotImplementedError

    @abstractmethod
    def simple_test(self, img, img_metas, **kwargs):
        pass

    @abstractmethod
    def aug_test(self, imgs, img_metas, **kwargs):
        """Test function with test time augmentation."""
        pass

    async def aforward_test(self, *, img, img_metas, **kwargs):
        # Async counterpart of ``forward_test``; only single-aug,
        # single-sample batches are supported.
        for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(img)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(img)}) != num of image metas ({len(img_metas)})')
        samples_per_gpu = img[0].size(0)
        assert samples_per_gpu == 1
        if num_augs == 1:
            return await self.async_simple_test(img[0], img_metas[0],
                                                **kwargs)
        else:
            raise NotImplementedError

    def forward_test(self, imgs, img_metas, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
        """
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) != num of image meta ({len(img_metas)})')
        # Record the padded shape of each augmented batch for the heads.
        for img, img_meta in zip(imgs, img_metas):
            batch_size = len(img_meta)
            for img_id in range(batch_size):
                img_meta[img_id]['batch_input_shape'] = tuple(
                    img.size()[-2:])
        if num_augs == 1:
            # proposals (List[List[Tensor]]): unwrap the single-aug level.
            if 'proposals' in kwargs:
                kwargs['proposals'] = kwargs['proposals'][0]
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            assert imgs[0].size(0) == 1, f'aug test does not support inference with batch size {imgs[0].size(0)}'
            assert 'proposals' not in kwargs
            return self.aug_test(imgs, img_metas, **kwargs)

    @auto_fp16(apply_to=('img', ))
    def forward(self, img, img_metas, return_loss=True, **kwargs):
        """Calls either :func:`forward_train` or :func:`forward_test`
        depending on whether ``return_loss`` is ``True``.

        Note this setting will change the expected inputs. When
        ``return_loss=True``, img and img_meta are single-nested (i.e.
        Tensor and List[dict]), and when ``return_loss=False``, img and
        img_meta should be double nested (i.e. List[Tensor],
        List[List[dict]]), with the outer list indicating test time
        augmentations.
        """
        if torch.onnx.is_in_onnx_export():
            assert len(img_metas) == 1
            return self.onnx_export(img[0], img_metas[0])
        if return_loss:
            return self.forward_train(img, img_metas, **kwargs)
        else:
            return self.forward_test(img, img_metas, **kwargs)

    def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
            which may be a weighted sum of all losses, log_vars contains
            all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean()
                                          for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')
        # Only keys containing 'loss' contribute to the optimized total.
        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)
        if dist.is_available() and dist.is_initialized():
            # Sanity check: every rank must log the same number of variables,
            # otherwise the all_reduce below would deadlock or mix values.
            log_var_length = torch.tensor(len(log_vars), device=loss.device)
            dist.all_reduce(log_var_length)
            message = (f'rank {dist.get_rank()}' +
                       f' len(log_vars): {len(log_vars)}' + ' keys: ' +
                       ','.join(log_vars.keys()))
            assert log_var_length == len(log_vars) * dist.get_world_size(), \
                'loss log variables are different across GPUs!\n' + message
        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # Average logged values across GPUs for consistent logging.
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()
        return loss, log_vars

    def train_step(self, data, optimizer):
        """The iteration step during training.

        This method defines an iteration step during training, except for
        the back propagation and optimizer updating, which are done in an
        optimizer hook. Note that in some complicated cases or models, the
        whole process including back propagation and optimizer updating is
        also defined in this method, such as GAN.

        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer
                of runner is passed to ``train_step()``. This argument is
                unused and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``,
            ``log_vars``, ``num_samples``.

                - ``loss`` is a tensor for back propagation, which can be a
                  weighted sum of multiple losses.
                - ``log_vars`` contains all the variables to be sent to the
                  logger.
                - ``num_samples`` indicates the batch size (when the model
                  is DDP, it means the batch size on each GPU), which is
                  used for averaging the logs.
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
        return outputs

    def val_step(self, data, optimizer=None):
        """The iteration step during validation.

        This method shares the same signature as :func:`train_step`, but
        used during val epochs. Note that the evaluation after training
        epochs is not implemented with this method, but an evaluation hook.
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
        return outputs

    def show_result(self,
                    img,
                    result,
                    score_thr=0.3,
                    bbox_color=(72, 101, 241),
                    text_color=(72, 101, 241),
                    mask_color=None,
                    thickness=2,
                    font_size=13,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor or tuple): The results to draw over `img`,
                bbox_result or (bbox_result, segm_result).
            score_thr (float, optional): Minimum score of bboxes to be
                shown. Default: 0.3.
            bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox
                lines. The tuple of color should be in BGR order.
            text_color (str or tuple(int) or :obj:`Color`): Color of texts.
                The tuple of color should be in BGR order.
            mask_color (None or str or tuple(int) or :obj:`Color`):
                Color of masks. The tuple of color should be in BGR order.
                Default: None.
            thickness (int): Thickness of lines. Default: 2.
            font_size (int): Font size of texts. Default: 13.
            win_name (str): The window name. Default: ''.
            wait_time (float): Value of waitKey param. Default: 0.
            show (bool): Whether to show the image. Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.

        Returns:
            img (Tensor): Only if not `show` or `out_file`.
        """
        img = mmcv.imread(img)
        img = img.copy()
        if isinstance(result, tuple):
            bbox_result, segm_result = result
            if isinstance(segm_result, tuple):
                # Some models return (segm, extra); keep the segm part only.
                segm_result = segm_result[0]
        else:
            bbox_result, segm_result = result, None
        bboxes = np.vstack(bbox_result)
        # One label per detected box, derived from its per-class list index.
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)
        segms = None
        if segm_result is not None and len(labels) > 0:
            segms = mmcv.concat_list(segm_result)
            if isinstance(segms[0], torch.Tensor):
                segms = torch.stack(segms, dim=0).detach().cpu().numpy()
            else:
                segms = np.stack(segms, axis=0)
        # Writing to a file implies headless mode.
        if out_file is not None:
            show = False
        img = imshow_det_bboxes(
            img,
            bboxes,
            labels,
            segms,
            class_names=self.CLASSES,
            score_thr=score_thr,
            bbox_color=bbox_color,
            text_color=text_color,
            mask_color=mask_color,
            thickness=thickness,
            font_size=font_size,
            win_name=win_name,
            show=show,
            wait_time=wait_time,
            out_file=out_file)
        if not (show or out_file):
            return img

    def onnx_export(self, img, img_metas):
        raise NotImplementedError(f'{self.__class__.__name__} does not support ONNX EXPORT')
@DETECTORS.register_module() class CascadeRCNN(TwoStageDetector): 'Implementation of `Cascade R-CNN: Delving into High Quality Object\n Detection <https://arxiv.org/abs/1906.09756>`_' def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(CascadeRCNN, self).__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) def show_result(self, data, result, **kwargs): 'Show prediction results of the detector.\n\n Args:\n data (str or np.ndarray): Image filename or loaded image.\n result (Tensor or tuple): The results to draw over `img`\n bbox_result or (bbox_result, segm_result).\n\n Returns:\n np.ndarray: The image with bboxes drawn on it.\n ' if self.with_mask: (ms_bbox_result, ms_segm_result) = result if isinstance(ms_bbox_result, dict): result = (ms_bbox_result['ensemble'], ms_segm_result['ensemble']) elif isinstance(result, dict): result = result['ensemble'] return super(CascadeRCNN, self).show_result(data, result, **kwargs)
@DETECTORS.register_module()
class CenterNet(SingleStageDetector):
    """Implementation of CenterNet (Objects as Points)
    <https://arxiv.org/abs/1904.07850>.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg,
                                        test_cfg, pretrained, init_cfg)

    def merge_aug_results(self, aug_results, with_nms):
        """Merge augmented detection bboxes and score.

        Args:
            aug_results (list[list[Tensor]]): Det_bboxes and det_labels of
                each image.
            with_nms (bool): If True, do nms before return boxes.

        Returns:
            tuple: (out_bboxes, out_labels)
        """
        recovered_bboxes, aug_labels = [], []
        for single_result in aug_results:
            recovered_bboxes.append(single_result[0][0])
            aug_labels.append(single_result[0][1])
        bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()
        labels = torch.cat(aug_labels).contiguous()
        if with_nms:
            out_bboxes, out_labels = self.bbox_head._bboxes_nms(
                bboxes, labels, self.bbox_head.test_cfg)
        else:
            out_bboxes, out_labels = bboxes, labels
        return out_bboxes, out_labels

    def aug_test(self, imgs, img_metas, rescale=True):
        """Augment testing of CenterNet. Aug test must have flipped image
        pair, and unlike CornerNet, it will perform an averaging operation
        on the feature map instead of detecting bbox.

        Args:
            imgs (list[Tensor]): Augmented images.
            img_metas (list[list[dict]]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            rescale (bool): If True, return boxes in original image space.
                Default: True.

        Note:
            ``imgs`` must including flipped image pairs.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        img_inds = list(range(len(imgs)))
        # NOTE(review): this only checks that at least one of the first two
        # augmentations is flipped; the pairing below assumes augmentations
        # are ordered as (original, flipped) pairs.
        assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], 'aug test must have flipped image pair'
        aug_results = []
        # Process augmentations two at a time: even index = original image,
        # odd index = its flipped counterpart.
        for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
            flip_direction = img_metas[flip_ind][0]['flip_direction']
            # Stack original and flipped images into one batch of size 2.
            img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
            x = self.extract_feat(img_pair)
            center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x)
            assert len(center_heatmap_preds) == len(wh_preds) == len(
                offset_preds) == 1
            # Average the original prediction with the un-flipped prediction
            # of the flipped image (feature-level flip augmentation).
            center_heatmap_preds[0] = (
                center_heatmap_preds[0][0:1] +
                flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2
            wh_preds[0] = (wh_preds[0][0:1] +
                           flip_tensor(wh_preds[0][1:2], flip_direction)) / 2
            # Offsets are taken from the original image only (not averaged).
            bbox_list = self.bbox_head.get_bboxes(
                center_heatmap_preds,
                wh_preds, [offset_preds[0][0:1]],
                img_metas[ind],
                rescale=rescale,
                with_nms=False)
            aug_results.append(bbox_list)
        nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)
        if nms_cfg is None:
            with_nms = False
        else:
            with_nms = True
        bbox_list = [self.merge_aug_results(aug_results, with_nms)]
        bbox_results = [
            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
            for det_bboxes, det_labels in bbox_list
        ]
        return bbox_results
@DETECTORS.register_module() class DeformableDETR(DETR): def __init__(self, *args, **kwargs): super(DETR, self).__init__(*args, **kwargs)
@DETECTORS.register_module() class DETR(SingleStageDetector): 'Implementation of `DETR: End-to-End Object Detection with\n Transformers <https://arxiv.org/pdf/2005.12872>`_' def __init__(self, backbone, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(DETR, self).__init__(backbone, None, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) def forward_dummy(self, img): 'Used for computing network flops.\n\n See `mmdetection/tools/analysis_tools/get_flops.py`\n ' warnings.warn('Warning! MultiheadAttention in DETR does not support flops computation! Do not use the results in your papers!') (batch_size, _, height, width) = img.shape dummy_img_metas = [dict(batch_input_shape=(height, width), img_shape=(height, width, 3)) for _ in range(batch_size)] x = self.extract_feat(img) outs = self.bbox_head(x, dummy_img_metas) return outs def onnx_export(self, img, img_metas): 'Test function for exporting to ONNX, without test time augmentation.\n\n Args:\n img (torch.Tensor): input images.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n and class labels of shape [N, num_det].\n ' x = self.extract_feat(img) outs = self.bbox_head.forward_onnx(x, img_metas) img_shape = torch._shape_as_tensor(img)[2:] img_metas[0]['img_shape_for_onnx'] = img_shape (det_bboxes, det_labels) = self.bbox_head.onnx_export(*outs, img_metas) return (det_bboxes, det_labels)
@DETECTORS.register_module() class FastRCNN(TwoStageDetector): 'Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_' def __init__(self, backbone, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(FastRCNN, self).__init__(backbone=backbone, neck=neck, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) def forward_test(self, imgs, img_metas, proposals, **kwargs): '\n Args:\n imgs (List[Tensor]): the outer list indicates test-time\n augmentations and inner Tensor should have a shape NxCxHxW,\n which contains all images in the batch.\n img_metas (List[List[dict]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) and the inner list indicates\n images in a batch.\n proposals (List[List[Tensor]]): the outer list indicates test-time\n augs (multiscale, flip, etc.) and the inner list indicates\n images in a batch. The Tensor should have a shape Px4, where\n P is the number of proposals.\n ' for (var, name) in [(imgs, 'imgs'), (img_metas, 'img_metas')]: if (not isinstance(var, list)): raise TypeError(f'{name} must be a list, but got {type(var)}') num_augs = len(imgs) if (num_augs != len(img_metas)): raise ValueError(f'num of augmentations ({len(imgs)}) != num of image meta ({len(img_metas)})') if (num_augs == 1): return self.simple_test(imgs[0], img_metas[0], proposals[0], **kwargs) else: assert NotImplementedError