repo
stringlengths
1
99
file
stringlengths
13
215
code
stringlengths
12
59.2M
file_length
int64
12
59.2M
avg_line_length
float64
3.82
1.48M
max_line_length
int64
12
2.51M
extension_type
stringclasses
1 value
ERD
ERD-main/mmdet/models/roi_heads/bbox_heads/bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from torch.nn.modules.utils import _pair

from mmdet.models.layers import multiclass_nms
from mmdet.models.losses import accuracy
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.utils import empty_instances, multi_apply
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import get_box_tensor, scale_boxes
from mmdet.utils import ConfigType, InstanceList, OptMultiConfig


@MODELS.register_module()
class BBoxHead(BaseModule):
    """Simplest RoI head, with only two fc layers for classification and
    regression respectively."""

    def __init__(self,
                 with_avg_pool: bool = False,
                 with_cls: bool = True,
                 with_reg: bool = True,
                 roi_feat_size: int = 7,
                 in_channels: int = 256,
                 num_classes: int = 80,
                 bbox_coder: ConfigType = dict(
                     type='DeltaXYWHBBoxCoder',
                     clip_border=True,
                     target_means=[0., 0., 0., 0.],
                     target_stds=[0.1, 0.1, 0.2, 0.2]),
                 predict_box_type: str = 'hbox',
                 reg_class_agnostic: bool = False,
                 reg_decoded_bbox: bool = False,
                 reg_predictor_cfg: ConfigType = dict(type='Linear'),
                 cls_predictor_cfg: ConfigType = dict(type='Linear'),
                 loss_cls: ConfigType = dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=False,
                     loss_weight=1.0),
                 loss_bbox: ConfigType = dict(
                     type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)
        # At least one of the two branches (classification / regression)
        # must be enabled, otherwise the head predicts nothing.
        assert with_cls or with_reg
        self.with_avg_pool = with_avg_pool
        self.with_cls = with_cls
        self.with_reg = with_reg
        self.roi_feat_size = _pair(roi_feat_size)
        self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.predict_box_type = predict_box_type
        self.reg_class_agnostic = reg_class_agnostic
        self.reg_decoded_bbox = reg_decoded_bbox
        self.reg_predictor_cfg = reg_predictor_cfg
        self.cls_predictor_cfg = cls_predictor_cfg
        self.bbox_coder = TASK_UTILS.build(bbox_coder)
        self.loss_cls = MODELS.build(loss_cls)
        self.loss_bbox = MODELS.build(loss_bbox)

        in_channels = self.in_channels
        if self.with_avg_pool:
            self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
        else:
            # Without pooling the RoI feature map is flattened, so the fc
            # input size grows by the spatial area of the RoI feature.
            in_channels *= self.roi_feat_area
        if self.with_cls:
            # need to add background class
            if self.custom_cls_channels:
                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
            else:
                cls_channels = num_classes + 1
            cls_predictor_cfg_ = self.cls_predictor_cfg.copy()
            cls_predictor_cfg_.update(
                in_features=in_channels, out_features=cls_channels)
            self.fc_cls = MODELS.build(cls_predictor_cfg_)
        if self.with_reg:
            box_dim = self.bbox_coder.encode_size
            # Class-agnostic regression predicts one box per RoI; otherwise
            # one box per class per RoI.
            out_dim_reg = box_dim if reg_class_agnostic else \
                box_dim * num_classes
            reg_predictor_cfg_ = self.reg_predictor_cfg.copy()
            if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):
                reg_predictor_cfg_.update(
                    in_features=in_channels, out_features=out_dim_reg)
            self.fc_reg = MODELS.build(reg_predictor_cfg_)
        self.debug_imgs = None
        if init_cfg is None:
            self.init_cfg = []
            if self.with_cls:
                self.init_cfg += [
                    dict(
                        type='Normal', std=0.01, override=dict(name='fc_cls'))
                ]
            if self.with_reg:
                self.init_cfg += [
                    dict(
                        type='Normal', std=0.001, override=dict(name='fc_reg'))
                ]

    # TODO: Create a SeesawBBoxHead to simplified logic in BBoxHead
    @property
    def custom_cls_channels(self) -> bool:
        """get custom_cls_channels from loss_cls."""
        return getattr(self.loss_cls, 'custom_cls_channels', False)

    # TODO: Create a SeesawBBoxHead to simplified logic in BBoxHead
    @property
    def custom_activation(self) -> bool:
        """get custom_activation from loss_cls."""
        return getattr(self.loss_cls, 'custom_activation', False)

    # TODO: Create a SeesawBBoxHead to simplified logic in BBoxHead
    @property
    def custom_accuracy(self) -> bool:
        """get custom_accuracy from loss_cls."""
        return getattr(self.loss_cls, 'custom_accuracy', False)

    def forward(self, x: Tuple[Tensor]) -> tuple:
        """Forward features from the upstream network.

        Args:
            x (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: A tuple of classification scores and bbox prediction.

                - cls_score (Tensor): Classification scores for all
                  scale levels, each is a 4D-tensor, the channels number
                  is num_base_priors * num_classes.
                - bbox_pred (Tensor): Box energies / deltas for all
                  scale levels, each is a 4D-tensor, the channels number
                  is num_base_priors * 4.
        """
        if self.with_avg_pool:
            if x.numel() > 0:
                x = self.avg_pool(x)
                x = x.view(x.size(0), -1)
            else:
                # avg_pool does not support empty tensor,
                # so use torch.mean instead it
                x = torch.mean(x, dim=(-1, -2))
        cls_score = self.fc_cls(x) if self.with_cls else None
        bbox_pred = self.fc_reg(x) if self.with_reg else None
        return cls_score, bbox_pred

    def _get_targets_single(self, pos_priors: Tensor, neg_priors: Tensor,
                            pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,
                            cfg: ConfigDict) -> tuple:
        """Calculate the ground truth for proposals in the single image
        according to the sampling results.

        Args:
            pos_priors (Tensor): Contains all the positive boxes,
                has shape (num_pos, 4), the last dimension 4
                represents [tl_x, tl_y, br_x, br_y].
            neg_priors (Tensor): Contains all the negative boxes,
                has shape (num_neg, 4), the last dimension 4
                represents [tl_x, tl_y, br_x, br_y].
            pos_gt_bboxes (Tensor): Contains gt_boxes for
                all positive samples, has shape (num_pos, 4),
                the last dimension 4
                represents [tl_x, tl_y, br_x, br_y].
            pos_gt_labels (Tensor): Contains gt_labels for
                all positive samples, has shape (num_pos, ).
            cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.

        Returns:
            Tuple[Tensor]: Ground truth for proposals
            in a single image. Containing the following Tensors:

                - labels(Tensor): Gt_labels for all proposals, has
                  shape (num_proposals,).
                - label_weights(Tensor): Labels_weights for all
                  proposals, has shape (num_proposals,).
                - bbox_targets(Tensor):Regression target for all
                  proposals, has shape (num_proposals, 4), the
                  last dimension 4 represents [tl_x, tl_y, br_x, br_y].
                - bbox_weights(Tensor):Regression weights for all
                  proposals, has shape (num_proposals, 4).
        """
        num_pos = pos_priors.size(0)
        num_neg = neg_priors.size(0)
        num_samples = num_pos + num_neg

        # original implementation uses new_zeros since BG are set to be 0
        # now use empty & fill because BG cat_id = num_classes,
        # FG cat_id = [0, num_classes-1]
        labels = pos_priors.new_full((num_samples, ),
                                     self.num_classes,
                                     dtype=torch.long)
        reg_dim = pos_gt_bboxes.size(-1) if self.reg_decoded_bbox \
            else self.bbox_coder.encode_size
        label_weights = pos_priors.new_zeros(num_samples)
        bbox_targets = pos_priors.new_zeros(num_samples, reg_dim)
        bbox_weights = pos_priors.new_zeros(num_samples, reg_dim)
        if num_pos > 0:
            labels[:num_pos] = pos_gt_labels
            # cfg.pos_weight <= 0 means "use the default weight of 1.0".
            pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
            label_weights[:num_pos] = pos_weight
            if not self.reg_decoded_bbox:
                pos_bbox_targets = self.bbox_coder.encode(
                    pos_priors, pos_gt_bboxes)
            else:
                # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
                # is applied directly on the decoded bounding boxes, both
                # the predicted boxes and regression targets should be with
                # absolute coordinate format.
                pos_bbox_targets = get_box_tensor(pos_gt_bboxes)
            bbox_targets[:num_pos, :] = pos_bbox_targets
            bbox_weights[:num_pos, :] = 1
        if num_neg > 0:
            label_weights[-num_neg:] = 1.0

        return labels, label_weights, bbox_targets, bbox_weights

    def get_targets(self,
                    sampling_results: List[SamplingResult],
                    rcnn_train_cfg: ConfigDict,
                    concat: bool = True) -> tuple:
        """Calculate the ground truth for all samples in a batch according to
        the sampling_results.

        Almost the same as the implementation in bbox_head, we passed
        additional parameters pos_inds_list and neg_inds_list to
        `_get_targets_single` function.

        Args:
            sampling_results (List[obj:SamplingResult]): Assign results of
                all images in a batch after sampling.
            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
            concat (bool): Whether to concatenate the results of all
                the images in a single batch.

        Returns:
            Tuple[Tensor]: Ground truth for proposals in a single image.
            Containing the following list of Tensors:

                - labels (list[Tensor],Tensor): Gt_labels for all
                  proposals in a batch, each tensor in list has
                  shape (num_proposals,) when `concat=False`, otherwise
                  just a single tensor has shape (num_all_proposals,).
                - label_weights (list[Tensor]): Labels_weights for
                  all proposals in a batch, each tensor in list has
                  shape (num_proposals,) when `concat=False`, otherwise
                  just a single tensor has shape (num_all_proposals,).
                - bbox_targets (list[Tensor],Tensor): Regression target
                  for all proposals in a batch, each tensor in list
                  has shape (num_proposals, 4) when `concat=False`,
                  otherwise just a single tensor has shape
                  (num_all_proposals, 4), the last dimension 4 represents
                  [tl_x, tl_y, br_x, br_y].
                - bbox_weights (list[tensor],Tensor): Regression weights for
                  all proposals in a batch, each tensor in list has shape
                  (num_proposals, 4) when `concat=False`, otherwise just a
                  single tensor has shape (num_all_proposals, 4).
        """
        pos_priors_list = [res.pos_priors for res in sampling_results]
        neg_priors_list = [res.neg_priors for res in sampling_results]
        pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
        pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
        labels, label_weights, bbox_targets, bbox_weights = multi_apply(
            self._get_targets_single,
            pos_priors_list,
            neg_priors_list,
            pos_gt_bboxes_list,
            pos_gt_labels_list,
            cfg=rcnn_train_cfg)

        if concat:
            labels = torch.cat(labels, 0)
            label_weights = torch.cat(label_weights, 0)
            bbox_targets = torch.cat(bbox_targets, 0)
            bbox_weights = torch.cat(bbox_weights, 0)
        return labels, label_weights, bbox_targets, bbox_weights

    def loss_and_target(self,
                        cls_score: Tensor,
                        bbox_pred: Tensor,
                        rois: Tensor,
                        sampling_results: List[SamplingResult],
                        rcnn_train_cfg: ConfigDict,
                        concat: bool = True,
                        reduction_override: Optional[str] = None) -> dict:
        """Calculate the loss based on the features extracted by the bbox
        head.

        Args:
            cls_score (Tensor): Classification prediction
                results of all class, has shape
                (batch_size * num_proposals_single_image, num_classes)
            bbox_pred (Tensor): Regression prediction results,
                has shape
                (batch_size * num_proposals_single_image, 4), the last
                dimension 4 represents [tl_x, tl_y, br_x, br_y].
            rois (Tensor): RoIs with the shape
                (batch_size * num_proposals_single_image, 5) where the first
                column indicates batch id of each RoI.
            sampling_results (List[obj:SamplingResult]): Assign results of
                all images in a batch after sampling.
            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
            concat (bool): Whether to concatenate the results of all
                the images in a single batch. Defaults to True.
            reduction_override (str, optional): The reduction
                method used to override the original reduction
                method of the loss. Options are "none",
                "mean" and "sum". Defaults to None,

        Returns:
            dict: A dictionary of loss and targets components.
                The targets are only used for cascade rcnn.
        """
        cls_reg_targets = self.get_targets(
            sampling_results, rcnn_train_cfg, concat=concat)
        losses = self.loss(
            cls_score,
            bbox_pred,
            rois,
            *cls_reg_targets,
            reduction_override=reduction_override)

        # cls_reg_targets is only for cascade rcnn
        return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)

    def loss(self,
             cls_score: Tensor,
             bbox_pred: Tensor,
             rois: Tensor,
             labels: Tensor,
             label_weights: Tensor,
             bbox_targets: Tensor,
             bbox_weights: Tensor,
             reduction_override: Optional[str] = None) -> dict:
        """Calculate the loss based on the network predictions and targets.

        Args:
            cls_score (Tensor): Classification prediction
                results of all class, has shape
                (batch_size * num_proposals_single_image, num_classes)
            bbox_pred (Tensor): Regression prediction results,
                has shape
                (batch_size * num_proposals_single_image, 4), the last
                dimension 4 represents [tl_x, tl_y, br_x, br_y].
            rois (Tensor): RoIs with the shape
                (batch_size * num_proposals_single_image, 5) where the first
                column indicates batch id of each RoI.
            labels (Tensor): Gt_labels for all proposals in a batch, has
                shape (batch_size * num_proposals_single_image, ).
            label_weights (Tensor): Labels_weights for all proposals in a
                batch, has shape
                (batch_size * num_proposals_single_image, ).
            bbox_targets (Tensor): Regression target for all proposals in a
                batch, has shape
                (batch_size * num_proposals_single_image, 4), the last
                dimension 4 represents [tl_x, tl_y, br_x, br_y].
            bbox_weights (Tensor): Regression weights for all proposals in a
                batch, has shape
                (batch_size * num_proposals_single_image, 4).
            reduction_override (str, optional): The reduction
                method used to override the original reduction
                method of the loss. Options are "none",
                "mean" and "sum". Defaults to None,

        Returns:
            dict: A dictionary of loss.
        """
        losses = dict()

        if cls_score is not None:
            # Normalize by the number of weighted samples, clamped to >= 1
            # so an all-zero-weight batch cannot divide by zero.
            avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
            if cls_score.numel() > 0:
                loss_cls_ = self.loss_cls(
                    cls_score,
                    labels,
                    label_weights,
                    avg_factor=avg_factor,
                    reduction_override=reduction_override)
                if isinstance(loss_cls_, dict):
                    losses.update(loss_cls_)
                else:
                    losses['loss_cls'] = loss_cls_
                if self.custom_activation:
                    acc_ = self.loss_cls.get_accuracy(cls_score, labels)
                    losses.update(acc_)
                else:
                    losses['acc'] = accuracy(cls_score, labels)
        if bbox_pred is not None:
            bg_class_ind = self.num_classes
            # 0~self.num_classes-1 are FG, self.num_classes is BG
            pos_inds = (labels >= 0) & (labels < bg_class_ind)
            # do not perform bounding box regression for BG anymore.
            if pos_inds.any():
                if self.reg_decoded_bbox:
                    # When the regression loss (e.g. `IouLoss`,
                    # `GIouLoss`, `DIouLoss`) is applied directly on
                    # the decoded bounding boxes, it decodes the
                    # already encoded coordinates to absolute format.
                    bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
                    bbox_pred = get_box_tensor(bbox_pred)
                if self.reg_class_agnostic:
                    pos_bbox_pred = bbox_pred.view(
                        bbox_pred.size(0), -1)[pos_inds.type(torch.bool)]
                else:
                    # Pick the predicted box of each positive sample's own
                    # gt class from the per-class regression output.
                    pos_bbox_pred = bbox_pred.view(
                        bbox_pred.size(0), self.num_classes,
                        -1)[pos_inds.type(torch.bool),
                            labels[pos_inds.type(torch.bool)]]
                losses['loss_bbox'] = self.loss_bbox(
                    pos_bbox_pred,
                    bbox_targets[pos_inds.type(torch.bool)],
                    bbox_weights[pos_inds.type(torch.bool)],
                    avg_factor=bbox_targets.size(0),
                    reduction_override=reduction_override)
            else:
                # No positives: emit a zero-valued loss that still keeps
                # bbox_pred in the autograd graph.
                losses['loss_bbox'] = bbox_pred[pos_inds].sum()

        return losses

    def predict_by_feat(self,
                        rois: Tuple[Tensor],
                        cls_scores: Tuple[Tensor],
                        bbox_preds: Tuple[Tensor],
                        batch_img_metas: List[dict],
                        rcnn_test_cfg: Optional[ConfigDict] = None,
                        rescale: bool = False) -> InstanceList:
        """Transform a batch of output features extracted from the head into
        bbox results.

        Args:
            rois (tuple[Tensor]): Tuple of boxes to be transformed.
                Each has shape  (num_boxes, 5). last dimension 5 arrange as
                (batch_index, x1, y1, x2, y2).
            cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
                (num_boxes, num_classes + 1).
            bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
                has shape (num_boxes, num_classes * 4).
            batch_img_metas (list[dict]): List of image information.
            rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
                Defaults to None.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.

        Returns:
            list[:obj:`InstanceData`]: Instance segmentation
            results of each image after the post process.
            Each item usually contains following keys.

                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        assert len(cls_scores) == len(bbox_preds)
        result_list = []
        for img_id in range(len(batch_img_metas)):
            img_meta = batch_img_metas[img_id]
            results = self._predict_by_feat_single(
                roi=rois[img_id],
                cls_score=cls_scores[img_id],
                bbox_pred=bbox_preds[img_id],
                img_meta=img_meta,
                rescale=rescale,
                rcnn_test_cfg=rcnn_test_cfg)
            result_list.append(results)

        return result_list

    def _predict_by_feat_single(
            self,
            roi: Tensor,
            cls_score: Tensor,
            bbox_pred: Tensor,
            img_meta: dict,
            rescale: bool = False,
            rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
        """Transform a single image's features extracted from the head into
        bbox results.

        Args:
            roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
                last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
            cls_score (Tensor): Box scores, has shape
                (num_boxes, num_classes + 1).
            bbox_pred (Tensor): Box energies / deltas. has shape
                (num_boxes, num_classes * 4).
            img_meta (dict): image information.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
                Defaults to None

        Returns:
            :obj:`InstanceData`: Detection results of each image\
            Each item usually contains following keys.

                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        results = InstanceData()
        if roi.shape[0] == 0:
            return empty_instances([img_meta],
                                   roi.device,
                                   task_type='bbox',
                                   instance_results=[results],
                                   box_type=self.predict_box_type,
                                   use_box_type=False,
                                   num_classes=self.num_classes,
                                   score_per_cls=rcnn_test_cfg is None)[0]

        # some loss (Seesaw loss..) may have custom activation
        if self.custom_cls_channels:
            scores = self.loss_cls.get_activation(cls_score)
        else:
            scores = F.softmax(
                cls_score, dim=-1) if cls_score is not None else None

        img_shape = img_meta['img_shape']
        num_rois = roi.size(0)
        # bbox_pred would be None in some detector when with_reg is False,
        # e.g. Grid R-CNN.
        if bbox_pred is not None:
            num_classes = 1 if self.reg_class_agnostic else self.num_classes
            roi = roi.repeat_interleave(num_classes, dim=0)
            bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size)
            bboxes = self.bbox_coder.decode(
                roi[..., 1:], bbox_pred, max_shape=img_shape)
        else:
            bboxes = roi[:, 1:].clone()
            if img_shape is not None and bboxes.size(-1) == 4:
                bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
                bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])

        if rescale and bboxes.size(0) > 0:
            assert img_meta.get('scale_factor') is not None
            scale_factor = [1 / s for s in img_meta['scale_factor']]
            bboxes = scale_boxes(bboxes, scale_factor)

        # Get the inside tensor when `bboxes` is a box type
        bboxes = get_box_tensor(bboxes)
        box_dim = bboxes.size(-1)
        bboxes = bboxes.view(num_rois, -1)

        if rcnn_test_cfg is None:
            # This means that it is aug test.
            # It needs to return the raw results without nms.
            results.bboxes = bboxes
            results.scores = scores
        else:
            det_bboxes, det_labels = multiclass_nms(
                bboxes, scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms,
                rcnn_test_cfg.max_per_img, box_dim=box_dim)
            results.bboxes = det_bboxes[:, :-1]
            results.scores = det_bboxes[:, -1]
            results.labels = det_labels
        return results

    def refine_bboxes(self, sampling_results: Union[List[SamplingResult],
                                                    InstanceList],
                      bbox_results: dict,
                      batch_img_metas: List[dict]) -> InstanceList:
        """Refine bboxes during training.

        Args:
            sampling_results (List[:obj:`SamplingResult`] or
                List[:obj:`InstanceData`]): Sampling results.
                :obj:`SamplingResult` is the real sampling results
                calculate from bbox_head, while :obj:`InstanceData` is
                fake sampling results, e.g., in Sparse R-CNN or QueryInst, etc.
            bbox_results (dict): Usually is a dictionary with keys:

                - `cls_score` (Tensor): Classification scores.
                - `bbox_pred` (Tensor): Box energies / deltas.
                - `rois` (Tensor): RoIs with the shape (n, 5) where the first
                  column indicates batch id of each RoI.
                - `bbox_targets` (tuple):  Ground truth for proposals in a
                  single image. Containing the following list of Tensors:
                  (labels, label_weights, bbox_targets, bbox_weights)
            batch_img_metas (List[dict]): List of image information.

        Returns:
            list[:obj:`InstanceData`]: Refined bboxes of each image.

        Example:
            >>> # xdoctest: +REQUIRES(module:kwarray)
            >>> import numpy as np
            >>> from mmdet.models.task_modules.samplers.
            ... sampling_result import random_boxes
            >>> from mmdet.models.task_modules.samplers import SamplingResult
            >>> self = BBoxHead(reg_class_agnostic=True)
            >>> n_roi = 2
            >>> n_img = 4
            >>> scale = 512
            >>> rng = np.random.RandomState(0)
            >>> batch_img_metas = [{'img_shape': (scale, scale)}
            >>>                    for _ in range(n_img)]
            >>> sampling_results = [SamplingResult.random(rng=10)
            ...                     for _ in range(n_img)]
            >>> # Create rois in the expected format
            >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
            >>> img_ids = torch.randint(0, n_img, (n_roi,))
            >>> img_ids = img_ids.float()
            >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
            >>> # Create other args
            >>> labels = torch.randint(0, 81, (scale,)).long()
            >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
            >>> cls_score = torch.randn((scale, 81))
            ... # For each image, pretend random positive boxes are gts
            >>> bbox_targets = (labels, None, None, None)
            ... bbox_results = dict(rois=rois, bbox_pred=bbox_preds,
            ...                     cls_score=cls_score,
            ...                     bbox_targets=bbox_targets)
            >>> bboxes_list = self.refine_bboxes(sampling_results,
            ...                                  bbox_results,
            ...                                  batch_img_metas)
            >>> print(bboxes_list)
        """
        pos_is_gts = [res.pos_is_gt for res in sampling_results]
        # bbox_targets is a tuple
        labels = bbox_results['bbox_targets'][0]
        cls_scores = bbox_results['cls_score']
        rois = bbox_results['rois']
        bbox_preds = bbox_results['bbox_pred']

        if self.custom_activation:
            # TODO: Create a SeesawBBoxHead to simplified logic in BBoxHead
            cls_scores = self.loss_cls.get_activation(cls_scores)
        if cls_scores.numel() == 0:
            return None
        if cls_scores.shape[-1] == self.num_classes + 1:
            # remove background class
            cls_scores = cls_scores[:, :-1]
        elif cls_scores.shape[-1] != self.num_classes:
            raise ValueError('The last dim of `cls_scores` should equal to '
                             '`num_classes` or `num_classes + 1`,'
                             f'but got {cls_scores.shape[-1]}.')
        # Background proposals get the most confident foreground class so
        # they can still be regressed in the next cascade stage.
        labels = torch.where(labels == self.num_classes, cls_scores.argmax(1),
                             labels)

        img_ids = rois[:, 0].long().unique(sorted=True)
        assert img_ids.numel() <= len(batch_img_metas)

        results_list = []
        for i in range(len(batch_img_metas)):
            inds = torch.nonzero(
                rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
            num_rois = inds.numel()

            bboxes_ = rois[inds, 1:]
            label_ = labels[inds]
            bbox_pred_ = bbox_preds[inds]
            img_meta_ = batch_img_metas[i]
            pos_is_gts_ = pos_is_gts[i]

            bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
                                           img_meta_)
            # filter gt bboxes
            pos_keep = 1 - pos_is_gts_
            keep_inds = pos_is_gts_.new_ones(num_rois)
            keep_inds[:len(pos_is_gts_)] = pos_keep

            results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])
            results_list.append(results)

        return results_list

    def regress_by_class(self, priors: Tensor, label: Tensor,
                         bbox_pred: Tensor, img_meta: dict) -> Tensor:
        """Regress the bbox for the predicted class. Used in Cascade R-CNN.

        Args:
            priors (Tensor): Priors from `rpn_head` or last stage
                `bbox_head`, has shape (num_proposals, 4).
            label (Tensor): Only used when `self.reg_class_agnostic`
                is False, has shape (num_proposals, ).
            bbox_pred (Tensor): Regression prediction of
                current stage `bbox_head`. When `self.reg_class_agnostic`
                is False, it has shape (n, num_classes * 4), otherwise
                it has shape (n, 4).
            img_meta (dict): Image meta info.

        Returns:
            Tensor: Regressed bboxes, the same shape as input rois.
        """
        reg_dim = self.bbox_coder.encode_size
        if not self.reg_class_agnostic:
            # Gather the reg_dim deltas belonging to each sample's own class
            # out of the flat (n, num_classes * reg_dim) prediction.
            label = label * reg_dim
            inds = torch.stack([label + i for i in range(reg_dim)], 1)
            bbox_pred = torch.gather(bbox_pred, 1, inds)
        assert bbox_pred.size()[1] == reg_dim

        max_shape = img_meta['img_shape']
        regressed_bboxes = self.bbox_coder.decode(
            priors, bbox_pred, max_shape=max_shape)
        return regressed_bboxes
32,325
44.593794
79
py
ERD
ERD-main/mmdet/models/roi_heads/bbox_heads/sabl_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Sequence, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.layers import multiclass_nms from mmdet.models.losses import accuracy from mmdet.models.task_modules import SamplingResult from mmdet.models.utils import multi_apply from mmdet.registry import MODELS, TASK_UTILS from mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig from .bbox_head import BBoxHead @MODELS.register_module() class SABLHead(BBoxHead): """Side-Aware Boundary Localization (SABL) for RoI-Head. Side-Aware features are extracted by conv layers with an attention mechanism. Boundary Localization with Bucketing and Bucketing Guided Rescoring are implemented in BucketingBBoxCoder. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: cls_in_channels (int): Input channels of cls RoI feature. \ Defaults to 256. reg_in_channels (int): Input channels of reg RoI feature. \ Defaults to 256. roi_feat_size (int): Size of RoI features. Defaults to 7. reg_feat_up_ratio (int): Upsample ratio of reg features. \ Defaults to 2. reg_pre_kernel (int): Kernel of 2D conv layers before \ attention pooling. Defaults to 3. reg_post_kernel (int): Kernel of 1D conv layers after \ attention pooling. Defaults to 3. reg_pre_num (int): Number of pre convs. Defaults to 2. reg_post_num (int): Number of post convs. Defaults to 1. num_classes (int): Number of classes in dataset. Defaults to 80. cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024. reg_offset_out_channels (int): Hidden and output channel \ of reg offset branch. Defaults to 256. reg_cls_out_channels (int): Hidden and output channel \ of reg cls branch. Defaults to 256. num_cls_fcs (int): Number of fcs for cls branch. 
Defaults to 1. num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0. reg_class_agnostic (bool): Class agnostic regression or not. \ Defaults to True. norm_cfg (dict): Config of norm layers. Defaults to None. bbox_coder (dict): Config of bbox coder. Defaults 'BucketingBBoxCoder'. loss_cls (dict): Config of classification loss. loss_bbox_cls (dict): Config of classification loss for bbox branch. loss_bbox_reg (dict): Config of regression loss for bbox branch. init_cfg (dict or list[dict], optional): Initialization config dict. Defaults to None. """ def __init__(self, num_classes: int, cls_in_channels: int = 256, reg_in_channels: int = 256, roi_feat_size: int = 7, reg_feat_up_ratio: int = 2, reg_pre_kernel: int = 3, reg_post_kernel: int = 3, reg_pre_num: int = 2, reg_post_num: int = 1, cls_out_channels: int = 1024, reg_offset_out_channels: int = 256, reg_cls_out_channels: int = 256, num_cls_fcs: int = 1, num_reg_fcs: int = 0, reg_class_agnostic: bool = True, norm_cfg: OptConfigType = None, bbox_coder: ConfigType = dict( type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), loss_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox_cls: ConfigType = dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox_reg: ConfigType = dict( type='SmoothL1Loss', beta=0.1, loss_weight=1.0), init_cfg: OptMultiConfig = None) -> None: super(BBoxHead, self).__init__(init_cfg=init_cfg) self.cls_in_channels = cls_in_channels self.reg_in_channels = reg_in_channels self.roi_feat_size = roi_feat_size self.reg_feat_up_ratio = int(reg_feat_up_ratio) self.num_buckets = bbox_coder['num_buckets'] assert self.reg_feat_up_ratio // 2 >= 1 self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio assert self.up_reg_feat_size == bbox_coder['num_buckets'] self.reg_pre_kernel = reg_pre_kernel self.reg_post_kernel = reg_post_kernel self.reg_pre_num = reg_pre_num self.reg_post_num = reg_post_num self.num_classes = 
num_classes self.cls_out_channels = cls_out_channels self.reg_offset_out_channels = reg_offset_out_channels self.reg_cls_out_channels = reg_cls_out_channels self.num_cls_fcs = num_cls_fcs self.num_reg_fcs = num_reg_fcs self.reg_class_agnostic = reg_class_agnostic assert self.reg_class_agnostic self.norm_cfg = norm_cfg self.bbox_coder = TASK_UTILS.build(bbox_coder) self.loss_cls = MODELS.build(loss_cls) self.loss_bbox_cls = MODELS.build(loss_bbox_cls) self.loss_bbox_reg = MODELS.build(loss_bbox_reg) self.cls_fcs = self._add_fc_branch(self.num_cls_fcs, self.cls_in_channels, self.roi_feat_size, self.cls_out_channels) self.side_num = int(np.ceil(self.num_buckets / 2)) if self.reg_feat_up_ratio > 1: self.upsample_x = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.upsample_y = nn.ConvTranspose1d( reg_in_channels, reg_in_channels, self.reg_feat_up_ratio, stride=self.reg_feat_up_ratio) self.reg_pre_convs = nn.ModuleList() for i in range(self.reg_pre_num): reg_pre_conv = ConvModule( reg_in_channels, reg_in_channels, kernel_size=reg_pre_kernel, padding=reg_pre_kernel // 2, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_pre_convs.append(reg_pre_conv) self.reg_post_conv_xs = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_x = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(1, reg_post_kernel), padding=(0, reg_post_kernel // 2), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_xs.append(reg_post_conv_x) self.reg_post_conv_ys = nn.ModuleList() for i in range(self.reg_post_num): reg_post_conv_y = ConvModule( reg_in_channels, reg_in_channels, kernel_size=(reg_post_kernel, 1), padding=(reg_post_kernel // 2, 0), norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')) self.reg_post_conv_ys.append(reg_post_conv_y) self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1) self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1) self.fc_cls = nn.Linear(self.cls_out_channels, 
self.num_classes + 1) self.relu = nn.ReLU(inplace=True) self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_cls_out_channels) self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs, self.reg_in_channels, 1, self.reg_offset_out_channels) self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1) self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1) if init_cfg is None: self.init_cfg = [ dict( type='Xavier', layer='Linear', distribution='uniform', override=[ dict(type='Normal', name='reg_conv_att_x', std=0.01), dict(type='Normal', name='reg_conv_att_y', std=0.01), dict(type='Normal', name='fc_reg_cls', std=0.01), dict(type='Normal', name='fc_cls', std=0.01), dict(type='Normal', name='fc_reg_offset', std=0.001) ]) ] if self.reg_feat_up_ratio > 1: self.init_cfg += [ dict( type='Kaiming', distribution='normal', override=[ dict(name='upsample_x'), dict(name='upsample_y') ]) ] def _add_fc_branch(self, num_branch_fcs: int, in_channels: int, roi_feat_size: int, fc_out_channels: int) -> nn.ModuleList: """build fc layers.""" in_channels = in_channels * roi_feat_size * roi_feat_size branch_fcs = nn.ModuleList() for i in range(num_branch_fcs): fc_in_channels = (in_channels if i == 0 else fc_out_channels) branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels)) return branch_fcs def cls_forward(self, cls_x: Tensor) -> Tensor: """forward of classification fc layers.""" cls_x = cls_x.view(cls_x.size(0), -1) for fc in self.cls_fcs: cls_x = self.relu(fc(cls_x)) cls_score = self.fc_cls(cls_x) return cls_score def attention_pool(self, reg_x: Tensor) -> tuple: """Extract direction-specific features fx and fy with attention methanism.""" reg_fx = reg_x reg_fy = reg_x reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid() reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid() reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2) reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3) reg_fx = (reg_fx * reg_fx_att).sum(dim=2) 
reg_fy = (reg_fy * reg_fy_att).sum(dim=3) return reg_fx, reg_fy def side_aware_feature_extractor(self, reg_x: Tensor) -> tuple: """Refine and extract side-aware features without split them.""" for reg_pre_conv in self.reg_pre_convs: reg_x = reg_pre_conv(reg_x) reg_fx, reg_fy = self.attention_pool(reg_x) if self.reg_post_num > 0: reg_fx = reg_fx.unsqueeze(2) reg_fy = reg_fy.unsqueeze(3) for i in range(self.reg_post_num): reg_fx = self.reg_post_conv_xs[i](reg_fx) reg_fy = self.reg_post_conv_ys[i](reg_fy) reg_fx = reg_fx.squeeze(2) reg_fy = reg_fy.squeeze(3) if self.reg_feat_up_ratio > 1: reg_fx = self.relu(self.upsample_x(reg_fx)) reg_fy = self.relu(self.upsample_y(reg_fy)) reg_fx = torch.transpose(reg_fx, 1, 2) reg_fy = torch.transpose(reg_fy, 1, 2) return reg_fx.contiguous(), reg_fy.contiguous() def reg_pred(self, x: Tensor, offset_fcs: nn.ModuleList, cls_fcs: nn.ModuleList) -> tuple: """Predict bucketing estimation (cls_pred) and fine regression (offset pred) with side-aware features.""" x_offset = x.view(-1, self.reg_in_channels) x_cls = x.view(-1, self.reg_in_channels) for fc in offset_fcs: x_offset = self.relu(fc(x_offset)) for fc in cls_fcs: x_cls = self.relu(fc(x_cls)) offset_pred = self.fc_reg_offset(x_offset) cls_pred = self.fc_reg_cls(x_cls) offset_pred = offset_pred.view(x.size(0), -1) cls_pred = cls_pred.view(x.size(0), -1) return offset_pred, cls_pred def side_aware_split(self, feat: Tensor) -> Tensor: """Split side-aware features aligned with orders of bucketing targets.""" l_end = int(np.ceil(self.up_reg_feat_size / 2)) r_start = int(np.floor(self.up_reg_feat_size / 2)) feat_fl = feat[:, :l_end] feat_fr = feat[:, r_start:].flip(dims=(1, )) feat_fl = feat_fl.contiguous() feat_fr = feat_fr.contiguous() feat = torch.cat([feat_fl, feat_fr], dim=-1) return feat def bbox_pred_split(self, bbox_pred: tuple, num_proposals_per_img: Sequence[int]) -> tuple: """Split batch bbox prediction back to each image.""" bucket_cls_preds, bucket_offset_preds = bbox_pred 
bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0) bucket_offset_preds = bucket_offset_preds.split( num_proposals_per_img, 0) bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds)) return bbox_pred def reg_forward(self, reg_x: Tensor) -> tuple: """forward of regression branch.""" outs = self.side_aware_feature_extractor(reg_x) edge_offset_preds = [] edge_cls_preds = [] reg_fx = outs[0] reg_fy = outs[1] offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs, self.reg_cls_fcs) offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs, self.reg_cls_fcs) offset_pred_x = self.side_aware_split(offset_pred_x) offset_pred_y = self.side_aware_split(offset_pred_y) cls_pred_x = self.side_aware_split(cls_pred_x) cls_pred_y = self.side_aware_split(cls_pred_y) edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1) edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1) return edge_cls_preds, edge_offset_preds def forward(self, x: Tensor) -> tuple: """Forward features from the upstream network.""" bbox_pred = self.reg_forward(x) cls_score = self.cls_forward(x) return cls_score, bbox_pred def get_targets(self, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, concat: bool = True) -> tuple: """Calculate the ground truth for all samples in a batch according to the sampling_results.""" pos_proposals = [res.pos_bboxes for res in sampling_results] neg_proposals = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels = [res.pos_gt_labels for res in sampling_results] cls_reg_targets = self.bucket_target( pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels, rcnn_train_cfg, concat=concat) (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) = cls_reg_targets return (labels, label_weights, (bucket_cls_targets, bucket_offset_targets), (bucket_cls_weights, 
bucket_offset_weights)) def bucket_target(self, pos_proposals_list: list, neg_proposals_list: list, pos_gt_bboxes_list: list, pos_gt_labels_list: list, rcnn_train_cfg: ConfigDict, concat: bool = True) -> tuple: """Compute bucketing estimation targets and fine regression targets for a batch of images.""" (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) = multi_apply( self._bucket_target_single, pos_proposals_list, neg_proposals_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bucket_cls_targets = torch.cat(bucket_cls_targets, 0) bucket_cls_weights = torch.cat(bucket_cls_weights, 0) bucket_offset_targets = torch.cat(bucket_offset_targets, 0) bucket_offset_weights = torch.cat(bucket_offset_weights, 0) return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def _bucket_target_single(self, pos_proposals: Tensor, neg_proposals: Tensor, pos_gt_bboxes: Tensor, pos_gt_labels: Tensor, cfg: ConfigDict) -> tuple: """Compute bucketing estimation targets and fine regression targets for a single image. Args: pos_proposals (Tensor): positive proposals of a single image, Shape (n_pos, 4) neg_proposals (Tensor): negative proposals of a single image, Shape (n_neg, 4). pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals of a single image, Shape (n_pos, 4). pos_gt_labels (Tensor): gt labels assigned to positive proposals of a single image, Shape (n_pos, ). cfg (dict): Config of calculating targets Returns: tuple: - labels (Tensor): Labels in a single image. Shape (n,). - label_weights (Tensor): Label weights in a single image. Shape (n,) - bucket_cls_targets (Tensor): Bucket cls targets in a single image. Shape (n, num_buckets*2). - bucket_cls_weights (Tensor): Bucket cls weights in a single image. Shape (n, num_buckets*2). 
- bucket_offset_targets (Tensor): Bucket offset targets in a single image. Shape (n, num_buckets*2). - bucket_offset_targets (Tensor): Bucket offset weights in a single image. Shape (n, num_buckets*2). """ num_pos = pos_proposals.size(0) num_neg = neg_proposals.size(0) num_samples = num_pos + num_neg labels = pos_gt_bboxes.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_proposals.new_zeros(num_samples) bucket_cls_targets = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_cls_weights = pos_proposals.new_zeros(num_samples, 4 * self.side_num) bucket_offset_targets = pos_proposals.new_zeros( num_samples, 4 * self.side_num) bucket_offset_weights = pos_proposals.new_zeros( num_samples, 4 * self.side_num) if num_pos > 0: labels[:num_pos] = pos_gt_labels label_weights[:num_pos] = 1.0 (pos_bucket_offset_targets, pos_bucket_offset_weights, pos_bucket_cls_targets, pos_bucket_cls_weights) = self.bbox_coder.encode( pos_proposals, pos_gt_bboxes) bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights if num_neg > 0: label_weights[-num_neg:] = 1.0 return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, bucket_offset_targets, bucket_offset_weights) def loss(self, cls_score: Tensor, bbox_pred: Tuple[Tensor, Tensor], rois: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tuple[Tensor, Tensor], bbox_weights: Tuple[Tensor, Tensor], reduction_override: Optional[str] = None) -> dict: """Calculate the loss based on the network predictions and targets. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, num_classes) bbox_pred (Tensor): A tuple of regression prediction results containing `bucket_cls_preds and` `bucket_offset_preds`. 
rois (Tensor): RoIs with the shape (batch_size * num_proposals_single_image, 5) where the first column indicates batch id of each RoI. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, ). label_weights (Tensor): Labels_weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, ). bbox_targets (Tuple[Tensor, Tensor]): A tuple of regression target containing `bucket_cls_targets` and `bucket_offset_targets`. the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. bbox_weights (Tuple[Tensor, Tensor]): A tuple of regression weights containing `bucket_cls_weights` and `bucket_offset_weights`. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Defaults to None, Returns: dict: A dictionary of loss. """ losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) 
losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['acc'] = accuracy(cls_score, labels) if bbox_pred is not None: bucket_cls_preds, bucket_offset_preds = bbox_pred bucket_cls_targets, bucket_offset_targets = bbox_targets bucket_cls_weights, bucket_offset_weights = bbox_weights # edge cls bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num) bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num) bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num) losses['loss_bbox_cls'] = self.loss_bbox_cls( bucket_cls_preds, bucket_cls_targets, bucket_cls_weights, avg_factor=bucket_cls_targets.size(0), reduction_override=reduction_override) losses['loss_bbox_reg'] = self.loss_bbox_reg( bucket_offset_preds, bucket_offset_targets, bucket_offset_weights, avg_factor=bucket_offset_targets.size(0), reduction_override=reduction_override) return losses def _predict_by_feat_single( self, roi: Tensor, cls_score: Tensor, bbox_pred: Tuple[Tensor, Tensor], img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tuple[Tensor, Tensor]): Box cls preds and offset preds. img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None Returns: :obj:`InstanceData`: Detection results of each image Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). 
- bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ results = InstanceData() if isinstance(cls_score, list): cls_score = sum(cls_score) / float(len(cls_score)) scores = F.softmax(cls_score, dim=1) if cls_score is not None else None img_shape = img_meta['img_shape'] if bbox_pred is not None: bboxes, confidences = self.bbox_coder.decode( roi[:, 1:], bbox_pred, img_shape) else: bboxes = roi[:, 1:].clone() confidences = None if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: results.bboxes = bboxes results.scores = scores else: det_bboxes, det_labels = multiclass_nms( bboxes, scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img, score_factors=confidences) results.bboxes = det_bboxes[:, :4] results.scores = det_bboxes[:, -1] results.labels = det_labels return results def refine_bboxes(self, sampling_results: List[SamplingResult], bbox_results: dict, batch_img_metas: List[dict]) -> InstanceList: """Refine bboxes during training. Args: sampling_results (List[:obj:`SamplingResult`]): Sampling results. bbox_results (dict): Usually is a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `rois` (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. - `bbox_targets` (tuple): Ground truth for proposals in a single image. Containing the following list of Tensors: (labels, label_weights, bbox_targets, bbox_weights) batch_img_metas (List[dict]): List of image information. Returns: list[:obj:`InstanceData`]: Refined bboxes of each image. 
""" pos_is_gts = [res.pos_is_gt for res in sampling_results] # bbox_targets is a tuple labels = bbox_results['bbox_targets'][0] cls_scores = bbox_results['cls_score'] rois = bbox_results['rois'] bbox_preds = bbox_results['bbox_pred'] if cls_scores.numel() == 0: return None labels = torch.where(labels == self.num_classes, cls_scores[:, :-1].argmax(1), labels) img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() <= len(batch_img_metas) results_list = [] for i in range(len(batch_img_metas)): inds = torch.nonzero( rois[:, 0] == i, as_tuple=False).squeeze(dim=1) num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] edge_cls_preds, edge_offset_preds = bbox_preds edge_cls_preds_ = edge_cls_preds[inds] edge_offset_preds_ = edge_offset_preds[inds] bbox_pred_ = (edge_cls_preds_, edge_offset_preds_) img_meta_ = batch_img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)]) results_list.append(results) return results_list def regress_by_class(self, rois: Tensor, label: Tensor, bbox_pred: tuple, img_meta: dict) -> Tensor: """Regress the bbox for the predicted class. Used in Cascade R-CNN. Args: rois (Tensor): shape (n, 4) or (n, 5) label (Tensor): shape (n, ) bbox_pred (Tuple[Tensor]): shape [(n, num_buckets *2), \ (n, num_buckets *2)] img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. """ assert rois.size(1) == 4 or rois.size(1) == 5 if rois.size(1) == 4: new_rois, _ = self.bbox_coder.decode(rois, bbox_pred, img_meta['img_shape']) else: bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred, img_meta['img_shape']) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois
30,500
43.527007
79
py
ERD
ERD-main/mmdet/models/roi_heads/bbox_heads/dii_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List import torch import torch.nn as nn from mmcv.cnn import build_activation_layer, build_norm_layer from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention from mmengine.config import ConfigDict from mmengine.model import bias_init_with_prob from torch import Tensor from mmdet.models.losses import accuracy from mmdet.models.task_modules import SamplingResult from mmdet.models.utils import multi_apply from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, reduce_mean from .bbox_head import BBoxHead @MODELS.register_module() class DIIHead(BBoxHead): r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_ Args: num_classes (int): Number of class in dataset. Defaults to 80. num_ffn_fcs (int): The number of fully-connected layers in FFNs. Defaults to 2. num_heads (int): The hidden dimension of FFNs. Defaults to 8. num_cls_fcs (int): The number of fully-connected layers in classification subnet. Defaults to 1. num_reg_fcs (int): The number of fully-connected layers in regression subnet. Defaults to 3. feedforward_channels (int): The hidden dimension of FFNs. Defaults to 2048 in_channels (int): Hidden_channels of MultiheadAttention. Defaults to 256. dropout (float): Probability of drop the channel. Defaults to 0.0 ffn_act_cfg (:obj:`ConfigDict` or dict): The activation config for FFNs. dynamic_conv_cfg (:obj:`ConfigDict` or dict): The convolution config for DynamicConv. loss_iou (:obj:`ConfigDict` or dict): The config for iou or giou loss. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict]): Initialization config dict. Defaults to None. 
""" def __init__(self, num_classes: int = 80, num_ffn_fcs: int = 2, num_heads: int = 8, num_cls_fcs: int = 1, num_reg_fcs: int = 3, feedforward_channels: int = 2048, in_channels: int = 256, dropout: float = 0.0, ffn_act_cfg: ConfigType = dict(type='ReLU', inplace=True), dynamic_conv_cfg: ConfigType = dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=7, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_iou: ConfigType = dict(type='GIoULoss', loss_weight=2.0), init_cfg: OptConfigType = None, **kwargs) -> None: assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super().__init__( num_classes=num_classes, reg_decoded_bbox=True, reg_class_agnostic=True, init_cfg=init_cfg, **kwargs) self.loss_iou = MODELS.build(loss_iou) self.in_channels = in_channels self.fp16_enabled = False self.attention = MultiheadAttention(in_channels, num_heads, dropout) self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1] self.instance_interactive_conv = MODELS.build(dynamic_conv_cfg) self.instance_interactive_conv_dropout = nn.Dropout(dropout) self.instance_interactive_conv_norm = build_norm_layer( dict(type='LN'), in_channels)[1] self.ffn = FFN( in_channels, feedforward_channels, num_ffn_fcs, act_cfg=ffn_act_cfg, dropout=dropout) self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1] self.cls_fcs = nn.ModuleList() for _ in range(num_cls_fcs): self.cls_fcs.append( nn.Linear(in_channels, in_channels, bias=False)) self.cls_fcs.append( build_norm_layer(dict(type='LN'), in_channels)[1]) self.cls_fcs.append( build_activation_layer(dict(type='ReLU', inplace=True))) # over load the self.fc_cls in BBoxHead if self.loss_cls.use_sigmoid: self.fc_cls = nn.Linear(in_channels, self.num_classes) else: self.fc_cls = nn.Linear(in_channels, self.num_classes + 1) self.reg_fcs = nn.ModuleList() for _ in range(num_reg_fcs): self.reg_fcs.append( 
nn.Linear(in_channels, in_channels, bias=False)) self.reg_fcs.append( build_norm_layer(dict(type='LN'), in_channels)[1]) self.reg_fcs.append( build_activation_layer(dict(type='ReLU', inplace=True))) # over load the self.fc_cls in BBoxHead self.fc_reg = nn.Linear(in_channels, 4) assert self.reg_class_agnostic, 'DIIHead only ' \ 'suppport `reg_class_agnostic=True` ' assert self.reg_decoded_bbox, 'DIIHead only ' \ 'suppport `reg_decoded_bbox=True`' def init_weights(self) -> None: """Use xavier initialization for all weight parameter and set classification head bias as a specific value when use focal loss.""" super().init_weights() for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) else: # adopt the default initialization for # the weight and bias of the layer norm pass if self.loss_cls.use_sigmoid: bias_init = bias_init_with_prob(0.01) nn.init.constant_(self.fc_cls.bias, bias_init) def forward(self, roi_feat: Tensor, proposal_feat: Tensor) -> tuple: """Forward function of Dynamic Instance Interactive Head. Args: roi_feat (Tensor): Roi-pooling features with shape (batch_size*num_proposals, feature_dimensions, pooling_h , pooling_w). proposal_feat (Tensor): Intermediate feature get from diihead in last stage, has shape (batch_size, num_proposals, feature_dimensions) Returns: tuple[Tensor]: Usually a tuple of classification scores and bbox prediction and a intermediate feature. - cls_scores (Tensor): Classification scores for all proposals, has shape (batch_size, num_proposals, num_classes). - bbox_preds (Tensor): Box energies / deltas for all proposals, has shape (batch_size, num_proposals, 4). - obj_feat (Tensor): Object feature before classification and regression subnet, has shape (batch_size, num_proposal, feature_dimensions). - attn_feats (Tensor): Intermediate feature. 
""" N, num_proposals = proposal_feat.shape[:2] # Self attention proposal_feat = proposal_feat.permute(1, 0, 2) proposal_feat = self.attention_norm(self.attention(proposal_feat)) attn_feats = proposal_feat.permute(1, 0, 2) # instance interactive proposal_feat = attn_feats.reshape(-1, self.in_channels) proposal_feat_iic = self.instance_interactive_conv( proposal_feat, roi_feat) proposal_feat = proposal_feat + self.instance_interactive_conv_dropout( proposal_feat_iic) obj_feat = self.instance_interactive_conv_norm(proposal_feat) # FFN obj_feat = self.ffn_norm(self.ffn(obj_feat)) cls_feat = obj_feat reg_feat = obj_feat for cls_layer in self.cls_fcs: cls_feat = cls_layer(cls_feat) for reg_layer in self.reg_fcs: reg_feat = reg_layer(reg_feat) cls_score = self.fc_cls(cls_feat).view( N, num_proposals, self.num_classes if self.loss_cls.use_sigmoid else self.num_classes + 1) bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4) return cls_score, bbox_delta, obj_feat.view( N, num_proposals, self.in_channels), attn_feats def loss_and_target(self, cls_score: Tensor, bbox_pred: Tensor, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigType, imgs_whwh: Tensor, concat: bool = True, reduction_override: str = None) -> dict: """Calculate the loss based on the features extracted by the DIIHead. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, num_classes) bbox_pred (Tensor): Regression prediction results, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. imgs_whwh (Tensor): imgs_whwh (Tensor): Tensor with\ shape (batch_size, num_proposals, 4), the last dimension means [img_width,img_height, img_width, img_height]. 
concat (bool): Whether to concatenate the results of all the images in a single batch. Defaults to True. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Defaults to None. Returns: dict: A dictionary of loss and targets components. The targets are only used for cascade rcnn. """ cls_reg_targets = self.get_targets( sampling_results=sampling_results, rcnn_train_cfg=rcnn_train_cfg, concat=concat) (labels, label_weights, bbox_targets, bbox_weights) = cls_reg_targets losses = dict() bg_class_ind = self.num_classes # note in spare rcnn num_gt == num_pos pos_inds = (labels >= 0) & (labels < bg_class_ind) num_pos = pos_inds.sum().float() avg_factor = reduce_mean(num_pos) if cls_score is not None: if cls_score.numel() > 0: losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['pos_acc'] = accuracy(cls_score[pos_inds], labels[pos_inds]) if bbox_pred is not None: # 0~self.num_classes-1 are FG, self.num_classes is BG # do not perform bounding box regression for BG anymore. 
if pos_inds.any(): pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] losses['loss_bbox'] = self.loss_bbox( pos_bbox_pred / imgs_whwh, bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh, bbox_weights[pos_inds.type(torch.bool)], avg_factor=avg_factor) losses['loss_iou'] = self.loss_iou( pos_bbox_pred, bbox_targets[pos_inds.type(torch.bool)], bbox_weights[pos_inds.type(torch.bool)], avg_factor=avg_factor) else: losses['loss_bbox'] = bbox_pred.sum() * 0 losses['loss_iou'] = bbox_pred.sum() * 0 return dict(loss_bbox=losses, bbox_targets=cls_reg_targets) def _get_targets_single(self, pos_inds: Tensor, neg_inds: Tensor, pos_priors: Tensor, neg_priors: Tensor, pos_gt_bboxes: Tensor, pos_gt_labels: Tensor, cfg: ConfigDict) -> tuple: """Calculate the ground truth for proposals in the single image according to the sampling results. Almost the same as the implementation in `bbox_head`, we add pos_inds and neg_inds to select positive and negative samples instead of selecting the first num_pos as positive samples. Args: pos_inds (Tensor): The length is equal to the positive sample numbers contain all index of the positive sample in the origin proposal set. neg_inds (Tensor): The length is equal to the negative sample numbers contain all index of the negative sample in the origin proposal set. pos_priors (Tensor): Contains all the positive boxes, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. neg_priors (Tensor): Contains all the negative boxes, has shape (num_neg, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_bboxes (Tensor): Contains gt_boxes for all positive samples, has shape (num_pos, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. pos_gt_labels (Tensor): Contains gt_labels for all positive samples, has shape (num_pos, ). cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. 
Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following Tensors: - labels(Tensor): Gt_labels for all proposals, has shape (num_proposals,). - label_weights(Tensor): Labels_weights for all proposals, has shape (num_proposals,). - bbox_targets(Tensor):Regression target for all proposals, has shape (num_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights(Tensor):Regression weights for all proposals, has shape (num_proposals, 4). """ num_pos = pos_priors.size(0) num_neg = neg_priors.size(0) num_samples = num_pos + num_neg # original implementation uses new_zeros since BG are set to be 0 # now use empty & fill because BG cat_id = num_classes, # FG cat_id = [0, num_classes-1] labels = pos_priors.new_full((num_samples, ), self.num_classes, dtype=torch.long) label_weights = pos_priors.new_zeros(num_samples) bbox_targets = pos_priors.new_zeros(num_samples, 4) bbox_weights = pos_priors.new_zeros(num_samples, 4) if num_pos > 0: labels[pos_inds] = pos_gt_labels pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight label_weights[pos_inds] = pos_weight if not self.reg_decoded_bbox: pos_bbox_targets = self.bbox_coder.encode( pos_priors, pos_gt_bboxes) else: pos_bbox_targets = pos_gt_bboxes bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1 if num_neg > 0: label_weights[neg_inds] = 1.0 return labels, label_weights, bbox_targets, bbox_weights def get_targets(self, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, concat: bool = True) -> tuple: """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_targets_single` function. Args: sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. 
concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). """ pos_inds_list = [res.pos_inds for res in sampling_results] neg_inds_list = [res.neg_inds for res in sampling_results] pos_priors_list = [res.pos_priors for res in sampling_results] neg_priors_list = [res.neg_priors for res in sampling_results] pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] labels, label_weights, bbox_targets, bbox_weights = multi_apply( self._get_targets_single, pos_inds_list, neg_inds_list, pos_priors_list, neg_priors_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=rcnn_train_cfg) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights
19,266
44.548463
79
py
ERD
ERD-main/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union

import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from torch import Tensor

from mmdet.registry import MODELS
from .bbox_head import BBoxHead


@MODELS.register_module()
class ConvFCBBoxHead(BBoxHead):
    r"""More general bbox head, with shared conv and fc layers and two optional
    separated branches.

    .. code-block:: none

                                    /-> cls convs -> cls fcs -> cls
        shared convs -> shared fcs
                                    \-> reg convs -> reg fcs -> reg
    """  # noqa: W605

    def __init__(self,
                 num_shared_convs: int = 0,
                 num_shared_fcs: int = 0,
                 num_cls_convs: int = 0,
                 num_cls_fcs: int = 0,
                 num_reg_convs: int = 0,
                 num_reg_fcs: int = 0,
                 conv_out_channels: int = 256,
                 fc_out_channels: int = 1024,
                 conv_cfg: Optional[Union[dict, ConfigDict]] = None,
                 norm_cfg: Optional[Union[dict, ConfigDict]] = None,
                 init_cfg: Optional[Union[dict, ConfigDict]] = None,
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, init_cfg=init_cfg, **kwargs)
        # At least one conv or fc layer must exist somewhere in the head.
        assert (num_shared_convs + num_shared_fcs + num_cls_convs +
                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
        # Branch-specific convs operate on 4D maps, so they cannot follow
        # shared fcs (which flatten the features).
        if num_cls_convs > 0 or num_reg_convs > 0:
            assert num_shared_fcs == 0
        if not self.with_cls:
            assert num_cls_convs == 0 and num_cls_fcs == 0
        if not self.with_reg:
            assert num_reg_convs == 0 and num_reg_fcs == 0
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # add shared convs and fcs
        self.shared_convs, self.shared_fcs, last_layer_dim = \
            self._add_conv_fc_branch(
                self.num_shared_convs, self.num_shared_fcs, self.in_channels,
                True)
        self.shared_out_channels = last_layer_dim

        # add cls specific branch
        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
            self._add_conv_fc_branch(
                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)

        # add reg specific branch
        self.reg_convs, self.reg_fcs, self.reg_last_dim = \
            self._add_conv_fc_branch(
                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)

        # With no shared fcs and no avg pool, the branch fc inputs are still
        # 4D RoI maps, so the flattened spatial area scales the fc input dim.
        if self.num_shared_fcs == 0 and not self.with_avg_pool:
            if self.num_cls_fcs == 0:
                self.cls_last_dim *= self.roi_feat_area
            if self.num_reg_fcs == 0:
                self.reg_last_dim *= self.roi_feat_area

        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels are changed
        if self.with_cls:
            if self.custom_cls_channels:
                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
            else:
                # +1 for the background class.
                cls_channels = self.num_classes + 1
            cls_predictor_cfg_ = self.cls_predictor_cfg.copy()
            cls_predictor_cfg_.update(
                in_features=self.cls_last_dim, out_features=cls_channels)
            self.fc_cls = MODELS.build(cls_predictor_cfg_)
        if self.with_reg:
            box_dim = self.bbox_coder.encode_size
            out_dim_reg = box_dim if self.reg_class_agnostic else \
                box_dim * self.num_classes
            reg_predictor_cfg_ = self.reg_predictor_cfg.copy()
            if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):
                reg_predictor_cfg_.update(
                    in_features=self.reg_last_dim, out_features=out_dim_reg)
            self.fc_reg = MODELS.build(reg_predictor_cfg_)

        if init_cfg is None:
            # when init_cfg is None,
            # It has been set to
            # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],
            #  [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]
            # after `super(ConvFCBBoxHead, self).__init__()`
            # we only need to append additional configuration
            # for `shared_fcs`, `cls_fcs` and `reg_fcs`
            self.init_cfg += [
                dict(
                    type='Xavier',
                    distribution='uniform',
                    override=[
                        dict(name='shared_fcs'),
                        dict(name='cls_fcs'),
                        dict(name='reg_fcs')
                    ])
            ]

    def _add_conv_fc_branch(self,
                            num_branch_convs: int,
                            num_branch_fcs: int,
                            in_channels: int,
                            is_shared: bool = False) -> tuple:
        """Add shared or separable branch.

        convs -> avg pool (optional) -> fcs

        Returns:
            tuple: ``(branch_convs, branch_fcs, last_layer_dim)`` where the
            first two are :obj:`nn.ModuleList` and the last is the output
            channel/feature dimension of the branch.
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (
                    last_layer_dim if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                # fc input is a flattened 4D map, so multiply by spatial area
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (
                    last_layer_dim if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim

    def forward(self, x: Tuple[Tensor]) -> tuple:
        """Forward features from the upstream network.

        Args:
            x (Tensor): RoI features extracted by the RoI extractor, a
                4D tensor of shape (num_rois, in_channels, roi_feat_size,
                roi_feat_size).

        Returns:
            tuple: A tuple of classification scores and bbox prediction.

                - cls_score (Tensor or None): Classification scores with
                  shape (num_rois, num_classes + 1) (or the custom channel
                  number of the loss); None when ``with_cls`` is False.
                - bbox_pred (Tensor or None): Box deltas with shape
                  (num_rois, box_dim * num_classes) (box_dim when
                  class-agnostic); None when ``with_reg`` is False.
        """
        # shared part
        if self.num_shared_convs > 0:
            for conv in self.shared_convs:
                x = conv(x)

        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)

            x = x.flatten(1)

            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # separate branches
        x_cls = x
        x_reg = x

        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if x_cls.dim() > 2:
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))

        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if x_reg.dim() > 2:
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))

        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return cls_score, bbox_pred


@MODELS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
    """ConvFCBBoxHead preset with two shared fc layers and no convs."""

    def __init__(self, fc_out_channels: int = 1024, *args, **kwargs) -> None:
        super().__init__(
            num_shared_convs=0,
            num_shared_fcs=2,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)


@MODELS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
    """ConvFCBBoxHead preset with four shared convs and one shared fc."""

    def __init__(self, fc_out_channels: int = 1024, *args, **kwargs) -> None:
        super().__init__(
            num_shared_convs=4,
            num_shared_fcs=1,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
9,510
37.044
79
py
ERD
ERD-main/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple

import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule, ModuleList
from torch import Tensor

from mmdet.models.backbones.resnet import Bottleneck
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, MultiConfig, OptConfigType, OptMultiConfig
from .bbox_head import BBoxHead


class BasicResBlock(BaseModule):
    """Basic residual block.

    This block is a little different from the block in the ResNet backbone.
    The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.

    Args:
        in_channels (int): Channels of the input feature map.
        out_channels (int): Channels of the output feature map.
        conv_cfg (:obj:`ConfigDict` or dict, optional): The config dict
            for convolution layers.
        norm_cfg (:obj:`ConfigDict` or dict): The config dict for
            normalization layers.
        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
            dict], optional): Initialization config dict. Defaults to None
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: ConfigType = dict(type='BN'),
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)

        # main path
        self.conv1 = ConvModule(
            in_channels,
            in_channels,
            kernel_size=3,
            padding=1,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg)
        self.conv2 = ConvModule(
            in_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        # identity path: 1x1 projection so channel counts match for the add
        self.conv_identity = ConvModule(
            in_channels,
            out_channels,
            kernel_size=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x: Tensor) -> Tensor:
        """Forward function: conv path + projected identity, then ReLU."""
        identity = x

        x = self.conv1(x)
        x = self.conv2(x)

        identity = self.conv_identity(identity)
        out = x + identity

        out = self.relu(out)
        return out


@MODELS.register_module()
class DoubleConvFCBBoxHead(BBoxHead):
    r"""Bbox head used in Double-Head R-CNN

    .. code-block:: none

                                          /-> cls
                      /-> shared convs ->
                                          \-> reg
        roi features
                                          /-> cls
                      \-> shared fc    ->
                                          \-> reg
    """  # noqa: W605

    def __init__(self,
                 num_convs: int = 0,
                 num_fcs: int = 0,
                 conv_out_channels: int = 1024,
                 fc_out_channels: int = 1024,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: ConfigType = dict(type='BN'),
                 init_cfg: MultiConfig = dict(
                     type='Normal',
                     override=[
                         dict(type='Normal', name='fc_cls', std=0.01),
                         dict(type='Normal', name='fc_reg', std=0.001),
                         dict(
                             type='Xavier',
                             name='fc_branch',
                             distribution='uniform')
                     ]),
                 **kwargs) -> None:
        # This head always average-pools the conv branch output.
        kwargs.setdefault('with_avg_pool', True)
        super().__init__(init_cfg=init_cfg, **kwargs)

        assert self.with_avg_pool
        assert num_convs > 0
        assert num_fcs > 0
        self.num_convs = num_convs
        self.num_fcs = num_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # increase the channel of input features
        self.res_block = BasicResBlock(self.in_channels,
                                       self.conv_out_channels)

        # add conv heads
        self.conv_branch = self._add_conv_branch()
        # add fc heads
        self.fc_branch = self._add_fc_branch()

        out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
        self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)

        self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
        self.relu = nn.ReLU()

    def _add_conv_branch(self) -> ModuleList:
        """Add the conv branch which consists of a sequential of conv
        (Bottleneck) layers."""
        branch_convs = ModuleList()
        for i in range(self.num_convs):
            branch_convs.append(
                Bottleneck(
                    inplanes=self.conv_out_channels,
                    planes=self.conv_out_channels // 4,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        return branch_convs

    def _add_fc_branch(self) -> ModuleList:
        """Add the fc branch which consists of a sequential of fc layers."""
        branch_fcs = ModuleList()
        for i in range(self.num_fcs):
            # The first fc consumes the flattened RoI feature map.
            fc_in_channels = (
                self.in_channels *
                self.roi_feat_area if i == 0 else self.fc_out_channels)
            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
        return branch_fcs

    def forward(self, x_cls: Tensor, x_reg: Tensor) -> Tuple[Tensor]:
        """Forward features from the upstream network.

        Args:
            x_cls (Tensor): Classification features of rois
            x_reg (Tensor): Regression features from the upstream network.

        Returns:
            tuple: A tuple of classification scores and bbox prediction.

                - cls_score (Tensor): Classification score predictions of
                  rois. each roi predicts num_classes + 1 channels.
                - bbox_pred (Tensor): BBox deltas predictions of rois. each
                  roi predicts 4 * num_classes channels.
        """
        # conv head
        x_conv = self.res_block(x_reg)

        for conv in self.conv_branch:
            x_conv = conv(x_conv)

        if self.with_avg_pool:
            x_conv = self.avg_pool(x_conv)

        x_conv = x_conv.view(x_conv.size(0), -1)
        bbox_pred = self.fc_reg(x_conv)

        # fc head
        x_fc = x_cls.view(x_cls.size(0), -1)
        for fc in self.fc_branch:
            x_fc = self.relu(fc(x_fc))

        cls_score = self.fc_cls(x_fc)

        return cls_score, bbox_pred
6,769
32.85
79
py
ERD
ERD-main/mmdet/models/roi_heads/shared_heads/res_layer.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmengine.model import BaseModule from mmdet.models.backbones import ResNet from mmdet.models.layers import ResLayer as _ResLayer from mmdet.registry import MODELS @MODELS.register_module() class ResLayer(BaseModule): def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch', norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=False, dcn=None, pretrained=None, init_cfg=None): super(ResLayer, self).__init__(init_cfg) self.norm_eval = norm_eval self.norm_cfg = norm_cfg self.stage = stage self.fp16_enabled = False block, stage_blocks = ResNet.arch_settings[depth] stage_block = stage_blocks[stage] planes = 64 * 2**stage inplanes = 64 * 2**(stage - 1) * block.expansion res_layer = _ResLayer( block, inplanes, planes, stage_block, stride=stride, dilation=dilation, style=style, with_cp=with_cp, norm_cfg=self.norm_cfg, dcn=dcn) self.add_module(f'layer{stage + 1}', res_layer) assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is a deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] else: raise TypeError('pretrained must be a str or None') def forward(self, x): res_layer = getattr(self, f'layer{self.stage + 1}') out = res_layer(x) return out def train(self, mode=True): super(ResLayer, self).train(mode) if self.norm_eval: for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval()
2,545
30.825
76
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/grid_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType


@MODELS.register_module()
class GridHead(BaseModule):
    """Implementation of `Grid Head <https://arxiv.org/abs/1811.12030>`_

    Args:
        grid_points (int): The number of grid points. Defaults to 9.
        num_convs (int): The number of convolution layers. Defaults to 8.
        roi_feat_size (int): RoI feature size. Default to 14.
        in_channels (int): The channel number of inputs features.
            Defaults to 256.
        conv_kernel_size (int): The kernel size of convolution layers.
            Defaults to 3.
        point_feat_channels (int): The number of channels of each point
            features. Defaults to 64.
        class_agnostic (bool): Whether use class agnostic classification.
            If so, the output channels of logits will be 1. Defaults to False.
        loss_grid (:obj:`ConfigDict` or dict): Config of grid loss.
        conv_cfg (:obj:`ConfigDict` or dict, optional) dictionary to
            construct and config conv layer.
        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
            config norm layer.
        init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
            dict]): Initialization config dict.
    """

    def __init__(
        self,
        grid_points: int = 9,
        num_convs: int = 8,
        roi_feat_size: int = 14,
        in_channels: int = 256,
        conv_kernel_size: int = 3,
        point_feat_channels: int = 64,
        deconv_kernel_size: int = 4,
        class_agnostic: bool = False,
        loss_grid: ConfigType = dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15),
        conv_cfg: OptConfigType = None,
        norm_cfg: ConfigType = dict(type='GN', num_groups=36),
        init_cfg: MultiConfig = [
            dict(type='Kaiming', layer=['Conv2d', 'Linear']),
            dict(
                type='Normal',
                layer='ConvTranspose2d',
                std=0.001,
                override=dict(
                    type='Normal',
                    name='deconv2',
                    std=0.001,
                    # bias init makes initial sigmoid output ~0.01
                    bias=-np.log(0.99 / 0.01)))
        ]
    ) -> None:
        super().__init__(init_cfg=init_cfg)
        self.grid_points = grid_points
        self.num_convs = num_convs
        self.roi_feat_size = roi_feat_size
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.point_feat_channels = point_feat_channels
        # One feature group of `point_feat_channels` per grid point.
        self.conv_out_channels = self.point_feat_channels * self.grid_points
        self.class_agnostic = class_agnostic
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
            assert self.conv_out_channels % norm_cfg['num_groups'] == 0

        assert self.grid_points >= 4
        self.grid_size = int(np.sqrt(self.grid_points))
        if self.grid_size * self.grid_size != self.grid_points:
            raise ValueError('grid_points must be a square number')

        # the predicted heatmap is half of whole_map_size
        if not isinstance(self.roi_feat_size, int):
            raise ValueError('Only square RoIs are supporeted in Grid R-CNN')
        self.whole_map_size = self.roi_feat_size * 4

        # compute point-wise sub-regions
        self.sub_regions = self.calc_sub_regions()

        self.convs = []
        for i in range(self.num_convs):
            in_channels = (
                self.in_channels if i == 0 else self.conv_out_channels)
            # first conv downsamples 2x
            stride = 2 if i == 0 else 1
            padding = (self.conv_kernel_size - 1) // 2
            self.convs.append(
                ConvModule(
                    in_channels,
                    self.conv_out_channels,
                    self.conv_kernel_size,
                    stride=stride,
                    padding=padding,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=True))
        self.convs = nn.Sequential(*self.convs)

        # Two grouped transposed convs upsample 4x in total; groups keep
        # each grid point's feature slice independent.
        self.deconv1 = nn.ConvTranspose2d(
            self.conv_out_channels,
            self.conv_out_channels,
            kernel_size=deconv_kernel_size,
            stride=2,
            padding=(deconv_kernel_size - 2) // 2,
            groups=grid_points)
        self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
        self.deconv2 = nn.ConvTranspose2d(
            self.conv_out_channels,
            grid_points,
            kernel_size=deconv_kernel_size,
            stride=2,
            padding=(deconv_kernel_size - 2) // 2,
            groups=grid_points)

        # find the 4-neighbor of each grid point
        self.neighbor_points = []
        grid_size = self.grid_size
        for i in range(grid_size):  # i-th column
            for j in range(grid_size):  # j-th row
                neighbors = []
                if i > 0:  # left: (i - 1, j)
                    neighbors.append((i - 1) * grid_size + j)
                if j > 0:  # up: (i, j - 1)
                    neighbors.append(i * grid_size + j - 1)
                if j < grid_size - 1:  # down: (i, j + 1)
                    neighbors.append(i * grid_size + j + 1)
                if i < grid_size - 1:  # right: (i + 1, j)
                    neighbors.append((i + 1) * grid_size + j)
                self.neighbor_points.append(tuple(neighbors))
        # total edges in the grid
        self.num_edges = sum([len(p) for p in self.neighbor_points])

        self.forder_trans = nn.ModuleList()  # first-order feature transition
        self.sorder_trans = nn.ModuleList()  # second-order feature transition
        for neighbors in self.neighbor_points:
            fo_trans = nn.ModuleList()
            so_trans = nn.ModuleList()
            for _ in range(len(neighbors)):
                # each transition module consists of a 5x5 depth-wise conv and
                # 1x1 conv.
                fo_trans.append(
                    nn.Sequential(
                        nn.Conv2d(
                            self.point_feat_channels,
                            self.point_feat_channels,
                            5,
                            stride=1,
                            padding=2,
                            groups=self.point_feat_channels),
                        nn.Conv2d(self.point_feat_channels,
                                  self.point_feat_channels, 1)))
                so_trans.append(
                    nn.Sequential(
                        nn.Conv2d(
                            self.point_feat_channels,
                            self.point_feat_channels,
                            5,
                            1,
                            2,
                            groups=self.point_feat_channels),
                        nn.Conv2d(self.point_feat_channels,
                                  self.point_feat_channels, 1)))
        self.forder_trans.append(fo_trans)
        self.sorder_trans.append(so_trans)

        self.loss_grid = MODELS.build(loss_grid)

    def forward(self, x: Tensor) -> Dict[str, Tensor]:
        """forward function of ``GridHead``.

        Args:
            x (Tensor): RoI features, has shape
                (num_rois, num_channels, roi_feat_size, roi_feat_size).

        Returns:
            Dict[str, Tensor]: Return a dict including fused and unfused
            heatmap.
        """
        assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
        # RoI feature transformation, downsample 2x
        x = self.convs(x)

        c = self.point_feat_channels
        # first-order fusion: each point aggregates transformed features
        # from its 4-neighbors
        x_fo = [None for _ in range(self.grid_points)]
        for i, points in enumerate(self.neighbor_points):
            x_fo[i] = x[:, i * c:(i + 1) * c]
            for j, point_idx in enumerate(points):
                x_fo[i] = x_fo[i] + self.forder_trans[i][j](
                    x[:, point_idx * c:(point_idx + 1) * c])

        # second-order fusion
        x_so = [None for _ in range(self.grid_points)]
        for i, points in enumerate(self.neighbor_points):
            x_so[i] = x[:, i * c:(i + 1) * c]
            for j, point_idx in enumerate(points):
                x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])

        # predicted heatmap with fused features
        x2 = torch.cat(x_so, dim=1)
        x2 = self.deconv1(x2)
        x2 = F.relu(self.norm1(x2), inplace=True)
        heatmap = self.deconv2(x2)

        # predicted heatmap with original features (applicable during training)
        if self.training:
            x1 = x
            x1 = self.deconv1(x1)
            x1 = F.relu(self.norm1(x1), inplace=True)
            heatmap_unfused = self.deconv2(x1)
        else:
            heatmap_unfused = heatmap

        return dict(fused=heatmap, unfused=heatmap_unfused)

    def calc_sub_regions(self) -> List[Tuple[float]]:
        """Compute point specific representation regions.

        See `Grid R-CNN Plus <https://arxiv.org/abs/1906.05688>`_ for details.
        """
        # to make it consistent with the original implementation, half_size
        # is computed as 2 * quarter_size, which is smaller
        half_size = self.whole_map_size // 4 * 2
        sub_regions = []
        for i in range(self.grid_points):
            x_idx = i // self.grid_size
            y_idx = i % self.grid_size
            if x_idx == 0:
                sub_x1 = 0
            elif x_idx == self.grid_size - 1:
                sub_x1 = half_size
            else:
                ratio = x_idx / (self.grid_size - 1) - 0.25
                sub_x1 = max(int(ratio * self.whole_map_size), 0)
            if y_idx == 0:
                sub_y1 = 0
            elif y_idx == self.grid_size - 1:
                sub_y1 = half_size
            else:
                ratio = y_idx / (self.grid_size - 1) - 0.25
                sub_y1 = max(int(ratio * self.whole_map_size), 0)
            sub_regions.append(
                (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
        return sub_regions

    def get_targets(self, sampling_results: List[SamplingResult],
                    rcnn_train_cfg: ConfigDict) -> Tensor:
        """Calculate the ground truth for all samples in a batch according to
        the sampling_results.".

        Args:
            sampling_results (List[:obj:`SamplingResult`]): Assign results of
                all images in a batch after sampling.
            rcnn_train_cfg (:obj:`ConfigDict`): `train_cfg` of RCNN.

        Returns:
            Tensor: Grid heatmap targets.
        """
        # mix all samples (across images) together.
        # target computation is done on CPU (nested Python loops below)
        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
                               dim=0).cpu()
        pos_gt_bboxes = torch.cat(
            [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
        assert pos_bboxes.shape == pos_gt_bboxes.shape

        # expand pos_bboxes to 2x of original size
        x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
        y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
        x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
        y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
        pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
        pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
        pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)

        num_rois = pos_bboxes.shape[0]
        map_size = self.whole_map_size
        # this is not the final target shape
        targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
                              dtype=torch.float)

        # pre-compute interpolation factors for all grid points.
        # the first item is the factor of x-dim, and the second is y-dim.
        # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
        factors = []
        for j in range(self.grid_points):
            x_idx = j // self.grid_size
            y_idx = j % self.grid_size
            factors.append((1 - x_idx / (self.grid_size - 1),
                            1 - y_idx / (self.grid_size - 1)))

        radius = rcnn_train_cfg.pos_radius
        radius2 = radius**2
        for i in range(num_rois):
            # ignore small bboxes
            if (pos_bbox_ws[i] <= self.grid_size
                    or pos_bbox_hs[i] <= self.grid_size):
                continue
            # for each grid point, mark a small circle as positive
            for j in range(self.grid_points):
                factor_x, factor_y = factors[j]
                gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
                    1 - factor_x) * pos_gt_bboxes[i, 2]
                gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
                    1 - factor_y) * pos_gt_bboxes[i, 3]

                cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
                         map_size)
                cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
                         map_size)

                for x in range(cx - radius, cx + radius + 1):
                    for y in range(cy - radius, cy + radius + 1):
                        if x >= 0 and x < map_size and y >= 0 and y < map_size:
                            if (x - cx)**2 + (y - cy)**2 <= radius2:
                                targets[i, j, y, x] = 1

        # reduce the target heatmap size by a half
        # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
        sub_targets = []
        for i in range(self.grid_points):
            sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
            sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
        sub_targets = torch.cat(sub_targets, dim=1)
        sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
        return sub_targets

    def loss(self, grid_pred: Tensor, sample_idx: Tensor,
             sampling_results: List[SamplingResult],
             rcnn_train_cfg: ConfigDict) -> dict:
        """Calculate the loss based on the features extracted by the grid
        head.

        Args:
            grid_pred (dict[str, Tensor]): Outputs of grid_head forward.
            sample_idx (Tensor): The sampling index of ``grid_pred``.
            sampling_results (List[obj:SamplingResult]): Assign results of
                all images in a batch after sampling.
            rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.

        Returns:
            dict: A dictionary of loss and targets components.
        """
        grid_targets = self.get_targets(sampling_results, rcnn_train_cfg)
        grid_targets = grid_targets[sample_idx]

        # supervise both the fused and unfused heatmaps
        loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
        loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
        loss_grid = loss_fused + loss_unfused
        return dict(loss_grid=loss_grid)

    def predict_by_feat(self,
                        grid_preds: Dict[str, Tensor],
                        results_list: List[InstanceData],
                        batch_img_metas: List[dict],
                        rescale: bool = False) -> InstanceList:
        """Adjust the predicted bboxes from bbox head.

        Args:
            grid_preds (dict[str, Tensor]): dictionary outputted by forward
                function.
            results_list (list[:obj:`InstanceData`]): Detection results of
                each image.
            batch_img_metas (list[dict]): List of image information.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.

        Returns:
            list[:obj:`InstanceData`]: Detection results of each image
            after the post process. Each item usually contains following keys.

                - scores (Tensor): Classification scores, has a shape \
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4), the last \
                  dimension 4 arrange as (x1, y1, x2, y2).
        """
        num_roi_per_img = tuple(res.bboxes.size(0) for res in results_list)
        grid_preds = {
            k: v.split(num_roi_per_img, 0)
            for k, v in grid_preds.items()
        }

        for i, results in enumerate(results_list):
            if len(results) != 0:
                bboxes = self._predict_by_feat_single(
                    grid_pred=grid_preds['fused'][i],
                    bboxes=results.bboxes,
                    img_meta=batch_img_metas[i],
                    rescale=rescale)
                results.bboxes = bboxes
        return results_list

    def _predict_by_feat_single(self,
                                grid_pred: Tensor,
                                bboxes: Tensor,
                                img_meta: dict,
                                rescale: bool = False) -> Tensor:
        """Adjust ``bboxes`` according to ``grid_pred``.

        Args:
            grid_pred (Tensor): Grid fused heatmap.
            bboxes (Tensor): Predicted bboxes, has shape (n, 4)
            img_meta (dict): image information.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.

        Returns:
            Tensor: adjusted bboxes.
        """
        assert bboxes.size(0) == grid_pred.size(0)
        grid_pred = grid_pred.sigmoid()

        R, c, h, w = grid_pred.shape
        half_size = self.whole_map_size // 4 * 2
        assert h == w == half_size
        assert c == self.grid_points

        # find the point with max scores in the half-sized heatmap
        grid_pred = grid_pred.view(R * c, h * w)
        pred_scores, pred_position = grid_pred.max(dim=1)
        xs = pred_position % w
        ys = pred_position // w

        # get the position in the whole heatmap instead of half-sized heatmap
        for i in range(self.grid_points):
            xs[i::self.grid_points] += self.sub_regions[i][0]
            ys[i::self.grid_points] += self.sub_regions[i][1]

        # reshape to (num_rois, grid_points)
        pred_scores, xs, ys = tuple(
            map(lambda x: x.view(R, c), [pred_scores, xs, ys]))

        # get expanded pos_bboxes
        widths = (bboxes[:, 2] - bboxes[:, 0]).unsqueeze(-1)
        heights = (bboxes[:, 3] - bboxes[:, 1]).unsqueeze(-1)
        x1 = (bboxes[:, 0, None] - widths / 2)
        y1 = (bboxes[:, 1, None] - heights / 2)
        # map the grid point to the absolute coordinates
        abs_xs = (xs.float() + 0.5) / w * widths + x1
        abs_ys = (ys.float() + 0.5) / h * heights + y1

        # get the grid points indices that fall on the bbox boundaries
        x1_inds = [i for i in range(self.grid_size)]
        y1_inds = [i * self.grid_size for i in range(self.grid_size)]
        x2_inds = [
            self.grid_points - self.grid_size + i
            for i in range(self.grid_size)
        ]
        y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]

        # voting of all grid points on some boundary
        bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
        bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
        bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
        bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, y2_inds].sum(dim=1, keepdim=True))

        bboxes = torch.cat([bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2], dim=1)
        bboxes[:, [0, 2]].clamp_(min=0, max=img_meta['img_shape'][1])
        bboxes[:, [1, 3]].clamp_(min=0, max=img_meta['img_shape'][0])

        if rescale:
            assert img_meta.get('scale_factor') is not None
            bboxes /= bboxes.new_tensor(img_meta['scale_factor']).repeat(
                (1, 2))

        return bboxes
20,849
41.464358
79
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List

import torch
import torch.nn as nn
from mmengine.config import ConfigDict
from torch import Tensor

from mmdet.models.task_modules import SamplingResult
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, InstanceList, OptConfigType, reduce_mean
from .fcn_mask_head import FCNMaskHead


@MODELS.register_module()
class DynamicMaskHead(FCNMaskHead):
    r"""Dynamic Mask Head for
    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_

    Args:
        num_convs (int): Number of convolution layer.
            Defaults to 4.
        roi_feat_size (int): The output size of RoI extractor,
            Defaults to 14.
        in_channels (int): Input feature channels.
            Defaults to 256.
        conv_kernel_size (int): Kernel size of convolution layers.
            Defaults to 3.
        conv_out_channels (int): Output channels of convolution layers.
            Defaults to 256.
        num_classes (int): Number of classes.
            Defaults to 80
        class_agnostic (int): Whether generate class agnostic prediction.
            Defaults to False.
        dropout (float): Probability of drop the channel.
            Defaults to 0.0
        upsample_cfg (:obj:`ConfigDict` or dict): The config for
            upsample layer.
        conv_cfg (:obj:`ConfigDict` or dict, optional): The convolution
            layer config.
        norm_cfg (:obj:`ConfigDict` or dict, optional): The norm layer config.
        dynamic_conv_cfg (:obj:`ConfigDict` or dict): The dynamic convolution
            layer config.
        loss_mask (:obj:`ConfigDict` or dict): The config for mask loss.
    """

    def __init__(self,
                 num_convs: int = 4,
                 roi_feat_size: int = 14,
                 in_channels: int = 256,
                 conv_kernel_size: int = 3,
                 conv_out_channels: int = 256,
                 num_classes: int = 80,
                 class_agnostic: bool = False,
                 upsample_cfg: ConfigType = dict(
                     type='deconv', scale_factor=2),
                 conv_cfg: OptConfigType = None,
                 norm_cfg: OptConfigType = None,
                 dynamic_conv_cfg: ConfigType = dict(
                     type='DynamicConv',
                     in_channels=256,
                     feat_channels=64,
                     out_channels=256,
                     input_feat_shape=14,
                     with_proj=False,
                     act_cfg=dict(type='ReLU', inplace=True),
                     norm_cfg=dict(type='LN')),
                 loss_mask: ConfigType = dict(
                     type='DiceLoss', loss_weight=8.0),
                 **kwargs) -> None:
        super().__init__(
            num_convs=num_convs,
            roi_feat_size=roi_feat_size,
            in_channels=in_channels,
            conv_kernel_size=conv_kernel_size,
            conv_out_channels=conv_out_channels,
            num_classes=num_classes,
            class_agnostic=class_agnostic,
            upsample_cfg=upsample_cfg,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            loss_mask=loss_mask,
            **kwargs)
        assert class_agnostic is False, \
            'DynamicMaskHead only support class_agnostic=False'
        self.fp16_enabled = False

        # proposal-conditioned dynamic interaction with RoI features
        self.instance_interactive_conv = MODELS.build(dynamic_conv_cfg)

    def init_weights(self) -> None:
        """Use xavier initialization for all weight parameter and set
        classification head bias as a specific value when use focal loss."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            nn.init.constant_(self.conv_logits.bias, 0.)

    def forward(self, roi_feat: Tensor, proposal_feat: Tensor) -> Tensor:
        """Forward function of DynamicMaskHead.

        Args:
            roi_feat (Tensor): Roi-pooling features with shape
                (batch_size*num_proposals, feature_dimensions,
                pooling_h , pooling_w).
            proposal_feat (Tensor): Intermediate feature get from
                diihead in last stage, has shape
                (batch_size*num_proposals, feature_dimensions)

        Returns:
            mask_preds (Tensor): Predicted foreground masks with shape
            (batch_size*num_proposals, num_classes, pooling_h*2, pooling_w*2).
        """
        proposal_feat = proposal_feat.reshape(-1, self.in_channels)
        proposal_feat_iic = self.instance_interactive_conv(
            proposal_feat, roi_feat)

        # back to (N, C, H, W) for the conv tower
        x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())

        for conv in self.convs:
            x = conv(x)
        if self.upsample is not None:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
        mask_preds = self.conv_logits(x)
        return mask_preds

    def loss_and_target(self, mask_preds: Tensor,
                        sampling_results: List[SamplingResult],
                        batch_gt_instances: InstanceList,
                        rcnn_train_cfg: ConfigDict) -> dict:
        """Calculate the loss based on the features extracted by the mask
        head.

        Args:
            mask_preds (Tensor): Predicted foreground masks, has shape
                (num_pos, num_classes, h, w).
            sampling_results (List[obj:SamplingResult]): Assign results of
                all images in a batch after sampling.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes``, ``labels``, and
                ``masks`` attributes.
            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.

        Returns:
            dict: A dictionary of loss and targets components.
        """
        mask_targets = self.get_targets(
            sampling_results=sampling_results,
            batch_gt_instances=batch_gt_instances,
            rcnn_train_cfg=rcnn_train_cfg)
        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])

        # average over all positives across devices
        num_pos = pos_labels.new_ones(pos_labels.size()).float().sum()
        avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item()
        loss = dict()
        if mask_preds.size(0) == 0:
            # keep the graph connected when there are no positives
            loss_mask = mask_preds.sum()
        else:
            # pick each positive's channel for its gt label
            loss_mask = self.loss_mask(
                mask_preds[torch.arange(num_pos).long(), pos_labels,
                           ...].sigmoid(),
                mask_targets,
                avg_factor=avg_factor)
        loss['loss_mask'] = loss_mask
        return dict(loss_mask=loss, mask_targets=mask_targets)
6,739
39.359281
79
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmengine.model import ModuleList
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.utils import MultiConfig
from .fcn_mask_head import FCNMaskHead


@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
    """Coarse mask head used in PointRend.

    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsample it.

    Args:
        num_convs (int): Number of conv layers in the head. Defaults to 0.
        num_fcs (int): Number of fc layers in the head. Defaults to 2.
        fc_out_channels (int): Number of output channels of fc layer.
            Defaults to 1024.
        downsample_factor (int): The factor that feature map is downsampled
            by. Defaults to 2.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_convs: int = 0,
                 num_fcs: int = 2,
                 fc_out_channels: int = 1024,
                 downsample_factor: int = 2,
                 init_cfg: MultiConfig = dict(
                     type='Xavier',
                     override=[
                         dict(name='fcs'),
                         dict(type='Constant', val=0.001, name='fc_logits')
                     ]),
                 *arg,
                 **kwarg) -> None:
        # Parent is built without an upsample layer; init_cfg is installed
        # afterwards so the parent's default init does not override it.
        super().__init__(
            *arg,
            num_convs=num_convs,
            upsample_cfg=dict(type=None),
            init_cfg=None,
            **kwarg)
        self.init_cfg = init_cfg
        self.num_fcs = num_fcs
        assert self.num_fcs > 0
        self.fc_out_channels = fc_out_channels
        self.downsample_factor = downsample_factor
        assert self.downsample_factor >= 1
        # remove conv_logit: predictions come from fc_logits instead
        delattr(self, 'conv_logits')

        if downsample_factor > 1:
            downsample_in_channels = (
                self.conv_out_channels
                if self.num_convs > 0 else self.in_channels)
            # strided conv performs the downsampling
            self.downsample_conv = ConvModule(
                downsample_in_channels,
                self.conv_out_channels,
                kernel_size=downsample_factor,
                stride=downsample_factor,
                padding=0,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
        else:
            self.downsample_conv = None

        self.output_size = (self.roi_feat_size[0] // downsample_factor,
                            self.roi_feat_size[1] // downsample_factor)
        self.output_area = self.output_size[0] * self.output_size[1]

        last_layer_dim = self.conv_out_channels * self.output_area

        self.fcs = ModuleList()
        for i in range(num_fcs):
            fc_in_channels = (
                last_layer_dim if i == 0 else self.fc_out_channels)
            self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels

        # one flat mask of output_area logits per class
        output_channels = self.num_classes * self.output_area
        self.fc_logits = Linear(last_layer_dim, output_channels)

    def init_weights(self) -> None:
        """Initialize weights."""
        # skip FCNMaskHead's init (tuned for conv_logits, which was removed)
        super(FCNMaskHead, self).init_weights()

    def forward(self, x: Tensor) -> Tensor:
        """Forward features from the upstream network.

        Args:
            x (Tensor): Extract mask RoI features.

        Returns:
            Tensor: Predicted foreground masks.
        """
        for conv in self.convs:
            x = conv(x)

        if self.downsample_conv is not None:
            x = self.downsample_conv(x)

        x = x.flatten(1)
        for fc in self.fcs:
            x = self.relu(fc(x))
        # reshape flat logits back to (N, num_classes, out_h, out_w)
        mask_preds = self.fc_logits(x).view(
            x.size(0), self.num_classes, *self.output_size)
        return mask_preds
3,887
34.027027
79
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/maskiou_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import numpy as np import torch import torch.nn as nn from mmcv.cnn import Conv2d, Linear, MaxPool2d from mmengine.config import ConfigDict from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from torch.nn.modules.utils import _pair from mmdet.models.task_modules.samplers import SamplingResult from mmdet.registry import MODELS from mmdet.utils import ConfigType, InstanceList, OptMultiConfig @MODELS.register_module() class MaskIoUHead(BaseModule): """Mask IoU Head. This head predicts the IoU of predicted masks and corresponding gt masks. Args: num_convs (int): The number of convolution layers. Defaults to 4. num_fcs (int): The number of fully connected layers. Defaults to 2. roi_feat_size (int): RoI feature size. Default to 14. in_channels (int): The channel number of inputs features. Defaults to 256. conv_out_channels (int): The feature channels of convolution layers. Defaults to 256. fc_out_channels (int): The feature channels of fully connected layers. Defaults to 1024. num_classes (int): Number of categories excluding the background category. Defaults to 80. loss_iou (:obj:`ConfigDict` or dict): IoU loss. init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. 
""" def __init__( self, num_convs: int = 4, num_fcs: int = 2, roi_feat_size: int = 14, in_channels: int = 256, conv_out_channels: int = 256, fc_out_channels: int = 1024, num_classes: int = 80, loss_iou: ConfigType = dict(type='MSELoss', loss_weight=0.5), init_cfg: OptMultiConfig = [ dict(type='Kaiming', override=dict(name='convs')), dict(type='Caffe2Xavier', override=dict(name='fcs')), dict(type='Normal', std=0.01, override=dict(name='fc_mask_iou')) ] ) -> None: super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.num_classes = num_classes self.convs = nn.ModuleList() for i in range(num_convs): if i == 0: # concatenation of mask feature and mask prediction in_channels = self.in_channels + 1 else: in_channels = self.conv_out_channels stride = 2 if i == num_convs - 1 else 1 self.convs.append( Conv2d( in_channels, self.conv_out_channels, 3, stride=stride, padding=1)) roi_feat_size = _pair(roi_feat_size) pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2) self.fcs = nn.ModuleList() for i in range(num_fcs): in_channels = ( self.conv_out_channels * pooled_area if i == 0 else self.fc_out_channels) self.fcs.append(Linear(in_channels, self.fc_out_channels)) self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes) self.relu = nn.ReLU() self.max_pool = MaxPool2d(2, 2) self.loss_iou = MODELS.build(loss_iou) def forward(self, mask_feat: Tensor, mask_preds: Tensor) -> Tensor: """Forward function. Args: mask_feat (Tensor): Mask features from upstream models. mask_preds (Tensor): Mask predictions from mask head. Returns: Tensor: Mask IoU predictions. 
""" mask_preds = mask_preds.sigmoid() mask_pred_pooled = self.max_pool(mask_preds.unsqueeze(1)) x = torch.cat((mask_feat, mask_pred_pooled), 1) for conv in self.convs: x = self.relu(conv(x)) x = x.flatten(1) for fc in self.fcs: x = self.relu(fc(x)) mask_iou = self.fc_mask_iou(x) return mask_iou def loss_and_target(self, mask_iou_pred: Tensor, mask_preds: Tensor, mask_targets: Tensor, sampling_results: List[SamplingResult], batch_gt_instances: InstanceList, rcnn_train_cfg: ConfigDict) -> dict: """Calculate the loss and targets of MaskIoUHead. Args: mask_iou_pred (Tensor): Mask IoU predictions results, has shape (num_pos, num_classes) mask_preds (Tensor): Mask predictions from mask head, has shape (num_pos, mask_size, mask_size). mask_targets (Tensor): The ground truth masks assigned with predictions, has shape (num_pos, mask_size, mask_size). sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It includes ``masks`` inside. rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN. Returns: dict: A dictionary of loss and targets components. The targets are only used for cascade rcnn. """ mask_iou_targets = self.get_targets( sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, mask_preds=mask_preds, mask_targets=mask_targets, rcnn_train_cfg=rcnn_train_cfg) pos_inds = mask_iou_targets > 0 if pos_inds.sum() > 0: loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds], mask_iou_targets[pos_inds]) else: loss_mask_iou = mask_iou_pred.sum() * 0 return dict(loss_mask_iou=loss_mask_iou) def get_targets(self, sampling_results: List[SamplingResult], batch_gt_instances: InstanceList, mask_preds: Tensor, mask_targets: Tensor, rcnn_train_cfg: ConfigDict) -> Tensor: """Compute target of mask IoU. Mask IoU target is the IoU of the predicted mask (inside a bbox) and the gt mask of corresponding gt mask (the whole instance). 
The intersection area is computed inside the bbox, and the gt mask area is computed with two steps, firstly we compute the gt area inside the bbox, then divide it by the area ratio of gt area inside the bbox and the gt area of the whole instance. Args: sampling_results (list[:obj:`SamplingResult`]): sampling results. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It includes ``masks`` inside. mask_preds (Tensor): Predicted masks of each positive proposal, shape (num_pos, h, w). mask_targets (Tensor): Gt mask of each positive proposal, binary map of the shape (num_pos, h, w). rcnn_train_cfg (obj:`ConfigDict`): Training config for R-CNN part. Returns: Tensor: mask iou target (length == num positive). """ pos_proposals = [res.pos_priors for res in sampling_results] pos_assigned_gt_inds = [ res.pos_assigned_gt_inds for res in sampling_results ] gt_masks = [res.masks for res in batch_gt_instances] # compute the area ratio of gt areas inside the proposals and # the whole instance area_ratios = map(self._get_area_ratio, pos_proposals, pos_assigned_gt_inds, gt_masks) area_ratios = torch.cat(list(area_ratios)) assert mask_targets.size(0) == area_ratios.size(0) mask_preds = (mask_preds > rcnn_train_cfg.mask_thr_binary).float() mask_pred_areas = mask_preds.sum((-1, -2)) # mask_preds and mask_targets are binary maps overlap_areas = (mask_preds * mask_targets).sum((-1, -2)) # compute the mask area of the whole instance gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7) mask_iou_targets = overlap_areas / ( mask_pred_areas + gt_full_areas - overlap_areas) return mask_iou_targets def _get_area_ratio(self, pos_proposals: Tensor, pos_assigned_gt_inds: Tensor, gt_masks: InstanceData) -> Tensor: """Compute area ratio of the gt mask inside the proposal and the gt mask of the corresponding instance. Args: pos_proposals (Tensor): Positive proposals, has shape (num_pos, 4). pos_assigned_gt_inds (Tensor): positive proposals assigned ground truth index. 
gt_masks (BitmapMask or PolygonMask): Gt masks (the whole instance) of each image, with the same shape of the input image. Returns: Tensor: The area ratio of the gt mask inside the proposal and the gt mask of the corresponding instance. """ num_pos = pos_proposals.size(0) if num_pos > 0: area_ratios = [] proposals_np = pos_proposals.cpu().numpy() pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() # compute mask areas of gt instances (batch processing for speedup) gt_instance_mask_area = gt_masks.areas for i in range(num_pos): gt_mask = gt_masks[pos_assigned_gt_inds[i]] # crop the gt mask inside the proposal bbox = proposals_np[i, :].astype(np.int32) gt_mask_in_proposal = gt_mask.crop(bbox) ratio = gt_mask_in_proposal.areas[0] / ( gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7) area_ratios.append(ratio) area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to( pos_proposals.device) else: area_ratios = pos_proposals.new_zeros((0, )) return area_ratios def predict_by_feat(self, mask_iou_preds: Tuple[Tensor], results_list: InstanceList) -> InstanceList: """Predict the mask iou and calculate it into ``results.scores``. Args: mask_iou_preds (Tensor): Mask IoU predictions results, has shape (num_proposals, num_classes) results_list (list[:obj:`InstanceData`]): Detection results of each image. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W). 
""" assert len(mask_iou_preds) == len(results_list) for results, mask_iou_pred in zip(results_list, mask_iou_preds): labels = results.labels scores = results.scores results.scores = scores * mask_iou_pred[range(labels.size(0)), labels] return results_list
11,667
40.971223
79
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch.nn as nn from mmengine.model import BaseModule from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import MultiConfig @MODELS.register_module() class FeatureRelayHead(BaseModule): """Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: in_channels (int): number of input channels. Defaults to 256. conv_out_channels (int): number of output channels before classification layer. Defaults to 256. roi_feat_size (int): roi feat size at box head. Default: 7. scale_factor (int): scale factor to match roi feat size at mask head. Defaults to 2. init_cfg (:obj:`ConfigDict` or dict or list[dict] or list[:obj:`ConfigDict`]): Initialization config dict. Defaults to dict(type='Kaiming', layer='Linear'). """ def __init__( self, in_channels: int = 1024, out_conv_channels: int = 256, roi_feat_size: int = 7, scale_factor: int = 2, init_cfg: MultiConfig = dict(type='Kaiming', layer='Linear') ) -> None: super().__init__(init_cfg=init_cfg) assert isinstance(roi_feat_size, int) self.in_channels = in_channels self.out_conv_channels = out_conv_channels self.roi_feat_size = roi_feat_size self.out_channels = (roi_feat_size**2) * out_conv_channels self.scale_factor = scale_factor self.fp16_enabled = False self.fc = nn.Linear(self.in_channels, self.out_channels) self.upsample = nn.Upsample( scale_factor=scale_factor, mode='bilinear', align_corners=True) def forward(self, x: Tensor) -> Optional[Tensor]: """Forward function. Args: x (Tensor): Input feature. Returns: Optional[Tensor]: Output feature. When the first dim of input is 0, None is returned. """ N, _ = x.shape if N > 0: out_C = self.out_conv_channels out_HW = self.roi_feat_size x = self.fc(x) x = x.reshape(N, out_C, out_HW, out_HW) x = self.upsample(x) return x return None
2,295
32.275362
78
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/global_context_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import BaseModule from torch import Tensor from mmdet.models.layers import ResLayer, SimplifiedBasicBlock from mmdet.registry import MODELS from mmdet.utils import MultiConfig, OptConfigType @MODELS.register_module() class GlobalContextHead(BaseModule): """Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: num_convs (int, optional): number of convolutional layer in GlbCtxHead. Defaults to 4. in_channels (int, optional): number of input channels. Defaults to 256. conv_out_channels (int, optional): number of output channels before classification layer. Defaults to 256. num_classes (int, optional): number of classes. Defaults to 80. loss_weight (float, optional): global context loss weight. Defaults to 1. conv_cfg (dict, optional): config to init conv layer. Defaults to None. norm_cfg (dict, optional): config to init norm layer. Defaults to None. conv_to_res (bool, optional): if True, 2 convs will be grouped into 1 `SimplifiedBasicBlock` using a skip connection. Defaults to False. init_cfg (:obj:`ConfigDict` or dict or list[dict] or list[:obj:`ConfigDict`]): Initialization config dict. Defaults to dict(type='Normal', std=0.01, override=dict(name='fc')). 
""" def __init__( self, num_convs: int = 4, in_channels: int = 256, conv_out_channels: int = 256, num_classes: int = 80, loss_weight: float = 1.0, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, conv_to_res: bool = False, init_cfg: MultiConfig = dict( type='Normal', std=0.01, override=dict(name='fc')) ) -> None: super().__init__(init_cfg=init_cfg) self.num_convs = num_convs self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.num_classes = num_classes self.loss_weight = loss_weight self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.conv_to_res = conv_to_res self.fp16_enabled = False if self.conv_to_res: num_res_blocks = num_convs // 2 self.convs = ResLayer( SimplifiedBasicBlock, in_channels, self.conv_out_channels, num_res_blocks, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) self.num_convs = num_res_blocks else: self.convs = nn.ModuleList() for i in range(self.num_convs): in_channels = self.in_channels if i == 0 else conv_out_channels self.convs.append( ConvModule( in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Linear(conv_out_channels, num_classes) self.criterion = nn.BCEWithLogitsLoss() def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]: """Forward function. Args: feats (Tuple[Tensor]): Multi-scale feature maps. Returns: Tuple[Tensor]: - mc_pred (Tensor): Multi-class prediction. - x (Tensor): Global context feature. """ x = feats[-1] for i in range(self.num_convs): x = self.convs[i](x) x = self.pool(x) # multi-class prediction mc_pred = x.reshape(x.size(0), -1) mc_pred = self.fc(mc_pred) return mc_pred, x def loss(self, pred: Tensor, labels: List[Tensor]) -> Tensor: """Loss function. Args: pred (Tensor): Logits. labels (list[Tensor]): Grouth truths. Returns: Tensor: Loss. 
""" labels = [lbl.unique() for lbl in labels] targets = pred.new_zeros(pred.size()) for i, label in enumerate(labels): targets[i, label] = 1.0 loss = self.loss_weight * self.criterion(pred, targets) return loss
4,458
33.835938
79
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/htc_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Union from mmcv.cnn import ConvModule from torch import Tensor from mmdet.registry import MODELS from .fcn_mask_head import FCNMaskHead @MODELS.register_module() class HTCMaskHead(FCNMaskHead): """Mask head for HTC. Args: with_conv_res (bool): Whether add conv layer for ``res_feat``. Defaults to True. """ def __init__(self, with_conv_res: bool = True, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.with_conv_res = with_conv_res if self.with_conv_res: self.conv_res = ConvModule( self.conv_out_channels, self.conv_out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) def forward(self, x: Tensor, res_feat: Optional[Tensor] = None, return_logits: bool = True, return_feat: bool = True) -> Union[Tensor, List[Tensor]]: """ Args: x (Tensor): Feature map. res_feat (Tensor, optional): Feature for residual connection. Defaults to None. return_logits (bool): Whether return mask logits. Defaults to True. return_feat (bool): Whether return feature map. Defaults to True. Returns: Union[Tensor, List[Tensor]]: The return result is one of three results: res_feat, logits, or [logits, res_feat]. """ assert not (not return_logits and not return_feat) if res_feat is not None: assert self.with_conv_res res_feat = self.conv_res(res_feat) x = x + res_feat for conv in self.convs: x = conv(x) res_feat = x outs = [] if return_logits: x = self.upsample(x) if self.upsample_method == 'deconv': x = self.relu(x) mask_preds = self.conv_logits(x) outs.append(mask_preds) if return_feat: outs.append(res_feat) return outs if len(outs) > 1 else outs[0]
2,190
32.19697
79
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer from mmcv.ops.carafe import CARAFEPack from mmengine.config import ConfigDict from mmengine.model import BaseModule, ModuleList from mmengine.structures import InstanceData from torch import Tensor from torch.nn.modules.utils import _pair from mmdet.models.task_modules.samplers import SamplingResult from mmdet.models.utils import empty_instances from mmdet.registry import MODELS from mmdet.structures.mask import mask_target from mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig BYTES_PER_FLOAT = 4 # TODO: This memory limit may be too much or too little. It would be better to # determine it based on available resources. GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit @MODELS.register_module() class FCNMaskHead(BaseModule): def __init__(self, num_convs: int = 4, roi_feat_size: int = 14, in_channels: int = 256, conv_kernel_size: int = 3, conv_out_channels: int = 256, num_classes: int = 80, class_agnostic: int = False, upsample_cfg: ConfigType = dict( type='deconv', scale_factor=2), conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, predictor_cfg: ConfigType = dict(type='Conv'), loss_mask: ConfigType = dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), init_cfg: OptMultiConfig = None) -> None: assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super().__init__(init_cfg=init_cfg) self.upsample_cfg = upsample_cfg.copy() if self.upsample_cfg['type'] not in [ None, 'deconv', 'nearest', 'bilinear', 'carafe' ]: raise ValueError( f'Invalid upsample method {self.upsample_cfg["type"]}, ' 'accepted methods are "deconv", "nearest", "bilinear", ' '"carafe"') self.num_convs = num_convs # WARN: roi_feat_size is reserved and not used 
self.roi_feat_size = _pair(roi_feat_size) self.in_channels = in_channels self.conv_kernel_size = conv_kernel_size self.conv_out_channels = conv_out_channels self.upsample_method = self.upsample_cfg.get('type') self.scale_factor = self.upsample_cfg.pop('scale_factor', None) self.num_classes = num_classes self.class_agnostic = class_agnostic self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.predictor_cfg = predictor_cfg self.loss_mask = MODELS.build(loss_mask) self.convs = ModuleList() for i in range(self.num_convs): in_channels = ( self.in_channels if i == 0 else self.conv_out_channels) padding = (self.conv_kernel_size - 1) // 2 self.convs.append( ConvModule( in_channels, self.conv_out_channels, self.conv_kernel_size, padding=padding, conv_cfg=conv_cfg, norm_cfg=norm_cfg)) upsample_in_channels = ( self.conv_out_channels if self.num_convs > 0 else in_channels) upsample_cfg_ = self.upsample_cfg.copy() if self.upsample_method is None: self.upsample = None elif self.upsample_method == 'deconv': upsample_cfg_.update( in_channels=upsample_in_channels, out_channels=self.conv_out_channels, kernel_size=self.scale_factor, stride=self.scale_factor) self.upsample = build_upsample_layer(upsample_cfg_) elif self.upsample_method == 'carafe': upsample_cfg_.update( channels=upsample_in_channels, scale_factor=self.scale_factor) self.upsample = build_upsample_layer(upsample_cfg_) else: # suppress warnings align_corners = (None if self.upsample_method == 'nearest' else False) upsample_cfg_.update( scale_factor=self.scale_factor, mode=self.upsample_method, align_corners=align_corners) self.upsample = build_upsample_layer(upsample_cfg_) out_channels = 1 if self.class_agnostic else self.num_classes logits_in_channel = ( self.conv_out_channels if self.upsample_method == 'deconv' else upsample_in_channels) self.conv_logits = build_conv_layer(self.predictor_cfg, logits_in_channel, out_channels, 1) self.relu = nn.ReLU(inplace=True) self.debug_imgs = None def init_weights(self) -> None: 
"""Initialize the weights.""" super().init_weights() for m in [self.upsample, self.conv_logits]: if m is None: continue elif isinstance(m, CARAFEPack): m.init_weights() elif hasattr(m, 'weight') and hasattr(m, 'bias'): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu') nn.init.constant_(m.bias, 0) def forward(self, x: Tensor) -> Tensor: """Forward features from the upstream network. Args: x (Tensor): Extract mask RoI features. Returns: Tensor: Predicted foreground masks. """ for conv in self.convs: x = conv(x) if self.upsample is not None: x = self.upsample(x) if self.upsample_method == 'deconv': x = self.relu(x) mask_preds = self.conv_logits(x) return mask_preds def get_targets(self, sampling_results: List[SamplingResult], batch_gt_instances: InstanceList, rcnn_train_cfg: ConfigDict) -> Tensor: """Calculate the ground truth for all samples in a batch according to the sampling_results. Args: sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. Returns: Tensor: Mask target of each positive proposals in the image. """ pos_proposals = [res.pos_priors for res in sampling_results] pos_assigned_gt_inds = [ res.pos_assigned_gt_inds for res in sampling_results ] gt_masks = [res.masks for res in batch_gt_instances] mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, gt_masks, rcnn_train_cfg) return mask_targets def loss_and_target(self, mask_preds: Tensor, sampling_results: List[SamplingResult], batch_gt_instances: InstanceList, rcnn_train_cfg: ConfigDict) -> dict: """Calculate the loss based on the features extracted by the mask head. Args: mask_preds (Tensor): Predicted foreground masks, has shape (num_pos, num_classes, h, w). 
sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. Returns: dict: A dictionary of loss and targets components. """ mask_targets = self.get_targets( sampling_results=sampling_results, batch_gt_instances=batch_gt_instances, rcnn_train_cfg=rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss = dict() if mask_preds.size(0) == 0: loss_mask = mask_preds.sum() else: if self.class_agnostic: loss_mask = self.loss_mask(mask_preds, mask_targets, torch.zeros_like(pos_labels)) else: loss_mask = self.loss_mask(mask_preds, mask_targets, pos_labels) loss['loss_mask'] = loss_mask # TODO: which algorithm requires mask_targets? return dict(loss_mask=loss, mask_targets=mask_targets) def predict_by_feat(self, mask_preds: Tuple[Tensor], results_list: List[InstanceData], batch_img_metas: List[dict], rcnn_test_cfg: ConfigDict, rescale: bool = False, activate_map: bool = False) -> InstanceList: """Transform a batch of output features extracted from the head into mask results. Args: mask_preds (tuple[Tensor]): Tuple of predicted foreground masks, each has shape (n, num_classes, h, w). results_list (list[:obj:`InstanceData`]): Detection results of each image. batch_img_metas (list[dict]): List of image information. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. rescale (bool): If True, return boxes in original image space. Defaults to False. activate_map (book): Whether get results with augmentations test. If True, the `mask_preds` will not process with sigmoid. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. 
- scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W). """ assert len(mask_preds) == len(results_list) == len(batch_img_metas) for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] results = results_list[img_id] bboxes = results.bboxes if bboxes.shape[0] == 0: results_list[img_id] = empty_instances( [img_meta], bboxes.device, task_type='mask', instance_results=[results], mask_thr_binary=rcnn_test_cfg.mask_thr_binary)[0] else: im_mask = self._predict_by_feat_single( mask_preds=mask_preds[img_id], bboxes=bboxes, labels=results.labels, img_meta=img_meta, rcnn_test_cfg=rcnn_test_cfg, rescale=rescale, activate_map=activate_map) results.masks = im_mask return results_list def _predict_by_feat_single(self, mask_preds: Tensor, bboxes: Tensor, labels: Tensor, img_meta: dict, rcnn_test_cfg: ConfigDict, rescale: bool = False, activate_map: bool = False) -> Tensor: """Get segmentation masks from mask_preds and bboxes. Args: mask_preds (Tensor): Predicted foreground masks, has shape (n, num_classes, h, w). bboxes (Tensor): Predicted bboxes, has shape (n, 4) labels (Tensor): Labels of bboxes, has shape (n, ) img_meta (dict): image information. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. activate_map (book): Whether get results with augmentations test. If True, the `mask_preds` will not process with sigmoid. Defaults to False. 
Returns: Tensor: Encoded masks, has shape (n, img_w, img_h) Example: >>> from mmengine.config import Config >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA >>> N = 7 # N = number of extracted ROIs >>> C, H, W = 11, 32, 32 >>> # Create example instance of FCN Mask Head. >>> self = FCNMaskHead(num_classes=C, num_convs=0) >>> inputs = torch.rand(N, self.in_channels, H, W) >>> mask_preds = self.forward(inputs) >>> # Each input is associated with some bounding box >>> bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N) >>> labels = torch.randint(0, C, size=(N,)) >>> rcnn_test_cfg = Config({'mask_thr_binary': 0, }) >>> ori_shape = (H * 4, W * 4) >>> scale_factor = (1, 1) >>> rescale = False >>> img_meta = {'scale_factor': scale_factor, ... 'ori_shape': ori_shape} >>> # Encoded masks are a list for each category. >>> encoded_masks = self._get_seg_masks_single( ... mask_preds, bboxes, labels, ... img_meta, rcnn_test_cfg, rescale) >>> assert encoded_masks.size()[0] == N >>> assert encoded_masks.size()[1:] == ori_shape """ scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) img_h, img_w = img_meta['ori_shape'][:2] device = bboxes.device if not activate_map: mask_preds = mask_preds.sigmoid() else: # In AugTest, has been activated before mask_preds = bboxes.new_tensor(mask_preds) if rescale: # in-placed rescale the bboxes bboxes /= scale_factor else: w_scale, h_scale = scale_factor[0, 0], scale_factor[0, 1] img_h = np.round(img_h * h_scale.item()).astype(np.int32) img_w = np.round(img_w * w_scale.item()).astype(np.int32) N = len(mask_preds) # The actual implementation split the input into chunks, # and paste them chunk by chunk. if device.type == 'cpu': # CPU is most efficient when they are pasted one by one with # skip_empty=True, so that it performs minimal number of # operations. 
num_chunks = N else: # GPU benefits from parallelism for larger chunks, # but may have memory issue # the types of img_w and img_h are np.int32, # when the image resolution is large, # the calculation of num_chunks will overflow. # so we need to change the types of img_w and img_h to int. # See https://github.com/open-mmlab/mmdetection/pull/5191 num_chunks = int( np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) assert (num_chunks <= N), 'Default GPU_MEM_LIMIT is too small; try increasing it' chunks = torch.chunk(torch.arange(N, device=device), num_chunks) threshold = rcnn_test_cfg.mask_thr_binary im_mask = torch.zeros( N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8) if not self.class_agnostic: mask_preds = mask_preds[range(N), labels][:, None] for inds in chunks: masks_chunk, spatial_inds = _do_paste_mask( mask_preds[inds], bboxes[inds], img_h, img_w, skip_empty=device.type == 'cpu') if threshold >= 0: masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) else: # for visualization and debugging masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) im_mask[(inds, ) + spatial_inds] = masks_chunk return im_mask def _do_paste_mask(masks: Tensor, boxes: Tensor, img_h: int, img_w: int, skip_empty: bool = True) -> tuple: """Paste instance masks according to boxes. This implementation is modified from https://github.com/facebookresearch/detectron2/ Args: masks (Tensor): N, 1, H, W boxes (Tensor): N, 4 img_h (int): Height of the image to be pasted. img_w (int): Width of the image to be pasted. skip_empty (bool): Only paste masks within the region that tightly bound all boxes, and returns the results this region only. An important optimization for CPU. Returns: tuple: (Tensor, tuple). The first item is mask tensor, the second one is the slice object. If skip_empty == False, the whole image will be pasted. It will return a mask of shape (N, img_h, img_w) and an empty tuple. 
If skip_empty == True, only area around the mask will be pasted. A mask of shape (N, h', w') and its start and end coordinates in the original image will be returned. """ # On GPU, paste all masks together (up to chunk size) # by using the entire image to sample the masks # Compared to pasting them one by one, # this has more operations but is faster on COCO-scale dataset. device = masks.device if skip_empty: x0_int, y0_int = torch.clamp( boxes.min(dim=0).values.floor()[:2] - 1, min=0).to(dtype=torch.int32) x1_int = torch.clamp( boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) y1_int = torch.clamp( boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) else: x0_int, y0_int = 0, 0 x1_int, y1_int = img_w, img_h x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 N = masks.shape[0] img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5 img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5 img_y = (img_y - y0) / (y1 - y0) * 2 - 1 img_x = (img_x - x0) / (x1 - x0) * 2 - 1 # img_x, img_y have shapes (N, w), (N, h) # IsInf op is not supported with ONNX<=1.7.0 if not torch.onnx.is_in_onnx_export(): if torch.isinf(img_x).any(): inds = torch.where(torch.isinf(img_x)) img_x[inds] = 0 if torch.isinf(img_y).any(): inds = torch.where(torch.isinf(img_y)) img_y[inds] = 0 gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) grid = torch.stack([gx, gy], dim=3) img_masks = F.grid_sample( masks.to(dtype=torch.float32), grid, align_corners=False) if skip_empty: return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) else: return img_masks[:, 0], ()
20,127
41.374737
85
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from typing import Tuple import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.model import BaseModule from torch import Tensor from mmdet.registry import MODELS from mmdet.utils import MultiConfig, OptConfigType @MODELS.register_module() class FusedSemanticHead(BaseModule): r"""Multi-level fused semantic segmentation head. .. code-block:: none in_1 -> 1x1 conv --- | in_2 -> 1x1 conv -- | || in_3 -> 1x1 conv - || ||| /-> 1x1 conv (mask prediction) in_4 -> 1x1 conv -----> 3x3 convs (*4) | \-> 1x1 conv (feature) in_5 -> 1x1 conv --- """ # noqa: W605 def __init__( self, num_ins: int, fusion_level: int, seg_scale_factor=1 / 8, num_convs: int = 4, in_channels: int = 256, conv_out_channels: int = 256, num_classes: int = 183, conv_cfg: OptConfigType = None, norm_cfg: OptConfigType = None, ignore_label: int = None, loss_weight: float = None, loss_seg: ConfigDict = dict( type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2), init_cfg: MultiConfig = dict( type='Kaiming', override=dict(name='conv_logits')) ) -> None: super().__init__(init_cfg=init_cfg) self.num_ins = num_ins self.fusion_level = fusion_level self.seg_scale_factor = seg_scale_factor self.num_convs = num_convs self.in_channels = in_channels self.conv_out_channels = conv_out_channels self.num_classes = num_classes self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self.lateral_convs = nn.ModuleList() for i in range(self.num_ins): self.lateral_convs.append( ConvModule( self.in_channels, self.in_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, inplace=False)) self.convs = nn.ModuleList() for i in range(self.num_convs): in_channels = self.in_channels if i == 0 else conv_out_channels self.convs.append( ConvModule( in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_embedding = 
ConvModule( conv_out_channels, conv_out_channels, 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) if ignore_label: loss_seg['ignore_index'] = ignore_label if loss_weight: loss_seg['loss_weight'] = loss_weight if ignore_label or loss_weight: warnings.warn('``ignore_label`` and ``loss_weight`` would be ' 'deprecated soon. Please set ``ingore_index`` and ' '``loss_weight`` in ``loss_seg`` instead.') self.criterion = MODELS.build(loss_seg) def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]: """Forward function. Args: feats (tuple[Tensor]): Multi scale feature maps. Returns: tuple[Tensor]: - mask_preds (Tensor): Predicted mask logits. - x (Tensor): Fused feature. """ x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) fused_size = tuple(x.shape[-2:]) for i, feat in enumerate(feats): if i != self.fusion_level: feat = F.interpolate( feat, size=fused_size, mode='bilinear', align_corners=True) # fix runtime error of "+=" inplace operation in PyTorch 1.10 x = x + self.lateral_convs[i](feat) for i in range(self.num_convs): x = self.convs[i](x) mask_preds = self.conv_logits(x) x = self.conv_embedding(x) return mask_preds, x def loss(self, mask_preds: Tensor, labels: Tensor) -> Tensor: """Loss function. Args: mask_preds (Tensor): Predicted mask logits. labels (Tensor): Ground truth. Returns: Tensor: Semantic segmentation loss. """ labels = F.interpolate( labels.float(), scale_factor=self.seg_scale_factor, mode='nearest') labels = labels.squeeze(1).long() loss_semantic_seg = self.criterion(mask_preds, labels) return loss_semantic_seg
4,978
33.337931
79
py
ERD
ERD-main/mmdet/models/roi_heads/mask_heads/mask_point_head.py
# Copyright (c) OpenMMLab. All rights reserved. # Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa from typing import List, Tuple import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point from mmengine.model import BaseModule from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.task_modules.samplers import SamplingResult from mmdet.models.utils import (get_uncertain_point_coords_with_randomness, get_uncertainty) from mmdet.registry import MODELS from mmdet.structures.bbox import bbox2roi from mmdet.utils import ConfigType, InstanceList, MultiConfig, OptConfigType @MODELS.register_module() class MaskPointHead(BaseModule): """A mask point head use in PointRend. ``MaskPointHead`` use shared multi-layer perceptron (equivalent to nn.Conv1d) to predict the logit of input points. The fine-grained feature and coarse feature will be concatenate together for predication. Args: num_fcs (int): Number of fc layers in the head. Defaults to 3. in_channels (int): Number of input channels. Defaults to 256. fc_channels (int): Number of fc channels. Defaults to 256. num_classes (int): Number of classes for logits. Defaults to 80. class_agnostic (bool): Whether use class agnostic classification. If so, the output channels of logits will be 1. Defaults to False. coarse_pred_each_layer (bool): Whether concatenate coarse feature with the output of each fc layer. Defaults to True. conv_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and config conv layer. Defaults to dict(type='Conv1d')). norm_cfg (:obj:`ConfigDict` or dict, optional): Dictionary to construct and config norm layer. Defaults to None. loss_point (:obj:`ConfigDict` or dict): Dictionary to construct and config loss layer of point head. Defaults to dict(type='CrossEntropyLoss', use_mask=True, loss_weight=1.0). 
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ dict], optional): Initialization config dict. """ def __init__( self, num_classes: int, num_fcs: int = 3, in_channels: int = 256, fc_channels: int = 256, class_agnostic: bool = False, coarse_pred_each_layer: bool = True, conv_cfg: ConfigType = dict(type='Conv1d'), norm_cfg: OptConfigType = None, act_cfg: ConfigType = dict(type='ReLU'), loss_point: ConfigType = dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), init_cfg: MultiConfig = dict( type='Normal', std=0.001, override=dict(name='fc_logits')) ) -> None: super().__init__(init_cfg=init_cfg) self.num_fcs = num_fcs self.in_channels = in_channels self.fc_channels = fc_channels self.num_classes = num_classes self.class_agnostic = class_agnostic self.coarse_pred_each_layer = coarse_pred_each_layer self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.loss_point = MODELS.build(loss_point) fc_in_channels = in_channels + num_classes self.fcs = nn.ModuleList() for _ in range(num_fcs): fc = ConvModule( fc_in_channels, fc_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.fcs.append(fc) fc_in_channels = fc_channels fc_in_channels += num_classes if self.coarse_pred_each_layer else 0 out_channels = 1 if self.class_agnostic else self.num_classes self.fc_logits = nn.Conv1d( fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, fine_grained_feats: Tensor, coarse_feats: Tensor) -> Tensor: """Classify each point base on fine grained and coarse feats. Args: fine_grained_feats (Tensor): Fine grained feature sampled from FPN, shape (num_rois, in_channels, num_points). coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead, shape (num_rois, num_classes, num_points). Returns: Tensor: Point classification results, shape (num_rois, num_class, num_points). 
""" x = torch.cat([fine_grained_feats, coarse_feats], dim=1) for fc in self.fcs: x = fc(x) if self.coarse_pred_each_layer: x = torch.cat((x, coarse_feats), dim=1) return self.fc_logits(x) def get_targets(self, rois: Tensor, rel_roi_points: Tensor, sampling_results: List[SamplingResult], batch_gt_instances: InstanceList, cfg: ConfigType) -> Tensor: """Get training targets of MaskPointHead for all images. Args: rois (Tensor): Region of Interest, shape (num_rois, 5). rel_roi_points (Tensor): Points coordinates relative to RoI, shape (num_rois, num_points, 2). sampling_results (:obj:`SamplingResult`): Sampling result after sampling and assignment. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. cfg (obj:`ConfigDict` or dict): Training cfg. Returns: Tensor: Point target, shape (num_rois, num_points). """ num_imgs = len(sampling_results) rois_list = [] rel_roi_points_list = [] for batch_ind in range(num_imgs): inds = (rois[:, 0] == batch_ind) rois_list.append(rois[inds]) rel_roi_points_list.append(rel_roi_points[inds]) pos_assigned_gt_inds_list = [ res.pos_assigned_gt_inds for res in sampling_results ] cfg_list = [cfg for _ in range(num_imgs)] point_targets = map(self._get_targets_single, rois_list, rel_roi_points_list, pos_assigned_gt_inds_list, batch_gt_instances, cfg_list) point_targets = list(point_targets) if len(point_targets) > 0: point_targets = torch.cat(point_targets) return point_targets def _get_targets_single(self, rois: Tensor, rel_roi_points: Tensor, pos_assigned_gt_inds: Tensor, gt_instances: InstanceData, cfg: ConfigType) -> Tensor: """Get training target of MaskPointHead for each image.""" num_pos = rois.size(0) num_points = cfg.num_points if num_pos > 0: gt_masks_th = ( gt_instances.masks.to_tensor(rois.dtype, rois.device).index_select( 0, pos_assigned_gt_inds)) gt_masks_th = gt_masks_th.unsqueeze(1) rel_img_points = rel_roi_point_to_rel_img_point( rois, 
rel_roi_points, gt_masks_th) point_targets = point_sample(gt_masks_th, rel_img_points).squeeze(1) else: point_targets = rois.new_zeros((0, num_points)) return point_targets def loss_and_target(self, point_pred: Tensor, rel_roi_points: Tensor, sampling_results: List[SamplingResult], batch_gt_instances: InstanceList, cfg: ConfigType) -> dict: """Calculate loss for MaskPointHead. Args: point_pred (Tensor): Point predication result, shape (num_rois, num_classes, num_points). rel_roi_points (Tensor): Points coordinates relative to RoI, shape (num_rois, num_points, 2). sampling_results (:obj:`SamplingResult`): Sampling result after sampling and assignment. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes``, ``labels``, and ``masks`` attributes. cfg (obj:`ConfigDict` or dict): Training cfg. Returns: dict: a dictionary of point loss and point target. """ rois = bbox2roi([res.pos_bboxes for res in sampling_results]) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) point_target = self.get_targets(rois, rel_roi_points, sampling_results, batch_gt_instances, cfg) if self.class_agnostic: loss_point = self.loss_point(point_pred, point_target, torch.zeros_like(pos_labels)) else: loss_point = self.loss_point(point_pred, point_target, pos_labels) return dict(loss_point=loss_point, point_target=point_target) def get_roi_rel_points_train(self, mask_preds: Tensor, labels: Tensor, cfg: ConfigType) -> Tensor: """Get ``num_points`` most uncertain points with random points during train. Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using '_get_uncertainty()' function that takes point's logit prediction as input. Args: mask_preds (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. labels (Tensor): The ground truth class for each instance. 
cfg (:obj:`ConfigDict` or dict): Training config of point head. Returns: point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains the coordinates sampled points. """ point_coords = get_uncertain_point_coords_with_randomness( mask_preds, labels, cfg.num_points, cfg.oversample_ratio, cfg.importance_sample_ratio) return point_coords def get_roi_rel_points_test(self, mask_preds: Tensor, label_preds: Tensor, cfg: ConfigType) -> Tuple[Tensor, Tensor]: """Get ``num_points`` most uncertain points during test. Args: mask_preds (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. label_preds (Tensor): The predication class for each instance. cfg (:obj:`ConfigDict` or dict): Testing config of point head. Returns: tuple: - point_indices (Tensor): A tensor of shape (num_rois, num_points) that contains indices from [0, mask_height x mask_width) of the most uncertain points. - point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the [mask_height, mask_width] grid. """ num_points = cfg.subdivision_num_points uncertainty_map = get_uncertainty(mask_preds, label_preds) num_rois, _, mask_height, mask_width = uncertainty_map.shape # During ONNX exporting, the type of each elements of 'shape' is # `Tensor(float)`, while it is `float` during PyTorch inference. 
if isinstance(mask_height, torch.Tensor): h_step = 1.0 / mask_height.float() w_step = 1.0 / mask_width.float() else: h_step = 1.0 / mask_height w_step = 1.0 / mask_width # cast to int to avoid dynamic K for TopK op in ONNX mask_size = int(mask_height * mask_width) uncertainty_map = uncertainty_map.view(num_rois, mask_size) num_points = min(mask_size, num_points) point_indices = uncertainty_map.topk(num_points, dim=1)[1] xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step point_coords = torch.stack([xs, ys], dim=2) return point_indices, point_coords
12,664
43.438596
126
py
ERD
ERD-main/mmdet/models/losses/ghm_loss.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from mmdet.registry import MODELS from .utils import weight_reduce_loss def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero( (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds]] = 1 bin_label_weights = label_weights.view(-1, 1).expand( label_weights.size(0), label_channels) return bin_labels, bin_label_weights # TODO: code refactoring to make it consistent with other losses @MODELS.register_module() class GHMC(nn.Module): """GHM Classification Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector <https://arxiv.org/abs/1811.05181>`_. Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. reduction (str): Options are "none", "mean" and "sum". Defaults to "mean" """ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0, reduction='mean'): super(GHMC, self).__init__() self.bins = bins self.momentum = momentum edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] += 1e-6 if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight self.reduction = reduction def forward(self, pred, target, label_weight, reduction_override=None, **kwargs): """Calculate the GHM-C loss. Args: pred (float tensor of size [batch_num, class_num]): The direct prediction of classification fc layer. target (float tensor of size [batch_num, class_num]): Binary class target for each sample. 
label_weight (float tensor of size [batch_num, class_num]): the value is 1 if the sample is valid and 0 if ignored. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Returns: The gradient harmonized loss. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) # the target should be binary class label if pred.dim() != target.dim(): target, label_weight = _expand_onehot_labels( target, label_weight, pred.size(-1)) target, label_weight = target.float(), label_weight.float() edges = self.edges mmt = self.momentum weights = torch.zeros_like(pred) # gradient length g = torch.abs(pred.sigmoid().detach() - target) valid = label_weight > 0 tot = max(valid.float().sum().item(), 1.0) n = 0 # n valid bins for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] \ + (1 - mmt) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin n += 1 if n > 0: weights = weights / n loss = F.binary_cross_entropy_with_logits( pred, target, reduction='none') loss = weight_reduce_loss( loss, weights, reduction=reduction, avg_factor=tot) return loss * self.loss_weight # TODO: code refactoring to make it consistent with other losses @MODELS.register_module() class GHMR(nn.Module): """GHM Regression Loss. Details of the theorem can be viewed in the paper `Gradient Harmonized Single-stage Detector <https://arxiv.org/abs/1811.05181>`_. Args: mu (float): The parameter for the Authentic Smooth L1 loss. bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. loss_weight (float): The weight of the total GHM-R loss. reduction (str): Options are "none", "mean" and "sum". 
Defaults to "mean" """ def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0, reduction='mean'): super(GHMR, self).__init__() self.mu = mu self.bins = bins edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] = 1e3 self.momentum = momentum if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.loss_weight = loss_weight self.reduction = reduction # TODO: support reduction parameter def forward(self, pred, target, label_weight, avg_factor=None, reduction_override=None): """Calculate the GHM-R loss. Args: pred (float tensor of size [batch_num, 4 (* class_num)]): The prediction of box regression layer. Channel number can be 4 or 4 * class_num depending on whether it is class-agnostic. target (float tensor of size [batch_num, 4 (* class_num)]): The target regression values with the same size of pred. label_weight (float tensor of size [batch_num, 4 (* class_num)]): The weight of each sample, 0 if ignored. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Returns: The gradient harmonized loss. 
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) mu = self.mu edges = self.edges mmt = self.momentum # ASL1 loss diff = pred - target loss = torch.sqrt(diff * diff + mu * mu) - mu # gradient length g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() weights = torch.zeros_like(g) valid = label_weight > 0 tot = max(label_weight.float().sum().item(), 1.0) n = 0 # n: valid bins for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: n += 1 if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] \ + (1 - mmt) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin if n > 0: weights /= n loss = weight_reduce_loss( loss, weights, reduction=reduction, avg_factor=tot) return loss * self.loss_weight
7,928
36.051402
79
py
ERD
ERD-main/mmdet/models/losses/mse_loss.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional import torch.nn as nn import torch.nn.functional as F from torch import Tensor from mmdet.registry import MODELS from .utils import weighted_loss @weighted_loss def mse_loss(pred: Tensor, target: Tensor) -> Tensor: """A Wrapper of MSE loss. Args: pred (Tensor): The prediction. target (Tensor): The learning target of the prediction. Returns: Tensor: loss Tensor """ return F.mse_loss(pred, target, reduction='none') @MODELS.register_module() class MSELoss(nn.Module): """MSELoss. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction: str = 'mean', loss_weight: float = 1.0) -> None: super().__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred: Tensor, target: Tensor, weight: Optional[Tensor] = None, avg_factor: Optional[int] = None, reduction_override: Optional[str] = None) -> Tensor: """Forward function of loss. Args: pred (Tensor): The prediction. target (Tensor): The learning target of the prediction. weight (Tensor, optional): Weight of the loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Returns: Tensor: The calculated loss. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss = self.loss_weight * mse_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss
2,267
31.4
78
py
ERD
ERD-main/mmdet/models/losses/dice_loss.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn from mmdet.registry import MODELS from .utils import weight_reduce_loss def dice_loss(pred, target, weight=None, eps=1e-3, reduction='mean', naive_dice=False, avg_factor=None): """Calculate dice loss, there are two forms of dice loss is supported: - the one proposed in `V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_. - the dice loss in which the power of the number in the denominator is the first power instead of the second power. Args: pred (torch.Tensor): The prediction, has a shape (n, *) target (torch.Tensor): The learning label of the prediction, shape (n, *), same shape of pred. weight (torch.Tensor, optional): The weight of loss for each prediction, has a shape (n,). Defaults to None. eps (float): Avoid dividing by zero. Default: 1e-3. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". naive_dice (bool, optional): If false, use the dice loss defined in the V-Net paper, otherwise, use the naive dice loss in which the power of the number in the denominator is the first power instead of the second power.Defaults to False. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. 
""" input = pred.flatten(1) target = target.flatten(1).float() a = torch.sum(input * target, 1) if naive_dice: b = torch.sum(input, 1) c = torch.sum(target, 1) d = (2 * a + eps) / (b + c + eps) else: b = torch.sum(input * input, 1) + eps c = torch.sum(target * target, 1) + eps d = (2 * a) / (b + c) loss = 1 - d if weight is not None: assert weight.ndim == loss.ndim assert len(weight) == len(pred) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss @MODELS.register_module() class DiceLoss(nn.Module): def __init__(self, use_sigmoid=True, activate=True, reduction='mean', naive_dice=False, loss_weight=1.0, eps=1e-3): """Compute dice loss. Args: use_sigmoid (bool, optional): Whether to the prediction is used for sigmoid or softmax. Defaults to True. activate (bool): Whether to activate the predictions inside, this will disable the inside sigmoid operation. Defaults to True. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". Defaults to 'mean'. naive_dice (bool, optional): If false, use the dice loss defined in the V-Net paper, otherwise, use the naive dice loss in which the power of the number in the denominator is the first power instead of the second power. Defaults to False. loss_weight (float, optional): Weight of loss. Defaults to 1.0. eps (float): Avoid dividing by zero. Defaults to 1e-3. """ super(DiceLoss, self).__init__() self.use_sigmoid = use_sigmoid self.reduction = reduction self.naive_dice = naive_dice self.loss_weight = loss_weight self.eps = eps self.activate = activate def forward(self, pred, target, weight=None, reduction_override=None, avg_factor=None): """Forward function. Args: pred (torch.Tensor): The prediction, has a shape (n, *). target (torch.Tensor): The label of the prediction, shape (n, *), same shape of pred. weight (torch.Tensor, optional): The weight of loss for each prediction, has a shape (n,). Defaults to None. 
avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.activate: if self.use_sigmoid: pred = pred.sigmoid() else: raise NotImplementedError loss = self.loss_weight * dice_loss( pred, target, weight, eps=self.eps, reduction=reduction, naive_dice=self.naive_dice, avg_factor=avg_factor) return loss
5,329
35.258503
78
py
ERD
ERD-main/mmdet/models/losses/pisa_loss.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Tuple import torch import torch.nn as nn from torch import Tensor from mmdet.structures.bbox import bbox_overlaps from ..task_modules.coders import BaseBBoxCoder from ..task_modules.samplers import SamplingResult def isr_p(cls_score: Tensor, bbox_pred: Tensor, bbox_targets: Tuple[Tensor], rois: Tensor, sampling_results: List[SamplingResult], loss_cls: nn.Module, bbox_coder: BaseBBoxCoder, k: float = 2, bias: float = 0, num_class: int = 80) -> tuple: """Importance-based Sample Reweighting (ISR_P), positive part. Args: cls_score (Tensor): Predicted classification scores. bbox_pred (Tensor): Predicted bbox deltas. bbox_targets (tuple[Tensor]): A tuple of bbox targets, the are labels, label_weights, bbox_targets, bbox_weights, respectively. rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs (two_stage) in shape (n, 5). sampling_results (:obj:`SamplingResult`): Sampling results. loss_cls (:obj:`nn.Module`): Classification loss func of the head. bbox_coder (:obj:`BaseBBoxCoder`): BBox coder of the head. k (float): Power of the non-linear mapping. Defaults to 2. bias (float): Shift of the non-linear mapping. Defaults to 0. num_class (int): Number of classes, defaults to 80. 
Return: tuple([Tensor]): labels, imp_based_label_weights, bbox_targets, bbox_target_weights """ labels, label_weights, bbox_targets, bbox_weights = bbox_targets pos_label_inds = ((labels >= 0) & (labels < num_class)).nonzero().reshape(-1) pos_labels = labels[pos_label_inds] # if no positive samples, return the original targets num_pos = float(pos_label_inds.size(0)) if num_pos == 0: return labels, label_weights, bbox_targets, bbox_weights # merge pos_assigned_gt_inds of per image to a single tensor gts = list() last_max_gt = 0 for i in range(len(sampling_results)): gt_i = sampling_results[i].pos_assigned_gt_inds gts.append(gt_i + last_max_gt) if len(gt_i) != 0: last_max_gt = gt_i.max() + 1 gts = torch.cat(gts) assert len(gts) == num_pos cls_score = cls_score.detach() bbox_pred = bbox_pred.detach() # For single stage detectors, rois here indicate anchors, in shape (N, 4) # For two stage detectors, rois are in shape (N, 5) if rois.size(-1) == 5: pos_rois = rois[pos_label_inds][:, 1:] else: pos_rois = rois[pos_label_inds] if bbox_pred.size(-1) > 4: bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4) else: pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4) # compute iou of the predicted bbox and the corresponding GT pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4) pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred) target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target) ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True) pos_imp_weights = label_weights[pos_label_inds] # Two steps to compute IoU-HLR. 
Samples are first sorted by IoU locally, # then sorted again within the same-rank group max_l_num = pos_labels.bincount().max() for label in pos_labels.unique(): l_inds = (pos_labels == label).nonzero().view(-1) l_gts = gts[l_inds] for t in l_gts.unique(): t_inds = l_inds[l_gts == t] t_ious = ious[t_inds] _, t_iou_rank_idx = t_ious.sort(descending=True) _, t_iou_rank = t_iou_rank_idx.sort() ious[t_inds] += max_l_num - t_iou_rank.float() l_ious = ious[l_inds] _, l_iou_rank_idx = l_ious.sort(descending=True) _, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR # linearly map HLR to label weights pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k) # normalize to make the new weighted loss value equal to the original loss pos_loss_cls = loss_cls( cls_score[pos_label_inds], pos_labels, reduction_override='none') if pos_loss_cls.dim() > 1: ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:, None] new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None] else: ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds] new_pos_loss_cls = pos_loss_cls * pos_imp_weights pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum() pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio label_weights[pos_label_inds] = pos_imp_weights bbox_targets = labels, label_weights, bbox_targets, bbox_weights return bbox_targets def carl_loss(cls_score: Tensor, labels: Tensor, bbox_pred: Tensor, bbox_targets: Tensor, loss_bbox: nn.Module, k: float = 1, bias: float = 0.2, avg_factor: Optional[int] = None, sigmoid: bool = False, num_class: int = 80) -> dict: """Classification-Aware Regression Loss (CARL). Args: cls_score (Tensor): Predicted classification scores. labels (Tensor): Targets of classification. bbox_pred (Tensor): Predicted bbox deltas. bbox_targets (Tensor): Target of bbox regression. loss_bbox (func): Regression loss func of the head. 
bbox_coder (obj): BBox coder of the head. k (float): Power of the non-linear mapping. Defaults to 1. bias (float): Shift of the non-linear mapping. Defaults to 0.2. avg_factor (int, optional): Average factor used in regression loss. sigmoid (bool): Activation of the classification score. num_class (int): Number of classes, defaults to 80. Return: dict: CARL loss dict. """ pos_label_inds = ((labels >= 0) & (labels < num_class)).nonzero().reshape(-1) if pos_label_inds.numel() == 0: return dict(loss_carl=cls_score.sum()[None] * 0.) pos_labels = labels[pos_label_inds] # multiply pos_cls_score with the corresponding bbox weight # and remain gradient if sigmoid: pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels] else: pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels] carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k) # normalize carl_loss_weight to make its sum equal to num positive num_pos = float(pos_cls_score.size(0)) weight_ratio = num_pos / carl_loss_weights.sum() carl_loss_weights *= weight_ratio if avg_factor is None: avg_factor = bbox_targets.size(0) # if is class agnostic, bbox pred is in shape (N, 4) # otherwise, bbox pred is in shape (N, #classes, 4) if bbox_pred.size(-1) > 4: bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels] else: pos_bbox_preds = bbox_pred[pos_label_inds] ori_loss_reg = loss_bbox( pos_bbox_preds, bbox_targets[pos_label_inds], reduction_override='none') / avg_factor loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum() return dict(loss_carl=loss_carl[None])
7,670
39.803191
79
py
ERD
ERD-main/mmdet/models/losses/balanced_l1_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn

from mmdet.registry import MODELS
from .utils import weighted_loss


@weighted_loss
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Compute the balanced L1 loss from `Libra R-CNN
    <https://arxiv.org/pdf/1904.02701.pdf>`_.

    Args:
        pred (torch.Tensor): Predictions with shape (N, 4).
        target (torch.Tensor): Regression targets with shape (N, 4).
        beta (float): Threshold on |pred - target| that separates the
            inlier (log-shaped) branch from the linear outlier branch.
            Defaults to 1.0.
        alpha (float): Denominator ``alpha`` of the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` of the balanced L1 loss.
            Defaults to 1.5.
        reduction (str, optional): "none", "mean" or "sum".

    Returns:
        torch.Tensor: Element-wise loss.
    """
    assert beta > 0
    # Empty targets: keep the graph alive with a zero that depends on pred.
    if target.numel() == 0:
        return pred.sum() * 0
    assert pred.size() == target.size()

    abs_diff = torch.abs(pred - target)
    # ``b`` is chosen so the two branches join smoothly at ``diff == beta``.
    b = np.e**(gamma / alpha) - 1
    inlier = (alpha / b * (b * abs_diff + 1) *
              torch.log(b * abs_diff / beta + 1) - alpha * abs_diff)
    outlier = gamma * abs_diff + gamma / b - alpha * beta
    return torch.where(abs_diff < beta, inlier, outlier)


@MODELS.register_module()
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): Denominator ``alpha`` of the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` of the balanced L1 loss.
            Defaults to 1.5.
        beta (float, optional): Threshold separating the piecewise
            branches. Defaults to 1.0.
        reduction (str, optional): "none", "mean" or "sum".
        loss_weight (float, optional): Weight of the loss.
            Defaults to 1.0
    """

    def __init__(self,
                 alpha=0.5,
                 gamma=1.5,
                 beta=1.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): Predictions with shape (N, 4).
            target (torch.Tensor): Regression targets with shape (N, 4).
            weight (torch.Tensor, optional): Per-sample loss weight with
                shape (N, ).
            avg_factor (int, optional): Factor used to average the loss.
                Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction for this call. "none", "mean" or "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # None falls back to the reduction configured at construction time.
        reduction = reduction_override if reduction_override else self.reduction
        return self.loss_weight * balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
4,205
33.195122
79
py
ERD
ERD-main/mmdet/models/losses/iou_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
from typing import Optional

import torch
import torch.nn as nn
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox_overlaps
from .utils import weighted_loss


@weighted_loss
def iou_loss(pred: Tensor,
             target: Tensor,
             linear: bool = False,
             mode: str = 'log',
             eps: float = 1e-6) -> Tensor:
    """IoU loss between predicted and target boxes.

    Args:
        pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        linear (bool, optional): Deprecated; equivalent to ``mode='linear'``.
        mode (str): Loss scaling mode, one of "linear", "square", "log".
            Default: 'log'
        eps (float): Epsilon to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    assert mode in ['linear', 'square', 'log']
    if linear:
        mode = 'linear'
        warnings.warn('DeprecationWarning: Setting "linear=True" in '
                      'iou_loss is deprecated, please use "mode=`linear`" '
                      'instead.')
    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
    if mode == 'linear':
        return 1 - ious
    if mode == 'square':
        return 1 - ious**2
    if mode == 'log':
        return -ious.log()
    raise NotImplementedError


@weighted_loss
def bounded_iou_loss(pred: Tensor,
                     target: Tensor,
                     beta: float = 0.2,
                     eps: float = 1e-3) -> Tensor:
    """Bounded IoU loss from `Improving Object Localization with Fitness NMS
    and Bounded IoU Loss <https://arxiv.org/abs/1711.00164>`_.

    Args:
        pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        beta (float, optional): Beta parameter in smoothl1.
        eps (float, optional): Epsilon to avoid NaN values.

    Return:
        Tensor: Loss tensor.
    """
    px = (pred[:, 0] + pred[:, 2]) * 0.5
    py = (pred[:, 1] + pred[:, 3]) * 0.5
    pw = pred[:, 2] - pred[:, 0]
    ph = pred[:, 3] - pred[:, 1]

    # Target geometry is treated as constant (no gradient through gt boxes).
    with torch.no_grad():
        tx = (target[:, 0] + target[:, 2]) * 0.5
        ty = (target[:, 1] + target[:, 3]) * 0.5
        tw = target[:, 2] - target[:, 0]
        th = target[:, 3] - target[:, 1]

    dx = tx - px
    dy = ty - py

    loss_dx = 1 - torch.max(
        (tw - 2 * dx.abs()) / (tw + 2 * dx.abs() + eps), torch.zeros_like(dx))
    loss_dy = 1 - torch.max(
        (th - 2 * dy.abs()) / (th + 2 * dy.abs() + eps), torch.zeros_like(dy))
    loss_dw = 1 - torch.min(tw / (pw + eps), pw / (tw + eps))
    loss_dh = 1 - torch.min(th / (ph + eps), ph / (th + eps))
    # view(..., -1) does not work for empty tensor, so stack + flatten.
    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
                            dim=-1).flatten(1)
    # Smooth-L1 shaping of the combined per-coordinate terms.
    return torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
                       loss_comb - 0.5 * beta)


@weighted_loss
def giou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
    r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
    Box Regression <https://arxiv.org/abs/1902.09630>`_.

    Args:
        pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Epsilon to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
    return 1 - gious


@weighted_loss
def diou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
    r"""`Distance-IoU Loss: Faster and Better Learning for Bounding Box
    Regression <https://arxiv.org/abs/1911.08287>`_.

    Code is modified from https://github.com/Zzh-tju/DIoU.

    Args:
        pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Epsilon to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    # Intersection area.
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]

    # Union area.
    area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_g = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = area_p + area_g - overlap + eps

    ious = overlap / union

    # Squared diagonal length of the smallest enclosing box.
    enc_lt = torch.min(pred[:, :2], target[:, :2])
    enc_rb = torch.max(pred[:, 2:], target[:, 2:])
    enc_wh = (enc_rb - enc_lt).clamp(min=0)
    c2 = enc_wh[:, 0]**2 + enc_wh[:, 1]**2 + eps

    # Squared distance between box centers.
    rho2_x = ((target[:, 0] + target[:, 2]) - (pred[:, 0] + pred[:, 2]))**2 / 4
    rho2_y = ((target[:, 1] + target[:, 3]) - (pred[:, 1] + pred[:, 3]))**2 / 4
    rho2 = rho2_x + rho2_y

    dious = ious - rho2 / c2
    return 1 - dious


@weighted_loss
def ciou_loss(pred: Tensor, target: Tensor, eps: float = 1e-7) -> Tensor:
    r"""`Enhancing Geometric Factors into Model Learning and Inference for
    Object Detection and Instance Segmentation
    <https://arxiv.org/abs/2005.03572>`_.

    Code is modified from https://github.com/Zzh-tju/CIoU.

    Args:
        pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Epsilon to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    # Intersection area.
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    overlap = wh[:, 0] * wh[:, 1]

    # Union area.
    area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_g = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = area_p + area_g - overlap + eps

    ious = overlap / union

    # Squared diagonal length of the smallest enclosing box.
    enc_lt = torch.min(pred[:, :2], target[:, :2])
    enc_rb = torch.max(pred[:, 2:], target[:, 2:])
    enc_wh = (enc_rb - enc_lt).clamp(min=0)
    c2 = enc_wh[:, 0]**2 + enc_wh[:, 1]**2 + eps

    # Squared distance between box centers.
    rho2_x = ((target[:, 0] + target[:, 2]) - (pred[:, 0] + pred[:, 2]))**2 / 4
    rho2_y = ((target[:, 1] + target[:, 3]) - (pred[:, 1] + pred[:, 3]))**2 / 4
    rho2 = rho2_x + rho2_y

    # Aspect-ratio consistency term; eps keeps the heights non-zero.
    w1 = pred[:, 2] - pred[:, 0]
    h1 = pred[:, 3] - pred[:, 1] + eps
    w2 = target[:, 2] - target[:, 0]
    h2 = target[:, 3] - target[:, 1] + eps

    factor = 4 / math.pi**2
    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)

    # The trade-off weight alpha is not back-propagated through.
    with torch.no_grad():
        alpha = (ious > 0.5).float() * v / (1 - ious + v)

    cious = ious - (rho2 / c2 + alpha * v)
    return 1 - cious.clamp(min=-1.0, max=1.0)


@weighted_loss
def eiou_loss(pred: Tensor,
              target: Tensor,
              smooth_point: float = 0.1,
              eps: float = 1e-7) -> Tensor:
    r"""`Extended-IoU Loss: A Systematic IoU-Related Method: Beyond
    Simplified Regression for Better Localization
    <https://ieeexplore.ieee.org/abstract/document/9429909>`_

    Code is modified from https://github.com//ShiqiYu/libfacedetection.train.

    Args:
        pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        smooth_point (float): hyperparameter, default is 0.1.
        eps (float): Epsilon to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    px1, py1, px2, py2 = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
    tx1, ty1, tx2, ty2 = target[:, 0], target[:, 1], target[:, 2], target[:, 3]

    # Extent top-left corner.
    ex1 = torch.min(px1, tx1)
    ey1 = torch.min(py1, ty1)

    # Intersection corner coordinates.
    ix1 = torch.max(px1, tx1)
    iy1 = torch.max(py1, ty1)
    ix2 = torch.min(px2, tx2)
    iy2 = torch.min(py2, ty2)

    # Auxiliary extrema of the intersection corners.
    xmin = torch.min(ix1, ix2)
    ymin = torch.min(iy1, iy2)
    xmax = torch.max(ix1, ix2)
    ymax = torch.max(iy1, iy2)

    # Signed intersection area (extended-IoU formulation).
    intersection = (ix2 - ex1) * (iy2 - ey1) + (xmin - ex1) * (ymin - ey1) - (
        ix1 - ex1) * (ymax - ey1) - (xmax - ex1) * (
            iy1 - ey1)
    # Union area.
    union = (px2 - px1) * (py2 - py1) + (tx2 - tx1) * (
        ty2 - ty1) - intersection + eps
    # One minus IoU.
    ious = 1 - (intersection / union)

    # Smooth-EIoU: quadratic near zero, linear beyond ``smooth_point``.
    smooth_sign = (ious < smooth_point).detach().float()
    return 0.5 * smooth_sign * (ious**2) / smooth_point + (
        1 - smooth_sign) * (ious - 0.5 * smooth_point)


@MODELS.register_module()
class IoULoss(nn.Module):
    """IoULoss.

    Computing the IoU loss between a set of predicted bboxes and target
    bboxes.

    Args:
        linear (bool): If True, use linear scale of loss else determined
            by mode. Default: False.
        eps (float): Epsilon to avoid log(0).
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Weight of loss.
        mode (str): Loss scaling mode, including "linear", "square", and
            "log". Default: 'log'
    """

    def __init__(self,
                 linear: bool = False,
                 eps: float = 1e-6,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0,
                 mode: str = 'log') -> None:
        super().__init__()
        assert mode in ['linear', 'square', 'log']
        if linear:
            mode = 'linear'
            warnings.warn('DeprecationWarning: Setting "linear=True" in '
                          'IOULoss is deprecated, please use "mode=`linear`" '
                          'instead.')
        self.mode = mode
        self.linear = linear
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
            target (Tensor): Regression targets, shape (n, 4).
            weight (Tensor, optional): Per-prediction loss weight.
            avg_factor (int, optional): Factor used to average the loss.
            reduction_override (str, optional): Per-call reduction override;
                "none", "mean" or "sum".

        Return:
            Tensor: Loss tensor.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # Short-circuit: all-zero weights with a real reduction give zero
        # loss while keeping the graph connected to ``pred``.
        if (weight is not None) and (not torch.any(weight > 0)) and (
                reduction != 'none'):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # iou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        return self.loss_weight * iou_loss(
            pred,
            target,
            weight,
            mode=self.mode,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)


@MODELS.register_module()
class BoundedIoULoss(nn.Module):
    """BIoULoss.

    This is an implementation of paper
    `Improving Object Localization with Fitness NMS and Bounded IoU Loss.
    <https://arxiv.org/abs/1711.00164>`_.

    Args:
        beta (float, optional): Beta parameter in smoothl1.
        eps (float, optional): Epsilon to avoid NaN values.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Weight of loss.
    """

    def __init__(self,
                 beta: float = 0.2,
                 eps: float = 1e-3,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        self.beta = beta
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
            target (Tensor): Regression targets, shape (n, 4).
            weight (Optional[Tensor], optional): Per-prediction loss weight.
            avg_factor (Optional[int], optional): Factor used to average
                the loss. Defaults to None.
            reduction_override (Optional[str], optional): Per-call reduction
                override; "none", "mean" or "sum".

        Returns:
            Tensor: Loss tensor.
        """
        # All-zero weights: return a zero that still depends on ``pred``.
        if weight is not None and not torch.any(weight > 0):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * bounded_iou_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)


@MODELS.register_module()
class GIoULoss(nn.Module):
    r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
    Box Regression <https://arxiv.org/abs/1902.09630>`_.

    Args:
        eps (float): Epsilon to avoid log(0).
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Weight of loss.
    """

    def __init__(self,
                 eps: float = 1e-6,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
            target (Tensor): Regression targets, shape (n, 4).
            weight (Optional[Tensor], optional): Per-prediction loss weight.
            avg_factor (Optional[int], optional): Factor used to average
                the loss. Defaults to None.
            reduction_override (Optional[str], optional): Per-call reduction
                override; "none", "mean" or "sum".

        Returns:
            Tensor: Loss tensor.
        """
        # All-zero weights: return a zero that still depends on ``pred``.
        if weight is not None and not torch.any(weight > 0):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # giou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        return self.loss_weight * giou_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)


@MODELS.register_module()
class DIoULoss(nn.Module):
    r"""Implementation of `Distance-IoU Loss: Faster and Better
    Learning for Bounding Box Regression https://arxiv.org/abs/1911.08287`_.

    Code is modified from https://github.com/Zzh-tju/DIoU.

    Args:
        eps (float): Epsilon to avoid log(0).
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Weight of loss.
    """

    def __init__(self,
                 eps: float = 1e-6,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
            target (Tensor): Regression targets, shape (n, 4).
            weight (Optional[Tensor], optional): Per-prediction loss weight.
            avg_factor (Optional[int], optional): Factor used to average
                the loss. Defaults to None.
            reduction_override (Optional[str], optional): Per-call reduction
                override; "none", "mean" or "sum".

        Returns:
            Tensor: Loss tensor.
        """
        # All-zero weights: return a zero that still depends on ``pred``.
        if weight is not None and not torch.any(weight > 0):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # diou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        return self.loss_weight * diou_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)


@MODELS.register_module()
class CIoULoss(nn.Module):
    r"""`Implementation of paper `Enhancing Geometric Factors into
    Model Learning and Inference for Object Detection and Instance
    Segmentation <https://arxiv.org/abs/2005.03572>`_.

    Code is modified from https://github.com/Zzh-tju/CIoU.

    Args:
        eps (float): Epsilon to avoid log(0).
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Weight of loss.
    """

    def __init__(self,
                 eps: float = 1e-6,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
            target (Tensor): Regression targets, shape (n, 4).
            weight (Optional[Tensor], optional): Per-prediction loss weight.
            avg_factor (Optional[int], optional): Factor used to average
                the loss. Defaults to None.
            reduction_override (Optional[str], optional): Per-call reduction
                override; "none", "mean" or "sum".

        Returns:
            Tensor: Loss tensor.
        """
        # All-zero weights: return a zero that still depends on ``pred``.
        if weight is not None and not torch.any(weight > 0):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # TODO: remove this in the future
            # reduce the weight of shape (n, 4) to (n,) to match the
            # ciou_loss of shape (n,)
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        return self.loss_weight * ciou_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)


@MODELS.register_module()
class EIoULoss(nn.Module):
    r"""Implementation of paper `Extended-IoU Loss: A Systematic
    IoU-Related Method: Beyond Simplified Regression for Better
    Localization <https://ieeexplore.ieee.org/abstract/document/9429909>`_

    Code is modified from https://github.com//ShiqiYu/libfacedetection.train.

    Args:
        eps (float): Epsilon to avoid log(0).
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Weight of loss.
        smooth_point (float): hyperparameter, default is 0.1.
    """

    def __init__(self,
                 eps: float = 1e-6,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0,
                 smooth_point: float = 0.1) -> None:
        super().__init__()
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.smooth_point = smooth_point

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): Predicted bboxes (x1, y1, x2, y2), shape (n, 4).
            target (Tensor): Regression targets, shape (n, 4).
            weight (Optional[Tensor], optional): Per-prediction loss weight.
            avg_factor (Optional[int], optional): Factor used to average
                the loss. Defaults to None.
            reduction_override (Optional[str], optional): Per-call reduction
                override; "none", "mean" or "sum".

        Returns:
            Tensor: Loss tensor.
        """
        # All-zero weights: return a zero that still depends on ``pred``.
        if weight is not None and not torch.any(weight > 0):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()  # 0
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if weight is not None and weight.dim() > 1:
            # Reduce (n, 4) weights to (n,) to match the loss shape.
            assert weight.shape == pred.shape
            weight = weight.mean(-1)
        return self.loss_weight * eiou_loss(
            pred,
            target,
            weight,
            smooth_point=self.smooth_point,
            eps=self.eps,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
26,172
34.131544
79
py
ERD
ERD-main/mmdet/models/losses/smooth_l1_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch
import torch.nn as nn
from torch import Tensor

from mmdet.registry import MODELS
from .utils import weighted_loss


@weighted_loss
def smooth_l1_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:
    """Smooth L1 loss.

    Args:
        pred (Tensor): The prediction.
        target (Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.

    Returns:
        Tensor: Calculated loss
    """
    assert beta > 0
    # Empty targets: return a zero that still depends on ``pred`` so the
    # autograd graph stays connected.
    if target.numel() == 0:
        return pred.sum() * 0

    assert pred.size() == target.size()
    diff = torch.abs(pred - target)
    # Quadratic inside |diff| < beta, linear outside; branches meet at beta.
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
                       diff - 0.5 * beta)
    return loss


@weighted_loss
def l1_loss(pred: Tensor, target: Tensor) -> Tensor:
    """L1 loss.

    Args:
        pred (Tensor): The prediction.
        target (Tensor): The learning target of the prediction.

    Returns:
        Tensor: Calculated loss
    """
    # Empty targets: return a zero that still depends on ``pred``.
    if target.numel() == 0:
        return pred.sum() * 0

    assert pred.size() == target.size()
    loss = torch.abs(pred - target)
    return loss


@MODELS.register_module()
class SmoothL1Loss(nn.Module):
    """Smooth L1 loss.

    Args:
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum". Defaults to "mean".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self,
                 beta: float = 1.0,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): The prediction.
            target (Tensor): The learning target of the prediction.
            weight (Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            Tensor: Calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_bbox = self.loss_weight * smooth_l1_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_bbox


@MODELS.register_module()
class L1Loss(nn.Module):
    """L1 loss.

    Args:
        reduction (str, optional): The method to reduce the loss.
            Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of loss.
    """

    def __init__(self,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): The prediction.
            target (Tensor): The learning target of the prediction.
            weight (Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            Tensor: Calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # ``**kwargs`` pass-through added for consistency with
        # ``SmoothL1Loss.forward``; backward compatible (defaults to empty).
        loss_bbox = self.loss_weight * l1_loss(
            pred,
            target,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_bbox
4,971
30.468354
78
py
ERD
ERD-main/mmdet/models/losses/gfocal_loss.py
# Copyright (c) OpenMMLab. All rights reserved. from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from mmdet.models.losses.utils import weighted_loss from mmdet.registry import MODELS @weighted_loss def quality_focal_loss(pred, target, beta=2.0): r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,). """ assert len(target) == 2, """target for QFL must be a tuple of two elements, including category label and quality label, respectively""" # label denotes the category id, score denotes the quality score label, score = target # negatives are supervised by 0 quality score pred_sigmoid = pred.sigmoid() scale_factor = pred_sigmoid zerolabel = scale_factor.new_zeros(pred.shape) loss = F.binary_cross_entropy_with_logits( pred, zerolabel, reduction='none') * scale_factor.pow(beta) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = pred.size(1) pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) pos_label = label[pos].long() # positives are supervised by bbox quality (IoU) score scale_factor = score[pos] - pred_sigmoid[pos, pos_label] loss[pos, pos_label] = F.binary_cross_entropy_with_logits( pred[pos, pos_label], score[pos], reduction='none') * scale_factor.abs().pow(beta) loss = loss.sum(dim=1, keepdim=False) return loss @weighted_loss def quality_focal_loss_tensor_target(pred, target, beta=2.0, activated=False): """`QualityFocal Loss <https://arxiv.org/abs/2008.13367>`_ 
    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of
            classes.
        beta (float): The beta parameter for calculating the modulating
            factor. Defaults to 2.0.
        activated (bool): Whether the input is activated.
            If True, it means the input has been activated and can be
            treated as probabilities. Else, it should be treated as logits.
            Defaults to False.
    """
    # pred and target should be of the same size
    assert pred.size() == target.size()
    if activated:
        # Input is already a probability; plain BCE applies directly.
        pred_sigmoid = pred
        loss_function = F.binary_cross_entropy
    else:
        # Input is a logit; use the numerically stable with-logits variant.
        pred_sigmoid = pred.sigmoid()
        loss_function = F.binary_cross_entropy_with_logits

    scale_factor = pred_sigmoid
    target = target.type_as(pred)

    # All entries are first supervised towards 0, modulated by p^beta.
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = loss_function(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)

    # Entries with a non-zero soft target are re-supervised towards that
    # quality score, modulated by |q - p|^beta.
    pos = (target != 0)
    scale_factor = target[pos] - pred_sigmoid[pos]
    loss[pos] = loss_function(
        pred[pos], target[pos],
        reduction='none') * scale_factor.abs().pow(beta)

    # Sum over the class dimension so the result is one value per sample.
    loss = loss.sum(dim=1, keepdim=False)
    return loss


@weighted_loss
def quality_focal_loss_with_prob(pred, target, beta=2.0):
    r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Different from `quality_focal_loss`, this function accepts probability
    as input.

    Args:
        pred (torch.Tensor): Predicted joint representation of classification
            and quality (IoU) estimation with shape (N, C), C is the number of
            classes.
        target (tuple([torch.Tensor])): Target category label with shape (N,)
            and target quality label with shape (N,).
        beta (float): The beta parameter for calculating the modulating factor.
            Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert len(target) == 2, """target for QFL must be a tuple of two elements,
        including category label and quality label, respectively"""
    # label denotes the category id, score denotes the quality score
    label, score = target

    # negatives are supervised by 0 quality score
    # ``pred`` is already a probability here, so no sigmoid is applied.
    pred_sigmoid = pred
    scale_factor = pred_sigmoid
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = F.binary_cross_entropy(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)

    # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    pos_label = label[pos].long()
    # positives are supervised by bbox quality (IoU) score
    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
    loss[pos, pos_label] = F.binary_cross_entropy(
        pred[pos, pos_label], score[pos],
        reduction='none') * scale_factor.abs().pow(beta)

    # Sum over the class dimension so the result is one value per sample.
    loss = loss.sum(dim=1, keepdim=False)
    return loss


@weighted_loss
def distribution_focal_loss(pred, label):
    r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted general distribution of bounding boxes
            (before softmax) with shape (N, n+1), n is the max value of the
            integral set `{0, ..., n}` in paper.
        label (torch.Tensor): Target distance label for bounding boxes with
            shape (N,).

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    # A continuous target y is represented by its two neighbouring integer
    # bins y_l and y_r = y_l + 1 with linear interpolation weights, and the
    # cross entropy of both bins is mixed accordingly.
    dis_left = label.long()
    dis_right = dis_left + 1
    weight_left = dis_right.float() - label
    weight_right = label - dis_left.float()
    loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
        + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
    return loss


@MODELS.register_module()
class QualityFocalLoss(nn.Module):
    r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
    Learning Qualified and Distributed Bounding Boxes for Dense Object
    Detection <https://arxiv.org/abs/2006.04388>`_.

    Args:
        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
            Defaults to True.
        beta (float): The beta parameter for calculating the modulating
            factor. Defaults to 2.0.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
        activated (bool, optional): Whether the input is activated.
            If True, it means the input has been activated and can be
            treated as probabilities. Else, it should be treated as logits.
            Defaults to False.
    """

    def __init__(self,
                 use_sigmoid=True,
                 beta=2.0,
                 reduction='mean',
                 loss_weight=1.0,
                 activated=False):
        super(QualityFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.activated = activated

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Predicted joint representation of
                classification and quality (IoU) estimation with shape (N, C),
                C is the number of classes.
            target (Union(tuple([torch.Tensor]),Torch.Tensor)): The type is
                tuple, it should be included Target category label with
                shape (N,) and target quality label with shape (N,).The type
                is torch.Tensor, the target should be one-hot form with
                soft weights.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            # Pick the implementation matching whether ``pred`` is a logit
            # or an already-activated probability.
            if self.activated:
                calculate_loss_func = quality_focal_loss_with_prob
            else:
                calculate_loss_func = quality_focal_loss
            if isinstance(target, torch.Tensor):
                # the target shape with (N,C) or (N,C,...), which means
                # the target is one-hot form with soft weights.
                calculate_loss_func = partial(
                    quality_focal_loss_tensor_target, activated=self.activated)

            loss_cls = self.loss_weight * calculate_loss_func(
                pred,
                target,
                weight,
                beta=self.beta,
                reduction=reduction,
                avg_factor=avg_factor)
        else:
            raise NotImplementedError
        return loss_cls


@MODELS.register_module()
class DistributionFocalLoss(nn.Module):
    r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
    Learning Qualified and Distributed Bounding Boxes for Dense Object
    Detection <https://arxiv.org/abs/2006.04388>`_.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(DistributionFocalLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Predicted general distribution of bounding
                boxes (before softmax) with shape (N, n+1), n is the max value
                of the integral set `{0, ..., n}` in paper.
            target (torch.Tensor): Target distance label for bounding boxes
                with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_cls = self.loss_weight * distribution_focal_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return loss_cls
11,838
38.996622
79
py
ERD
ERD-main/mmdet/models/losses/varifocal_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

from mmdet.registry import MODELS
from .utils import weight_reduce_loss


def varifocal_loss(pred: Tensor,
                   target: Tensor,
                   weight: Optional[Tensor] = None,
                   alpha: float = 0.75,
                   gamma: float = 2.0,
                   iou_weighted: bool = True,
                   reduction: str = 'mean',
                   avg_factor: Optional[int] = None) -> Tensor:
    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

    Args:
        pred (Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of
            classes.
        weight (Tensor, optional): The weight of loss for each
            prediction. Defaults to None.
        alpha (float, optional): A balance factor for the negative part of
            Varifocal Loss, which is different from the alpha of Focal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        iou_weighted (bool, optional): Whether to weight the loss of the
            positive example with the iou target. Defaults to True.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.

    Returns:
        Tensor: Loss tensor.
    """
    # pred and target should be of the same size
    assert pred.size() == target.size()
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    if iou_weighted:
        # Positives (target > 0) are weighted by their IoU target; negatives
        # get the focal-style down-weighting alpha * |p - q|^gamma.
        focal_weight = target * (target > 0.0).float() + \
            alpha * (pred_sigmoid - target).abs().pow(gamma) * \
            (target <= 0.0).float()
    else:
        # Positives all receive weight 1; negatives are focal-weighted as
        # in the branch above.
        focal_weight = (target > 0.0).float() + \
            alpha * (pred_sigmoid - target).abs().pow(gamma) * \
            (target <= 0.0).float()
    # Element-wise BCE on logits, modulated by the varifocal weights.
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


@MODELS.register_module()
class VarifocalLoss(nn.Module):

    def __init__(self,
                 use_sigmoid: bool = True,
                 alpha: float = 0.75,
                 gamma: float = 2.0,
                 iou_weighted: bool = True,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_

        Args:
            use_sigmoid (bool, optional): Whether the prediction is
                used for sigmoid or softmax. Defaults to True.
            alpha (float, optional): A balance factor for the negative part of
                Varifocal Loss, which is different from the alpha of Focal
                Loss. Defaults to 0.75.
            gamma (float, optional): The gamma for calculating the modulating
                factor. Defaults to 2.0.
            iou_weighted (bool, optional): Whether to weight the loss of the
                positive examples with the iou target. Defaults to True.
            reduction (str, optional): The method used to reduce the loss into
                a scalar. Defaults to 'mean'. Options are "none", "mean" and
                "sum".
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super().__init__()
        assert use_sigmoid is True, \
            'Only sigmoid varifocal loss supported now.'
        assert alpha >= 0.0
        self.use_sigmoid = use_sigmoid
        self.alpha = alpha
        self.gamma = gamma
        self.iou_weighted = iou_weighted
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): The prediction with shape (N, C), C is the
                number of classes.
            target (Tensor): The learning target of the iou-aware
                classification score with shape (N, C), C is
                the number of classes.
            weight (Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        # A per-call override takes precedence over the configured reduction.
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            loss_cls = self.loss_weight * varifocal_loss(
                pred,
                target,
                weight,
                alpha=self.alpha,
                gamma=self.gamma,
                iou_weighted=self.iou_weighted,
                reduction=reduction,
                avg_factor=avg_factor)
        else:
            raise NotImplementedError
        return loss_cls
5,749
39.492958
79
py
ERD
ERD-main/mmdet/models/losses/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import functools
from typing import Callable, Optional

import torch
import torch.nn.functional as F
from torch import Tensor


def reduce_loss(loss: Tensor, reduction: str) -> Tensor:
    """Reduce an element-wise loss tensor as requested.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    # ``get_enum`` validates the reduction string for us and maps it to
    # none: 0, elementwise_mean: 1, sum: 2.
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 1:
        return loss.mean()
    if reduction_enum == 2:
        return loss.sum()
    if reduction_enum == 0:
        return loss


def weight_reduce_loss(loss: Tensor,
                       weight: Optional[Tensor] = None,
                       reduction: str = 'mean',
                       avg_factor: Optional[float] = None) -> Tensor:
    """Apply an element-wise weight to a loss and reduce it.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Optional[Tensor], optional): Element-wise weights.
            Defaults to None.
        reduction (str, optional): Same as built-in losses of PyTorch.
            Defaults to 'mean'.
        avg_factor (Optional[float], optional): Average factor when
            computing the mean of losses. Defaults to None.

    Returns:
        Tensor: Processed loss values.
    """
    # Scale each element when a weight map is provided.
    if weight is not None:
        loss = loss * weight

    # Without a custom normalizer, fall back to the plain reduction.
    if avg_factor is None:
        return reduce_loss(loss, reduction)

    if reduction == 'mean':
        # The eps keeps the division finite when avg_factor is 0.0,
        # e.g. when every label of an image belongs to the ignore index.
        eps = torch.finfo(torch.float32).eps
        return loss.sum() / (avg_factor + eps)
    if reduction == 'none':
        # Nothing to normalize for an unreduced loss.
        return loss
    # Any other reduction (i.e. "sum") is incompatible with avg_factor.
    raise ValueError('avg_factor can not be used with reduction="sum"')


def weighted_loss(loss_func: Callable) -> Callable:
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                reduction: str = 'mean',
                avg_factor: Optional[int] = None,
                **kwargs) -> Tensor:
        """Compute the element-wise loss, then weight and reduce it.

        Args:
            pred (Tensor): The prediction.
            target (Tensor): Target bboxes.
            weight (Optional[Tensor], optional): The weight of loss for each
                prediction. Defaults to None.
            reduction (str, optional): Options are "none", "mean" and "sum".
                Defaults to 'mean'.
            avg_factor (Optional[int], optional): Average factor that is used
                to average the loss. Defaults to None.

        Returns:
            Tensor: Loss tensor.
        """
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
4,256
32.785714
79
py
ERD
ERD-main/mmdet/models/losses/seesaw_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

from mmdet.registry import MODELS
from .accuracy import accuracy
from .cross_entropy_loss import cross_entropy
from .utils import weight_reduce_loss


def seesaw_ce_loss(cls_score: Tensor,
                   labels: Tensor,
                   label_weights: Tensor,
                   cum_samples: Tensor,
                   num_classes: int,
                   p: float,
                   q: float,
                   eps: float,
                   reduction: str = 'mean',
                   avg_factor: Optional[int] = None) -> Tensor:
    """Calculate the Seesaw CrossEntropy loss.

    Args:
        cls_score (Tensor): The prediction with shape (N, C),
             C is the number of classes.
        labels (Tensor): The learning label of the prediction.
        label_weights (Tensor): Sample-wise loss weight.
        cum_samples (Tensor): Cumulative samples for each category.
        num_classes (int): The number of classes.
        p (float): The ``p`` in the mitigation factor.
        q (float): The ``q`` in the compenstation factor.
        eps (float): The minimal value of divisor to smooth
             the computation of compensation factor
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
             the loss. Defaults to None.

    Returns:
        Tensor: The calculated loss
    """
    assert cls_score.size(-1) == num_classes
    assert len(cum_samples) == num_classes

    onehot_labels = F.one_hot(labels, num_classes)
    seesaw_weights = cls_score.new_ones(onehot_labels.size())

    # mitigation factor: down-weights gradients from head classes onto
    # tail classes based on the ratio of accumulated samples.
    if p > 0:
        sample_ratio_matrix = cum_samples[None, :].clamp(
            min=1) / cum_samples[:, None].clamp(min=1)
        index = (sample_ratio_matrix < 1.0).float()
        sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index)
        mitigation_factor = sample_weights[labels.long(), :]
        seesaw_weights = seesaw_weights * mitigation_factor

    # compensation factor: re-amplifies the penalty on classes whose
    # softmax score exceeds the score of the ground-truth class.
    if q > 0:
        scores = F.softmax(cls_score.detach(), dim=1)
        self_scores = scores[
            torch.arange(0, len(scores)).to(scores.device).long(),
            labels.long()]
        score_matrix = scores / self_scores[:, None].clamp(min=eps)
        index = (score_matrix > 1.0).float()
        compensation_factor = score_matrix.pow(q) * index + (1 - index)
        seesaw_weights = seesaw_weights * compensation_factor

    # Apply the (log-space) seesaw weights only to non-target logits.
    cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels))

    loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')

    if label_weights is not None:
        label_weights = label_weights.float()
    loss = weight_reduce_loss(
        loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor)
    return loss


@MODELS.register_module()
class SeesawLoss(nn.Module):
    """
    Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)
    arXiv: https://arxiv.org/abs/2008.10032

    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
             of softmax. Only False is supported.
        p (float, optional): The ``p`` in the mitigation factor.
             Defaults to 0.8.
        q (float, optional): The ``q`` in the compenstation factor.
             Defaults to 2.0.
        num_classes (int, optional): The number of classes.
             Default to 1203 for LVIS v1 dataset.
        eps (float, optional): The minimal value of divisor to smooth
             the computation of compensation factor
        reduction (str, optional): The method that reduces the loss to a
             scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
        return_dict (bool, optional): Whether return the losses as a dict.
             Default to True.
    """

    def __init__(self,
                 use_sigmoid: bool = False,
                 p: float = 0.8,
                 q: float = 2.0,
                 num_classes: int = 1203,
                 eps: float = 1e-2,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0,
                 return_dict: bool = True) -> None:
        super().__init__()
        assert not use_sigmoid
        self.use_sigmoid = False
        self.p = p
        self.q = q
        self.num_classes = num_classes
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.return_dict = return_dict

        # 0 for pos, 1 for neg
        self.cls_criterion = seesaw_ce_loss

        # cumulative samples for each category
        # (a buffer so it is saved/loaded with checkpoints)
        self.register_buffer(
            'cum_samples',
            torch.zeros(self.num_classes + 1, dtype=torch.float))

        # custom output channels of the classifier
        self.custom_cls_channels = True
        # custom activation of cls_score
        self.custom_activation = True
        # custom accuracy of the classsifier
        self.custom_accuracy = True

    def _split_cls_score(self, cls_score: Tensor) -> Tuple[Tensor, Tensor]:
        """split cls_score.

        Args:
            cls_score (Tensor): The prediction with shape (N, C + 2).

        Returns:
            Tuple[Tensor, Tensor]: The score for classes and objectness,
                 respectively
        """
        # split cls_score to cls_score_classes and cls_score_objectness
        assert cls_score.size(-1) == self.num_classes + 2
        cls_score_classes = cls_score[..., :-2]
        cls_score_objectness = cls_score[..., -2:]
        return cls_score_classes, cls_score_objectness

    def get_cls_channels(self, num_classes: int) -> int:
        """Get custom classification channels.

        Args:
            num_classes (int): The number of classes.

        Returns:
            int: The custom classification channels.
        """
        assert num_classes == self.num_classes
        # num_classes class channels plus two objectness channels.
        return num_classes + 2

    def get_activation(self, cls_score: Tensor) -> Tensor:
        """Get custom activation of cls_score.

        Args:
            cls_score (Tensor): The prediction with shape (N, C + 2).

        Returns:
            Tensor: The custom activation of cls_score with shape
                 (N, C + 1).
        """
        cls_score_classes, cls_score_objectness = self._split_cls_score(
            cls_score)
        score_classes = F.softmax(cls_score_classes, dim=-1)
        score_objectness = F.softmax(cls_score_objectness, dim=-1)
        score_pos = score_objectness[..., [0]]
        score_neg = score_objectness[..., [1]]
        # Class probabilities are conditioned on being a positive sample.
        score_classes = score_classes * score_pos
        scores = torch.cat([score_classes, score_neg], dim=-1)
        return scores

    def get_accuracy(self, cls_score: Tensor,
                     labels: Tensor) -> Dict[str, Tensor]:
        """Get custom accuracy w.r.t. cls_score and labels.

        Args:
            cls_score (Tensor): The prediction with shape (N, C + 2).
            labels (Tensor): The learning label of the prediction.

        Returns:
            Dict [str, Tensor]: The accuracy for objectness and classes,
                 respectively.
        """
        pos_inds = labels < self.num_classes
        # 0 for pos, 1 for neg
        obj_labels = (labels == self.num_classes).long()
        cls_score_classes, cls_score_objectness = self._split_cls_score(
            cls_score)
        acc_objectness = accuracy(cls_score_objectness, obj_labels)
        acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds])
        acc = dict()
        acc['acc_objectness'] = acc_objectness
        acc['acc_classes'] = acc_classes
        return acc

    def forward(
        self,
        cls_score: Tensor,
        labels: Tensor,
        label_weights: Optional[Tensor] = None,
        avg_factor: Optional[int] = None,
        reduction_override: Optional[str] = None
    ) -> Union[Tensor, Dict[str, Tensor]]:
        """Forward function.

        Args:
            cls_score (Tensor): The prediction with shape (N, C + 2).
            labels (Tensor): The learning label of the prediction.
            label_weights (Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor that is used to average
                 the loss. Defaults to None.
            reduction (str, optional): The method used to reduce the loss.
                 Options are "none", "mean" and "sum".

        Returns:
            Tensor | Dict [str, Tensor]:
                 if return_dict == False: The calculated loss |
                 if return_dict == True: The dict of calculated losses
                 for objectness and classes, respectively.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        assert cls_score.size(-1) == self.num_classes + 2
        pos_inds = labels < self.num_classes
        # 0 for pos, 1 for neg
        obj_labels = (labels == self.num_classes).long()

        # accumulate the samples for each category
        # (running statistics used by the mitigation factor)
        unique_labels = labels.unique()
        for u_l in unique_labels:
            inds_ = labels == u_l.item()
            self.cum_samples[u_l] += inds_.sum()

        if label_weights is not None:
            label_weights = label_weights.float()
        else:
            label_weights = labels.new_ones(labels.size(), dtype=torch.float)

        cls_score_classes, cls_score_objectness = self._split_cls_score(
            cls_score)
        # calculate loss_cls_classes (only need pos samples)
        if pos_inds.sum() > 0:
            loss_cls_classes = self.loss_weight * self.cls_criterion(
                cls_score_classes[pos_inds], labels[pos_inds],
                label_weights[pos_inds], self.cum_samples[:self.num_classes],
                self.num_classes, self.p, self.q, self.eps, reduction,
                avg_factor)
        else:
            # Keeps the graph connected (zero loss) when there are no
            # positive samples in the batch.
            loss_cls_classes = cls_score_classes[pos_inds].sum()
        # calculate loss_cls_objectness
        loss_cls_objectness = self.loss_weight * cross_entropy(
            cls_score_objectness, obj_labels, label_weights, reduction,
            avg_factor)

        if self.return_dict:
            loss_cls = dict()
            loss_cls['loss_cls_objectness'] = loss_cls_objectness
            loss_cls['loss_cls_classes'] = loss_cls_classes
        else:
            loss_cls = loss_cls_classes + loss_cls_objectness
        return loss_cls
10,723
37.437276
79
py
ERD
ERD-main/mmdet/models/losses/ae_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmdet.registry import MODELS


def ae_loss_per_image(tl_preds, br_preds, match):
    """Associative Embedding Loss in one image.

    Associative Embedding Loss including two parts: pull loss and push loss.
    Pull loss makes embedding vectors from same object closer to each other.
    Push loss distinguish embedding vector from different objects, and makes
    the gap between them is large enough.

    During computing, usually there are 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
            by the two corner of the only object.
        - more than one objects in image: pull loss is computed by corner pairs
            from each object, push loss is computed by each object with all
            other objects. We use confusion matrix with 0 in diagonal to
            compute the push loss.

    Args:
        tl_preds (tensor): Embedding feature map of left-top corner.
        br_preds (tensor): Embedding feature map of bottim-right corner.
        match (list): Downsampled coordinates pair of each ground truth box.
    """

    tl_list, br_list, me_list = [], [], []
    if len(match) == 0:  # no object in image
        # ``sum() * 0.`` keeps both losses connected to the graph with a
        # zero-valued gradient instead of a detached constant.
        pull_loss = tl_preds.sum() * 0.
        push_loss = tl_preds.sum() * 0.
    else:
        # Gather the embedding vector of each ground-truth corner and the
        # per-object mean embedding.
        for m in match:
            [tl_y, tl_x], [br_y, br_x] = m
            tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
            br_e = br_preds[:, br_y, br_x].view(-1, 1)
            tl_list.append(tl_e)
            br_list.append(br_e)
            me_list.append((tl_e + br_e) / 2.0)

        tl_list = torch.cat(tl_list)
        br_list = torch.cat(br_list)
        me_list = torch.cat(me_list)

        assert tl_list.size() == br_list.size()

        # N is object number in image, M is dimension of embedding vector
        N, M = tl_list.size()

        # Pull: both corners of an object are drawn towards their mean.
        pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
        pull_loss = pull_loss.sum() / N

        margin = 1  # exp setting of CornerNet, details in section 3.3 of paper

        # confusion matrix of push loss
        conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
        # Zero the diagonal so an object is not pushed away from itself.
        conf_weight = 1 - torch.eye(N).type_as(me_list)
        conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())

        if N > 1:  # more than one object in current image
            push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
        else:
            push_loss = tl_preds.sum() * 0.

    return pull_loss, push_loss


@MODELS.register_module()
class AssociativeEmbeddingLoss(nn.Module):
    """Associative Embedding Loss.

    More details can be found in
    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
    `CornerNet <https://arxiv.org/abs/1808.01244>`_ .
    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_  # noqa: E501

    Args:
        pull_weight (float): Loss weight for corners from same object.
        push_weight (float): Loss weight for corners from different object.
    """

    def __init__(self, pull_weight=0.25, push_weight=0.25):
        super(AssociativeEmbeddingLoss, self).__init__()
        self.pull_weight = pull_weight
        self.push_weight = push_weight

    def forward(self, pred, target, match):
        """Forward function."""
        batch = pred.size(0)
        pull_all, push_all = 0.0, 0.0
        # Accumulate the weighted pull/push losses over the batch.
        for i in range(batch):
            pull, push = ae_loss_per_image(pred[i], target[i], match[i])

            pull_all += self.pull_weight * pull
            push_all += self.push_weight * push

        return pull_all, push_all
3,810
36.362745
143
py
ERD
ERD-main/mmdet/models/losses/accuracy.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn


def accuracy(pred, target, topk=1, thresh=None):
    """Compute the top-k accuracy of ``pred`` with respect to ``target``.

    Args:
        pred (torch.Tensor): The model prediction, shape (N, num_class)
        target (torch.Tensor): The target of each prediction, shape (N, )
        topk (int | tuple[int], optional): If the predictions in ``topk``
            matches the target, the predictions will be regarded as
            correct ones. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Default to None.

    Returns:
        float | tuple[float]: A single accuracy value (percentage) when
            ``topk`` is an int, otherwise one value per requested ``k``.
    """
    assert isinstance(topk, (int, tuple))
    return_single = isinstance(topk, int)
    if return_single:
        topk = (topk, )
    maxk = max(topk)

    if pred.size(0) == 0:
        # No samples at all: report zero accuracy for each requested k.
        zeros = [pred.new_tensor(0.) for _ in topk]
        return zeros[0] if return_single else zeros

    assert pred.ndim == 2 and target.ndim == 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'

    scores, labels = pred.topk(maxk, dim=1)
    labels = labels.t()  # shape (maxk, N)
    matches = labels.eq(target.view(1, -1).expand_as(labels))
    if thresh is not None:
        # A hit only counts when its prediction score clears the threshold.
        matches = matches & (scores > thresh).t()

    results = []
    for k in topk:
        hits = matches[:k].reshape(-1).float().sum(0, keepdim=True)
        results.append(hits.mul_(100.0 / pred.size(0)))
    return results[0] if return_single else results


class Accuracy(nn.Module):

    def __init__(self, topk=(1, ), thresh=None):
        """Module wrapper around :func:`accuracy`.

        Args:
            topk (tuple, optional): The criterion used to calculate the
                accuracy. Defaults to (1,).
            thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect.
                Default to None.
        """
        super().__init__()
        self.topk = topk
        self.thresh = thresh

    def forward(self, pred, target):
        """Compute the accuracies of ``pred`` under the stored criteria.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            tuple[float]: The accuracies under different topk criterions.
        """
        return accuracy(pred, target, self.topk, self.thresh)
2,953
36.871795
79
py
ERD
ERD-main/mmdet/models/losses/focal_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss

from mmdet.registry import MODELS
from .utils import weight_reduce_loss


# This method is only for debugging
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # pt is the probability assigned to the *wrong* outcome; the modulating
    # factor pt^gamma focuses training on hard examples.
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                #  which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                #  in FSAF. But it may be flattened of shape
                #  (num_priors x num_class, ), while loss is still of shape
                #  (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


def py_focal_loss_with_prob(pred,
                            target,
                            weight=None,
                            gamma=2.0,
                            alpha=0.25,
                            reduction='mean',
                            avg_factor=None):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.

    Different from `py_sigmoid_focal_loss`, this function accepts probability
    as input.

    Args:
        pred (torch.Tensor): The prediction probability with shape (N, C),
            C is the number of classes.
        target (torch.Tensor): The learning label of the prediction.
            The target shape support (N,C) or (N,), (N,C) means one-hot form.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    if pred.dim() != target.dim():
        # Convert class indices to one-hot; the extra background column
        # (index num_classes) is dropped afterwards.
        num_classes = pred.size(1)
        target = F.one_hot(target, num_classes=num_classes + 1)
        target = target[:, :num_classes]

    target = target.type_as(pred)
    pt = (1 - pred) * target + pred * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy(
        pred, target, reduction='none') * focal_weight
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                #  which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                #  in FSAF. But it may be flattened of shape
                #  (num_priors x num_class, ), while loss is still of shape
                #  (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


def sigmoid_focal_loss(pred,
                       target,
                       weight=None,
                       gamma=2.0,
                       alpha=0.25,
                       reduction='mean',
                       avg_factor=None):
    r"""A wrapper of cuda version `Focal Loss
    <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    # Function.apply does not accept keyword arguments, so the decorator
    # "weighted_loss" is not applicable
    loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,
                               alpha, None, 'none')
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                #  which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed. e.g.
                #  in FSAF. But it may be flattened of shape
                #  (num_priors x num_class, ), while loss is still of shape
                #  (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


@MODELS.register_module()
class FocalLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 loss_weight=1.0,
                 activated=False):
        """`Focal Loss <https://arxiv.org/abs/1708.02002>`_

        Args:
            use_sigmoid (bool, optional): Whether to the prediction is
                used for sigmoid or softmax. Defaults to True.
            gamma (float, optional): The gamma for calculating the modulating
                factor. Defaults to 2.0.
            alpha (float, optional): A balanced form for Focal Loss.
                Defaults to 0.25.
            reduction (str, optional): The method used to reduce the loss into
                a scalar. Defaults to 'mean'. Options are "none", "mean" and
                "sum".
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
            activated (bool, optional): Whether the input is activated.
                If True, it means the input has been activated and can be
                treated as probabilities. Else, it should be treated as logits.
                Defaults to False.
        """
        super(FocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.activated = activated

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning label of the prediction.
                The target shape support (N,C) or (N,), (N,C) means
                one-hot form.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            # Dispatch to the implementation that matches the input form
            # (probability vs. logit) and the available device.
            if self.activated:
                calculate_loss_func = py_focal_loss_with_prob
            else:
                if pred.dim() == target.dim():
                    # this means that target is already in One-Hot form.
                    calculate_loss_func = py_sigmoid_focal_loss
                elif torch.cuda.is_available() and pred.is_cuda:
                    calculate_loss_func = sigmoid_focal_loss
                else:
                    num_classes = pred.size(1)
                    target = F.one_hot(target, num_classes=num_classes + 1)
                    target = target[:, :num_classes]
                    calculate_loss_func = py_sigmoid_focal_loss

            loss_cls = self.loss_weight * calculate_loss_func(
                pred,
                target,
                weight,
                gamma=self.gamma,
                alpha=self.alpha,
                reduction=reduction,
                avg_factor=avg_factor)

        else:
            raise NotImplementedError
        return loss_cls
10,834
41.996032
79
py
ERD
ERD-main/mmdet/models/losses/cross_entropy_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F

from mmdet.registry import MODELS
from .utils import weight_reduce_loss


def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  class_weight=None,
                  ignore_index=-100,
                  avg_non_ignore=False):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): Prediction with shape (N, C), where C is the
            number of classes.
        label (torch.Tensor): Learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): Method used to reduce the loss.
        avg_factor (int, optional): Average factor used to average the loss.
            Defaults to None.
        class_weight (list[float], optional): Weight for each class.
        ignore_index (int | None): Label index to be ignored. If None, it
            falls back to the default value. Default: -100.
        avg_non_ignore (bool): Whether the loss is only averaged over
            non-ignored targets. Default: False.

    Returns:
        torch.Tensor: The calculated loss
    """
    # The default value of ignore_index is the same as F.cross_entropy.
    ignore_index = -100 if ignore_index is None else ignore_index
    # Element-wise losses; reduction is applied manually below so that
    # sample weights and avg_factor can be honoured.
    loss = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)

    # Average the loss over non-ignored elements only, matching PyTorch's
    # official cross_entropy behaviour, see
    # https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660  # noqa
    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
        avg_factor = label.numel() - (label == ignore_index).sum().item()

    # Apply weights and do the reduction.
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)

    return loss


def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
    """Expand onehot labels to match the size of prediction."""
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    # A label is valid when it is non-negative and not the ignored index.
    valid_mask = (labels >= 0) & (labels != ignore_index)
    inds = torch.nonzero(
        valid_mask & (labels < label_channels), as_tuple=False)

    if inds.numel() > 0:
        bin_labels[inds, labels[inds]] = 1

    valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
                                               label_channels).float()
    if label_weights is None:
        bin_label_weights = valid_mask
    else:
        bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
        # Zero out the weights of ignored/invalid positions.
        bin_label_weights *= valid_mask

    return bin_labels, bin_label_weights, valid_mask


def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None,
                         ignore_index=-100,
                         avg_non_ignore=False):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): Prediction with shape (N, 1) or (N, ). When the
            shape of pred is (N, 1), label will be expanded to one-hot
            format; when the shape of pred is (N, ), label will not be
            expanded to one-hot format.
        label (torch.Tensor): Learning label of the prediction,
            with shape (N, ).
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): Method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor used to average the loss.
            Defaults to None.
        class_weight (list[float], optional): Weight for each class.
        ignore_index (int | None): Label index to be ignored. If None, it
            falls back to the default value. Default: -100.
        avg_non_ignore (bool): Whether the loss is only averaged over
            non-ignored targets. Default: False.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # The default value of ignore_index is the same as F.cross_entropy.
    ignore_index = -100 if ignore_index is None else ignore_index

    if pred.dim() != label.dim():
        label, weight, valid_mask = _expand_onehot_labels(
            label, weight, pred.size(-1), ignore_index)
    else:
        # Should mask out the ignored elements.
        valid_mask = ((label >= 0) & (label != ignore_index)).float()
        if weight is not None:
            # In-place multiplication would raise a broadcast-shape mismatch
            # when weight and valid_mask have inconsistent dimensions such
            # as (B, N, 1) and (B, N, C), so use out-of-place multiply.
            weight = weight * valid_mask
        else:
            weight = valid_mask

    # Average the loss over non-ignored elements only.
    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
        avg_factor = valid_mask.sum().item()

    # Weighted element-wise losses.
    weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), pos_weight=class_weight, reduction='none')
    # Do the reduction for the weighted loss.
    loss = weight_reduce_loss(
        loss, weight, reduction=reduction, avg_factor=avg_factor)

    return loss


def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None,
                       ignore_index=None,
                       **kwargs):
    """Calculate the CrossEntropy loss for masks.

    Args:
        pred (torch.Tensor): Prediction with shape (N, C, *), where C is the
            number of classes. The trailing * indicates arbitrary shape.
        target (torch.Tensor): Learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask
            corresponding object. It is used to select the mask of the class
            the object belongs to when the mask prediction is not
            class-agnostic.
        reduction (str, optional): Method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor used to average the loss.
            Defaults to None.
        class_weight (list[float], optional): Weight for each class.
        ignore_index (None): Placeholder, to be consistent with other loss.
            Default: None.

    Returns:
        torch.Tensor: The calculated loss

    Example:
        >>> N, C = 3, 11
        >>> H, W = 2, 2
        >>> pred = torch.randn(N, C, H, W) * 1000
        >>> target = torch.rand(N, H, W)
        >>> label = torch.randint(0, C, size=(N,))
        >>> reduction = 'mean'
        >>> avg_factor = None
        >>> class_weights = None
        >>> loss = mask_cross_entropy(pred, target, label, reduction,
        >>>                           avg_factor, class_weights)
        >>> assert loss.shape == (1,)
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    # Pick, for each RoI, the mask channel of its own class.
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, weight=class_weight, reduction='mean')[None]


@MODELS.register_module()
class CrossEntropyLoss(nn.Module):
    """CrossEntropyLoss.

    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            of softmax. Defaults to False.
        use_mask (bool, optional): Whether to use mask cross entropy loss.
            Defaults to False.
        reduction (str, optional): Defaults to 'mean'.
            Options are "none", "mean" and "sum".
        class_weight (list[float], optional): Weight of each class.
            Defaults to None.
        ignore_index (int | None): The label index to be ignored.
            Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
        avg_non_ignore (bool): Whether the loss is only averaged over
            non-ignored targets. Default: False.
    """

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 ignore_index=None,
                 loss_weight=1.0,
                 avg_non_ignore=False):
        super(CrossEntropyLoss, self).__init__()
        # Sigmoid (binary) and mask variants are mutually exclusive.
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.ignore_index = ignore_index
        self.avg_non_ignore = avg_non_ignore
        if ((ignore_index is not None) and not self.avg_non_ignore
                and self.reduction == 'mean'):
            warnings.warn(
                'Default ``avg_non_ignore`` is False, if you would like to '
                'ignore the certain label and average loss over non-ignore '
                'labels, which is the same with PyTorch official '
                'cross_entropy, set ``avg_non_ignore=True``.')

        # Select the concrete criterion once, up front.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def extra_repr(self):
        """Extra repr."""
        s = f'avg_non_ignore={self.avg_non_ignore}'
        return s

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                ignore_index=None,
                **kwargs):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): Learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor used to average the
                loss. Defaults to None.
            reduction_override (str, optional): Method used to reduce the
                loss. Options are "none", "mean" and "sum".
            ignore_index (int | None): The label index to be ignored.
                If not None, it overrides the default value. Default: None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if ignore_index is None:
            ignore_index = self.ignore_index

        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(
                self.class_weight, device=cls_score.device)
        else:
            class_weight = None
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            ignore_index=ignore_index,
            avg_non_ignore=self.avg_non_ignore,
            **kwargs)
        return loss_cls
12,148
39.228477
132
py
ERD
ERD-main/mmdet/models/losses/gaussian_focal_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union

import torch.nn as nn
from torch import Tensor

from mmdet.registry import MODELS
from .utils import weight_reduce_loss, weighted_loss


@weighted_loss
def gaussian_focal_loss(pred: Tensor,
                        gaussian_target: Tensor,
                        alpha: float = 2.0,
                        gamma: float = 4.0,
                        pos_weight: float = 1.0,
                        neg_weight: float = 1.0) -> Tensor:
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the
            prediction in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
        pos_weight(float): Positive sample loss weight. Defaults to 1.0.
        neg_weight(float): Negative sample loss weight. Defaults to 1.0.
    """
    eps = 1e-12
    # Exact gaussian peaks (value 1) are positives; every other position is
    # a negative, softly down-weighted by its gaussian value via gamma.
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_weight * pos_loss + neg_weight * neg_loss


def gaussian_focal_loss_with_pos_inds(
        pred: Tensor,
        gaussian_target: Tensor,
        pos_inds: Tensor,
        pos_labels: Tensor,
        alpha: float = 2.0,
        gamma: float = 4.0,
        pos_weight: float = 1.0,
        neg_weight: float = 1.0,
        reduction: str = 'mean',
        avg_factor: Optional[Union[int, float]] = None) -> Tensor:
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Note: In ``gaussian_focal_loss`` a position with value 1 in
    ``gaussian_target`` is a positive sample, whereas here the positive
    samples are passed in explicitly via ``pos_inds``.

    Args:
        pred (torch.Tensor): The prediction. Shape (N, num_classes).
        gaussian_target (torch.Tensor): The learning target of the
            prediction in gaussian distribution. Shape (N, num_classes).
        pos_inds (torch.Tensor): The positive sample index. Shape (M, ).
        pos_labels (torch.Tensor): The label corresponding to the positive
            sample index. Shape (M, ).
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
        pos_weight(float): Positive sample loss weight. Defaults to 1.0.
        neg_weight(float): Negative sample loss weight. Defaults to 1.0.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to 'mean`.
        avg_factor (int, float, optional): Average factor that is used to
            average the loss. Defaults to None.
    """
    eps = 1e-12
    neg_weights = (1 - gaussian_target).pow(gamma)

    # Gather, for each positive location, the prediction of its own class.
    pos_pred_pix = pred[pos_inds]
    pos_pred = pos_pred_pix.gather(1, pos_labels.unsqueeze(1))
    pos_loss = -(pos_pred + eps).log() * (1 - pos_pred).pow(alpha)
    pos_loss = weight_reduce_loss(pos_loss, None, reduction, avg_factor)

    # Negatives are computed densely over the whole heatmap.
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    neg_loss = weight_reduce_loss(neg_loss, None, reduction, avg_factor)

    return pos_weight * pos_loss + neg_weight * neg_loss


@MODELS.register_module()
class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
    not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
        pos_weight(float): Positive sample loss weight. Defaults to 1.0.
        neg_weight(float): Negative sample loss weight. Defaults to 1.0.
    """

    def __init__(self,
                 alpha: float = 2.0,
                 gamma: float = 4.0,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0,
                 pos_weight: float = 1.0,
                 neg_weight: float = 1.0) -> None:
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.pos_weight = pos_weight
        self.neg_weight = neg_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                pos_inds: Optional[Tensor] = None,
                pos_labels: Optional[Tensor] = None,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[Union[int, float]] = None,
                reduction_override: Optional[str] = None) -> Tensor:
        """Forward function.

        If you want to manually determine which positions are positive
        samples, set the ``pos_inds`` and ``pos_labels`` parameters.
        Currently only the CenterNet update version uses them.

        Args:
            pred (torch.Tensor): The prediction. Shape (N, num_classes).
            target (torch.Tensor): The learning target of the prediction
                in gaussian distribution. Shape (N, num_classes).
            pos_inds (torch.Tensor): The positive sample index.
                Defaults to None.
            pos_labels (torch.Tensor): The label corresponding to the
                positive sample index. Defaults to None.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, float, optional): Average factor that is used
                to average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used
                to override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if pos_inds is not None:
            assert pos_labels is not None
            # Only used by the CenterNet update version.
            loss_reg = self.loss_weight * gaussian_focal_loss_with_pos_inds(
                pred,
                target,
                pos_inds,
                pos_labels,
                alpha=self.alpha,
                gamma=self.gamma,
                pos_weight=self.pos_weight,
                neg_weight=self.neg_weight,
                reduction=reduction,
                avg_factor=avg_factor)
        else:
            loss_reg = self.loss_weight * gaussian_focal_loss(
                pred,
                target,
                weight,
                alpha=self.alpha,
                gamma=self.gamma,
                pos_weight=self.pos_weight,
                neg_weight=self.neg_weight,
                reduction=reduction,
                avg_factor=avg_factor)
        return loss_reg
7,689
40.122995
108
py
ERD
ERD-main/mmdet/models/losses/kd_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

from mmdet.registry import MODELS
from .utils import weighted_loss


@weighted_loss
def knowledge_distillation_kl_div_loss(pred: Tensor,
                                       soft_label: Tensor,
                                       T: int,
                                       detach_target: bool = True) -> Tensor:
    r"""Loss function for knowledge distilling using KL divergence.

    Args:
        pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation. Defaults to True.

    Returns:
        Tensor: Loss tensor with shape (N,).
    """
    assert pred.size() == soft_label.size()
    target = F.softmax(soft_label / T, dim=1)
    if detach_target:
        # The teacher distribution is a constant target; detach it so no
        # gradient flows back into the teacher logits.
        target = target.detach()

    # Scale by T^2 to keep gradient magnitudes comparable across
    # temperatures (Hinton et al., "Distilling the Knowledge in a
    # Neural Network").
    kd_loss = F.kl_div(
        F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
            T * T)

    return kd_loss


@MODELS.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
    """Loss function for knowledge distilling using KL divergence.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
        T (int): Temperature for distillation.
    """

    def __init__(self,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0,
                 T: int = 10) -> None:
        super().__init__()
        assert T >= 1
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.T = T

    def forward(self,
                pred: Tensor,
                soft_label: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None) -> Tensor:
        """Forward function.

        Args:
            pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
            weight (Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            Tensor: Loss tensor.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')

        reduction = (
            reduction_override if reduction_override else self.reduction)

        loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
            pred,
            soft_label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            T=self.T)

        return loss_kd
3,123
31.541667
78
py
ERD
ERD-main/mmdet/models/backbones/pvt.py
# Copyright (c) OpenMMLab. All rights reserved. import math import warnings from collections import OrderedDict import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer from mmcv.cnn.bricks.drop import build_dropout from mmcv.cnn.bricks.transformer import MultiheadAttention from mmengine.logging import MMLogger from mmengine.model import (BaseModule, ModuleList, Sequential, constant_init, normal_init, trunc_normal_init) from mmengine.model.weight_init import trunc_normal_ from mmengine.runner.checkpoint import CheckpointLoader, load_state_dict from torch.nn.modules.utils import _pair as to_2tuple from mmdet.registry import MODELS from ..layers import PatchEmbed, nchw_to_nlc, nlc_to_nchw class MixFFN(BaseModule): """An implementation of MixFFN of PVT. The differences between MixFFN & FFN: 1. Use 1X1 Conv to replace Linear layer. 2. Introduce 3X3 Depth-wise Conv to encode positional information. Args: embed_dims (int): The feature dimension. Same as `MultiheadAttention`. feedforward_channels (int): The hidden dimension of FFNs. act_cfg (dict, optional): The activation config for FFNs. Default: dict(type='GELU'). ffn_drop (float, optional): Probability of an element to be zeroed in FFN. Default 0.0. dropout_layer (obj:`ConfigDict`): The dropout_layer used when adding the shortcut. Default: None. use_conv (bool): If True, add 3x3 DWConv between two Linear layers. Defaults: False. init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization. Default: None. 
""" def __init__(self, embed_dims, feedforward_channels, act_cfg=dict(type='GELU'), ffn_drop=0., dropout_layer=None, use_conv=False, init_cfg=None): super(MixFFN, self).__init__(init_cfg=init_cfg) self.embed_dims = embed_dims self.feedforward_channels = feedforward_channels self.act_cfg = act_cfg activate = build_activation_layer(act_cfg) in_channels = embed_dims fc1 = Conv2d( in_channels=in_channels, out_channels=feedforward_channels, kernel_size=1, stride=1, bias=True) if use_conv: # 3x3 depth wise conv to provide positional encode information dw_conv = Conv2d( in_channels=feedforward_channels, out_channels=feedforward_channels, kernel_size=3, stride=1, padding=(3 - 1) // 2, bias=True, groups=feedforward_channels) fc2 = Conv2d( in_channels=feedforward_channels, out_channels=in_channels, kernel_size=1, stride=1, bias=True) drop = nn.Dropout(ffn_drop) layers = [fc1, activate, drop, fc2, drop] if use_conv: layers.insert(1, dw_conv) self.layers = Sequential(*layers) self.dropout_layer = build_dropout( dropout_layer) if dropout_layer else torch.nn.Identity() def forward(self, x, hw_shape, identity=None): out = nlc_to_nchw(x, hw_shape) out = self.layers(out) out = nchw_to_nlc(out) if identity is None: identity = x return identity + self.dropout_layer(out) class SpatialReductionAttention(MultiheadAttention): """An implementation of Spatial Reduction Attention of PVT. This module is modified from MultiheadAttention which is a module from mmcv.cnn.bricks.transformer. Args: embed_dims (int): The embedding dimension. num_heads (int): Parallel attention heads. attn_drop (float): A Dropout layer on attn_output_weights. Default: 0.0. proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. Default: 0.0. dropout_layer (obj:`ConfigDict`): The dropout_layer used when adding the shortcut. Default: None. batch_first (bool): Key, Query and Value are shape of (batch, n, embed_dim) or (n, batch, embed_dim). Default: False. qkv_bias (bool): enable bias for qkv if True. 
Default: True. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='LN'). sr_ratio (int): The ratio of spatial reduction of Spatial Reduction Attention of PVT. Default: 1. init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization. Default: None. """ def __init__(self, embed_dims, num_heads, attn_drop=0., proj_drop=0., dropout_layer=None, batch_first=True, qkv_bias=True, norm_cfg=dict(type='LN'), sr_ratio=1, init_cfg=None): super().__init__( embed_dims, num_heads, attn_drop, proj_drop, batch_first=batch_first, dropout_layer=dropout_layer, bias=qkv_bias, init_cfg=init_cfg) self.sr_ratio = sr_ratio if sr_ratio > 1: self.sr = Conv2d( in_channels=embed_dims, out_channels=embed_dims, kernel_size=sr_ratio, stride=sr_ratio) # The ret[0] of build_norm_layer is norm name. self.norm = build_norm_layer(norm_cfg, embed_dims)[1] # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa from mmdet import digit_version, mmcv_version if mmcv_version < digit_version('1.3.17'): warnings.warn('The legacy version of forward function in' 'SpatialReductionAttention is deprecated in' 'mmcv>=1.3.17 and will no longer support in the' 'future. Please upgrade your mmcv.') self.forward = self.legacy_forward def forward(self, x, hw_shape, identity=None): x_q = x if self.sr_ratio > 1: x_kv = nlc_to_nchw(x, hw_shape) x_kv = self.sr(x_kv) x_kv = nchw_to_nlc(x_kv) x_kv = self.norm(x_kv) else: x_kv = x if identity is None: identity = x_q # Because the dataflow('key', 'query', 'value') of # ``torch.nn.MultiheadAttention`` is (num_queries, batch, # embed_dims), We should adjust the shape of dataflow from # batch_first (batch, num_queries, embed_dims) to num_queries_first # (num_queries ,batch, embed_dims), and recover ``attn_output`` # from num_queries_first to batch_first. 
if self.batch_first: x_q = x_q.transpose(0, 1) x_kv = x_kv.transpose(0, 1) out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] if self.batch_first: out = out.transpose(0, 1) return identity + self.dropout_layer(self.proj_drop(out)) def legacy_forward(self, x, hw_shape, identity=None): """multi head attention forward in mmcv version < 1.3.17.""" x_q = x if self.sr_ratio > 1: x_kv = nlc_to_nchw(x, hw_shape) x_kv = self.sr(x_kv) x_kv = nchw_to_nlc(x_kv) x_kv = self.norm(x_kv) else: x_kv = x if identity is None: identity = x_q out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] return identity + self.dropout_layer(self.proj_drop(out)) class PVTEncoderLayer(BaseModule): """Implements one encoder layer in PVT. Args: embed_dims (int): The feature dimension. num_heads (int): Parallel attention heads. feedforward_channels (int): The hidden dimension for FFNs. drop_rate (float): Probability of an element to be zeroed. after the feed forward layer. Default: 0.0. attn_drop_rate (float): The drop out rate for attention layer. Default: 0.0. drop_path_rate (float): stochastic depth rate. Default: 0.0. qkv_bias (bool): enable bias for qkv if True. Default: True. act_cfg (dict): The activation config for FFNs. Default: dict(type='GELU'). norm_cfg (dict): Config dict for normalization layer. Default: dict(type='LN'). sr_ratio (int): The ratio of spatial reduction of Spatial Reduction Attention of PVT. Default: 1. use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. Default: False. init_cfg (dict, optional): Initialization config dict. Default: None. """ def __init__(self, embed_dims, num_heads, feedforward_channels, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., qkv_bias=True, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), sr_ratio=1, use_conv_ffn=False, init_cfg=None): super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg) # The ret[0] of build_norm_layer is norm name. 
        # --- tail of PVTEncoderLayer.__init__ (the class header precedes this
        # chunk).  build_norm_layer returns (name, layer); [1] keeps the layer.
        self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]

        self.attn = SpatialReductionAttention(
            embed_dims=embed_dims,
            num_heads=num_heads,
            attn_drop=attn_drop_rate,
            proj_drop=drop_rate,
            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
            qkv_bias=qkv_bias,
            norm_cfg=norm_cfg,
            sr_ratio=sr_ratio)

        # The ret[0] of build_norm_layer is norm name.
        self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]

        self.ffn = MixFFN(
            embed_dims=embed_dims,
            feedforward_channels=feedforward_channels,
            ffn_drop=drop_rate,
            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
            use_conv=use_conv_ffn,
            act_cfg=act_cfg)

    def forward(self, x, hw_shape):
        """Pre-norm encoder block: attention then FFN, each wrapped with a
        residual connection via the ``identity`` argument."""
        x = self.attn(self.norm1(x), hw_shape, identity=x)
        x = self.ffn(self.norm2(x), hw_shape, identity=x)

        return x


class AbsolutePositionEmbedding(BaseModule):
    """An implementation of the absolute position embedding in PVT.

    Args:
        pos_shape (int): The shape of the absolute position embedding.
        pos_dim (int): The dimension of the absolute position embedding.
        drop_rate (float): Probability of an element to be zeroed.
            Default: 0.0.
    """

    def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None):
        super().__init__(init_cfg=init_cfg)

        # Normalize pos_shape to an (h, w) pair.
        if isinstance(pos_shape, int):
            pos_shape = to_2tuple(pos_shape)
        elif isinstance(pos_shape, tuple):
            if len(pos_shape) == 1:
                pos_shape = to_2tuple(pos_shape[0])
            assert len(pos_shape) == 2, \
                f'The size of image should have length 1 or 2, ' \
                f'but got {len(pos_shape)}'
        self.pos_shape = pos_shape
        self.pos_dim = pos_dim

        # Learnable embedding of shape [1, h * w, pos_dim].
        self.pos_embed = nn.Parameter(
            torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim))
        self.drop = nn.Dropout(p=drop_rate)

    def init_weights(self):
        # Truncated-normal init, matching the original PVT implementation.
        trunc_normal_(self.pos_embed, std=0.02)

    def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'):
        """Resize pos_embed weights.

        Resize pos_embed using bilinear interpolate method.

        Args:
            pos_embed (torch.Tensor): Position embedding weights.
            input_shape (tuple): Tuple for (downsampled input image height,
                downsampled input image width).
            mode (str): Algorithm used for upsampling:
                ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
                ``'trilinear'``. Default: ``'bilinear'``.

        Return:
            torch.Tensor: The resized pos_embed of shape [B, L_new, C].
        """
        assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
        pos_h, pos_w = self.pos_shape
        # Keep only the trailing h*w positional tokens (drops any leading
        # extra token such as a cls token, if present).
        pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
        # [1, L, C] -> [1, C, h, w] so F.interpolate can resize spatially.
        pos_embed_weight = pos_embed_weight.reshape(
            1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous()
        pos_embed_weight = F.interpolate(
            pos_embed_weight, size=input_shape, mode=mode)
        # Back to [1, L_new, C].
        pos_embed_weight = torch.flatten(pos_embed_weight,
                                         2).transpose(1, 2).contiguous()
        pos_embed = pos_embed_weight

        return pos_embed

    def forward(self, x, hw_shape, mode='bilinear'):
        """Add the (resized) position embedding to ``x`` and apply dropout."""
        pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode)
        return self.drop(x + pos_embed)


@MODELS.register_module()
class PyramidVisionTransformer(BaseModule):
    """Pyramid Vision Transformer (PVT)

    Implementation of `Pyramid Vision Transformer: A Versatile Backbone for
    Dense Prediction without Convolutions
    <https://arxiv.org/pdf/2102.12122.pdf>`_.

    Args:
        pretrain_img_size (int | tuple[int]): The size of input image when
            pretrain. Defaults: 224.
        in_channels (int): Number of input channels. Default: 3.
        embed_dims (int): Embedding dimension. Default: 64.
        num_stages (int): The num of stages. Default: 4.
        num_layers (Sequence[int]): The layer number of each transformer encode
            layer. Default: [3, 4, 6, 3].
        num_heads (Sequence[int]): The attention heads of each transformer
            encode layer. Default: [1, 2, 5, 8].
        patch_sizes (Sequence[int]): The patch_size of each patch embedding.
            Default: [4, 2, 2, 2].
        strides (Sequence[int]): The stride of each patch embedding.
            Default: [4, 2, 2, 2].
        paddings (Sequence[int]): The padding of each patch embedding.
            Default: [0, 0, 0, 0].
        sr_ratios (Sequence[int]): The spatial reduction rate of each
            transformer encode layer. Default: [8, 4, 2, 1].
        out_indices (Sequence[int] | int): Output from which stages.
            Default: (0, 1, 2, 3).
        mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the
            embedding dim of each transformer encode layer.
            Default: [8, 8, 4, 4].
        qkv_bias (bool): Enable bias for qkv if True. Default: True.
        drop_rate (float): Probability of an element to be zeroed.
            Default 0.0.
        attn_drop_rate (float): The drop out rate for attention layer.
            Default 0.0.
        drop_path_rate (float): stochastic depth rate. Default 0.1.
        use_abs_pos_embed (bool): If True, add absolute position embedding to
            the patch embedding. Defaults: True.
        use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.
            Default: False.
        act_cfg (dict): The activation config for FFNs.
            Default: dict(type='GELU').
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='LN').
        pretrained (str, optional): model pretrained path. Default: None.
        convert_weights (bool): The flag indicates whether the
            pre-trained model is from the original repo. We may need
            to convert some keys to make it compatible.
            Default: True.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 pretrain_img_size=224,
                 in_channels=3,
                 embed_dims=64,
                 num_stages=4,
                 num_layers=[3, 4, 6, 3],
                 num_heads=[1, 2, 5, 8],
                 patch_sizes=[4, 2, 2, 2],
                 strides=[4, 2, 2, 2],
                 paddings=[0, 0, 0, 0],
                 sr_ratios=[8, 4, 2, 1],
                 out_indices=(0, 1, 2, 3),
                 mlp_ratios=[8, 8, 4, 4],
                 qkv_bias=True,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.1,
                 use_abs_pos_embed=True,
                 norm_after_stage=False,
                 use_conv_ffn=False,
                 act_cfg=dict(type='GELU'),
                 norm_cfg=dict(type='LN', eps=1e-6),
                 pretrained=None,
                 convert_weights=True,
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)

        self.convert_weights = convert_weights
        # Normalize pretrain_img_size to an (h, w) pair.
        if isinstance(pretrain_img_size, int):
            pretrain_img_size = to_2tuple(pretrain_img_size)
        elif isinstance(pretrain_img_size, tuple):
            if len(pretrain_img_size) == 1:
                pretrain_img_size = to_2tuple(pretrain_img_size[0])
            assert len(pretrain_img_size) == 2, \
                f'The size of image should have length 1 or 2, ' \
                f'but got {len(pretrain_img_size)}'

        # `pretrained` is kept for backward compatibility only; it is mapped
        # onto init_cfg with a deprecation warning.
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be setting at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            self.init_cfg = init_cfg
        else:
            raise TypeError('pretrained must be a str or None')

        self.embed_dims = embed_dims

        self.num_stages = num_stages
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.sr_ratios = sr_ratios
        # All per-stage config sequences must agree on the number of stages.
        assert num_stages == len(num_layers) == len(num_heads) \
            == len(patch_sizes) == len(strides) == len(sr_ratios)

        self.out_indices = out_indices
        assert max(out_indices) < self.num_stages
        self.pretrained = pretrained

        # transformer encoder
        dpr = [
            x.item()
            for x in torch.linspace(0, drop_path_rate, sum(num_layers))
        ]  # stochastic num_layer decay rule

        cur = 0
        self.layers = ModuleList()
        for i, num_layer in enumerate(num_layers):
            # Stage i embedding dim scales with its head count.
            embed_dims_i = embed_dims * num_heads[i]
            patch_embed = PatchEmbed(
                in_channels=in_channels,
                embed_dims=embed_dims_i,
                kernel_size=patch_sizes[i],
                stride=strides[i],
                padding=paddings[i],
                bias=True,
                norm_cfg=norm_cfg)

            layers = ModuleList()
            if use_abs_pos_embed:
                # Downsample factor accumulated over all patch embeds so far.
                pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1])
                pos_embed = AbsolutePositionEmbedding(
                    pos_shape=pos_shape,
                    pos_dim=embed_dims_i,
                    drop_rate=drop_rate)
                layers.append(pos_embed)
            layers.extend([
                PVTEncoderLayer(
                    embed_dims=embed_dims_i,
                    num_heads=num_heads[i],
                    feedforward_channels=mlp_ratios[i] * embed_dims_i,
                    drop_rate=drop_rate,
                    attn_drop_rate=attn_drop_rate,
                    drop_path_rate=dpr[cur + idx],
                    qkv_bias=qkv_bias,
                    act_cfg=act_cfg,
                    norm_cfg=norm_cfg,
                    sr_ratio=sr_ratios[i],
                    use_conv_ffn=use_conv_ffn) for idx in range(num_layer)
            ])
            in_channels = embed_dims_i
            # The ret[0] of build_norm_layer is norm name.
            if norm_after_stage:
                norm = build_norm_layer(norm_cfg, embed_dims_i)[1]
            else:
                norm = nn.Identity()
            # Each stage is stored as [patch_embed, encoder_layers, norm].
            self.layers.append(ModuleList([patch_embed, layers, norm]))
            cur += num_layer

    def init_weights(self):
        """Initialize weights from scratch, or load a pretrained checkpoint
        declared via ``init_cfg`` (optionally converting original-repo keys).
        """
        logger = MMLogger.get_current_instance()
        if self.init_cfg is None:
            # NOTE(review): Logger.warn is a deprecated alias of
            # Logger.warning in the stdlib logging API — consider renaming.
            logger.warn(f'No pre-trained weights for '
                        f'{self.__class__.__name__}, '
                        f'training start from scratch')
            for m in self.modules():
                if isinstance(m, nn.Linear):
                    trunc_normal_init(m, std=.02, bias=0.)
                elif isinstance(m, nn.LayerNorm):
                    constant_init(m, 1.0)
                elif isinstance(m, nn.Conv2d):
                    # Kaiming-style fan-out init for conv layers.
                    fan_out = m.kernel_size[0] * m.kernel_size[
                        1] * m.out_channels
                    fan_out //= m.groups
                    normal_init(m, 0, math.sqrt(2.0 / fan_out))
                elif isinstance(m, AbsolutePositionEmbedding):
                    m.init_weights()
        else:
            assert 'checkpoint' in self.init_cfg, f'Only support ' \
                                                  f'specify `Pretrained` in ' \
                                                  f'`init_cfg` in ' \
                                                  f'{self.__class__.__name__} '
            checkpoint = CheckpointLoader.load_checkpoint(
                self.init_cfg.checkpoint, logger=logger, map_location='cpu')
            logger.warn(f'Load pre-trained model for '
                        f'{self.__class__.__name__} from original repo')
            if 'state_dict' in checkpoint:
                state_dict = checkpoint['state_dict']
            elif 'model' in checkpoint:
                state_dict = checkpoint['model']
            else:
                state_dict = checkpoint
            if self.convert_weights:
                # Because pvt backbones are not supported by mmcls,
                # so we need to convert pre-trained weights to match this
                # implementation.
                state_dict = pvt_convert(state_dict)
            load_state_dict(self, state_dict, strict=False, logger=logger)

    def forward(self, x):
        """Run all stages; return NCHW feature maps of stages in
        ``out_indices``."""
        outs = []

        for i, layer in enumerate(self.layers):
            # layer = [patch_embed, encoder_layers, norm]
            x, hw_shape = layer[0](x)

            for block in layer[1]:
                x = block(x, hw_shape)
            x = layer[2](x)
            x = nlc_to_nchw(x, hw_shape)
            if i in self.out_indices:
                outs.append(x)

        return outs


@MODELS.register_module()
class PyramidVisionTransformerV2(PyramidVisionTransformer):
    """Implementation of `PVTv2: Improved Baselines with Pyramid Vision
    Transformer <https://arxiv.org/pdf/2106.13797.pdf>`_."""

    def __init__(self, **kwargs):
        # PVTv2 defaults: overlapping patch embeds, no abs pos embed,
        # per-stage norm, and convolutional FFN.
        super(PyramidVisionTransformerV2, self).__init__(
            patch_sizes=[7, 3, 3, 3],
            paddings=[3, 1, 1, 1],
            use_abs_pos_embed=False,
            norm_after_stage=True,
            use_conv_ffn=True,
            **kwargs)


def pvt_convert(ckpt):
    """Convert a checkpoint from the original PVT repo into this
    implementation's key layout, returning a new state dict."""
    new_ckpt = OrderedDict()
    # Process the concat between q linear weights and kv linear weights
    use_abs_pos_embed = False
    use_conv_ffn = False
    for k in ckpt.keys():
        if k.startswith('pos_embed'):
            use_abs_pos_embed = True
        if k.find('dwconv') >= 0:
            use_conv_ffn = True
    for k, v in ckpt.items():
        # Classification head / final norm / cls token are not used by the
        # detection backbone.
        if k.startswith('head'):
            continue
        if k.startswith('norm.'):
            continue
        if k.startswith('cls_token'):
            continue
        if k.startswith('pos_embed'):
            stage_i = int(k.replace('pos_embed', ''))
            new_k = k.replace(f'pos_embed{stage_i}',
                              f'layers.{stage_i - 1}.1.0.pos_embed')
            if stage_i == 4 and v.size(1) == 50:  # 1 (cls token) + 7 * 7
                new_v = v[:, 1:, :]  # remove cls token
            else:
                new_v = v
        elif k.startswith('patch_embed'):
            stage_i = int(k.split('.')[0].replace('patch_embed', ''))
            new_k = k.replace(f'patch_embed{stage_i}',
                              f'layers.{stage_i - 1}.0')
            new_v = v
            if 'proj.' in new_k:
                new_k = new_k.replace('proj.', 'projection.')
        elif k.startswith('block'):
            stage_i = int(k.split('.')[0].replace('block', ''))
            layer_i = int(k.split('.')[1])
            # If a positional embedding occupies slot 0, encoder layers shift
            # by one (bool is used as int here).
            new_layer_i = layer_i + use_abs_pos_embed
            new_k = k.replace(f'block{stage_i}.{layer_i}',
                              f'layers.{stage_i - 1}.1.{new_layer_i}')
            new_v = v
            if 'attn.q.' in new_k:
                # Concatenate q and kv weights into a single in_proj tensor.
                sub_item_k = k.replace('q.', 'kv.')
                new_k = new_k.replace('q.', 'attn.in_proj_')
                new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)
            elif 'attn.kv.' in new_k:
                continue  # already merged into in_proj above
            elif 'attn.proj.' in new_k:
                new_k = new_k.replace('proj.', 'attn.out_proj.')
            elif 'attn.sr.' in new_k:
                # NOTE(review): replacing 'sr.' with 'sr.' is a no-op; the key
                # is intentionally kept unchanged apart from the block prefix.
                new_k = new_k.replace('sr.', 'sr.')
            elif 'mlp.' in new_k:
                # NOTE(review): `string` is dead debug state — it is built up
                # but never used; candidate for removal.
                string = f'{new_k}-'
                new_k = new_k.replace('mlp.', 'ffn.layers.')
                if 'fc1.weight' in new_k or 'fc2.weight' in new_k:
                    # Linear weights become 1x1 conv weights.
                    new_v = v.reshape((*v.shape, 1, 1))
                new_k = new_k.replace('fc1.', '0.')
                new_k = new_k.replace('dwconv.dwconv.', '1.')
                if use_conv_ffn:
                    new_k = new_k.replace('fc2.', '4.')
                else:
                    new_k = new_k.replace('fc2.', '3.')
                string += f'{new_k} {v.shape}-{new_v.shape}'
        elif k.startswith('norm'):
            stage_i = int(k[4])
            new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2')
            new_v = v
        else:
            new_k = k
            new_v = v
        new_ckpt[new_k] = new_v

    return new_ckpt
26,272
38.448949
89
py
ERD
ERD-main/mmdet/models/backbones/hrnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule, ModuleList, Sequential
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.registry import MODELS
from .resnet import BasicBlock, Bottleneck


class HRModule(BaseModule):
    """High-Resolution Module for HRNet.

    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
    is in this module.
    """

    def __init__(self,
                 num_branches,
                 blocks,
                 num_blocks,
                 in_channels,
                 num_channels,
                 multiscale_output=True,
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 block_init_cfg=None,
                 init_cfg=None):
        super(HRModule, self).__init__(init_cfg)
        self.block_init_cfg = block_init_cfg
        self._check_branches(num_branches, num_blocks, in_channels,
                             num_channels)

        self.in_channels = in_channels
        self.num_branches = num_branches

        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks,
                                            num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels,
                        num_channels):
        """Validate that per-branch config lists match ``num_branches``."""
        if num_branches != len(num_blocks):
            error_msg = f'NUM_BRANCHES({num_branches}) ' \
                        f'!= NUM_BLOCKS({len(num_blocks)})'
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = f'NUM_BRANCHES({num_branches}) ' \
                        f'!= NUM_CHANNELS({len(num_channels)})'
            raise ValueError(error_msg)

        if num_branches != len(in_channels):
            error_msg = f'NUM_BRANCHES({num_branches}) ' \
                        f'!= NUM_INCHANNELS({len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        """Build the residual-block stack for a single branch."""
        downsample = None
        # A projection shortcut is needed when stride or channel count change.
        if stride != 1 or \
                self.in_channels[branch_index] != \
                num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    self.in_channels[branch_index],
                    num_channels[branch_index] * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, num_channels[branch_index] *
                                 block.expansion)[1])

        layers = []
        layers.append(
            block(
                self.in_channels[branch_index],
                num_channels[branch_index],
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg,
                init_cfg=self.block_init_cfg))
        # Record the branch's new channel count for later fuse layers.
        self.in_channels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(
                    self.in_channels[branch_index],
                    num_channels[branch_index],
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg,
                    init_cfg=self.block_init_cfg))

        return Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches of this module."""
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return ModuleList(branches)

    def _make_fuse_layers(self):
        """Build cross-resolution fusion layers.

        fuse_layers[i][j] maps branch j's resolution/channels to branch i's:
        upsample (1x1 conv + nearest) when j > i, identity (None) when j == i,
        and a chain of stride-2 3x3 convs when j < i.
        """
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels[j],
                                in_channels[i],
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            nn.Upsample(
                                scale_factor=2**(j - i), mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            # Last downsample step also adapts channel count;
                            # no ReLU so the fused sum stays pre-activation.
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[i],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[j],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Forward function."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        # Run each branch, then fuse every branch into every output scale.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = 0
            for j in range(self.num_branches):
                if i == j:
                    y += x[j]
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse


@MODELS.register_module()
class HRNet(BaseModule):
    """HRNet backbone.

    `High-Resolution Representations for Labeling Pixels and Regions
    arXiv: <https://arxiv.org/abs/1904.04514>`_.

    Args:
        extra (dict): Detailed configuration for each stage of HRNet.
            There must be 4 stages, the configuration for each stage must have
            5 keys:

                - num_modules(int): The number of HRModule in this stage.
                - num_branches(int): The number of branches in the HRModule.
                - block(str): The type of convolution block.
                - num_blocks(tuple): The number of blocks in each branch.
                    The length must be equal to num_branches.
                - num_channels(tuple): The number of channels in each branch.
                    The length must be equal to num_branches.
        in_channels (int): Number of input image channels. Default: 3.
        conv_cfg (dict): Dictionary to construct and config conv layer.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: True.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity. Default: False.
        multiscale_output (bool): Whether to output multi-level features
            produced by multiple branches. If False, only the first level
            feature will be output. Default: True.
        pretrained (str, optional): Model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.

    Example:
        >>> from mmdet.models import HRNet
        >>> import torch
        >>> extra = dict(
        >>>     stage1=dict(
        >>>         num_modules=1,
        >>>         num_branches=1,
        >>>         block='BOTTLENECK',
        >>>         num_blocks=(4, ),
        >>>         num_channels=(64, )),
        >>>     stage2=dict(
        >>>         num_modules=1,
        >>>         num_branches=2,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4),
        >>>         num_channels=(32, 64)),
        >>>     stage3=dict(
        >>>         num_modules=4,
        >>>         num_branches=3,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4),
        >>>         num_channels=(32, 64, 128)),
        >>>     stage4=dict(
        >>>         num_modules=3,
        >>>         num_branches=4,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4, 4),
        >>>         num_channels=(32, 64, 128, 256)))
        >>> self = HRNet(extra, in_channels=1)
        >>> self.eval()
        >>> inputs = torch.rand(1, 1, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 32, 8, 8)
        (1, 64, 4, 4)
        (1, 128, 2, 2)
        (1, 256, 1, 1)
    """

    blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}

    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=True,
                 with_cp=False,
                 zero_init_residual=False,
                 multiscale_output=True,
                 pretrained=None,
                 init_cfg=None):
        super(HRNet, self).__init__(init_cfg)

        self.pretrained = pretrained
        # Backward-compatible handling of the deprecated `pretrained` arg.
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

        # Assert configurations of 4 stages are in extra
        assert 'stage1' in extra and 'stage2' in extra \
               and 'stage3' in extra and 'stage4' in extra
        # Assert whether the length of `num_blocks` and `num_channels` are
        # equal to `num_branches`
        for i in range(4):
            cfg = extra[f'stage{i + 1}']
            assert len(cfg['num_blocks']) == cfg['num_branches'] and \
                   len(cfg['num_channels']) == cfg['num_branches']

        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # stem net: two stride-2 3x3 convs (overall 4x downsampling).
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            self.conv_cfg,
            64,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)

        # stage 1
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multiscale_output=multiscale_output)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        return getattr(self, self.norm2_name)

    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        """Build transition layers between stages.

        For existing branches, adapt channels with a 3x3 conv when they
        differ; for newly-added (lower-resolution) branches, downsample the
        previous stage's last branch with stride-2 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg,
                                             num_channels_cur_layer[i])[1],
                            nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv_downsamples = []
                for j in range(i + 1 - num_branches_pre):
                    in_channels = num_channels_pre_layer[-1]
                    out_channels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else in_channels
                    conv_downsamples.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels,
                                out_channels,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg, out_channels)[1],
                            nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))

        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Build a plain residual layer (used for stage 1)."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])

        layers = []
        block_init_cfg = None
        # Zero-init the last norm of each residual block so it starts as
        # identity — only when training from scratch with default init.
        if self.pretrained is None and not hasattr(
                self, 'init_cfg') and self.zero_init_residual:
            if block is BasicBlock:
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm2'))
            elif block is Bottleneck:
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm3'))
        layers.append(
            block(
                inplanes,
                planes,
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg,
                init_cfg=block_init_cfg,
            ))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(
                    inplanes,
                    planes,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg,
                    init_cfg=block_init_cfg))

        return Sequential(*layers)

    def _make_stage(self, layer_config, in_channels, multiscale_output=True):
        """Build a stage as a sequence of ``num_modules`` HRModules."""
        num_modules = layer_config['num_modules']
        num_branches = layer_config['num_branches']
        num_blocks = layer_config['num_blocks']
        num_channels = layer_config['num_channels']
        block = self.blocks_dict[layer_config['block']]

        hr_modules = []
        block_init_cfg = None
        if self.pretrained is None and not hasattr(
                self, 'init_cfg') and self.zero_init_residual:
            if block is BasicBlock:
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm2'))
            elif block is Bottleneck:
                block_init_cfg = dict(
                    type='Constant', val=0, override=dict(name='norm3'))

        for i in range(num_modules):
            # multi_scale_output is only used for the last module
            if not multiscale_output and i == num_modules - 1:
                reset_multiscale_output = False
            else:
                reset_multiscale_output = True

            hr_modules.append(
                HRModule(
                    num_branches,
                    block,
                    num_blocks,
                    in_channels,
                    num_channels,
                    reset_multiscale_output,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg,
                    block_init_cfg=block_init_cfg))

        return Sequential(*hr_modules), in_channels

    def forward(self, x):
        """Forward function."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.layer1(x)

        x_list = []
        for i in range(self.stage2_cfg['num_branches']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        x_list = []
        for i in range(self.stage3_cfg['num_branches']):
            if self.transition2[i] is not None:
                # New branches are spawned from the previous stage's last
                # (lowest-resolution) output.
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        x_list = []
        for i in range(self.stage4_cfg['num_branches']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)

        return y_list

    def train(self, mode=True):
        """Convert the model into training mode while keeping the
        normalization layers frozen (when ``norm_eval`` is True)."""
        super(HRNet, self).train(mode)
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
23,108
38.167797
79
py
ERD
ERD-main/mmdet/models/backbones/regnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import numpy as np
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer

from mmdet.registry import MODELS
from .resnet import ResNet
from .resnext import Bottleneck


@MODELS.register_module()
class RegNet(ResNet):
    """RegNet backbone.

    More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .

    Args:
        arch (dict): The parameter of RegNets.

            - w0 (int): initial width
            - wa (float): slope of width
            - wm (float): quantization parameter to quantize the width
            - depth (int): depth of the backbone
            - group_w (int): width of group
            - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
        strides (Sequence[int]): Strides of the first block of each stage.
        base_channels (int): Base channels after stem layer.
        in_channels (int): Number of input image channels. Default: 3.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import RegNet
        >>> import torch
        >>> self = RegNet(
        ...     arch=dict(
        ...         w0=88,
        ...         wa=26.31,
        ...         wm=2.25,
        ...         group_w=48,
        ...         depth=25,
        ...         bot_mul=1.0))
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 96, 8, 8)
        (1, 192, 4, 4)
        (1, 432, 2, 2)
        (1, 1008, 1, 1)
    """

    # Published RegNetX variants, keyed by compute budget.
    arch_settings = {
        'regnetx_400mf':
        dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
        'regnetx_800mf':
        dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
        'regnetx_1.6gf':
        dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
        'regnetx_3.2gf':
        dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
        'regnetx_4.0gf':
        dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
        'regnetx_6.4gf':
        dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
        'regnetx_8.0gf':
        dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
        'regnetx_12gf':
        dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
    }

    def __init__(self,
                 arch,
                 in_channels=3,
                 stem_channels=32,
                 base_channels=32,
                 strides=(2, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True,
                 pretrained=None,
                 init_cfg=None):
        # Skip ResNet.__init__ on purpose: RegNet generates its own stage
        # widths/blocks rather than using ResNet's arch table.
        super(ResNet, self).__init__(init_cfg)

        # Generate RegNet parameters first
        if isinstance(arch, str):
            assert arch in self.arch_settings, \
                f'"arch": "{arch}" is not one of the' \
                ' arch_settings'
            arch = self.arch_settings[arch]
        elif not isinstance(arch, dict):
            raise ValueError('Expect "arch" to be either a string '
                             f'or a dict, got {type(arch)}')

        widths, num_stages = self.generate_regnet(
            arch['w0'],
            arch['wa'],
            arch['wm'],
            arch['depth'],
        )
        # Convert to per stage format
        stage_widths, stage_blocks = self.get_stages_from_blocks(widths)
        # Generate group widths and bot muls
        group_widths = [arch['group_w'] for _ in range(num_stages)]
        self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
        # Adjust the compatibility of stage_widths and group_widths
        stage_widths, group_widths = self.adjust_width_group(
            stage_widths, self.bottleneck_ratio, group_widths)

        # Group params by stage
        self.stage_widths = stage_widths
        self.group_widths = group_widths
        self.depth = sum(stage_blocks)
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.zero_init_residual = zero_init_residual
        self.block = Bottleneck
        # Temporarily force expansion=1 while building layers; restored below.
        expansion_bak = self.block.expansion
        self.block.expansion = 1
        self.stage_blocks = stage_blocks[:num_stages]

        self._make_stem_layer(in_channels, stem_channels)

        block_init_cfg = None
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
                if self.zero_init_residual:
                    block_init_cfg = dict(
                        type='Constant', val=0, override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')

        self.inplanes = stem_channels
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            group_width = self.group_widths[i]
            width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))
            stage_groups = width // group_width

            dcn = self.dcn if self.stage_with_dcn[i] else None
            if self.plugins is not None:
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None

            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=self.stage_widths[i],
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                groups=stage_groups,
                base_width=group_width,
                base_channels=self.stage_widths[i],
                init_cfg=block_init_cfg)
            self.inplanes = self.stage_widths[i]
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

        self.feat_dim = stage_widths[-1]
        self.block.expansion = expansion_bak

    def _make_stem_layer(self, in_channels, base_channels):
        """Build the stem: a single stride-2 3x3 conv + norm + ReLU."""
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            base_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, base_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)

    def generate_regnet(self,
                        initial_width,
                        width_slope,
                        width_parameter,
                        depth,
                        divisor=8):
        """Generates per block width from RegNet parameters.

        Args:
            initial_width ([int]): Initial width of the backbone
            width_slope ([float]): Slope of the quantized linear function
            width_parameter ([int]): Parameter used to quantize the width.
            depth ([int]): Depth of the backbone.
            divisor (int, optional): The divisor of channels. Defaults to 8.

        Returns:
            list, int: return a list of widths of each stage and the number \
                of stages
        """
        assert width_slope >= 0
        assert initial_width > 0
        assert width_parameter > 1
        assert initial_width % divisor == 0
        # Continuous widths on a linear ramp, then quantized to powers of
        # width_parameter and rounded to multiples of divisor.
        widths_cont = np.arange(depth) * width_slope + initial_width
        ks = np.round(
            np.log(widths_cont / initial_width) / np.log(width_parameter))
        widths = initial_width * np.power(width_parameter, ks)
        widths = np.round(np.divide(widths, divisor)) * divisor
        num_stages = len(np.unique(widths))
        widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
        return widths, num_stages

    @staticmethod
    def quantize_float(number, divisor):
        """Converts a float to closest non-zero int divisible by divisor.

        Args:
            number (int): Original number to be quantized.
            divisor (int): Divisor used to quantize the number.

        Returns:
            int: quantized number that is divisible by divisor.
        """
        return int(round(number / divisor) * divisor)

    def adjust_width_group(self, widths, bottleneck_ratio, groups):
        """Adjusts the compatibility of widths and groups.

        Args:
            widths (list[int]): Width of each stage.
            bottleneck_ratio (float): Bottleneck ratio.
            groups (int): number of groups in each stage

        Returns:
            tuple(list): The adjusted widths and groups of each stage.
        """
        bottleneck_width = [
            int(w * b) for w, b in zip(widths, bottleneck_ratio)
        ]
        # Group count cannot exceed the bottleneck width; then round the
        # bottleneck width to a multiple of the group count.
        groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]
        bottleneck_width = [
            self.quantize_float(w_bot, g)
            for w_bot, g in zip(bottleneck_width, groups)
        ]
        widths = [
            int(w_bot / b)
            for w_bot, b in zip(bottleneck_width, bottleneck_ratio)
        ]
        return widths, groups

    def get_stages_from_blocks(self, widths):
        """Gets widths/stage_blocks of network at each stage.

        Args:
            widths (list[int]): Width in each stage.

        Returns:
            tuple(list): width and depth of each stage
        """
        # A stage boundary is wherever the per-block width changes.
        width_diff = [
            width != width_prev
            for width, width_prev in zip(widths + [0], [0] + widths)
        ]
        stage_widths = [
            width for width, diff in zip(widths, width_diff[:-1]) if diff
        ]
        stage_blocks = np.diff([
            depth for depth, diff in zip(range(len(width_diff)), width_diff)
            if diff
        ]).tolist()
        return stage_widths, stage_blocks

    def forward(self, x):
        """Forward function."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)

        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
13,604
37.109244
79
py
ERD
ERD-main/mmdet/models/backbones/mobilenet_v2.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from mmdet.registry import MODELS from ..layers import InvertedResidual from ..utils import make_divisible @MODELS.register_module() class MobileNetV2(BaseModule): """MobileNetV2 backbone. Args: widen_factor (float): Width multiplier, multiply number of channels in each layer by this amount. Default: 1.0. out_indices (Sequence[int], optional): Output from which stages. Default: (1, 2, 4, 7). frozen_stages (int): Stages to be frozen (all param fixed). Default: -1, which means not freezing any parameters. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Default: False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ # Parameters to build layers. 4 parameters are needed to construct a # layer, from left to right: expand_ratio, channel, num_blocks, stride. 
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]] def __init__(self, widen_factor=1., out_indices=(1, 2, 4, 7), frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), norm_eval=False, with_cp=False, pretrained=None, init_cfg=None): super(MobileNetV2, self).__init__(init_cfg) self.pretrained = pretrained assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] else: raise TypeError('pretrained must be a str or None') self.widen_factor = widen_factor self.out_indices = out_indices if not set(out_indices).issubset(set(range(0, 8))): raise ValueError('out_indices must be a subset of range' f'(0, 8). But received {out_indices}') if frozen_stages not in range(-1, 8): raise ValueError('frozen_stages must be in range(-1, 8). 
' f'But received {frozen_stages}') self.out_indices = out_indices self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.in_channels = make_divisible(32 * widen_factor, 8) self.conv1 = ConvModule( in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) self.layers = [] for i, layer_cfg in enumerate(self.arch_settings): expand_ratio, channel, num_blocks, stride = layer_cfg out_channels = make_divisible(channel * widen_factor, 8) inverted_res_layer = self.make_layer( out_channels=out_channels, num_blocks=num_blocks, stride=stride, expand_ratio=expand_ratio) layer_name = f'layer{i + 1}' self.add_module(layer_name, inverted_res_layer) self.layers.append(layer_name) if widen_factor > 1.0: self.out_channel = int(1280 * widen_factor) else: self.out_channel = 1280 layer = ConvModule( in_channels=self.in_channels, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) self.add_module('conv2', layer) self.layers.append('conv2') def make_layer(self, out_channels, num_blocks, stride, expand_ratio): """Stack InvertedResidual blocks to build a layer for MobileNetV2. Args: out_channels (int): out_channels of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 expand_ratio (int): Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Default: 6. 
""" layers = [] for i in range(num_blocks): if i >= 1: stride = 1 layers.append( InvertedResidual( self.in_channels, out_channels, mid_channels=int(round(self.in_channels * expand_ratio)), stride=stride, with_expand_conv=expand_ratio != 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp)) self.in_channels = out_channels return nn.Sequential(*layers) def _freeze_stages(self): if self.frozen_stages >= 0: for param in self.conv1.parameters(): param.requires_grad = False for i in range(1, self.frozen_stages + 1): layer = getattr(self, f'layer{i}') layer.eval() for param in layer.parameters(): param.requires_grad = False def forward(self, x): """Forward function.""" x = self.conv1(x) outs = [] for i, layer_name in enumerate(self.layers): layer = getattr(self, layer_name) x = layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def train(self, mode=True): """Convert the model into training mode while keep normalization layer frozen.""" super(MobileNetV2, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval()
7,621
37.301508
78
py
ERD
ERD-main/mmdet/models/backbones/swin.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import build_norm_layer from mmcv.cnn.bricks.transformer import FFN, build_dropout from mmengine.logging import MMLogger from mmengine.model import BaseModule, ModuleList from mmengine.model.weight_init import (constant_init, trunc_normal_, trunc_normal_init) from mmengine.runner.checkpoint import CheckpointLoader from mmengine.utils import to_2tuple from mmdet.registry import MODELS from ..layers import PatchEmbed, PatchMerging class WindowMSA(BaseModule): """Window based multi-head self-attention (W-MSA) module with relative position bias. Args: embed_dims (int): Number of input channels. num_heads (int): Number of attention heads. window_size (tuple[int]): The height and width of the window. qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. Default: True. qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. attn_drop_rate (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. init_cfg (dict | None, optional): The Config for initialization. Default: None. 
""" def __init__(self, embed_dims, num_heads, window_size, qkv_bias=True, qk_scale=None, attn_drop_rate=0., proj_drop_rate=0., init_cfg=None): super().__init__() self.embed_dims = embed_dims self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_embed_dims = embed_dims // num_heads self.scale = qk_scale or head_embed_dims**-0.5 self.init_cfg = init_cfg # define a parameter table of relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH # About 2x faster than original impl Wh, Ww = self.window_size rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) rel_position_index = rel_index_coords + rel_index_coords.T rel_position_index = rel_position_index.flip(1).contiguous() self.register_buffer('relative_position_index', rel_position_index) self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop_rate) self.proj = nn.Linear(embed_dims, embed_dims) self.proj_drop = nn.Dropout(proj_drop_rate) self.softmax = nn.Softmax(dim=-1) def init_weights(self): trunc_normal_(self.relative_position_bias_table, std=0.02) def forward(self, x, mask=None): """ Args: x (tensor): input features with shape of (num_windows*B, N, C) mask (tensor | None, Optional): mask with shape of (num_windows, Wh*Ww, Wh*Ww), value should be between (-inf, 0]. 
""" B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # make torchscript happy (cannot use tensor as tuple) q, k, v = qkv[0], qkv[1], qkv[2] q = q * self.scale attn = (q @ k.transpose(-2, -1)) relative_position_bias = self.relative_position_bias_table[ self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute( 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.view(B // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @staticmethod def double_step_seq(step1, len1, step2, len2): seq1 = torch.arange(0, step1 * len1, step1) seq2 = torch.arange(0, step2 * len2, step2) return (seq1[:, None] + seq2[None, :]).reshape(1, -1) class ShiftWindowMSA(BaseModule): """Shifted Window Multihead Self-Attention Module. Args: embed_dims (int): Number of input channels. num_heads (int): Number of attention heads. window_size (int): The height and width of the window. shift_size (int, optional): The shift step of each window towards right-bottom. If zero, act as regular window-msa. Defaults to 0. qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Defaults: None. attn_drop_rate (float, optional): Dropout ratio of attention weight. Defaults: 0. proj_drop_rate (float, optional): Dropout ratio of output. Defaults: 0. dropout_layer (dict, optional): The dropout_layer used before output. Defaults: dict(type='DropPath', drop_prob=0.). 
init_cfg (dict, optional): The extra config for initialization. Default: None. """ def __init__(self, embed_dims, num_heads, window_size, shift_size=0, qkv_bias=True, qk_scale=None, attn_drop_rate=0, proj_drop_rate=0, dropout_layer=dict(type='DropPath', drop_prob=0.), init_cfg=None): super().__init__(init_cfg) self.window_size = window_size self.shift_size = shift_size assert 0 <= self.shift_size < self.window_size self.w_msa = WindowMSA( embed_dims=embed_dims, num_heads=num_heads, window_size=to_2tuple(window_size), qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop_rate=attn_drop_rate, proj_drop_rate=proj_drop_rate, init_cfg=None) self.drop = build_dropout(dropout_layer) def forward(self, query, hw_shape): B, L, C = query.shape H, W = hw_shape assert L == H * W, 'input feature has wrong size' query = query.view(B, H, W, C) # pad feature maps to multiples of window size pad_r = (self.window_size - W % self.window_size) % self.window_size pad_b = (self.window_size - H % self.window_size) % self.window_size query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) H_pad, W_pad = query.shape[1], query.shape[2] # cyclic shift if self.shift_size > 0: shifted_query = torch.roll( query, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) # calculate attention mask for SW-MSA img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) cnt = 0 for h in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt += 1 # nW, window_size, window_size, 1 mask_windows = self.window_partition(img_mask) mask_windows = mask_windows.view( -1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( attn_mask == 0, float(0.0)) else: 
shifted_query = query attn_mask = None # nW*B, window_size, window_size, C query_windows = self.window_partition(shifted_query) # nW*B, window_size*window_size, C query_windows = query_windows.view(-1, self.window_size**2, C) # W-MSA/SW-MSA (nW*B, window_size*window_size, C) attn_windows = self.w_msa(query_windows, mask=attn_mask) # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # B H' W' C shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) # reverse cyclic shift if self.shift_size > 0: x = torch.roll( shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x if pad_r > 0 or pad_b: x = x[:, :H, :W, :].contiguous() x = x.view(B, H * W, C) x = self.drop(x) return x def window_reverse(self, windows, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ window_size = self.window_size B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x def window_partition(self, x): """ Args: x: (B, H, W, C) Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape window_size = self.window_size x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() windows = windows.view(-1, window_size, window_size, C) return windows class SwinBlock(BaseModule): """" Args: embed_dims (int): The feature dimension. num_heads (int): Parallel attention heads. feedforward_channels (int): The hidden dimension for FFNs. window_size (int, optional): The local window scale. Default: 7. shift (bool, optional): whether to shift window or not. Default False. qkv_bias (bool, optional): enable bias for qkv if True. Default: True. 
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. drop_rate (float, optional): Dropout rate. Default: 0. attn_drop_rate (float, optional): Attention dropout rate. Default: 0. drop_path_rate (float, optional): Stochastic depth rate. Default: 0. act_cfg (dict, optional): The config dict of activation function. Default: dict(type='GELU'). norm_cfg (dict, optional): The config dict of normalization. Default: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict | list | None, optional): The init config. Default: None. """ def __init__(self, embed_dims, num_heads, feedforward_channels, window_size=7, shift=False, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, init_cfg=None): super(SwinBlock, self).__init__() self.init_cfg = init_cfg self.with_cp = with_cp self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] self.attn = ShiftWindowMSA( embed_dims=embed_dims, num_heads=num_heads, window_size=window_size, shift_size=window_size // 2 if shift else 0, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop_rate=attn_drop_rate, proj_drop_rate=drop_rate, dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), init_cfg=None) self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] self.ffn = FFN( embed_dims=embed_dims, feedforward_channels=feedforward_channels, num_fcs=2, ffn_drop=drop_rate, dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), act_cfg=act_cfg, add_identity=True, init_cfg=None) def forward(self, x, hw_shape): def _inner_forward(x): identity = x x = self.norm1(x) x = self.attn(x, hw_shape) x = x + identity identity = x x = self.norm2(x) x = self.ffn(x, identity=identity) return x if self.with_cp and x.requires_grad: x = cp.checkpoint(_inner_forward, x) else: x = _inner_forward(x) 
return x class SwinBlockSequence(BaseModule): """Implements one stage in Swin Transformer. Args: embed_dims (int): The feature dimension. num_heads (int): Parallel attention heads. feedforward_channels (int): The hidden dimension for FFNs. depth (int): The number of blocks in this stage. window_size (int, optional): The local window scale. Default: 7. qkv_bias (bool, optional): enable bias for qkv if True. Default: True. qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. drop_rate (float, optional): Dropout rate. Default: 0. attn_drop_rate (float, optional): Attention dropout rate. Default: 0. drop_path_rate (float | list[float], optional): Stochastic depth rate. Default: 0. downsample (BaseModule | None, optional): The downsample operation module. Default: None. act_cfg (dict, optional): The config dict of activation function. Default: dict(type='GELU'). norm_cfg (dict, optional): The config dict of normalization. Default: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict | list | None, optional): The init config. Default: None. 
""" def __init__(self, embed_dims, num_heads, feedforward_channels, depth, window_size=7, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., downsample=None, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, init_cfg=None): super().__init__(init_cfg=init_cfg) if isinstance(drop_path_rate, list): drop_path_rates = drop_path_rate assert len(drop_path_rates) == depth else: drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] self.blocks = ModuleList() for i in range(depth): block = SwinBlock( embed_dims=embed_dims, num_heads=num_heads, feedforward_channels=feedforward_channels, window_size=window_size, shift=False if i % 2 == 0 else True, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rates[i], act_cfg=act_cfg, norm_cfg=norm_cfg, with_cp=with_cp, init_cfg=None) self.blocks.append(block) self.downsample = downsample def forward(self, x, hw_shape): for block in self.blocks: x = block(x, hw_shape) if self.downsample: x_down, down_hw_shape = self.downsample(x, hw_shape) return x_down, down_hw_shape, x, hw_shape else: return x, hw_shape, x, hw_shape @MODELS.register_module() class SwinTransformer(BaseModule): """ Swin Transformer A PyTorch implement of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/abs/2103.14030 Inspiration from https://github.com/microsoft/Swin-Transformer Args: pretrain_img_size (int | tuple[int]): The size of input image when pretrain. Defaults: 224. in_channels (int): The num of input channels. Defaults: 3. embed_dims (int): The feature dimension. Default: 96. patch_size (int | tuple[int]): Patch size. Default: 4. window_size (int): Window size. Default: 7. mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. Default: 4. depths (tuple[int]): Depths of each Swin Transformer stage. Default: (2, 2, 6, 2). 
num_heads (tuple[int]): Parallel attention heads of each Swin Transformer stage. Default: (3, 6, 12, 24). strides (tuple[int]): The patch merging or patch embedding stride of each Swin Transformer stage. (In swin, we set kernel size equal to stride.) Default: (4, 2, 2, 2). out_indices (tuple[int]): Output from which stages. Default: (0, 1, 2, 3). qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. patch_norm (bool): If add a norm layer for patch embed and patch merging. Default: True. drop_rate (float): Dropout rate. Defaults: 0. attn_drop_rate (float): Attention dropout rate. Default: 0. drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. use_abs_pos_embed (bool): If True, add absolute position embedding to the patch embedding. Defaults: False. act_cfg (dict): Config dict for activation layer. Default: dict(type='GELU'). norm_cfg (dict): Config dict for normalization layer at output of backone. Defaults: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. pretrained (str, optional): model pretrained path. Default: None. convert_weights (bool): The flag indicates whether the pre-trained model is from the original repo. We may need to convert some keys to make it compatible. Default: False. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). Default: -1 (-1 means not freezing any parameters). init_cfg (dict, optional): The Config for initialization. Defaults to None. 
""" def __init__(self, pretrain_img_size=224, in_channels=3, embed_dims=96, patch_size=4, window_size=7, mlp_ratio=4, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, pretrained=None, convert_weights=False, frozen_stages=-1, init_cfg=None): self.convert_weights = convert_weights self.frozen_stages = frozen_stages if isinstance(pretrain_img_size, int): pretrain_img_size = to_2tuple(pretrain_img_size) elif isinstance(pretrain_img_size, tuple): if len(pretrain_img_size) == 1: pretrain_img_size = to_2tuple(pretrain_img_size[0]) assert len(pretrain_img_size) == 2, \ f'The size of image should have length 1 or 2, ' \ f'but got {len(pretrain_img_size)}' assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: self.init_cfg = init_cfg else: raise TypeError('pretrained must be a str or None') super(SwinTransformer, self).__init__(init_cfg=init_cfg) num_layers = len(depths) self.out_indices = out_indices self.use_abs_pos_embed = use_abs_pos_embed assert strides[0] == patch_size, 'Use non-overlapping patch embed.' 
self.patch_embed = PatchEmbed( in_channels=in_channels, embed_dims=embed_dims, conv_type='Conv2d', kernel_size=patch_size, stride=strides[0], norm_cfg=norm_cfg if patch_norm else None, init_cfg=None) if self.use_abs_pos_embed: patch_row = pretrain_img_size[0] // patch_size patch_col = pretrain_img_size[1] // patch_size num_patches = patch_row * patch_col self.absolute_pos_embed = nn.Parameter( torch.zeros((1, num_patches, embed_dims))) self.drop_after_pos = nn.Dropout(p=drop_rate) # set stochastic depth decay rule total_depth = sum(depths) dpr = [ x.item() for x in torch.linspace(0, drop_path_rate, total_depth) ] self.stages = ModuleList() in_channels = embed_dims for i in range(num_layers): if i < num_layers - 1: downsample = PatchMerging( in_channels=in_channels, out_channels=2 * in_channels, stride=strides[i + 1], norm_cfg=norm_cfg if patch_norm else None, init_cfg=None) else: downsample = None stage = SwinBlockSequence( embed_dims=in_channels, num_heads=num_heads[i], feedforward_channels=mlp_ratio * in_channels, depth=depths[i], window_size=window_size, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], downsample=downsample, act_cfg=act_cfg, norm_cfg=norm_cfg, with_cp=with_cp, init_cfg=None) self.stages.append(stage) if downsample: in_channels = downsample.out_channels self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] # Add a norm layer for each output for i in out_indices: layer = build_norm_layer(norm_cfg, self.num_features[i])[1] layer_name = f'norm{i}' self.add_module(layer_name, layer) def train(self, mode=True): """Convert the model into training mode while keep layers freezed.""" super(SwinTransformer, self).train(mode) self._freeze_stages() def _freeze_stages(self): if self.frozen_stages >= 0: self.patch_embed.eval() for param in self.patch_embed.parameters(): param.requires_grad = False if self.use_abs_pos_embed: 
self.absolute_pos_embed.requires_grad = False self.drop_after_pos.eval() for i in range(1, self.frozen_stages + 1): if (i - 1) in self.out_indices: norm_layer = getattr(self, f'norm{i-1}') norm_layer.eval() for param in norm_layer.parameters(): param.requires_grad = False m = self.stages[i - 1] m.eval() for param in m.parameters(): param.requires_grad = False def init_weights(self): logger = MMLogger.get_current_instance() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') if self.use_abs_pos_embed: trunc_normal_(self.absolute_pos_embed, std=0.02) for m in self.modules(): if isinstance(m, nn.Linear): trunc_normal_init(m, std=.02, bias=0.) elif isinstance(m, nn.LayerNorm): constant_init(m, 1.0) else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' ckpt = CheckpointLoader.load_checkpoint( self.init_cfg.checkpoint, logger=logger, map_location='cpu') if 'state_dict' in ckpt: _state_dict = ckpt['state_dict'] elif 'model' in ckpt: _state_dict = ckpt['model'] else: _state_dict = ckpt if self.convert_weights: # supported loading weight from original repo, _state_dict = swin_converter(_state_dict) state_dict = OrderedDict() for k, v in _state_dict.items(): if k.startswith('backbone.'): state_dict[k[9:]] = v # strip prefix of state_dict if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in state_dict.items()} # reshape absolute position embedding if state_dict.get('absolute_pos_embed') is not None: absolute_pos_embed = state_dict['absolute_pos_embed'] N1, L, C1 = absolute_pos_embed.size() N2, C2, H, W = self.absolute_pos_embed.size() if N1 != N2 or C1 != C2 or L != H * W: logger.warning('Error in loading absolute_pos_embed, pass') else: state_dict['absolute_pos_embed'] = absolute_pos_embed.view( N2, H, W, C2).permute(0, 3, 1, 2).contiguous() # interpolate position bias table 
if needed relative_position_bias_table_keys = [ k for k in state_dict.keys() if 'relative_position_bias_table' in k ] for table_key in relative_position_bias_table_keys: table_pretrained = state_dict[table_key] table_current = self.state_dict()[table_key] L1, nH1 = table_pretrained.size() L2, nH2 = table_current.size() if nH1 != nH2: logger.warning(f'Error in loading {table_key}, pass') elif L1 != L2: S1 = int(L1**0.5) S2 = int(L2**0.5) table_pretrained_resized = F.interpolate( table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), size=(S2, S2), mode='bicubic') state_dict[table_key] = table_pretrained_resized.view( nH2, L2).permute(1, 0).contiguous() # load state_dict self.load_state_dict(state_dict, False) def forward(self, x): x, hw_shape = self.patch_embed(x) if self.use_abs_pos_embed: x = x + self.absolute_pos_embed x = self.drop_after_pos(x) outs = [] for i, stage in enumerate(self.stages): x, hw_shape, out, out_hw_shape = stage(x, hw_shape) if i in self.out_indices: norm_layer = getattr(self, f'norm{i}') out = norm_layer(out) out = out.view(-1, *out_hw_shape, self.num_features[i]).permute(0, 3, 1, 2).contiguous() outs.append(out) return outs def swin_converter(ckpt): new_ckpt = OrderedDict() def correct_unfold_reduction_order(x): out_channel, in_channel = x.shape x = x.reshape(out_channel, 4, in_channel // 4) x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel) return x def correct_unfold_norm_order(x): in_channel = x.shape[0] x = x.reshape(4, in_channel // 4) x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) return x for k, v in ckpt.items(): if k.startswith('head'): continue elif k.startswith('layers'): new_v = v if 'attn.' in k: new_k = k.replace('attn.', 'attn.w_msa.') elif 'mlp.' in k: if 'mlp.fc1.' in k: new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') elif 'mlp.fc2.' in k: new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') else: new_k = k.replace('mlp.', 'ffn.') elif 'downsample' in k: new_k = k if 'reduction.' 
in k: new_v = correct_unfold_reduction_order(v) elif 'norm.' in k: new_v = correct_unfold_norm_order(v) else: new_k = k new_k = new_k.replace('layers', 'stages', 1) elif k.startswith('patch_embed'): new_v = v if 'proj' in k: new_k = k.replace('proj', 'projection') else: new_k = k else: new_v = v new_k = k new_ckpt['backbone.' + new_k] = new_v return new_ckpt
31,958
37.97439
79
py
ERD
ERD-main/mmdet/models/backbones/trident_resnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule
from torch.nn.modules.utils import _pair

from mmdet.models.backbones.resnet import Bottleneck, ResNet
from mmdet.registry import MODELS


class TridentConv(BaseModule):
    """Trident Convolution Module.

    A single convolution whose weight (and optional bias) is shared by
    several parallel "trident" branches; each branch applies the shared
    kernel with a different dilation (and a matching padding), so the
    branches see multiple receptive-field scales at no extra parameter
    cost.

    Args:
        in_channels (int): Number of channels in input.
        out_channels (int): Number of channels in output.
        kernel_size (int): Size of convolution kernel.
        stride (int, optional): Convolution stride. Default: 1.
        trident_dilations (tuple[int, int, int], optional): Dilations of
            different trident branch. Default: (1, 2, 3).
        test_branch_idx (int, optional): In inference, all 3 branches will
            be used if `test_branch_idx==-1`, otherwise only branch with
            index `test_branch_idx` will be used. Default: 1.
        bias (bool, optional): Whether to use bias in convolution or not.
            Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 trident_dilations=(1, 2, 3),
                 test_branch_idx=1,
                 bias=False,
                 init_cfg=None):
        super(TridentConv, self).__init__(init_cfg)
        self.num_branch = len(trident_dilations)
        self.with_bias = bias
        self.test_branch_idx = test_branch_idx
        self.stride = _pair(stride)
        self.kernel_size = _pair(kernel_size)
        # `_pair` passes iterables through unchanged, so for the default
        # dilations (1, 2, 3) each branch's padding equals its dilation
        # (keeps spatial size for the 3x3 kernels used by TridentBottleneck
        # -- presumably intentional; confirm for other kernel sizes).
        self.paddings = _pair(trident_dilations)
        self.dilations = trident_dilations
        self.in_channels = in_channels
        self.out_channels = out_channels
        # NOTE(review): `self.bias` is first set to the bool flag here and
        # then immediately overwritten below with a Parameter or None; the
        # bool flag survives only as `self.with_bias`.
        self.bias = bias

        # One weight tensor shared by every branch (the core Trident idea).
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.bias = None

    def extra_repr(self):
        """Return a string describing the module configuration (shown in
        ``repr``)."""
        tmpstr = f'in_channels={self.in_channels}'
        tmpstr += f', out_channels={self.out_channels}'
        tmpstr += f', kernel_size={self.kernel_size}'
        tmpstr += f', num_branch={self.num_branch}'
        tmpstr += f', test_branch_idx={self.test_branch_idx}'
        tmpstr += f', stride={self.stride}'
        tmpstr += f', paddings={self.paddings}'
        tmpstr += f', dilations={self.dilations}'
        tmpstr += f', bias={self.bias}'
        return tmpstr

    def forward(self, inputs):
        """Apply the shared convolution to each branch input.

        Args:
            inputs (list[Tensor]): One tensor per branch in training (or
                when ``test_branch_idx == -1``); a single-element list in
                branch-selective inference.

        Returns:
            list[Tensor]: One output tensor per processed branch.
        """
        if self.training or self.test_branch_idx == -1:
            # Every branch runs: same weight/bias, branch-specific
            # dilation and padding.
            outputs = [
                F.conv2d(input, self.weight, self.bias, self.stride,
                         padding, dilation) for input, dilation, padding in
                zip(inputs, self.dilations, self.paddings)
            ]
        else:
            # Inference with a fixed branch: only `test_branch_idx` is used.
            assert len(inputs) == 1
            outputs = [
                F.conv2d(inputs[0], self.weight, self.bias, self.stride,
                         self.paddings[self.test_branch_idx],
                         self.dilations[self.test_branch_idx])
            ]

        return outputs


# Since TridentNet is defined over ResNet50 and ResNet101, here we
# only support TridentBottleneckBlock.
class TridentBottleneck(Bottleneck):
    """BottleBlock for TridentResNet.

    Identical to the ResNet ``Bottleneck`` except that ``conv2`` is
    replaced by a weight-sharing :class:`TridentConv`, so the block
    processes a *list* of branch tensors instead of a single tensor.

    Args:
        trident_dilations (tuple[int, int, int]): Dilations of different
            trident branch.
        test_branch_idx (int): In inference, all 3 branches will be used if
            `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
        concat_output (bool): Whether to concat the output list to a Tensor.
            `True` only in the last Block.
    """

    def __init__(self, trident_dilations, test_branch_idx, concat_output,
                 **kwargs):

        super(TridentBottleneck, self).__init__(**kwargs)
        self.trident_dilations = trident_dilations
        self.num_branch = len(trident_dilations)
        self.concat_output = concat_output
        self.test_branch_idx = test_branch_idx
        self.conv2 = TridentConv(
            self.planes,
            self.planes,
            kernel_size=3,
            stride=self.conv2_stride,
            bias=False,
            trident_dilations=self.trident_dilations,
            test_branch_idx=test_branch_idx,
            init_cfg=dict(
                type='Kaiming',
                distribution='uniform',
                mode='fan_in',
                override=dict(name='conv2')))

    def forward(self, x):
        """Run the bottleneck over all active branches.

        ``x`` may be a single tensor (first block of the trident stage) or
        a list of per-branch tensors (subsequent blocks).
        """

        def _inner_forward(x):
            # Only one branch is active in branch-selective inference.
            num_branch = (
                self.num_branch
                if self.training or self.test_branch_idx == -1 else 1)
            identity = x
            if not isinstance(x, list):
                # Fan a single input out to all branches; the residual
                # identity is duplicated (or downsampled) accordingly.
                x = (x, ) * num_branch
                identity = x
                if self.downsample is not None:
                    identity = [self.downsample(b) for b in x]

            # conv1 / norm1 / relu applied per branch (shared weights
            # come from the parent Bottleneck modules).
            out = [self.conv1(b) for b in x]
            out = [self.norm1(b) for b in out]
            out = [self.relu(b) for b in out]

            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k],
                                                 self.after_conv1_plugin_names)

            # TridentConv consumes and returns the whole branch list.
            out = self.conv2(out)
            out = [self.norm2(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k],
                                                 self.after_conv2_plugin_names)

            out = [self.conv3(b) for b in out]
            out = [self.norm3(b) for b in out]

            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k],
                                                 self.after_conv3_plugin_names)

            # Per-branch residual addition.
            out = [
                out_b + identity_b for out_b, identity_b in zip(out, identity)
            ]
            return out

        if self.with_cp and x.requires_grad:
            # Gradient checkpointing trades compute for memory.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = [self.relu(b) for b in out]
        if self.concat_output:
            # Last block of the stage: stack branches along the batch dim
            # so downstream heads see a plain tensor.
            out = torch.cat(out, dim=0)
        return out


def make_trident_res_layer(block,
                           inplanes,
                           planes,
                           num_blocks,
                           stride=1,
                           trident_dilations=(1, 2, 3),
                           style='pytorch',
                           with_cp=False,
                           conv_cfg=None,
                           norm_cfg=dict(type='BN'),
                           dcn=None,
                           plugins=None,
                           test_branch_idx=-1):
    """Build Trident Res Layers."""
    downsample = None
    # A projection shortcut is needed whenever the residual shape changes.
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = []
        conv_stride = stride
        downsample.extend([
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=conv_stride,
                bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1]
        ])
        downsample = nn.Sequential(*downsample)

    layers = []
    for i in range(num_blocks):
        layers.append(
            block(
                inplanes=inplanes,
                # Stride/downsample apply only to the first block.
                planes=planes,
                stride=stride if i == 0 else 1,
                trident_dilations=trident_dilations,
                downsample=downsample if i == 0 else None,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=plugins,
                test_branch_idx=test_branch_idx,
                # Only the final block concatenates branch outputs.
                concat_output=True if i == num_blocks - 1 else False))
        inplanes = planes * block.expansion
    return nn.Sequential(*layers)


@MODELS.register_module()
class TridentResNet(ResNet):
    """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
    ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
    normal BottleBlock to yield trident output. Different branch shares the
    convolution weight but uses different dilations to achieve multi-scale
    output.

                               / stage3(b0) \
    x - stem - stage1 - stage2 - stage3(b1) - output
                               \ stage3(b2) /

    Args:
        depth (int): Depth of resnet, from {50, 101, 152}.
        num_branch (int): Number of branches in TridentNet.
        test_branch_idx (int): In inference, all 3 branches will be used if
            `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
        trident_dilations (tuple[int]): Dilations of different trident branch.
            len(trident_dilations) should be equal to num_branch.
    """  # noqa

    def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
                 **kwargs):

        assert num_branch == len(trident_dilations)
        assert depth in (50, 101, 152)
        super(TridentResNet, self).__init__(depth, **kwargs)
        # TridentNet uses only 3 stages; the trident blocks replace the last.
        assert self.num_stages == 3
        self.test_branch_idx = test_branch_idx
        self.num_branch = num_branch

        last_stage_idx = self.num_stages - 1
        stride = self.strides[last_stage_idx]
        dilation = trident_dilations
        dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
        if self.plugins is not None:
            stage_plugins = self.make_stage_plugins(self.plugins,
                                                    last_stage_idx)
        else:
            stage_plugins = None
        planes = self.base_channels * 2**last_stage_idx
        res_layer = make_trident_res_layer(
            TridentBottleneck,
            # Input channels are the expansion of the previous stage.
            inplanes=(self.block.expansion * self.base_channels *
                      2**(last_stage_idx - 1)),
            planes=planes,
            num_blocks=self.stage_blocks[last_stage_idx],
            stride=stride,
            trident_dilations=dilation,
            style=self.style,
            with_cp=self.with_cp,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            dcn=dcn,
            plugins=stage_plugins,
            test_branch_idx=self.test_branch_idx)

        layer_name = f'layer{last_stage_idx + 1}'

        # Replace the last ResNet stage (built by the parent __init__)
        # with the trident stage under the same attribute name.
        self.__setattr__(layer_name, res_layer)
        self.res_layers.pop(last_stage_idx)
        self.res_layers.insert(last_stage_idx, layer_name)

        self._freeze_stages()
11,120
36.19398
79
py
ERD
ERD-main/mmdet/models/backbones/detectors_resnext.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

from mmcv.cnn import build_conv_layer, build_norm_layer

from mmdet.registry import MODELS
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet


class Bottleneck(_Bottleneck):
    # Same expansion factor as the standard ResNet bottleneck.
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        if groups == 1:
            width = self.planes
        else:
            # ResNeXt mid-channel width: scale by base_width/base_channels,
            # then multiply by the cardinality (number of groups).
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        # Rebuild norm layers for the grouped width (the parent built them
        # for `planes`).
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        # conv2 variants, in priority order: SAC (DetectoRS switchable
        # atrous conv), plain conv (no DCN or DCN falls back on stride),
        # or deformable conv.
        if self.with_sac:
            self.conv2 = build_conv_layer(
                self.sac,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        elif not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                self.dcn,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)


@MODELS.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
    """ResNeXt backbone for DetectoRS.

    Args:
        groups (int): The number of groups in ResNeXt.
        base_width (int): The base width of ResNeXt.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        self.groups = groups
        self.base_width = base_width
        super(DetectoRS_ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Build a res layer, forwarding the ResNeXt group settings to each
        block."""
        return super().make_res_layer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
3,919
30.612903
77
py
ERD
ERD-main/mmdet/models/backbones/efficientnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import math
from functools import partial

import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn.bricks import ConvModule, DropPath
from mmengine.model import BaseModule, Sequential

from mmdet.registry import MODELS
from ..layers import InvertedResidual, SELayer
from ..utils import make_divisible


class EdgeResidual(BaseModule):
    """Edge Residual Block.

    Args:
        in_channels (int): The input channels of this module.
        out_channels (int): The output channels of this module.
        mid_channels (int): The input channels of the second convolution.
        kernel_size (int): The kernel size of the first convolution.
            Defaults to 3.
        stride (int): The stride of the first convolution. Defaults to 1.
        se_cfg (dict, optional): Config dict for se layer. Defaults to None,
            which means no se layer.
        with_residual (bool): Use residual connection. Defaults to True.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Defaults to None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to ``dict(type='BN')``.
        act_cfg (dict): Config dict for activation layer.
            Defaults to ``dict(type='ReLU')``.
        drop_path_rate (float): stochastic depth rate. Defaults to 0.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Defaults to False.
        init_cfg (dict | list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 kernel_size=3,
                 stride=1,
                 se_cfg=None,
                 with_residual=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 drop_path_rate=0.,
                 with_cp=False,
                 init_cfg=None,
                 **kwargs):
        super(EdgeResidual, self).__init__(init_cfg=init_cfg)
        assert stride in [1, 2]
        self.with_cp = with_cp
        # DropPath implements stochastic depth; identity when rate is 0.
        self.drop_path = DropPath(
            drop_path_rate) if drop_path_rate > 0 else nn.Identity()
        self.with_se = se_cfg is not None
        # Residual is only valid when shape is preserved and not disabled.
        self.with_residual = (
            stride == 1 and in_channels == out_channels and with_residual)

        if self.with_se:
            assert isinstance(se_cfg, dict)

        # Expansion conv (kxk) — note the stride sits on conv2 instead,
        # which is the EdgeTPU-specific block layout.
        self.conv1 = ConvModule(
            in_channels=in_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=kernel_size // 2,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

        if self.with_se:
            self.se = SELayer(**se_cfg)

        # Projection conv (1x1), no activation.
        self.conv2 = ConvModule(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=stride,
            padding=0,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    def forward(self, x):
        """Forward: conv1 -> (SE) -> conv2, with optional drop-path
        residual."""

        def _inner_forward(x):
            out = x
            out = self.conv1(out)

            if self.with_se:
                out = self.se(out)

            out = self.conv2(out)

            if self.with_residual:
                return x + self.drop_path(out)
            else:
                return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return out


def model_scaling(layer_setting, arch_setting):
    """Scaling operation to the layer's parameters according to the
    arch_setting.

    ``arch_setting[0]`` is the width multiplier, ``arch_setting[1]`` the
    depth multiplier (compound scaling of EfficientNet).
    """
    # scale width
    new_layer_setting = copy.deepcopy(layer_setting)
    for layer_cfg in new_layer_setting:
        for block_cfg in layer_cfg:
            # block_cfg[1] is out_channels; keep it divisible by 8.
            block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8)

    # scale depth
    # First split each stage into sub-sequences of blocks sharing the same
    # out_channels, so depth scaling repeats the right block.
    split_layer_setting = [new_layer_setting[0]]
    for layer_cfg in new_layer_setting[1:-1]:
        tmp_index = [0]
        for i in range(len(layer_cfg) - 1):
            if layer_cfg[i + 1][1] != layer_cfg[i][1]:
                tmp_index.append(i + 1)
        tmp_index.append(len(layer_cfg))
        for i in range(len(tmp_index) - 1):
            split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i +
                                                                        1]])
    split_layer_setting.append(new_layer_setting[-1])

    # ceil() so depth never shrinks below the rounded-up scaled count.
    num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]]
    new_layers = [
        int(math.ceil(arch_setting[1] * num)) for num in num_of_layers
    ]

    merge_layer_setting = [split_layer_setting[0]]
    for i, layer_cfg in enumerate(split_layer_setting[1:-1]):
        if new_layers[i] <= num_of_layers[i]:
            tmp_layer_cfg = layer_cfg[:new_layers[i]]
        else:
            # Extend by repeating the last block config.
            tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * (
                new_layers[i] - num_of_layers[i])
        if tmp_layer_cfg[0][3] == 1 and i != 0:
            # Stride-1 sub-sequence continues the previous stage.
            merge_layer_setting[-1] += tmp_layer_cfg.copy()
        else:
            merge_layer_setting.append(tmp_layer_cfg.copy())
    merge_layer_setting.append(split_layer_setting[-1])

    return merge_layer_setting


@MODELS.register_module()
class EfficientNet(BaseModule):
    """EfficientNet backbone.

    Args:
        arch (str): Architecture of efficientnet. Defaults to b0.
        out_indices (Sequence[int]): Output from which stages.
            Defaults to (6, ).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Defaults to 0, which means not freezing any parameters.
        conv_cfg (dict): Config dict for convolution layer.
            Defaults to None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='Swish').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Defaults to False.
    """

    # Parameters to build layers.
    # 'b' represents the architecture of normal EfficientNet family includes
    # 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'.
    # 'e' represents the architecture of EfficientNet-EdgeTPU including 'es',
    # 'em', 'el'.
    # 6 parameters are needed to construct a layer, From left to right:
    # - kernel_size: The kernel size of the block
    # - out_channel: The number of out_channels of the block
    # - se_ratio: The sequeeze ratio of SELayer.
    # - stride: The stride of the block
    # - expand_ratio: The expand_ratio of the mid_channels
    # - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual
    layer_settings = {
        'b': [[[3, 32, 0, 2, 0, -1]],
              [[3, 16, 4, 1, 1, 0]],
              [[3, 24, 4, 2, 6, 0],
               [3, 24, 4, 1, 6, 0]],
              [[5, 40, 4, 2, 6, 0],
               [5, 40, 4, 1, 6, 0]],
              [[3, 80, 4, 2, 6, 0],
               [3, 80, 4, 1, 6, 0],
               [3, 80, 4, 1, 6, 0],
               [5, 112, 4, 1, 6, 0],
               [5, 112, 4, 1, 6, 0],
               [5, 112, 4, 1, 6, 0]],
              [[5, 192, 4, 2, 6, 0],
               [5, 192, 4, 1, 6, 0],
               [5, 192, 4, 1, 6, 0],
               [5, 192, 4, 1, 6, 0],
               [3, 320, 4, 1, 6, 0]],
              [[1, 1280, 0, 1, 0, -1]]
              ],
        'e': [[[3, 32, 0, 2, 0, -1]],
              [[3, 24, 0, 1, 3, 1]],
              [[3, 32, 0, 2, 8, 1],
               [3, 32, 0, 1, 8, 1]],
              [[3, 48, 0, 2, 8, 1],
               [3, 48, 0, 1, 8, 1],
               [3, 48, 0, 1, 8, 1],
               [3, 48, 0, 1, 8, 1]],
              [[5, 96, 0, 2, 8, 0],
               [5, 96, 0, 1, 8, 0],
               [5, 96, 0, 1, 8, 0],
               [5, 96, 0, 1, 8, 0],
               [5, 96, 0, 1, 8, 0],
               [5, 144, 0, 1, 8, 0],
               [5, 144, 0, 1, 8, 0],
               [5, 144, 0, 1, 8, 0],
               [5, 144, 0, 1, 8, 0]],
              [[5, 192, 0, 2, 8, 0],
               [5, 192, 0, 1, 8, 0]],
              [[1, 1280, 0, 1, 0, -1]]
              ]
    }  # yapf: disable

    # Parameters to build different kinds of architecture.
    # From left to right: scaling factor for width, scaling factor for depth,
    # resolution.
    arch_settings = {
        'b0': (1.0, 1.0, 224),
        'b1': (1.0, 1.1, 240),
        'b2': (1.1, 1.2, 260),
        'b3': (1.2, 1.4, 300),
        'b4': (1.4, 1.8, 380),
        'b5': (1.6, 2.2, 456),
        'b6': (1.8, 2.6, 528),
        'b7': (2.0, 3.1, 600),
        'b8': (2.2, 3.6, 672),
        'es': (1.0, 1.0, 224),
        'em': (1.0, 1.1, 240),
        'el': (1.2, 1.4, 300)
    }

    def __init__(self,
                 arch='b0',
                 drop_path_rate=0.,
                 out_indices=(6, ),
                 frozen_stages=0,
                 conv_cfg=dict(type='Conv2dAdaptivePadding'),
                 norm_cfg=dict(type='BN', eps=1e-3),
                 act_cfg=dict(type='Swish'),
                 norm_eval=False,
                 with_cp=False,
                 init_cfg=[
                     dict(type='Kaiming', layer='Conv2d'),
                     dict(
                         type='Constant',
                         layer=['_BatchNorm', 'GroupNorm'],
                         val=1)
                 ]):
        super(EfficientNet, self).__init__(init_cfg)
        assert arch in self.arch_settings, \
            f'"{arch}" is not one of the arch_settings ' \
            f'({", ".join(self.arch_settings.keys())})'
        self.arch_setting = self.arch_settings[arch]
        # 'b0'..'b8' share the 'b' layer table; 'es'/'em'/'el' share 'e'.
        self.layer_setting = self.layer_settings[arch[:1]]
        for index in out_indices:
            if index not in range(0, len(self.layer_setting)):
                raise ValueError('the item in out_indices must in '
                                 f'range(0, {len(self.layer_setting)}). '
                                 f'But received {index}')

        if frozen_stages not in range(len(self.layer_setting) + 1):
            raise ValueError('frozen_stages must be in range(0, '
                             f'{len(self.layer_setting) + 1}). '
                             f'But received {frozen_stages}')
        self.drop_path_rate = drop_path_rate
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        # Apply the compound width/depth scaling for this variant.
        self.layer_setting = model_scaling(self.layer_setting,
                                           self.arch_setting)
        block_cfg_0 = self.layer_setting[0][0]
        block_cfg_last = self.layer_setting[-1][0]
        self.in_channels = make_divisible(block_cfg_0[1], 8)
        self.out_channels = block_cfg_last[1]
        self.layers = nn.ModuleList()
        # Stem conv.
        self.layers.append(
            ConvModule(
                in_channels=3,
                out_channels=self.in_channels,
                kernel_size=block_cfg_0[0],
                stride=block_cfg_0[3],
                padding=block_cfg_0[0] // 2,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))
        self.make_layer()
        # Avoid building unused layers in mmdetection.
        if len(self.layers) < max(self.out_indices) + 1:
            # Final 1x1 head conv, only if an output index requires it.
            self.layers.append(
                ConvModule(
                    in_channels=self.in_channels,
                    out_channels=self.out_channels,
                    kernel_size=block_cfg_last[0],
                    stride=block_cfg_last[3],
                    padding=block_cfg_last[0] // 2,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))

    def make_layer(self):
        """Build the middle stages (everything between stem and head)."""
        # Without the first and the final conv block.
        layer_setting = self.layer_setting[1:-1]

        total_num_blocks = sum([len(x) for x in layer_setting])
        block_idx = 0
        dpr = [
            x.item()
            for x in torch.linspace(0, self.drop_path_rate, total_num_blocks)
        ]  # stochastic depth decay rule

        for i, layer_cfg in enumerate(layer_setting):
            # Avoid building unused layers in mmdetection.
            if i > max(self.out_indices) - 1:
                break
            layer = []
            # NOTE(review): the inner loop reuses `i`, shadowing the stage
            # index; the `i > 0` check below therefore refers to the block
            # index within the stage (this matches the upstream
            # implementation — confirm before changing).
            for i, block_cfg in enumerate(layer_cfg):
                (kernel_size, out_channels, se_ratio, stride, expand_ratio,
                 block_type) = block_cfg

                mid_channels = int(self.in_channels * expand_ratio)
                out_channels = make_divisible(out_channels, 8)
                if se_ratio <= 0:
                    se_cfg = None
                else:
                    # In mmdetection, the `divisor` is deleted to align
                    # the logic of SELayer with mmcls.
                    se_cfg = dict(
                        channels=mid_channels,
                        ratio=expand_ratio * se_ratio,
                        act_cfg=(self.act_cfg, dict(type='Sigmoid')))
                if block_type == 1:  # edge tpu
                    if i > 0 and expand_ratio == 3:
                        # EdgeTPU quirk: later expand-3 blocks drop the
                        # residual and use expand ratio 4 instead.
                        with_residual = False
                        expand_ratio = 4
                    else:
                        with_residual = True
                    mid_channels = int(self.in_channels * expand_ratio)
                    if se_cfg is not None:
                        # In mmdetection, the `divisor` is deleted to align
                        # the logic of SELayer with mmcls.
                        se_cfg = dict(
                            channels=mid_channels,
                            ratio=se_ratio * expand_ratio,
                            act_cfg=(self.act_cfg, dict(type='Sigmoid')))
                    block = partial(EdgeResidual, with_residual=with_residual)
                else:
                    block = InvertedResidual
                layer.append(
                    block(
                        in_channels=self.in_channels,
                        out_channels=out_channels,
                        mid_channels=mid_channels,
                        kernel_size=kernel_size,
                        stride=stride,
                        se_cfg=se_cfg,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg,
                        drop_path_rate=dpr[block_idx],
                        with_cp=self.with_cp,
                        # In mmdetection, `with_expand_conv` is set to align
                        # the logic of InvertedResidual with mmcls.
                        with_expand_conv=(mid_channels != self.in_channels)))
                self.in_channels = out_channels
                block_idx += 1
            self.layers.append(Sequential(*layer))

    def forward(self, x):
        """Forward through all built layers, collecting the requested stage
        outputs."""
        outs = []
        for i, layer in enumerate(self.layers):
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)

        return tuple(outs)

    def _freeze_stages(self):
        """Freeze the first ``frozen_stages`` layers (eval mode + no
        grad)."""
        for i in range(self.frozen_stages):
            m = self.layers[i]
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def train(self, mode=True):
        """Switch train/eval mode, re-applying stage freezing and optional
        BN eval."""
        super(EfficientNet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
16,240
37.761337
79
py
ERD
ERD-main/mmdet/models/backbones/resnet.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer from mmengine.model import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from mmdet.registry import MODELS from ..layers import ResLayer class BasicBlock(BaseModule): expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None): super(BasicBlock, self).__init__(init_cfg) assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( conv_cfg, planes, planes, 3, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp @property def norm1(self): """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out class Bottleneck(BaseModule): 
expansion = 4 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None): """Bottleneck block for ResNet. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. """ super(Bottleneck, self).__init__(init_cfg) assert style in ['pytorch', 'caffe'] assert dcn is None or isinstance(dcn, dict) assert plugins is None or isinstance(plugins, list) if plugins is not None: allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] assert all(p['position'] in allowed_position for p in plugins) self.inplanes = inplanes self.planes = planes self.stride = stride self.dilation = dilation self.style = style self.with_cp = with_cp self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.dcn = dcn self.with_dcn = dcn is not None self.plugins = plugins self.with_plugins = plugins is not None if self.with_plugins: # collect plugins for conv1/conv2/conv3 self.after_conv1_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv1' ] self.after_conv2_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv2' ] self.after_conv3_plugins = [ plugin['cfg'] for plugin in plugins if plugin['position'] == 'after_conv3' ] if self.style == 'pytorch': self.conv1_stride = 1 self.conv2_stride = stride else: self.conv1_stride = stride self.conv2_stride = 1 self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) self.norm3_name, norm3 = build_norm_layer( norm_cfg, planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False if self.with_dcn: fallback_on_stride = dcn.pop('fallback_on_stride', False) if 
not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( conv_cfg, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' self.conv2 = build_conv_layer( dcn, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( conv_cfg, planes, planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.relu = nn.ReLU(inplace=True) self.downsample = downsample if self.with_plugins: self.after_conv1_plugin_names = self.make_block_plugins( planes, self.after_conv1_plugins) self.after_conv2_plugin_names = self.make_block_plugins( planes, self.after_conv2_plugins) self.after_conv3_plugin_names = self.make_block_plugins( planes * self.expansion, self.after_conv3_plugins) def make_block_plugins(self, in_channels, plugins): """make plugins for block. Args: in_channels (int): Input channels of plugin. plugins (list[dict]): List of plugins cfg to build. Returns: list[str]: List of the names of plugin. 
""" assert isinstance(plugins, list) plugin_names = [] for plugin in plugins: plugin = plugin.copy() name, layer = build_plugin_layer( plugin, in_channels=in_channels, postfix=plugin.pop('postfix', '')) assert not hasattr(self, name), f'duplicate plugin {name}' self.add_module(name, layer) plugin_names.append(name) return plugin_names def forward_plugin(self, x, plugin_names): out = x for name in plugin_names: out = getattr(self, name)(out) return out @property def norm1(self): """nn.Module: normalization layer after the first convolution layer""" return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: normalization layer after the second convolution layer""" return getattr(self, self.norm2_name) @property def norm3(self): """nn.Module: normalization layer after the third convolution layer""" return getattr(self, self.norm3_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out @MODELS.register_module() class ResNet(BaseModule): """ResNet backbone. Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. stem_channels (int | None): Number of stem channels. If not specified, it will be the same as `base_channels`. Default: None. base_channels (int): Number of base channels of res layer. Default: 64. in_channels (int): Number of input image channels. Default: 3. 
num_stages (int): Resnet stages. Default: 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottleneck. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. norm_cfg (dict): Dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. plugins (list[dict]): List of plugins for stages, each dict contains: - cfg (dict, required): Cfg dict to build plugin. - position (str, required): Position inside block to insert plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - stages (tuple[bool], optional): Stages to apply plugin, length should be same as 'num_stages'. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): Whether to use zero init for last norm layer in resblocks to let them behave as identity. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Example: >>> from mmdet.models import ResNet >>> import torch >>> self = ResNet(depth=18) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 64, 8, 8) (1, 128, 4, 4) (1, 256, 2, 2) (1, 512, 1, 1) """ arch_settings = { 18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, depth, in_channels=3, stem_channels=None, base_channels=64, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None): super(ResNet, self).__init__(init_cfg) self.zero_init_residual = zero_init_residual if depth not in self.arch_settings: raise KeyError(f'invalid depth {depth} for resnet') block_init_cfg = None assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] block = self.arch_settings[depth][0] if self.zero_init_residual: if block is BasicBlock: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm2')) elif block is Bottleneck: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm3')) else: raise TypeError('pretrained must be a str or None') self.depth = depth if stem_channels is None: stem_channels = base_channels self.stem_channels = stem_channels self.base_channels = base_channels self.num_stages = num_stages assert num_stages >= 1 and num_stages <= 4 self.strides = strides self.dilations = dilations assert len(strides) == 
len(dilations) == num_stages self.out_indices = out_indices assert max(out_indices) < num_stages self.style = style self.deep_stem = deep_stem self.avg_down = avg_down self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.with_cp = with_cp self.norm_eval = norm_eval self.dcn = dcn self.stage_with_dcn = stage_with_dcn if dcn is not None: assert len(stage_with_dcn) == num_stages self.plugins = plugins self.block, stage_blocks = self.arch_settings[depth] self.stage_blocks = stage_blocks[:num_stages] self.inplanes = stem_channels self._make_stem_layer(in_channels, stem_channels) self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = strides[i] dilation = dilations[i] dcn = self.dcn if self.stage_with_dcn[i] else None if plugins is not None: stage_plugins = self.make_stage_plugins(plugins, i) else: stage_plugins = None planes = base_channels * 2**i res_layer = self.make_res_layer( block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=stage_plugins, init_cfg=block_init_cfg) self.inplanes = planes * self.block.expansion layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() self.feat_dim = self.block.expansion * base_channels * 2**( len(self.stage_blocks) - 1) def make_stage_plugins(self, plugins, stage_idx): """Make plugins for ResNet ``stage_idx`` th stage. Currently we support to insert ``context_block``, ``empirical_attention_block``, ``nonlocal_block`` into the backbone like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of Bottleneck. An example of plugins format could be: Examples: >>> plugins=[ ... dict(cfg=dict(type='xxx', arg1='xxx'), ... stages=(False, True, True, True), ... position='after_conv2'), ... dict(cfg=dict(type='yyy'), ... 
stages=(True, True, True, True), ... position='after_conv3'), ... dict(cfg=dict(type='zzz', postfix='1'), ... stages=(True, True, True, True), ... position='after_conv3'), ... dict(cfg=dict(type='zzz', postfix='2'), ... stages=(True, True, True, True), ... position='after_conv3') ... ] >>> self = ResNet(depth=18) >>> stage_plugins = self.make_stage_plugins(plugins, 0) >>> assert len(stage_plugins) == 3 Suppose ``stage_idx=0``, the structure of blocks in the stage would be: .. code-block:: none conv1-> conv2->conv3->yyy->zzz1->zzz2 Suppose 'stage_idx=1', the structure of blocks in the stage would be: .. code-block:: none conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 If stages is missing, the plugin would be applied to all stages. Args: plugins (list[dict]): List of plugins cfg to build. The postfix is required if multiple same type plugins are inserted. stage_idx (int): Index of stage to build Returns: list[dict]: Plugins for current stage """ stage_plugins = [] for plugin in plugins: plugin = plugin.copy() stages = plugin.pop('stages', None) assert stages is None or len(stages) == self.num_stages # whether to insert plugin into current stage if stages is None or stages[stage_idx]: stage_plugins.append(plugin) return stage_plugins def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer``.""" return ResLayer(**kwargs) @property def norm1(self): """nn.Module: the normalization layer named "norm1" """ return getattr(self, self.norm1_name) def _make_stem_layer(self, in_channels, stem_channels): if self.deep_stem: self.stem = nn.Sequential( build_conv_layer( self.conv_cfg, in_channels, stem_channels // 2, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels // 2)[1], nn.ReLU(inplace=True), build_conv_layer( self.conv_cfg, stem_channels // 2, stem_channels // 2, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels // 2)[1], nn.ReLU(inplace=True), build_conv_layer( 
self.conv_cfg, stem_channels // 2, stem_channels, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels)[1], nn.ReLU(inplace=True)) else: self.conv1 = build_conv_layer( self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, stem_channels, postfix=1) self.add_module(self.norm1_name, norm1) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def _freeze_stages(self): if self.frozen_stages >= 0: if self.deep_stem: self.stem.eval() for param in self.stem.parameters(): param.requires_grad = False else: self.norm1.eval() for m in [self.conv1, self.norm1]: for param in m.parameters(): param.requires_grad = False for i in range(1, self.frozen_stages + 1): m = getattr(self, f'layer{i}') m.eval() for param in m.parameters(): param.requires_grad = False def forward(self, x): """Forward function.""" if self.deep_stem: x = self.stem(x) else: x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def train(self, mode=True): """Convert the model into training mode while keep normalization layer freezed.""" super(ResNet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval() @MODELS.register_module() class ResNetV1d(ResNet): r"""ResNetV1d variant described in `Bag of Tricks <https://arxiv.org/pdf/1812.01187.pdf>`_. Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in the input stem with three 3x3 convs. And in the downsampling block, a 2x2 avg_pool with stride 2 is added before conv, whose stride is changed to 1. 
""" def __init__(self, **kwargs): super(ResNetV1d, self).__init__( deep_stem=True, avg_down=True, **kwargs)
23,840
34.424963
79
py
ERD
ERD-main/mmdet/models/backbones/detectors_resnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.logging import MMLogger
from mmengine.model import Sequential, constant_init, kaiming_init
from mmengine.runner.checkpoint import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.registry import MODELS
from .resnet import BasicBlock
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet


class Bottleneck(_Bottleneck):
    r"""Bottleneck for the ResNet backbone in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_.

    This bottleneck allows the users to specify whether to use
    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).

    Args:
        inplanes (int): The number of input channels.
        planes (int): The number of output channels before expansion.
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        sac (dict, optional): Dictionary to construct SAC. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 rfp_inplanes=None,
                 sac=None,
                 init_cfg=None,
                 **kwargs):
        super(Bottleneck, self).__init__(
            inplanes, planes, init_cfg=init_cfg, **kwargs)

        assert sac is None or isinstance(sac, dict)
        self.sac = sac
        self.with_sac = sac is not None
        if self.with_sac:
            # Replace the base-class 3x3 conv2 with a SAC convolution.
            self.conv2 = build_conv_layer(
                self.sac,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                bias=False)

        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            self.rfp_conv = build_conv_layer(
                None,
                self.rfp_inplanes,
                planes * self.expansion,
                1,
                stride=1,
                bias=True)
            if init_cfg is None:
                # Zero-init rfp_conv so the RFP branch starts as a no-op.
                self.init_cfg = dict(
                    type='Constant', val=0, override=dict(name='rfp_conv'))

    def rfp_forward(self, x, rfp_feat):
        """The forward function that also takes the RFP features as input."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity
            return out

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        if self.rfp_inplanes:
            # Fuse the recursive feature-pyramid features into the residual.
            rfp_feat = self.rfp_conv(rfp_feat)
            out = out + rfp_feat

        return self.relu(out)


class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone for RPF in detectoRS.

    The difference between this module and base class is that we pass
    ``rfp_inplanes`` to the first block.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 downsample_first=True,
                 rfp_inplanes=None,
                 **kwargs):
        self.block = block
        assert downsample_first, f'downsample_first={downsample_first} is ' \
                                 'not supported in DetectoRS'

        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Build the shortcut projection; optionally use avg-pool
            # downsampling so the 1x1 conv keeps stride 1.
            downsample = []
            conv_stride = stride
            if avg_down and stride != 1:
                conv_stride = 1
                downsample.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            downsample.extend([
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=conv_stride,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1]
            ])
            downsample = nn.Sequential(*downsample)

        # Only the first block receives the RFP channels and the stride.
        layers = [
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                rfp_inplanes=rfp_inplanes,
                **kwargs)
        ]
        inplanes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
        super(ResLayer, self).__init__(*layers)


@MODELS.register_module()
class DetectoRS_ResNet(ResNet):
    """ResNet backbone for DetectoRS.

    Args:
        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
            Convolution). Default: None.
        stage_with_sac (list): Which stage to use sac. Default: (False,
            False, False, False).
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        output_img (bool): If ``True``, the input image will be inserted into
            the starting position of output. Default: False.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 sac=None,
                 stage_with_sac=(False, False, False, False),
                 rfp_inplanes=None,
                 output_img=False,
                 pretrained=None,
                 init_cfg=None,
                 **kwargs):
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        self.pretrained = pretrained
        if init_cfg is not None:
            assert isinstance(init_cfg, dict), \
                f'init_cfg must be a dict, but got {type(init_cfg)}'
            if 'type' in init_cfg:
                assert init_cfg.get('type') == 'Pretrained', \
                    'Only can initialize module by loading a pretrained model'
            else:
                raise KeyError('`init_cfg` must contain the key "type"')
            self.pretrained = init_cfg.get('checkpoint')
        self.sac = sac
        self.stage_with_sac = stage_with_sac
        self.rfp_inplanes = rfp_inplanes
        self.output_img = output_img
        super(DetectoRS_ResNet, self).__init__(**kwargs)

        # Rebuild all residual stages so that SAC and RFP options are
        # threaded into each stage (the base class built plain ones).
        self.inplanes = self.stem_channels
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            sac = self.sac if self.stage_with_sac[i] else None
            if self.plugins is not None:
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            planes = self.base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                sac=sac,
                rfp_inplanes=rfp_inplanes if i > 0 else None,
                plugins=stage_plugins)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

    # In order to be properly initialized by RFP
    def init_weights(self):
        # Calling this method will cause parameter initialization exception
        # super(DetectoRS_ResNet, self).init_weights()

        if isinstance(self.pretrained, str):
            logger = MMLogger.get_current_instance()
            load_checkpoint(self, self.pretrained, strict=False, logger=logger)
        elif self.pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m.conv2, 'conv_offset'):
                        constant_init(m.conv2.conv_offset, 0)

            if self.zero_init_residual:
                # Zero-init the last norm of each block so residual branches
                # start as identity mappings.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
        return ResLayer(**kwargs)

    def forward(self, x):
        """Forward function."""
        outs = list(super(DetectoRS_ResNet, self).forward(x))
        if self.output_img:
            outs.insert(0, x)
        return tuple(outs)

    def rfp_forward(self, x, rfp_feats):
        """Forward function for RFP."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            # Stage 1 gets no RFP features; later stages each consume one.
            rfp_feat = rfp_feats[i] if i > 0 else None
            for layer in res_layer:
                x = layer.rfp_forward(x, rfp_feat)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
12,764
35.059322
79
py
ERD
ERD-main/mmdet/models/backbones/ssd_vgg.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch.nn as nn
from mmcv.cnn import VGG
from mmengine.model import BaseModule

from mmdet.registry import MODELS
from ..necks import ssd_neck


@MODELS.register_module()
class SSDVGG(VGG, BaseModule):
    """VGG Backbone network for single-shot-detection.

    On top of the plain VGG trunk this backbone appends one extra max-pool
    and the fc6/fc7 replacement convs of the SSD paper, then taps feature
    maps at ``out_feature_indices``.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            of the model.
        ceil_mode (bool): When True, will use `ceil` instead of `floor` to
            compute the output shape.
        out_indices (Sequence[int]): Output from which stages.
        out_feature_indices (Sequence[int]): Output from which feature map.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
        input_size (int, optional): Deprecated argumment.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argumment.
            L2 normalization layer init scale.

    Example:
        >>> self = SSDVGG(input_size=300, depth=11)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 300, 300)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 1024, 19, 19)
        (1, 512, 10, 10)
        (1, 256, 5, 5)
        (1, 256, 3, 3)
        (1, 256, 1, 1)
    """
    extra_setting = {
        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
    }

    def __init__(self,
                 depth,
                 with_last_pool=False,
                 ceil_mode=True,
                 out_indices=(3, 4),
                 out_feature_indices=(22, 34),
                 pretrained=None,
                 init_cfg=None,
                 input_size=None,
                 l2_norm_scale=None):
        # TODO: in_channels for mmcv.VGG
        super(SSDVGG, self).__init__(
            depth,
            with_last_pool=with_last_pool,
            ceil_mode=ceil_mode,
            out_indices=out_indices)

        # SSD-specific tail layers; their module names simply continue the
        # sequential numbering already used by ``self.features``.
        for extra_layer in (
                nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
                nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
                nn.ReLU(inplace=True),
                nn.Conv2d(1024, 1024, kernel_size=1),
                nn.ReLU(inplace=True)):
            self.features.add_module(str(len(self.features)), extra_layer)

        self.out_feature_indices = out_feature_indices

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'

        if init_cfg is not None:
            self.init_cfg = init_cfg
        elif isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            self.init_cfg = [
                dict(type='Kaiming', layer='Conv2d'),
                dict(type='Constant', val=1, layer='BatchNorm2d'),
                dict(type='Normal', std=0.01, layer='Linear'),
            ]
        else:
            raise TypeError('pretrained must be a str or None')

        if input_size is not None:
            warnings.warn('DeprecationWarning: input_size is deprecated')
        if l2_norm_scale is not None:
            warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
                          'deprecated, it has been moved to SSDNeck.')

    def init_weights(self, pretrained=None):
        # ``pretrained`` is accepted only for backward compatibility and is
        # ignored; skipping ``VGG`` in the MRO dispatches straight to
        # ``BaseModule.init_weights`` which honors ``self.init_cfg``.
        super(VGG, self).init_weights()

    def forward(self, x):
        """Forward function."""
        outs = []
        for idx, layer in enumerate(self.features):
            x = layer(x)
            if idx in self.out_feature_indices:
                outs.append(x)

        # A single tapped feature map is returned bare, otherwise a tuple.
        return outs[0] if len(outs) == 1 else tuple(outs)


class L2Norm(ssd_neck.L2Norm):

    def __init__(self, **kwargs):
        """Deprecated alias kept for backward compatibility."""
        super(L2Norm, self).__init__(**kwargs)
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
                      'is deprecated, please use L2Norm in '
                      'mmdet/models/necks/ssd_neck.py instead')
4,707
35.496124
79
py
ERD
ERD-main/mmdet/models/backbones/resnext.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

from mmcv.cnn import build_conv_layer, build_norm_layer

from mmdet.registry import MODELS
from ..layers import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet


class Bottleneck(_Bottleneck):
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # Grouped convolution widens the bottleneck in proportion to the
        # number of groups; groups == 1 degenerates to plain ResNet width.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)

        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                self.conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                self.dcn,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                groups=groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        if self.with_plugins:
            # The base class created plugins for the plain-ResNet channel
            # counts; rebuild them against the grouped ``width``.
            self._del_block_plugins(self.after_conv1_plugin_names +
                                    self.after_conv2_plugin_names +
                                    self.after_conv3_plugin_names)
            self.after_conv1_plugin_names = self.make_block_plugins(
                width, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                width, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                self.planes * self.expansion, self.after_conv3_plugins)

    def _del_block_plugins(self, plugin_names):
        """delete plugins for block if exist.

        Args:
            plugin_names (list[str]): List of plugins name to delete.
        """
        assert isinstance(plugin_names, list)
        for name in plugin_names:
            del self._modules[name]


@MODELS.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Resnet stages. Default: 4.
        groups (int): Group of resnext.
        base_width (int): Base width of resnext.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm
            layer in resblocks to let them behave as identity.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # Store grouping parameters before the base class builds the stages,
        # since make_res_layer (called from super().__init__) reads them.
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``"""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
5,712
35.858065
79
py
ERD
ERD-main/mmdet/models/backbones/cspnext.py
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple

import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from torch import Tensor
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from ..layers import CSPLayer
from .csp_darknet import SPPBottleneck


@MODELS.register_module()
class CSPNeXt(BaseModule):
    """CSPNeXt backbone used in RTMDet.

    Args:
        arch (str): Architecture of CSPNeXt, from {P5, P6}.
            Defaults to P5.
        expand_ratio (float): Ratio to adjust the number of channels of the
            hidden layer. Defaults to 0.5.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        out_indices (Sequence[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Defaults to False.
        arch_ovewrite (list): Overwrite default arch settings.
            Defaults to None.
        spp_kernel_sizes: (tuple[int]): Sequential of kernel sizes of SPP
            layers. Defaults to (5, 9, 13).
        channel_attention (bool): Whether to add channel attention in each
            stage. Defaults to True.
        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            convolution layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
            config norm layer. Defaults to dict(type='BN', requires_grad=True).
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation
            layer. Defaults to dict(type='SiLU').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        init_cfg (:obj:`ConfigDict` or dict or list[dict] or
            list[:obj:`ConfigDict`]): Initialization config dict.
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 6, True, False], [512, 1024, 3, False, True]],
        'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 6, True, False], [512, 768, 3, True, False],
               [768, 1024, 3, False, True]]
    }

    def __init__(
        self,
        arch: str = 'P5',
        deepen_factor: float = 1.0,
        widen_factor: float = 1.0,
        out_indices: Sequence[int] = (2, 3, 4),
        frozen_stages: int = -1,
        use_depthwise: bool = False,
        expand_ratio: float = 0.5,
        arch_ovewrite: dict = None,
        spp_kernel_sizes: Sequence[int] = (5, 9, 13),
        channel_attention: bool = True,
        conv_cfg: OptConfigType = None,
        norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
        act_cfg: ConfigType = dict(type='SiLU'),
        norm_eval: bool = False,
        init_cfg: OptMultiConfig = dict(
            type='Kaiming',
            layer='Conv2d',
            a=math.sqrt(5),
            distribution='uniform',
            mode='fan_in',
            nonlinearity='leaky_relu')
    ) -> None:
        super().__init__(init_cfg=init_cfg)
        arch_setting = self.arch_settings[arch]
        if arch_ovewrite:
            arch_setting = arch_ovewrite
        # issubset accepts any iterable; pass the range directly instead of
        # wrapping it in a generator.
        assert set(out_indices).issubset(range(len(arch_setting) + 1))
        if frozen_stages not in range(-1, len(arch_setting) + 1):
            raise ValueError('frozen_stages must be in range(-1, '
                             'len(arch_setting) + 1). But received '
                             f'{frozen_stages}')

        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.use_depthwise = use_depthwise
        self.norm_eval = norm_eval
        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
        # Three 3x3 convs replace the usual single large-kernel stem.
        self.stem = nn.Sequential(
            ConvModule(
                3,
                int(arch_setting[0][0] * widen_factor // 2),
                3,
                padding=1,
                stride=2,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg),
            ConvModule(
                int(arch_setting[0][0] * widen_factor // 2),
                int(arch_setting[0][0] * widen_factor // 2),
                3,
                padding=1,
                stride=1,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg),
            ConvModule(
                int(arch_setting[0][0] * widen_factor // 2),
                int(arch_setting[0][0] * widen_factor),
                3,
                padding=1,
                stride=1,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))
        self.layers = ['stem']

        for i, (in_channels, out_channels, num_blocks, add_identity,
                use_spp) in enumerate(arch_setting):
            in_channels = int(in_channels * widen_factor)
            out_channels = int(out_channels * widen_factor)
            num_blocks = max(round(num_blocks * deepen_factor), 1)
            stage = []
            # Stride-2 conv downsamples at the start of every stage.
            conv_layer = conv(
                in_channels,
                out_channels,
                3,
                stride=2,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
            stage.append(conv_layer)
            if use_spp:
                spp = SPPBottleneck(
                    out_channels,
                    out_channels,
                    kernel_sizes=spp_kernel_sizes,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg)
                stage.append(spp)
            csp_layer = CSPLayer(
                out_channels,
                out_channels,
                num_blocks=num_blocks,
                add_identity=add_identity,
                use_depthwise=use_depthwise,
                use_cspnext_block=True,
                expand_ratio=expand_ratio,
                channel_attention=channel_attention,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
            stage.append(csp_layer)
            self.add_module(f'stage{i + 1}', nn.Sequential(*stage))
            self.layers.append(f'stage{i + 1}')

    def _freeze_stages(self) -> None:
        """Freeze stages up to ``self.frozen_stages`` (inclusive)."""
        if self.frozen_stages >= 0:
            for i in range(self.frozen_stages + 1):
                m = getattr(self, self.layers[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def train(self, mode=True) -> None:
        """Switch train/eval mode while keeping frozen stages and,
        optionally, all norm layers in eval mode."""
        super().train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()

    def forward(self, x: Tensor) -> Tuple[Tensor, ...]:
        """Forward function.

        Note: the input is a single image tensor that is fed through the
        stem; the previous ``Tuple[Tensor, ...]`` input annotation was
        incorrect.
        """
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
7,784
38.719388
79
py
ERD
ERD-main/mmdet/models/backbones/resnest.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule

from mmdet.registry import MODELS
from ..layers import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNetV1d


class RSoftmax(nn.Module):
    """Radix Softmax module in ``SplitAttentionConv2d``.

    Args:
        radix (int): Radix of input.
        groups (int): Groups of input.
    """

    def __init__(self, radix, groups):
        super().__init__()
        self.radix = radix
        self.groups = groups

    def forward(self, x):
        batch = x.size(0)
        if self.radix > 1:
            # Softmax over the radix splits, computed independently per group.
            x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
            x = F.softmax(x, dim=1)
            x = x.reshape(batch, -1)
        else:
            x = torch.sigmoid(x)
        return x


class SplitAttentionConv2d(BaseModule):
    """Split-Attention Conv2d in ResNeSt.

    Args:
        in_channels (int): Number of channels in the input feature map.
        channels (int): Number of intermediate channels.
        kernel_size (int | tuple[int]): Size of the convolution kernel.
        stride (int | tuple[int]): Stride of the convolution.
        padding (int | tuple[int]): Zero-padding added to both sides of
        dilation (int | tuple[int]): Spacing between kernel elements.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as nn.Conv2d.
        radix (int): Radix of SpltAtConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels. Default: 4.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        dcn (dict): Config dict for DCN. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 radix=2,
                 reduction_factor=4,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 init_cfg=None):
        super(SplitAttentionConv2d, self).__init__(init_cfg)
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.groups = groups
        self.channels = channels
        self.with_dcn = dcn is not None
        self.dcn = dcn
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_dcn and not fallback_on_stride:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            conv_cfg = dcn
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            channels * radix,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups * radix,
            bias=False)
        # To be consistent with original implementation, starting from 0
        self.norm0_name, norm0 = build_norm_layer(
            norm_cfg, channels * radix, postfix=0)
        self.add_module(self.norm0_name, norm0)
        self.relu = nn.ReLU(inplace=True)
        self.fc1 = build_conv_layer(
            None, channels, inter_channels, 1, groups=self.groups)
        self.norm1_name, norm1 = build_norm_layer(
            norm_cfg, inter_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.fc2 = build_conv_layer(
            None, inter_channels, channels * radix, 1, groups=self.groups)
        self.rsoftmax = RSoftmax(radix, groups)

    @property
    def norm0(self):
        """nn.Module: the normalization layer named "norm0" """
        return getattr(self, self.norm0_name)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def forward(self, x):
        x = self.conv(x)
        x = self.norm0(x)
        x = self.relu(x)

        # Fix: the original bound ``batch, rchannel = x.shape[:2]`` and then
        # immediately recomputed ``batch``; ``rchannel`` was never used.
        batch = x.size(0)
        if self.radix > 1:
            splits = x.view(batch, self.radix, -1, *x.shape[2:])
            gap = splits.sum(dim=1)
        else:
            gap = x
        # Global average pooling feeds the split-attention weights.
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)

        gap = self.norm1(gap)
        gap = self.relu(gap)

        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)

        if self.radix > 1:
            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
            out = torch.sum(attens * splits, dim=1)
        else:
            out = atten * x
        return out.contiguous()


class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeSt.

    Args:
        inplane (int): Input planes of this block.
        planes (int): Middle planes of this block.
        groups (int): Groups of conv2.
        base_width (int): Base of width in terms of base channels. Default: 4.
        base_channels (int): Base of channels for calculating width.
            Default: 64.
        radix (int): Radix of SpltAtConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Key word arguments for base class.
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        """Bottleneck block for ResNeSt."""
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        self.avg_down_stride = avg_down_stride and self.conv2_stride > 1

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.with_modulated_dcn = False
        self.conv2 = SplitAttentionConv2d(
            width,
            width,
            kernel_size=3,
            stride=1 if self.avg_down_stride else self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            radix=radix,
            reduction_factor=reduction_factor,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            dcn=self.dcn)
        # conv2 normalizes internally (norm0/norm1 of SplitAttentionConv2d),
        # so the base class's norm2 is unused and removed.
        delattr(self, self.norm2_name)

        if self.avg_down_stride:
            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

    def forward(self, x):

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)

            if self.avg_down_stride:
                out = self.avd_layer(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out


@MODELS.register_module()
class ResNeSt(ResNetV1d):
    """ResNeSt backbone.

    Args:
        groups (int): Number of groups of Bottleneck. Default: 1
        base_width (int): Base width of Bottleneck. Default: 4
        radix (int): Radix of SplitAttentionConv2d. Default: 2
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Keyword arguments for ResNet.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
        200: (Bottleneck, (3, 24, 36, 3))
    }

    def __init__(self,
                 groups=1,
                 base_width=4,
                 radix=2,
                 reduction_factor=4,
                 avg_down_stride=True,
                 **kwargs):
        # Store split-attention parameters before the base class builds the
        # stages, since make_res_layer reads them.
        self.groups = groups
        self.base_width = base_width
        self.radix = radix
        self.reduction_factor = reduction_factor
        self.avg_down_stride = avg_down_stride
        super(ResNeSt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            radix=self.radix,
            reduction_factor=self.reduction_factor,
            avg_down_stride=self.avg_down_stride,
            **kwargs)
10,582
31.764706
79
py
ERD
ERD-main/mmdet/models/backbones/csp_darknet.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.registry import MODELS
from ..layers import CSPLayer


class Focus(nn.Module):
    """Focus width and height information into channel space.

    Rearranges each 2x2 spatial neighbourhood into the channel dimension
    (halving width/height, quadrupling channels) and fuses the result with a
    single ConvModule.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        kernel_size (int): The kernel size of the convolution. Default: 1
        stride (int): The stride of the convolution. Default: 1
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish').
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=1,
                 stride=1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish')):
        super().__init__()
        self.conv = ConvModule(
            in_channels * 4,
            out_channels,
            kernel_size,
            stride,
            padding=(kernel_size - 1) // 2,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x):
        # (b, c, w, h) -> (b, 4c, w/2, h/2): take the four pixel phases.
        top_left = x[..., ::2, ::2]
        top_right = x[..., ::2, 1::2]
        bot_left = x[..., 1::2, ::2]
        bot_right = x[..., 1::2, 1::2]
        # Channel order (tl, bl, tr, br) matches the reference layout.
        stacked = torch.cat((top_left, bot_left, top_right, bot_right), dim=1)
        return self.conv(stacked)


class SPPBottleneck(BaseModule):
    """Spatial pyramid pooling layer used in YOLOv3-SPP.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling
            layers. Default: (5, 9, 13).
        conv_cfg (dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='Swish').
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes=(5, 9, 13),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 init_cfg=None):
        super().__init__(init_cfg)
        hidden_channels = in_channels // 2
        self.conv1 = ConvModule(
            in_channels,
            hidden_channels,
            1,
            stride=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # Stride-1 max pools with matching padding keep the spatial size.
        self.poolings = nn.ModuleList(
            nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2)
            for size in kernel_sizes)
        self.conv2 = ConvModule(
            hidden_channels * (len(kernel_sizes) + 1),
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x):
        x = self.conv1(x)
        # Concatenation is forced to fp32 under AMP, as in the original.
        with torch.cuda.amp.autocast(enabled=False):
            pooled = [pooling(x) for pooling in self.poolings]
            x = torch.cat([x] + pooled, dim=1)
        return self.conv2(x)


@MODELS.register_module()
class CSPDarknet(BaseModule):
    """CSP-Darknet backbone used in YOLOv5 and YOLOX.

    Args:
        arch (str): Architecture of CSP-Darknet, from {P5, P6}.
            Default: P5.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Default: 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        out_indices (Sequence[int]): Output from which stages.
            Default: (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Default: -1.
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Default: False.
        arch_ovewrite(list): Overwrite default arch settings. Default: None.
        spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP
            layers. Default: (5, 9, 13).
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    Example:
        >>> from mmdet.models import CSPDarknet
        >>> import torch
        >>> self = CSPDarknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],
               [256, 512, 9, True, False], [512, 1024, 3, False, True]],
        'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False],
               [256, 512, 9, True, False], [512, 768, 3, True, False],
               [768, 1024, 3, False, True]]
    }

    def __init__(self,
                 arch='P5',
                 deepen_factor=1.0,
                 widen_factor=1.0,
                 out_indices=(2, 3, 4),
                 frozen_stages=-1,
                 use_depthwise=False,
                 arch_ovewrite=None,
                 spp_kernal_sizes=(5, 9, 13),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
                 act_cfg=dict(type='Swish'),
                 norm_eval=False,
                 init_cfg=dict(
                     type='Kaiming',
                     layer='Conv2d',
                     a=math.sqrt(5),
                     distribution='uniform',
                     mode='fan_in',
                     nonlinearity='leaky_relu')):
        super().__init__(init_cfg)
        arch_setting = self.arch_settings[arch]
        if arch_ovewrite:
            arch_setting = arch_ovewrite
        # Valid outputs are the stem (index 0) plus one index per stage.
        assert set(out_indices).issubset(
            set(range(len(arch_setting) + 1)))
        if frozen_stages not in range(-1, len(arch_setting) + 1):
            raise ValueError(
                'frozen_stages must be in range(-1, len(arch_setting) + 1). '
                f'But received {frozen_stages}')

        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.use_depthwise = use_depthwise
        self.norm_eval = norm_eval
        conv_cls = DepthwiseSeparableConvModule if use_depthwise else ConvModule

        self.stem = Focus(
            3,
            int(arch_setting[0][0] * widen_factor),
            kernel_size=3,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.layers = ['stem']

        for idx, (in_c, out_c, depth, shortcut,
                  with_spp) in enumerate(arch_setting):
            in_c = int(in_c * widen_factor)
            out_c = int(out_c * widen_factor)
            depth = max(round(depth * deepen_factor), 1)
            # Each stage: strided conv, optional SPP, then a CSP layer.
            stage_modules = [
                conv_cls(
                    in_c,
                    out_c,
                    3,
                    stride=2,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg)
            ]
            if with_spp:
                stage_modules.append(
                    SPPBottleneck(
                        out_c,
                        out_c,
                        kernel_sizes=spp_kernal_sizes,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg))
            stage_modules.append(
                CSPLayer(
                    out_c,
                    out_c,
                    num_blocks=depth,
                    add_identity=shortcut,
                    use_depthwise=use_depthwise,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
            self.add_module(f'stage{idx + 1}', nn.Sequential(*stage_modules))
            self.layers.append(f'stage{idx + 1}')

    def _freeze_stages(self):
        if self.frozen_stages >= 0:
            for idx in range(self.frozen_stages + 1):
                module = getattr(self, self.layers[idx])
                module.eval()
                for param in module.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        super(CSPDarknet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for module in self.modules():
                if isinstance(module, _BatchNorm):
                    module.eval()

    def forward(self, x):
        outputs = []
        for idx, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if idx in self.out_indices:
                outputs.append(x)
        return tuple(outputs)
10,620
36.006969
77
py
ERD
ERD-main/mmdet/models/backbones/hourglass.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Sequence

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule

from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from ..layers import ResLayer
from .resnet import BasicBlock


class HourglassModule(BaseModule):
    """Hourglass Module for HourglassNet backbone.

    Generate module recursively and use BasicBlock as the base unit.

    Args:
        depth (int): Depth of current HourglassModule.
        stage_channels (list[int]): Feature channels of sub-modules in current
            and follow-up HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in current and
            follow-up HourglassModule.
        norm_cfg (ConfigType): Dictionary to construct and config norm layer.
            Defaults to `dict(type='BN', requires_grad=True)`
        upsample_cfg (ConfigType): Config dict for interpolate layer.
            Defaults to `dict(mode='nearest')`
        init_cfg (dict or ConfigDict, optional): the config to control the
            initialization.
    """

    def __init__(self,
                 depth: int,
                 stage_channels: List[int],
                 stage_blocks: List[int],
                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
                 upsample_cfg: ConfigType = dict(mode='nearest'),
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg)

        self.depth = depth

        cur_block = stage_blocks[0]
        next_block = stage_blocks[1]

        cur_channel = stage_channels[0]
        next_channel = stage_channels[1]

        self.up1 = ResLayer(
            BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)

        self.low1 = ResLayer(
            BasicBlock,
            cur_channel,
            next_channel,
            cur_block,
            stride=2,
            norm_cfg=norm_cfg)

        if self.depth > 1:
            # Fix: forward norm_cfg and upsample_cfg to the recursive
            # sub-module; previously custom configs were silently dropped
            # below the first recursion level and the defaults were used.
            self.low2 = HourglassModule(
                depth - 1,
                stage_channels[1:],
                stage_blocks[1:],
                norm_cfg=norm_cfg,
                upsample_cfg=upsample_cfg)
        else:
            self.low2 = ResLayer(
                BasicBlock,
                next_channel,
                next_channel,
                next_block,
                norm_cfg=norm_cfg)

        self.low3 = ResLayer(
            BasicBlock,
            next_channel,
            cur_channel,
            cur_block,
            norm_cfg=norm_cfg,
            downsample_first=False)

        self.up2 = F.interpolate
        self.upsample_cfg = upsample_cfg

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward function.

        Returns the sum of the skip branch (``up1``) and the upsampled
        recursive branch (``up2``).
        """
        up1 = self.up1(x)
        low1 = self.low1(x)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        # Fixing `scale factor` (e.g. 2) is common for upsampling, but
        # in some cases the spatial size is mismatched and error will arise.
        if 'scale_factor' in self.upsample_cfg:
            up2 = self.up2(low3, **self.upsample_cfg)
        else:
            shape = up1.shape[2:]
            up2 = self.up2(low3, size=shape, **self.upsample_cfg)
        return up1 + up2


@MODELS.register_module()
class HourglassNet(BaseModule):
    """HourglassNet backbone.

    Stacked Hourglass Networks for Human Pose Estimation.
    More details can be found in the `paper
    <https://arxiv.org/abs/1603.06937>`_ .

    Args:
        downsample_times (int): Downsample times in a HourglassModule.
        num_stacks (int): Number of HourglassModule modules stacked,
            1 for Hourglass-52, 2 for Hourglass-104.
        stage_channels (Sequence[int]): Feature channel of each sub-module in
            a HourglassModule.
        stage_blocks (Sequence[int]): Number of sub-modules stacked in a
            HourglassModule.
        feat_channel (int): Feature channel of conv after a HourglassModule.
        norm_cfg (norm_cfg): Dictionary to construct and config norm layer.
        init_cfg (dict or ConfigDict, optional): the config to control the
            initialization.

    Example:
        >>> from mmdet.models import HourglassNet
        >>> import torch
        >>> self = HourglassNet()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 511, 511)
        >>> level_outputs = self.forward(inputs)
        >>> for level_output in level_outputs:
        ...     print(tuple(level_output.shape))
        (1, 256, 128, 128)
        (1, 256, 128, 128)
    """

    def __init__(self,
                 downsample_times: int = 5,
                 num_stacks: int = 2,
                 stage_channels: Sequence = (256, 256, 384, 384, 384, 512),
                 stage_blocks: Sequence = (2, 2, 2, 2, 2, 4),
                 feat_channel: int = 256,
                 norm_cfg: ConfigType = dict(type='BN', requires_grad=True),
                 init_cfg: OptMultiConfig = None) -> None:
        assert init_cfg is None, 'To prevent abnormal initialization ' \
                                 'behavior, init_cfg is not allowed to be set'
        super().__init__(init_cfg)

        self.num_stacks = num_stacks
        assert self.num_stacks >= 1
        assert len(stage_channels) == len(stage_blocks)
        assert len(stage_channels) > downsample_times

        cur_channel = stage_channels[0]

        # 4x downsampling stem: 7x7 stride-2 conv + stride-2 residual layer.
        self.stem = nn.Sequential(
            ConvModule(
                3, cur_channel // 2, 7, padding=3, stride=2,
                norm_cfg=norm_cfg),
            ResLayer(
                BasicBlock,
                cur_channel // 2,
                cur_channel,
                1,
                stride=2,
                norm_cfg=norm_cfg))

        # Fix: pass norm_cfg so the stacked hourglasses honour a custom norm
        # config; previously only the default was ever used here.
        self.hourglass_modules = nn.ModuleList([
            HourglassModule(
                downsample_times,
                stage_channels,
                stage_blocks,
                norm_cfg=norm_cfg) for _ in range(num_stacks)
        ])

        self.inters = ResLayer(
            BasicBlock,
            cur_channel,
            cur_channel,
            num_stacks - 1,
            norm_cfg=norm_cfg)

        self.conv1x1s = nn.ModuleList([
            ConvModule(
                cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range(num_stacks - 1)
        ])

        self.out_convs = nn.ModuleList([
            ConvModule(
                cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
            for _ in range(num_stacks)
        ])

        self.remap_convs = nn.ModuleList([
            ConvModule(
                feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range(num_stacks - 1)
        ])

        self.relu = nn.ReLU(inplace=True)

    def init_weights(self) -> None:
        """Init module weights."""
        # Training Centripetal Model needs to reset parameters for Conv2d
        super().init_weights()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.reset_parameters()

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        """Forward function.

        Returns one feature map per stack; intermediate supervision uses
        all of them.
        """
        inter_feat = self.stem(x)
        out_feats = []

        for ind in range(self.num_stacks):
            single_hourglass = self.hourglass_modules[ind]
            out_conv = self.out_convs[ind]

            hourglass_feat = single_hourglass(inter_feat)
            out_feat = out_conv(hourglass_feat)
            out_feats.append(out_feat)

            if ind < self.num_stacks - 1:
                # Fuse the previous intermediate feature with a remapped
                # output feature before feeding the next stack.
                inter_feat = self.conv1x1s[ind](
                    inter_feat) + self.remap_convs[ind](
                        out_feat)
                inter_feat = self.inters[ind](self.relu(inter_feat))

        return out_feats
7,744
33.269912
79
py
ERD
ERD-main/mmdet/models/backbones/res2net.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import Sequential

from mmdet.registry import MODELS
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet


class Bottle2neck(_Bottleneck):
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 scales=4,
                 base_width=26,
                 base_channels=64,
                 stage_type='normal',
                 **kwargs):
        """Bottle2neck block for Res2Net.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
        assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'

        # Per-branch width of the hierarchical residual connections.
        width = int(math.floor(self.planes * (base_width / base_channels)))

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width * scales, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width * scales,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)

        if stage_type == 'stage' and self.conv2_stride != 1:
            # Downsamples the last split when this block starts a stage.
            self.pool = nn.AvgPool2d(
                kernel_size=3, stride=self.conv2_stride, padding=1)

        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            branch_conv_cfg = self.conv_cfg
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            branch_conv_cfg = self.dcn

        branch_convs = []
        branch_norms = []
        for branch in range(scales - 1):
            branch_convs.append(
                build_conv_layer(
                    branch_conv_cfg,
                    width,
                    width,
                    kernel_size=3,
                    stride=self.conv2_stride,
                    padding=self.dilation,
                    dilation=self.dilation,
                    bias=False))
            branch_norms.append(
                build_norm_layer(self.norm_cfg, width, postfix=branch + 1)[1])
        self.convs = nn.ModuleList(branch_convs)
        self.bns = nn.ModuleList(branch_norms)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width * scales,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.stage_type = stage_type
        self.scales = scales
        self.width = width
        # conv2/norm2 of the parent Bottleneck are superseded by the
        # multi-scale branches above.
        delattr(self, 'conv2')
        delattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            # Hierarchical split: each branch refines its chunk, optionally
            # adding the previous branch's output before its 3x3 conv.
            chunks = torch.split(out, self.width, 1)
            branch_out = self.relu(self.bns[0](self.convs[0](
                chunks[0].contiguous())))
            out = branch_out
            for k in range(1, self.scales - 1):
                if self.stage_type == 'stage':
                    branch_in = chunks[k]
                else:
                    branch_in = branch_out + chunks[k]
                branch_out = self.relu(self.bns[k](self.convs[k](
                    branch_in.contiguous())))
                out = torch.cat((out, branch_out), 1)

            # The last split passes through unchanged (or pooled when this
            # block opens a strided stage).
            if self.stage_type == 'normal' or self.conv2_stride == 1:
                out = torch.cat((out, chunks[self.scales - 1]), 1)
            elif self.stage_type == 'stage':
                out = torch.cat((out, self.pool(chunks[self.scales - 1])), 1)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity
            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        return self.relu(out)


class Res2Layer(Sequential):
    """Res2Layer to build Res2Net style backbone.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        scales (int): Scales used in Res2Net. Default: 4
        base_width (int): Basic width of each scale. Default: 26
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 scales=4,
                 base_width=26,
                 **kwargs):
        self.block = block

        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # AvgPool + 1x1 conv shortcut instead of a strided convolution.
            downsample = nn.Sequential(
                nn.AvgPool2d(
                    kernel_size=stride,
                    stride=stride,
                    ceil_mode=True,
                    count_include_pad=False),
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=1,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1],
            )

        # First block handles the stride/shortcut; the rest are stride 1.
        res_blocks = [
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                scales=scales,
                base_width=base_width,
                stage_type='stage',
                **kwargs)
        ]
        inplanes = planes * block.expansion
        res_blocks.extend(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                scales=scales,
                base_width=base_width,
                **kwargs) for _ in range(1, num_blocks))
        super(Res2Layer, self).__init__(*res_blocks)


@MODELS.register_module()
class Res2Net(ResNet):
    """Res2Net backbone.

    Args:
        scales (int): Scales used in Res2Net. Default: 4
        base_width (int): Basic width of each scale. Default: 26
        depth (int): Depth of res2net, from {50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Res2net stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - position (str, required): Position inside block to insert
              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm
            layer in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import Res2Net
        >>> import torch
        >>> self = Res2Net(depth=50, scales=4, base_width=26)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)
    """

    arch_settings = {
        50: (Bottle2neck, (3, 4, 6, 3)),
        101: (Bottle2neck, (3, 4, 23, 3)),
        152: (Bottle2neck, (3, 8, 36, 3))
    }

    def __init__(self,
                 scales=4,
                 base_width=26,
                 style='pytorch',
                 deep_stem=True,
                 avg_down=True,
                 pretrained=None,
                 init_cfg=None,
                 **kwargs):
        self.scales = scales
        self.base_width = base_width
        # NOTE(review): style/deep_stem/avg_down are accepted but the super
        # call pins them to 'pytorch'/True/True — Res2Net appears to require
        # these settings; confirm before ever forwarding the user values.
        super(Res2Net, self).__init__(
            style='pytorch',
            deep_stem=True,
            avg_down=True,
            pretrained=pretrained,
            init_cfg=init_cfg,
            **kwargs)

    def make_res_layer(self, **kwargs):
        return Res2Layer(
            scales=self.scales,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
11,661
34.554878
79
py
ERD
ERD-main/mmdet/models/backbones/darknet.py
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import warnings

import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.registry import MODELS


class ResBlock(BaseModule):
    """The basic residual block used in Darknet.

    Each ResBlock consists of two ConvModules and the input is added to the
    final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
    The first convLayer has filter size of 1x1 and halves the channel count;
    the second has filter size of 3x3 and restores it.

    Args:
        in_channels (int): The input channels. Must be even.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(ResBlock, self).__init__(init_cfg)
        assert in_channels % 2 == 0  # ensure the in_channels is even
        half_in_channels = in_channels // 2

        common_cfg = dict(
            conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # 1x1 squeeze followed by a 3x3 expand back to in_channels.
        self.conv1 = ConvModule(in_channels, half_in_channels, 1, **common_cfg)
        self.conv2 = ConvModule(
            half_in_channels, in_channels, 3, padding=1, **common_cfg)

    def forward(self, x):
        identity = x
        out = self.conv2(self.conv1(x))
        return out + identity


@MODELS.register_module()
class Darknet(BaseModule):
    """Darknet backbone.

    Args:
        depth (int): Depth of Darknet. Currently only support 53.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Default: -1.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import Darknet
        >>> import torch
        >>> self = Darknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """

    # Dict(depth: (layers, channels))
    arch_settings = {
        53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
                               (512, 1024)))
    }

    def __init__(self,
                 depth=53,
                 out_indices=(3, 4, 5),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 norm_eval=True,
                 pretrained=None,
                 init_cfg=None):
        super(Darknet, self).__init__(init_cfg)
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for darknet')

        self.depth = depth
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.layers, self.channels = self.arch_settings[depth]

        common_cfg = dict(
            conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        self.conv1 = ConvModule(3, 32, 3, padding=1, **common_cfg)

        self.cr_blocks = ['conv1']
        for stage_idx, n_layers in enumerate(self.layers):
            layer_name = f'conv_res_block{stage_idx + 1}'
            in_c, out_c = self.channels[stage_idx]
            self.add_module(
                layer_name,
                self.make_conv_res_block(in_c, out_c, n_layers, **common_cfg))
            self.cr_blocks.append(layer_name)

        self.norm_eval = norm_eval

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        outputs = []
        for idx, layer_name in enumerate(self.cr_blocks):
            cr_block = getattr(self, layer_name)
            x = cr_block(x)
            if idx in self.out_indices:
                outputs.append(x)

        return tuple(outputs)

    def _freeze_stages(self):
        if self.frozen_stages >= 0:
            for idx in range(self.frozen_stages):
                module = getattr(self, self.cr_blocks[idx])
                module.eval()
                for param in module.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        super(Darknet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for module in self.modules():
                if isinstance(module, _BatchNorm):
                    module.eval()

    @staticmethod
    def make_conv_res_block(in_channels,
                            out_channels,
                            res_repeat,
                            conv_cfg=None,
                            norm_cfg=dict(type='BN', requires_grad=True),
                            act_cfg=dict(type='LeakyReLU',
                                         negative_slope=0.1)):
        """In Darknet backbone, ConvLayer is usually followed by ResBlock.

        This function will make that. The Conv layers always have 3x3 filters
        with stride=2. The number of the filters in Conv layer is the same as
        the out channels of the ResBlock.

        Args:
            in_channels (int): The number of input channels.
            out_channels (int): The number of output channels.
            res_repeat (int): The number of ResBlocks.
            conv_cfg (dict): Config dict for convolution layer.
                Default: None.
            norm_cfg (dict): Dictionary to construct and config norm layer.
                Default: dict(type='BN', requires_grad=True)
            act_cfg (dict): Config dict for activation layer.
                Default: dict(type='LeakyReLU', negative_slope=0.1).
        """
        common_cfg = dict(
            conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        block = nn.Sequential()
        block.add_module(
            'conv',
            ConvModule(
                in_channels, out_channels, 3, stride=2, padding=1,
                **common_cfg))
        for idx in range(res_repeat):
            block.add_module('res{}'.format(idx),
                             ResBlock(out_channels, **common_cfg))
        return block
8,235
37.485981
79
py
ERD
ERD-main/mmdet/datasets/samplers/batch_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence

from torch.utils.data import BatchSampler, Sampler

from mmdet.registry import DATA_SAMPLERS


# TODO: maybe replace with a data_loader wrapper
@DATA_SAMPLERS.register_module()
class AspectRatioBatchSampler(BatchSampler):
    """A sampler wrapper for grouping images with similar aspect ratio (< 1 or.

    >= 1) into a same batch.

    Args:
        sampler (Sampler): Base sampler.
        batch_size (int): Size of mini-batch.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``.
    """

    def __init__(self,
                 sampler: Sampler,
                 batch_size: int,
                 drop_last: bool = False) -> None:
        if not isinstance(sampler, Sampler):
            raise TypeError(
                f'sampler should be an instance of ``Sampler``, '
                f'but got {sampler}')
        if not isinstance(batch_size, int) or batch_size <= 0:
            raise ValueError(
                f'batch_size should be a positive integer value, '
                f'but got batch_size={batch_size}')
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last
        # Bucket 0 collects portrait images (w < h), bucket 1 landscape.
        self._aspect_ratio_buckets = [[] for _ in range(2)]

    def __iter__(self) -> Sequence[int]:
        for index in self.sampler:
            info = self.sampler.dataset.get_data_info(index)
            group = 0 if info['width'] < info['height'] else 1
            bucket = self._aspect_ratio_buckets[group]
            bucket.append(index)
            # Emit a batch as soon as one aspect-ratio group fills up.
            if len(bucket) == self.batch_size:
                yield bucket.copy()
                bucket.clear()

        # Flush what remains, mixing the two groups, then reset state.
        leftovers = (
            self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[1])
        self._aspect_ratio_buckets = [[] for _ in range(2)]
        while leftovers:
            if len(leftovers) <= self.batch_size:
                # NOTE(review): when drop_last is True this also drops a
                # leftover chunk of exactly batch_size — confirm intended.
                if not self.drop_last:
                    yield leftovers
                leftovers = []
            else:
                yield leftovers[:self.batch_size]
                leftovers = leftovers[self.batch_size:]

    def __len__(self) -> int:
        full, remainder = divmod(len(self.sampler), self.batch_size)
        if self.drop_last:
            return full
        return full + bool(remainder)
2,637
37.231884
79
py
ERD
ERD-main/mmdet/datasets/samplers/multi_source_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Iterator, List, Optional, Sized, Union

import numpy as np
import torch
from mmengine.dataset import BaseDataset
from mmengine.dist import get_dist_info, sync_random_seed
from torch.utils.data import Sampler

from mmdet.registry import DATA_SAMPLERS


@DATA_SAMPLERS.register_module()
class MultiSourceSampler(Sampler):
    r"""Multi-Source Infinite Sampler.

    Yields an endless stream of indices into a ``ConcatDataset`` such that
    every mini-batch of ``batch_size`` indices draws a fixed number of
    samples from each source dataset, as prescribed by ``source_ratio``.

    Args:
        dataset (Sized): The dataset; must be a ``ConcatDataset`` exposing
            ``cumulative_sizes`` and ``datasets``.
        batch_size (int): Size of mini-batch.
        source_ratio (list[int | float]): The sampling ratio of different
            source datasets in a mini-batch.
        shuffle (bool): Whether shuffle the dataset or not. Defaults to True.
        seed (int, optional): Random seed. If None, a seed synchronized
            across processes is generated. Defaults to None.

    Examples:
        >>> train_dataloader = dict(
        >>>     batch_size=5,
        >>>     num_workers=5,
        >>>     persistent_workers=True,
        >>>     sampler=dict(type='MultiSourceSampler',
        >>>                  batch_size=5, source_ratio=[1, 4]),
        >>>     batch_sampler=None,
        >>>     dataset=dict(type='ConcatDataset', datasets=[...]))
    """

    def __init__(self,
                 dataset: Sized,
                 batch_size: int,
                 source_ratio: List[Union[int, float]],
                 shuffle: bool = True,
                 seed: Optional[int] = None) -> None:
        assert hasattr(dataset, 'cumulative_sizes'),\
            f'The dataset must be ConcatDataset, but get {dataset}'
        assert isinstance(batch_size, int) and batch_size > 0, \
            'batch_size must be a positive integer value, ' \
            f'but got batch_size={batch_size}'
        assert isinstance(source_ratio, list), \
            f'source_ratio must be a list, but got source_ratio={source_ratio}'
        assert len(source_ratio) == len(dataset.cumulative_sizes), \
            'The length of source_ratio must be equal to ' \
            f'the number of datasets, but got source_ratio={source_ratio}'

        rank, world_size = get_dist_info()
        self.rank = rank
        self.world_size = world_size

        self.dataset = dataset
        self.cumulative_sizes = [0] + dataset.cumulative_sizes
        self.batch_size = batch_size
        self.source_ratio = source_ratio

        # per-source sample counts for one batch; source 0 absorbs the
        # rounding remainder so the counts always sum to batch_size
        self.num_per_source = [
            int(batch_size * sr / sum(source_ratio)) for sr in source_ratio
        ]
        self.num_per_source[0] = batch_size - sum(self.num_per_source[1:])

        assert sum(self.num_per_source) == batch_size, \
            'The sum of num_per_source must be equal to ' \
            f'batch_size, but get {self.num_per_source}'

        self.seed = sync_random_seed() if seed is None else seed
        self.shuffle = shuffle
        # one infinite, rank-sliced index stream per source dataset
        self.source2inds = {
            source: self._indices_of_rank(len(ds))
            for source, ds in enumerate(dataset.datasets)
        }

    def _infinite_indices(self, sample_size: int) -> Iterator[int]:
        """Infinitely yield a sequence of indices."""
        g = torch.Generator()
        g.manual_seed(self.seed)
        while True:
            if self.shuffle:
                yield from torch.randperm(sample_size, generator=g).tolist()
            else:
                yield from torch.arange(sample_size).tolist()

    def _indices_of_rank(self, sample_size: int) -> Iterator[int]:
        """Slice the infinite index stream so this rank sees every
        ``world_size``-th element starting at ``rank``."""
        yield from itertools.islice(
            self._infinite_indices(sample_size), self.rank, None,
            self.world_size)

    def __iter__(self) -> Iterator[int]:
        batch = []
        while True:
            for source, quota in enumerate(self.num_per_source):
                picked = []
                for idx in self.source2inds[source]:
                    # shift the per-source index into ConcatDataset space
                    idx += self.cumulative_sizes[source]
                    picked.append(idx)
                    if len(picked) == quota:
                        batch += picked
                        break
            yield from batch
            batch = []

    def __len__(self) -> int:
        return len(self.dataset)

    def set_epoch(self, epoch: int) -> None:
        """No-op: this infinite sampler does not track epochs."""
        pass


@DATA_SAMPLERS.register_module()
class GroupMultiSourceSampler(MultiSourceSampler):
    r"""Group Multi-Source Infinite Sampler.

    Like :class:`MultiSourceSampler`, but additionally restricts every
    batch to a single aspect-ratio group (w < h vs. w >= h), choosing the
    group at random in proportion to its overall size.

    Args:
        dataset (Sized): The dataset.
        batch_size (int): Size of mini-batch.
        source_ratio (list[int | float]): The sampling ratio of different
            source datasets in a mini-batch.
        shuffle (bool): Whether shuffle the dataset or not. Defaults to True.
        seed (int, optional): Random seed. If None, set a random seed.
            Defaults to None.
    """

    def __init__(self,
                 dataset: BaseDataset,
                 batch_size: int,
                 source_ratio: List[Union[int, float]],
                 shuffle: bool = True,
                 seed: Optional[int] = None) -> None:
        super().__init__(
            dataset=dataset,
            batch_size=batch_size,
            source_ratio=source_ratio,
            shuffle=shuffle,
            seed=seed)

        self._get_source_group_info()
        # one infinite index stream per (group, source) pair
        self.group_source2inds = [{
            source:
            self._indices_of_rank(self.group2size_per_source[source][group])
            for source in range(len(dataset.datasets))
        } for group in range(len(self.group_ratio))]

    def _get_source_group_info(self) -> None:
        """Collect per-source, per-group sizes and index lists.

        NOTE: the structures below hard-code two sources and two groups.
        """
        self.group2size_per_source = [{0: 0, 1: 0}, {0: 0, 1: 0}]
        self.group2inds_per_source = [{0: [], 1: []}, {0: [], 1: []}]
        for source, ds in enumerate(self.dataset.datasets):
            for idx in range(len(ds)):
                data_info = ds.get_data_info(idx)
                width, height = data_info['width'], data_info['height']
                group = 0 if width < height else 1
                self.group2size_per_source[source][group] += 1
                self.group2inds_per_source[source][group].append(idx)

        self.group_sizes = np.zeros(2, dtype=np.int64)
        for group2size in self.group2size_per_source:
            for group, size in group2size.items():
                self.group_sizes[group] += size
        # sampling probability of each group, proportional to its size
        self.group_ratio = self.group_sizes / sum(self.group_sizes)

    def __iter__(self) -> Iterator[int]:
        batch = []
        while True:
            group = np.random.choice(
                list(range(len(self.group_ratio))), p=self.group_ratio)
            for source, quota in enumerate(self.num_per_source):
                picked = []
                for idx in self.group_source2inds[group][source]:
                    # map the within-group index back to a dataset index,
                    # then shift into ConcatDataset space
                    idx = self.group2inds_per_source[source][group][
                        idx] + self.cumulative_sizes[source]
                    picked.append(idx)
                    if len(picked) == quota:
                        batch += picked
                        break
            yield from batch
            batch = []
8,580
38.911628
79
py
ERD
ERD-main/mmdet/datasets/samplers/class_aware_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Dict, Iterator, Optional, Union

import numpy as np
import torch
from mmengine.dataset import BaseDataset
from mmengine.dist import get_dist_info, sync_random_seed
from torch.utils.data import Sampler

from mmdet.registry import DATA_SAMPLERS


@DATA_SAMPLERS.register_module()
class ClassAwareSampler(Sampler):
    r"""Sampler that restricts data loading to the label of the dataset.

    A class-aware sampling strategy to effectively tackle the
    non-uniform class distribution. The length of the training data is
    consistent with source data. Simple improvements based on `Relay
    Backpropagation for Effective Learning of Deep Convolutional
    Neural Networks <https://arxiv.org/abs/1512.05830>`_

    The implementation logic is referred to
    https://github.com/Sense-X/TSD/blob/master/mmdet/datasets/samplers/distributed_classaware_sampler.py

    Args:
        dataset: Dataset used for sampling.
        seed (int, optional): random seed used to shuffle the sampler.
            This number should be identical across all
            processes in the distributed group. Defaults to None.
        num_sample_class (int): The number of samples taken from each
            per-label list. Defaults to 1.
    """

    def __init__(self,
                 dataset: BaseDataset,
                 seed: Optional[int] = None,
                 num_sample_class: int = 1) -> None:
        rank, world_size = get_dist_info()
        self.rank = rank
        self.world_size = world_size

        self.dataset = dataset
        self.epoch = 0
        # Seed must be identical across workers; synchronize one if the
        # caller did not supply it.
        if seed is None:
            seed = sync_random_seed()
        self.seed = seed

        # The number of samples taken from each per-label list
        assert num_sample_class > 0 and isinstance(num_sample_class, int)
        self.num_sample_class = num_sample_class
        # Get per-label image list from dataset
        self.cat_dict = self.get_cat2imgs()

        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / world_size))
        self.total_size = self.num_samples * self.world_size

        # number of images containing each category
        self.num_cat_imgs = [len(x) for x in self.cat_dict.values()]
        # drop categories that have no images at all
        self.valid_cat_inds = [
            i for i, length in enumerate(self.num_cat_imgs) if length != 0
        ]
        self.num_classes = len(self.valid_cat_inds)

    def get_cat2imgs(self) -> Dict[int, list]:
        """Build a mapping from category index to image indices.

        Returns:
            dict[int, list]: Keys are label indices; each value lists the
            image indices whose annotations contain that label.
        """
        classes = self.dataset.metainfo.get('classes', None)
        if classes is None:
            raise ValueError('dataset metainfo must contain `classes`')
        cat2imgs = {i: [] for i in range(len(classes))}
        for i in range(len(self.dataset)):
            cat_ids = set(self.dataset.get_cat_ids(i))
            for cat in cat_ids:
                cat2imgs[cat].append(i)
        return cat2imgs

    def __iter__(self) -> Iterator[int]:
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)

        # cycling iterator over valid labels, and one over each
        # per-label image list
        label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g)
        data_iter_dict = dict()
        for i in self.valid_cat_inds:
            data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g)

        def _draw(cls_iter, per_cls_iters, num_sample_cls):
            """Walk once over the label cycle, taking ``num_sample_cls``
            image indices for each visited label."""
            id_indices = []
            for _ in range(len(cls_iter)):
                cls_idx = next(cls_iter)
                for _ in range(num_sample_cls):
                    id_indices.append(next(per_cls_iters[cls_idx]))
            return id_indices

        # number of full label-cycle passes needed to cover total_size
        num_bins = int(
            math.ceil(self.total_size * 1.0 / self.num_classes /
                      self.num_sample_class))
        indices = []
        for _ in range(num_bins):
            indices += _draw(label_iter_list, data_iter_dict,
                             self.num_sample_class)

        # trim or pad so the index list is exactly total_size long
        if len(indices) >= self.total_size:
            indices = indices[:self.total_size]
        else:
            indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # keep only this rank's contiguous slice
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self) -> int:
        """The number of samples in this rank."""
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        """Sets the epoch for this sampler.

        When :attr:`shuffle=True`, this ensures all replicas use a
        different random ordering for each epoch. Otherwise, the next
        iteration of this sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.epoch = epoch


class RandomCycleIter:
    """Cycle through a shuffled list, reshuffling after each full pass.

    The implementation logic is referred to
    https://github.com/wutong16/DistributionBalancedLoss/blob/master/mllt/datasets/loader/sampler.py

    Example:
        >>> label_list = [0, 1, 2, 4, 5]
        >>> g = torch.Generator()
        >>> g.manual_seed(0)
        >>> label_iter_list = RandomCycleIter(label_list, generator=g)
        >>> index = next(label_iter_list)

    Args:
        data (list or ndarray): The data that needs to be shuffled.
        generator: A torch.Generator object used to seed the shuffles.
    """  # noqa: W605

    def __init__(self,
                 data: Union[list, np.ndarray],
                 generator: torch.Generator = None) -> None:
        self.data = data
        self.length = len(data)
        # a random permutation of positions into `data`
        self.index = torch.randperm(self.length, generator=generator).numpy()
        self.i = 0
        self.generator = generator

    def __iter__(self) -> Iterator:
        return self

    def __len__(self) -> int:
        return len(self.data)

    def __next__(self):
        # reshuffle once the current permutation is exhausted
        if self.i == self.length:
            self.index = torch.randperm(
                self.length, generator=self.generator).numpy()
            self.i = 0
        idx = self.data[self.index[self.i]]
        self.i += 1
        return idx
7,132
35.958549
104
py
ERD
ERD-main/mmdet/datasets/transforms/formatting.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmcv.transforms import to_tensor
from mmcv.transforms.base import BaseTransform
from mmengine.structures import InstanceData, PixelData

from mmdet.registry import TRANSFORMS
from mmdet.structures import DetDataSample
from mmdet.structures.bbox import BaseBoxes


@TRANSFORMS.register_module()
class PackDetInputs(BaseTransform):
    """Pack the inputs data for the detection / semantic segmentation /
    panoptic segmentation.

    The ``img_meta`` item is always populated. Its contents depend on
    ``meta_keys``; the defaults are:

        - ``img_id``: id of the image
        - ``img_path``: path to the image file
        - ``ori_shape``: original shape of the image as a tuple (h, w)
        - ``img_shape``: shape of the image input to the network as a tuple
          (h, w). Note that images may be zero padded on the bottom/right
          if the batch tensor is larger than this shape.
        - ``scale_factor``: a float indicating the preprocessing scale
        - ``flip``: a boolean indicating if image flip transform was used
        - ``flip_direction``: the flipping direction

    Args:
        meta_keys (Sequence[str], optional): Meta keys to be collected in
            ``data_samples`` metainfo. Default: ``('img_id', 'img_path',
            'ori_shape', 'img_shape', 'scale_factor', 'flip',
            'flip_direction')``
    """
    # maps result-dict annotation keys to InstanceData field names
    mapping_table = {
        'gt_bboxes': 'bboxes',
        'gt_bboxes_labels': 'labels',
        'gt_masks': 'masks'
    }

    def __init__(self,
                 meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                            'scale_factor', 'flip', 'flip_direction')):
        self.meta_keys = meta_keys

    def transform(self, results: dict) -> dict:
        """Method to pack the input data.

        Args:
            results (dict): Result dict from the data pipeline.

        Returns:
            dict:

            - 'inputs' (obj:`torch.Tensor`): The forward data of models.
            - 'data_samples' (obj:`DetDataSample`): The annotation info of
              the sample.
        """
        packed_results = dict()
        if 'img' in results:
            img = results['img']
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)

            # To improve computational speed by 3-5x, pick the cheapest
            # HWC->CHW path based on contiguity:
            #   non-contiguous: numpy transpose + ascontiguousarray
            #   contiguous:     torch permute + contiguous
            # Refer to https://github.com/open-mmlab/mmdetection/pull/9533
            # for more details
            if not img.flags.c_contiguous:
                img = np.ascontiguousarray(img.transpose(2, 0, 1))
                img = to_tensor(img)
            else:
                img = to_tensor(img).permute(2, 0, 1).contiguous()

            packed_results['inputs'] = img

        if 'gt_ignore_flags' in results:
            valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]
            ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]

        data_sample = DetDataSample()
        instance_data = InstanceData()
        ignore_instance_data = InstanceData()

        for src_key, dst_key in self.mapping_table.items():
            if src_key not in results:
                continue
            value = results[src_key]
            # masks and box-type objects are stored as-is; anything else
            # is converted to a tensor
            keep_raw = src_key == 'gt_masks' or isinstance(value, BaseBoxes)
            if 'gt_ignore_flags' in results:
                valid_value = value[valid_idx]
                ignore_value = value[ignore_idx]
                if not keep_raw:
                    valid_value = to_tensor(valid_value)
                    ignore_value = to_tensor(ignore_value)
                instance_data[dst_key] = valid_value
                ignore_instance_data[dst_key] = ignore_value
            else:
                instance_data[dst_key] = value if keep_raw else to_tensor(
                    value)
        data_sample.gt_instances = instance_data
        data_sample.ignored_instances = ignore_instance_data

        if 'proposals' in results:
            proposals = InstanceData(
                bboxes=to_tensor(results['proposals']),
                scores=to_tensor(results['proposals_scores']))
            data_sample.proposals = proposals

        if 'gt_seg_map' in results:
            gt_sem_seg_data = dict(
                sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))
            data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)

        img_meta = {}
        for key in self.meta_keys:
            assert key in results, f'`{key}` is not found in `results`, ' \
                                   f'the valid keys are {list(results)}.'
            img_meta[key] = results[key]
        data_sample.set_metainfo(img_meta)
        packed_results['data_samples'] = data_sample

        return packed_results

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(meta_keys={self.meta_keys})'
        return repr_str


@TRANSFORMS.register_module()
class ToTensor:
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert the values under ``self.keys`` to :obj:`torch.Tensor`.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The same dict with the selected entries converted to
                :obj:`torch.Tensor`.
        """
        for key in self.keys:
            results[key] = to_tensor(results[key])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


@TRANSFORMS.register_module()
class ImageToTensor:
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of input image is (H, W, C). The pipeline will
    convert it to (C, H, W). If only 2 dimension (H, W) is given, the
    output would be (1, H, W).

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert images under ``self.keys`` to channel-first tensors.

        Args:
            results (dict): Result dict containing the image data.

        Returns:
            dict: The same dict with each image converted to a
                :obj:`torch.Tensor` in (C, H, W) order.
        """
        for key in self.keys:
            img = results[key]
            if len(img.shape) < 3:
                # promote grayscale (H, W) to (H, W, 1)
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img).permute(2, 0, 1).contiguous()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'


@TRANSFORMS.register_module()
class Transpose:
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Transpose the entries under ``self.keys`` to ``self.order``.

        Args:
            results (dict): Result dict containing the data to transpose.

        Returns:
            dict: The same dict with the selected entries transposed.
        """
        for key in self.keys:
            results[key] = results[key].transpose(self.order)
        return results

    def __repr__(self):
        return self.__class__.__name__ + \
            f'(keys={self.keys}, order={self.order})'


@TRANSFORMS.register_module()
class WrapFieldsToLists:
    """Wrap fields of the data dictionary into lists for evaluation.

    This class can be used as a last step of a test or validation
    pipeline for single image evaluation or inference.

    Example:
        >>> test_pipeline = [
        >>>    dict(type='LoadImageFromFile'),
        >>>    dict(type='Normalize',
                    mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375],
                    to_rgb=True),
        >>>    dict(type='Pad', size_divisor=32),
        >>>    dict(type='ImageToTensor', keys=['img']),
        >>>    dict(type='Collect', keys=['img']),
        >>>    dict(type='WrapFieldsToLists')
        >>> ]
    """

    def __call__(self, results):
        """Wrap every value in ``results`` into a one-element list.

        Args:
            results (dict): Result dict containing the data to wrap.

        Returns:
            dict: The same dict with every value wrapped into a list.
        """
        for key in results:
            results[key] = [results[key]]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}()'
9,565
32.80212
79
py
ERD
ERD-main/mmdet/datasets/transforms/loading.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple, Union import mmcv import numpy as np import pycocotools.mask as maskUtils import torch from mmcv.transforms import BaseTransform from mmcv.transforms import LoadAnnotations as MMCV_LoadAnnotations from mmcv.transforms import LoadImageFromFile from mmengine.fileio import get from mmengine.structures import BaseDataElement from mmdet.registry import TRANSFORMS from mmdet.structures.bbox import get_box_type from mmdet.structures.bbox.box_type import autocast_box_type from mmdet.structures.mask import BitmapMasks, PolygonMasks @TRANSFORMS.register_module() class LoadImageFromNDArray(LoadImageFromFile): """Load an image from ``results['img']``. Similar with :obj:`LoadImageFromFile`, but the image has been loaded as :obj:`np.ndarray` in ``results['img']``. Can be used when loading image from webcam. Required Keys: - img Modified Keys: - img - img_path - img_shape - ori_shape Args: to_float32 (bool): Whether to convert the loaded image to a float32 numpy array. If set to False, the loaded image is an uint8 array. Defaults to False. """ def transform(self, results: dict) -> dict: """Transform function to add image meta information. Args: results (dict): Result dict with Webcam read image in ``results['img']``. Returns: dict: The dict contains loaded image and meta information. """ img = results['img'] if self.to_float32: img = img.astype(np.float32) results['img_path'] = None results['img'] = img results['img_shape'] = img.shape[:2] results['ori_shape'] = img.shape[:2] return results @TRANSFORMS.register_module() class LoadMultiChannelImageFromFiles(BaseTransform): """Load multi-channel images from a list of separate channel files. Required Keys: - img_path Modified Keys: - img - img_shape - ori_shape Args: to_float32 (bool): Whether to convert the loaded image to a float32 numpy array. If set to False, the loaded image is an uint8 array. Defaults to False. 
color_type (str): The flag argument for :func:``mmcv.imfrombytes``. Defaults to 'unchanged'. imdecode_backend (str): The image decoding backend type. The backend argument for :func:``mmcv.imfrombytes``. See :func:``mmcv.imfrombytes`` for details. Defaults to 'cv2'. file_client_args (dict): Arguments to instantiate the corresponding backend in mmdet <= 3.0.0rc6. Defaults to None. backend_args (dict, optional): Arguments to instantiate the corresponding backend in mmdet >= 3.0.0rc7. Defaults to None. """ def __init__( self, to_float32: bool = False, color_type: str = 'unchanged', imdecode_backend: str = 'cv2', file_client_args: dict = None, backend_args: dict = None, ) -> None: self.to_float32 = to_float32 self.color_type = color_type self.imdecode_backend = imdecode_backend self.backend_args = backend_args if file_client_args is not None: raise RuntimeError( 'The `file_client_args` is deprecated, ' 'please use `backend_args` instead, please refer to' 'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501 ) def transform(self, results: dict) -> dict: """Transform functions to load multiple images and get images meta information. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded images and meta information. 
""" assert isinstance(results['img_path'], list) img = [] for name in results['img_path']: img_bytes = get(name, backend_args=self.backend_args) img.append( mmcv.imfrombytes( img_bytes, flag=self.color_type, backend=self.imdecode_backend)) img = np.stack(img, axis=-1) if self.to_float32: img = img.astype(np.float32) results['img'] = img results['img_shape'] = img.shape[:2] results['ori_shape'] = img.shape[:2] return results def __repr__(self): repr_str = (f'{self.__class__.__name__}(' f'to_float32={self.to_float32}, ' f"color_type='{self.color_type}', " f"imdecode_backend='{self.imdecode_backend}', " f'backend_args={self.backend_args})') return repr_str @TRANSFORMS.register_module() class LoadAnnotations(MMCV_LoadAnnotations): """Load and process the ``instances`` and ``seg_map`` annotation provided by dataset. The annotation format is as the following: .. code-block:: python { 'instances': [ { # List of 4 numbers representing the bounding box of the # instance, in (x1, y1, x2, y2) order. 'bbox': [x1, y1, x2, y2], # Label of image classification. 'bbox_label': 1, # Used in instance/panoptic segmentation. The segmentation mask # of the instance or the information of segments. # 1. If list[list[float]], it represents a list of polygons, # one for each connected component of the object. Each # list[float] is one simple polygon in the format of # [x1, y1, ..., xn, yn] (n≥3). The Xs and Ys are absolute # coordinates in unit of pixels. # 2. If dict, it represents the per-pixel segmentation mask in # COCO’s compressed RLE format. The dict should have keys # “size” and “counts”. Can be loaded by pycocotools 'mask': list[list[float]] or dict, } ] # Filename of semantic or panoptic segmentation ground truth file. 'seg_map_path': 'a/b/c' } After this module, the annotation has been changed to the format below: .. code-block:: python { # In (x1, y1, x2, y2) order, float type. N is the number of bboxes # in an image 'gt_bboxes': BaseBoxes(N, 4) # In int type. 
'gt_bboxes_labels': np.ndarray(N, ) # In built-in class 'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W) # In uint8 type. 'gt_seg_map': np.ndarray (H, W) # in (x, y, v) order, float type. } Required Keys: - height - width - instances - bbox (optional) - bbox_label - mask (optional) - ignore_flag - seg_map_path (optional) Added Keys: - gt_bboxes (BaseBoxes[torch.float32]) - gt_bboxes_labels (np.int64) - gt_masks (BitmapMasks | PolygonMasks) - gt_seg_map (np.uint8) - gt_ignore_flags (bool) Args: with_bbox (bool): Whether to parse and load the bbox annotation. Defaults to True. with_label (bool): Whether to parse and load the label annotation. Defaults to True. with_mask (bool): Whether to parse and load the mask annotation. Default: False. with_seg (bool): Whether to parse and load the semantic segmentation annotation. Defaults to False. poly2mask (bool): Whether to convert mask to bitmap. Default: True. box_type (str): The box type used to wrap the bboxes. If ``box_type`` is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'. imdecode_backend (str): The image decoding backend type. The backend argument for :func:``mmcv.imfrombytes``. See :fun:``mmcv.imfrombytes`` for details. Defaults to 'cv2'. backend_args (dict, optional): Arguments to instantiate the corresponding backend. Defaults to None. """ def __init__(self, with_mask: bool = False, poly2mask: bool = True, box_type: str = 'hbox', **kwargs) -> None: super(LoadAnnotations, self).__init__(**kwargs) self.with_mask = with_mask self.poly2mask = poly2mask self.box_type = box_type def _load_bboxes(self, results: dict) -> None: """Private function to load bounding box annotations. Args: results (dict): Result dict from :obj:``mmengine.BaseDataset``. Returns: dict: The dict contains loaded bounding box annotations. 
""" gt_bboxes = [] gt_ignore_flags = [] for instance in results.get('instances', []): gt_bboxes.append(instance['bbox']) gt_ignore_flags.append(instance['ignore_flag']) if self.box_type is None: results['gt_bboxes'] = np.array( gt_bboxes, dtype=np.float32).reshape((-1, 4)) else: _, box_type_cls = get_box_type(self.box_type) results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32) results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool) def _load_labels(self, results: dict) -> None: """Private function to load label annotations. Args: results (dict): Result dict from :obj:``mmengine.BaseDataset``. Returns: dict: The dict contains loaded label annotations. """ gt_bboxes_labels = [] for instance in results.get('instances', []): gt_bboxes_labels.append(instance['bbox_label']) # TODO: Inconsistent with mmcv, consider how to deal with it later. results['gt_bboxes_labels'] = np.array( gt_bboxes_labels, dtype=np.int64) def _poly2mask(self, mask_ann: Union[list, dict], img_h: int, img_w: int) -> np.ndarray: """Private function to convert masks represented with polygon to bitmaps. Args: mask_ann (list | dict): Polygon mask annotation input. img_h (int): The height of output mask. img_w (int): The width of output mask. Returns: np.ndarray: The decode bitmap mask of shape (img_h, img_w). """ if isinstance(mask_ann, list): # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) rle = maskUtils.merge(rles) elif isinstance(mask_ann['counts'], list): # uncompressed RLE rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) else: # rle rle = mask_ann mask = maskUtils.decode(rle) return mask def _process_masks(self, results: dict) -> list: """Process gt_masks and filter invalid polygons. Args: results (dict): Result dict from :obj:``mmengine.BaseDataset``. Returns: list: Processed gt_masks. 
""" gt_masks = [] gt_ignore_flags = [] for instance in results.get('instances', []): gt_mask = instance['mask'] # If the annotation of segmentation mask is invalid, # ignore the whole instance. if isinstance(gt_mask, list): gt_mask = [ np.array(polygon) for polygon in gt_mask if len(polygon) % 2 == 0 and len(polygon) >= 6 ] if len(gt_mask) == 0: # ignore this instance and set gt_mask to a fake mask instance['ignore_flag'] = 1 gt_mask = [np.zeros(6)] elif not self.poly2mask: # `PolygonMasks` requires a ploygon of format List[np.array], # other formats are invalid. instance['ignore_flag'] = 1 gt_mask = [np.zeros(6)] elif isinstance(gt_mask, dict) and \ not (gt_mask.get('counts') is not None and gt_mask.get('size') is not None and isinstance(gt_mask['counts'], (list, str))): # if gt_mask is a dict, it should include `counts` and `size`, # so that `BitmapMasks` can uncompressed RLE instance['ignore_flag'] = 1 gt_mask = [np.zeros(6)] gt_masks.append(gt_mask) # re-process gt_ignore_flags gt_ignore_flags.append(instance['ignore_flag']) results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool) return gt_masks def _load_masks(self, results: dict) -> None: """Private function to load mask annotations. Args: results (dict): Result dict from :obj:``mmengine.BaseDataset``. """ h, w = results['ori_shape'] gt_masks = self._process_masks(results) if self.poly2mask: gt_masks = BitmapMasks( [self._poly2mask(mask, h, w) for mask in gt_masks], h, w) else: # fake polygon masks will be ignored in `PackDetInputs` gt_masks = PolygonMasks([mask for mask in gt_masks], h, w) results['gt_masks'] = gt_masks def transform(self, results: dict) -> dict: """Function to load multiple types annotations. Args: results (dict): Result dict from :obj:``mmengine.BaseDataset``. Returns: dict: The dict contains loaded bounding box, label and semantic segmentation. 
""" if self.with_bbox: self._load_bboxes(results) if self.with_label: self._load_labels(results) if self.with_mask: self._load_masks(results) if self.with_seg: self._load_seg_map(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(with_bbox={self.with_bbox}, ' repr_str += f'with_label={self.with_label}, ' repr_str += f'with_mask={self.with_mask}, ' repr_str += f'with_seg={self.with_seg}, ' repr_str += f'poly2mask={self.poly2mask}, ' repr_str += f"imdecode_backend='{self.imdecode_backend}', " repr_str += f'backend_args={self.backend_args})' return repr_str @TRANSFORMS.register_module() class LoadPanopticAnnotations(LoadAnnotations): """Load multiple types of panoptic annotations. The annotation format is as the following: .. code-block:: python { 'instances': [ { # List of 4 numbers representing the bounding box of the # instance, in (x1, y1, x2, y2) order. 'bbox': [x1, y1, x2, y2], # Label of image classification. 'bbox_label': 1, }, ... ] 'segments_info': [ { # id = cls_id + instance_id * INSTANCE_OFFSET 'id': int, # Contiguous category id defined in dataset. 'category': int # Thing flag. 'is_thing': bool }, ... ] # Filename of semantic or panoptic segmentation ground truth file. 'seg_map_path': 'a/b/c' } After this module, the annotation has been changed to the format below: .. code-block:: python { # In (x1, y1, x2, y2) order, float type. N is the number of bboxes # in an image 'gt_bboxes': BaseBoxes(N, 4) # In int type. 'gt_bboxes_labels': np.ndarray(N, ) # In built-in class 'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W) # In uint8 type. 'gt_seg_map': np.ndarray (H, W) # in (x, y, v) order, float type. 
} Required Keys: - height - width - instances - bbox - bbox_label - ignore_flag - segments_info - id - category - is_thing - seg_map_path Added Keys: - gt_bboxes (BaseBoxes[torch.float32]) - gt_bboxes_labels (np.int64) - gt_masks (BitmapMasks | PolygonMasks) - gt_seg_map (np.uint8) - gt_ignore_flags (bool) Args: with_bbox (bool): Whether to parse and load the bbox annotation. Defaults to True. with_label (bool): Whether to parse and load the label annotation. Defaults to True. with_mask (bool): Whether to parse and load the mask annotation. Defaults to True. with_seg (bool): Whether to parse and load the semantic segmentation annotation. Defaults to False. box_type (str): The box mode used to wrap the bboxes. imdecode_backend (str): The image decoding backend type. The backend argument for :func:``mmcv.imfrombytes``. See :fun:``mmcv.imfrombytes`` for details. Defaults to 'cv2'. backend_args (dict, optional): Arguments to instantiate the corresponding backend in mmdet >= 3.0.0rc7. Defaults to None. """ def __init__(self, with_bbox: bool = True, with_label: bool = True, with_mask: bool = True, with_seg: bool = True, box_type: str = 'hbox', imdecode_backend: str = 'cv2', backend_args: dict = None) -> None: try: from panopticapi import utils except ImportError: raise ImportError( 'panopticapi is not installed, please install it by: ' 'pip install git+https://github.com/cocodataset/' 'panopticapi.git.') self.rgb2id = utils.rgb2id super(LoadPanopticAnnotations, self).__init__( with_bbox=with_bbox, with_label=with_label, with_mask=with_mask, with_seg=with_seg, with_keypoints=False, box_type=box_type, imdecode_backend=imdecode_backend, backend_args=backend_args) def _load_masks_and_semantic_segs(self, results: dict) -> None: """Private function to load mask and semantic segmentation annotations. 
In gt_semantic_seg, the foreground label is from ``0`` to ``num_things - 1``, the background label is from ``num_things`` to ``num_things + num_stuff - 1``, 255 means the ignored label (``VOID``). Args: results (dict): Result dict from :obj:``mmdet.CustomDataset``. """ # seg_map_path is None, when inference on the dataset without gts. if results.get('seg_map_path', None) is None: return img_bytes = get( results['seg_map_path'], backend_args=self.backend_args) pan_png = mmcv.imfrombytes( img_bytes, flag='color', channel_order='rgb').squeeze() pan_png = self.rgb2id(pan_png) gt_masks = [] gt_seg = np.zeros_like(pan_png) + 255 # 255 as ignore for segment_info in results['segments_info']: mask = (pan_png == segment_info['id']) gt_seg = np.where(mask, segment_info['category'], gt_seg) # The legal thing masks if segment_info.get('is_thing'): gt_masks.append(mask.astype(np.uint8)) if self.with_mask: h, w = results['ori_shape'] gt_masks = BitmapMasks(gt_masks, h, w) results['gt_masks'] = gt_masks if self.with_seg: results['gt_seg_map'] = gt_seg def transform(self, results: dict) -> dict: """Function to load multiple types panoptic annotations. Args: results (dict): Result dict from :obj:``mmdet.CustomDataset``. Returns: dict: The dict contains loaded bounding box, label, mask and semantic segmentation annotations. """ if self.with_bbox: self._load_bboxes(results) if self.with_label: self._load_labels(results) if self.with_mask or self.with_seg: # The tasks completed by '_load_masks' and '_load_semantic_segs' # in LoadAnnotations are merged to one function. self._load_masks_and_semantic_segs(results) return results @TRANSFORMS.register_module() class LoadProposals(BaseTransform): """Load proposal pipeline. Required Keys: - proposals Modified Keys: - proposals Args: num_max_proposals (int, optional): Maximum number of proposals to load. If not specified, all proposals will be loaded. 
""" def __init__(self, num_max_proposals: Optional[int] = None) -> None: self.num_max_proposals = num_max_proposals def transform(self, results: dict) -> dict: """Transform function to load proposals from file. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded proposal annotations. """ proposals = results['proposals'] # the type of proposals should be `dict` or `InstanceData` assert isinstance(proposals, dict) \ or isinstance(proposals, BaseDataElement) bboxes = proposals['bboxes'].astype(np.float32) assert bboxes.shape[1] == 4, \ f'Proposals should have shapes (n, 4), but found {bboxes.shape}' if 'scores' in proposals: scores = proposals['scores'].astype(np.float32) assert bboxes.shape[0] == scores.shape[0] else: scores = np.zeros(bboxes.shape[0], dtype=np.float32) if self.num_max_proposals is not None: # proposals should sort by scores during dumping the proposals bboxes = bboxes[:self.num_max_proposals] scores = scores[:self.num_max_proposals] if len(bboxes) == 0: bboxes = np.zeros((0, 4), dtype=np.float32) scores = np.zeros(0, dtype=np.float32) results['proposals'] = bboxes results['proposals_scores'] = scores return results def __repr__(self): return self.__class__.__name__ + \ f'(num_max_proposals={self.num_max_proposals})' @TRANSFORMS.register_module() class FilterAnnotations(BaseTransform): """Filter invalid annotations. Required Keys: - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) Modified Keys: - gt_bboxes (optional) - gt_bboxes_labels (optional) - gt_masks (optional) - gt_ignore_flags (optional) Args: min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth boxes. Default: (1., 1.) min_gt_mask_area (int): Minimum foreground area of ground truth masks. Default: 1 by_box (bool): Filter instances with bounding boxes not meeting the min_gt_bbox_wh threshold. 
Default: True by_mask (bool): Filter instances with masks not meeting min_gt_mask_area threshold. Default: False keep_empty (bool): Whether to return None when it becomes an empty bbox after filtering. Defaults to True. """ def __init__(self, min_gt_bbox_wh: Tuple[int, int] = (1, 1), min_gt_mask_area: int = 1, by_box: bool = True, by_mask: bool = False, keep_empty: bool = True) -> None: # TODO: add more filter options assert by_box or by_mask self.min_gt_bbox_wh = min_gt_bbox_wh self.min_gt_mask_area = min_gt_mask_area self.by_box = by_box self.by_mask = by_mask self.keep_empty = keep_empty @autocast_box_type() def transform(self, results: dict) -> Union[dict, None]: """Transform function to filter annotations. Args: results (dict): Result dict. Returns: dict: Updated result dict. """ assert 'gt_bboxes' in results gt_bboxes = results['gt_bboxes'] if gt_bboxes.shape[0] == 0: return results tests = [] if self.by_box: tests.append( ((gt_bboxes.widths > self.min_gt_bbox_wh[0]) & (gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy()) if self.by_mask: assert 'gt_masks' in results gt_masks = results['gt_masks'] tests.append(gt_masks.areas >= self.min_gt_mask_area) keep = tests[0] for t in tests[1:]: keep = keep & t if not keep.any(): if self.keep_empty: return None keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags') for key in keys: if key in results: results[key] = results[key][keep] return results def __repr__(self): return self.__class__.__name__ + \ f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \ f'keep_empty={self.keep_empty})' @TRANSFORMS.register_module() class LoadEmptyAnnotations(BaseTransform): """Load Empty Annotations for unlabeled images. Added Keys: - gt_bboxes (np.float32) - gt_bboxes_labels (np.int64) - gt_masks (BitmapMasks | PolygonMasks) - gt_seg_map (np.uint8) - gt_ignore_flags (bool) Args: with_bbox (bool): Whether to load the pseudo bbox annotation. Defaults to True. with_label (bool): Whether to load the pseudo label annotation. 
Defaults to True. with_mask (bool): Whether to load the pseudo mask annotation. Default: False. with_seg (bool): Whether to load the pseudo semantic segmentation annotation. Defaults to False. seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Defaults to 255. """ def __init__(self, with_bbox: bool = True, with_label: bool = True, with_mask: bool = False, with_seg: bool = False, seg_ignore_label: int = 255) -> None: self.with_bbox = with_bbox self.with_label = with_label self.with_mask = with_mask self.with_seg = with_seg self.seg_ignore_label = seg_ignore_label def transform(self, results: dict) -> dict: """Transform function to load empty annotations. Args: results (dict): Result dict. Returns: dict: Updated result dict. """ if self.with_bbox: results['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32) results['gt_ignore_flags'] = np.zeros((0, ), dtype=bool) if self.with_label: results['gt_bboxes_labels'] = np.zeros((0, ), dtype=np.int64) if self.with_mask: # TODO: support PolygonMasks h, w = results['img_shape'] gt_masks = np.zeros((0, h, w), dtype=np.uint8) results['gt_masks'] = BitmapMasks(gt_masks, h, w) if self.with_seg: h, w = results['img_shape'] results['gt_seg_map'] = self.seg_ignore_label * np.ones( (h, w), dtype=np.uint8) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(with_bbox={self.with_bbox}, ' repr_str += f'with_label={self.with_label}, ' repr_str += f'with_mask={self.with_mask}, ' repr_str += f'with_seg={self.with_seg}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label})' return repr_str @TRANSFORMS.register_module() class InferencerLoader(BaseTransform): """Load an image from ``results['img']``. Similar with :obj:`LoadImageFromFile`, but the image has been loaded as :obj:`np.ndarray` in ``results['img']``. Can be used when loading image from webcam. 
Required Keys: - img Modified Keys: - img - img_path - img_shape - ori_shape Args: to_float32 (bool): Whether to convert the loaded image to a float32 numpy array. If set to False, the loaded image is an uint8 array. Defaults to False. """ def __init__(self, **kwargs) -> None: super().__init__() self.from_file = TRANSFORMS.build( dict(type='LoadImageFromFile', **kwargs)) self.from_ndarray = TRANSFORMS.build( dict(type='mmdet.LoadImageFromNDArray', **kwargs)) def transform(self, results: Union[str, np.ndarray, dict]) -> dict: """Transform function to add image meta information. Args: results (str, np.ndarray or dict): The result. Returns: dict: The dict contains loaded image and meta information. """ if isinstance(results, str): inputs = dict(img_path=results) elif isinstance(results, np.ndarray): inputs = dict(img=results) elif isinstance(results, dict): inputs = results else: raise NotImplementedError if 'img' in inputs: return self.from_ndarray(inputs) return self.from_file(inputs)
30,139
33.25
125
py
ERD
ERD-main/mmdet/datasets/transforms/geometric.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Union import cv2 import mmcv import numpy as np from mmcv.transforms import BaseTransform from mmcv.transforms.utils import cache_randomness from mmdet.registry import TRANSFORMS from mmdet.structures.bbox import autocast_box_type from .augment_wrappers import _MAX_LEVEL, level_to_mag @TRANSFORMS.register_module() class GeomTransform(BaseTransform): """Base class for geometric transformations. All geometric transformations need to inherit from this base class. ``GeomTransform`` unifies the class attributes and class functions of geometric transformations (ShearX, ShearY, Rotate, TranslateX, and TranslateY), and records the homography matrix. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - homography_matrix Args: prob (float): The probability for performing the geometric transformation and should be in range [0, 1]. Defaults to 1.0. level (int, optional): The level should be in range [0, _MAX_LEVEL]. If level is None, it will generate from [0, _MAX_LEVEL] randomly. Defaults to None. min_mag (float): The minimum magnitude for geometric transformation. Defaults to 0.0. max_mag (float): The maximum magnitude for geometric transformation. Defaults to 1.0. reversal_prob (float): The probability that reverses the geometric transformation magnitude. Should be in range [0,1]. Defaults to 0.5. img_border_value (int | float | tuple): The filled values for image border. If float, the same fill value will be used for all the three channels of image. If tuple, it should be 3 elements. Defaults to 128. mask_border_value (int): The fill value used for masks. Defaults to 0. seg_ignore_label (int): The fill value used for segmentation map. 
Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Defaults to 255. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, prob: float = 1.0, level: Optional[int] = None, min_mag: float = 0.0, max_mag: float = 1.0, reversal_prob: float = 0.5, img_border_value: Union[int, float, tuple] = 128, mask_border_value: int = 0, seg_ignore_label: int = 255, interpolation: str = 'bilinear') -> None: assert 0 <= prob <= 1.0, f'The probability of the transformation ' \ f'should be in range [0,1], got {prob}.' assert level is None or isinstance(level, int), \ f'The level should be None or type int, got {type(level)}.' assert level is None or 0 <= level <= _MAX_LEVEL, \ f'The level should be in range [0,{_MAX_LEVEL}], got {level}.' assert isinstance(min_mag, float), \ f'min_mag should be type float, got {type(min_mag)}.' assert isinstance(max_mag, float), \ f'max_mag should be type float, got {type(max_mag)}.' assert min_mag <= max_mag, \ f'min_mag should smaller than max_mag, ' \ f'got min_mag={min_mag} and max_mag={max_mag}' assert isinstance(reversal_prob, float), \ f'reversal_prob should be type float, got {type(max_mag)}.' assert 0 <= reversal_prob <= 1.0, \ f'The reversal probability of the transformation magnitude ' \ f'should be type float, got {type(reversal_prob)}.' if isinstance(img_border_value, (float, int)): img_border_value = tuple([float(img_border_value)] * 3) elif isinstance(img_border_value, tuple): assert len(img_border_value) == 3, \ f'img_border_value as tuple must have 3 elements, ' \ f'got {len(img_border_value)}.' 
img_border_value = tuple([float(val) for val in img_border_value]) else: raise ValueError( 'img_border_value must be float or tuple with 3 elements.') assert np.all([0 <= val <= 255 for val in img_border_value]), 'all ' \ 'elements of img_border_value should between range [0,255].' \ f'got {img_border_value}.' self.prob = prob self.level = level self.min_mag = min_mag self.max_mag = max_mag self.reversal_prob = reversal_prob self.img_border_value = img_border_value self.mask_border_value = mask_border_value self.seg_ignore_label = seg_ignore_label self.interpolation = interpolation def _transform_img(self, results: dict, mag: float) -> None: """Transform the image.""" pass def _transform_masks(self, results: dict, mag: float) -> None: """Transform the masks.""" pass def _transform_seg(self, results: dict, mag: float) -> None: """Transform the segmentation map.""" pass def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray: """Get the homography matrix for the geometric transformation.""" return np.eye(3, dtype=np.float32) def _transform_bboxes(self, results: dict, mag: float) -> None: """Transform the bboxes.""" results['gt_bboxes'].project_(self.homography_matrix) results['gt_bboxes'].clip_(results['img_shape']) def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the geometric transformation.""" if results.get('homography_matrix', None) is None: results['homography_matrix'] = self.homography_matrix else: results['homography_matrix'] = self.homography_matrix @ results[ 'homography_matrix'] @cache_randomness def _random_disable(self): """Randomly disable the transform.""" return np.random.rand() > self.prob @cache_randomness def _get_mag(self): """Get the magnitude of the transform.""" mag = level_to_mag(self.level, self.min_mag, self.max_mag) return -mag if np.random.rand() > self.reversal_prob else mag @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function for images, 
bounding boxes, masks and semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Transformed results. """ if self._random_disable(): return results mag = self._get_mag() self.homography_matrix = self._get_homography_matrix(results, mag) self._record_homography_matrix(results) self._transform_img(results, mag) if results.get('gt_bboxes', None) is not None: self._transform_bboxes(results, mag) if results.get('gt_masks', None) is not None: self._transform_masks(results, mag) if results.get('gt_seg_map', None) is not None: self._transform_seg(results, mag) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(prob={self.prob}, ' repr_str += f'level={self.level}, ' repr_str += f'min_mag={self.min_mag}, ' repr_str += f'max_mag={self.max_mag}, ' repr_str += f'reversal_prob={self.reversal_prob}, ' repr_str += f'img_border_value={self.img_border_value}, ' repr_str += f'mask_border_value={self.mask_border_value}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class ShearX(GeomTransform): """Shear the images, bboxes, masks and segmentation map horizontally. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - homography_matrix Args: prob (float): The probability for performing Shear and should be in range [0, 1]. Defaults to 1.0. level (int, optional): The level should be in range [0, _MAX_LEVEL]. If level is None, it will generate from [0, _MAX_LEVEL] randomly. Defaults to None. min_mag (float): The minimum angle for the horizontal shear. Defaults to 0.0. max_mag (float): The maximum angle for the horizontal shear. Defaults to 30.0. 
reversal_prob (float): The probability that reverses the horizontal shear magnitude. Should be in range [0,1]. Defaults to 0.5. img_border_value (int | float | tuple): The filled values for image border. If float, the same fill value will be used for all the three channels of image. If tuple, it should be 3 elements. Defaults to 128. mask_border_value (int): The fill value used for masks. Defaults to 0. seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Defaults to 255. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, prob: float = 1.0, level: Optional[int] = None, min_mag: float = 0.0, max_mag: float = 30.0, reversal_prob: float = 0.5, img_border_value: Union[int, float, tuple] = 128, mask_border_value: int = 0, seg_ignore_label: int = 255, interpolation: str = 'bilinear') -> None: assert 0. <= min_mag <= 90., \ f'min_mag angle for ShearX should be ' \ f'in range [0, 90], got {min_mag}.' assert 0. <= max_mag <= 90., \ f'max_mag angle for ShearX should be ' \ f'in range [0, 90], got {max_mag}.' 
super().__init__( prob=prob, level=level, min_mag=min_mag, max_mag=max_mag, reversal_prob=reversal_prob, img_border_value=img_border_value, mask_border_value=mask_border_value, seg_ignore_label=seg_ignore_label, interpolation=interpolation) @cache_randomness def _get_mag(self): """Get the magnitude of the transform.""" mag = level_to_mag(self.level, self.min_mag, self.max_mag) mag = np.tan(mag * np.pi / 180) return -mag if np.random.rand() > self.reversal_prob else mag def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray: """Get the homography matrix for ShearX.""" return np.array([[1, mag, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32) def _transform_img(self, results: dict, mag: float) -> None: """Shear the image horizontally.""" results['img'] = mmcv.imshear( results['img'], mag, direction='horizontal', border_value=self.img_border_value, interpolation=self.interpolation) def _transform_masks(self, results: dict, mag: float) -> None: """Shear the masks horizontally.""" results['gt_masks'] = results['gt_masks'].shear( results['img_shape'], mag, direction='horizontal', border_value=self.mask_border_value, interpolation=self.interpolation) def _transform_seg(self, results: dict, mag: float) -> None: """Shear the segmentation map horizontally.""" results['gt_seg_map'] = mmcv.imshear( results['gt_seg_map'], mag, direction='horizontal', border_value=self.seg_ignore_label, interpolation='nearest') @TRANSFORMS.register_module() class ShearY(GeomTransform): """Shear the images, bboxes, masks and segmentation map vertically. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - homography_matrix Args: prob (float): The probability for performing ShearY and should be in range [0, 1]. Defaults to 1.0. level (int, optional): The level should be in range [0,_MAX_LEVEL]. 
If level is None, it will generate from [0, _MAX_LEVEL] randomly. Defaults to None. min_mag (float): The minimum angle for the vertical shear. Defaults to 0.0. max_mag (float): The maximum angle for the vertical shear. Defaults to 30.0. reversal_prob (float): The probability that reverses the vertical shear magnitude. Should be in range [0,1]. Defaults to 0.5. img_border_value (int | float | tuple): The filled values for image border. If float, the same fill value will be used for all the three channels of image. If tuple, it should be 3 elements. Defaults to 128. mask_border_value (int): The fill value used for masks. Defaults to 0. seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Defaults to 255. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, prob: float = 1.0, level: Optional[int] = None, min_mag: float = 0.0, max_mag: float = 30., reversal_prob: float = 0.5, img_border_value: Union[int, float, tuple] = 128, mask_border_value: int = 0, seg_ignore_label: int = 255, interpolation: str = 'bilinear') -> None: assert 0. <= min_mag <= 90., \ f'min_mag angle for ShearY should be ' \ f'in range [0, 90], got {min_mag}.' assert 0. <= max_mag <= 90., \ f'max_mag angle for ShearY should be ' \ f'in range [0, 90], got {max_mag}.' 
super().__init__( prob=prob, level=level, min_mag=min_mag, max_mag=max_mag, reversal_prob=reversal_prob, img_border_value=img_border_value, mask_border_value=mask_border_value, seg_ignore_label=seg_ignore_label, interpolation=interpolation) @cache_randomness def _get_mag(self): """Get the magnitude of the transform.""" mag = level_to_mag(self.level, self.min_mag, self.max_mag) mag = np.tan(mag * np.pi / 180) return -mag if np.random.rand() > self.reversal_prob else mag def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray: """Get the homography matrix for ShearY.""" return np.array([[1, 0, 0], [mag, 1, 0], [0, 0, 1]], dtype=np.float32) def _transform_img(self, results: dict, mag: float) -> None: """Shear the image vertically.""" results['img'] = mmcv.imshear( results['img'], mag, direction='vertical', border_value=self.img_border_value, interpolation=self.interpolation) def _transform_masks(self, results: dict, mag: float) -> None: """Shear the masks vertically.""" results['gt_masks'] = results['gt_masks'].shear( results['img_shape'], mag, direction='vertical', border_value=self.mask_border_value, interpolation=self.interpolation) def _transform_seg(self, results: dict, mag: float) -> None: """Shear the segmentation map vertically.""" results['gt_seg_map'] = mmcv.imshear( results['gt_seg_map'], mag, direction='vertical', border_value=self.seg_ignore_label, interpolation='nearest') @TRANSFORMS.register_module() class Rotate(GeomTransform): """Rotate the images, bboxes, masks and segmentation map. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - homography_matrix Args: prob (float): The probability for perform transformation and should be in range 0 to 1. Defaults to 1.0. level (int, optional): The level should be in range [0, _MAX_LEVEL]. 
If level is None, it will generate from [0, _MAX_LEVEL] randomly. Defaults to None. min_mag (float): The maximum angle for rotation. Defaults to 0.0. max_mag (float): The maximum angle for rotation. Defaults to 30.0. reversal_prob (float): The probability that reverses the rotation magnitude. Should be in range [0,1]. Defaults to 0.5. img_border_value (int | float | tuple): The filled values for image border. If float, the same fill value will be used for all the three channels of image. If tuple, it should be 3 elements. Defaults to 128. mask_border_value (int): The fill value used for masks. Defaults to 0. seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Defaults to 255. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, prob: float = 1.0, level: Optional[int] = None, min_mag: float = 0.0, max_mag: float = 30.0, reversal_prob: float = 0.5, img_border_value: Union[int, float, tuple] = 128, mask_border_value: int = 0, seg_ignore_label: int = 255, interpolation: str = 'bilinear') -> None: assert 0. <= min_mag <= 180., \ f'min_mag for Rotate should be in range [0,180], got {min_mag}.' assert 0. <= max_mag <= 180., \ f'max_mag for Rotate should be in range [0,180], got {max_mag}.' 
super().__init__( prob=prob, level=level, min_mag=min_mag, max_mag=max_mag, reversal_prob=reversal_prob, img_border_value=img_border_value, mask_border_value=mask_border_value, seg_ignore_label=seg_ignore_label, interpolation=interpolation) def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray: """Get the homography matrix for Rotate.""" img_shape = results['img_shape'] center = ((img_shape[1] - 1) * 0.5, (img_shape[0] - 1) * 0.5) cv2_rotation_matrix = cv2.getRotationMatrix2D(center, -mag, 1.0) return np.concatenate( [cv2_rotation_matrix, np.array([0, 0, 1]).reshape((1, 3))]).astype(np.float32) def _transform_img(self, results: dict, mag: float) -> None: """Rotate the image.""" results['img'] = mmcv.imrotate( results['img'], mag, border_value=self.img_border_value, interpolation=self.interpolation) def _transform_masks(self, results: dict, mag: float) -> None: """Rotate the masks.""" results['gt_masks'] = results['gt_masks'].rotate( results['img_shape'], mag, border_value=self.mask_border_value, interpolation=self.interpolation) def _transform_seg(self, results: dict, mag: float) -> None: """Rotate the segmentation map.""" results['gt_seg_map'] = mmcv.imrotate( results['gt_seg_map'], mag, border_value=self.seg_ignore_label, interpolation='nearest') @TRANSFORMS.register_module() class TranslateX(GeomTransform): """Translate the images, bboxes, masks and segmentation map horizontally. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - homography_matrix Args: prob (float): The probability for perform transformation and should be in range 0 to 1. Defaults to 1.0. level (int, optional): The level should be in range [0, _MAX_LEVEL]. If level is None, it will generate from [0, _MAX_LEVEL] randomly. Defaults to None. 
min_mag (float): The minimum pixel's offset ratio for horizontal translation. Defaults to 0.0. max_mag (float): The maximum pixel's offset ratio for horizontal translation. Defaults to 0.1. reversal_prob (float): The probability that reverses the horizontal translation magnitude. Should be in range [0,1]. Defaults to 0.5. img_border_value (int | float | tuple): The filled values for image border. If float, the same fill value will be used for all the three channels of image. If tuple, it should be 3 elements. Defaults to 128. mask_border_value (int): The fill value used for masks. Defaults to 0. seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Defaults to 255. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, prob: float = 1.0, level: Optional[int] = None, min_mag: float = 0.0, max_mag: float = 0.1, reversal_prob: float = 0.5, img_border_value: Union[int, float, tuple] = 128, mask_border_value: int = 0, seg_ignore_label: int = 255, interpolation: str = 'bilinear') -> None: assert 0. <= min_mag <= 1., \ f'min_mag ratio for TranslateX should be ' \ f'in range [0, 1], got {min_mag}.' assert 0. <= max_mag <= 1., \ f'max_mag ratio for TranslateX should be ' \ f'in range [0, 1], got {max_mag}.' 
super().__init__( prob=prob, level=level, min_mag=min_mag, max_mag=max_mag, reversal_prob=reversal_prob, img_border_value=img_border_value, mask_border_value=mask_border_value, seg_ignore_label=seg_ignore_label, interpolation=interpolation) def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray: """Get the homography matrix for TranslateX.""" mag = int(results['img_shape'][1] * mag) return np.array([[1, 0, mag], [0, 1, 0], [0, 0, 1]], dtype=np.float32) def _transform_img(self, results: dict, mag: float) -> None: """Translate the image horizontally.""" mag = int(results['img_shape'][1] * mag) results['img'] = mmcv.imtranslate( results['img'], mag, direction='horizontal', border_value=self.img_border_value, interpolation=self.interpolation) def _transform_masks(self, results: dict, mag: float) -> None: """Translate the masks horizontally.""" mag = int(results['img_shape'][1] * mag) results['gt_masks'] = results['gt_masks'].translate( results['img_shape'], mag, direction='horizontal', border_value=self.mask_border_value, interpolation=self.interpolation) def _transform_seg(self, results: dict, mag: float) -> None: """Translate the segmentation map horizontally.""" mag = int(results['img_shape'][1] * mag) results['gt_seg_map'] = mmcv.imtranslate( results['gt_seg_map'], mag, direction='horizontal', border_value=self.seg_ignore_label, interpolation='nearest') @TRANSFORMS.register_module() class TranslateY(GeomTransform): """Translate the images, bboxes, masks and segmentation map vertically. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - homography_matrix Args: prob (float): The probability for perform transformation and should be in range 0 to 1. Defaults to 1.0. level (int, optional): The level should be in range [0, _MAX_LEVEL]. 
If level is None, it will generate from [0, _MAX_LEVEL] randomly. Defaults to None. min_mag (float): The minimum pixel's offset ratio for vertical translation. Defaults to 0.0. max_mag (float): The maximum pixel's offset ratio for vertical translation. Defaults to 0.1. reversal_prob (float): The probability that reverses the vertical translation magnitude. Should be in range [0,1]. Defaults to 0.5. img_border_value (int | float | tuple): The filled values for image border. If float, the same fill value will be used for all the three channels of image. If tuple, it should be 3 elements. Defaults to 128. mask_border_value (int): The fill value used for masks. Defaults to 0. seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Defaults to 255. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, prob: float = 1.0, level: Optional[int] = None, min_mag: float = 0.0, max_mag: float = 0.1, reversal_prob: float = 0.5, img_border_value: Union[int, float, tuple] = 128, mask_border_value: int = 0, seg_ignore_label: int = 255, interpolation: str = 'bilinear') -> None: assert 0. <= min_mag <= 1., \ f'min_mag ratio for TranslateY should be ' \ f'in range [0,1], got {min_mag}.' assert 0. <= max_mag <= 1., \ f'max_mag ratio for TranslateY should be ' \ f'in range [0,1], got {max_mag}.' 
super().__init__( prob=prob, level=level, min_mag=min_mag, max_mag=max_mag, reversal_prob=reversal_prob, img_border_value=img_border_value, mask_border_value=mask_border_value, seg_ignore_label=seg_ignore_label, interpolation=interpolation) def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray: """Get the homography matrix for TranslateY.""" mag = int(results['img_shape'][0] * mag) return np.array([[1, 0, 0], [0, 1, mag], [0, 0, 1]], dtype=np.float32) def _transform_img(self, results: dict, mag: float) -> None: """Translate the image vertically.""" mag = int(results['img_shape'][0] * mag) results['img'] = mmcv.imtranslate( results['img'], mag, direction='vertical', border_value=self.img_border_value, interpolation=self.interpolation) def _transform_masks(self, results: dict, mag: float) -> None: """Translate masks vertically.""" mag = int(results['img_shape'][0] * mag) results['gt_masks'] = results['gt_masks'].translate( results['img_shape'], mag, direction='vertical', border_value=self.mask_border_value, interpolation=self.interpolation) def _transform_seg(self, results: dict, mag: float) -> None: """Translate segmentation map vertically.""" mag = int(results['img_shape'][0] * mag) results['gt_seg_map'] = mmcv.imtranslate( results['gt_seg_map'], mag, direction='vertical', border_value=self.seg_ignore_label, interpolation='nearest')
30,322
39.162914
79
py
ERD
ERD-main/mmdet/datasets/transforms/wrappers.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from typing import Callable, Dict, List, Optional, Union import numpy as np from mmcv.transforms import BaseTransform, Compose from mmcv.transforms.utils import cache_random_params, cache_randomness from mmdet.registry import TRANSFORMS @TRANSFORMS.register_module() class MultiBranch(BaseTransform): r"""Multiple branch pipeline wrapper. Generate multiple data-augmented versions of the same image. `MultiBranch` needs to specify the branch names of all pipelines of the dataset, perform corresponding data augmentation for the current branch, and return None for other branches, which ensures the consistency of return format across different samples. Args: branch_field (list): List of branch names. branch_pipelines (dict): Dict of different pipeline configs to be composed. Examples: >>> branch_field = ['sup', 'unsup_teacher', 'unsup_student'] >>> sup_pipeline = [ >>> dict(type='LoadImageFromFile'), >>> dict(type='LoadAnnotations', with_bbox=True), >>> dict(type='Resize', scale=(1333, 800), keep_ratio=True), >>> dict(type='RandomFlip', prob=0.5), >>> dict( >>> type='MultiBranch', >>> branch_field=branch_field, >>> sup=dict(type='PackDetInputs')) >>> ] >>> weak_pipeline = [ >>> dict(type='LoadImageFromFile'), >>> dict(type='LoadAnnotations', with_bbox=True), >>> dict(type='Resize', scale=(1333, 800), keep_ratio=True), >>> dict(type='RandomFlip', prob=0.0), >>> dict( >>> type='MultiBranch', >>> branch_field=branch_field, >>> sup=dict(type='PackDetInputs')) >>> ] >>> strong_pipeline = [ >>> dict(type='LoadImageFromFile'), >>> dict(type='LoadAnnotations', with_bbox=True), >>> dict(type='Resize', scale=(1333, 800), keep_ratio=True), >>> dict(type='RandomFlip', prob=1.0), >>> dict( >>> type='MultiBranch', >>> branch_field=branch_field, >>> sup=dict(type='PackDetInputs')) >>> ] >>> unsup_pipeline = [ >>> dict(type='LoadImageFromFile'), >>> dict(type='LoadEmptyAnnotations'), >>> dict( >>> type='MultiBranch', >>> 
branch_field=branch_field, >>> unsup_teacher=weak_pipeline, >>> unsup_student=strong_pipeline) >>> ] >>> from mmcv.transforms import Compose >>> sup_branch = Compose(sup_pipeline) >>> unsup_branch = Compose(unsup_pipeline) >>> print(sup_branch) >>> Compose( >>> LoadImageFromFile(ignore_empty=False, to_float32=False, color_type='color', imdecode_backend='cv2') # noqa >>> LoadAnnotations(with_bbox=True, with_label=True, with_mask=False, with_seg=False, poly2mask=True, imdecode_backend='cv2') # noqa >>> Resize(scale=(1333, 800), scale_factor=None, keep_ratio=True, clip_object_border=True), backend=cv2), interpolation=bilinear) # noqa >>> RandomFlip(prob=0.5, direction=horizontal) >>> MultiBranch(branch_pipelines=['sup']) >>> ) >>> print(unsup_branch) >>> Compose( >>> LoadImageFromFile(ignore_empty=False, to_float32=False, color_type='color', imdecode_backend='cv2') # noqa >>> LoadEmptyAnnotations(with_bbox=True, with_label=True, with_mask=False, with_seg=False, seg_ignore_label=255) # noqa >>> MultiBranch(branch_pipelines=['unsup_teacher', 'unsup_student']) >>> ) """ def __init__(self, branch_field: List[str], **branch_pipelines: dict) -> None: self.branch_field = branch_field self.branch_pipelines = { branch: Compose(pipeline) for branch, pipeline in branch_pipelines.items() } def transform(self, results: dict) -> dict: """Transform function to apply transforms sequentially. Args: results (dict): Result dict from loading pipeline. Returns: dict: - 'inputs' (Dict[str, obj:`torch.Tensor`]): The forward data of models from different branches. - 'data_sample' (Dict[str,obj:`DetDataSample`]): The annotation info of the sample from different branches. """ multi_results = {} for branch in self.branch_field: multi_results[branch] = {'inputs': None, 'data_samples': None} for branch, pipeline in self.branch_pipelines.items(): branch_results = pipeline(copy.deepcopy(results)) # If one branch pipeline returns None, # it will sample another data from dataset. 
if branch_results is None: return None multi_results[branch] = branch_results format_results = {} for branch, results in multi_results.items(): for key in results.keys(): if format_results.get(key, None) is None: format_results[key] = {branch: results[key]} else: format_results[key][branch] = results[key] return format_results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(branch_pipelines={list(self.branch_pipelines.keys())})' return repr_str @TRANSFORMS.register_module() class RandomOrder(Compose): """Shuffle the transform Sequence.""" @cache_randomness def _random_permutation(self): return np.random.permutation(len(self.transforms)) def transform(self, results: Dict) -> Optional[Dict]: """Transform function to apply transforms in random order. Args: results (dict): A result dict contains the results to transform. Returns: dict or None: Transformed results. """ inds = self._random_permutation() for idx in inds: t = self.transforms[idx] results = t(results) if results is None: return None return results def __repr__(self): """Compute the string representation.""" format_string = self.__class__.__name__ + '(' for t in self.transforms: format_string += f'{t.__class__.__name__}, ' format_string += ')' return format_string @TRANSFORMS.register_module() class ProposalBroadcaster(BaseTransform): """A transform wrapper to apply the wrapped transforms to process both `gt_bboxes` and `proposals` without adding any codes. It will do the following steps: 1. Scatter the broadcasting targets to a list of inputs of the wrapped transforms. The type of the list should be list[dict, dict], which the first is the original inputs, the second is the processing results that `gt_bboxes` being rewritten by the `proposals`. 2. Apply ``self.transforms``, with same random parameters, which is sharing with a context manager. The type of the outputs is a list[dict, dict]. 3. 
Gather the outputs, update the `proposals` in the first item of the outputs with the `gt_bboxes` in the second . Args: transforms (list, optional): Sequence of transform object or config dict to be wrapped. Defaults to []. Note: The `TransformBroadcaster` in MMCV can achieve the same operation as `ProposalBroadcaster`, but need to set more complex parameters. Examples: >>> pipeline = [ >>> dict(type='LoadImageFromFile'), >>> dict(type='LoadProposals', num_max_proposals=2000), >>> dict(type='LoadAnnotations', with_bbox=True), >>> dict( >>> type='ProposalBroadcaster', >>> transforms=[ >>> dict(type='Resize', scale=(1333, 800), >>> keep_ratio=True), >>> dict(type='RandomFlip', prob=0.5), >>> ]), >>> dict(type='PackDetInputs')] """ def __init__(self, transforms: List[Union[dict, Callable]] = []) -> None: self.transforms = Compose(transforms) def transform(self, results: dict) -> dict: """Apply wrapped transform functions to process both `gt_bboxes` and `proposals`. Args: results (dict): Result dict from loading pipeline. Returns: dict: Updated result dict. """ assert results.get('proposals', None) is not None, \ '`proposals` should be in the results, please delete ' \ '`ProposalBroadcaster` in your configs, or check whether ' \ 'you have load proposals successfully.' inputs = self._process_input(results) outputs = self._apply_transforms(inputs) outputs = self._process_output(outputs) return outputs def _process_input(self, data: dict) -> list: """Scatter the broadcasting targets to a list of inputs of the wrapped transforms. Args: data (dict): The original input data. Returns: list[dict]: A list of input data. """ cp_data = copy.deepcopy(data) cp_data['gt_bboxes'] = cp_data['proposals'] scatters = [data, cp_data] return scatters def _apply_transforms(self, inputs: list) -> list: """Apply ``self.transforms``. Args: inputs (list[dict, dict]): list of input data. Returns: list[dict]: The output of the wrapped pipeline. 
""" assert len(inputs) == 2 ctx = cache_random_params with ctx(self.transforms): output_scatters = [self.transforms(_input) for _input in inputs] return output_scatters def _process_output(self, output_scatters: list) -> dict: """Gathering and renaming data items. Args: output_scatters (list[dict, dict]): The output of the wrapped pipeline. Returns: dict: Updated result dict. """ assert isinstance(output_scatters, list) and \ isinstance(output_scatters[0], dict) and \ len(output_scatters) == 2 outputs = output_scatters[0] outputs['proposals'] = output_scatters[1]['gt_bboxes'] return outputs
10,689
37.453237
148
py
ERD
ERD-main/mmdet/datasets/transforms/augment_wrappers.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import List, Optional, Union import numpy as np from mmcv.transforms import RandomChoice from mmcv.transforms.utils import cache_randomness from mmengine.config import ConfigDict from mmdet.registry import TRANSFORMS # AutoAugment uses reinforcement learning to search for # some widely useful data augmentation strategies, # here we provide AUTOAUG_POLICIES_V0. # For AUTOAUG_POLICIES_V0, each tuple is an augmentation # operation of the form (operation, probability, magnitude). # Each element in policies is a policy that will be applied # sequentially on the image. # RandAugment defines a data augmentation search space, RANDAUG_SPACE, # sampling 1~3 data augmentations each time, and # setting the magnitude of each data augmentation randomly, # which will be applied sequentially on the image. _MAX_LEVEL = 10 AUTOAUG_POLICIES_V0 = [ [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], [('Color', 0.4, 9), ('Equalize', 0.6, 3)], [('Color', 0.4, 1), ('Rotate', 0.6, 8)], [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], [('Color', 0.2, 0), ('Equalize', 0.8, 8)], [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], [('Color', 0.6, 1), ('Equalize', 1.0, 2)], [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], [('Color', 0.4, 7), ('Equalize', 0.6, 0)], [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], [('Solarize', 0.6, 8), ('Color', 0.6, 9)], [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], [('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)], [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], [('ShearY', 0.8, 0), ('Color', 0.6, 4)], [('Color', 1.0, 0), ('Rotate', 0.6, 2)], [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], [('Color', 0.8, 6), 
('Rotate', 0.4, 5)], ] def policies_v0(): """Autoaugment policies that was used in AutoAugment Paper.""" policies = list() for policy_args in AUTOAUG_POLICIES_V0: policy = list() for args in policy_args: policy.append(dict(type=args[0], prob=args[1], level=args[2])) policies.append(policy) return policies RANDAUG_SPACE = [[dict(type='AutoContrast')], [dict(type='Equalize')], [dict(type='Invert')], [dict(type='Rotate')], [dict(type='Posterize')], [dict(type='Solarize')], [dict(type='SolarizeAdd')], [dict(type='Color')], [dict(type='Contrast')], [dict(type='Brightness')], [dict(type='Sharpness')], [dict(type='ShearX')], [dict(type='ShearY')], [dict(type='TranslateX')], [dict(type='TranslateY')]] def level_to_mag(level: Optional[int], min_mag: float, max_mag: float) -> float: """Map from level to magnitude.""" if level is None: return round(np.random.rand() * (max_mag - min_mag) + min_mag, 1) else: return round(level / _MAX_LEVEL * (max_mag - min_mag) + min_mag, 1) @TRANSFORMS.register_module() class AutoAugment(RandomChoice): """Auto augmentation. This data augmentation is proposed in `AutoAugment: Learning Augmentation Policies from Data <https://arxiv.org/abs/1805.09501>`_ and in `Learning Data Augmentation Strategies for Object Detection <https://arxiv.org/pdf/1906.11172>`_. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_bboxes_labels - gt_masks - gt_ignore_flags - gt_seg_map Added Keys: - homography_matrix Args: policies (List[List[Union[dict, ConfigDict]]]): The policies of auto augmentation.Each policy in ``policies`` is a specific augmentation policy, and is composed by several augmentations. When AutoAugment is called, a random policy in ``policies`` will be selected to augment images. Defaults to policy_v0(). 
prob (list[float], optional): The probabilities associated with each policy. The length should be equal to the policy number and the sum should be 1. If not given, a uniform distribution will be assumed. Defaults to None. Examples: >>> policies = [ >>> [ >>> dict(type='Sharpness', prob=0.0, level=8), >>> dict(type='ShearX', prob=0.4, level=0,) >>> ], >>> [ >>> dict(type='Rotate', prob=0.6, level=10), >>> dict(type='Color', prob=1.0, level=6) >>> ] >>> ] >>> augmentation = AutoAugment(policies) >>> img = np.ones(100, 100, 3) >>> gt_bboxes = np.ones(10, 4) >>> results = dict(img=img, gt_bboxes=gt_bboxes) >>> results = augmentation(results) """ def __init__(self, policies: List[List[Union[dict, ConfigDict]]] = policies_v0(), prob: Optional[List[float]] = None) -> None: assert isinstance(policies, list) and len(policies) > 0, \ 'Policies must be a non-empty list.' for policy in policies: assert isinstance(policy, list) and len(policy) > 0, \ 'Each policy in policies must be a non-empty list.' for augment in policy: assert isinstance(augment, dict) and 'type' in augment, \ 'Each specific augmentation must be a dict with key' \ ' "type".' super().__init__(transforms=policies, prob=prob) self.policies = policies def __repr__(self) -> str: return f'{self.__class__.__name__}(policies={self.policies}, ' \ f'prob={self.prob})' @TRANSFORMS.register_module() class RandAugment(RandomChoice): """Rand augmentation. This data augmentation is proposed in `RandAugment: Practical automated data augmentation with a reduced search space <https://arxiv.org/abs/1909.13719>`_. 
Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_bboxes_labels - gt_masks - gt_ignore_flags - gt_seg_map Added Keys: - homography_matrix Args: aug_space (List[List[Union[dict, ConfigDict]]]): The augmentation space of rand augmentation. Each augmentation transform in ``aug_space`` is a specific transform, and is composed by several augmentations. When RandAugment is called, a random transform in ``aug_space`` will be selected to augment images. Defaults to aug_space. aug_num (int): Number of augmentation to apply equentially. Defaults to 2. prob (list[float], optional): The probabilities associated with each augmentation. The length should be equal to the augmentation space and the sum should be 1. If not given, a uniform distribution will be assumed. Defaults to None. Examples: >>> aug_space = [ >>> dict(type='Sharpness'), >>> dict(type='ShearX'), >>> dict(type='Color'), >>> ], >>> augmentation = RandAugment(aug_space) >>> img = np.ones(100, 100, 3) >>> gt_bboxes = np.ones(10, 4) >>> results = dict(img=img, gt_bboxes=gt_bboxes) >>> results = augmentation(results) """ def __init__(self, aug_space: List[Union[dict, ConfigDict]] = RANDAUG_SPACE, aug_num: int = 2, prob: Optional[List[float]] = None) -> None: assert isinstance(aug_space, list) and len(aug_space) > 0, \ 'Augmentation space must be a non-empty list.' for aug in aug_space: assert isinstance(aug, list) and len(aug) == 1, \ 'Each augmentation in aug_space must be a list.' for transform in aug: assert isinstance(transform, dict) and 'type' in transform, \ 'Each specific transform must be a dict with key' \ ' "type".' 
super().__init__(transforms=aug_space, prob=prob) self.aug_space = aug_space self.aug_num = aug_num @cache_randomness def random_pipeline_index(self): indices = np.arange(len(self.transforms)) return np.random.choice( indices, self.aug_num, p=self.prob, replace=False) def transform(self, results: dict) -> dict: """Transform function to use RandAugment. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with RandAugment. """ for idx in self.random_pipeline_index(): results = self.transforms[idx](results) return results def __repr__(self) -> str: return f'{self.__class__.__name__}(' \ f'aug_space={self.aug_space}, '\ f'aug_num={self.aug_num}, ' \ f'prob={self.prob})'
9,738
35.750943
79
py
ERD
ERD-main/mmdet/datasets/transforms/transforms.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import inspect import math from typing import List, Optional, Sequence, Tuple, Union import cv2 import mmcv import numpy as np from mmcv.image.geometric import _scale_size from mmcv.transforms import BaseTransform from mmcv.transforms import Pad as MMCV_Pad from mmcv.transforms import RandomFlip as MMCV_RandomFlip from mmcv.transforms import Resize as MMCV_Resize from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness from mmengine.dataset import BaseDataset from mmengine.utils import is_str from numpy import random from mmdet.registry import TRANSFORMS from mmdet.structures.bbox import HorizontalBoxes, autocast_box_type from mmdet.structures.mask import BitmapMasks, PolygonMasks from mmdet.utils import log_img_scale try: from imagecorruptions import corrupt except ImportError: corrupt = None try: import albumentations from albumentations import Compose except ImportError: albumentations = None Compose = None Number = Union[int, float] @TRANSFORMS.register_module() class Resize(MMCV_Resize): """Resize images & bbox & seg. This transform resizes the input image according to ``scale`` or ``scale_factor``. Bboxes, masks, and seg map are then resized with the same scale factor. if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to resize. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Added Keys: - scale - scale_factor - keep_ratio - homography_matrix Args: scale (int or tuple): Images scales for resizing. Defaults to None scale_factor (float or tuple[float]): Scale factors for resizing. Defaults to None. keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. Defaults to False. clip_object_border (bool): Whether to clip the objects outside the border of the image. 
In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def _resize_masks(self, results: dict) -> None: """Resize masks with ``results['scale']``""" if results.get('gt_masks', None) is not None: if self.keep_ratio: results['gt_masks'] = results['gt_masks'].rescale( results['scale']) else: results['gt_masks'] = results['gt_masks'].resize( results['img_shape']) def _resize_bboxes(self, results: dict) -> None: """Resize bounding boxes with ``results['scale_factor']``.""" if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].rescale_(results['scale_factor']) if self.clip_object_border: results['gt_bboxes'].clip_(results['img_shape']) def _resize_seg(self, results: dict) -> None: """Resize semantic segmentation map with ``results['scale']``.""" if results.get('gt_seg_map', None) is not None: if self.keep_ratio: gt_seg = mmcv.imrescale( results['gt_seg_map'], results['scale'], interpolation='nearest', backend=self.backend) else: gt_seg = mmcv.imresize( results['gt_seg_map'], results['scale'], interpolation='nearest', backend=self.backend) results['gt_seg_map'] = gt_seg def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the Resize.""" w_scale, h_scale = results['scale_factor'] homography_matrix = np.array( [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] 
@autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to resize images, bounding boxes and semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map', 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys are updated in result dict. """ if self.scale: results['scale'] = self.scale else: img_shape = results['img'].shape[:2] results['scale'] = _scale_size(img_shape[::-1], self.scale_factor) self._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) self._record_homography_matrix(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(scale={self.scale}, ' repr_str += f'scale_factor={self.scale_factor}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'clip_object_border={self.clip_object_border}), ' repr_str += f'backend={self.backend}), ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class FixShapeResize(Resize): """Resize images & bbox & seg to the specified size. This transform resizes the input image according to ``width`` and ``height``. Bboxes, masks, and seg map are then resized with the same parameters. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Added Keys: - scale - scale_factor - keep_ratio - homography_matrix Args: width (int): width for resizing. height (int): height for resizing. Defaults to None. pad_val (Number | dict[str, Number], optional): Padding value for if the pad_mode is "constant". If it is a single number, the value to pad the image is the number and to pad the semantic segmentation map is 255. 
            If it is a dict,
            it should have the following keys:

            - img: The value to pad the image.
            - seg: The value to pad the semantic segmentation map.
            Defaults to dict(img=0, seg=255).
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image. Defaults to False.
        clip_object_border (bool): Whether to clip the objects
            outside the border of the image. In some dataset like MOT17, the
            gt bboxes are allowed to cross the border of images. Therefore,
            we don't need to clip the gt bboxes in these cases.
            Defaults to True.
        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
            These two backends generates slightly different results.
            Defaults to 'cv2'.
        interpolation (str): Interpolation method, accepted values are
            "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
            backend, "nearest", "bilinear" for 'pillow' backend.
            Defaults to 'bilinear'.
    """

    # NOTE(review): this __init__ deliberately does not call
    # super().__init__(); it sets the attributes the parent methods read
    # (scale, backend, interpolation, keep_ratio, clip_object_border) by hand.
    def __init__(self,
                 width: int,
                 height: int,
                 pad_val: Union[Number, dict] = dict(img=0, seg=255),
                 keep_ratio: bool = False,
                 clip_object_border: bool = True,
                 backend: str = 'cv2',
                 interpolation: str = 'bilinear') -> None:
        assert width is not None and height is not None, (
            '`width` and'
            '`height` can not be `None`')
        self.width = width
        self.height = height
        self.scale = (width, height)
        self.backend = backend
        self.interpolation = interpolation
        self.keep_ratio = keep_ratio
        self.clip_object_border = clip_object_border
        if keep_ratio is True:
            # padding to the fixed size when keep_ratio=True
            self.pad_transform = Pad(size=self.scale, pad_val=pad_val)

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Transform function to resize images, bounding boxes and semantic
        segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',
            'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys
            are updated in result dict.
""" img = results['img'] h, w = img.shape[:2] if self.keep_ratio: scale_factor = min(self.width / w, self.height / h) results['scale_factor'] = (scale_factor, scale_factor) real_w, real_h = int(w * float(scale_factor) + 0.5), int(h * float(scale_factor) + 0.5) img, scale_factor = mmcv.imrescale( results['img'], (real_w, real_h), interpolation=self.interpolation, return_scale=True, backend=self.backend) # the w_scale and h_scale has minor difference # a real fix should be done in the mmcv.imrescale in the future results['img'] = img results['img_shape'] = img.shape[:2] results['keep_ratio'] = self.keep_ratio results['scale'] = (real_w, real_h) else: results['scale'] = (self.width, self.height) results['scale_factor'] = (self.width / w, self.height / h) super()._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) self._record_homography_matrix(results) if self.keep_ratio: self.pad_transform(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(width={self.width}, height={self.height}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'clip_object_border={self.clip_object_border}), ' repr_str += f'backend={self.backend}), ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class RandomFlip(MMCV_RandomFlip): """Flip the image & bbox & mask & segmentation map. Added or Updated keys: flip, flip_direction, img, gt_bboxes, and gt_seg_map. There are 3 flip modes: - ``prob`` is float, ``direction`` is string: the image will be ``direction``ly flipped with probability of ``prob`` . E.g., ``prob=0.5``, ``direction='horizontal'``, then image will be horizontally flipped with probability of 0.5. - ``prob`` is float, ``direction`` is list of string: the image will be ``direction[i]``ly flipped with probability of ``prob/len(direction)``. 
      E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,
      then image will be horizontally flipped with probability of 0.25,
      vertically with probability of 0.25.
    - ``prob`` is list of float, ``direction`` is list of string:
      given ``len(prob) == len(direction)``, the image will
      be ``direction[i]``ly flipped with probability of ``prob[i]``.
      E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',
      'vertical']``, then image will be horizontally flipped with
      probability of 0.3, vertically with probability of 0.5.

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_masks (BitmapMasks | PolygonMasks) (optional)
    - gt_seg_map (np.uint8) (optional)

    Modified Keys:

    - img
    - gt_bboxes
    - gt_masks
    - gt_seg_map

    Added Keys:

    - flip
    - flip_direction
    - homography_matrix

    Args:
        prob (float | list[float], optional): The flipping probability.
            Defaults to None.
        direction(str | list[str]): The flipping direction. Options
            are 'horizontal', 'vertical', 'diagonal'. If input is a list,
            the length must equal ``prob``. Each element in ``prob``
            indicates the flip probability of corresponding direction.
            Defaults to 'horizontal'.
""" def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the RandomFlip.""" cur_dir = results['flip_direction'] h, w = results['img'].shape[:2] if cur_dir == 'horizontal': homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]], dtype=np.float32) elif cur_dir == 'vertical': homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]], dtype=np.float32) elif cur_dir == 'diagonal': homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]], dtype=np.float32) else: homography_matrix = np.eye(3, dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] @autocast_box_type() def _flip(self, results: dict) -> None: """Flip images, bounding boxes, and semantic segmentation map.""" # flip image results['img'] = mmcv.imflip( results['img'], direction=results['flip_direction']) img_shape = results['img'].shape[:2] # flip bboxes if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].flip_(img_shape, results['flip_direction']) # flip masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].flip( results['flip_direction']) # flip segs if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = mmcv.imflip( results['gt_seg_map'], direction=results['flip_direction']) # record homography matrix for flip self._record_homography_matrix(results) @TRANSFORMS.register_module() class RandomShift(BaseTransform): """Shift the image and box given shift pixels and probability. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) - gt_bboxes_labels (np.int64) - gt_ignore_flags (bool) (optional) Modified Keys: - img - gt_bboxes - gt_bboxes_labels - gt_ignore_flags (bool) (optional) Args: prob (float): Probability of shifts. Defaults to 0.5. max_shift_px (int): The max pixels for shifting. Defaults to 32. 
        filter_thr_px (int): The width and height threshold for filtering.
            The bbox and the rest of the targets below the width and height
            threshold will be filtered. Defaults to 1.
    """

    def __init__(self,
                 prob: float = 0.5,
                 max_shift_px: int = 32,
                 filter_thr_px: int = 1) -> None:
        assert 0 <= prob <= 1
        assert max_shift_px >= 0
        self.prob = prob
        self.max_shift_px = max_shift_px
        self.filter_thr_px = int(filter_thr_px)

    @cache_randomness
    def _random_prob(self) -> float:
        # Cached so repeated calls within one transform share the same draw.
        return random.uniform(0, 1)

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Transform function to random shift images, bounding boxes.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Shift results.
        """
        if self._random_prob() < self.prob:
            img_shape = results['img'].shape[:2]

            # Independent horizontal/vertical shifts in pixels.
            random_shift_x = random.randint(-self.max_shift_px,
                                            self.max_shift_px)
            random_shift_y = random.randint(-self.max_shift_px,
                                            self.max_shift_px)
            # new_* is the paste offset in the shifted image, ori_* the read
            # offset in the original image (one of each pair is always 0).
            new_x = max(0, random_shift_x)
            ori_x = max(0, -random_shift_x)
            new_y = max(0, random_shift_y)
            ori_y = max(0, -random_shift_y)

            # TODO: support mask and semantic segmentation maps.
            bboxes = results['gt_bboxes'].clone()
            bboxes.translate_([random_shift_x, random_shift_y])

            # clip border
            bboxes.clip_(img_shape)

            # remove invalid bboxes
            valid_inds = (bboxes.widths > self.filter_thr_px).numpy() & (
                bboxes.heights > self.filter_thr_px).numpy()
            # If the shift does not contain any gt-bbox area, skip this
            # image.
            if not valid_inds.any():
                return results
            bboxes = bboxes[valid_inds]
            results['gt_bboxes'] = bboxes
            results['gt_bboxes_labels'] = results['gt_bboxes_labels'][
                valid_inds]

            if results.get('gt_ignore_flags', None) is not None:
                results['gt_ignore_flags'] = \
                    results['gt_ignore_flags'][valid_inds]

            # shift img: paste the overlapping region of the original image
            # onto a zero canvas at the shifted position.
            img = results['img']
            new_img = np.zeros_like(img)
            img_h, img_w = img.shape[:2]
            new_h = img_h - np.abs(random_shift_y)
            new_w = img_w - np.abs(random_shift_x)
            new_img[new_y:new_y + new_h, new_x:new_x + new_w] \
                = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w]
            results['img'] = new_img

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(prob={self.prob}, '
        repr_str += f'max_shift_px={self.max_shift_px}, '
        repr_str += f'filter_thr_px={self.filter_thr_px})'
        return repr_str


@TRANSFORMS.register_module()
class Pad(MMCV_Pad):
    """Pad the image & segmentation map.

    There are three padding modes: (1) pad to a fixed size, (2) pad to the
    minimum size that is divisible by some number, and (3) pad to square.
    Also, pad to square and pad to the minimum size can be used as the same
    time.

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_masks (BitmapMasks | PolygonMasks) (optional)
    - gt_seg_map (np.uint8) (optional)

    Modified Keys:

    - img
    - img_shape
    - gt_masks
    - gt_seg_map

    Added Keys:

    - pad_shape
    - pad_fixed_size
    - pad_size_divisor

    Args:
        size (tuple, optional): Fixed padding size.
            Expected padding shape (width, height). Defaults to None.
        size_divisor (int, optional): The divisor of padded size. Defaults to
            None.
        pad_to_square (bool): Whether to pad the image into a square.
            Currently only used for YOLOX. Defaults to False.
        pad_val (Number | dict[str, Number], optional): Padding value for if
            the pad_mode is "constant". If it is a single number, the value
            to pad the image is the number and to pad the semantic
            segmentation map is 255. If it is a dict, it should have the
            following keys:

            - img: The value to pad the image.
            - seg: The value to pad the semantic segmentation map.
            Defaults to dict(img=0, seg=255).
        padding_mode (str): Type of padding. Should be: constant, edge,
            reflect or symmetric. Defaults to 'constant'.

            - constant: pads with a constant value, this value is specified
              with pad_val.
            - edge: pads with the last value at the edge of the image.
            - reflect: pads with reflection of image without repeating the
              last value on the edge. For example, padding [1, 2, 3, 4]
              with 2 elements on both sides in reflect mode will result
              in [3, 2, 1, 2, 3, 4, 3, 2].
            - symmetric: pads with reflection of image repeating the last
              value on the edge. For example, padding [1, 2, 3, 4] with
              2 elements on both sides in symmetric mode will result in
              [2, 1, 1, 2, 3, 4, 4, 3]
    """

    def _pad_masks(self, results: dict) -> None:
        """Pad masks according to ``results['pad_shape']``."""
        if results.get('gt_masks', None) is not None:
            # Separate pad value for masks; falls back to 0.
            pad_val = self.pad_val.get('masks', 0)
            pad_shape = results['pad_shape'][:2]
            results['gt_masks'] = results['gt_masks'].pad(
                pad_shape, pad_val=pad_val)

    def transform(self, results: dict) -> dict:
        """Call function to pad images, masks, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        # Image padding (from the mmcv base class) sets 'pad_shape', which
        # the seg/mask padding below relies on.
        self._pad_img(results)
        self._pad_seg(results)
        self._pad_masks(results)
        return results


@TRANSFORMS.register_module()
class RandomCrop(BaseTransform):
    """Random crop the image & bboxes & masks.

    The absolute ``crop_size`` is sampled based on ``crop_type`` and
    ``image_size``, then the cropped results are generated.
    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_masks (BitmapMasks | PolygonMasks) (optional)
    - gt_ignore_flags (bool) (optional)
    - gt_seg_map (np.uint8) (optional)

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_masks (optional)
    - gt_ignore_flags (optional)
    - gt_seg_map (optional)

    Added Keys:

    - homography_matrix

    Args:
        crop_size (tuple): The relative ratio or absolute pixels of
            (width, height).
        crop_type (str, optional): One of "relative_range", "relative",
            "absolute", "absolute_range". "relative" randomly crops
            (h * crop_size[0], w * crop_size[1]) part from an input of size
            (h, w). "relative_range" uniformly samples relative crop size
            from range [crop_size[0], 1] and [crop_size[1], 1] for height and
            width respectively. "absolute" crops from an input with absolute
            size (crop_size[0], crop_size[1]). "absolute_range" uniformly
            samples crop_h in range [crop_size[0], min(h, crop_size[1])] and
            crop_w in range [crop_size[0], min(w, crop_size[1])].
            Defaults to "absolute".
        allow_negative_crop (bool, optional): Whether to allow a crop that
            does not contain any bbox area. Defaults to False.
        recompute_bbox (bool, optional): Whether to re-compute the boxes
            based on cropped instance masks. Defaults to False.
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.

    Note:
        - If the image is smaller than the absolute crop size, return the
          original image.
        - The keys for bboxes, labels and masks must be aligned. That is,
          ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and
          ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and
          ``gt_masks_ignore``.
        - If the crop does not contain any gt-bbox region and
          ``allow_negative_crop`` is set to False, skip this image.
""" def __init__(self, crop_size: tuple, crop_type: str = 'absolute', allow_negative_crop: bool = False, recompute_bbox: bool = False, bbox_clip_border: bool = True) -> None: if crop_type not in [ 'relative_range', 'relative', 'absolute', 'absolute_range' ]: raise ValueError(f'Invalid crop_type {crop_type}.') if crop_type in ['absolute', 'absolute_range']: assert crop_size[0] > 0 and crop_size[1] > 0 assert isinstance(crop_size[0], int) and isinstance( crop_size[1], int) if crop_type == 'absolute_range': assert crop_size[0] <= crop_size[1] else: assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 self.crop_size = crop_size self.crop_type = crop_type self.allow_negative_crop = allow_negative_crop self.bbox_clip_border = bbox_clip_border self.recompute_bbox = recompute_bbox def _crop_data(self, results: dict, crop_size: Tuple[int, int], allow_negative_crop: bool) -> Union[dict, None]: """Function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. crop_size (Tuple[int, int]): Expected absolute size after cropping, (h, w). allow_negative_crop (bool): Whether to allow a crop that does not contain any bbox area. Returns: results (Union[dict, None]): Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. None will be returned when there is no valid bbox after cropping. 
""" assert crop_size[0] > 0 and crop_size[1] > 0 img = results['img'] margin_h = max(img.shape[0] - crop_size[0], 0) margin_w = max(img.shape[1] - crop_size[1], 0) offset_h, offset_w = self._rand_offset((margin_h, margin_w)) crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] # Record the homography matrix for the RandomCrop homography_matrix = np.array( [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]], dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] # crop the image img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] img_shape = img.shape results['img'] = img results['img_shape'] = img_shape[:2] # crop bboxes accordingly and clip to the image boundary if results.get('gt_bboxes', None) is not None: bboxes = results['gt_bboxes'] bboxes.translate_([-offset_w, -offset_h]) if self.bbox_clip_border: bboxes.clip_(img_shape[:2]) valid_inds = bboxes.is_inside(img_shape[:2]).numpy() # If the crop does not contain any gt-bbox area and # allow_negative_crop is False, skip this image. 
            if (not valid_inds.any() and not allow_negative_crop):
                return None

            results['gt_bboxes'] = bboxes[valid_inds]

            if results.get('gt_ignore_flags', None) is not None:
                results['gt_ignore_flags'] = \
                    results['gt_ignore_flags'][valid_inds]

            if results.get('gt_bboxes_labels', None) is not None:
                results['gt_bboxes_labels'] = \
                    results['gt_bboxes_labels'][valid_inds]

            if results.get('gt_masks', None) is not None:
                results['gt_masks'] = results['gt_masks'][
                    valid_inds.nonzero()[0]].crop(
                        np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
                if self.recompute_bbox:
                    # Derive tighter boxes from the cropped instance masks.
                    results['gt_bboxes'] = results['gt_masks'].get_bboxes(
                        type(results['gt_bboxes']))

        # crop semantic seg
        if results.get('gt_seg_map', None) is not None:
            results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,
                                                          crop_x1:crop_x2]

        return results

    @cache_randomness
    def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:
        """Randomly generate crop offset.

        Args:
            margin (Tuple[int, int]): The upper bound for the offset
                generated randomly.

        Returns:
            Tuple[int, int]: The random offset for the crop.
        """
        margin_h, margin_w = margin
        # np.random.randint's upper bound is exclusive, hence the +1.
        offset_h = np.random.randint(0, margin_h + 1)
        offset_w = np.random.randint(0, margin_w + 1)

        return offset_h, offset_w

    @cache_randomness
    def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:
        """Randomly generates the absolute crop size based on `crop_type` and
        `image_size`.

        Args:
            image_size (Tuple[int, int]): (h, w).

        Returns:
            crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.
""" h, w = image_size if self.crop_type == 'absolute': return min(self.crop_size[1], h), min(self.crop_size[0], w) elif self.crop_type == 'absolute_range': crop_h = np.random.randint( min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1) crop_w = np.random.randint( min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1) return crop_h, crop_w elif self.crop_type == 'relative': crop_w, crop_h = self.crop_size return int(h * crop_h + 0.5), int(w * crop_w + 0.5) else: # 'relative_range' crop_size = np.asarray(self.crop_size, dtype=np.float32) crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) return int(h * crop_h + 0.5), int(w * crop_w + 0.5) @autocast_box_type() def transform(self, results: dict) -> Union[dict, None]: """Transform function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: results (Union[dict, None]): Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. None will be returned when there is no valid bbox after cropping. """ image_size = results['img'].shape[:2] crop_size = self._get_crop_size(image_size) results = self._crop_data(results, crop_size, self.allow_negative_crop) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'crop_type={self.crop_type}, ' repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' repr_str += f'recompute_bbox={self.recompute_bbox}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class SegRescale(BaseTransform): """Rescale semantic segmentation maps. This transform rescale the ``gt_seg_map`` according to ``scale_factor``. Required Keys: - gt_seg_map Modified Keys: - gt_seg_map Args: scale_factor (float): The scale factor of the final output. Defaults to 1. backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. 
            These two backends generates slightly different results.
            Defaults to 'cv2'.
    """

    def __init__(self, scale_factor: float = 1, backend: str = 'cv2') -> None:
        self.scale_factor = scale_factor
        self.backend = backend

    def transform(self, results: dict) -> dict:
        """Transform function to scale the semantic segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with semantic segmentation map scaled.
        """
        if self.scale_factor != 1:
            # Nearest interpolation preserves the integer label values.
            results['gt_seg_map'] = mmcv.imrescale(
                results['gt_seg_map'],
                self.scale_factor,
                interpolation='nearest',
                backend=self.backend)

        return results

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(scale_factor={self.scale_factor}, '
        repr_str += f'backend={self.backend})'
        return repr_str


@TRANSFORMS.register_module()
class PhotoMetricDistortion(BaseTransform):
    """Apply photometric distortion to image sequentially, every
    transformation is applied with a probability of 0.5. The position of
    random contrast is in second or second to last.

    1. random brightness
    2. random contrast (mode 0)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (mode 1)
    8. randomly swap channels

    Required Keys:

    - img (np.uint8)

    Modified Keys:

    - img (np.float32)

    Args:
        brightness_delta (int): delta of brightness.
        contrast_range (sequence): range of contrast.
        saturation_range (sequence): range of saturation.
        hue_delta (int): delta of hue.
""" def __init__(self, brightness_delta: int = 32, contrast_range: Sequence[Number] = (0.5, 1.5), saturation_range: Sequence[Number] = (0.5, 1.5), hue_delta: int = 18) -> None: self.brightness_delta = brightness_delta self.contrast_lower, self.contrast_upper = contrast_range self.saturation_lower, self.saturation_upper = saturation_range self.hue_delta = hue_delta @cache_randomness def _random_flags(self) -> Sequence[Number]: mode = random.randint(2) brightness_flag = random.randint(2) contrast_flag = random.randint(2) saturation_flag = random.randint(2) hue_flag = random.randint(2) swap_flag = random.randint(2) delta_value = random.uniform(-self.brightness_delta, self.brightness_delta) alpha_value = random.uniform(self.contrast_lower, self.contrast_upper) saturation_value = random.uniform(self.saturation_lower, self.saturation_upper) hue_value = random.uniform(-self.hue_delta, self.hue_delta) swap_value = random.permutation(3) return (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, swap_flag, delta_value, alpha_value, saturation_value, hue_value, swap_value) def transform(self, results: dict) -> dict: """Transform function to perform photometric distortion on images. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images distorted. 
""" assert 'img' in results, '`img` is not found in results' img = results['img'] img = img.astype(np.float32) (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, swap_flag, delta_value, alpha_value, saturation_value, hue_value, swap_value) = self._random_flags() # random brightness if brightness_flag: img += delta_value # mode == 0 --> do random contrast first # mode == 1 --> do random contrast last if mode == 1: if contrast_flag: img *= alpha_value # convert color from BGR to HSV img = mmcv.bgr2hsv(img) # random saturation if saturation_flag: img[..., 1] *= saturation_value # For image(type=float32), after convert bgr to hsv by opencv, # valid saturation value range is [0, 1] if saturation_value > 1: img[..., 1] = img[..., 1].clip(0, 1) # random hue if hue_flag: img[..., 0] += hue_value img[..., 0][img[..., 0] > 360] -= 360 img[..., 0][img[..., 0] < 0] += 360 # convert color from HSV to BGR img = mmcv.hsv2bgr(img) # random contrast if mode == 0: if contrast_flag: img *= alpha_value # randomly swap channels if swap_flag: img = img[..., swap_value] results['img'] = img return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(brightness_delta={self.brightness_delta}, ' repr_str += 'contrast_range=' repr_str += f'{(self.contrast_lower, self.contrast_upper)}, ' repr_str += 'saturation_range=' repr_str += f'{(self.saturation_lower, self.saturation_upper)}, ' repr_str += f'hue_delta={self.hue_delta})' return repr_str @TRANSFORMS.register_module() class Expand(BaseTransform): """Random expand the image & bboxes & masks & segmentation map. Randomly place the original image on a canvas of ``ratio`` x original image size filled with mean values. The ratio is in the range of ratio_range. 
    Required Keys:

    - img
    - img_shape
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_masks (BitmapMasks | PolygonMasks) (optional)
    - gt_seg_map (np.uint8) (optional)

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes
    - gt_masks
    - gt_seg_map

    Args:
        mean (sequence): mean value of dataset.
        to_rgb (bool): if need to convert the order of mean to align with
            RGB.
        ratio_range (sequence): range of expand ratio.
        seg_ignore_label (int): label of ignore segmentation map.
        prob (float): probability of applying this transformation
    """

    def __init__(self,
                 mean: Sequence[Number] = (0, 0, 0),
                 to_rgb: bool = True,
                 ratio_range: Sequence[Number] = (1, 4),
                 seg_ignore_label: int = None,
                 prob: float = 0.5) -> None:
        self.to_rgb = to_rgb
        self.ratio_range = ratio_range
        if to_rgb:
            # The dataset mean is given in RGB order; reverse it to match
            # the BGR images loaded by the pipeline.
            self.mean = mean[::-1]
        else:
            self.mean = mean
        self.min_ratio, self.max_ratio = ratio_range
        self.seg_ignore_label = seg_ignore_label
        self.prob = prob

    @cache_randomness
    def _random_prob(self) -> float:
        return random.uniform(0, 1)

    @cache_randomness
    def _random_ratio(self) -> float:
        return random.uniform(self.min_ratio, self.max_ratio)

    @cache_randomness
    def _random_left_top(self, ratio: float, h: int,
                         w: int) -> Tuple[int, int]:
        # Random placement of the original image within the expanded canvas.
        left = int(random.uniform(0, w * ratio - w))
        top = int(random.uniform(0, h * ratio - h))
        return left, top

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Transform function to expand images, bounding boxes, masks,
        segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images, bounding boxes, masks,
            segmentation map expanded.
""" if self._random_prob() > self.prob: return results assert 'img' in results, '`img` is not found in results' img = results['img'] h, w, c = img.shape ratio = self._random_ratio() # speedup expand when meets large image if np.all(self.mean == self.mean[0]): expand_img = np.empty((int(h * ratio), int(w * ratio), c), img.dtype) expand_img.fill(self.mean[0]) else: expand_img = np.full((int(h * ratio), int(w * ratio), c), self.mean, dtype=img.dtype) left, top = self._random_left_top(ratio, h, w) expand_img[top:top + h, left:left + w] = img results['img'] = expand_img results['img_shape'] = expand_img.shape[:2] # expand bboxes if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].translate_([left, top]) # expand masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].expand( int(h * ratio), int(w * ratio), top, left) # expand segmentation map if results.get('gt_seg_map', None) is not None: gt_seg = results['gt_seg_map'] expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), self.seg_ignore_label, dtype=gt_seg.dtype) expand_gt_seg[top:top + h, left:left + w] = gt_seg results['gt_seg_map'] = expand_gt_seg return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'prob={self.prob})' return repr_str @TRANSFORMS.register_module() class MinIoURandomCrop(BaseTransform): """Random crop the image & bboxes & masks & segmentation map, the cropped patches have minimum IoU requirement with original image & bboxes & masks. & segmentation map, the IoU threshold is randomly selected from min_ious. 
    Required Keys:

    - img
    - img_shape
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_masks (BitmapMasks | PolygonMasks) (optional)
    - gt_ignore_flags (bool) (optional)
    - gt_seg_map (np.uint8) (optional)

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes
    - gt_bboxes_labels
    - gt_masks
    - gt_ignore_flags
    - gt_seg_map

    Args:
        min_ious (Sequence[float]): minimum IoU threshold for all
            intersections with bounding boxes.
        min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
            where a >= min_crop_size).
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.
    """

    def __init__(self,
                 min_ious: Sequence[float] = (0.1, 0.3, 0.5, 0.7, 0.9),
                 min_crop_size: float = 0.3,
                 bbox_clip_border: bool = True) -> None:
        self.min_ious = min_ious
        # 1 means "keep the original image", 0 means "no IoU constraint".
        self.sample_mode = (1, *min_ious, 0)
        self.min_crop_size = min_crop_size
        self.bbox_clip_border = bbox_clip_border

    @cache_randomness
    def _random_mode(self) -> Number:
        return random.choice(self.sample_mode)

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Transform function to crop images and bounding boxes with minimum
        IoU constraint.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images and bounding boxes cropped, \
                'img_shape' key is updated.
""" assert 'img' in results, '`img` is not found in results' assert 'gt_bboxes' in results, '`gt_bboxes` is not found in results' img = results['img'] boxes = results['gt_bboxes'] h, w, c = img.shape while True: mode = self._random_mode() self.mode = mode if mode == 1: return results min_iou = self.mode for i in range(50): new_w = random.uniform(self.min_crop_size * w, w) new_h = random.uniform(self.min_crop_size * h, h) # h / w in [0.5, 2] if new_h / new_w < 0.5 or new_h / new_w > 2: continue left = random.uniform(w - new_w) top = random.uniform(h - new_h) patch = np.array( (int(left), int(top), int(left + new_w), int(top + new_h))) # Line or point crop is not allowed if patch[2] == patch[0] or patch[3] == patch[1]: continue overlaps = boxes.overlaps( HorizontalBoxes(patch.reshape(-1, 4).astype(np.float32)), boxes).numpy().reshape(-1) if len(overlaps) > 0 and overlaps.min() < min_iou: continue # center of boxes should inside the crop img # only adjust boxes and instance masks when the gt is not empty if len(overlaps) > 0: # adjust boxes def is_center_of_bboxes_in_patch(boxes, patch): centers = boxes.centers.numpy() mask = ((centers[:, 0] > patch[0]) * (centers[:, 1] > patch[1]) * (centers[:, 0] < patch[2]) * (centers[:, 1] < patch[3])) return mask mask = is_center_of_bboxes_in_patch(boxes, patch) if not mask.any(): continue if results.get('gt_bboxes', None) is not None: boxes = results['gt_bboxes'] mask = is_center_of_bboxes_in_patch(boxes, patch) boxes = boxes[mask] boxes.translate_([-patch[0], -patch[1]]) if self.bbox_clip_border: boxes.clip_( [patch[3] - patch[1], patch[2] - patch[0]]) results['gt_bboxes'] = boxes # ignore_flags if results.get('gt_ignore_flags', None) is not None: results['gt_ignore_flags'] = \ results['gt_ignore_flags'][mask] # labels if results.get('gt_bboxes_labels', None) is not None: results['gt_bboxes_labels'] = results[ 'gt_bboxes_labels'][mask] # mask fields if results.get('gt_masks', None) is not None: results['gt_masks'] = 
results['gt_masks'][ mask.nonzero()[0]].crop(patch) # adjust the img no matter whether the gt is empty before crop img = img[patch[1]:patch[3], patch[0]:patch[2]] results['img'] = img results['img_shape'] = img.shape[:2] # seg fields if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = results['gt_seg_map'][ patch[1]:patch[3], patch[0]:patch[2]] return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(min_ious={self.min_ious}, ' repr_str += f'min_crop_size={self.min_crop_size}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class Corrupt(BaseTransform): """Corruption augmentation. Corruption transforms implemented based on `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_. Required Keys: - img (np.uint8) Modified Keys: - img (np.uint8) Args: corruption (str): Corruption name. severity (int): The severity of corruption. Defaults to 1. """ def __init__(self, corruption: str, severity: int = 1) -> None: self.corruption = corruption self.severity = severity def transform(self, results: dict) -> dict: """Call function to corrupt image. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images corrupted. """ if corrupt is None: raise RuntimeError('imagecorruptions is not installed') results['img'] = corrupt( results['img'].astype(np.uint8), corruption_name=self.corruption, severity=self.severity) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(corruption={self.corruption}, ' repr_str += f'severity={self.severity})' return repr_str @TRANSFORMS.register_module() @avoid_cache_randomness class Albu(BaseTransform): """Albumentation augmentation. Adds custom transformations from Albumentations library. Please, visit `https://albumentations.readthedocs.io` to get more information. 
Required Keys: - img (np.uint8) - gt_bboxes (HorizontalBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) Modified Keys: - img (np.uint8) - gt_bboxes (HorizontalBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - img_shape (tuple) An example of ``transforms`` is as followed: .. code-block:: [ dict( type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, interpolation=1, p=0.5), dict( type='RandomBrightnessContrast', brightness_limit=[0.1, 0.3], contrast_limit=[0.1, 0.3], p=0.2), dict(type='ChannelShuffle', p=0.1), dict( type='OneOf', transforms=[ dict(type='Blur', blur_limit=3, p=1.0), dict(type='MedianBlur', blur_limit=3, p=1.0) ], p=0.1), ] Args: transforms (list[dict]): A list of albu transformations bbox_params (dict, optional): Bbox_params for albumentation `Compose` keymap (dict, optional): Contains {'input key':'albumentation-style key'} skip_img_without_anno (bool): Whether to skip the image if no ann left after aug. Defaults to False. 
""" def __init__(self, transforms: List[dict], bbox_params: Optional[dict] = None, keymap: Optional[dict] = None, skip_img_without_anno: bool = False) -> None: if Compose is None: raise RuntimeError('albumentations is not installed') # Args will be modified later, copying it will be safer transforms = copy.deepcopy(transforms) if bbox_params is not None: bbox_params = copy.deepcopy(bbox_params) if keymap is not None: keymap = copy.deepcopy(keymap) self.transforms = transforms self.filter_lost_elements = False self.skip_img_without_anno = skip_img_without_anno # A simple workaround to remove masks without boxes if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params and 'filter_lost_elements' in bbox_params): self.filter_lost_elements = True self.origin_label_fields = bbox_params['label_fields'] bbox_params['label_fields'] = ['idx_mapper'] del bbox_params['filter_lost_elements'] self.bbox_params = ( self.albu_builder(bbox_params) if bbox_params else None) self.aug = Compose([self.albu_builder(t) for t in self.transforms], bbox_params=self.bbox_params) if not keymap: self.keymap_to_albu = { 'img': 'image', 'gt_masks': 'masks', 'gt_bboxes': 'bboxes' } else: self.keymap_to_albu = keymap self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} def albu_builder(self, cfg: dict) -> albumentations: """Import a module from albumentations. It inherits some of :func:`build_from_cfg` logic. Args: cfg (dict): Config dict. It should at least contain the key "type". Returns: obj: The constructed object. 
""" assert isinstance(cfg, dict) and 'type' in cfg args = cfg.copy() obj_type = args.pop('type') if is_str(obj_type): if albumentations is None: raise RuntimeError('albumentations is not installed') obj_cls = getattr(albumentations, obj_type) elif inspect.isclass(obj_type): obj_cls = obj_type else: raise TypeError( f'type must be a str or valid type, but got {type(obj_type)}') if 'transforms' in args: args['transforms'] = [ self.albu_builder(transform) for transform in args['transforms'] ] return obj_cls(**args) @staticmethod def mapper(d: dict, keymap: dict) -> dict: """Dictionary mapper. Renames keys according to keymap provided. Args: d (dict): old dict keymap (dict): {'old_key':'new_key'} Returns: dict: new dict. """ updated_dict = {} for k, v in zip(d.keys(), d.values()): new_k = keymap.get(k, k) updated_dict[new_k] = d[k] return updated_dict @autocast_box_type() def transform(self, results: dict) -> Union[dict, None]: """Transform function of Albu.""" # TODO: gt_seg_map is not currently supported # dict to albumentations format results = self.mapper(results, self.keymap_to_albu) results, ori_masks = self._preprocess_results(results) results = self.aug(**results) results = self._postprocess_results(results, ori_masks) if results is None: return None # back to the original format results = self.mapper(results, self.keymap_back) results['img_shape'] = results['img'].shape[:2] return results def _preprocess_results(self, results: dict) -> tuple: """Pre-processing results to facilitate the use of Albu.""" if 'bboxes' in results: # to list of boxes if not isinstance(results['bboxes'], HorizontalBoxes): raise NotImplementedError( 'Albu only supports horizontal boxes now') bboxes = results['bboxes'].numpy() results['bboxes'] = [x for x in bboxes] # add pseudo-field for filtration if self.filter_lost_elements: results['idx_mapper'] = np.arange(len(results['bboxes'])) # TODO: Support mask structure in albu ori_masks = None if 'masks' in results: if 
isinstance(results['masks'], PolygonMasks): raise NotImplementedError( 'Albu only supports BitMap masks now') ori_masks = results['masks'] if albumentations.__version__ < '0.5': results['masks'] = results['masks'].masks else: results['masks'] = [mask for mask in results['masks'].masks] return results, ori_masks def _postprocess_results( self, results: dict, ori_masks: Optional[Union[BitmapMasks, PolygonMasks]] = None) -> dict: """Post-processing Albu output.""" # albumentations may return np.array or list on different versions if 'gt_bboxes_labels' in results and isinstance( results['gt_bboxes_labels'], list): results['gt_bboxes_labels'] = np.array( results['gt_bboxes_labels'], dtype=np.int64) if 'gt_ignore_flags' in results and isinstance( results['gt_ignore_flags'], list): results['gt_ignore_flags'] = np.array( results['gt_ignore_flags'], dtype=bool) if 'bboxes' in results: if isinstance(results['bboxes'], list): results['bboxes'] = np.array( results['bboxes'], dtype=np.float32) results['bboxes'] = results['bboxes'].reshape(-1, 4) results['bboxes'] = HorizontalBoxes(results['bboxes']) # filter label_fields if self.filter_lost_elements: for label in self.origin_label_fields: results[label] = np.array( [results[label][i] for i in results['idx_mapper']]) if 'masks' in results: assert ori_masks is not None results['masks'] = np.array( [results['masks'][i] for i in results['idx_mapper']]) results['masks'] = ori_masks.__class__( results['masks'], ori_masks.height, ori_masks.width) if (not len(results['idx_mapper']) and self.skip_img_without_anno): return None elif 'masks' in results: results['masks'] = ori_masks.__class__(results['masks'], ori_masks.height, ori_masks.width) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' return repr_str @TRANSFORMS.register_module() @avoid_cache_randomness class RandomCenterCropPad(BaseTransform): """Random center crop and random around padding for CornerNet. 
This operation generates randomly cropped image from the original image and pads it simultaneously. Different from :class:`RandomCrop`, the output shape may not equal to ``crop_size`` strictly. We choose a random value from ``ratios`` and the output shape could be larger or smaller than ``crop_size``. The padding operation is also different from :class:`Pad`, here we use around padding instead of right-bottom padding. The relation between output image (padding image) and original image: .. code:: text output image +----------------------------+ | padded area | +------|----------------------------|----------+ | | cropped area | | | | +---------------+ | | | | | . center | | | original image | | | range | | | | | +---------------+ | | +------|----------------------------|----------+ | padded area | +----------------------------+ There are 5 main areas in the figure: - output image: output image of this operation, also called padding image in following instruction. - original image: input image of this operation. - padded area: non-intersect area of output image and original image. - cropped area: the overlap of output image and original image. - center range: a smaller area where random center chosen from. center range is computed by ``border`` and original image's shape to avoid our random center is too close to original image's border. Also this operation act differently in train and test mode, the summary pipeline is listed below. Train pipeline: 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image will be ``random_ratio * crop_size``. 2. Choose a ``random_center`` in center range. 3. Generate padding image with center matches the ``random_center``. 4. Initialize the padding image with pixel value equals to ``mean``. 5. Copy the cropped area to padding image. 6. Refine annotations. Test pipeline: 1. Compute output shape according to ``test_pad_mode``. 2. Generate padding image with center matches the original image center. 3. 
Initialize the padding image with pixel value equals to ``mean``. 4. Copy the ``cropped area`` to padding image. Required Keys: - img (np.float32) - img_shape (tuple) - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_ignore_flags (bool) (optional) Modified Keys: - img (np.float32) - img_shape (tuple) - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_ignore_flags (bool) (optional) Args: crop_size (tuple, optional): expected size after crop, final size will computed according to ratio. Requires (width, height) in train mode, and None in test mode. ratios (tuple, optional): random select a ratio from tuple and crop image to (crop_size[0] * ratio) * (crop_size[1] * ratio). Only available in train mode. Defaults to (0.9, 1.0, 1.1). border (int, optional): max distance from center select area to image border. Only available in train mode. Defaults to 128. mean (sequence, optional): Mean values of 3 channels. std (sequence, optional): Std values of 3 channels. to_rgb (bool, optional): Whether to convert the image from BGR to RGB. test_mode (bool): whether involve random variables in transform. In train mode, crop_size is fixed, center coords and ratio is random selected from predefined lists. In test mode, crop_size is image's original shape, center coords and ratio is fixed. Defaults to False. test_pad_mode (tuple, optional): padding method and padding shape value, only available in test mode. Default is using 'logical_or' with 127 as padding shape value. - 'logical_or': final_shape = input_shape | padding_shape_value - 'size_divisor': final_shape = int( ceil(input_shape / padding_shape_value) * padding_shape_value) Defaults to ('logical_or', 127). test_pad_add_pix (int): Extra padding pixel in test mode. Defaults to 0. bbox_clip_border (bool): Whether clip the objects outside the border of the image. Defaults to True. 
""" def __init__(self, crop_size: Optional[tuple] = None, ratios: Optional[tuple] = (0.9, 1.0, 1.1), border: Optional[int] = 128, mean: Optional[Sequence] = None, std: Optional[Sequence] = None, to_rgb: Optional[bool] = None, test_mode: bool = False, test_pad_mode: Optional[tuple] = ('logical_or', 127), test_pad_add_pix: int = 0, bbox_clip_border: bool = True) -> None: if test_mode: assert crop_size is None, 'crop_size must be None in test mode' assert ratios is None, 'ratios must be None in test mode' assert border is None, 'border must be None in test mode' assert isinstance(test_pad_mode, (list, tuple)) assert test_pad_mode[0] in ['logical_or', 'size_divisor'] else: assert isinstance(crop_size, (list, tuple)) assert crop_size[0] > 0 and crop_size[1] > 0, ( 'crop_size must > 0 in train mode') assert isinstance(ratios, (list, tuple)) assert test_pad_mode is None, ( 'test_pad_mode must be None in train mode') self.crop_size = crop_size self.ratios = ratios self.border = border # We do not set default value to mean, std and to_rgb because these # hyper-parameters are easy to forget but could affect the performance. # Please use the same setting as Normalize for performance assurance. assert mean is not None and std is not None and to_rgb is not None self.to_rgb = to_rgb self.input_mean = mean self.input_std = std if to_rgb: self.mean = mean[::-1] self.std = std[::-1] else: self.mean = mean self.std = std self.test_mode = test_mode self.test_pad_mode = test_pad_mode self.test_pad_add_pix = test_pad_add_pix self.bbox_clip_border = bbox_clip_border def _get_border(self, border, size): """Get final border for the target size. This function generates a ``final_border`` according to image's shape. The area between ``final_border`` and ``size - final_border`` is the ``center range``. We randomly choose center from the ``center range`` to avoid our random center is too close to original image's border. Also ``center range`` should be larger than 0. 
        Args:
            border (int): The initial border, default is 128.
            size (int): The width or height of original image.

        Returns:
            int: The final border.
        """
        # Halve the border (power-of-two steps) until 2 * border < size, so
        # the resulting center range [border, size - border] is non-empty.
        k = 2 * border / size
        i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
        return border // i

    def _filter_boxes(self, patch, boxes):
        """Check whether the center of each box is in the patch.

        Args:
            patch (list[int]): The cropped area, [left, top, right, bottom].
            boxes (numpy array, (N x 4)): Ground truth boxes.

        Returns:
            mask (numpy array, (N,)): Each box is inside or outside the
                patch.
        """
        center = boxes.centers.numpy()
        # Elementwise multiply of boolean arrays acts as logical AND.
        mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
            center[:, 0] < patch[2]) * (
                center[:, 1] < patch[3])
        return mask

    def _crop_image_and_paste(self, image, center, size):
        """Crop image with a given center and size, then paste the cropped
        image to a blank image with two centers align.

        This function is equivalent to generating a blank image with ``size``
        as its shape. Then cover it on the original image with two centers (
        the center of blank image and the random center of original image)
        aligned. The overlap area is paste from the original image and the
        outside area is filled with ``mean pixel``.

        Args:
            image (np array, H x W x C): Original image.
            center (list[int]): Target crop center coord.
            size (list[int]): Target crop size. [target_h, target_w]

        Returns:
            cropped_img (np array, target_h x target_w x C): Cropped image.
            border (np array, 4): The distance of four border of
                ``cropped_img`` to the original image area, [top, bottom,
                left, right]
            patch (list[int]): The cropped area, [left, top, right, bottom].
""" center_y, center_x = center target_h, target_w = size img_h, img_w, img_c = image.shape x0 = max(0, center_x - target_w // 2) x1 = min(center_x + target_w // 2, img_w) y0 = max(0, center_y - target_h // 2) y1 = min(center_y + target_h // 2, img_h) patch = np.array((int(x0), int(y0), int(x1), int(y1))) left, right = center_x - x0, x1 - center_x top, bottom = center_y - y0, y1 - center_y cropped_center_y, cropped_center_x = target_h // 2, target_w // 2 cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype) for i in range(img_c): cropped_img[:, :, i] += self.mean[i] y_slice = slice(cropped_center_y - top, cropped_center_y + bottom) x_slice = slice(cropped_center_x - left, cropped_center_x + right) cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :] border = np.array([ cropped_center_y - top, cropped_center_y + bottom, cropped_center_x - left, cropped_center_x + right ], dtype=np.float32) return cropped_img, border, patch def _train_aug(self, results): """Random crop and around padding the original image. Args: results (dict): Image infomations in the augment pipeline. Returns: results (dict): The updated dict. """ img = results['img'] h, w, c = img.shape gt_bboxes = results['gt_bboxes'] while True: scale = random.choice(self.ratios) new_h = int(self.crop_size[1] * scale) new_w = int(self.crop_size[0] * scale) h_border = self._get_border(self.border, h) w_border = self._get_border(self.border, w) for i in range(50): center_x = random.randint(low=w_border, high=w - w_border) center_y = random.randint(low=h_border, high=h - h_border) cropped_img, border, patch = self._crop_image_and_paste( img, [center_y, center_x], [new_h, new_w]) if len(gt_bboxes) == 0: results['img'] = cropped_img results['img_shape'] = cropped_img.shape[:2] return results # if image do not have valid bbox, any crop patch is valid. 
mask = self._filter_boxes(patch, gt_bboxes) if not mask.any(): continue results['img'] = cropped_img results['img_shape'] = cropped_img.shape[:2] x0, y0, x1, y1 = patch left_w, top_h = center_x - x0, center_y - y0 cropped_center_x, cropped_center_y = new_w // 2, new_h // 2 # crop bboxes accordingly and clip to the image boundary gt_bboxes = gt_bboxes[mask] gt_bboxes.translate_([ cropped_center_x - left_w - x0, cropped_center_y - top_h - y0 ]) if self.bbox_clip_border: gt_bboxes.clip_([new_h, new_w]) keep = gt_bboxes.is_inside([new_h, new_w]).numpy() gt_bboxes = gt_bboxes[keep] results['gt_bboxes'] = gt_bboxes # ignore_flags if results.get('gt_ignore_flags', None) is not None: gt_ignore_flags = results['gt_ignore_flags'][mask] results['gt_ignore_flags'] = \ gt_ignore_flags[keep] # labels if results.get('gt_bboxes_labels', None) is not None: gt_labels = results['gt_bboxes_labels'][mask] results['gt_bboxes_labels'] = gt_labels[keep] if 'gt_masks' in results or 'gt_seg_map' in results: raise NotImplementedError( 'RandomCenterCropPad only supports bbox.') return results def _test_aug(self, results): """Around padding the original image without cropping. The padding mode and value are from ``test_pad_mode``. Args: results (dict): Image infomations in the augment pipeline. Returns: results (dict): The updated dict. 
""" img = results['img'] h, w, c = img.shape if self.test_pad_mode[0] in ['logical_or']: # self.test_pad_add_pix is only used for centernet target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix elif self.test_pad_mode[0] in ['size_divisor']: divisor = self.test_pad_mode[1] target_h = int(np.ceil(h / divisor)) * divisor target_w = int(np.ceil(w / divisor)) * divisor else: raise NotImplementedError( 'RandomCenterCropPad only support two testing pad mode:' 'logical-or and size_divisor.') cropped_img, border, _ = self._crop_image_and_paste( img, [h // 2, w // 2], [target_h, target_w]) results['img'] = cropped_img results['img_shape'] = cropped_img.shape[:2] results['border'] = border return results @autocast_box_type() def transform(self, results: dict) -> dict: img = results['img'] assert img.dtype == np.float32, ( 'RandomCenterCropPad needs the input image of dtype np.float32,' ' please set "to_float32=True" in "LoadImageFromFile" pipeline') h, w, c = img.shape assert c == len(self.mean) if self.test_mode: return self._test_aug(results) else: return self._train_aug(results) def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'ratios={self.ratios}, ' repr_str += f'border={self.border}, ' repr_str += f'mean={self.input_mean}, ' repr_str += f'std={self.input_std}, ' repr_str += f'to_rgb={self.to_rgb}, ' repr_str += f'test_mode={self.test_mode}, ' repr_str += f'test_pad_mode={self.test_pad_mode}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class CutOut(BaseTransform): """CutOut operation. Randomly drop some regions of image used in `Cutout <https://arxiv.org/abs/1708.04552>`_. Required Keys: - img Modified Keys: - img Args: n_holes (int or tuple[int, int]): Number of regions to be dropped. 
            If it is given as a list, number of holes will be randomly
            selected from the closed interval [``n_holes[0]``,
            ``n_holes[1]``].
        cutout_shape (tuple[int, int] or list[tuple[int, int]], optional):
            The candidate shape of dropped regions. It can be
            ``tuple[int, int]`` to use a fixed cutout shape, or
            ``list[tuple[int, int]]`` to randomly choose shape from the
            list. Defaults to None.
        cutout_ratio (tuple[float, float] or list[tuple[float, float]],
            optional): The candidate ratio of dropped regions. It can be
            ``tuple[float, float]`` to use a fixed ratio or
            ``list[tuple[float, float]]`` to randomly choose ratio from the
            list. Please note that ``cutout_shape`` and ``cutout_ratio``
            cannot be both given at the same time. Defaults to None.
        fill_in (tuple[float, float, float] or tuple[int, int, int]):
            The value of pixel to fill in the dropped regions.
            Defaults to (0, 0, 0).
    """

    def __init__(
        self,
        n_holes: Union[int, Tuple[int, int]],
        cutout_shape: Optional[Union[Tuple[int, int],
                                     List[Tuple[int, int]]]] = None,
        cutout_ratio: Optional[Union[Tuple[float, float],
                                     List[Tuple[float, float]]]] = None,
        fill_in: Union[Tuple[float, float, float], Tuple[int, int,
                                                         int]] = (0, 0, 0)
    ) -> None:
        # Exactly one of cutout_shape / cutout_ratio must be given (XOR).
        assert (cutout_shape is None) ^ (cutout_ratio is None), \
            'Either cutout_shape or cutout_ratio should be specified.'
        assert (isinstance(cutout_shape, (list, tuple))
                or isinstance(cutout_ratio, (list, tuple)))
        if isinstance(n_holes, tuple):
            assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
        else:
            # Scalar n_holes becomes a degenerate closed interval.
            n_holes = (n_holes, n_holes)
        self.n_holes = n_holes
        self.fill_in = fill_in
        self.with_ratio = cutout_ratio is not None
        self.candidates = cutout_ratio if self.with_ratio else cutout_shape
        if not isinstance(self.candidates, list):
            self.candidates = [self.candidates]

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Call function to drop some regions of image."""
        h, w, c = results['img'].shape
        n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
        for _ in range(n_holes):
            # (x1, y1) is the hole's top-left corner; size comes from a
            # randomly chosen candidate (absolute shape or ratio of (w, h)).
            x1 = np.random.randint(0, w)
            y1 = np.random.randint(0, h)
            index = np.random.randint(0, len(self.candidates))
            if not self.with_ratio:
                cutout_w, cutout_h = self.candidates[index]
            else:
                cutout_w = int(self.candidates[index][0] * w)
                cutout_h = int(self.candidates[index][1] * h)

            # Clip so the hole never extends past the image border.
            x2 = np.clip(x1 + cutout_w, 0, w)
            y2 = np.clip(y1 + cutout_h, 0, h)
            results['img'][y1:y2, x1:x2, :] = self.fill_in

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(n_holes={self.n_holes}, '
        repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
                     else f'cutout_shape={self.candidates}, ')
        repr_str += f'fill_in={self.fill_in})'
        return repr_str


@TRANSFORMS.register_module()
class Mosaic(BaseTransform):
    """Mosaic augmentation.

    Given 4 images, mosaic transform combines them into
    one output image. The output image is composed of the parts from each
    sub- image.

    .. code:: text

                        mosaic transform
                           center_x
                +------------------------------+
                |       pad        |  pad      |
                |      +-----------+           |
                |      |           |           |
                |      |  image1   |--------+  |
                |      |           |        |  |
                |      |           | image2 |  |
     center_y   |----+-------------+-----------|
                |    |   cropped   |           |
                |pad |   image3    |  image4   |
                |    |             |           |
                +----|-------------+-----------+
                     |             |
                     +-------------+

    The mosaic transform steps are as follows:

        1. Choose the mosaic center as the intersections of 4 images
        2.
           Get the left top image according to the index, and randomly
           sample another 3 images from the custom dataset.
        3. Sub image will be cropped if image is larger than mosaic patch

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - mix_results (List[dict])

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)

    Args:
        img_scale (Sequence[int]): Image size after mosaic pipeline of single
            image. The shape order should be (width, height).
            Defaults to (640, 640).
        center_ratio_range (Sequence[float]): Center ratio range of mosaic
            output. Defaults to (0.5, 1.5).
        bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some dataset like MOT17, the gt bboxes
            are allowed to cross the border of images. Therefore, we don't
            need to clip the gt bboxes in these cases. Defaults to True.
        pad_val (int): Pad value. Defaults to 114.
        prob (float): Probability of applying this transformation.
            Defaults to 1.0.
    """

    def __init__(self,
                 img_scale: Tuple[int, int] = (640, 640),
                 center_ratio_range: Tuple[float, float] = (0.5, 1.5),
                 bbox_clip_border: bool = True,
                 pad_val: float = 114.0,
                 prob: float = 1.0) -> None:
        assert isinstance(img_scale, tuple)
        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \
                                 f'got {prob}.'
        # Warn (via logger) when a non-square scale is configured.
        log_img_scale(img_scale, skip_square=True, shape_order='wh')
        self.img_scale = img_scale
        self.center_ratio_range = center_ratio_range
        self.bbox_clip_border = bbox_clip_border
        self.pad_val = pad_val
        self.prob = prob

    @cache_randomness
    def get_indexes(self, dataset: BaseDataset) -> int:
        """Call function to collect indexes.

        Args:
            dataset (:obj:`MultiImageMixDataset`): The dataset.

        Returns:
            list: indexes.
        """
        # Three extra images complete the 2x2 mosaic.
        # NOTE(review): assumes numpy-style randint (high bound exclusive),
        # otherwise len(dataset) would be out of range — confirm that
        # ``random`` here is ``numpy.random``.
        indexes = [random.randint(0, len(dataset)) for _ in range(3)]
        return indexes

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Mosaic transform function.
Args: results (dict): Result dict. Returns: dict: Updated result dict. """ if random.uniform(0, 1) > self.prob: return results assert 'mix_results' in results mosaic_bboxes = [] mosaic_bboxes_labels = [] mosaic_ignore_flags = [] if len(results['img'].shape) == 3: mosaic_img = np.full( (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3), self.pad_val, dtype=results['img'].dtype) else: mosaic_img = np.full( (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)), self.pad_val, dtype=results['img'].dtype) # mosaic center x, y center_x = int( random.uniform(*self.center_ratio_range) * self.img_scale[0]) center_y = int( random.uniform(*self.center_ratio_range) * self.img_scale[1]) center_position = (center_x, center_y) loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') for i, loc in enumerate(loc_strs): if loc == 'top_left': results_patch = copy.deepcopy(results) else: results_patch = copy.deepcopy(results['mix_results'][i - 1]) img_i = results_patch['img'] h_i, w_i = img_i.shape[:2] # keep_ratio resize scale_ratio_i = min(self.img_scale[1] / h_i, self.img_scale[0] / w_i) img_i = mmcv.imresize( img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) # compute the combine parameters paste_coord, crop_coord = self._mosaic_combine( loc, center_position, img_i.shape[:2][::-1]) x1_p, y1_p, x2_p, y2_p = paste_coord x1_c, y1_c, x2_c, y2_c = crop_coord # crop and paste image mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] # adjust coordinate gt_bboxes_i = results_patch['gt_bboxes'] gt_bboxes_labels_i = results_patch['gt_bboxes_labels'] gt_ignore_flags_i = results_patch['gt_ignore_flags'] padw = x1_p - x1_c padh = y1_p - y1_c gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i]) gt_bboxes_i.translate_([padw, padh]) mosaic_bboxes.append(gt_bboxes_i) mosaic_bboxes_labels.append(gt_bboxes_labels_i) mosaic_ignore_flags.append(gt_ignore_flags_i) mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0) mosaic_bboxes_labels = 
np.concatenate(mosaic_bboxes_labels, 0) mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0) if self.bbox_clip_border: mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]]) # remove outside bboxes inside_inds = mosaic_bboxes.is_inside( [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy() mosaic_bboxes = mosaic_bboxes[inside_inds] mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds] mosaic_ignore_flags = mosaic_ignore_flags[inside_inds] results['img'] = mosaic_img results['img_shape'] = mosaic_img.shape[:2] results['gt_bboxes'] = mosaic_bboxes results['gt_bboxes_labels'] = mosaic_bboxes_labels results['gt_ignore_flags'] = mosaic_ignore_flags return results def _mosaic_combine( self, loc: str, center_position_xy: Sequence[float], img_shape_wh: Sequence[int]) -> Tuple[Tuple[int], Tuple[int]]: """Calculate global coordinate of mosaic image and local coordinate of cropped sub-image. Args: loc (str): Index for the sub-image, loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right'). center_position_xy (Sequence[float]): Mixing center for 4 images, (x, y). img_shape_wh (Sequence[int]): Width and height of sub-image Returns: tuple[tuple[float]]: Corresponding coordinate of pasting and cropping - paste_coord (tuple): paste corner coordinate in mosaic image. - crop_coord (tuple): crop corner coordinate in mosaic image. 
""" assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') if loc == 'top_left': # index0 to top left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ max(center_position_xy[1] - img_shape_wh[1], 0), \ center_position_xy[0], \ center_position_xy[1] crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( y2 - y1), img_shape_wh[0], img_shape_wh[1] elif loc == 'top_right': # index1 to top right part of image x1, y1, x2, y2 = center_position_xy[0], \ max(center_position_xy[1] - img_shape_wh[1], 0), \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[0] * 2), \ center_position_xy[1] crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( img_shape_wh[0], x2 - x1), img_shape_wh[1] elif loc == 'bottom_left': # index2 to bottom left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ center_position_xy[1], \ center_position_xy[0], \ min(self.img_scale[1] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( y2 - y1, img_shape_wh[1]) else: # index3 to bottom right part of image x1, y1, x2, y2 = center_position_xy[0], \ center_position_xy[1], \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[0] * 2), \ min(self.img_scale[1] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = 0, 0, min(img_shape_wh[0], x2 - x1), min(y2 - y1, img_shape_wh[1]) paste_coord = x1, y1, x2, y2 return paste_coord, crop_coord def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(img_scale={self.img_scale}, ' repr_str += f'center_ratio_range={self.center_ratio_range}, ' repr_str += f'pad_val={self.pad_val}, ' repr_str += f'prob={self.prob})' return repr_str @TRANSFORMS.register_module() class MixUp(BaseTransform): """MixUp data augmentation. .. 
code:: text mixup transform +------------------------------+ | mixup image | | | +--------|--------+ | | | | | | |---------------+ | | | | | | | | image | | | | | | | | | | | |-----------------+ | | pad | +------------------------------+ The mixup transform steps are as follows: 1. Another random image is picked by dataset and embedded in the top left patch(after padding and resizing) 2. The target of mixup transform is the weighted average of mixup image and origin image. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_ignore_flags (bool) (optional) - mix_results (List[dict]) Modified Keys: - img - img_shape - gt_bboxes (optional) - gt_bboxes_labels (optional) - gt_ignore_flags (optional) Args: img_scale (Sequence[int]): Image output size after mixup pipeline. The shape order should be (width, height). Defaults to (640, 640). ratio_range (Sequence[float]): Scale ratio of mixup image. Defaults to (0.5, 1.5). flip_ratio (float): Horizontal flip ratio of mixup image. Defaults to 0.5. pad_val (int): Pad value. Defaults to 114. max_iters (int): The maximum number of iterations. If the number of iterations is greater than `max_iters`, but gt_bbox is still empty, then the iteration is terminated. Defaults to 15. bbox_clip_border (bool, optional): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. 
""" def __init__(self, img_scale: Tuple[int, int] = (640, 640), ratio_range: Tuple[float, float] = (0.5, 1.5), flip_ratio: float = 0.5, pad_val: float = 114.0, max_iters: int = 15, bbox_clip_border: bool = True) -> None: assert isinstance(img_scale, tuple) log_img_scale(img_scale, skip_square=True, shape_order='wh') self.dynamic_scale = img_scale self.ratio_range = ratio_range self.flip_ratio = flip_ratio self.pad_val = pad_val self.max_iters = max_iters self.bbox_clip_border = bbox_clip_border @cache_randomness def get_indexes(self, dataset: BaseDataset) -> int: """Call function to collect indexes. Args: dataset (:obj:`MultiImageMixDataset`): The dataset. Returns: list: indexes. """ for i in range(self.max_iters): index = random.randint(0, len(dataset)) gt_bboxes_i = dataset[index]['gt_bboxes'] if len(gt_bboxes_i) != 0: break return index @autocast_box_type() def transform(self, results: dict) -> dict: """MixUp transform function. Args: results (dict): Result dict. Returns: dict: Updated result dict. """ assert 'mix_results' in results assert len( results['mix_results']) == 1, 'MixUp only support 2 images now !' if results['mix_results'][0]['gt_bboxes'].shape[0] == 0: # empty bbox return results retrieve_results = results['mix_results'][0] retrieve_img = retrieve_results['img'] jit_factor = random.uniform(*self.ratio_range) is_filp = random.uniform(0, 1) > self.flip_ratio if len(retrieve_img.shape) == 3: out_img = np.ones( (self.dynamic_scale[1], self.dynamic_scale[0], 3), dtype=retrieve_img.dtype) * self.pad_val else: out_img = np.ones( self.dynamic_scale[::-1], dtype=retrieve_img.dtype) * self.pad_val # 1. keep_ratio resize scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0], self.dynamic_scale[0] / retrieve_img.shape[1]) retrieve_img = mmcv.imresize( retrieve_img, (int(retrieve_img.shape[1] * scale_ratio), int(retrieve_img.shape[0] * scale_ratio))) # 2. paste out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img # 3. 
scale jit scale_ratio *= jit_factor out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor), int(out_img.shape[0] * jit_factor))) # 4. flip if is_filp: out_img = out_img[:, ::-1, :] # 5. random crop ori_img = results['img'] origin_h, origin_w = out_img.shape[:2] target_h, target_w = ori_img.shape[:2] padded_img = np.ones((max(origin_h, target_h), max( origin_w, target_w), 3)) * self.pad_val padded_img = padded_img.astype(np.uint8) padded_img[:origin_h, :origin_w] = out_img x_offset, y_offset = 0, 0 if padded_img.shape[0] > target_h: y_offset = random.randint(0, padded_img.shape[0] - target_h) if padded_img.shape[1] > target_w: x_offset = random.randint(0, padded_img.shape[1] - target_w) padded_cropped_img = padded_img[y_offset:y_offset + target_h, x_offset:x_offset + target_w] # 6. adjust bbox retrieve_gt_bboxes = retrieve_results['gt_bboxes'] retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio]) if self.bbox_clip_border: retrieve_gt_bboxes.clip_([origin_h, origin_w]) if is_filp: retrieve_gt_bboxes.flip_([origin_h, origin_w], direction='horizontal') # 7. filter cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone() cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset]) if self.bbox_clip_border: cp_retrieve_gt_bboxes.clip_([target_h, target_w]) # 8. 
mix up ori_img = ori_img.astype(np.float32) mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32) retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels'] retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags'] mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat( (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0) mixup_gt_bboxes_labels = np.concatenate( (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0) mixup_gt_ignore_flags = np.concatenate( (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0) # remove outside bbox inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy() mixup_gt_bboxes = mixup_gt_bboxes[inside_inds] mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds] mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds] results['img'] = mixup_img.astype(np.uint8) results['img_shape'] = mixup_img.shape[:2] results['gt_bboxes'] = mixup_gt_bboxes results['gt_bboxes_labels'] = mixup_gt_bboxes_labels results['gt_ignore_flags'] = mixup_gt_ignore_flags return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(dynamic_scale={self.dynamic_scale}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'flip_ratio={self.flip_ratio}, ' repr_str += f'pad_val={self.pad_val}, ' repr_str += f'max_iters={self.max_iters}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class RandomAffine(BaseTransform): """Random affine transform data augmentation. This operation randomly generates affine transform matrix which including rotation, translation, shear and scaling transforms. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_ignore_flags (bool) (optional) Modified Keys: - img - img_shape - gt_bboxes (optional) - gt_bboxes_labels (optional) - gt_ignore_flags (optional) Args: max_rotate_degree (float): Maximum degrees of rotation transform. 
            Defaults to 10.
        max_translate_ratio (float): Maximum ratio of translation.
            Defaults to 0.1.
        scaling_ratio_range (tuple[float]): Min and max ratio of
            scaling transform. Defaults to (0.5, 1.5).
        max_shear_degree (float): Maximum degrees of shear
            transform. Defaults to 2.
        border (tuple[int]): Distance from width and height sides of input
            image to adjust output shape. Only used in mosaic dataset.
            Defaults to (0, 0).
        border_val (tuple[int]): Border padding values of 3 channels.
            Defaults to (114, 114, 114).
        bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some dataset like MOT17, the gt bboxes
            are allowed to cross the border of images. Therefore, we don't
            need to clip the gt bboxes in these cases. Defaults to True.
    """

    def __init__(self,
                 max_rotate_degree: float = 10.0,
                 max_translate_ratio: float = 0.1,
                 scaling_ratio_range: Tuple[float, float] = (0.5, 1.5),
                 max_shear_degree: float = 2.0,
                 border: Tuple[int, int] = (0, 0),
                 border_val: Tuple[int, int, int] = (114, 114, 114),
                 bbox_clip_border: bool = True) -> None:
        assert 0 <= max_translate_ratio <= 1
        assert scaling_ratio_range[0] <= scaling_ratio_range[1]
        assert scaling_ratio_range[0] > 0
        self.max_rotate_degree = max_rotate_degree
        self.max_translate_ratio = max_translate_ratio
        self.scaling_ratio_range = scaling_ratio_range
        self.max_shear_degree = max_shear_degree
        self.border = border
        self.border_val = border_val
        self.bbox_clip_border = bbox_clip_border

    @cache_randomness
    def _get_random_homography_matrix(self, height, width):
        """Sample a random 3x3 warp composed of rotation, scaling, shear and
        translation (composed right-to-left via matrix product)."""
        # Rotation
        rotation_degree = random.uniform(-self.max_rotate_degree,
                                         self.max_rotate_degree)
        rotation_matrix = self._get_rotation_matrix(rotation_degree)

        # Scaling
        scaling_ratio = random.uniform(self.scaling_ratio_range[0],
                                       self.scaling_ratio_range[1])
        scaling_matrix = self._get_scaling_matrix(scaling_ratio)

        # Shear
        x_degree = random.uniform(-self.max_shear_degree,
                                  self.max_shear_degree)
        y_degree = random.uniform(-self.max_shear_degree,
                                  self.max_shear_degree)
        shear_matrix = self._get_shear_matrix(x_degree, y_degree)

        # Translation
        trans_x = random.uniform(-self.max_translate_ratio,
                                 self.max_translate_ratio) * width
        trans_y = random.uniform(-self.max_translate_ratio,
                                 self.max_translate_ratio) * height
        translate_matrix = self._get_translation_matrix(trans_x, trans_y)
        warp_matrix = (
            translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix)
        return warp_matrix

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Apply the sampled affine warp to the image and gt bboxes.

        Args:
            results (dict): Result dict.

        Returns:
            dict: Updated result dict.
        """
        img = results['img']
        height = img.shape[0] + self.border[1] * 2
        width = img.shape[1] + self.border[0] * 2

        warp_matrix = self._get_random_homography_matrix(height, width)

        img = cv2.warpPerspective(
            img,
            warp_matrix,
            dsize=(width, height),
            borderValue=self.border_val)
        results['img'] = img
        results['img_shape'] = img.shape[:2]

        bboxes = results['gt_bboxes']
        num_bboxes = len(bboxes)
        if num_bboxes:
            bboxes.project_(warp_matrix)
            if self.bbox_clip_border:
                bboxes.clip_([height, width])
            # remove outside bbox
            valid_index = bboxes.is_inside([height, width]).numpy()
            results['gt_bboxes'] = bboxes[valid_index]
            results['gt_bboxes_labels'] = results['gt_bboxes_labels'][
                valid_index]
            results['gt_ignore_flags'] = results['gt_ignore_flags'][
                valid_index]

            if 'gt_masks' in results:
                raise NotImplementedError('RandomAffine only supports bbox.')
        return results

    def __repr__(self):
        """Return a readable representation of the transform config."""
        repr_str = self.__class__.__name__
        repr_str += f'(max_rotate_degree={self.max_rotate_degree}, '
        repr_str += f'max_translate_ratio={self.max_translate_ratio}, '
        repr_str += f'scaling_ratio_range={self.scaling_ratio_range}, '
        repr_str += f'max_shear_degree={self.max_shear_degree}, '
        repr_str += f'border={self.border}, '
        repr_str += f'border_val={self.border_val}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str

    @staticmethod
    def _get_rotation_matrix(rotate_degrees: float) -> np.ndarray:
        """Build a 3x3 homogeneous rotation matrix for the given angle."""
        radian = math.radians(rotate_degrees)
        rotation_matrix = np.array(
            [[np.cos(radian), -np.sin(radian), 0.],
             [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]],
            dtype=np.float32)
        return rotation_matrix

    @staticmethod
    def _get_scaling_matrix(scale_ratio: float) -> np.ndarray:
        """Build a 3x3 homogeneous isotropic scaling matrix."""
        scaling_matrix = np.array(
            [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]],
            dtype=np.float32)
        return scaling_matrix

    @staticmethod
    def _get_shear_matrix(x_shear_degrees: float,
                          y_shear_degrees: float) -> np.ndarray:
        """Build a 3x3 homogeneous shear matrix from x/y shear angles."""
        x_radian = math.radians(x_shear_degrees)
        y_radian = math.radians(y_shear_degrees)
        shear_matrix = np.array([[1, np.tan(x_radian), 0.],
                                 [np.tan(y_radian), 1, 0.], [0., 0., 1.]],
                                dtype=np.float32)
        return shear_matrix

    @staticmethod
    def _get_translation_matrix(x: float, y: float) -> np.ndarray:
        """Build a 3x3 homogeneous translation matrix."""
        translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]],
                                      dtype=np.float32)
        return translation_matrix


@TRANSFORMS.register_module()
class YOLOXHSVRandomAug(BaseTransform):
    """Apply HSV augmentation to image sequentially. It is referenced from
    https://github.com/Megvii-
    BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.

    Required Keys:

    - img

    Modified Keys:

    - img

    Args:
        hue_delta (int): delta of hue. Defaults to 5.
        saturation_delta (int): delta of saturation. Defaults to 30.
        value_delta (int): delta of value. Defaults to 30.
    """

    def __init__(self,
                 hue_delta: int = 5,
                 saturation_delta: int = 30,
                 value_delta: int = 30) -> None:
        self.hue_delta = hue_delta
        self.saturation_delta = saturation_delta
        self.value_delta = value_delta

    @cache_randomness
    def _get_hsv_gains(self):
        """Sample per-channel HSV offsets as int16 gains."""
        hsv_gains = np.random.uniform(-1, 1, 3) * [
            self.hue_delta, self.saturation_delta, self.value_delta
        ]
        # random selection of h, s, v
        hsv_gains *= np.random.randint(0, 2, 3)
        # prevent overflow
        hsv_gains = hsv_gains.astype(np.int16)
        return hsv_gains

    def transform(self, results: dict) -> dict:
        """Apply the sampled HSV jitter to ``results['img']`` in place.

        Args:
            results (dict): Result dict.

        Returns:
            dict: Updated result dict.
        """
        img = results['img']
        hsv_gains = self._get_hsv_gains()
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)

        # hue wraps modulo 180 (OpenCV hue range); s/v are clipped to uint8
        img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180
        img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)
        img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)
        cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)

        results['img'] = img
        return results

    def __repr__(self):
        """Return a readable representation of the transform config."""
        repr_str = self.__class__.__name__
        repr_str += f'(hue_delta={self.hue_delta}, '
        repr_str += f'saturation_delta={self.saturation_delta}, '
        repr_str += f'value_delta={self.value_delta})'
        return repr_str


@TRANSFORMS.register_module()
class CopyPaste(BaseTransform):
    """Simple Copy-Paste is a Strong Data Augmentation Method for Instance
    Segmentation

    The simple copy-paste transform steps are as follows:

    1. The destination image is already resized with aspect ratio kept,
       cropped and padded.
    2. Randomly select a source image, which is also already resized
       with aspect ratio kept, cropped and padded in a similar way
       as the destination image.
    3. Randomly select some objects from the source image.
    4. Paste these source objects to the destination image directly,
       due to the source and destination image have the same size.
    5. Update object masks of the destination image, for some origin objects
       may be occluded.
    6.
       Generate bboxes from the updated destination masks and filter some
       objects which are totally occluded, and adjust bboxes which are
       partly occluded.
    7. Append selected source bboxes, masks, and labels.

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - gt_masks (BitmapMasks) (optional)

    Modified Keys:

    - img
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)
    - gt_masks (optional)

    Args:
        max_num_pasted (int): The maximum number of pasted objects.
            Defaults to 100.
        bbox_occluded_thr (int): The threshold of occluded bbox.
            Defaults to 10.
        mask_occluded_thr (int): The threshold of occluded mask.
            Defaults to 300.
        selected (bool): Whether select objects or not. If select is False,
            all objects of the source image will be pasted to the destination
            image.
            Defaults to True.
    """

    def __init__(
        self,
        max_num_pasted: int = 100,
        bbox_occluded_thr: int = 10,
        mask_occluded_thr: int = 300,
        selected: bool = True,
    ) -> None:
        self.max_num_pasted = max_num_pasted
        self.bbox_occluded_thr = bbox_occluded_thr
        self.mask_occluded_thr = mask_occluded_thr
        self.selected = selected

    @cache_randomness
    def get_indexes(self, dataset: BaseDataset) -> int:
        """Call function to collect indexes.

        Args:
            dataset (:obj:`MultiImageMixDataset`): The dataset.

        Returns:
            int: The sampled index.
        """
        # NOTE(review): the upper bound ``len(dataset)`` is only valid if
        # ``random`` here is ``numpy.random`` (high-exclusive randint); with
        # the stdlib ``random`` module this could index one past the end of
        # the dataset — confirm against the module-level import.
        return random.randint(0, len(dataset))

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Transform function to make a copy-paste of image.

        Args:
            results (dict): Result dict.

        Returns:
            dict: Result dict with copy-paste transformed.
        """
        assert 'mix_results' in results
        num_images = len(results['mix_results'])
        assert num_images == 1, \
            f'CopyPaste only supports processing 2 images, got {num_images}'
        if self.selected:
            selected_results = self._select_object(results['mix_results'][0])
        else:
            selected_results = results['mix_results'][0]
        return self._copy_paste(results, selected_results)

    @cache_randomness
    def _get_selected_inds(self, num_bboxes: int) -> np.ndarray:
        """Randomly choose a subset of object indices to paste."""
        max_num_pasted = min(num_bboxes + 1, self.max_num_pasted)
        num_pasted = np.random.randint(0, max_num_pasted)
        return np.random.choice(num_bboxes, size=num_pasted, replace=False)

    def _select_object(self, results: dict) -> dict:
        """Select some objects from the source results."""
        bboxes = results['gt_bboxes']
        labels = results['gt_bboxes_labels']
        masks = results['gt_masks']
        ignore_flags = results['gt_ignore_flags']

        selected_inds = self._get_selected_inds(bboxes.shape[0])

        selected_bboxes = bboxes[selected_inds]
        selected_labels = labels[selected_inds]
        selected_masks = masks[selected_inds]
        selected_ignore_flags = ignore_flags[selected_inds]

        results['gt_bboxes'] = selected_bboxes
        results['gt_bboxes_labels'] = selected_labels
        results['gt_masks'] = selected_masks
        results['gt_ignore_flags'] = selected_ignore_flags
        return results

    def _copy_paste(self, dst_results: dict, src_results: dict) -> dict:
        """CopyPaste transform function.

        Args:
            dst_results (dict): Result dict of the destination image.
            src_results (dict): Result dict of the source image.

        Returns:
            dict: Updated result dict.
        """
        dst_img = dst_results['img']
        dst_bboxes = dst_results['gt_bboxes']
        dst_labels = dst_results['gt_bboxes_labels']
        dst_masks = dst_results['gt_masks']
        dst_ignore_flags = dst_results['gt_ignore_flags']

        src_img = src_results['img']
        src_bboxes = src_results['gt_bboxes']
        src_labels = src_results['gt_bboxes_labels']
        src_masks = src_results['gt_masks']
        src_ignore_flags = src_results['gt_ignore_flags']

        if len(src_bboxes) == 0:
            return dst_results

        # update masks and generate bboxes from updated masks
        composed_mask = np.where(np.any(src_masks.masks, axis=0), 1, 0)
        updated_dst_masks = self._get_updated_masks(dst_masks, composed_mask)
        updated_dst_bboxes = updated_dst_masks.get_bboxes(type(dst_bboxes))
        assert len(updated_dst_bboxes) == len(updated_dst_masks)

        # filter totally occluded objects
        l1_distance = (updated_dst_bboxes.tensor - dst_bboxes.tensor).abs()
        bboxes_inds = (l1_distance <= self.bbox_occluded_thr).all(
            dim=-1).numpy()
        masks_inds = updated_dst_masks.masks.sum(
            axis=(1, 2)) > self.mask_occluded_thr
        valid_inds = bboxes_inds | masks_inds

        # Paste source objects to destination image directly
        img = dst_img * (1 - composed_mask[..., np.newaxis]
                         ) + src_img * composed_mask[..., np.newaxis]
        bboxes = src_bboxes.cat([updated_dst_bboxes[valid_inds], src_bboxes])
        labels = np.concatenate([dst_labels[valid_inds], src_labels])
        masks = np.concatenate(
            [updated_dst_masks.masks[valid_inds], src_masks.masks])
        ignore_flags = np.concatenate(
            [dst_ignore_flags[valid_inds], src_ignore_flags])

        dst_results['img'] = img
        dst_results['gt_bboxes'] = bboxes
        dst_results['gt_bboxes_labels'] = labels
        dst_results['gt_masks'] = BitmapMasks(masks, masks.shape[1],
                                              masks.shape[2])
        dst_results['gt_ignore_flags'] = ignore_flags

        return dst_results

    def _get_updated_masks(self, masks: BitmapMasks,
                           composed_mask: np.ndarray) -> BitmapMasks:
        """Update masks with composed mask."""
        assert masks.masks.shape[-2:] == composed_mask.shape[-2:], \
            'Cannot compare two arrays of different size'
        # zero out destination-mask pixels covered by the pasted objects
        masks.masks = np.where(composed_mask, 0, masks.masks)
        return masks

    def __repr__(self):
        """Return a readable representation of the transform config."""
        repr_str = self.__class__.__name__
        repr_str += f'(max_num_pasted={self.max_num_pasted}, '
        repr_str += f'bbox_occluded_thr={self.bbox_occluded_thr}, '
        repr_str += f'mask_occluded_thr={self.mask_occluded_thr}, '
        repr_str += f'selected={self.selected})'
        return repr_str


@TRANSFORMS.register_module()
class RandomErasing(BaseTransform):
    """RandomErasing operation.

    Random Erasing randomly selects a rectangle region
    in an image and erases its pixels with random values.
    `RandomErasing <https://arxiv.org/abs/1708.04896>`_.

    Required Keys:

    - img
    - gt_bboxes (HorizontalBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - gt_masks (BitmapMasks) (optional)

    Modified Keys:
    - img
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)
    - gt_masks (optional)

    Args:
        n_patches (int or tuple[int, int]): Number of regions to be dropped.
            If it is given as a tuple, number of patches will be randomly
            selected from the closed interval [``n_patches[0]``,
            ``n_patches[1]``].
        ratio (float or tuple[float, float]): The ratio of erased regions.
            It can be ``float`` to use a fixed ratio or ``tuple[float,
            float]`` to randomly choose ratio from the interval.
        squared (bool): Whether to erase square region. Defaults to True.
        bbox_erased_thr (float): The threshold for the maximum area
            proportion of the bbox to be erased. When the proportion of the
            area where the bbox is erased is greater than the threshold, the
            bbox will be removed. Defaults to 0.9.
        img_border_value (int or float or tuple): The filled values for
            image border. If float, the same fill value will be used for
            all the three channels of image. If tuple, it should be 3
            elements. Defaults to 128.
        mask_border_value (int): The fill value used for masks. Defaults to 0.
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equals ``ignore_label`` in ``semantic_head``
            of the corresponding config. Defaults to 255.
    """

    def __init__(
        self,
        n_patches: Union[int, Tuple[int, int]],
        ratio: Union[float, Tuple[float, float]],
        squared: bool = True,
        bbox_erased_thr: float = 0.9,
        img_border_value: Union[int, float, tuple] = 128,
        mask_border_value: int = 0,
        seg_ignore_label: int = 255,
    ) -> None:
        # normalize scalar arguments to (low, high) tuples
        if isinstance(n_patches, tuple):
            assert len(n_patches) == 2 and 0 <= n_patches[0] < n_patches[1]
        else:
            n_patches = (n_patches, n_patches)
        if isinstance(ratio, tuple):
            assert len(ratio) == 2 and 0 <= ratio[0] < ratio[1] <= 1
        else:
            ratio = (ratio, ratio)

        self.n_patches = n_patches
        self.ratio = ratio
        self.squared = squared
        self.bbox_erased_thr = bbox_erased_thr
        self.img_border_value = img_border_value
        self.mask_border_value = mask_border_value
        self.seg_ignore_label = seg_ignore_label

    @cache_randomness
    def _get_patches(self, img_shape: Tuple[int, int]) -> List[list]:
        """Get patches for random erasing."""
        patches = []
        n_patches = np.random.randint(self.n_patches[0], self.n_patches[1] + 1)
        for _ in range(n_patches):
            if self.squared:
                ratio = np.random.random() * (self.ratio[1] -
                                              self.ratio[0]) + self.ratio[0]
                ratio = (ratio, ratio)
            else:
                ratio = (np.random.random() * (self.ratio[1] - self.ratio[0]) +
                         self.ratio[0], np.random.random() *
                         (self.ratio[1] - self.ratio[0]) + self.ratio[0])
            ph, pw = int(img_shape[0] * ratio[0]), int(img_shape[1] * ratio[1])
            px1, py1 = np.random.randint(0,
                                         img_shape[1] - pw), np.random.randint(
                                             0, img_shape[0] - ph)
            px2, py2 = px1 + pw, py1 + ph
            patches.append([px1, py1, px2, py2])
        return np.array(patches)

    def _transform_img(self, results: dict, patches: List[list]) -> None:
        """Random erasing the image."""
        for patch in patches:
            px1, py1, px2, py2 = patch
            results['img'][py1:py2, px1:px2, :] = self.img_border_value

    def _transform_bboxes(self, results: dict, patches: List[list]) -> None:
        """Random erasing the bboxes."""
        bboxes = results['gt_bboxes']
        # TODO: unify the logic by using operators in BaseBoxes.
        assert isinstance(bboxes, HorizontalBoxes)
        bboxes = bboxes.numpy()
        left_top = np.maximum(bboxes[:, None, :2], patches[:, :2])
        right_bottom = np.minimum(bboxes[:, None, 2:], patches[:, 2:])
        wh = np.maximum(right_bottom - left_top, 0)
        inter_areas = wh[:, :, 0] * wh[:, :, 1]
        bbox_areas = (bboxes[:, 2] - bboxes[:, 0]) * (
            bboxes[:, 3] - bboxes[:, 1])
        # drop boxes whose erased-area fraction exceeds the threshold
        bboxes_erased_ratio = inter_areas.sum(-1) / (bbox_areas + 1e-7)
        valid_inds = bboxes_erased_ratio < self.bbox_erased_thr
        results['gt_bboxes'] = HorizontalBoxes(bboxes[valid_inds])
        results['gt_bboxes_labels'] = results['gt_bboxes_labels'][valid_inds]
        results['gt_ignore_flags'] = results['gt_ignore_flags'][valid_inds]
        if results.get('gt_masks', None) is not None:
            results['gt_masks'] = results['gt_masks'][valid_inds]

    def _transform_masks(self, results: dict, patches: List[list]) -> None:
        """Random erasing the masks."""
        for patch in patches:
            px1, py1, px2, py2 = patch
            results['gt_masks'].masks[:, py1:py2,
                                      px1:px2] = self.mask_border_value

    def _transform_seg(self, results: dict, patches: List[list]) -> None:
        """Random erasing the segmentation map."""
        for patch in patches:
            px1, py1, px2, py2 = patch
            results['gt_seg_map'][py1:py2, px1:px2] = self.seg_ignore_label

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Transform function to erase some regions of image."""
        patches = self._get_patches(results['img_shape'])
        self._transform_img(results, patches)
        if results.get('gt_bboxes', None) is not None:
            self._transform_bboxes(results, patches)
        if results.get('gt_masks', None) is not None:
            self._transform_masks(results, patches)
        if results.get('gt_seg_map', None) is not None:
            self._transform_seg(results, patches)
        return results

    def __repr__(self):
        """Return a readable representation of the transform config."""
        repr_str = self.__class__.__name__
        repr_str += f'(n_patches={self.n_patches}, '
        repr_str += f'ratio={self.ratio}, '
        repr_str += f'squared={self.squared}, '
        repr_str += f'bbox_erased_thr={self.bbox_erased_thr}, '
        repr_str += f'img_border_value={self.img_border_value}, '
        repr_str += f'mask_border_value={self.mask_border_value}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label})'
        return repr_str


@TRANSFORMS.register_module()
class CachedMosaic(Mosaic):
    """Cached mosaic augmentation.

    Cached mosaic transform will random select images from the cache
    and combine them into one output image.

    .. code:: text

                        mosaic transform
                           center_x
                +------------------------------+
                |       pad        |  pad      |
                |      +-----------+           |
                |      |           |           |
                |      |  image1   |--------+  |
                |      |           |        |  |
                |      |           | image2 |  |
     center_y   |----+-------------+-----------|
                |    |   cropped   |           |
                |pad |   image3    |  image4   |
                |    |             |           |
                +----|-------------+-----------+
                     |             |
                     +-------------+

    The cached mosaic transform steps are as follows:

        1. Append the results from the last transform into the cache.
        2. Choose the mosaic center as the intersections of 4 images
        3. Get the left top image according to the index, and randomly
           sample another 3 images from the result cache.
        4. Sub image will be cropped if image is larger than mosaic patch

    Required Keys:

    - img
    - gt_bboxes (np.float32) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)

    Args:
        img_scale (Sequence[int]): Image size after mosaic pipeline of single
            image. The shape order should be (width, height).
            Defaults to (640, 640).
        center_ratio_range (Sequence[float]): Center ratio range of mosaic
            output. Defaults to (0.5, 1.5).
        bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some dataset like MOT17, the gt bboxes
            are allowed to cross the border of images. Therefore, we don't
            need to clip the gt bboxes in these cases. Defaults to True.
        pad_val (int): Pad value. Defaults to 114.
        prob (float): Probability of applying this transformation.
            Defaults to 1.0.
        max_cached_images (int): The maximum length of the cache.
            The larger the cache, the stronger the randomness of this
            transform. As a rule of thumb, providing 10 caches for each image
            suffices for randomness. Defaults to 40.
        random_pop (bool): Whether to randomly pop a result from the cache
            when the cache is full. If set to False, use FIFO popping method.
            Defaults to True.
    """

    def __init__(self,
                 *args,
                 max_cached_images: int = 40,
                 random_pop: bool = True,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.results_cache = []
        self.random_pop = random_pop
        assert max_cached_images >= 4, 'The length of cache must >= 4, ' \
                                       f'but got {max_cached_images}.'
        self.max_cached_images = max_cached_images

    @cache_randomness
    def get_indexes(self, cache: list) -> list:
        """Call function to collect indexes.

        Args:
            cache (list): The results cache.

        Returns:
            list: indexes.
        """
        # sample the other 3 mosaic tiles from the cache
        indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]
        return indexes

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """Mosaic transform function.

        Args:
            results (dict): Result dict.

        Returns:
            dict: Updated result dict.
        """
        # cache and pop images
        self.results_cache.append(copy.deepcopy(results))
        if len(self.results_cache) > self.max_cached_images:
            if self.random_pop:
                index = random.randint(0, len(self.results_cache) - 1)
            else:
                index = 0
            self.results_cache.pop(index)

        # not enough cached images yet to build a 4-image mosaic
        if len(self.results_cache) <= 4:
            return results

        if random.uniform(0, 1) > self.prob:
            return results
        indices = self.get_indexes(self.results_cache)
        mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]

        # TODO: refactor mosaic to reuse these code.
        mosaic_bboxes = []
        mosaic_bboxes_labels = []
        mosaic_ignore_flags = []
        mosaic_masks = []
        with_mask = True if 'gt_masks' in results else False

        if len(results['img'].shape) == 3:
            mosaic_img = np.full(
                (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),
                self.pad_val,
                dtype=results['img'].dtype)
        else:
            mosaic_img = np.full(
                (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),
                self.pad_val,
                dtype=results['img'].dtype)

        # mosaic center x, y
        center_x = int(
            random.uniform(*self.center_ratio_range) * self.img_scale[0])
        center_y = int(
            random.uniform(*self.center_ratio_range) * self.img_scale[1])
        center_position = (center_x, center_y)

        loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')
        for i, loc in enumerate(loc_strs):
            if loc == 'top_left':
                results_patch = copy.deepcopy(results)
            else:
                results_patch = copy.deepcopy(mix_results[i - 1])

            img_i = results_patch['img']
            h_i, w_i = img_i.shape[:2]
            # keep_ratio resize
            scale_ratio_i = min(self.img_scale[1] / h_i,
                                self.img_scale[0] / w_i)
            img_i = mmcv.imresize(
                img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))

            # compute the combine parameters
            paste_coord, crop_coord = self._mosaic_combine(
                loc, center_position, img_i.shape[:2][::-1])
            x1_p, y1_p, x2_p, y2_p = paste_coord
            x1_c, y1_c, x2_c, y2_c = crop_coord

            # crop and paste image
            mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]

            # adjust coordinate
            gt_bboxes_i = results_patch['gt_bboxes']
            gt_bboxes_labels_i = results_patch['gt_bboxes_labels']
            gt_ignore_flags_i = results_patch['gt_ignore_flags']

            padw = x1_p - x1_c
            padh = y1_p - y1_c
            gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])
            gt_bboxes_i.translate_([padw, padh])
            mosaic_bboxes.append(gt_bboxes_i)
            mosaic_bboxes_labels.append(gt_bboxes_labels_i)
            mosaic_ignore_flags.append(gt_ignore_flags_i)
            if with_mask and results_patch.get('gt_masks', None) is not None:
                gt_masks_i = results_patch['gt_masks']
                gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))
                # NOTE(review): ``out_shape`` is passed as
                # (img_scale[0]*2, img_scale[1]*2), i.e. (width, height)
                # order, while the mosaic image array itself is indexed
                # (height, width). Harmless for square scales; confirm for
                # non-square ``img_scale``.
                gt_masks_i = gt_masks_i.translate(
                    out_shape=(int(self.img_scale[0] * 2),
                               int(self.img_scale[1] * 2)),
                    offset=padw,
                    direction='horizontal')
                gt_masks_i = gt_masks_i.translate(
                    out_shape=(int(self.img_scale[0] * 2),
                               int(self.img_scale[1] * 2)),
                    offset=padh,
                    direction='vertical')
                mosaic_masks.append(gt_masks_i)

        mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)
        mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)
        mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)

        if self.bbox_clip_border:
            mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])

        # remove outside bboxes
        inside_inds = mosaic_bboxes.is_inside(
            [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()
        mosaic_bboxes = mosaic_bboxes[inside_inds]
        mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]
        mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]

        results['img'] = mosaic_img
        results['img_shape'] = mosaic_img.shape[:2]
        results['gt_bboxes'] = mosaic_bboxes
        results['gt_bboxes_labels'] = mosaic_bboxes_labels
        results['gt_ignore_flags'] = mosaic_ignore_flags

        if with_mask:
            mosaic_masks = mosaic_masks[0].cat(mosaic_masks)
            results['gt_masks'] = mosaic_masks[inside_inds]
        return results

    def __repr__(self):
        """Return a readable representation of the transform config."""
        repr_str = self.__class__.__name__
        repr_str += f'(img_scale={self.img_scale}, '
        repr_str += f'center_ratio_range={self.center_ratio_range}, '
        repr_str += f'pad_val={self.pad_val}, '
        repr_str += f'prob={self.prob}, '
        repr_str += f'max_cached_images={self.max_cached_images}, '
        repr_str += f'random_pop={self.random_pop})'
        return repr_str


@TRANSFORMS.register_module()
class CachedMixUp(BaseTransform):
    """Cached mixup data augmentation.

    .. code:: text

                         mixup transform
                +------------------------------+
                | mixup image   |              |
                |      +--------|--------+     |
                |      |        |        |     |
                |---------------+        |     |
                |      |                 |     |
                |      |      image      |     |
                |      |                 |     |
                |      |                 |     |
                |      |-----------------+     |
                |             pad              |
                +------------------------------+

    The cached mixup transform steps are as follows:

        1. Append the results from the last transform into the cache.
        2.
           Another random image is picked from the cache and embedded in
           the top left patch(after padding and resizing)
        3. The target of mixup transform is the weighted average of mixup
           image and origin image.

    Required Keys:

    - img
    - gt_bboxes (np.float32) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - mix_results (List[dict])

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)

    Args:
        img_scale (Sequence[int]): Image output size after mixup pipeline.
            The shape order should be (width, height). Defaults to (640, 640).
        ratio_range (Sequence[float]): Scale ratio of mixup image.
            Defaults to (0.5, 1.5).
        flip_ratio (float): Horizontal flip ratio of mixup image.
            Defaults to 0.5.
        pad_val (int): Pad value. Defaults to 114.
        max_iters (int): The maximum number of iterations. If the number of
            iterations is greater than `max_iters`, but gt_bbox is still
            empty, then the iteration is terminated. Defaults to 15.
        bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some dataset like MOT17, the gt bboxes
            are allowed to cross the border of images. Therefore, we don't
            need to clip the gt bboxes in these cases. Defaults to True.
        max_cached_images (int): The maximum length of the cache. The larger
            the cache, the stronger the randomness of this transform. As a
            rule of thumb, providing 10 caches for each image suffices for
            randomness. Defaults to 20.
        random_pop (bool): Whether to randomly pop a result from the cache
            when the cache is full. If set to False, use FIFO popping method.
            Defaults to True.
        prob (float): Probability of applying this transformation.
            Defaults to 1.0.
    """

    def __init__(self,
                 img_scale: Tuple[int, int] = (640, 640),
                 ratio_range: Tuple[float, float] = (0.5, 1.5),
                 flip_ratio: float = 0.5,
                 pad_val: float = 114.0,
                 max_iters: int = 15,
                 bbox_clip_border: bool = True,
                 max_cached_images: int = 20,
                 random_pop: bool = True,
                 prob: float = 1.0) -> None:
        assert isinstance(img_scale, tuple)
        assert max_cached_images >= 2, 'The length of cache must >= 2, ' \
                                       f'but got {max_cached_images}.'
        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \
                                 f'got {prob}.'
        self.dynamic_scale = img_scale
        self.ratio_range = ratio_range
        self.flip_ratio = flip_ratio
        self.pad_val = pad_val
        self.max_iters = max_iters
        self.bbox_clip_border = bbox_clip_border
        self.results_cache = []

        self.max_cached_images = max_cached_images
        self.random_pop = random_pop
        self.prob = prob

    @cache_randomness
    def get_indexes(self, cache: list) -> int:
        """Call function to collect indexes.

        Args:
            cache (list): The result cache.

        Returns:
            int: index.
        """
        # Keep sampling until a cached result with at least one gt bbox is
        # found, or until ``max_iters`` attempts have been made.
        for i in range(self.max_iters):
            index = random.randint(0, len(cache) - 1)
            gt_bboxes_i = cache[index]['gt_bboxes']
            if len(gt_bboxes_i) != 0:
                break
        return index

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """MixUp transform function.

        Args:
            results (dict): Result dict.

        Returns:
            dict: Updated result dict.
        """
        # cache and pop images
        self.results_cache.append(copy.deepcopy(results))
        if len(self.results_cache) > self.max_cached_images:
            if self.random_pop:
                index = random.randint(0, len(self.results_cache) - 1)
            else:
                index = 0
            self.results_cache.pop(index)

        # need at least one other cached image to mix with
        if len(self.results_cache) <= 1:
            return results

        if random.uniform(0, 1) > self.prob:
            return results

        index = self.get_indexes(self.results_cache)
        retrieve_results = copy.deepcopy(self.results_cache[index])

        # TODO: refactor mixup to reuse these code.
        if retrieve_results['gt_bboxes'].shape[0] == 0:
            # empty bbox
            return results

        retrieve_img = retrieve_results['img']
        with_mask = True if 'gt_masks' in results else False

        jit_factor = random.uniform(*self.ratio_range)
        is_filp = random.uniform(0, 1) > self.flip_ratio

        if len(retrieve_img.shape) == 3:
            out_img = np.ones(
                (self.dynamic_scale[1], self.dynamic_scale[0], 3),
                dtype=retrieve_img.dtype) * self.pad_val
        else:
            out_img = np.ones(
                self.dynamic_scale[::-1],
                dtype=retrieve_img.dtype) * self.pad_val

        # 1. keep_ratio resize
        scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],
                          self.dynamic_scale[0] / retrieve_img.shape[1])
        retrieve_img = mmcv.imresize(
            retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),
                           int(retrieve_img.shape[0] * scale_ratio)))

        # 2. paste
        out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img

        # 3. scale jit
        scale_ratio *= jit_factor
        out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),
                                          int(out_img.shape[0] * jit_factor)))

        # 4. flip
        if is_filp:
            out_img = out_img[:, ::-1, :]

        # 5. random crop
        ori_img = results['img']
        origin_h, origin_w = out_img.shape[:2]
        target_h, target_w = ori_img.shape[:2]
        padded_img = np.ones((max(origin_h, target_h), max(
            origin_w, target_w), 3)) * self.pad_val
        padded_img = padded_img.astype(np.uint8)
        padded_img[:origin_h, :origin_w] = out_img

        x_offset, y_offset = 0, 0
        if padded_img.shape[0] > target_h:
            y_offset = random.randint(0, padded_img.shape[0] - target_h)
        if padded_img.shape[1] > target_w:
            x_offset = random.randint(0, padded_img.shape[1] - target_w)

        padded_cropped_img = padded_img[y_offset:y_offset + target_h,
                                        x_offset:x_offset + target_w]

        # 6.
adjust bbox retrieve_gt_bboxes = retrieve_results['gt_bboxes'] retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio]) if with_mask: retrieve_gt_masks = retrieve_results['gt_masks'].rescale( scale_ratio) if self.bbox_clip_border: retrieve_gt_bboxes.clip_([origin_h, origin_w]) if is_filp: retrieve_gt_bboxes.flip_([origin_h, origin_w], direction='horizontal') if with_mask: retrieve_gt_masks = retrieve_gt_masks.flip() # 7. filter cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone() cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset]) if with_mask: retrieve_gt_masks = retrieve_gt_masks.translate( out_shape=(target_h, target_w), offset=-x_offset, direction='horizontal') retrieve_gt_masks = retrieve_gt_masks.translate( out_shape=(target_h, target_w), offset=-y_offset, direction='vertical') if self.bbox_clip_border: cp_retrieve_gt_bboxes.clip_([target_h, target_w]) # 8. mix up ori_img = ori_img.astype(np.float32) mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32) retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels'] retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags'] mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat( (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0) mixup_gt_bboxes_labels = np.concatenate( (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0) mixup_gt_ignore_flags = np.concatenate( (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0) if with_mask: mixup_gt_masks = retrieve_gt_masks.cat( [results['gt_masks'], retrieve_gt_masks]) # remove outside bbox inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy() mixup_gt_bboxes = mixup_gt_bboxes[inside_inds] mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds] mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds] if with_mask: mixup_gt_masks = mixup_gt_masks[inside_inds] results['img'] = mixup_img.astype(np.uint8) results['img_shape'] = mixup_img.shape[:2] results['gt_bboxes'] = mixup_gt_bboxes results['gt_bboxes_labels'] 
= mixup_gt_bboxes_labels results['gt_ignore_flags'] = mixup_gt_ignore_flags if with_mask: results['gt_masks'] = mixup_gt_masks return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(dynamic_scale={self.dynamic_scale}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'flip_ratio={self.flip_ratio}, ' repr_str += f'pad_val={self.pad_val}, ' repr_str += f'max_iters={self.max_iters}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border}, ' repr_str += f'max_cached_images={self.max_cached_images}, ' repr_str += f'random_pop={self.random_pop}, ' repr_str += f'prob={self.prob})' return repr_str
140,750
37.699753
79
py
ERD
ERD-main/mmdet/visualization/local_visualizer.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional, Tuple, Union

import cv2
import mmcv
import numpy as np
import torch
from mmengine.dist import master_only
from mmengine.structures import InstanceData, PixelData
from mmengine.visualization import Visualizer

from ..evaluation import INSTANCE_OFFSET
from ..registry import VISUALIZERS
from ..structures import DetDataSample
from ..structures.mask import BitmapMasks, PolygonMasks, bitmap_to_polygon
from .palette import _get_adaptive_scales, get_palette, jitter_color


@VISUALIZERS.register_module()
class DetLocalVisualizer(Visualizer):
    """MMDetection Local Visualizer.

    Draws detection / instance-segmentation / panoptic-segmentation
    annotations and predictions on top of an RGB image.

    Args:
        name (str): Name of the instance. Defaults to 'visualizer'.
        image (np.ndarray, optional): the origin image to draw. The format
            should be RGB. Defaults to None.
        vis_backends (list, optional): Visual backend config list.
            Defaults to None.
        save_dir (str, optional): Save file dir for all storage backends.
            If it is None, the backend storage will not save any data.
        bbox_color (str, tuple(int), optional): Color of bbox lines.
            The tuple of color should be in BGR order. Defaults to None.
        text_color (str, tuple(int), optional): Color of texts.
            The tuple of color should be in BGR order.
            Defaults to (200, 200, 200).
        mask_color (str, tuple(int), optional): Color of masks.
            The tuple of color should be in BGR order.
            Defaults to None.
        line_width (int, float): The linewidth of lines.
            Defaults to 3.
        alpha (int, float): The transparency of bboxes or mask.
            Defaults to 0.8.

    Examples:
        >>> import numpy as np
        >>> import torch
        >>> from mmengine.structures import InstanceData
        >>> from mmdet.structures import DetDataSample
        >>> from mmdet.visualization import DetLocalVisualizer
        >>> det_local_visualizer = DetLocalVisualizer()
        >>> image = np.random.randint(0, 256,
        ...                           size=(10, 12, 3)).astype('uint8')
        >>> gt_instances = InstanceData()
        >>> gt_instances.bboxes = torch.Tensor([[1, 2, 2, 5]])
        >>> gt_instances.labels = torch.randint(0, 2, (1,))
        >>> gt_det_data_sample = DetDataSample()
        >>> gt_det_data_sample.gt_instances = gt_instances
        >>> det_local_visualizer.add_datasample('image', image,
        ...                                     gt_det_data_sample)
        >>> det_local_visualizer.add_datasample(
        ...     'image', image, gt_det_data_sample,
        ...     out_file='out_file.jpg')
        >>> det_local_visualizer.add_datasample(
        ...     'image', image, gt_det_data_sample,
        ...     show=True)
        >>> pred_instances = InstanceData()
        >>> pred_instances.bboxes = torch.Tensor([[2, 4, 4, 8]])
        >>> pred_instances.labels = torch.randint(0, 2, (1,))
        >>> pred_det_data_sample = DetDataSample()
        >>> pred_det_data_sample.pred_instances = pred_instances
        >>> det_local_visualizer.add_datasample('image', image,
        ...                                     gt_det_data_sample,
        ...                                     pred_det_data_sample)
    """

    def __init__(self,
                 name: str = 'visualizer',
                 image: Optional[np.ndarray] = None,
                 vis_backends: Optional[Dict] = None,
                 save_dir: Optional[str] = None,
                 bbox_color: Optional[Union[str, Tuple[int]]] = None,
                 text_color: Optional[Union[str,
                                            Tuple[int]]] = (200, 200, 200),
                 mask_color: Optional[Union[str, Tuple[int]]] = None,
                 line_width: Union[int, float] = 3,
                 alpha: float = 0.8) -> None:
        super().__init__(
            name=name,
            image=image,
            vis_backends=vis_backends,
            save_dir=save_dir)
        self.bbox_color = bbox_color
        self.text_color = text_color
        self.mask_color = mask_color
        self.line_width = line_width
        self.alpha = alpha
        # Set default value. When calling
        # `DetLocalVisualizer().dataset_meta=xxx`,
        # it will override the default value.
        self.dataset_meta = {}

    def _draw_instances(self, image: np.ndarray, instances: ['InstanceData'],
                        classes: Optional[List[str]],
                        palette: Optional[List[tuple]]) -> np.ndarray:
        """Draw instances of GT or prediction.

        Args:
            image (np.ndarray): The image to draw.
            instances (:obj:`InstanceData`): Data structure for
                instance-level annotations or predictions.
            classes (List[str], optional): Category information.
            palette (List[tuple], optional): Palette information
                corresponding to the category.

        Returns:
            np.ndarray: the drawn image which channel is RGB.
        """
        self.set_image(image)

        if 'bboxes' in instances:
            bboxes = instances.bboxes
            labels = instances.labels

            # One palette entry per label id; per-instance colors are
            # looked up by label so same-class instances share a color.
            max_label = int(max(labels) if len(labels) > 0 else 0)
            text_palette = get_palette(self.text_color, max_label + 1)
            text_colors = [text_palette[label] for label in labels]

            bbox_color = palette if self.bbox_color is None \
                else self.bbox_color
            bbox_palette = get_palette(bbox_color, max_label + 1)
            colors = [bbox_palette[label] for label in labels]
            self.draw_bboxes(
                bboxes,
                edge_colors=colors,
                alpha=self.alpha,
                line_widths=self.line_width)

            # Anchor the label text just inside the bbox top-left corner;
            # font size is scaled adaptively with the bbox area.
            positions = bboxes[:, :2] + self.line_width
            areas = (bboxes[:, 3] - bboxes[:, 1]) * (
                bboxes[:, 2] - bboxes[:, 0])
            scales = _get_adaptive_scales(areas)

            for i, (pos, label) in enumerate(zip(positions, labels)):
                label_text = classes[
                    label] if classes is not None else f'class {label}'
                if 'scores' in instances:
                    # Show prediction scores as a percentage with one decimal.
                    score = round(float(instances.scores[i]) * 100, 1)
                    label_text += f': {score}'

                self.draw_texts(
                    label_text,
                    pos,
                    colors=text_colors[i],
                    font_sizes=int(13 * scales[i]),
                    bboxes=[{
                        'facecolor': 'black',
                        'alpha': 0.8,
                        'pad': 0.7,
                        'edgecolor': 'none'
                    }])

        if 'masks' in instances:
            labels = instances.labels
            masks = instances.masks
            # Normalize the three accepted mask containers to a numpy array.
            if isinstance(masks, torch.Tensor):
                masks = masks.numpy()
            elif isinstance(masks, (PolygonMasks, BitmapMasks)):
                masks = masks.to_ndarray()

            masks = masks.astype(bool)

            max_label = int(max(labels) if len(labels) > 0 else 0)
            mask_color = palette if self.mask_color is None \
                else self.mask_color
            mask_palette = get_palette(mask_color, max_label + 1)
            # Jitter the per-label color so overlapping same-class masks
            # remain distinguishable.
            colors = [jitter_color(mask_palette[label]) for label in labels]
            text_palette = get_palette(self.text_color, max_label + 1)
            text_colors = [text_palette[label] for label in labels]

            # Draw white contour outlines plus the filled binary masks.
            polygons = []
            for i, mask in enumerate(masks):
                contours, _ = bitmap_to_polygon(mask)
                polygons.extend(contours)
            self.draw_polygons(polygons, edge_colors='w', alpha=self.alpha)
            self.draw_binary_masks(masks, colors=colors, alphas=self.alpha)

            if len(labels) > 0 and \
                    ('bboxes' not in instances or
                     instances.bboxes.sum() == 0):
                # instances.bboxes.sum()==0 represent dummy bboxes.
                # A typical example of SOLO does not exist bbox branch.
                areas = []
                positions = []

                # Place each label at the centroid of the mask's largest
                # connected component (index 0 of `stats` is background).
                for mask in masks:
                    _, _, stats, centroids = cv2.connectedComponentsWithStats(
                        mask.astype(np.uint8), connectivity=8)
                    if stats.shape[0] > 1:
                        largest_id = np.argmax(stats[1:, -1]) + 1
                        positions.append(centroids[largest_id])
                        areas.append(stats[largest_id, -1])
                # NOTE(review): an all-empty mask contributes no entry to
                # `positions`/`areas` while `labels` keeps its entry, so the
                # zip below could pair positions with the wrong labels (and
                # np.stack raises on a fully empty list) — confirm masks are
                # non-empty upstream.
                areas = np.stack(areas, axis=0)
                scales = _get_adaptive_scales(areas)

                for i, (pos, label) in enumerate(zip(positions, labels)):
                    label_text = classes[
                        label] if classes is not None else f'class {label}'
                    if 'scores' in instances:
                        score = round(float(instances.scores[i]) * 100, 1)
                        label_text += f': {score}'

                    self.draw_texts(
                        label_text,
                        pos,
                        colors=text_colors[i],
                        font_sizes=int(13 * scales[i]),
                        horizontal_alignments='center',
                        bboxes=[{
                            'facecolor': 'black',
                            'alpha': 0.8,
                            'pad': 0.7,
                            'edgecolor': 'none'
                        }])
        return self.get_image()

    def _draw_panoptic_seg(self, image: np.ndarray,
                           panoptic_seg: ['PixelData'],
                           classes: Optional[List[str]]) -> np.ndarray:
        """Draw panoptic seg of GT or prediction.

        Args:
            image (np.ndarray): The image to draw.
            panoptic_seg (:obj:`PixelData`): Data structure for
                pixel-level annotations or predictions.
            classes (List[str], optional): Category information.

        Returns:
            np.ndarray: the drawn image which channel is RGB.
        """
        # TODO: Is there a way to bypass?
        num_classes = len(classes)

        panoptic_seg = panoptic_seg.sem_seg[0]
        ids = np.unique(panoptic_seg)[::-1]
        legal_indices = ids != num_classes  # for VOID label
        ids = ids[legal_indices]

        # Panoptic ids encode `label + instance_index * INSTANCE_OFFSET`;
        # the modulo recovers the semantic label.
        labels = np.array([id % INSTANCE_OFFSET for id in ids],
                          dtype=np.int64)
        # One boolean mask per id via broadcasting: (N, H, W).
        segms = (panoptic_seg[None] == ids[:, None, None])

        max_label = int(max(labels) if len(labels) > 0 else 0)
        mask_palette = get_palette(self.mask_color, max_label + 1)
        colors = [mask_palette[label] for label in labels]

        self.set_image(image)

        # draw segm
        polygons = []
        for i, mask in enumerate(segms):
            contours, _ = bitmap_to_polygon(mask)
            polygons.extend(contours)
        self.draw_polygons(polygons, edge_colors='w', alpha=self.alpha)
        self.draw_binary_masks(segms, colors=colors, alphas=self.alpha)

        # draw label
        areas = []
        positions = []
        for mask in segms:
            _, _, stats, centroids = cv2.connectedComponentsWithStats(
                mask.astype(np.uint8), connectivity=8)
            # NOTE(review): unlike `_draw_instances`, there is no
            # `stats.shape[0] > 1` guard here — an empty segment would make
            # `stats[1:, -1]` empty and `np.argmax` raise. Presumably every
            # id from np.unique has at least one pixel; verify.
            max_id = np.argmax(stats[1:, -1]) + 1
            positions.append(centroids[max_id])
            areas.append(stats[max_id, -1])
        areas = np.stack(areas, axis=0)
        scales = _get_adaptive_scales(areas)

        text_palette = get_palette(self.text_color, max_label + 1)
        text_colors = [text_palette[label] for label in labels]

        for i, (pos, label) in enumerate(zip(positions, labels)):
            label_text = classes[label]

            self.draw_texts(
                label_text,
                pos,
                colors=text_colors[i],
                font_sizes=int(13 * scales[i]),
                bboxes=[{
                    'facecolor': 'black',
                    'alpha': 0.8,
                    'pad': 0.7,
                    'edgecolor': 'none'
                }],
                horizontal_alignments='center')
        return self.get_image()

    @master_only
    def add_datasample(
            self,
            name: str,
            image: np.ndarray,
            data_sample: Optional['DetDataSample'] = None,
            draw_gt: bool = True,
            draw_pred: bool = True,
            show: bool = False,
            wait_time: float = 0,
            # TODO: Supported in mmengine's Visualizer.
            out_file: Optional[str] = None,
            pred_score_thr: float = 0.3,
            step: int = 0) -> None:
        """Draw datasample and save to all backends.

        - If GT and prediction are plotted at the same time, they are
          displayed in a stitched image where the left image is the
          ground truth and the right image is the prediction.
        - If ``show`` is True, all storage backends are ignored, and
          the images will be displayed in a local window.
        - If ``out_file`` is specified, the drawn image will be
          saved to ``out_file``. It is usually used when the display
          is not available.

        Args:
            name (str): The image identifier.
            image (np.ndarray): The image to draw.
            data_sample (:obj:`DetDataSample`, optional): A data sample that
                contain annotations and predictions.
                Defaults to None.
            draw_gt (bool): Whether to draw GT DetDataSample.
                Default to True.
            draw_pred (bool): Whether to draw Prediction DetDataSample.
                Defaults to True.
            show (bool): Whether to display the drawn image.
                Default to False.
            wait_time (float): The interval of show (s). Defaults to 0.
            out_file (str): Path to output file. Defaults to None.
            pred_score_thr (float): The threshold to visualize the bboxes
                and masks. Defaults to 0.3.
            step (int): Global step value to record. Defaults to 0.
        """
        image = image.clip(0, 255).astype(np.uint8)
        classes = self.dataset_meta.get('classes', None)
        palette = self.dataset_meta.get('palette', None)

        gt_img_data = None
        pred_img_data = None

        if data_sample is not None:
            # Detach everything to CPU once, then draw.
            data_sample = data_sample.cpu()

        if draw_gt and data_sample is not None:
            gt_img_data = image
            if 'gt_instances' in data_sample:
                gt_img_data = self._draw_instances(image,
                                                   data_sample.gt_instances,
                                                   classes, palette)

            if 'gt_panoptic_seg' in data_sample:
                assert classes is not None, 'class information is ' \
                                            'not provided when ' \
                                            'visualizing panoptic ' \
                                            'segmentation results.'
                gt_img_data = self._draw_panoptic_seg(
                    gt_img_data, data_sample.gt_panoptic_seg, classes)

        if draw_pred and data_sample is not None:
            pred_img_data = image
            if 'pred_instances' in data_sample:
                pred_instances = data_sample.pred_instances
                # Only draw predictions above the score threshold.
                pred_instances = pred_instances[
                    pred_instances.scores > pred_score_thr]
                pred_img_data = self._draw_instances(image, pred_instances,
                                                     classes, palette)
            if 'pred_panoptic_seg' in data_sample:
                assert classes is not None, 'class information is ' \
                                            'not provided when ' \
                                            'visualizing panoptic ' \
                                            'segmentation results.'
                pred_img_data = self._draw_panoptic_seg(
                    pred_img_data, data_sample.pred_panoptic_seg.numpy(),
                    classes)

        # Stitch GT (left) and prediction (right) side by side when both
        # were drawn; otherwise fall back to whichever exists.
        if gt_img_data is not None and pred_img_data is not None:
            drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1)
        elif gt_img_data is not None:
            drawn_img = gt_img_data
        elif pred_img_data is not None:
            drawn_img = pred_img_data
        else:
            # Display the original image directly if nothing is drawn.
            drawn_img = image

        # It is convenient for users to obtain the drawn image.
        # For example, the user wants to obtain the drawn image and
        # save it as a video during video inference.
        self.set_image(drawn_img)

        if show:
            self.show(drawn_img, win_name=name, wait_time=wait_time)

        if out_file is not None:
            # `imwrite` expects BGR, the drawn image is RGB.
            mmcv.imwrite(drawn_img[..., ::-1], out_file)
        else:
            self.add_image(name, drawn_img, step)
17,127
41.606965
79
py
ERD
ERD-main/mmdet/engine/hooks/checkloss_hook.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch
from mmengine.hooks import Hook
from mmengine.runner import Runner

from mmdet.registry import HOOKS


@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
    """Check invalid loss hook.

    This hook will regularly check whether the loss is valid
    during training.

    Args:
        interval (int): Checking interval (every k iterations).
            Default: 50.
    """

    def __init__(self, interval: int = 50) -> None:
        self.interval = interval

    def after_train_iter(self,
                         runner: Runner,
                         batch_idx: int,
                         data_batch: Optional[dict] = None,
                         outputs: Optional[dict] = None) -> None:
        """Regularly check whether the loss is valid every n iterations.

        Args:
            runner (:obj:`Runner`): The runner of the training process.
            batch_idx (int): The index of the current batch in the train loop.
            data_batch (dict, Optional): Data from dataloader.
                Defaults to None.
            outputs (dict, Optional): Outputs from model. Defaults to None.

        Raises:
            AssertionError: If ``outputs['loss']`` is ``inf`` or ``NaN``.
        """
        if not self.every_n_train_iters(runner, self.interval):
            return
        # `outputs['loss']` is a scalar tensor here; `torch.isfinite` is
        # False for both +/-inf and NaN.
        if not torch.isfinite(outputs['loss']):
            # Log first, then raise explicitly. The previous code wrote
            # `assert cond, runner.logger.info(...)`, which (a) used the
            # *return value* of `logger.info` (always None) as the assert
            # message, and (b) was removed entirely — log included — when
            # running under `python -O`. Raising AssertionError keeps the
            # exception type callers may already catch.
            runner.logger.info('loss become infinite or NaN!')
            raise AssertionError('loss become infinite or NaN!')
1,406
31.72093
78
py
ERD
ERD-main/mmdet/engine/hooks/sync_norm_hook.py
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict

from mmengine.dist import get_dist_info
from mmengine.hooks import Hook
from torch import nn

from mmdet.registry import HOOKS
from mmdet.utils import all_reduce_dict


def get_norm_states(module: nn.Module) -> OrderedDict:
    """Get the state_dict of batch norms in the module.

    Keys are fully qualified as ``<submodule path>.<state key>``.
    """
    collected = OrderedDict()
    for mod_name, submodule in module.named_modules():
        # `_NormBase` covers BatchNorm1d/2d/3d and SyncBatchNorm.
        if not isinstance(submodule, nn.modules.batchnorm._NormBase):
            continue
        for state_key, state_val in submodule.state_dict().items():
            collected['.'.join([mod_name, state_key])] = state_val
    return collected


@HOOKS.register_module()
class SyncNormHook(Hook):
    """Synchronize Norm states before validation, currently used in YOLOX."""

    def before_val_epoch(self, runner):
        """Synchronizing norm."""
        model = runner.model
        _, world_size = get_dist_info()
        # Single-process run: nothing to synchronize.
        if world_size == 1:
            return
        states = get_norm_states(model)
        if not states:
            return
        # TODO: use `all_reduce_dict` in mmengine
        averaged = all_reduce_dict(states, op='mean')
        model.load_state_dict(averaged, strict=False)
1,247
31.842105
77
py
ERD
ERD-main/mmdet/engine/hooks/mean_teacher_hook.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional

import torch.nn as nn
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmengine.runner import Runner

from mmdet.registry import HOOKS


@HOOKS.register_module()
class MeanTeacherHook(Hook):
    """Mean Teacher Hook.

    Mean Teacher is an efficient semi-supervised learning method in
    `Mean Teacher <https://arxiv.org/abs/1703.01780>`_.
    This method requires two models with exactly the same structure,
    as the student model and the teacher model, respectively.
    The student model updates the parameters through gradient descent,
    and the teacher model updates the parameters through
    exponential moving average of the student model.
    Compared with the student model, the teacher model
    is smoother and accumulates more knowledge.

    Args:
        momentum (float): The momentum used for updating teacher's parameter.
            Teacher's parameter are updated with the formula:
            `teacher = (1-momentum) * teacher + momentum * student`.
            Defaults to 0.001.
        interval (int): Update teacher's parameter every interval iteration.
            Defaults to 1.
        skip_buffers (bool): Whether to skip the model buffers, such as
            batchnorm running stats (running_mean, running_var), it does not
            perform the ema operation. Default to True.
    """

    def __init__(self,
                 momentum: float = 0.001,
                 interval: int = 1,
                 skip_buffer=True) -> None:
        assert 0 < momentum < 1
        self.momentum = momentum
        self.interval = interval
        # NOTE(review): the parameter is named `skip_buffer` (singular) but
        # the docstring and the attribute use `skip_buffers` — renaming the
        # parameter would break existing configs/keyword callers, so the
        # mismatch is kept and only documented here.
        self.skip_buffers = skip_buffer

    def before_train(self, runner: Runner) -> None:
        """To check that teacher model and student model exist."""
        model = runner.model
        # Unwrap DDP/other wrappers to reach the semi-supervised detector.
        if is_model_wrapper(model):
            model = model.module
        assert hasattr(model, 'teacher')
        assert hasattr(model, 'student')
        # only do it at initial stage
        if runner.iter == 0:
            # momentum=1 copies the student weights into the teacher so both
            # start identical.
            self.momentum_update(model, 1)

    def after_train_iter(self,
                         runner: Runner,
                         batch_idx: int,
                         data_batch: Optional[dict] = None,
                         outputs: Optional[dict] = None) -> None:
        """Update teacher's parameter every self.interval iterations."""
        if (runner.iter + 1) % self.interval != 0:
            return
        model = runner.model
        if is_model_wrapper(model):
            model = model.module
        self.momentum_update(model, self.momentum)

    def momentum_update(self, model: nn.Module, momentum: float) -> None:
        """Compute the moving average of the parameters using exponential
        moving average.

        Applies ``teacher = (1 - momentum) * teacher + momentum * student``
        in place on the teacher's tensors.
        """
        if self.skip_buffers:
            # Parameters only. Pairing relies on `named_parameters()`
            # yielding the same order for both models, which holds because
            # teacher and student share an identical structure.
            for (src_name, src_parm), (dst_name, dst_parm) in zip(
                    model.student.named_parameters(),
                    model.teacher.named_parameters()):
                dst_parm.data.mul_(1 - momentum).add_(
                    src_parm.data, alpha=momentum)
        else:
            # Parameters *and* buffers (e.g. batchnorm running stats).
            for (src_parm,
                 dst_parm) in zip(model.student.state_dict().values(),
                                  model.teacher.state_dict().values()):
                # exclude num_tracking
                # (integer buffers like `num_batches_tracked` must not be
                # blended, hence the floating-point dtype check)
                if dst_parm.dtype.is_floating_point:
                    dst_parm.data.mul_(1 - momentum).add_(
                        src_parm.data, alpha=momentum)
3,537
39.204545
77
py
ERD
ERD-main/mmdet/engine/optimizers/layer_decay_optimizer_constructor.py
# Copyright (c) OpenMMLab. All rights reserved.
import json
from typing import List

import torch.nn as nn
from mmengine.dist import get_dist_info
from mmengine.logging import MMLogger
from mmengine.optim import DefaultOptimWrapperConstructor

from mmdet.registry import OPTIM_WRAPPER_CONSTRUCTORS


def get_layer_id_for_convnext(var_name, max_layer_id):
    """Get the layer id to set the different learning rates in ``layer_wise``
    decay_type.

    Args:
        var_name (str): The key of the model.
        max_layer_id (int): Maximum layer id.

    Returns:
        int: The id number corresponding to different learning rate in
        ``LearningRateDecayOptimizerConstructor``.
    """
    if var_name in ('backbone.cls_token', 'backbone.mask_token',
                    'backbone.pos_embed'):
        # Embedding-like tensors share the earliest (most-decayed) id.
        return 0
    elif var_name.startswith('backbone.downsample_layers'):
        # Key layout: backbone.downsample_layers.<stage_id>...
        stage_id = int(var_name.split('.')[2])
        if stage_id == 0:
            layer_id = 0
        elif stage_id == 1:
            layer_id = 2
        elif stage_id == 2:
            layer_id = 3
        elif stage_id == 3:
            layer_id = max_layer_id
        # NOTE(review): a stage_id outside 0-3 leaves `layer_id` unbound and
        # raises NameError here; presumably ConvNeXt always has exactly 4
        # stages — confirm before reusing with other backbones.
        return layer_id
    elif var_name.startswith('backbone.stages'):
        # Key layout: backbone.stages.<stage_id>.<block_id>...
        stage_id = int(var_name.split('.')[2])
        block_id = int(var_name.split('.')[3])
        if stage_id == 0:
            layer_id = 1
        elif stage_id == 1:
            layer_id = 2
        elif stage_id == 2:
            # Stage 2 is the deep stage: group every 3 blocks into one id.
            layer_id = 3 + block_id // 3
        elif stage_id == 3:
            layer_id = max_layer_id
        return layer_id
    else:
        # Anything outside the backbone (neck, head, ...) gets the largest
        # id, i.e. no learning-rate decay.
        return max_layer_id + 1


def get_stage_id_for_convnext(var_name, max_stage_id):
    """Get the stage id to set the different learning rates in ``stage_wise``
    decay_type.

    Args:
        var_name (str): The key of the model.
        max_stage_id (int): Maximum stage id.

    Returns:
        int: The id number corresponding to different learning rate in
        ``LearningRateDecayOptimizerConstructor``.
    """
    if var_name in ('backbone.cls_token', 'backbone.mask_token',
                    'backbone.pos_embed'):
        return 0
    elif var_name.startswith('backbone.downsample_layers'):
        return 0
    elif var_name.startswith('backbone.stages'):
        stage_id = int(var_name.split('.')[2])
        # Shift by one so stage 0 of the backbone does not collide with the
        # embedding/downsample group above.
        return stage_id + 1
    else:
        return max_stage_id - 1


@OPTIM_WRAPPER_CONSTRUCTORS.register_module()
class LearningRateDecayOptimizerConstructor(DefaultOptimWrapperConstructor):
    # Different learning rates are set for different layers of backbone.
    # Note: Currently, this optimizer constructor is built for ConvNeXt.

    def add_params(self, params: List[dict], module: nn.Module,
                   **kwargs) -> None:
        """Add all parameters of module to the params list.

        The parameters of the given module will be added to the list of param
        groups, with specific rules defined by paramwise_cfg.

        Args:
            params (list[dict]): A list of param groups, it will be modified
                in place.
            module (nn.Module): The module to be added.
        """
        logger = MMLogger.get_current_instance()

        parameter_groups = {}
        logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')
        # +2 accounts for the embedding group (id 0) and the non-backbone
        # group (max id + 1) added around the backbone layers.
        num_layers = self.paramwise_cfg.get('num_layers') + 2
        decay_rate = self.paramwise_cfg.get('decay_rate')
        decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise')
        logger.info('Build LearningRateDecayOptimizerConstructor  '
                    f'{decay_type} {decay_rate} - {num_layers}')
        weight_decay = self.base_wd
        for name, param in module.named_parameters():
            if not param.requires_grad:
                continue  # frozen weights
            # 1-D tensors (norms, biases) and embedding tokens never get
            # weight decay.
            if len(param.shape) == 1 or name.endswith('.bias') or name in (
                    'pos_embed', 'cls_token'):
                group_name = 'no_decay'
                this_weight_decay = 0.
            else:
                group_name = 'decay'
                this_weight_decay = weight_decay
            if 'layer_wise' in decay_type:
                if 'ConvNeXt' in module.backbone.__class__.__name__:
                    layer_id = get_layer_id_for_convnext(
                        name, self.paramwise_cfg.get('num_layers'))
                    logger.info(f'set param {name} as id {layer_id}')
                else:
                    raise NotImplementedError()
            elif decay_type == 'stage_wise':
                if 'ConvNeXt' in module.backbone.__class__.__name__:
                    layer_id = get_stage_id_for_convnext(name, num_layers)
                    logger.info(f'set param {name} as id {layer_id}')
                else:
                    raise NotImplementedError()
            # NOTE(review): a decay_type matching neither branch would leave
            # `layer_id` unbound (NameError below) — the config is assumed
            # to be validated upstream.
            group_name = f'layer_{layer_id}_{group_name}'

            if group_name not in parameter_groups:
                # Deeper layers (larger id) decay less: scale = rate^(depth
                # from the top).
                scale = decay_rate**(num_layers - layer_id - 1)

                parameter_groups[group_name] = {
                    'weight_decay': this_weight_decay,
                    'params': [],
                    'param_names': [],
                    'lr_scale': scale,
                    'group_name': group_name,
                    'lr': scale * self.base_lr,
                }

            parameter_groups[group_name]['params'].append(param)
            parameter_groups[group_name]['param_names'].append(name)
        rank, _ = get_dist_info()
        if rank == 0:
            # Log the group layout (without the tensors themselves) once,
            # on the main process only.
            to_display = {}
            for key in parameter_groups:
                to_display[key] = {
                    'param_names': parameter_groups[key]['param_names'],
                    'lr_scale': parameter_groups[key]['lr_scale'],
                    'lr': parameter_groups[key]['lr'],
                    'weight_decay': parameter_groups[key]['weight_decay'],
                }
            logger.info(f'Param groups = {json.dumps(to_display, indent=2)}')
        params.extend(parameter_groups.values())
6,020
36.867925
77
py
ERD
ERD-main/mmdet/engine/schedulers/quadratic_warmup.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.optim.scheduler.lr_scheduler import LRSchedulerMixin
from mmengine.optim.scheduler.momentum_scheduler import MomentumSchedulerMixin
from mmengine.optim.scheduler.param_scheduler import INF, _ParamScheduler
from torch.optim import Optimizer

from mmdet.registry import PARAM_SCHEDULERS


@PARAM_SCHEDULERS.register_module()
class QuadraticWarmupParamScheduler(_ParamScheduler):
    r"""Warm up the parameter value of each parameter group by quadratic
    formula:

    .. math::

        X_{t} = X_{t-1} + \frac{2t+1}{{(end-begin)}^{2}} \times X_{base}

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        param_name (str): Name of the parameter to be adjusted, such as
            ``lr``, ``momentum``.
        begin (int): Step at which to start updating the parameters.
            Defaults to 0.
        end (int): Step at which to stop updating the parameters.
            Defaults to INF.
        last_step (int): The index of last step. Used for resume without
            state dict. Defaults to -1.
        by_epoch (bool): Whether the scheduled parameters are updated by
            epochs. Defaults to True.
        verbose (bool): Whether to print the value for each update.
            Defaults to False.
    """

    def __init__(self,
                 optimizer: Optimizer,
                 param_name: str,
                 begin: int = 0,
                 end: int = INF,
                 last_step: int = -1,
                 by_epoch: bool = True,
                 verbose: bool = False):
        # A finite `end` is mandatory: the warmup increment divides by
        # (end - begin) ** 2 below.
        if end >= INF:
            raise ValueError('``end`` must be less than infinity,'
                             'Please set ``end`` parameter of '
                             '``QuadraticWarmupScheduler`` as the '
                             'number of warmup end.')
        self.total_iters = end - begin
        super().__init__(
            optimizer=optimizer,
            param_name=param_name,
            begin=begin,
            end=end,
            last_step=last_step,
            by_epoch=by_epoch,
            verbose=verbose)

    @classmethod
    def build_iter_from_epoch(cls,
                              *args,
                              begin=0,
                              end=INF,
                              by_epoch=True,
                              epoch_length=None,
                              **kwargs):
        """Build an iter-based instance of this scheduler from an epoch-based
        config."""
        assert by_epoch, 'Only epoch-based kwargs whose `by_epoch=True` can ' \
                         'be converted to iter-based.'
        assert epoch_length is not None and epoch_length > 0, \
            f'`epoch_length` must be a positive integer, ' \
            f'but got {epoch_length}.'
        by_epoch = False
        # Convert the epoch-based boundaries to iteration counts.
        begin = begin * epoch_length
        if end != INF:
            end = end * epoch_length
        return cls(*args, begin=begin, end=end, by_epoch=by_epoch, **kwargs)

    def _get_value(self):
        """Compute value using chainable form of the scheduler."""
        # Each step adds base * (2t + 1) / T^2 to the previous value. Summing
        # the increments for t = 0..k telescopes to base * (k + 1)^2 / T^2,
        # so the value reaches exactly `base_value` after T warmup steps.
        if self.last_step == 0:
            # First step: start from the increment alone rather than the
            # group's current (possibly stale) value.
            return [
                base_value * (2 * self.last_step + 1) / self.total_iters**2
                for base_value in self.base_values
            ]
        return [
            group[self.param_name] + base_value *
            (2 * self.last_step + 1) / self.total_iters**2
            for base_value, group in zip(self.base_values,
                                         self.optimizer.param_groups)
        ]


@PARAM_SCHEDULERS.register_module()
class QuadraticWarmupLR(LRSchedulerMixin, QuadraticWarmupParamScheduler):
    """Warm up the learning rate of each parameter group by quadratic
    formula.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        begin (int): Step at which to start updating the parameters.
            Defaults to 0.
        end (int): Step at which to stop updating the parameters.
            Defaults to INF.
        last_step (int): The index of last step. Used for resume without
            state dict. Defaults to -1.
        by_epoch (bool): Whether the scheduled parameters are updated by
            epochs. Defaults to True.
        verbose (bool): Whether to print the value for each update.
            Defaults to False.
    """


@PARAM_SCHEDULERS.register_module()
class QuadraticWarmupMomentum(MomentumSchedulerMixin,
                              QuadraticWarmupParamScheduler):
    """Warm up the momentum value of each parameter group by quadratic
    formula.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        begin (int): Step at which to start updating the parameters.
            Defaults to 0.
        end (int): Step at which to stop updating the parameters.
            Defaults to INF.
        last_step (int): The index of last step. Used for resume without
            state dict. Defaults to -1.
        by_epoch (bool): Whether the scheduled parameters are updated by
            epochs. Defaults to True.
        verbose (bool): Whether to print the value for each update.
            Defaults to False.
    """
5,176
38.219697
79
py
ERD
ERD-main/mmdet/utils/contextmanagers.py
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import contextlib
import logging
import os
import time
from typing import List

import torch

logger = logging.getLogger(__name__)

# When set in the environment, CUDA events are created with timing enabled
# and per-stream elapsed times are logged on exit.
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))


@contextlib.asynccontextmanager
async def completed(trace_name='',
                    name='',
                    sleep_interval=0.05,
                    streams: List[torch.cuda.Stream] = None):
    """Async context manager that waits for work to complete on given CUDA
    streams.

    On exit it records an event on each stream and polls the events every
    ``sleep_interval`` seconds (yielding to the event loop in between) until
    all recorded work has finished. If CUDA is unavailable it is a no-op.
    """
    if not torch.cuda.is_available():
        yield
        return

    stream_before_context_switch = torch.cuda.current_stream()
    if not streams:
        streams = [stream_before_context_switch]
    else:
        # ``None`` entries fall back to the stream current at entry.
        streams = [s if s else stream_before_context_switch for s in streams]

    # One completion event per stream; timing only enabled in debug mode.
    end_events = [
        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
    ]

    if DEBUG_COMPLETED_TIME:
        start = torch.cuda.Event(enable_timing=True)
        stream_before_context_switch.record_event(start)

        cpu_start = time.monotonic()
    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
    grad_enabled_before = torch.is_grad_enabled()
    try:
        yield
    finally:
        # The body must not leave a different stream current.
        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_end = time.monotonic()
        for i, stream in enumerate(streams):
            event = end_events[i]
            stream.record_event(event)

        grad_enabled_after = torch.is_grad_enabled()

        # observed change of torch.is_grad_enabled() during concurrent run of
        # async_test_bboxes code
        assert (grad_enabled_before == grad_enabled_after
                ), 'Unexpected is_grad_enabled() value change'

        are_done = [e.query() for e in end_events]
        logger.debug('%s %s completed: %s streams: %s', trace_name, name,
                     are_done, streams)
        with torch.cuda.stream(stream_before_context_switch):
            # Poll until every event reports completion, sleeping so other
            # coroutines can run in the meantime.
            while not all(are_done):
                await asyncio.sleep(sleep_interval)
                are_done = [e.query() for e in end_events]
                logger.debug(
                    '%s %s completed: %s streams: %s',
                    trace_name,
                    name,
                    are_done,
                    streams,
                )

        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_time = (cpu_end - cpu_start) * 1000
            stream_times_ms = ''
            for i, stream in enumerate(streams):
                elapsed_time = start.elapsed_time(end_events[i])
                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                        stream_times_ms)


@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
                     trace_name='concurrent',
                     name='stream'):
    """Run code concurrently in different streams.

    :param streamqueue: asyncio.Queue instance.

    Queue tasks define the pool of streams used for concurrent execution.
    """
    if not torch.cuda.is_available():
        yield
        return

    initial_stream = torch.cuda.current_stream()

    with torch.cuda.stream(initial_stream):
        # Borrow a stream from the pool; it is returned in ``finally`` so the
        # pool size stays constant even if the body raises.
        stream = await streamqueue.get()
        assert isinstance(stream, torch.cuda.Stream)

        try:
            with torch.cuda.stream(stream):
                logger.debug('%s %s is starting, stream: %s', trace_name,
                             name, stream)
                yield
                current = torch.cuda.current_stream()
                assert current == stream
                logger.debug('%s %s has finished, stream: %s', trace_name,
                             name, stream)
        finally:
            streamqueue.task_done()
            streamqueue.put_nowait(stream)
4,125
32.544715
79
py
ERD
ERD-main/mmdet/utils/benchmark.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import time
from functools import partial
from typing import List, Optional, Union

import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import fuse_conv_bn
# TODO need update
# from mmcv.runner import wrap_fp16_model
from mmengine import MMLogger
from mmengine.config import Config
from mmengine.device import get_max_cuda_memory
from mmengine.dist import get_world_size
from mmengine.runner import Runner, load_checkpoint
from mmengine.utils.dl_utils import set_multi_processing
from torch.nn.parallel import DistributedDataParallel

from mmdet.registry import DATASETS, MODELS

try:
    import psutil
except ImportError:
    # psutil is optional at import time; benchmarks that need process
    # memory statistics raise a clear ImportError in their constructors.
    psutil = None


def custom_round(value: Union[int, float],
                 factor: Union[int, float],
                 precision: int = 2) -> float:
    """Custom round function.

    Divides ``value`` by ``factor`` and rounds to ``precision`` decimals.
    """
    return round(value / factor, precision)


# Convert a byte count to gigabytes rounded to 2 decimals.
gb_round = partial(custom_round, factor=1024**3)


def print_log(msg: str, logger: Optional[MMLogger] = None) -> None:
    """Print a log message to stdout, or via ``logger`` when given."""
    if logger is None:
        print(msg, flush=True)
    else:
        logger.info(msg)


def print_process_memory(p: psutil.Process,
                         logger: Optional[MMLogger] = None) -> None:
    """print process memory info.

    Sums USS/PSS memory of ``p`` and all of its direct children.
    """
    mem_used = gb_round(psutil.virtual_memory().used)
    memory_full_info = p.memory_full_info()
    uss_mem = gb_round(memory_full_info.uss)
    pss_mem = gb_round(memory_full_info.pss)
    for children in p.children():
        child_mem_info = children.memory_full_info()
        uss_mem += gb_round(child_mem_info.uss)
        pss_mem += gb_round(child_mem_info.pss)
    process_count = 1 + len(p.children())
    print_log(
        f'(GB) mem_used: {mem_used:.2f} | uss: {uss_mem:.2f} | '
        f'pss: {pss_mem:.2f} | total_proc: {process_count}', logger)


class BaseBenchmark:
    """The benchmark base class.

    The ``run`` method is an external calling interface, and it will call the
    ``run_once`` method ``repeat_num`` times for benchmarking. Finally, call
    the ``average_multiple_runs`` method to further process the results of
    multiple runs.

    Args:
        max_iter (int): maximum iterations of benchmark.
        log_interval (int): interval of logging.
        num_warmup (int): Number of Warmup.
        logger (MMLogger, optional): Formatted logger used to record messages.
    """

    def __init__(self,
                 max_iter: int,
                 log_interval: int,
                 num_warmup: int,
                 logger: Optional[MMLogger] = None):
        self.max_iter = max_iter
        self.log_interval = log_interval
        self.num_warmup = num_warmup
        self.logger = logger

    def run(self, repeat_num: int = 1) -> dict:
        """benchmark entry method.

        Args:
            repeat_num (int): Number of repeat benchmark.
                Defaults to 1.
        """
        assert repeat_num >= 1

        results = []
        for _ in range(repeat_num):
            results.append(self.run_once())

        results = self.average_multiple_runs(results)
        return results

    def run_once(self) -> dict:
        """Executes the benchmark once."""
        raise NotImplementedError()

    def average_multiple_runs(self, results: List[dict]) -> dict:
        """Average the results of multiple runs."""
        raise NotImplementedError()


class InferenceBenchmark(BaseBenchmark):
    """The inference benchmark class. It will be statistical inference FPS,
    CUDA memory and CPU memory information.

    Args:
        cfg (mmengine.Config): config.
        checkpoint (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``.
        distributed (bool): distributed testing flag.
        is_fuse_conv_bn (bool): Whether to fuse conv and bn, this will
            slightly increase the inference speed.
        max_iter (int): maximum iterations of benchmark. Defaults to 2000.
        log_interval (int): interval of logging. Defaults to 50.
        num_warmup (int): Number of Warmup. Defaults to 5.
        logger (MMLogger, optional): Formatted logger used to record messages.
    """

    def __init__(self,
                 cfg: Config,
                 checkpoint: str,
                 distributed: bool,
                 is_fuse_conv_bn: bool,
                 max_iter: int = 2000,
                 log_interval: int = 50,
                 num_warmup: int = 5,
                 logger: Optional[MMLogger] = None):
        super().__init__(max_iter, log_interval, num_warmup, logger)

        assert get_world_size(
        ) == 1, 'Inference benchmark does not allow distributed multi-GPU'

        self.cfg = copy.deepcopy(cfg)
        self.distributed = distributed

        if psutil is None:
            raise ImportError('psutil is not installed, please install it by: '
                              'pip install psutil')

        self._process = psutil.Process()
        env_cfg = self.cfg.get('env_cfg')

        if env_cfg.get('cudnn_benchmark'):
            torch.backends.cudnn.benchmark = True

        mp_cfg: dict = env_cfg.get('mp_cfg', {})
        set_multi_processing(**mp_cfg, distributed=self.distributed)

        print_log('before build: ', self.logger)
        print_process_memory(self._process, self.logger)

        self.model = self._init_model(checkpoint, is_fuse_conv_bn)

        # Because multiple processes will occupy additional CPU resources,
        # FPS statistics will be more unstable when num_workers is not 0.
        # It is reasonable to set num_workers to 0.
        dataloader_cfg = cfg.test_dataloader
        dataloader_cfg['num_workers'] = 0
        dataloader_cfg['batch_size'] = 1
        dataloader_cfg['persistent_workers'] = False
        self.data_loader = Runner.build_dataloader(dataloader_cfg)

        print_log('after build: ', self.logger)
        print_process_memory(self._process, self.logger)

    def _init_model(self, checkpoint: str, is_fuse_conv_bn: bool) -> nn.Module:
        """Initialize the model."""
        model = MODELS.build(self.cfg.model)
        # TODO need update
        # fp16_cfg = self.cfg.get('fp16', None)
        # if fp16_cfg is not None:
        #     wrap_fp16_model(model)

        load_checkpoint(model, checkpoint, map_location='cpu')
        if is_fuse_conv_bn:
            model = fuse_conv_bn(model)

        model = model.cuda()

        if self.distributed:
            model = DistributedDataParallel(
                model,
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False,
                find_unused_parameters=False)

        model.eval()
        return model

    def run_once(self) -> dict:
        """Executes the benchmark once."""
        pure_inf_time = 0
        fps = 0

        for i, data in enumerate(self.data_loader):

            if (i + 1) % self.log_interval == 0:
                print_log('==================================', self.logger)

            # Synchronize before and after the model call so only GPU
            # inference time is measured.
            torch.cuda.synchronize()
            start_time = time.perf_counter()

            with torch.no_grad():
                self.model.test_step(data)

            torch.cuda.synchronize()
            elapsed = time.perf_counter() - start_time

            # Skip the first ``num_warmup`` iterations when accumulating.
            if i >= self.num_warmup:
                pure_inf_time += elapsed
                if (i + 1) % self.log_interval == 0:
                    fps = (i + 1 - self.num_warmup) / pure_inf_time
                    cuda_memory = get_max_cuda_memory()

                    print_log(
                        f'Done image [{i + 1:<3}/{self.max_iter}], '
                        f'fps: {fps:.1f} img/s, '
                        f'times per image: {1000 / fps:.1f} ms/img, '
                        f'cuda memory: {cuda_memory} MB', self.logger)
                    print_process_memory(self._process, self.logger)

            if (i + 1) == self.max_iter:
                fps = (i + 1 - self.num_warmup) / pure_inf_time
                break

        return {'fps': fps}

    def average_multiple_runs(self, results: List[dict]) -> dict:
        """Average the results of multiple runs."""
        print_log('============== Done ==================', self.logger)

        fps_list_ = [round(result['fps'], 1) for result in results]
        avg_fps_ = sum(fps_list_) / len(fps_list_)
        outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}

        if len(fps_list_) > 1:
            times_pre_image_list_ = [
                round(1000 / result['fps'], 1) for result in results
            ]
            avg_times_pre_image_ = sum(times_pre_image_list_) / len(
                times_pre_image_list_)

            print_log(
                f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '
                'times per image: '
                f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '
                'ms/img', self.logger)
        else:
            print_log(
                f'Overall fps: {fps_list_[0]:.1f} img/s, '
                f'times per image: {1000 / fps_list_[0]:.1f} ms/img',
                self.logger)

        print_log(f'cuda memory: {get_max_cuda_memory()} MB', self.logger)
        print_process_memory(self._process, self.logger)

        return outputs


class DataLoaderBenchmark(BaseBenchmark):
    """The dataloader benchmark class. It will be statistical inference FPS
    and CPU memory information.

    Args:
        cfg (mmengine.Config): config.
        distributed (bool): distributed testing flag.
        dataset_type (str): benchmark data type, only supports ``train``,
            ``val`` and ``test``.
        max_iter (int): maximum iterations of benchmark. Defaults to 2000.
        log_interval (int): interval of logging. Defaults to 50.
        num_warmup (int): Number of Warmup. Defaults to 5.
        logger (MMLogger, optional): Formatted logger used to record messages.
    """

    def __init__(self,
                 cfg: Config,
                 distributed: bool,
                 dataset_type: str,
                 max_iter: int = 2000,
                 log_interval: int = 50,
                 num_warmup: int = 5,
                 logger: Optional[MMLogger] = None):
        super().__init__(max_iter, log_interval, num_warmup, logger)

        assert dataset_type in ['train', 'val', 'test'], \
            'dataset_type only supports train,' \
            f' val and test, but got {dataset_type}'
        assert get_world_size(
        ) == 1, 'Dataloader benchmark does not allow distributed multi-GPU'

        self.cfg = copy.deepcopy(cfg)
        self.distributed = distributed

        if psutil is None:
            raise ImportError('psutil is not installed, please install it by: '
                              'pip install psutil')

        self._process = psutil.Process()

        mp_cfg = self.cfg.get('env_cfg', {}).get('mp_cfg')
        if mp_cfg is not None:
            set_multi_processing(distributed=self.distributed, **mp_cfg)
        else:
            set_multi_processing(distributed=self.distributed)

        print_log('before build: ', self.logger)
        print_process_memory(self._process, self.logger)

        if dataset_type == 'train':
            self.data_loader = Runner.build_dataloader(cfg.train_dataloader)
        elif dataset_type == 'test':
            self.data_loader = Runner.build_dataloader(cfg.test_dataloader)
        else:
            self.data_loader = Runner.build_dataloader(cfg.val_dataloader)

        self.batch_size = self.data_loader.batch_size
        self.num_workers = self.data_loader.num_workers

        print_log('after build: ', self.logger)
        print_process_memory(self._process, self.logger)

    def run_once(self) -> dict:
        """Executes the benchmark once."""
        pure_inf_time = 0
        fps = 0

        # benchmark with 2000 image and take the average
        start_time = time.perf_counter()
        for i, data in enumerate(self.data_loader):
            # ``elapsed`` is the time the dataloader took to produce this
            # batch; ``start_time`` is reset at the end of each iteration.
            elapsed = time.perf_counter() - start_time

            if (i + 1) % self.log_interval == 0:
                print_log('==================================', self.logger)

            if i >= self.num_warmup:
                pure_inf_time += elapsed
                if (i + 1) % self.log_interval == 0:
                    fps = (i + 1 - self.num_warmup) / pure_inf_time

                    print_log(
                        f'Done batch [{i + 1:<3}/{self.max_iter}], '
                        f'fps: {fps:.1f} batch/s, '
                        f'times per batch: {1000 / fps:.1f} ms/batch, '
                        f'batch size: {self.batch_size}, num_workers: '
                        f'{self.num_workers}', self.logger)
                    print_process_memory(self._process, self.logger)

            if (i + 1) == self.max_iter:
                fps = (i + 1 - self.num_warmup) / pure_inf_time
                break

            start_time = time.perf_counter()

        return {'fps': fps}

    def average_multiple_runs(self, results: List[dict]) -> dict:
        """Average the results of multiple runs."""
        print_log('============== Done ==================', self.logger)

        fps_list_ = [round(result['fps'], 1) for result in results]
        avg_fps_ = sum(fps_list_) / len(fps_list_)
        outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}

        if len(fps_list_) > 1:
            times_pre_image_list_ = [
                round(1000 / result['fps'], 1) for result in results
            ]
            avg_times_pre_image_ = sum(times_pre_image_list_) / len(
                times_pre_image_list_)

            print_log(
                f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '
                'times per batch: '
                f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '
                f'ms/batch, batch size: {self.batch_size}, num_workers: '
                f'{self.num_workers}', self.logger)
        else:
            print_log(
                f'Overall fps: {fps_list_[0]:.1f} batch/s, '
                f'times per batch: {1000 / fps_list_[0]:.1f} ms/batch, '
                f'batch size: {self.batch_size}, num_workers: '
                f'{self.num_workers}', self.logger)

        print_process_memory(self._process, self.logger)

        return outputs


class DatasetBenchmark(BaseBenchmark):
    """The dataset benchmark class. It will be statistical inference FPS, FPS
    pre transform and CPU memory information.

    Args:
        cfg (mmengine.Config): config.
        dataset_type (str): benchmark data type, only supports ``train``,
            ``val`` and ``test``.
        max_iter (int): maximum iterations of benchmark. Defaults to 2000.
        log_interval (int): interval of logging. Defaults to 50.
        num_warmup (int): Number of Warmup. Defaults to 5.
        logger (MMLogger, optional): Formatted logger used to record messages.
    """

    def __init__(self,
                 cfg: Config,
                 dataset_type: str,
                 max_iter: int = 2000,
                 log_interval: int = 50,
                 num_warmup: int = 5,
                 logger: Optional[MMLogger] = None):
        super().__init__(max_iter, log_interval, num_warmup, logger)

        assert dataset_type in ['train', 'val', 'test'], \
            'dataset_type only supports train,' \
            f' val and test, but got {dataset_type}'
        assert get_world_size(
        ) == 1, 'Dataset benchmark does not allow distributed multi-GPU'
        self.cfg = copy.deepcopy(cfg)

        if dataset_type == 'train':
            dataloader_cfg = copy.deepcopy(cfg.train_dataloader)
        elif dataset_type == 'test':
            dataloader_cfg = copy.deepcopy(cfg.test_dataloader)
        else:
            dataloader_cfg = copy.deepcopy(cfg.val_dataloader)

        dataset_cfg = dataloader_cfg.pop('dataset')
        dataset = DATASETS.build(dataset_cfg)
        # Eagerly load annotations so lazy init does not pollute timings.
        if hasattr(dataset, 'full_init'):
            dataset.full_init()
        self.dataset = dataset

    def run_once(self) -> dict:
        """Executes the benchmark once."""
        pure_inf_time = 0
        fps = 0

        # Visit samples in random order so ordering effects do not bias FPS.
        total_index = list(range(len(self.dataset)))
        np.random.shuffle(total_index)

        start_time = time.perf_counter()
        for i, idx in enumerate(total_index):
            if (i + 1) % self.log_interval == 0:
                print_log('==================================', self.logger)

            get_data_info_start_time = time.perf_counter()
            data_info = self.dataset.get_data_info(idx)
            get_data_info_elapsed = time.perf_counter(
            ) - get_data_info_start_time

            if (i + 1) % self.log_interval == 0:
                print_log(f'get_data_info - {get_data_info_elapsed * 1000} ms',
                          self.logger)

            # Time each pipeline transform individually; a transform may
            # return None to drop the sample, which terminates the loop.
            for t in self.dataset.pipeline.transforms:
                transform_start_time = time.perf_counter()
                data_info = t(data_info)
                transform_elapsed = time.perf_counter() - transform_start_time

                if (i + 1) % self.log_interval == 0:
                    print_log(
                        f'{t.__class__.__name__} - '
                        f'{transform_elapsed * 1000} ms', self.logger)

                if data_info is None:
                    break

            elapsed = time.perf_counter() - start_time

            if i >= self.num_warmup:
                pure_inf_time += elapsed
                if (i + 1) % self.log_interval == 0:
                    fps = (i + 1 - self.num_warmup) / pure_inf_time

                    print_log(
                        f'Done img [{i + 1:<3}/{self.max_iter}], '
                        f'fps: {fps:.1f} img/s, '
                        f'times per img: {1000 / fps:.1f} ms/img',
                        self.logger)

            if (i + 1) == self.max_iter:
                fps = (i + 1 - self.num_warmup) / pure_inf_time
                break

            start_time = time.perf_counter()

        return {'fps': fps}

    def average_multiple_runs(self, results: List[dict]) -> dict:
        """Average the results of multiple runs."""
        print_log('============== Done ==================', self.logger)

        fps_list_ = [round(result['fps'], 1) for result in results]
        avg_fps_ = sum(fps_list_) / len(fps_list_)
        outputs = {'avg_fps': avg_fps_, 'fps_list': fps_list_}

        if len(fps_list_) > 1:
            times_pre_image_list_ = [
                round(1000 / result['fps'], 1) for result in results
            ]
            avg_times_pre_image_ = sum(times_pre_image_list_) / len(
                times_pre_image_list_)

            print_log(
                f'Overall fps: {fps_list_}[{avg_fps_:.1f}] img/s, '
                'times per img: '
                f'{times_pre_image_list_}[{avg_times_pre_image_:.1f}] '
                'ms/img', self.logger)
        else:
            print_log(
                f'Overall fps: {fps_list_[0]:.1f} img/s, '
                f'times per img: {1000 / fps_list_[0]:.1f} ms/img',
                self.logger)

        return outputs
19,185
35.684512
79
py
ERD
ERD-main/mmdet/utils/memory.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import abc
from contextlib import contextmanager
from functools import wraps

import torch
from mmengine.logging import MMLogger


def cast_tensor_type(inputs, src_type=None, dst_type=None):
    """Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``.

    Args:
        inputs: Inputs that to be casted.
        src_type (torch.dtype | torch.device): Source type.
        src_type (torch.dtype | torch.device): Destination type.

    Returns:
        The same type with inputs, but all contained Tensors have been cast.
    """
    assert dst_type is not None
    if isinstance(inputs, torch.Tensor):
        if isinstance(dst_type, torch.device):
            # convert Tensor to dst_device
            # Only tensors whose device matches ``src_type`` (or any tensor
            # when ``src_type`` is None) are moved.
            if hasattr(inputs, 'to') and \
                    hasattr(inputs, 'device') and \
                    (inputs.device == src_type or src_type is None):
                return inputs.to(dst_type)
            else:
                return inputs
        else:
            # convert Tensor to dst_dtype
            if hasattr(inputs, 'to') and \
                    hasattr(inputs, 'dtype') and \
                    (inputs.dtype == src_type or src_type is None):
                return inputs.to(dst_type)
            else:
                return inputs
    # we need to ensure that the type of inputs to be casted are the same
    # as the argument `src_type`.
    elif isinstance(inputs, abc.Mapping):
        return type(inputs)({
            k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type)
            for k, v in inputs.items()
        })
    elif isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(item, src_type=src_type, dst_type=dst_type)
            for item in inputs)
    # TODO: Currently not supported
    # elif isinstance(inputs, InstanceData):
    #     for key, value in inputs.items():
    #         inputs[key] = cast_tensor_type(
    #             value, src_type=src_type, dst_type=dst_type)
    #     return inputs
    else:
        return inputs


@contextmanager
def _ignore_torch_cuda_oom():
    """A context which ignores CUDA OOM exception from pytorch.

    Code is modified from
    <https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py>  # noqa: E501
    """
    try:
        yield
    except RuntimeError as e:
        # NOTE: the string may change?
        if 'CUDA out of memory. ' in str(e):
            pass
        else:
            # Any other RuntimeError is a real failure and must propagate.
            raise


class AvoidOOM:
    """Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of
    Memory error. It will do the following steps:

        1. First retry after calling `torch.cuda.empty_cache()`.
        2. If that still fails, it will then retry by converting inputs
          to FP16.
        3. If that still fails trying to convert inputs to CPUs.
          In this case, it expects the function to dispatch to
          CPU implementation.

    Args:
        to_cpu (bool): Whether to convert outputs to CPU if get an OOM
            error. This will slow down the code significantly.
            Defaults to True.
        test (bool): Skip `_ignore_torch_cuda_oom` operate that can use
            lightweight data in unit test, only used in
            test unit. Defaults to False.

    Examples:
        >>> from mmdet.utils.memory import AvoidOOM
        >>> AvoidCUDAOOM = AvoidOOM()
        >>> output = AvoidOOM.retry_if_cuda_oom(
        >>>     some_torch_function)(input1, input2)
        >>> # To use as a decorator
        >>> # from mmdet.utils import AvoidCUDAOOM
        >>> @AvoidCUDAOOM.retry_if_cuda_oom
        >>> def function(*args, **kwargs):
        >>>     return None
    ```

    Note:
        1. The output may be on CPU even if inputs are on GPU. Processing
            on CPU will slow down the code significantly.
        2. When converting inputs to CPU, it will only look at each argument
            and check if it has `.device` and `.to` for conversion. Nested
            structures of tensors are not supported.
        3. Since the function might be called more than once, it has to be
            stateless.
    """

    def __init__(self, to_cpu=True, test=False):
        self.to_cpu = to_cpu
        self.test = test

    def retry_if_cuda_oom(self, func):
        """Makes a function retry itself after encountering pytorch's CUDA
        OOM error.

        The implementation logic is referred to
        https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py

        Args:
            func: a stateless callable that takes tensor-like objects
                as arguments.
        Returns:
            func: a callable which retries `func` if OOM is encountered.
        """  # noqa: W605

        @wraps(func)
        def wrapped(*args, **kwargs):

            # raw function
            # In test mode these early attempts are skipped so the
            # FP16/CPU fallbacks can be exercised with lightweight data.
            if not self.test:
                with _ignore_torch_cuda_oom():
                    return func(*args, **kwargs)

                # Clear cache and retry
                torch.cuda.empty_cache()
                with _ignore_torch_cuda_oom():
                    return func(*args, **kwargs)

            # get the type and device of first tensor
            dtype, device = None, None
            values = args + tuple(kwargs.values())
            for value in values:
                if isinstance(value, torch.Tensor):
                    dtype = value.dtype
                    device = value.device
                    break
            if dtype is None or device is None:
                raise ValueError('There is no tensor in the inputs, '
                                 'cannot get dtype and device.')

            # Convert to FP16
            fp16_args = cast_tensor_type(args, dst_type=torch.half)
            fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half)
            logger = MMLogger.get_current_instance()
            logger.warning(f'Attempting to copy inputs of {str(func)} '
                           'to FP16 due to CUDA OOM')

            # get input tensor type, the output type will same as
            # the first parameter type.
            with _ignore_torch_cuda_oom():
                output = func(*fp16_args, **fp16_kwargs)
                # Cast the FP16 result back to the original input dtype.
                output = cast_tensor_type(
                    output, src_type=torch.half, dst_type=dtype)
                if not self.test:
                    return output
            logger.warning('Using FP16 still meet CUDA OOM')

            # Try on CPU. This will slow down the code significantly,
            # therefore print a notice.
            if self.to_cpu:
                logger.warning(f'Attempting to copy inputs of {str(func)} '
                               'to CPU due to CUDA OOM')
                cpu_device = torch.empty(0).device
                cpu_args = cast_tensor_type(args, dst_type=cpu_device)
                cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device)

                # convert outputs to GPU
                with _ignore_torch_cuda_oom():
                    logger.warning(f'Convert outputs to GPU '
                                   f'(device={device})')
                    output = func(*cpu_args, **cpu_kwargs)
                    output = cast_tensor_type(
                        output, src_type=cpu_device, dst_type=device)
                    return output

                warnings.warn('Cannot convert output to GPU due to CUDA OOM, '
                              'the output is now on CPU, which might cause '
                              'errors if the output need to interact with GPU '
                              'data in subsequent operations')
                logger.warning('Cannot convert output to GPU due to '
                               'CUDA OOM, the output is on CPU now.')

                return func(*cpu_args, **cpu_kwargs)
            else:
                # may still get CUDA OOM error
                return func(*args, **kwargs)

        return wrapped


# To use AvoidOOM as a decorator
AvoidCUDAOOM = AvoidOOM()
8,099
37.028169
103
py
ERD
ERD-main/mmdet/utils/profiling.py
# Copyright (c) OpenMMLab. All rights reserved.
import contextlib
import sys
import time

import torch

if sys.version_info >= (3, 7):

    @contextlib.contextmanager
    def profile_time(trace_name,
                     name,
                     enabled=True,
                     stream=None,
                     end_stream=None):
        """Print time spent by CPU and GPU.

        Useful as a temporary context manager to find sweet spots of code
        suitable for async implementation.
        """
        # Disabled explicitly, or no CUDA device: act as a no-op manager.
        if (not enabled) or not torch.cuda.is_available():
            yield
            return
        # Fall back to the current stream when none was supplied; timing
        # ends on ``end_stream`` (same stream unless overridden).
        stream = stream or torch.cuda.current_stream()
        end_stream = end_stream or stream
        start_event = torch.cuda.Event(enable_timing=True)
        end_event = torch.cuda.Event(enable_timing=True)
        stream.record_event(start_event)
        try:
            cpu_start = time.monotonic()
            yield
        finally:
            cpu_end = time.monotonic()
            end_stream.record_event(end_event)
            end_event.synchronize()
            cpu_time = (cpu_end - cpu_start) * 1000
            gpu_time = start_event.elapsed_time(end_event)
            msg = (f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
                   f'gpu_time {gpu_time:.2f} ms stream {stream}')
            print(msg, end_stream)
1,336
31.609756
73
py
ERD
ERD-main/mmdet/utils/dist_utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import pickle
import warnings
from collections import OrderedDict

import numpy as np
import torch
import torch.distributed as dist
from mmengine.dist import get_dist_info
from torch._utils import (_flatten_dense_tensors, _take_tensors,
                          _unflatten_dense_tensors)


def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    # Group tensors into buckets (by byte budget when ``bucket_size_mb`` is
    # positive, otherwise by tensor type), then all-reduce each bucket as a
    # single flattened tensor to reduce communication calls.
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        flat_tensors.div_(world_size)
        # Copy the averaged values back into the original tensors.
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            tensor.copy_(synced)


def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce gradients.

    Args:
        params (list[torch.Parameters]): List of parameters of a model
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    # Only parameters that actually received gradients participate.
    grads = [
        param.grad.data for param in params
        if param.requires_grad and param.grad is not None
    ]
    world_size = dist.get_world_size()
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
    else:
        for tensor in grads:
            dist.all_reduce(tensor.div_(world_size))


def reduce_mean(tensor):
    """"Obtain the mean of tensor on different GPUs."""
    # Non-distributed run: the tensor is already the "mean".
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor


def obj2tensor(pyobj, device='cuda'):
    """Serialize picklable python object to tensor."""
    storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))
    return torch.ByteTensor(storage).to(device=device)


def tensor2obj(tensor):
    """Deserialize tensor to picklable python object."""
    return pickle.loads(tensor.cpu().numpy().tobytes())


@functools.lru_cache()
def _get_global_gloo_group():
    """Return a process group based on gloo backend, containing all the ranks
    The result is cached."""
    if dist.get_backend() == 'nccl':
        return dist.new_group(backend='gloo')
    else:
        return dist.group.WORLD


def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
    """Apply all reduce function for python dict object.

    The code is modified from https://github.com/Megvii-
    BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.

    NOTE: make sure that py_dict in different ranks has the same keys and
    the values should be in the same shape. Currently only supports
    nccl backend.

    Args:
        py_dict (dict): Dict to be applied all reduce op.
        op (str): Operator, could be 'sum' or 'mean'. Default: 'sum'
        group (:obj:`torch.distributed.group`, optional): Distributed group,
            Default: None.
        to_float (bool): Whether to convert all values of dict to float.
            Default: True.

    Returns:
        OrderedDict: reduced python dict object.
    """
    warnings.warn(
        'group` is deprecated. Currently only supports NCCL backend.')
    _, world_size = get_dist_info()
    if world_size == 1:
        return py_dict

    # all reduce logic across different devices.
    py_key = list(py_dict.keys())
    if not isinstance(py_dict, OrderedDict):
        # For a plain dict the key order may differ across ranks, so rank 0
        # broadcasts its key order to keep the flattened layout consistent.
        py_key_tensor = obj2tensor(py_key)
        dist.broadcast(py_key_tensor, src=0)
        py_key = tensor2obj(py_key_tensor)

    tensor_shapes = [py_dict[k].shape for k in py_key]
    tensor_numels = [py_dict[k].numel() for k in py_key]

    if to_float:
        warnings.warn('Note: the "to_float" is True, you need to '
                      'ensure that the behavior is reasonable.')
        flatten_tensor = torch.cat(
            [py_dict[k].flatten().float() for k in py_key])
    else:
        flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])

    dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)
    if op == 'mean':
        flatten_tensor /= world_size

    # Split the reduced flat tensor back into the original per-key shapes.
    split_tensors = [
        x.reshape(shape) for x, shape in zip(
            torch.split(flatten_tensor, tensor_numels), tensor_shapes)
    ]
    out_dict = {k: v for k, v in zip(py_key, split_tensors)}
    if isinstance(py_dict, OrderedDict):
        out_dict = OrderedDict(out_dict)
    return out_dict


def sync_random_seed(seed=None, device='cuda'):
    """Make sure different ranks share the same seed.

    All workers must call this function, otherwise it will deadlock.
    This method is generally used in `DistributedSampler`,
    because the seed should be identical across all processes
    in the distributed group.

    In distributed sampling, different ranks should sample non-overlapped
    data in the dataset. Therefore, this function is used to make sure that
    each rank shuffles the data indices in the same order based
    on the same seed. Then different ranks could use different indices
    to select non-overlapped data from the same data list.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is None:
        seed = np.random.randint(2**31)
    assert isinstance(seed, int)

    rank, world_size = get_dist_info()
    if world_size == 1:
        return seed

    # Rank 0's seed wins; every other rank receives it via broadcast.
    if rank == 0:
        random_num = torch.tensor(seed, dtype=torch.int32, device=device)
    else:
        random_num = torch.tensor(0, dtype=torch.int32, device=device)
    dist.broadcast(random_num, src=0)
    return random_num.item()
32.486486
77
py
ERD
ERD-main/mmdet/utils/setup_env.py
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import logging
import os
import platform
import warnings

import cv2
import torch.multiprocessing as mp
from mmengine import DefaultScope
from mmengine.logging import print_log
from mmengine.utils import digit_version


def setup_cache_size_limit_of_dynamo():
    """Setup cache size limit of dynamo.

    Note: Due to the dynamic shape of the loss calculation and
    post-processing parts in the object detection algorithm, these functions
    must be compiled every time they are run. Setting a large value for
    torch._dynamo.config.cache_size_limit may result in repeated compilation,
    which can slow down training and testing speed. Therefore, we need to set
    the default value of cache_size_limit smaller. An empirical value is 4.
    """
    # Imported lazily so that merely importing this module does not pull in
    # torch at import time.
    import torch
    # torch._dynamo only exists from torch 2.0 onwards.
    if digit_version(torch.__version__) >= digit_version('2.0.0'):
        if 'DYNAMO_CACHE_SIZE_LIMIT' in os.environ:
            import torch._dynamo
            cache_size_limit = int(os.environ['DYNAMO_CACHE_SIZE_LIMIT'])
            torch._dynamo.config.cache_size_limit = cache_size_limit
            print_log(
                f'torch._dynamo.config.cache_size_limit is force '
                f'set to {cache_size_limit}.',
                logger='current',
                level=logging.WARNING)


def setup_multi_processes(cfg):
    """Setup multi-processing environment variables.

    Args:
        cfg: Config object supporting ``.get(key, default)`` and exposing a
            ``.data`` attribute with dataloader settings (mmcv/mmengine
            Config-like). NOTE(review): ``cfg.data`` is accessed
            unconditionally below — a config without a ``data`` section would
            raise; confirm against callers.
    """
    # set multi-process start method as `fork` to speed up the training
    # (``fork`` is unavailable on Windows, hence the platform guard)
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            warnings.warn(
                f'Multi-processing start method `{mp_start_method}` is '
                f'different from the previous setting `{current_method}`.'
                f'It will be force set to `{mp_start_method}`. You can change '
                f'this behavior by changing `mp_start_method` in your config.')
        mp.set_start_method(mp_start_method, force=True)

    # disable opencv multithreading to avoid system being overloaded
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)

    # setup OMP threads
    # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa
    workers_per_gpu = cfg.data.get('workers_per_gpu', 1)
    if 'train_dataloader' in cfg.data:
        # use the larger worker count between the generic and the
        # train-specific dataloader settings
        workers_per_gpu = \
            max(cfg.data.train_dataloader.get('workers_per_gpu', 1),
                workers_per_gpu)

    # only restrict thread counts when the user has not set them explicitly
    # and multiple dataloader workers would otherwise oversubscribe the CPU
    if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(
            f'Setting OMP_NUM_THREADS environment variable for each process '
            f'to be {omp_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)

    # setup MKL threads
    if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(
            f'Setting MKL_NUM_THREADS environment variable for each process '
            f'to be {mkl_num_threads} in default, to avoid your system being '
            f'overloaded, please further tune the variable for optimal '
            f'performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)


def register_all_modules(init_default_scope: bool = True) -> None:
    """Register all modules in mmdet into the registries.

    Args:
        init_default_scope (bool): Whether initialize the mmdet default scope.
            When `init_default_scope=True`, the global default scope will be
            set to `mmdet`, and all registries will build modules from mmdet's
            registry node. To understand more about the registry, please refer
            to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md
            Defaults to True.
    """  # noqa
    # Importing the subpackages triggers their ``@MODELS.register_module()``
    # style registrations as a side effect.
    import mmdet.datasets  # noqa: F401,F403
    import mmdet.engine  # noqa: F401,F403
    import mmdet.evaluation  # noqa: F401,F403
    import mmdet.models  # noqa: F401,F403
    import mmdet.visualization  # noqa: F401,F403

    if init_default_scope:
        never_created = DefaultScope.get_current_instance() is None \
            or not DefaultScope.check_instance_created('mmdet')
        if never_created:
            DefaultScope.get_instance('mmdet', scope_name='mmdet')
            return
        current_scope = DefaultScope.get_current_instance()
        if current_scope.scope_name != 'mmdet':
            warnings.warn('The current default scope '
                          f'"{current_scope.scope_name}" is not "mmdet", '
                          '`register_all_modules` will force the current'
                          'default scope to be "mmdet". If this is not '
                          'expected, please set `init_default_scope=False`.')
            # avoid name conflict: scope instance names must be unique, so a
            # timestamp is appended
            new_instance_name = f'mmdet-{datetime.datetime.now()}'
            DefaultScope.get_instance(new_instance_name, scope_name='mmdet')
5,383
44.243697
112
py
ERD
ERD-main/mmdet/utils/split_batch.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch


def split_batch(img, img_metas, kwargs):
    """Split data_batch by tags.

    Code is modified from <https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/structure_utils.py>  # noqa: E501

    Args:
        img (Tensor): of shape (N, C, H, W) encoding input images.
            Typically these should be mean centered and std scaled.
        img_metas (list[dict]): List of image info dict where each dict
            has: 'img_shape', 'scale_factor', 'flip', and may also contain
            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For
            details on the values of these keys, see
            :class:`mmdet.datasets.pipelines.Collect`.
        kwargs (dict): Specific to concrete implementation. NOTE: this dict
            is updated in place with 'img', 'img_metas' and 'tag' entries.

    Returns:
        data_groups (dict): a dict that data_batch splited by tags,
            such as 'sup', 'unsup_teacher', and 'unsup_student'.
    """

    # Re-assemble a per-sample selection into its original container kind:
    # tensors are stacked back into a batch, anything else stays a list.
    def regroup(selected, template):
        if isinstance(template, torch.Tensor):
            return torch.stack(selected)
        return selected

    # Extract, for every field of the batch, the samples whose tag matches
    # ``wanted``.
    def take(batch, wanted):
        keep = [t == wanted for t in batch['tag']]
        picked = {}
        for key, field in batch.items():
            chosen = [item for item, flag in zip(field, keep) if flag]
            picked[key] = regroup(chosen, field)
        return picked

    # fold img / img_metas and the per-sample tags into the batch dict
    kwargs.update({'img': img, 'img_metas': img_metas})
    kwargs.update({'tag': [meta['tag'] for meta in img_metas]})
    data_groups = {tag: take(kwargs, tag) for tag in set(kwargs['tag'])}
    # the helper 'tag' field is internal bookkeeping; drop it from the output
    for group in data_groups.values():
        group.pop('tag')
    return data_groups
1,778
37.673913
99
py
ERD
ERD-main/mmdet/structures/det_data_sample.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional

from mmengine.structures import BaseDataElement, InstanceData, PixelData


class DetDataSample(BaseDataElement):
    """A data structure interface of MMDetection. They are used as interfaces
    between different components.

    The attributes in ``DetDataSample`` are divided into several parts:

        - ``proposals``(InstanceData): Region proposals used in two-stage
            detectors.
        - ``gt_instances``(InstanceData): Ground truth of instance
            annotations.
        - ``pred_instances``(InstanceData): Instances of model predictions.
        - ``ignored_instances``(InstanceData): Instances to be ignored during
            training/testing.
        - ``gt_panoptic_seg``(PixelData): Ground truth of panoptic
            segmentation.
        - ``pred_panoptic_seg``(PixelData): Prediction of panoptic
            segmentation.
        - ``gt_sem_seg``(PixelData): Ground truth of semantic segmentation.
        - ``pred_sem_seg``(PixelData): Prediction of semantic segmentation.

    Examples:
        >>> import torch
        >>> import numpy as np
        >>> from mmengine.structures import InstanceData
        >>> from mmdet.structures import DetDataSample

        >>> data_sample = DetDataSample()
        >>> img_meta = dict(img_shape=(800, 1196),
        ...                 pad_shape=(800, 1216))
        >>> gt_instances = InstanceData(metainfo=img_meta)
        >>> gt_instances.bboxes = torch.rand((5, 4))
        >>> gt_instances.labels = torch.rand((5,))
        >>> data_sample.gt_instances = gt_instances
        >>> assert 'img_shape' in data_sample.gt_instances.metainfo_keys()
        >>> len(data_sample.gt_instances)
        5
        >>> print(data_sample)
        <DetDataSample(
            META INFORMATION
            DATA FIELDS
            gt_instances: <InstanceData(
                    META INFORMATION
                    pad_shape: (800, 1216)
                    img_shape: (800, 1196)
                    DATA FIELDS
                    labels: tensor([0.8533, 0.1550, 0.5433, 0.7294, 0.5098])
                    bboxes:
                    tensor([[9.7725e-01, 5.8417e-01, 1.7269e-01, 6.5694e-01],
                            [1.7894e-01, 5.1780e-01, 7.0590e-01, 4.8589e-01],
                            [7.0392e-01, 6.6770e-01, 1.7520e-01, 1.4267e-01],
                            [2.2411e-01, 5.1962e-01, 9.6953e-01, 6.6994e-01],
                            [4.1338e-01, 2.1165e-01, 2.7239e-04, 6.8477e-01]])
                ) at 0x7f21fb1b9190>
        ) at 0x7f21fb1b9880>
        >>> pred_instances = InstanceData(metainfo=img_meta)
        >>> pred_instances.bboxes = torch.rand((5, 4))
        >>> pred_instances.scores = torch.rand((5,))
        >>> data_sample = DetDataSample(pred_instances=pred_instances)
        >>> assert 'pred_instances' in data_sample

        >>> data_sample = DetDataSample()
        >>> gt_instances_data = dict(
        ...                        bboxes=torch.rand(2, 4),
        ...                        labels=torch.rand(2),
        ...                        masks=np.random.rand(2, 2, 2))
        >>> gt_instances = InstanceData(**gt_instances_data)
        >>> data_sample.gt_instances = gt_instances
        >>> assert 'gt_instances' in data_sample
        >>> assert 'masks' in data_sample.gt_instances

        >>> data_sample = DetDataSample()
        >>> gt_panoptic_seg_data = dict(panoptic_seg=torch.rand(2, 4))
        >>> gt_panoptic_seg = PixelData(**gt_panoptic_seg_data)
        >>> data_sample.gt_panoptic_seg = gt_panoptic_seg
        >>> print(data_sample)
        <DetDataSample(
            META INFORMATION
            DATA FIELDS
            _gt_panoptic_seg: <BaseDataElement(
                    META INFORMATION
                    DATA FIELDS
                    panoptic_seg: tensor([[0.7586, 0.1262, 0.2892, 0.9341],
                                [0.3200, 0.7448, 0.1052, 0.5371]])
                ) at 0x7f66c2bb7730>
            gt_panoptic_seg: <BaseDataElement(
                    META INFORMATION
                    DATA FIELDS
                    panoptic_seg: tensor([[0.7586, 0.1262, 0.2892, 0.9341],
                                [0.3200, 0.7448, 0.1052, 0.5371]])
                ) at 0x7f66c2bb7730>
        ) at 0x7f66c2bb7280>
        >>> data_sample = DetDataSample()
        >>> gt_segm_seg_data = dict(segm_seg=torch.rand(2, 2, 2))
        >>> gt_segm_seg = PixelData(**gt_segm_seg_data)
        >>> data_sample.gt_segm_seg = gt_segm_seg
        >>> assert 'gt_segm_seg' in data_sample
        >>> assert 'segm_seg' in data_sample.gt_segm_seg
    """

    # Each attribute below is a thin property wrapper around
    # ``BaseDataElement.set_field``: the value is stored under a
    # leading-underscore key and type-checked on assignment.

    @property
    def proposals(self) -> InstanceData:
        """InstanceData: region proposals (two-stage detectors)."""
        return self._proposals

    @proposals.setter
    def proposals(self, value: InstanceData):
        self.set_field(value, '_proposals', dtype=InstanceData)

    @proposals.deleter
    def proposals(self):
        del self._proposals

    @property
    def gt_instances(self) -> InstanceData:
        """InstanceData: ground-truth instance annotations."""
        return self._gt_instances

    @gt_instances.setter
    def gt_instances(self, value: InstanceData):
        self.set_field(value, '_gt_instances', dtype=InstanceData)

    @gt_instances.deleter
    def gt_instances(self):
        del self._gt_instances

    @property
    def pred_instances(self) -> InstanceData:
        """InstanceData: predicted instances."""
        return self._pred_instances

    @pred_instances.setter
    def pred_instances(self, value: InstanceData):
        self.set_field(value, '_pred_instances', dtype=InstanceData)

    @pred_instances.deleter
    def pred_instances(self):
        del self._pred_instances

    @property
    def ignored_instances(self) -> InstanceData:
        """InstanceData: instances ignored during training/testing."""
        return self._ignored_instances

    @ignored_instances.setter
    def ignored_instances(self, value: InstanceData):
        self.set_field(value, '_ignored_instances', dtype=InstanceData)

    @ignored_instances.deleter
    def ignored_instances(self):
        del self._ignored_instances

    @property
    def gt_panoptic_seg(self) -> PixelData:
        """PixelData: ground-truth panoptic segmentation."""
        return self._gt_panoptic_seg

    @gt_panoptic_seg.setter
    def gt_panoptic_seg(self, value: PixelData):
        self.set_field(value, '_gt_panoptic_seg', dtype=PixelData)

    @gt_panoptic_seg.deleter
    def gt_panoptic_seg(self):
        del self._gt_panoptic_seg

    @property
    def pred_panoptic_seg(self) -> PixelData:
        """PixelData: predicted panoptic segmentation."""
        return self._pred_panoptic_seg

    @pred_panoptic_seg.setter
    def pred_panoptic_seg(self, value: PixelData):
        self.set_field(value, '_pred_panoptic_seg', dtype=PixelData)

    @pred_panoptic_seg.deleter
    def pred_panoptic_seg(self):
        del self._pred_panoptic_seg

    @property
    def gt_sem_seg(self) -> PixelData:
        """PixelData: ground-truth semantic segmentation."""
        return self._gt_sem_seg

    @gt_sem_seg.setter
    def gt_sem_seg(self, value: PixelData):
        self.set_field(value, '_gt_sem_seg', dtype=PixelData)

    @gt_sem_seg.deleter
    def gt_sem_seg(self):
        del self._gt_sem_seg

    @property
    def pred_sem_seg(self) -> PixelData:
        """PixelData: predicted semantic segmentation."""
        return self._pred_sem_seg

    @pred_sem_seg.setter
    def pred_sem_seg(self, value: PixelData):
        self.set_field(value, '_pred_sem_seg', dtype=PixelData)

    @pred_sem_seg.deleter
    def pred_sem_seg(self):
        del self._pred_sem_seg


# Convenience aliases used throughout mmdet for batched samples.
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
7,409
33.626168
79
py
ERD
ERD-main/mmdet/structures/mask/structures.py
# Copyright (c) OpenMMLab. All rights reserved. import itertools from abc import ABCMeta, abstractmethod from typing import Sequence, Type, TypeVar import cv2 import mmcv import numpy as np import pycocotools.mask as maskUtils import shapely.geometry as geometry import torch from mmcv.ops.roi_align import roi_align T = TypeVar('T') class BaseInstanceMasks(metaclass=ABCMeta): """Base class for instance masks.""" @abstractmethod def rescale(self, scale, interpolation='nearest'): """Rescale masks as large as possible while keeping the aspect ratio. For details can refer to `mmcv.imrescale`. Args: scale (tuple[int]): The maximum size (h, w) of rescaled mask. interpolation (str): Same as :func:`mmcv.imrescale`. Returns: BaseInstanceMasks: The rescaled masks. """ @abstractmethod def resize(self, out_shape, interpolation='nearest'): """Resize masks to the given out_shape. Args: out_shape: Target (h, w) of resized mask. interpolation (str): See :func:`mmcv.imresize`. Returns: BaseInstanceMasks: The resized masks. """ @abstractmethod def flip(self, flip_direction='horizontal'): """Flip masks alone the given direction. Args: flip_direction (str): Either 'horizontal' or 'vertical'. Returns: BaseInstanceMasks: The flipped masks. """ @abstractmethod def pad(self, out_shape, pad_val): """Pad masks to the given size of (h, w). Args: out_shape (tuple[int]): Target (h, w) of padded mask. pad_val (int): The padded value. Returns: BaseInstanceMasks: The padded masks. """ @abstractmethod def crop(self, bbox): """Crop each mask by the given bbox. Args: bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). Return: BaseInstanceMasks: The cropped masks. """ @abstractmethod def crop_and_resize(self, bboxes, out_shape, inds, device, interpolation='bilinear', binarize=True): """Crop and resize masks by the given bboxes. This function is mainly used in mask targets computation. 
It firstly align mask to bboxes by assigned_inds, then crop mask by the assigned bbox and resize to the size of (mask_h, mask_w) Args: bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) out_shape (tuple[int]): Target (h, w) of resized mask inds (ndarray): Indexes to assign masks to each bbox, shape (N,) and values should be between [0, num_masks - 1]. device (str): Device of bboxes interpolation (str): See `mmcv.imresize` binarize (bool): if True fractional values are rounded to 0 or 1 after the resize operation. if False and unsupported an error will be raised. Defaults to True. Return: BaseInstanceMasks: the cropped and resized masks. """ @abstractmethod def expand(self, expanded_h, expanded_w, top, left): """see :class:`Expand`.""" @property @abstractmethod def areas(self): """ndarray: areas of each instance.""" @abstractmethod def to_ndarray(self): """Convert masks to the format of ndarray. Return: ndarray: Converted masks in the format of ndarray. """ @abstractmethod def to_tensor(self, dtype, device): """Convert masks to the format of Tensor. Args: dtype (str): Dtype of converted mask. device (torch.device): Device of converted masks. Returns: Tensor: Converted masks in the format of Tensor. """ @abstractmethod def translate(self, out_shape, offset, direction='horizontal', border_value=0, interpolation='bilinear'): """Translate the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). offset (int | float): The offset for translate. direction (str): The translate direction, either "horizontal" or "vertical". border_value (int | float): Border value. Default 0. interpolation (str): Same as :func:`mmcv.imtranslate`. Returns: Translated masks. """ def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """Shear the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). magnitude (int | float): The magnitude used for shear. 
direction (str): The shear direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. Default 0. interpolation (str): Same as in :func:`mmcv.imshear`. Returns: ndarray: Sheared masks. """ @abstractmethod def rotate(self, out_shape, angle, center=None, scale=1.0, border_value=0): """Rotate the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). angle (int | float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in source image. If not specified, the center of the image will be used. scale (int | float): Isotropic scale factor. border_value (int | float): Border value. Default 0 for masks. Returns: Rotated masks. """ def get_bboxes(self, dst_type='hbb'): """Get the certain type boxes from masks. Please refer to ``mmdet.structures.bbox.box_type`` for more details of the box type. Args: dst_type: Destination box type. Returns: :obj:`BaseBoxes`: Certain type boxes. """ from ..bbox import get_box_type _, box_type_cls = get_box_type(dst_type) return box_type_cls.from_instance_masks(self) @classmethod @abstractmethod def cat(cls: Type[T], masks: Sequence[T]) -> T: """Concatenate a sequence of masks into one single mask instance. Args: masks (Sequence[T]): A sequence of mask instances. Returns: T: Concatenated mask instance. """ class BitmapMasks(BaseInstanceMasks): """This class represents masks in the form of bitmaps. Args: masks (ndarray): ndarray of masks in shape (N, H, W), where N is the number of objects. 
height (int): height of masks width (int): width of masks Example: >>> from mmdet.data_elements.mask.structures import * # NOQA >>> num_masks, H, W = 3, 32, 32 >>> rng = np.random.RandomState(0) >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int64) >>> self = BitmapMasks(masks, height=H, width=W) >>> # demo crop_and_resize >>> num_boxes = 5 >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) >>> out_shape = (14, 14) >>> inds = torch.randint(0, len(self), size=(num_boxes,)) >>> device = 'cpu' >>> interpolation = 'bilinear' >>> new = self.crop_and_resize( ... bboxes, out_shape, inds, device, interpolation) >>> assert len(new) == num_boxes >>> assert new.height, new.width == out_shape """ def __init__(self, masks, height, width): self.height = height self.width = width if len(masks) == 0: self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) else: assert isinstance(masks, (list, np.ndarray)) if isinstance(masks, list): assert isinstance(masks[0], np.ndarray) assert masks[0].ndim == 2 # (H, W) else: assert masks.ndim == 3 # (N, H, W) self.masks = np.stack(masks).reshape(-1, height, width) assert self.masks.shape[1] == self.height assert self.masks.shape[2] == self.width def __getitem__(self, index): """Index the BitmapMask. Args: index (int | ndarray): Indices in the format of integer or ndarray. Returns: :obj:`BitmapMasks`: Indexed bitmap masks. 
""" masks = self.masks[index].reshape(-1, self.height, self.width) return BitmapMasks(masks, self.height, self.width) def __iter__(self): return iter(self.masks) def __repr__(self): s = self.__class__.__name__ + '(' s += f'num_masks={len(self.masks)}, ' s += f'height={self.height}, ' s += f'width={self.width})' return s def __len__(self): """Number of masks.""" return len(self.masks) def rescale(self, scale, interpolation='nearest'): """See :func:`BaseInstanceMasks.rescale`.""" if len(self.masks) == 0: new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) else: rescaled_masks = np.stack([ mmcv.imrescale(mask, scale, interpolation=interpolation) for mask in self.masks ]) height, width = rescaled_masks.shape[1:] return BitmapMasks(rescaled_masks, height, width) def resize(self, out_shape, interpolation='nearest'): """See :func:`BaseInstanceMasks.resize`.""" if len(self.masks) == 0: resized_masks = np.empty((0, *out_shape), dtype=np.uint8) else: resized_masks = np.stack([ mmcv.imresize( mask, out_shape[::-1], interpolation=interpolation) for mask in self.masks ]) return BitmapMasks(resized_masks, *out_shape) def flip(self, flip_direction='horizontal'): """See :func:`BaseInstanceMasks.flip`.""" assert flip_direction in ('horizontal', 'vertical', 'diagonal') if len(self.masks) == 0: flipped_masks = self.masks else: flipped_masks = np.stack([ mmcv.imflip(mask, direction=flip_direction) for mask in self.masks ]) return BitmapMasks(flipped_masks, self.height, self.width) def pad(self, out_shape, pad_val=0): """See :func:`BaseInstanceMasks.pad`.""" if len(self.masks) == 0: padded_masks = np.empty((0, *out_shape), dtype=np.uint8) else: padded_masks = np.stack([ mmcv.impad(mask, shape=out_shape, pad_val=pad_val) for mask in self.masks ]) return BitmapMasks(padded_masks, *out_shape) def crop(self, bbox): """See :func:`BaseInstanceMasks.crop`.""" assert isinstance(bbox, np.ndarray) assert bbox.ndim == 1 # 
clip the boundary bbox = bbox.copy() bbox[0::2] = np.clip(bbox[0::2], 0, self.width) bbox[1::2] = np.clip(bbox[1::2], 0, self.height) x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1, 1) h = np.maximum(y2 - y1, 1) if len(self.masks) == 0: cropped_masks = np.empty((0, h, w), dtype=np.uint8) else: cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] return BitmapMasks(cropped_masks, h, w) def crop_and_resize(self, bboxes, out_shape, inds, device='cpu', interpolation='bilinear', binarize=True): """See :func:`BaseInstanceMasks.crop_and_resize`.""" if len(self.masks) == 0: empty_masks = np.empty((0, *out_shape), dtype=np.uint8) return BitmapMasks(empty_masks, *out_shape) # convert bboxes to tensor if isinstance(bboxes, np.ndarray): bboxes = torch.from_numpy(bboxes).to(device=device) if isinstance(inds, np.ndarray): inds = torch.from_numpy(inds).to(device=device) num_bbox = bboxes.shape[0] fake_inds = torch.arange( num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 rois = rois.to(device=device) if num_bbox > 0: gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( 0, inds).to(dtype=rois.dtype) targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, 1.0, 0, 'avg', True).squeeze(1) if binarize: resized_masks = (targets >= 0.5).cpu().numpy() else: resized_masks = targets.cpu().numpy() else: resized_masks = [] return BitmapMasks(resized_masks, *out_shape) def expand(self, expanded_h, expanded_w, top, left): """See :func:`BaseInstanceMasks.expand`.""" if len(self.masks) == 0: expanded_mask = np.empty((0, expanded_h, expanded_w), dtype=np.uint8) else: expanded_mask = np.zeros((len(self), expanded_h, expanded_w), dtype=np.uint8) expanded_mask[:, top:top + self.height, left:left + self.width] = self.masks return BitmapMasks(expanded_mask, expanded_h, expanded_w) def translate(self, out_shape, offset, direction='horizontal', border_value=0, interpolation='bilinear'): """Translate the BitmapMasks. 
Args: out_shape (tuple[int]): Shape for output mask, format (h, w). offset (int | float): The offset for translate. direction (str): The translate direction, either "horizontal" or "vertical". border_value (int | float): Border value. Default 0 for masks. interpolation (str): Same as :func:`mmcv.imtranslate`. Returns: BitmapMasks: Translated BitmapMasks. Example: >>> from mmdet.data_elements.mask.structures import BitmapMasks >>> self = BitmapMasks.random(dtype=np.uint8) >>> out_shape = (32, 32) >>> offset = 4 >>> direction = 'horizontal' >>> border_value = 0 >>> interpolation = 'bilinear' >>> # Note, There seem to be issues when: >>> # * the mask dtype is not supported by cv2.AffineWarp >>> new = self.translate(out_shape, offset, direction, >>> border_value, interpolation) >>> assert len(new) == len(self) >>> assert new.height, new.width == out_shape """ if len(self.masks) == 0: translated_masks = np.empty((0, *out_shape), dtype=np.uint8) else: masks = self.masks if masks.shape[-2:] != out_shape: empty_masks = np.zeros((masks.shape[0], *out_shape), dtype=masks.dtype) min_h = min(out_shape[0], masks.shape[1]) min_w = min(out_shape[1], masks.shape[2]) empty_masks[:, :min_h, :min_w] = masks[:, :min_h, :min_w] masks = empty_masks translated_masks = mmcv.imtranslate( masks.transpose((1, 2, 0)), offset, direction, border_value=border_value, interpolation=interpolation) if translated_masks.ndim == 2: translated_masks = translated_masks[:, :, None] translated_masks = translated_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(translated_masks, *out_shape) def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """Shear the BitmapMasks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). magnitude (int | float): The magnitude used for shear. direction (str): The shear direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. 
interpolation (str): Same as in :func:`mmcv.imshear`. Returns: BitmapMasks: The sheared masks. """ if len(self.masks) == 0: sheared_masks = np.empty((0, *out_shape), dtype=np.uint8) else: sheared_masks = mmcv.imshear( self.masks.transpose((1, 2, 0)), magnitude, direction, border_value=border_value, interpolation=interpolation) if sheared_masks.ndim == 2: sheared_masks = sheared_masks[:, :, None] sheared_masks = sheared_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(sheared_masks, *out_shape) def rotate(self, out_shape, angle, center=None, scale=1.0, border_value=0, interpolation='bilinear'): """Rotate the BitmapMasks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). angle (int | float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in source image. If not specified, the center of the image will be used. scale (int | float): Isotropic scale factor. border_value (int | float): Border value. Default 0 for masks. interpolation (str): Same as in :func:`mmcv.imrotate`. Returns: BitmapMasks: Rotated BitmapMasks. 
""" if len(self.masks) == 0: rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) else: rotated_masks = mmcv.imrotate( self.masks.transpose((1, 2, 0)), angle, center=center, scale=scale, border_value=border_value, interpolation=interpolation) if rotated_masks.ndim == 2: # case when only one mask, (h, w) rotated_masks = rotated_masks[:, :, None] # (h, w, 1) rotated_masks = rotated_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(rotated_masks, *out_shape) @property def areas(self): """See :py:attr:`BaseInstanceMasks.areas`.""" return self.masks.sum((1, 2)) def to_ndarray(self): """See :func:`BaseInstanceMasks.to_ndarray`.""" return self.masks def to_tensor(self, dtype, device): """See :func:`BaseInstanceMasks.to_tensor`.""" return torch.tensor(self.masks, dtype=dtype, device=device) @classmethod def random(cls, num_masks=3, height=32, width=32, dtype=np.uint8, rng=None): """Generate random bitmap masks for demo / testing purposes. Example: >>> from mmdet.data_elements.mask.structures import BitmapMasks >>> self = BitmapMasks.random() >>> print('self = {}'.format(self)) self = BitmapMasks(num_masks=3, height=32, width=32) """ from mmdet.utils.util_random import ensure_rng rng = ensure_rng(rng) masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) self = cls(masks, height=height, width=width) return self @classmethod def cat(cls: Type[T], masks: Sequence[T]) -> T: """Concatenate a sequence of masks into one single mask instance. Args: masks (Sequence[BitmapMasks]): A sequence of mask instances. Returns: BitmapMasks: Concatenated mask instance. """ assert isinstance(masks, Sequence) if len(masks) == 0: raise ValueError('masks should not be an empty list.') assert all(isinstance(m, cls) for m in masks) mask_array = np.concatenate([m.masks for m in masks], axis=0) return cls(mask_array, *mask_array.shape[1:]) class PolygonMasks(BaseInstanceMasks): """This class represents masks in the form of polygons. 
Polygons is a list of three levels. The first level of the list corresponds to objects, the second level to the polys that compose the object, the third level to the poly coordinates Args: masks (list[list[ndarray]]): The first level of the list corresponds to objects, the second level to the polys that compose the object, the third level to the poly coordinates height (int): height of masks width (int): width of masks Example: >>> from mmdet.data_elements.mask.structures import * # NOQA >>> masks = [ >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] >>> ] >>> height, width = 16, 16 >>> self = PolygonMasks(masks, height, width) >>> # demo translate >>> new = self.translate((16, 16), 4., direction='horizontal') >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) >>> # demo crop_and_resize >>> num_boxes = 3 >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) >>> out_shape = (16, 16) >>> inds = torch.randint(0, len(self), size=(num_boxes,)) >>> device = 'cpu' >>> interpolation = 'bilinear' >>> new = self.crop_and_resize( ... bboxes, out_shape, inds, device, interpolation) >>> assert len(new) == num_boxes >>> assert new.height, new.width == out_shape """ def __init__(self, masks, height, width): assert isinstance(masks, list) if len(masks) > 0: assert isinstance(masks[0], list) assert isinstance(masks[0][0], np.ndarray) self.height = height self.width = width self.masks = masks def __getitem__(self, index): """Index the polygon masks. Args: index (ndarray | List): The indices. Returns: :obj:`PolygonMasks`: The indexed polygon masks. 
""" if isinstance(index, np.ndarray): if index.dtype == bool: index = np.where(index)[0].tolist() else: index = index.tolist() if isinstance(index, list): masks = [self.masks[i] for i in index] else: try: masks = self.masks[index] except Exception: raise ValueError( f'Unsupported input of type {type(index)} for indexing!') if len(masks) and isinstance(masks[0], np.ndarray): masks = [masks] # ensure a list of three levels return PolygonMasks(masks, self.height, self.width) def __iter__(self): return iter(self.masks) def __repr__(self): s = self.__class__.__name__ + '(' s += f'num_masks={len(self.masks)}, ' s += f'height={self.height}, ' s += f'width={self.width})' return s def __len__(self): """Number of masks.""" return len(self.masks) def rescale(self, scale, interpolation=None): """see :func:`BaseInstanceMasks.rescale`""" new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) if len(self.masks) == 0: rescaled_masks = PolygonMasks([], new_h, new_w) else: rescaled_masks = self.resize((new_h, new_w)) return rescaled_masks def resize(self, out_shape, interpolation=None): """see :func:`BaseInstanceMasks.resize`""" if len(self.masks) == 0: resized_masks = PolygonMasks([], *out_shape) else: h_scale = out_shape[0] / self.height w_scale = out_shape[1] / self.width resized_masks = [] for poly_per_obj in self.masks: resized_poly = [] for p in poly_per_obj: p = p.copy() p[0::2] = p[0::2] * w_scale p[1::2] = p[1::2] * h_scale resized_poly.append(p) resized_masks.append(resized_poly) resized_masks = PolygonMasks(resized_masks, *out_shape) return resized_masks def flip(self, flip_direction='horizontal'): """see :func:`BaseInstanceMasks.flip`""" assert flip_direction in ('horizontal', 'vertical', 'diagonal') if len(self.masks) == 0: flipped_masks = PolygonMasks([], self.height, self.width) else: flipped_masks = [] for poly_per_obj in self.masks: flipped_poly_per_obj = [] for p in poly_per_obj: p = p.copy() if flip_direction == 'horizontal': p[0::2] = self.width - 
p[0::2] elif flip_direction == 'vertical': p[1::2] = self.height - p[1::2] else: p[0::2] = self.width - p[0::2] p[1::2] = self.height - p[1::2] flipped_poly_per_obj.append(p) flipped_masks.append(flipped_poly_per_obj) flipped_masks = PolygonMasks(flipped_masks, self.height, self.width) return flipped_masks def crop(self, bbox): """see :func:`BaseInstanceMasks.crop`""" assert isinstance(bbox, np.ndarray) assert bbox.ndim == 1 # clip the boundary bbox = bbox.copy() bbox[0::2] = np.clip(bbox[0::2], 0, self.width) bbox[1::2] = np.clip(bbox[1::2], 0, self.height) x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1, 1) h = np.maximum(y2 - y1, 1) if len(self.masks) == 0: cropped_masks = PolygonMasks([], h, w) else: # reference: https://github.com/facebookresearch/fvcore/blob/main/fvcore/transforms/transform.py # noqa crop_box = geometry.box(x1, y1, x2, y2).buffer(0.0) cropped_masks = [] # suppress shapely warnings util it incorporates GEOS>=3.11.2 # reference: https://github.com/shapely/shapely/issues/1345 initial_settings = np.seterr() np.seterr(invalid='ignore') for poly_per_obj in self.masks: cropped_poly_per_obj = [] for p in poly_per_obj: p = p.copy() p = geometry.Polygon(p.reshape(-1, 2)).buffer(0.0) # polygon must be valid to perform intersection. 
if not p.is_valid: continue cropped = p.intersection(crop_box) if cropped.is_empty: continue if isinstance(cropped, geometry.collection.BaseMultipartGeometry): cropped = cropped.geoms else: cropped = [cropped] # one polygon may be cropped to multiple ones for poly in cropped: # ignore lines or points if not isinstance( poly, geometry.Polygon) or not poly.is_valid: continue coords = np.asarray(poly.exterior.coords) # remove an extra identical vertex at the end coords = coords[:-1] coords[:, 0] -= x1 coords[:, 1] -= y1 cropped_poly_per_obj.append(coords.reshape(-1)) # a dummy polygon to avoid misalignment between masks and boxes if len(cropped_poly_per_obj) == 0: cropped_poly_per_obj = [np.array([0, 0, 0, 0, 0, 0])] cropped_masks.append(cropped_poly_per_obj) np.seterr(**initial_settings) cropped_masks = PolygonMasks(cropped_masks, h, w) return cropped_masks def pad(self, out_shape, pad_val=0): """padding has no effect on polygons`""" return PolygonMasks(self.masks, *out_shape) def expand(self, *args, **kwargs): """TODO: Add expand for polygon""" raise NotImplementedError def crop_and_resize(self, bboxes, out_shape, inds, device='cpu', interpolation='bilinear', binarize=True): """see :func:`BaseInstanceMasks.crop_and_resize`""" out_h, out_w = out_shape if len(self.masks) == 0: return PolygonMasks([], out_h, out_w) if not binarize: raise ValueError('Polygons are always binary, ' 'setting binarize=False is unsupported') resized_masks = [] for i in range(len(bboxes)): mask = self.masks[inds[i]] bbox = bboxes[i, :] x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1, 1) h = np.maximum(y2 - y1, 1) h_scale = out_h / max(h, 0.1) # avoid too large scale w_scale = out_w / max(w, 0.1) resized_mask = [] for p in mask: p = p.copy() # crop # pycocotools will clip the boundary p[0::2] = p[0::2] - bbox[0] p[1::2] = p[1::2] - bbox[1] # resize p[0::2] = p[0::2] * w_scale p[1::2] = p[1::2] * h_scale resized_mask.append(p) resized_masks.append(resized_mask) return PolygonMasks(resized_masks, 
*out_shape) def translate(self, out_shape, offset, direction='horizontal', border_value=None, interpolation=None): """Translate the PolygonMasks. Example: >>> self = PolygonMasks.random(dtype=np.int64) >>> out_shape = (self.height, self.width) >>> new = self.translate(out_shape, 4., direction='horizontal') >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2]) >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501 """ assert border_value is None or border_value == 0, \ 'Here border_value is not '\ f'used, and defaultly should be None or 0. got {border_value}.' if len(self.masks) == 0: translated_masks = PolygonMasks([], *out_shape) else: translated_masks = [] for poly_per_obj in self.masks: translated_poly_per_obj = [] for p in poly_per_obj: p = p.copy() if direction == 'horizontal': p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) elif direction == 'vertical': p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) translated_poly_per_obj.append(p) translated_masks.append(translated_poly_per_obj) translated_masks = PolygonMasks(translated_masks, *out_shape) return translated_masks def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """See :func:`BaseInstanceMasks.shear`.""" if len(self.masks) == 0: sheared_masks = PolygonMasks([], *out_shape) else: sheared_masks = [] if direction == 'horizontal': shear_matrix = np.stack([[1, magnitude], [0, 1]]).astype(np.float32) elif direction == 'vertical': shear_matrix = np.stack([[1, 0], [magnitude, 1]]).astype(np.float32) for poly_per_obj in self.masks: sheared_poly = [] for p in poly_per_obj: p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n] new_coords = np.matmul(shear_matrix, p) # [2, n] new_coords[0, :] = np.clip(new_coords[0, :], 0, out_shape[1]) new_coords[1, :] = np.clip(new_coords[1, :], 0, out_shape[0]) sheared_poly.append( new_coords.transpose((1, 0)).reshape(-1)) sheared_masks.append(sheared_poly) sheared_masks = 
PolygonMasks(sheared_masks, *out_shape) return sheared_masks def rotate(self, out_shape, angle, center=None, scale=1.0, border_value=0, interpolation='bilinear'): """See :func:`BaseInstanceMasks.rotate`.""" if len(self.masks) == 0: rotated_masks = PolygonMasks([], *out_shape) else: rotated_masks = [] rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale) for poly_per_obj in self.masks: rotated_poly = [] for p in poly_per_obj: p = p.copy() coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2] # pad 1 to convert from format [x, y] to homogeneous # coordinates format [x, y, 1] coords = np.concatenate( (coords, np.ones((coords.shape[0], 1), coords.dtype)), axis=1) # [n, 3] rotated_coords = np.matmul( rotate_matrix[None, :, :], coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2] rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0, out_shape[1]) rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0, out_shape[0]) rotated_poly.append(rotated_coords.reshape(-1)) rotated_masks.append(rotated_poly) rotated_masks = PolygonMasks(rotated_masks, *out_shape) return rotated_masks def to_bitmap(self): """convert polygon masks to bitmap masks.""" bitmap_masks = self.to_ndarray() return BitmapMasks(bitmap_masks, self.height, self.width) @property def areas(self): """Compute areas of masks. This func is modified from `detectron2 <https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_. The function only works with Polygons using the shoelace formula. Return: ndarray: areas of each instance """ # noqa: W501 area = [] for polygons_per_obj in self.masks: area_per_obj = 0 for p in polygons_per_obj: area_per_obj += self._polygon_area(p[0::2], p[1::2]) area.append(area_per_obj) return np.asarray(area) def _polygon_area(self, x, y): """Compute the area of a component of a polygon. 
Using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Args: x (ndarray): x coordinates of the component y (ndarray): y coordinates of the component Return: float: the are of the component """ # noqa: 501 return 0.5 * np.abs( np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def to_ndarray(self): """Convert masks to the format of ndarray.""" if len(self.masks) == 0: return np.empty((0, self.height, self.width), dtype=np.uint8) bitmap_masks = [] for poly_per_obj in self.masks: bitmap_masks.append( polygon_to_bitmap(poly_per_obj, self.height, self.width)) return np.stack(bitmap_masks) def to_tensor(self, dtype, device): """See :func:`BaseInstanceMasks.to_tensor`.""" if len(self.masks) == 0: return torch.empty((0, self.height, self.width), dtype=dtype, device=device) ndarray_masks = self.to_ndarray() return torch.tensor(ndarray_masks, dtype=dtype, device=device) @classmethod def random(cls, num_masks=3, height=32, width=32, n_verts=5, dtype=np.float32, rng=None): """Generate random polygon masks for demo / testing purposes. Adapted from [1]_ References: .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501 Example: >>> from mmdet.data_elements.mask.structures import PolygonMasks >>> self = PolygonMasks.random() >>> print('self = {}'.format(self)) """ from mmdet.utils.util_random import ensure_rng rng = ensure_rng(rng) def _gen_polygon(n, irregularity, spikeyness): """Creates the polygon by sampling points on a circle around the centre. Random noise is added by varying the angular spacing between sequential points, and by varying the radial distance of each point from the centre. Based on original code by Mike Ounsworth Args: n (int): number of vertices irregularity (float): [0,1] indicating how much variance there is in the angular spacing of vertices. 
[0,1] will map to [0, 2pi/numberOfVerts] spikeyness (float): [0,1] indicating how much variance there is in each vertex from the circle of radius aveRadius. [0,1] will map to [0, aveRadius] Returns: a list of vertices, in CCW order. """ from scipy.stats import truncnorm # Generate around the unit circle cx, cy = (0.0, 0.0) radius = 1 tau = np.pi * 2 irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n spikeyness = np.clip(spikeyness, 1e-9, 1) # generate n angle steps lower = (tau / n) - irregularity upper = (tau / n) + irregularity angle_steps = rng.uniform(lower, upper, n) # normalize the steps so that point 0 and point n+1 are the same k = angle_steps.sum() / (2 * np.pi) angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) # Convert high and low values to be wrt the standard normal range # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html low = 0 high = 2 * radius mean = radius std = spikeyness a = (low - mean) / std b = (high - mean) / std tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) # now generate the points radii = tnorm.rvs(n, random_state=rng) x_pts = cx + radii * np.cos(angles) y_pts = cy + radii * np.sin(angles) points = np.hstack([x_pts[:, None], y_pts[:, None]]) # Scale to 0-1 space points = points - points.min(axis=0) points = points / points.max(axis=0) # Randomly place within 0-1 space points = points * (rng.rand() * .8 + .2) min_pt = points.min(axis=0) max_pt = points.max(axis=0) high = (1 - max_pt) low = (0 - min_pt) offset = (rng.rand(2) * (high - low)) + low points = points + offset return points def _order_vertices(verts): """ References: https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise """ mlat = verts.T[0].sum() / len(verts) mlng = verts.T[1].sum() / len(verts) tau = np.pi * 2 angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + tau) % tau sortx = angle.argsort() verts = verts.take(sortx, axis=0) return verts # Generate a random 
exterior for each requested mask masks = [] for _ in range(num_masks): exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) exterior = (exterior * [(width, height)]).astype(dtype) masks.append([exterior.ravel()]) self = cls(masks, height, width) return self @classmethod def cat(cls: Type[T], masks: Sequence[T]) -> T: """Concatenate a sequence of masks into one single mask instance. Args: masks (Sequence[PolygonMasks]): A sequence of mask instances. Returns: PolygonMasks: Concatenated mask instance. """ assert isinstance(masks, Sequence) if len(masks) == 0: raise ValueError('masks should not be an empty list.') assert all(isinstance(m, cls) for m in masks) mask_list = list(itertools.chain(*[m.masks for m in masks])) return cls(mask_list, masks[0].height, masks[0].width) def polygon_to_bitmap(polygons, height, width): """Convert masks from the form of polygons to bitmaps. Args: polygons (list[ndarray]): masks in polygon representation height (int): mask height width (int): mask width Return: ndarray: the converted masks in bitmap representation """ rles = maskUtils.frPyObjects(polygons, height, width) rle = maskUtils.merge(rles) bitmap_mask = maskUtils.decode(rle).astype(bool) return bitmap_mask def bitmap_to_polygon(bitmap): """Convert masks from the form of bitmaps to polygons. Args: bitmap (ndarray): masks in bitmap representation. Return: list[ndarray]: the converted mask in polygon representation. bool: whether the mask has holes. """ bitmap = np.ascontiguousarray(bitmap).astype(np.uint8) # cv2.RETR_CCOMP: retrieves all of the contours and organizes them # into a two-level hierarchy. At the top level, there are external # boundaries of the components. At the second level, there are # boundaries of the holes. If there is another contour inside a hole # of a connected component, it is still put at the top level. # cv2.CHAIN_APPROX_NONE: stores absolutely all the contour points. 
outs = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) contours = outs[-2] hierarchy = outs[-1] if hierarchy is None: return [], False # hierarchy[i]: 4 elements, for the indexes of next, previous, # parent, or nested contours. If there is no corresponding contour, # it will be -1. with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any() contours = [c.reshape(-1, 2) for c in contours] return contours, with_hole
44,891
36.59799
141
py
ERD
ERD-main/mmdet/structures/mask/mask_target.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.nn.modules.utils import _pair


def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
                cfg):
    """Compute mask target for positive proposals in multiple images.

    Args:
        pos_proposals_list (list[Tensor]): Positive proposals in multiple
            images, each has shape (num_pos, 4).
        pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
            positive proposals, each has shape (num_pos,).
        gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
            each image.
        cfg (dict): Config dict that specifies the mask size.

    Returns:
        Tensor: Mask target of each image, has shape (num_pos, w, h).

    Example:
        >>> from mmengine.config import Config
        >>> import mmdet
        >>> from mmdet.data_elements.mask import BitmapMasks
        >>> from mmdet.data_elements.mask.mask_target import *
        >>> H, W = 17, 18
        >>> cfg = Config({'mask_size': (13, 14)})
        >>> rng = np.random.RandomState(0)
        >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image
        >>> pos_proposals_list = [
        >>>     torch.Tensor([
        >>>         [ 7.2425,  5.5929, 13.9414, 14.9541],
        >>>         [ 7.3241,  3.6170, 16.3850, 15.3102],
        >>>     ]),
        >>>     torch.Tensor([
        >>>         [ 4.8448, 6.4010,  7.0314,  9.7681],
        >>>         [ 5.9790, 2.6989,  7.4416,  4.8580],
        >>>         [ 0.0000, 0.0000,  0.1398,  9.8232],
        >>>     ]),
        >>> ]
        >>> # Corresponding class index for each proposal for each image
        >>> pos_assigned_gt_inds_list = [
        >>>     torch.LongTensor([7, 0]),
        >>>     torch.LongTensor([5, 4, 1]),
        >>> ]
        >>> # Ground truth mask for each true object for each image
        >>> gt_masks_list = [
        >>>     BitmapMasks(rng.rand(8, H, W), height=H, width=W),
        >>>     BitmapMasks(rng.rand(6, H, W), height=H, width=W),
        >>> ]
        >>> mask_targets = mask_target(
        >>>     pos_proposals_list, pos_assigned_gt_inds_list,
        >>>     gt_masks_list, cfg)
        >>> assert mask_targets.shape == (5,) + cfg['mask_size']
    """
    # Replicate ``cfg`` once per image so ``map`` can walk the four
    # per-image lists in lockstep; the heavy lifting happens per image in
    # ``mask_target_single``.
    cfg_list = [cfg for _ in range(len(pos_proposals_list))]
    mask_targets = map(mask_target_single, pos_proposals_list,
                       pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
    mask_targets = list(mask_targets)
    # Concatenate the per-image targets into a single tensor; an empty
    # list (no positives in the whole batch) is returned unchanged as [].
    if len(mask_targets) > 0:
        mask_targets = torch.cat(mask_targets)
    return mask_targets


def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
    """Compute mask target for each positive proposal in the image.

    Args:
        pos_proposals (Tensor): Positive proposals.
        pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
        gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
            or Polygon.
        cfg (dict): Config dict that indicate the mask size.

    Returns:
        Tensor: Mask target of each positive proposals in the image.

    Example:
        >>> from mmengine.config import Config
        >>> import mmdet
        >>> from mmdet.data_elements.mask import BitmapMasks
        >>> from mmdet.data_elements.mask.mask_target import *  # NOQA
        >>> H, W = 32, 32
        >>> cfg = Config({'mask_size': (7, 11)})
        >>> rng = np.random.RandomState(0)
        >>> # Masks for each ground truth box (relative to the image)
        >>> gt_masks_data = rng.rand(3, H, W)
        >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
        >>> # Predicted positive boxes in one image
        >>> pos_proposals = torch.FloatTensor([
        >>>     [ 16.2,   5.5, 19.9, 20.9],
        >>>     [ 17.3,  13.6, 19.3, 19.3],
        >>>     [ 14.8,  16.4, 17.0, 23.7],
        >>>     [  0.0,   0.0, 16.0, 16.0],
        >>>     [  4.0,   0.0, 20.0, 16.0],
        >>> ])
        >>> # For each predicted proposal, its assignment to a gt mask
        >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
        >>> mask_targets = mask_target_single(
        >>>     pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
        >>> assert mask_targets.shape == (5,) + cfg['mask_size']
    """
    device = pos_proposals.device
    # ``mask_size`` may be a single int or an (h, w) pair; _pair normalizes.
    mask_size = _pair(cfg.mask_size)
    # ``soft_mask_target`` keeps fractional values instead of binarizing.
    binarize = not cfg.get('soft_mask_target', False)
    num_pos = pos_proposals.size(0)
    if num_pos > 0:
        # NOTE(review): for a CPU tensor, ``.cpu().numpy()`` returns an
        # array sharing storage with ``pos_proposals``, so the in-place
        # clipping below may write back into the caller's tensor —
        # TODO confirm this aliasing is intended.
        proposals_np = pos_proposals.cpu().numpy()
        maxh, maxw = gt_masks.height, gt_masks.width
        # Clip proposals to the image boundary before cropping masks.
        proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
        proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
        # Crop each assigned GT mask to its proposal box and resize it to
        # the fixed mask target size.
        mask_targets = gt_masks.crop_and_resize(
            proposals_np,
            mask_size,
            device=device,
            inds=pos_assigned_gt_inds,
            binarize=binarize).to_ndarray()

        mask_targets = torch.from_numpy(mask_targets).float().to(device)
    else:
        # No positives: return an empty (0, h, w) tensor on the same device.
        mask_targets = pos_proposals.new_zeros((0, ) + mask_size)

    return mask_targets
5,264
40.132813
78
py
ERD
ERD-main/mmdet/structures/mask/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pycocotools.mask as mask_util
import torch
from mmengine.utils import slice_list


def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split the combined 1-D polys into masks.

    A mask is represented as a list of polys, and a poly is represented as
    a 1-D array. In dataset, all masks are concatenated into a single 1-D
    tensor. Here we need to split the tensor into original representations.

    Args:
        polys (list): a list (length = image num) of 1-D tensors
        poly_lens (list): a list (length = image num) of poly length
        polys_per_mask (list): a list (length = image num) of poly number
            of each mask

    Returns:
        list: a list (length = image num) of list (length = mask num) of \
            list (length = poly num) of numpy array.
    """
    mask_polys_list = []
    # Walk the three parallel per-image lists in lockstep.
    for polys_single, lens_single, per_mask_single in zip(
            polys, poly_lens, polys_per_mask):
        # First cut the flat tensor into individual polys, then group the
        # polys back into per-mask lists.
        flat_polys = slice_list(polys_single, lens_single.tolist())
        mask_polys_list.append(
            slice_list(flat_polys, per_mask_single.tolist()))
    return mask_polys_list


# TODO: move this function to more proper place
def encode_mask_results(mask_results):
    """Encode bitmap mask to RLE code.

    Args:
        mask_results (list): bitmap mask results.

    Returns:
        list | tuple: RLE encoded mask.
    """
    # pycocotools expects a Fortran-ordered uint8 (H, W, 1) array per mask;
    # encode() returns a one-element list, hence the trailing [0].
    return [
        mask_util.encode(
            np.array(mask[:, :, np.newaxis], order='F', dtype='uint8'))[0]
        for mask in mask_results
    ]


def mask2bbox(masks):
    """Obtain tight bounding boxes of binary masks.

    Args:
        masks (Tensor): Binary mask of shape (n, h, w).

    Returns:
        Tensor: Bboxe with shape (n, 4) of \
            positive region in binary mask.
    """
    num_masks = masks.shape[0]
    bboxes = masks.new_zeros((num_masks, 4), dtype=torch.float32)
    # Columns / rows that contain at least one foreground pixel.
    x_any = torch.any(masks, dim=1)
    y_any = torch.any(masks, dim=2)
    for idx in range(num_masks):
        xs = torch.where(x_any[idx, :])[0]
        ys = torch.where(y_any[idx, :])[0]
        if len(xs) == 0 or len(ys) == 0:
            # Empty mask: keep the all-zero placeholder box.
            continue
        # Half-open box: right/bottom edges are one past the last pixel.
        bboxes[idx, :] = bboxes.new_tensor(
            [xs[0], ys[0], xs[-1] + 1, ys[-1] + 1])
    return bboxes
2,508
31.166667
75
py
ERD
ERD-main/mmdet/structures/bbox/box_type.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Callable, Optional, Tuple, Type, Union

import numpy as np
import torch
from torch import Tensor

from .base_boxes import BaseBoxes

BoxType = Union[np.ndarray, Tensor, BaseBoxes]

# Registry of box types: name -> class, plus the reverse mapping, and a
# registry of conversion functions keyed by '{src_name}2{dst_name}'.
box_types: dict = {}
_box_type_to_name: dict = {}
box_converters: dict = {}


def _register_box(name: str, box_type: Type, force: bool = False) -> None:
    """Register a box type.

    Args:
        name (str): The name of box type.
        box_type (type): Box mode class to be registered.
        force (bool): Whether to override an existing class with the same
            name. Defaults to False.
    """
    assert issubclass(box_type, BaseBoxes)
    name = name.lower()

    if not force and (name in box_types or box_type in _box_type_to_name):
        raise KeyError(f'box type {name} has been registered')
    elif name in box_types:
        # ``force`` overriding by name: drop the stale reverse entry too.
        _box_type = box_types.pop(name)
        _box_type_to_name.pop(_box_type)
    elif box_type in _box_type_to_name:
        # ``force`` overriding by class: drop the stale name entry too.
        _name = _box_type_to_name.pop(box_type)
        box_types.pop(_name)

    box_types[name] = box_type
    _box_type_to_name[box_type] = name


def register_box(name: str,
                 box_type: Type = None,
                 force: bool = False) -> Union[Type, Callable]:
    """Register a box type.

    A record will be added to ``bbox_types``, whose key is the box type name
    and value is the box type itself. Simultaneously, a reverse dictionary
    ``_box_type_to_name`` will be updated. It can be used as a decorator or
    a normal function.

    Args:
        name (str): The name of box type.
        box_type (type, Optional): Box type class to be registered.
            Defaults to None.
        force (bool): Whether to override the existing box type with the same
            name. Defaults to False.

    Examples:
        >>> from mmdet.structures.bbox import register_box
        >>> from mmdet.structures.bbox import BaseBoxes

        >>> # as a decorator
        >>> @register_box('hbox')
        >>> class HorizontalBoxes(BaseBoxes):
        >>>     pass

        >>> # as a normal function
        >>> class RotatedBoxes(BaseBoxes):
        >>>     pass
        >>> register_box('rbox', RotatedBoxes)
    """
    if not isinstance(force, bool):
        raise TypeError(f'force must be a boolean, but got {type(force)}')

    # use it as a normal method: register_box(name, box_type=BoxCls)
    if box_type is not None:
        _register_box(name=name, box_type=box_type, force=force)
        return box_type

    # use it as a decorator: @register_box(name)
    def _register(cls):
        _register_box(name=name, box_type=cls, force=force)
        return cls

    return _register


def _register_box_converter(src_type: Union[str, type],
                            dst_type: Union[str, type],
                            converter: Callable,
                            force: bool = False) -> None:
    """Register a box converter.

    Args:
        src_type (str or type): source box type name or class.
        dst_type (str or type): destination box type name or class.
        converter (Callable): Convert function.
        force (bool): Whether to override the existing box type with the same
            name. Defaults to False.
    """
    assert callable(converter)
    src_type_name, _ = get_box_type(src_type)
    dst_type_name, _ = get_box_type(dst_type)

    converter_name = src_type_name + '2' + dst_type_name
    if not force and converter_name in box_converters:
        raise KeyError(f'The box converter from {src_type_name} to '
                       f'{dst_type_name} has been registered.')

    box_converters[converter_name] = converter


def register_box_converter(src_type: Union[str, type],
                           dst_type: Union[str, type],
                           converter: Optional[Callable] = None,
                           force: bool = False) -> Callable:
    """Register a box converter.

    A record will be added to ``box_converter``, whose key is
    '{src_type_name}2{dst_type_name}' and value is the convert function.
    It can be used as a decorator or a normal function.

    Args:
        src_type (str or type): source box type name or class.
        dst_type (str or type): destination box type name or class.
        converter (Callable): Convert function. Defaults to None.
        force (bool): Whether to override the existing box type with the same
            name. Defaults to False.

    Examples:
        >>> from mmdet.structures.bbox import register_box_converter
        >>> # as a decorator
        >>> @register_box_converter('hbox', 'rbox')
        >>> def converter_A(boxes):
        >>>     pass

        >>> # as a normal function
        >>> def converter_B(boxes):
        >>>     pass
        >>> register_box_converter('rbox', 'hbox', converter_B)
    """
    if not isinstance(force, bool):
        raise TypeError(f'force must be a boolean, but got {type(force)}')

    # use it as a normal method:
    # register_box_converter(src_type, dst_type, converter=Func)
    if converter is not None:
        _register_box_converter(
            src_type=src_type,
            dst_type=dst_type,
            converter=converter,
            force=force)
        return converter

    # use it as a decorator: @register_box_converter(src_type, dst_type)
    def _register(func):
        _register_box_converter(
            src_type=src_type, dst_type=dst_type, converter=func, force=force)
        return func

    return _register


def get_box_type(box_type: Union[str, type]) -> Tuple[str, type]:
    """get both box type name and class.

    Args:
        box_type (str or type): Single box type name or class.

    Returns:
        Tuple[str, type]: A tuple of box type name and class.
    """
    if isinstance(box_type, str):
        type_name = box_type.lower()
        assert type_name in box_types, \
            f"Box type {type_name} hasn't been registered in box_types."
        type_cls = box_types[type_name]
    elif issubclass(box_type, BaseBoxes):
        assert box_type in _box_type_to_name, \
            f"Box type {box_type} hasn't been registered in box_types."
        type_name = _box_type_to_name[box_type]
        type_cls = box_type
    else:
        raise KeyError('box_type must be a str or class inheriting from '
                       f'BaseBoxes, but got {type(box_type)}.')
    return type_name, type_cls


def convert_box_type(boxes: BoxType,
                     *,
                     src_type: Union[str, type] = None,
                     dst_type: Union[str, type] = None) -> BoxType:
    """Convert boxes from source type to destination type.

    If ``boxes`` is a instance of BaseBoxes, the ``src_type`` will be set
    as the type of ``boxes``.

    Args:
        boxes (np.ndarray or Tensor or :obj:`BaseBoxes`): boxes need to
            convert.
        src_type (str or type, Optional): source box type. Defaults to None.
        dst_type (str or type, Optional): destination box type. Defaults to
            None.

    Returns:
        Union[np.ndarray, Tensor, :obj:`BaseBoxes`]: Converted boxes. It's
        type is consistent with the input's type.
    """
    assert dst_type is not None
    dst_type_name, dst_type_cls = get_box_type(dst_type)

    is_box_cls = False
    is_numpy = False
    if isinstance(boxes, BaseBoxes):
        # Source type is implied by the box class itself.
        src_type_name, _ = get_box_type(type(boxes))
        is_box_cls = True
    elif isinstance(boxes, (Tensor, np.ndarray)):
        # Raw tensors/arrays carry no type info; caller must supply it.
        assert src_type is not None
        src_type_name, _ = get_box_type(src_type)
        if isinstance(boxes, np.ndarray):
            is_numpy = True
    else:
        raise TypeError('boxes must be a instance of BaseBoxes, Tensor or '
                        f'ndarray, but get {type(boxes)}.')

    if src_type_name == dst_type_name:
        return boxes

    converter_name = src_type_name + '2' + dst_type_name
    assert converter_name in box_converters, \
        "Convert function hasn't been registered in box_converters."
    converter = box_converters[converter_name]
    # Converters operate on tensors; wrap/unwrap to keep the output type
    # consistent with the input type.
    if is_box_cls:
        boxes = converter(boxes.tensor)
        return dst_type_cls(boxes)
    elif is_numpy:
        boxes = converter(torch.from_numpy(boxes))
        return boxes.numpy()
    else:
        return converter(boxes)


def autocast_box_type(dst_box_type='hbox') -> Callable:
    """A decorator which automatically casts results['gt_bboxes'] to the
    destination box type.

    It commenly used in mmdet.datasets.transforms to make the transforms up-
    compatible with the np.ndarray type of results['gt_bboxes'].

    The speed of processing of np.ndarray and BaseBoxes data are the same:

    - np.ndarray: 0.0509 img/s
    - BaseBoxes: 0.0551 img/s

    Args:
        dst_box_type (str): Destination box type.
    """
    _, box_type_cls = get_box_type(dst_box_type)

    def decorator(func: Callable) -> Callable:

        def wrapper(self, results: dict, *args, **kwargs) -> dict:
            if ('gt_bboxes' not in results
                    or isinstance(results['gt_bboxes'], BaseBoxes)):
                # Nothing to cast; forward the call unchanged. Fix: extra
                # positional/keyword arguments are now forwarded too,
                # matching the np.ndarray branch below.
                return func(self, results, *args, **kwargs)
            elif isinstance(results['gt_bboxes'], np.ndarray):
                # Cast ndarray boxes (and those of mixed-in images) to the
                # destination box class without copying.
                results['gt_bboxes'] = box_type_cls(
                    results['gt_bboxes'], clone=False)
                if 'mix_results' in results:
                    for res in results['mix_results']:
                        if isinstance(res['gt_bboxes'], np.ndarray):
                            res['gt_bboxes'] = box_type_cls(
                                res['gt_bboxes'], clone=False)

                _results = func(self, results, *args, **kwargs)

                # In some cases, the function will process gt_bboxes in-place
                # Simultaneously convert inputting and outputting gt_bboxes
                # back to np.ndarray
                if isinstance(_results, dict) and 'gt_bboxes' in _results:
                    if isinstance(_results['gt_bboxes'], BaseBoxes):
                        _results['gt_bboxes'] = _results['gt_bboxes'].numpy()
                        if isinstance(results['gt_bboxes'], BaseBoxes):
                            results['gt_bboxes'] = results[
                                'gt_bboxes'].numpy()
                return _results
            else:
                raise TypeError(
                    "auto_box_type requires results['gt_bboxes'] to "
                    'be BaseBoxes or np.ndarray, but got '
                    f"{type(results['gt_bboxes'])}")

        return wrapper

    return decorator
10,566
34.579125
79
py
ERD
ERD-main/mmdet/structures/bbox/horizontal_boxes.py
# Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Tuple, TypeVar, Union import cv2 import numpy as np import torch from torch import BoolTensor, Tensor from mmdet.structures.mask.structures import BitmapMasks, PolygonMasks from .base_boxes import BaseBoxes from .bbox_overlaps import bbox_overlaps from .box_type import register_box T = TypeVar('T') DeviceType = Union[str, torch.device] MaskType = Union[BitmapMasks, PolygonMasks] @register_box(name='hbox') class HorizontalBoxes(BaseBoxes): """The horizontal box class used in MMDetection by default. The ``box_dim`` of ``HorizontalBoxes`` is 4, which means the length of the last dimension of the data should be 4. Two modes of box data are supported in ``HorizontalBoxes``: - 'xyxy': Each row of data indicates (x1, y1, x2, y2), which are the coordinates of the left-top and right-bottom points. - 'cxcywh': Each row of data indicates (x, y, w, h), where (x, y) are the coordinates of the box centers and (w, h) are the width and height. ``HorizontalBoxes`` only restores 'xyxy' mode of data. If the the data is in 'cxcywh' mode, users need to input ``in_mode='cxcywh'`` and The code will convert the 'cxcywh' data to 'xyxy' automatically. Args: data (Tensor or np.ndarray or Sequence): The box data with shape of (..., 4). dtype (torch.dtype, Optional): data type of boxes. Defaults to None. device (str or torch.device, Optional): device of boxes. Default to None. clone (bool): Whether clone ``boxes`` or not. Defaults to True. mode (str, Optional): the mode of boxes. If it is 'cxcywh', the `data` will be converted to 'xyxy' mode. Defaults to None. 
""" box_dim: int = 4 def __init__(self, data: Union[Tensor, np.ndarray], dtype: torch.dtype = None, device: DeviceType = None, clone: bool = True, in_mode: Optional[str] = None) -> None: super().__init__(data=data, dtype=dtype, device=device, clone=clone) if isinstance(in_mode, str): if in_mode not in ('xyxy', 'cxcywh'): raise ValueError(f'Get invalid mode {in_mode}.') if in_mode == 'cxcywh': self.tensor = self.cxcywh_to_xyxy(self.tensor) @staticmethod def cxcywh_to_xyxy(boxes: Tensor) -> Tensor: """Convert box coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). Args: boxes (Tensor): cxcywh boxes tensor with shape of (..., 4). Returns: Tensor: xyxy boxes tensor with shape of (..., 4). """ ctr, wh = boxes.split((2, 2), dim=-1) return torch.cat([(ctr - wh / 2), (ctr + wh / 2)], dim=-1) @staticmethod def xyxy_to_cxcywh(boxes: Tensor) -> Tensor: """Convert box coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). Args: boxes (Tensor): xyxy boxes tensor with shape of (..., 4). Returns: Tensor: cxcywh boxes tensor with shape of (..., 4). 
""" xy1, xy2 = boxes.split((2, 2), dim=-1) return torch.cat([(xy2 + xy1) / 2, (xy2 - xy1)], dim=-1) @property def cxcywh(self) -> Tensor: """Return a tensor representing the cxcywh boxes.""" return self.xyxy_to_cxcywh(self.tensor) @property def centers(self) -> Tensor: """Return a tensor representing the centers of boxes.""" boxes = self.tensor return (boxes[..., :2] + boxes[..., 2:]) / 2 @property def areas(self) -> Tensor: """Return a tensor representing the areas of boxes.""" boxes = self.tensor return (boxes[..., 2] - boxes[..., 0]) * ( boxes[..., 3] - boxes[..., 1]) @property def widths(self) -> Tensor: """Return a tensor representing the widths of boxes.""" boxes = self.tensor return boxes[..., 2] - boxes[..., 0] @property def heights(self) -> Tensor: """Return a tensor representing the heights of boxes.""" boxes = self.tensor return boxes[..., 3] - boxes[..., 1] def flip_(self, img_shape: Tuple[int, int], direction: str = 'horizontal') -> None: """Flip boxes horizontally or vertically in-place. Args: img_shape (Tuple[int, int]): A tuple of image height and width. direction (str): Flip direction, options are "horizontal", "vertical" and "diagonal". Defaults to "horizontal" """ assert direction in ['horizontal', 'vertical', 'diagonal'] flipped = self.tensor boxes = flipped.clone() if direction == 'horizontal': flipped[..., 0] = img_shape[1] - boxes[..., 2] flipped[..., 2] = img_shape[1] - boxes[..., 0] elif direction == 'vertical': flipped[..., 1] = img_shape[0] - boxes[..., 3] flipped[..., 3] = img_shape[0] - boxes[..., 1] else: flipped[..., 0] = img_shape[1] - boxes[..., 2] flipped[..., 1] = img_shape[0] - boxes[..., 3] flipped[..., 2] = img_shape[1] - boxes[..., 0] flipped[..., 3] = img_shape[0] - boxes[..., 1] def translate_(self, distances: Tuple[float, float]) -> None: """Translate boxes in-place. Args: distances (Tuple[float, float]): translate distances. The first is horizontal distance and the second is vertical distance. 
""" boxes = self.tensor assert len(distances) == 2 self.tensor = boxes + boxes.new_tensor(distances).repeat(2) def clip_(self, img_shape: Tuple[int, int]) -> None: """Clip boxes according to the image shape in-place. Args: img_shape (Tuple[int, int]): A tuple of image height and width. """ boxes = self.tensor boxes[..., 0::2] = boxes[..., 0::2].clamp(0, img_shape[1]) boxes[..., 1::2] = boxes[..., 1::2].clamp(0, img_shape[0]) def rotate_(self, center: Tuple[float, float], angle: float) -> None: """Rotate all boxes in-place. Args: center (Tuple[float, float]): Rotation origin. angle (float): Rotation angle represented in degrees. Positive values mean clockwise rotation. """ boxes = self.tensor rotation_matrix = boxes.new_tensor( cv2.getRotationMatrix2D(center, -angle, 1)) corners = self.hbox2corner(boxes) corners = torch.cat( [corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1) corners_T = torch.transpose(corners, -1, -2) corners_T = torch.matmul(rotation_matrix, corners_T) corners = torch.transpose(corners_T, -1, -2) self.tensor = self.corner2hbox(corners) def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None: """Geometric transformat boxes in-place. Args: homography_matrix (Tensor or np.ndarray]): Shape (3, 3) for geometric transformation. """ boxes = self.tensor if isinstance(homography_matrix, np.ndarray): homography_matrix = boxes.new_tensor(homography_matrix) corners = self.hbox2corner(boxes) corners = torch.cat( [corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1) corners_T = torch.transpose(corners, -1, -2) corners_T = torch.matmul(homography_matrix, corners_T) corners = torch.transpose(corners_T, -1, -2) # Convert to homogeneous coordinates by normalization corners = corners[..., :2] / corners[..., 2:3] self.tensor = self.corner2hbox(corners) @staticmethod def hbox2corner(boxes: Tensor) -> Tensor: """Convert box coordinates from (x1, y1, x2, y2) to corners ((x1, y1), (x2, y1), (x1, y2), (x2, y2)). 
Args: boxes (Tensor): Horizontal box tensor with shape of (..., 4). Returns: Tensor: Corner tensor with shape of (..., 4, 2). """ x1, y1, x2, y2 = torch.split(boxes, 1, dim=-1) corners = torch.cat([x1, y1, x2, y1, x1, y2, x2, y2], dim=-1) return corners.reshape(*corners.shape[:-1], 4, 2) @staticmethod def corner2hbox(corners: Tensor) -> Tensor: """Convert box coordinates from corners ((x1, y1), (x2, y1), (x1, y2), (x2, y2)) to (x1, y1, x2, y2). Args: corners (Tensor): Corner tensor with shape of (..., 4, 2). Returns: Tensor: Horizontal box tensor with shape of (..., 4). """ if corners.numel() == 0: return corners.new_zeros((0, 4)) min_xy = corners.min(dim=-2)[0] max_xy = corners.max(dim=-2)[0] return torch.cat([min_xy, max_xy], dim=-1) def rescale_(self, scale_factor: Tuple[float, float]) -> None: """Rescale boxes w.r.t. rescale_factor in-place. Note: Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes w.r.t ``scale_facotr``. The difference is that ``resize_`` only changes the width and the height of boxes, but ``rescale_`` also rescales the box centers simultaneously. Args: scale_factor (Tuple[float, float]): factors for scaling boxes. The length should be 2. """ boxes = self.tensor assert len(scale_factor) == 2 scale_factor = boxes.new_tensor(scale_factor).repeat(2) self.tensor = boxes * scale_factor def resize_(self, scale_factor: Tuple[float, float]) -> None: """Resize the box width and height w.r.t scale_factor in-place. Note: Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes w.r.t ``scale_facotr``. The difference is that ``resize_`` only changes the width and the height of boxes, but ``rescale_`` also rescales the box centers simultaneously. Args: scale_factor (Tuple[float, float]): factors for scaling box shapes. The length should be 2. 
""" boxes = self.tensor assert len(scale_factor) == 2 ctrs = (boxes[..., 2:] + boxes[..., :2]) / 2 wh = boxes[..., 2:] - boxes[..., :2] scale_factor = boxes.new_tensor(scale_factor) wh = wh * scale_factor xy1 = ctrs - 0.5 * wh xy2 = ctrs + 0.5 * wh self.tensor = torch.cat([xy1, xy2], dim=-1) def is_inside(self, img_shape: Tuple[int, int], all_inside: bool = False, allowed_border: int = 0) -> BoolTensor: """Find boxes inside the image. Args: img_shape (Tuple[int, int]): A tuple of image height and width. all_inside (bool): Whether the boxes are all inside the image or part inside the image. Defaults to False. allowed_border (int): Boxes that extend beyond the image shape boundary by more than ``allowed_border`` are considered "outside" Defaults to 0. Returns: BoolTensor: A BoolTensor indicating whether the box is inside the image. Assuming the original boxes have shape (m, n, 4), the output has shape (m, n). """ img_h, img_w = img_shape boxes = self.tensor if all_inside: return (boxes[:, 0] >= -allowed_border) & \ (boxes[:, 1] >= -allowed_border) & \ (boxes[:, 2] < img_w + allowed_border) & \ (boxes[:, 3] < img_h + allowed_border) else: return (boxes[..., 0] < img_w + allowed_border) & \ (boxes[..., 1] < img_h + allowed_border) & \ (boxes[..., 2] > -allowed_border) & \ (boxes[..., 3] > -allowed_border) def find_inside_points(self, points: Tensor, is_aligned: bool = False) -> BoolTensor: """Find inside box points. Boxes dimension must be 2. Args: points (Tensor): Points coordinates. Has shape of (m, 2). is_aligned (bool): Whether ``points`` has been aligned with boxes or not. If True, the length of boxes and ``points`` should be the same. Defaults to False. Returns: BoolTensor: A BoolTensor indicating whether a point is inside boxes. Assuming the boxes has shape of (n, 4), if ``is_aligned`` is False. The index has shape of (m, n). If ``is_aligned`` is True, m should be equal to n and the index has shape of (m, ). 
""" boxes = self.tensor assert boxes.dim() == 2, 'boxes dimension must be 2.' if not is_aligned: boxes = boxes[None, :, :] points = points[:, None, :] else: assert boxes.size(0) == points.size(0) x_min, y_min, x_max, y_max = boxes.unbind(dim=-1) return (points[..., 0] >= x_min) & (points[..., 0] <= x_max) & \ (points[..., 1] >= y_min) & (points[..., 1] <= y_max) @staticmethod def overlaps(boxes1: BaseBoxes, boxes2: BaseBoxes, mode: str = 'iou', is_aligned: bool = False, eps: float = 1e-6) -> Tensor: """Calculate overlap between two set of boxes with their types converted to ``HorizontalBoxes``. Args: boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim) or empty. boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim) or empty. mode (str): "iou" (intersection over union), "iof" (intersection over foreground). Defaults to "iou". is_aligned (bool): If True, then m and n must be equal. Defaults to False. eps (float): A value added to the denominator for numerical stability. Defaults to 1e-6. Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) """ boxes1 = boxes1.convert_to('hbox') boxes2 = boxes2.convert_to('hbox') return bbox_overlaps( boxes1.tensor, boxes2.tensor, mode=mode, is_aligned=is_aligned, eps=eps) @staticmethod def from_instance_masks(masks: MaskType) -> 'HorizontalBoxes': """Create horizontal boxes from instance masks. Args: masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or PolygonMasks instance with length of n. Returns: :obj:`HorizontalBoxes`: Converted boxes with shape of (n, 4). 
""" num_masks = len(masks) boxes = np.zeros((num_masks, 4), dtype=np.float32) if isinstance(masks, BitmapMasks): x_any = masks.masks.any(axis=1) y_any = masks.masks.any(axis=2) for idx in range(num_masks): x = np.where(x_any[idx, :])[0] y = np.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: # use +1 for x_max and y_max so that the right and bottom # boundary of instance masks are fully included by the box boxes[idx, :] = np.array( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32) elif isinstance(masks, PolygonMasks): for idx, poly_per_obj in enumerate(masks.masks): # simply use a number that is big enough for comparison with # coordinates xy_min = np.array([masks.width * 2, masks.height * 2], dtype=np.float32) xy_max = np.zeros(2, dtype=np.float32) for p in poly_per_obj: xy = np.array(p).reshape(-1, 2).astype(np.float32) xy_min = np.minimum(xy_min, np.min(xy, axis=0)) xy_max = np.maximum(xy_max, np.max(xy, axis=0)) boxes[idx, :2] = xy_min boxes[idx, 2:] = xy_max else: raise TypeError( '`masks` must be `BitmapMasks` or `PolygonMasks`, ' f'but got {type(masks)}.') return HorizontalBoxes(boxes)
16,542
39.05569
78
py
ERD
ERD-main/mmdet/structures/bbox/base_boxes.py
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Sequence, Tuple, Type, TypeVar, Union

import numpy as np
import torch
from torch import BoolTensor, Tensor

from mmdet.structures.mask.structures import BitmapMasks, PolygonMasks

T = TypeVar('T')
DeviceType = Union[str, torch.device]
IndexType = Union[slice, int, list, torch.LongTensor, torch.cuda.LongTensor,
                  torch.BoolTensor, torch.cuda.BoolTensor, np.ndarray]
MaskType = Union[BitmapMasks, PolygonMasks]


class BaseBoxes(metaclass=ABCMeta):
    """The base class for 2D box types.

    The functions of ``BaseBoxes`` lie in three fields:

    - Verify the boxes shape.
    - Support tensor-like operations.
    - Define abstract functions for 2D boxes.

    In ``__init__``, ``BaseBoxes`` verifies the validity of the data shape
    w.r.t ``box_dim``. A tensor with dimension >= 2 whose last dimension has
    length ``box_dim`` is regarded as valid and stored in ``self.tensor``.
    Subclasses must override ``box_dim``. Tensor-like methods never modify
    the last dimension of ``self.tensor``. New box types inherit from
    ``BaseBoxes``, implement the abstract methods, and are registered with
    the ``register_box_type`` decorator.

    Args:
        data (Tensor or np.ndarray or Sequence): The box data with shape
            (..., box_dim).
        dtype (torch.dtype, Optional): data type of boxes. Defaults to None.
        device (str or torch.device, Optional): device of boxes.
            Default to None.
        clone (bool): Whether clone ``boxes`` or not. Defaults to True.
    """

    # Used to verify the last dimension length.
    # Should override it in subclass.
    box_dim: int = 0

    def __init__(self,
                 data: Union[Tensor, np.ndarray, Sequence],
                 dtype: Optional[torch.dtype] = None,
                 device: Optional[DeviceType] = None,
                 clone: bool = True) -> None:
        if isinstance(data, (np.ndarray, Tensor, Sequence)):
            data = torch.as_tensor(data)
        else:
            # BUGFIX: the original passed two arguments to TypeError, which
            # made the exception message a tuple instead of one string.
            raise TypeError('boxes should be Tensor, ndarray, or Sequence, '
                            f'but got {type(data)}')

        if device is not None or dtype is not None:
            data = data.to(dtype=dtype, device=device)
        # Clone the data to avoid potential bugs
        if clone:
            data = data.clone()
        # handle the empty input like []
        if data.numel() == 0:
            data = data.reshape((-1, self.box_dim))

        assert data.dim() >= 2 and data.size(-1) == self.box_dim, \
            ('The boxes dimension must >= 2 and the length of the last '
             f'dimension must be {self.box_dim}, but got boxes with '
             f'shape {data.shape}.')
        self.tensor = data

    def convert_to(self, dst_type: Union[str, type]) -> 'BaseBoxes':
        """Convert self to another box type.

        Args:
            dst_type (str or type): destination box type.

        Returns:
            :obj:`BaseBoxes`: destination box type object.
        """
        from .box_type import convert_box_type
        return convert_box_type(self, dst_type=dst_type)

    def empty_boxes(self: T,
                    dtype: Optional[torch.dtype] = None,
                    device: Optional[DeviceType] = None) -> T:
        """Create empty box.

        Args:
            dtype (torch.dtype, Optional): data type of boxes.
            device (str or torch.device, Optional): device of boxes.

        Returns:
            T: empty boxes with shape of (0, box_dim).
        """
        empty_box = self.tensor.new_zeros(
            0, self.box_dim, dtype=dtype, device=device)
        return type(self)(empty_box, clone=False)

    def fake_boxes(self: T,
                   sizes: Tuple[int],
                   fill: float = 0,
                   dtype: Optional[torch.dtype] = None,
                   device: Optional[DeviceType] = None) -> T:
        """Create fake boxes with specific sizes and fill values.

        Args:
            sizes (Tuple[int]): The size of fake boxes. The last value must
                be equal with ``self.box_dim``.
            fill (float): filling value. Defaults to 0.
            dtype (torch.dtype, Optional): data type of boxes.
            device (str or torch.device, Optional): device of boxes.

        Returns:
            T: Fake boxes with shape of ``sizes``.
        """
        fake_boxes = self.tensor.new_full(
            sizes, fill, dtype=dtype, device=device)
        return type(self)(fake_boxes, clone=False)

    def __getitem__(self: T, index: IndexType) -> T:
        """Rewrite getitem to protect the last dimension shape."""
        boxes = self.tensor
        if isinstance(index, np.ndarray):
            index = torch.as_tensor(index, device=self.device)
        if isinstance(index, Tensor) and index.dtype == torch.bool:
            assert index.dim() < boxes.dim()
        elif isinstance(index, tuple):
            assert len(index) < boxes.dim()
            # `Ellipsis`(...) is commonly used in index like [None, ...].
            # When `Ellipsis` is in index, it must be the last item.
            if Ellipsis in index:
                assert index[-1] is Ellipsis

        boxes = boxes[index]
        if boxes.dim() == 1:
            boxes = boxes.reshape(1, -1)
        return type(self)(boxes, clone=False)

    def __setitem__(self, index: IndexType, values: Union[Tensor,
                                                          T]) -> None:
        """Rewrite setitem to protect the last dimension shape.

        Note: the return annotation is ``None``; this method mutates
        ``self.tensor`` in place (the original annotation ``-> T`` was
        wrong).
        """
        assert type(values) is type(self), \
            'The value to be set must be the same box type as self'
        values = values.tensor

        if isinstance(index, np.ndarray):
            index = torch.as_tensor(index, device=self.device)
        if isinstance(index, Tensor) and index.dtype == torch.bool:
            assert index.dim() < self.tensor.dim()
        elif isinstance(index, tuple):
            assert len(index) < self.tensor.dim()
            # `Ellipsis`(...) is commonly used in index like [None, ...].
            # When `Ellipsis` is in index, it must be the last item.
            if Ellipsis in index:
                assert index[-1] is Ellipsis

        self.tensor[index] = values

    def __len__(self) -> int:
        """Return the length of self.tensor first dimension."""
        return self.tensor.size(0)

    def __deepcopy__(self, memo):
        """Only clone the ``self.tensor`` when applying deepcopy."""
        cls = self.__class__
        other = cls.__new__(cls)
        memo[id(self)] = other
        other.tensor = self.tensor.clone()
        return other

    def __repr__(self) -> str:
        """Return a strings that describes the object."""
        return self.__class__.__name__ + '(\n' + str(self.tensor) + ')'

    def new_tensor(self, *args, **kwargs) -> Tensor:
        """Reload ``new_tensor`` from self.tensor."""
        return self.tensor.new_tensor(*args, **kwargs)

    def new_full(self, *args, **kwargs) -> Tensor:
        """Reload ``new_full`` from self.tensor."""
        return self.tensor.new_full(*args, **kwargs)

    def new_empty(self, *args, **kwargs) -> Tensor:
        """Reload ``new_empty`` from self.tensor."""
        return self.tensor.new_empty(*args, **kwargs)

    def new_ones(self, *args, **kwargs) -> Tensor:
        """Reload ``new_ones`` from self.tensor."""
        return self.tensor.new_ones(*args, **kwargs)

    def new_zeros(self, *args, **kwargs) -> Tensor:
        """Reload ``new_zeros`` from self.tensor."""
        return self.tensor.new_zeros(*args, **kwargs)

    def size(self, dim: Optional[int] = None) -> Union[int, torch.Size]:
        """Reload ``size`` from self.tensor."""
        # self.tensor.size(dim) cannot work when dim=None.
        return self.tensor.size() if dim is None else self.tensor.size(dim)

    def dim(self) -> int:
        """Reload ``dim`` from self.tensor."""
        return self.tensor.dim()

    @property
    def device(self) -> torch.device:
        """Reload ``device`` from self.tensor."""
        return self.tensor.device

    @property
    def dtype(self) -> torch.dtype:
        """Reload ``dtype`` from self.tensor."""
        return self.tensor.dtype

    @property
    def shape(self) -> torch.Size:
        """Reload ``shape`` from self.tensor."""
        return self.tensor.shape

    def numel(self) -> int:
        """Reload ``numel`` from self.tensor."""
        return self.tensor.numel()

    def numpy(self) -> np.ndarray:
        """Reload ``numpy`` from self.tensor."""
        return self.tensor.numpy()

    def to(self: T, *args, **kwargs) -> T:
        """Reload ``to`` from self.tensor."""
        return type(self)(self.tensor.to(*args, **kwargs), clone=False)

    def cpu(self: T) -> T:
        """Reload ``cpu`` from self.tensor."""
        return type(self)(self.tensor.cpu(), clone=False)

    def cuda(self: T, *args, **kwargs) -> T:
        """Reload ``cuda`` from self.tensor."""
        return type(self)(self.tensor.cuda(*args, **kwargs), clone=False)

    def clone(self: T) -> T:
        """Reload ``clone`` from self.tensor."""
        return type(self)(self.tensor)

    def detach(self: T) -> T:
        """Reload ``detach`` from self.tensor."""
        return type(self)(self.tensor.detach(), clone=False)

    def view(self: T, *shape: Tuple[int]) -> T:
        """Reload ``view`` from self.tensor."""
        return type(self)(self.tensor.view(shape), clone=False)

    def reshape(self: T, *shape: Tuple[int]) -> T:
        """Reload ``reshape`` from self.tensor."""
        return type(self)(self.tensor.reshape(shape), clone=False)

    def expand(self: T, *sizes: Tuple[int]) -> T:
        """Reload ``expand`` from self.tensor."""
        return type(self)(self.tensor.expand(sizes), clone=False)

    def repeat(self: T, *sizes: Tuple[int]) -> T:
        """Reload ``repeat`` from self.tensor."""
        return type(self)(self.tensor.repeat(sizes), clone=False)

    def transpose(self: T, dim0: int, dim1: int) -> T:
        """Reload ``transpose`` from self.tensor.

        Neither dimension may be the last one, which stores the box fields.
        """
        ndim = self.tensor.dim()
        assert dim0 != -1 and dim0 != ndim - 1
        assert dim1 != -1 and dim1 != ndim - 1
        return type(self)(self.tensor.transpose(dim0, dim1), clone=False)

    def permute(self: T, *dims: Tuple[int]) -> T:
        """Reload ``permute`` from self.tensor.

        The last dimension must stay last to keep the box layout intact.
        """
        assert dims[-1] == -1 or dims[-1] == self.tensor.dim() - 1
        return type(self)(self.tensor.permute(dims), clone=False)

    def split(self: T,
              split_size_or_sections: Union[int, Sequence[int]],
              dim: int = 0) -> List[T]:
        """Reload ``split`` from self.tensor."""
        assert dim != -1 and dim != self.tensor.dim() - 1
        boxes_list = self.tensor.split(split_size_or_sections, dim=dim)
        return [type(self)(boxes, clone=False) for boxes in boxes_list]

    def chunk(self: T, chunks: int, dim: int = 0) -> List[T]:
        """Reload ``chunk`` from self.tensor."""
        assert dim != -1 and dim != self.tensor.dim() - 1
        boxes_list = self.tensor.chunk(chunks, dim=dim)
        return [type(self)(boxes, clone=False) for boxes in boxes_list]

    def unbind(self: T, dim: int = 0) -> List[T]:
        """Reload ``unbind`` from self.tensor.

        Note: returns a list of boxes (the original ``-> T`` annotation was
        wrong).
        """
        assert dim != -1 and dim != self.tensor.dim() - 1
        boxes_list = self.tensor.unbind(dim=dim)
        return [type(self)(boxes, clone=False) for boxes in boxes_list]

    def flatten(self: T, start_dim: int = 0, end_dim: int = -2) -> T:
        """Reload ``flatten`` from self.tensor."""
        assert end_dim != -1 and end_dim != self.tensor.dim() - 1
        return type(self)(self.tensor.flatten(start_dim, end_dim),
                          clone=False)

    def squeeze(self: T, dim: Optional[int] = None) -> T:
        """Reload ``squeeze`` from self.tensor."""
        boxes = self.tensor.squeeze() if dim is None else \
            self.tensor.squeeze(dim)
        return type(self)(boxes, clone=False)

    def unsqueeze(self: T, dim: int) -> T:
        """Reload ``unsqueeze`` from self.tensor."""
        assert dim != -1 and dim != self.tensor.dim()
        return type(self)(self.tensor.unsqueeze(dim), clone=False)

    @classmethod
    def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:
        """Cancatenates a box instance list into one single box instance.
        Similar to ``torch.cat``.

        Args:
            box_list (Sequence[T]): A sequence of box instances.
            dim (int): The dimension over which the box are concatenated.
                Defaults to 0.

        Returns:
            T: Concatenated box instance.
        """
        assert isinstance(box_list, Sequence)
        if len(box_list) == 0:
            raise ValueError('box_list should not be a empty list.')

        assert dim != -1 and dim != box_list[0].dim() - 1
        assert all(isinstance(boxes, cls) for boxes in box_list)

        th_box_list = [boxes.tensor for boxes in box_list]
        return cls(torch.cat(th_box_list, dim=dim), clone=False)

    @classmethod
    def stack(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:
        """Concatenates a sequence of tensors along a new dimension. Similar
        to ``torch.stack``.

        Args:
            box_list (Sequence[T]): A sequence of box instances.
            dim (int): Dimension to insert. Defaults to 0.

        Returns:
            T: Concatenated box instance.
        """
        assert isinstance(box_list, Sequence)
        if len(box_list) == 0:
            raise ValueError('box_list should not be a empty list.')

        assert dim != -1 and dim != box_list[0].dim()
        assert all(isinstance(boxes, cls) for boxes in box_list)

        th_box_list = [boxes.tensor for boxes in box_list]
        return cls(torch.stack(th_box_list, dim=dim), clone=False)

    # ``abstractproperty``/``abstractstaticmethod`` are deprecated since
    # Python 3.3; the documented replacements below are equivalent.
    @property
    @abstractmethod
    def centers(self) -> Tensor:
        """Return a tensor representing the centers of boxes."""
        pass

    @property
    @abstractmethod
    def areas(self) -> Tensor:
        """Return a tensor representing the areas of boxes."""
        pass

    @property
    @abstractmethod
    def widths(self) -> Tensor:
        """Return a tensor representing the widths of boxes."""
        pass

    @property
    @abstractmethod
    def heights(self) -> Tensor:
        """Return a tensor representing the heights of boxes."""
        pass

    @abstractmethod
    def flip_(self,
              img_shape: Tuple[int, int],
              direction: str = 'horizontal') -> None:
        """Flip boxes horizontally or vertically in-place.

        Args:
            img_shape (Tuple[int, int]): A tuple of image height and width.
            direction (str): Flip direction, options are "horizontal",
                "vertical" and "diagonal". Defaults to "horizontal"
        """
        pass

    @abstractmethod
    def translate_(self, distances: Tuple[float, float]) -> None:
        """Translate boxes in-place.

        Args:
            distances (Tuple[float, float]): translate distances. The first
                is horizontal distance and the second is vertical distance.
        """
        pass

    @abstractmethod
    def clip_(self, img_shape: Tuple[int, int]) -> None:
        """Clip boxes according to the image shape in-place.

        Args:
            img_shape (Tuple[int, int]): A tuple of image height and width.
        """
        pass

    @abstractmethod
    def rotate_(self, center: Tuple[float, float], angle: float) -> None:
        """Rotate all boxes in-place.

        Args:
            center (Tuple[float, float]): Rotation origin.
            angle (float): Rotation angle represented in degrees. Positive
                values mean clockwise rotation.
        """
        pass

    @abstractmethod
    def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None:
        """Geometric transformat boxes in-place.

        Args:
            homography_matrix (Tensor or np.ndarray]): Shape (3, 3) for
                geometric transformation.
        """
        pass

    @abstractmethod
    def rescale_(self, scale_factor: Tuple[float, float]) -> None:
        """Rescale boxes w.r.t. rescale_factor in-place.

        Note:
            Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes
            w.r.t ``scale_facotr``. The difference is that ``resize_`` only
            changes the width and the height of boxes, but ``rescale_`` also
            rescales the box centers simultaneously.

        Args:
            scale_factor (Tuple[float, float]): factors for scaling boxes.
                The length should be 2.
        """
        pass

    @abstractmethod
    def resize_(self, scale_factor: Tuple[float, float]) -> None:
        """Resize the box width and height w.r.t scale_factor in-place.

        Note:
            Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes
            w.r.t ``scale_facotr``. The difference is that ``resize_`` only
            changes the width and the height of boxes, but ``rescale_`` also
            rescales the box centers simultaneously.

        Args:
            scale_factor (Tuple[float, float]): factors for scaling box
                shapes. The length should be 2.
        """
        pass

    @abstractmethod
    def is_inside(self,
                  img_shape: Tuple[int, int],
                  all_inside: bool = False,
                  allowed_border: int = 0) -> BoolTensor:
        """Find boxes inside the image.

        Args:
            img_shape (Tuple[int, int]): A tuple of image height and width.
            all_inside (bool): Whether the boxes are all inside the image or
                part inside the image. Defaults to False.
            allowed_border (int): Boxes that extend beyond the image shape
                boundary by more than ``allowed_border`` are considered
                "outside" Defaults to 0.

        Returns:
            BoolTensor: A BoolTensor indicating whether the box is inside
            the image. Assuming the original boxes have shape (m, n,
            box_dim), the output has shape (m, n).
        """
        pass

    @abstractmethod
    def find_inside_points(self,
                           points: Tensor,
                           is_aligned: bool = False) -> BoolTensor:
        """Find inside box points. Boxes dimension must be 2.

        Args:
            points (Tensor): Points coordinates. Has shape of (m, 2).
            is_aligned (bool): Whether ``points`` has been aligned with
                boxes or not. If True, the length of boxes and ``points``
                should be the same. Defaults to False.

        Returns:
            BoolTensor: A BoolTensor indicating whether a point is inside
            boxes. Assuming the boxes has shape of (n, box_dim), if
            ``is_aligned`` is False. The index has shape of (m, n). If
            ``is_aligned`` is True, m should be equal to n and the index has
            shape of (m, ).
        """
        pass

    @staticmethod
    @abstractmethod
    def overlaps(boxes1: 'BaseBoxes',
                 boxes2: 'BaseBoxes',
                 mode: str = 'iou',
                 is_aligned: bool = False,
                 eps: float = 1e-6) -> Tensor:
        """Calculate overlap between two set of boxes with their types
        converted to the present box type.

        Args:
            boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim)
                or empty.
            boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim)
                or empty.
            mode (str): "iou" (intersection over union), "iof" (intersection
                over foreground). Defaults to "iou".
            is_aligned (bool): If True, then m and n must be equal. Defaults
                to False.
            eps (float): A value added to the denominator for numerical
                stability. Defaults to 1e-6.

        Returns:
            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
        """
        pass

    @staticmethod
    @abstractmethod
    def from_instance_masks(masks: MaskType) -> 'BaseBoxes':
        """Create boxes from instance masks.

        Args:
            masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks
                or PolygonMasks instance with length of n.

        Returns:
            :obj:`BaseBoxes`: Converted boxes with shape of (n, box_dim).
        """
        pass
20,934
37.063636
79
py
ERD
ERD-main/mmdet/structures/bbox/bbox_overlaps.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch


def fp16_clamp(x, min=None, max=None):
    """Clamp that also works for CPU float16 tensors.

    ``Tensor.clamp`` has no CPU half-precision kernel, so fp16 CPU input is
    clamped in fp32 and cast back to fp16.
    """
    if not x.is_cuda and x.dtype == torch.float16:
        # clamp for cpu float16, tensor fp16 has no clamp implementation
        return x.float().clamp(min, max).half()
    return x.clamp(min, max)


def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
    """Calculate overlap between two set of bboxes.

    FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889

    Note:
        When ``is_aligned`` is False the intermediate tensors (lt, rb, wh,
        overlap, union, ious) are all M x N, so memory is roughly
        (9 x N x M + N + M) * 4 bytes in FP32; running in FP16 halves that
        (e.g. M=512 ground truths, N=400000 priors: ~3.4 GB saved per
        batch element). When ``is_aligned`` is True all intermediates are
        length N. FP16 is also generally faster than FP32.

        If ``is_aligned`` is ``False``, overlaps are computed between each
        bbox of bboxes1 and each bbox of bboxes2; otherwise between aligned
        pairs.

    Args:
        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or
            empty.
        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or
            empty. B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union), "iof" (intersection
            over foreground) or "giou" (generalized intersection over
            union). Default "iou".
        is_aligned (bool, optional): If True, then m and n must be equal.
            Default False.
        eps (float, optional): A value added to the denominator for
            numerical stability. Default 1e-6.

    Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)

    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> overlaps = bbox_overlaps(bboxes1, bboxes2)
        >>> assert overlaps.shape == (3, 3)
        >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
        >>> assert overlaps.shape == (3, )

    Example:
        >>> empty = torch.empty(0, 4)
        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """
    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
    # Either the boxes are empty or the length of boxes' last dimension is 4
    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)

    # Batch dim must be the same
    # Batch dim: (B1, B2, ... Bn)
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]

    rows = bboxes1.size(-2)
    cols = bboxes2.size(-2)
    if is_aligned:
        assert rows == cols

    if rows * cols == 0:
        # FIX: ``Tensor.new`` (deprecated, returns uninitialized memory)
        # replaced by ``new_zeros``; identical here because one of the
        # output dimensions is zero, so the result holds no elements.
        if is_aligned:
            return bboxes1.new_zeros(batch_shape + (rows, ))
        else:
            return bboxes1.new_zeros(batch_shape + (rows, cols))

    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
        bboxes1[..., 3] - bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
        bboxes2[..., 3] - bboxes2[..., 1])

    if is_aligned:
        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]
        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]

        wh = fp16_clamp(rb - lt, min=0)
        overlap = wh[..., 0] * wh[..., 1]

        if mode in ['iou', 'giou']:
            union = area1 + area2 - overlap
        else:
            union = area1
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
    else:
        lt = torch.max(bboxes1[..., :, None, :2],
                       bboxes2[..., None, :, :2])  # [B, rows, cols, 2]
        rb = torch.min(bboxes1[..., :, None, 2:],
                       bboxes2[..., None, :, 2:])  # [B, rows, cols, 2]

        wh = fp16_clamp(rb - lt, min=0)
        overlap = wh[..., 0] * wh[..., 1]

        if mode in ['iou', 'giou']:
            union = area1[..., None] + area2[..., None, :] - overlap
        else:
            union = area1[..., None]
        if mode == 'giou':
            enclosed_lt = torch.min(bboxes1[..., :, None, :2],
                                    bboxes2[..., None, :, :2])
            enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
                                    bboxes2[..., None, :, 2:])

    eps = union.new_tensor([eps])
    union = torch.max(union, eps)
    ious = overlap / union
    if mode in ['iou', 'iof']:
        return ious
    # calculate gious
    enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = torch.max(enclose_area, eps)
    gious = ious - (enclose_area - union) / enclose_area
    return gious
7,323
35.62
78
py
ERD
ERD-main/mmdet/structures/bbox/transforms.py
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Sequence, Tuple, Union

import numpy as np
import torch
from torch import Tensor

from mmdet.structures.bbox import BaseBoxes


def find_inside_bboxes(bboxes: Tensor, img_h: int, img_w: int) -> Tensor:
    """Find bboxes as long as a part of bboxes is inside the image.

    Args:
        bboxes (Tensor): Shape (N, 4).
        img_h (int): Image height.
        img_w (int): Image width.

    Returns:
        Tensor: Index of the remaining bboxes.
    """
    return ((bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0)
            & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0))


def bbox_flip(bboxes: Tensor,
              img_shape: Tuple[int],
              direction: str = 'horizontal') -> Tensor:
    """Flip bboxes horizontally or vertically.

    Args:
        bboxes (Tensor): Shape (..., 4*k)
        img_shape (Tuple[int]): Image shape.
        direction (str): Flip direction, options are "horizontal",
            "vertical", "diagonal". Default: "horizontal"

    Returns:
        Tensor: Flipped bboxes.
    """
    assert bboxes.shape[-1] % 4 == 0
    assert direction in ['horizontal', 'vertical', 'diagonal']
    out = bboxes.clone()
    img_h, img_w = img_shape[0], img_shape[1]
    # "diagonal" is a horizontal flip followed by a vertical one.
    if direction in ('horizontal', 'diagonal'):
        out[..., 0::4] = img_w - bboxes[..., 2::4]
        out[..., 2::4] = img_w - bboxes[..., 0::4]
    if direction in ('vertical', 'diagonal'):
        out[..., 1::4] = img_h - bboxes[..., 3::4]
        out[..., 3::4] = img_h - bboxes[..., 1::4]
    return out


def bbox_mapping(bboxes: Tensor,
                 img_shape: Tuple[int],
                 scale_factor: Union[float, Tuple[float]],
                 flip: bool,
                 flip_direction: str = 'horizontal') -> Tensor:
    """Map bboxes from the original image scale to testing scale."""
    mapped = bboxes * bboxes.new_tensor(scale_factor)
    if flip:
        mapped = bbox_flip(mapped, img_shape, flip_direction)
    return mapped


def bbox_mapping_back(bboxes: Tensor,
                      img_shape: Tuple[int],
                      scale_factor: Union[float, Tuple[float]],
                      flip: bool,
                      flip_direction: str = 'horizontal') -> Tensor:
    """Map bboxes from testing scale to original image scale."""
    unflipped = bbox_flip(bboxes, img_shape,
                          flip_direction) if flip else bboxes
    restored = unflipped.view(-1, 4) / unflipped.new_tensor(scale_factor)
    return restored.view(bboxes.shape)


def bbox2roi(bbox_list: List[Union[Tensor, BaseBoxes]]) -> Tensor:
    """Convert a list of bboxes to roi format.

    Args:
        bbox_list (List[Union[Tensor, :obj:`BaseBoxes`]): a list of bboxes
            corresponding to a batch of images.

    Returns:
        Tensor: shape (n, box_dim + 1), where ``box_dim`` depends on the
        different box types. For example, If the box type in ``bbox_list``
        is HorizontalBoxes, the output shape is (n, 5). Each row of data
        indicates [batch_ind, x1, y1, x2, y2].
    """
    rois_list = []
    for img_id, bboxes in enumerate(bbox_list):
        # NOTE(review): ``get_box_tensor`` is not imported in the visible
        # header; presumably it is defined/imported elsewhere in this
        # module — confirm against the full file.
        bboxes = get_box_tensor(bboxes)
        batch_col = bboxes.new_full((bboxes.size(0), 1), img_id)
        rois_list.append(torch.cat([batch_col, bboxes], dim=-1))
    return torch.cat(rois_list, 0)


def roi2bbox(rois: Tensor) -> List[Tensor]:
    """Convert rois to bounding box format.

    Args:
        rois (Tensor): RoIs with the shape (n, 5) where the first column
            indicates batch id of each RoI.

    Returns:
        List[Tensor]: Converted boxes of corresponding rois.
    """
    per_img = []
    for img_id in torch.unique(rois[:, 0].cpu(), sorted=True):
        keep = rois[:, 0] == img_id.item()
        per_img.append(rois[keep, 1:])
    return per_img


# TODO remove later
def bbox2result(bboxes: Union[Tensor, np.ndarray],
                labels: Union[Tensor, np.ndarray],
                num_classes: int) -> List[np.ndarray]:
    """Convert detection results to a list of numpy arrays.

    Args:
        bboxes (Tensor | np.ndarray): shape (n, 5)
        labels (Tensor | np.ndarray): shape (n, )
        num_classes (int): class number, including background class

    Returns:
        List(np.ndarray]): bbox results of each class
    """
    if bboxes.shape[0] == 0:
        return [
            np.zeros((0, 5), dtype=np.float32) for _ in range(num_classes)
        ]
    if isinstance(bboxes, torch.Tensor):
        bboxes = bboxes.detach().cpu().numpy()
        labels = labels.detach().cpu().numpy()
    return [bboxes[labels == cls_id, :] for cls_id in range(num_classes)]


def distance2bbox(
    points: Tensor,
    distance: Tensor,
    max_shape: Optional[Union[Sequence[int], Tensor,
                              Sequence[Sequence[int]]]] = None
) -> Tensor:
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (B, N, 2) or (N, 2).
        distance (Tensor): Distance from the given point to 4 boundaries
            (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
        max_shape (Union[Sequence[int], Tensor, Sequence[Sequence[int]]],
            optional): Maximum bounds for boxes, specifies (H, W, C) or
            (H, W). If priors shape is (B, N, 4), then the max_shape should
            be a Sequence[Sequence[int]] and the length of max_shape should
            also be B.

    Returns:
        Tensor: Boxes with shape (N, 4) or (B, N, 4)
    """
    x1 = points[..., 0] - distance[..., 0]
    y1 = points[..., 1] - distance[..., 1]
    x2 = points[..., 0] + distance[..., 2]
    y2 = points[..., 1] + distance[..., 3]
    bboxes = torch.stack([x1, y1, x2, y2], -1)

    if max_shape is None:
        return bboxes

    if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export():
        # speed up
        bboxes[:, 0::2].clamp_(min=0, max=max_shape[1])
        bboxes[:, 1::2].clamp_(min=0, max=max_shape[0])
        return bboxes

    # clip bboxes with dynamic `min` and `max` for onnx
    if torch.onnx.is_in_onnx_export():
        # TODO: delete
        from mmdet.core.export import dynamic_clip_for_onnx
        x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
        return torch.stack([x1, y1, x2, y2], dim=-1)

    if not isinstance(max_shape, torch.Tensor):
        max_shape = x1.new_tensor(max_shape)
    max_shape = max_shape[..., :2].type_as(x1)
    if max_shape.ndim == 2:
        assert bboxes.ndim == 3
        assert max_shape.size(0) == bboxes.size(0)

    min_xy = x1.new_tensor(0)
    max_xy = torch.cat([max_shape, max_shape],
                       dim=-1).flip(-1).unsqueeze(-2)
    bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
    bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
    return bboxes


def bbox2distance(points: Tensor,
                  bbox: Tensor,
                  max_dis: Optional[float] = None,
                  eps: float = 0.1) -> Tensor:
    """Decode bounding box based on distances.

    Args:
        points (Tensor): Shape (n, 2) or (b, n, 2), [x, y].
        bbox (Tensor): Shape (n, 4) or (b, n, 4), "xyxy" format
        max_dis (float, optional): Upper bound of the distance.
        eps (float): a small value to ensure target < max_dis, instead <=

    Returns:
        Tensor: Decoded distances.
    """
    left = points[..., 0] - bbox[..., 0]
    top = points[..., 1] - bbox[..., 1]
    right = bbox[..., 2] - points[..., 0]
    bottom = bbox[..., 3] - points[..., 1]
    if max_dis is not None:
        # keep targets strictly below max_dis
        upper = max_dis - eps
        left = left.clamp(min=0, max=upper)
        top = top.clamp(min=0, max=upper)
        right = right.clamp(min=0, max=upper)
        bottom = bottom.clamp(min=0, max=upper)
    return torch.stack([left, top, right, bottom], -1)


def bbox_rescale(bboxes: Tensor, scale_factor: float = 1.0) -> Tensor:
    """Rescale bounding box w.r.t. scale_factor.

    Args:
        bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois
        scale_factor (float): rescale factor

    Returns:
        Tensor: Rescaled bboxes.
    """
    has_ind = bboxes.size(1) == 5
    if has_ind:
        inds_ = bboxes[:, 0]
        bboxes_ = bboxes[:, 1:]
    else:
        bboxes_ = bboxes
    cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5
    cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5
    w = (bboxes_[:, 2] - bboxes_[:, 0]) * scale_factor
    h = (bboxes_[:, 3] - bboxes_[:, 1]) * scale_factor
    x1 = cx - 0.5 * w
    y1 = cy - 0.5 * h
    x2 = cx + 0.5 * w
    y2 = cy + 0.5 * h
    if has_ind:
        return torch.stack([inds_, x1, y1, x2, y2], dim=-1)
    return torch.stack([x1, y1, x2, y2], dim=-1)


def bbox_cxcywh_to_xyxy(bbox: Tensor) -> Tensor:
    """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).

    Args:
        bbox (Tensor): Shape (n, 4) for bboxes.

    Returns:
        Tensor: Converted bboxes.
    """
    cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    return torch.cat(
        [cx - half_w, cy - half_h, cx + half_w, cy + half_h], dim=-1)


def bbox_xyxy_to_cxcywh(bbox: Tensor) -> Tensor:
    """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).

    Args:
        bbox (Tensor): Shape (n, 4) for bboxes.

    Returns:
        Tensor: Converted bboxes.
    """
    # NOTE(review): the body of this function was truncated in the source
    # dump; reconstructed as the exact inverse of ``bbox_cxcywh_to_xyxy``
    # — confirm against the full file.
    x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)
    return torch.cat(
        [(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1], dim=-1)
""" x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] return torch.cat(bbox_new, dim=-1) def bbox2corner(bboxes: torch.Tensor) -> torch.Tensor: """Convert bbox coordinates from (x1, y1, x2, y2) to corners ((x1, y1), (x2, y1), (x1, y2), (x2, y2)). Args: bboxes (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Shape (n*4, 2) for corners. """ x1, y1, x2, y2 = torch.split(bboxes, 1, dim=1) return torch.cat([x1, y1, x2, y1, x1, y2, x2, y2], dim=1).reshape(-1, 2) def corner2bbox(corners: torch.Tensor) -> torch.Tensor: """Convert bbox coordinates from corners ((x1, y1), (x2, y1), (x1, y2), (x2, y2)) to (x1, y1, x2, y2). Args: corners (Tensor): Shape (n*4, 2) for corners. Returns: Tensor: Shape (n, 4) for bboxes. """ corners = corners.reshape(-1, 4, 2) min_xy = corners.min(dim=1)[0] max_xy = corners.max(dim=1)[0] return torch.cat([min_xy, max_xy], dim=1) def bbox_project( bboxes: Union[torch.Tensor, np.ndarray], homography_matrix: Union[torch.Tensor, np.ndarray], img_shape: Optional[Tuple[int, int]] = None ) -> Union[torch.Tensor, np.ndarray]: """Geometric transformation for bbox. Args: bboxes (Union[torch.Tensor, np.ndarray]): Shape (n, 4) for bboxes. homography_matrix (Union[torch.Tensor, np.ndarray]): Shape (3, 3) for geometric transformation. img_shape (Tuple[int, int], optional): Image shape. Defaults to None. Returns: Union[torch.Tensor, np.ndarray]: Converted bboxes. 
""" bboxes_type = type(bboxes) if bboxes_type is np.ndarray: bboxes = torch.from_numpy(bboxes) if isinstance(homography_matrix, np.ndarray): homography_matrix = torch.from_numpy(homography_matrix) corners = bbox2corner(bboxes) corners = torch.cat( [corners, corners.new_ones(corners.shape[0], 1)], dim=1) corners = torch.matmul(homography_matrix, corners.t()).t() # Convert to homogeneous coordinates by normalization corners = corners[:, :2] / corners[:, 2:3] bboxes = corner2bbox(corners) if img_shape is not None: bboxes[:, 0::2] = bboxes[:, 0::2].clamp(0, img_shape[1]) bboxes[:, 1::2] = bboxes[:, 1::2].clamp(0, img_shape[0]) if bboxes_type is np.ndarray: bboxes = bboxes.numpy() return bboxes def cat_boxes(data_list: List[Union[Tensor, BaseBoxes]], dim: int = 0) -> Union[Tensor, BaseBoxes]: """Concatenate boxes with type of tensor or box type. Args: data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors or box types need to be concatenated. dim (int): The dimension over which the box are concatenated. Defaults to 0. Returns: Union[Tensor, :obj`BaseBoxes`]: Concatenated results. """ if data_list and isinstance(data_list[0], BaseBoxes): return data_list[0].cat(data_list, dim=dim) else: return torch.cat(data_list, dim=dim) def stack_boxes(data_list: List[Union[Tensor, BaseBoxes]], dim: int = 0) -> Union[Tensor, BaseBoxes]: """Stack boxes with type of tensor or box type. Args: data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors or box types need to be stacked. dim (int): The dimension over which the box are stacked. Defaults to 0. Returns: Union[Tensor, :obj`BaseBoxes`]: Stacked results. """ if data_list and isinstance(data_list[0], BaseBoxes): return data_list[0].stack(data_list, dim=dim) else: return torch.stack(data_list, dim=dim) def scale_boxes(boxes: Union[Tensor, BaseBoxes], scale_factor: Tuple[float, float]) -> Union[Tensor, BaseBoxes]: """Scale boxes with type of tensor or box type. 
Args: boxes (Tensor or :obj:`BaseBoxes`): boxes need to be scaled. Its type can be a tensor or a box type. scale_factor (Tuple[float, float]): factors for scaling boxes. The length should be 2. Returns: Union[Tensor, :obj:`BaseBoxes`]: Scaled boxes. """ if isinstance(boxes, BaseBoxes): boxes.rescale_(scale_factor) return boxes else: # Tensor boxes will be treated as horizontal boxes repeat_num = int(boxes.size(-1) / 2) scale_factor = boxes.new_tensor(scale_factor).repeat((1, repeat_num)) return boxes * scale_factor def get_box_wh(boxes: Union[Tensor, BaseBoxes]) -> Tuple[Tensor, Tensor]: """Get the width and height of boxes with type of tensor or box type. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor or box type. Returns: Tuple[Tensor, Tensor]: the width and height of boxes. """ if isinstance(boxes, BaseBoxes): w = boxes.widths h = boxes.heights else: # Tensor boxes will be treated as horizontal boxes by defaults w = boxes[:, 2] - boxes[:, 0] h = boxes[:, 3] - boxes[:, 1] return w, h def get_box_tensor(boxes: Union[Tensor, BaseBoxes]) -> Tensor: """Get tensor data from box type boxes. Args: boxes (Tensor or BaseBoxes): boxes with type of tensor or box type. If its type is a tensor, the boxes will be directly returned. If its type is a box type, the `boxes.tensor` will be returned. Returns: Tensor: boxes tensor. """ if isinstance(boxes, BaseBoxes): boxes = boxes.tensor return boxes def empty_box_as(boxes: Union[Tensor, BaseBoxes]) -> Union[Tensor, BaseBoxes]: """Generate empty box according to input ``boxes` type and device. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor or box type. Returns: Union[Tensor, BaseBoxes]: Generated empty box. """ if isinstance(boxes, BaseBoxes): return boxes.empty_boxes() else: # Tensor boxes will be treated as horizontal boxes by defaults return boxes.new_zeros(0, 4)
16,133
33.474359
79
py
fl-analysis
fl-analysis-master/src/main.py
import tensorflow as tf import numpy as np from src.client_attacks import Attack from src.config_cli import get_config from src.federated_averaging import FederatedAveraging from src.tf_model import Model from src.config.definitions import Config import logging logger = logging.getLogger(__name__) def load_model(): if config.environment.load_model is not None: model = tf.keras.models.load_model(config.environment.load_model) # Load with weights else: model = Model.create_model( config.client.model_name, config.server.intrinsic_dimension, config.client.model_weight_regularization, config.client.disable_bn) # save_model(model) return model def save_model(model): weights = np.concatenate([x.flatten() for x in model.get_weights()]) np.savetxt("resnet18_intrinsic_40k.txt", weights) def main(): import torch if torch.cuda.is_available(): torch.cuda.current_device() limit_gpu_mem() # from src.torch_compat.anticipate_lenet import LeNet # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # m1 = LeNet(10) # m2 = m1.to(device) # try models = [load_model()] if config.client.malicious is not None: config.client.malicious.attack_type = Attack.UNTARGETED.value \ if config.client.malicious.objective['name'] == "UntargetedAttack" else Attack.BACKDOOR.value server_model = FederatedAveraging(config, models, args.config_filepath) server_model.init() server_model.fit() return # if args.hyperparameter_tuning.lower() == "true": # tune_hyper(args, config) # elif len(args.permute_dataset) > 0: # # Permute, load single attack # if not Model.model_supported(args.model_name, args.dataset): # raise Exception( # f'Model {args.model_name} does not support {args.dataset}! 
' # f'Check method Model.model_supported for the valid combinations.') # # attack = load_attacks()[0] # amount_eval = 3 # amount_select = 80 # from itertools import combinations # import random # total_combinations = list(combinations(set(args.permute_dataset), amount_eval)) # indices = sorted(random.sample(range(len(total_combinations)), amount_select)) # logger.info(f"Running {len(total_combinations)} combinations!") # for i, p in enumerate([total_combinations[i] for i in indices]): # train = list(set(args.permute_dataset) - set(p)) # eval = list(p) # attack['backdoor']['train'] = train # attack['backdoor']['test'] = eval # config['attack'] = attack # config['attack_type'] = Attack.UNTARGETED.value \ # if attack['objective']['name'] == "UntargetedAttack" else Attack.BACKDOOR.value # # logger.info(f"Running backdoor with samples {eval} {train}") # # models = [load_model() for i in range(args.workers)] # # server_model = FederatedAveraging(config, models, f"attack-{i}") # server_model.init() # server_model.fit() # else: # if not Model.model_supported(args.model_name, args.dataset): # raise Exception( # f'Model {args.model_name} does not support {args.dataset}! 
' # f'Check method Model.model_supported for the valid combinations.') # # for i, attack in enumerate(load_attacks()): # config['attack'] = attack # config['attack_type'] = Attack.UNTARGETED.value \ # if attack['objective']['name'] == "UntargetedAttack" else Attack.BACKDOOR.value # # logger.info(f"Running attack objective {config['attack_type']}" # f" (evasion: {attack['evasion']['name'] if 'evasion' in attack else None})") # # models = [load_model() for i in range(args.workers)] # # server_model = FederatedAveraging(config, models, f"attack-{i}") # server_model.init() # server_model.fit() def limit_gpu_mem(): limit_mb = config.environment.limit_tf_gpu_mem_mb if limit_mb is None: return gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to only allocate 1GB of memory on the first GPU try: tf.config.experimental.set_virtual_device_configuration( gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=limit_mb)]) # Notice here logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) if __name__ == '__main__': config: Config config, args = get_config() np.random.seed(config.environment.seed) tf.random.set_seed(config.environment.seed) main()
5,117
38.068702
118
py
fl-analysis
fl-analysis-master/src/federated_averaging.py
from __future__ import absolute_import, division, print_function, unicode_literals import json import os import time import logging from copy import deepcopy from pathlib import Path from threading import Thread from typing import List, Any import numpy as np import tensorflow as tf # from src.torch_compat.data_holder import DataHolder tf.get_logger().setLevel('DEBUG') import src.config as config from src.attack_dataset_config import AttackDatasetConfig from src.aggregation import aggregators from src.data import data_loader from src.client_attacks import Attack from src.client import Client from src.util import log_data, create_dropout_mask, aggregate_weights_masked, flatten from src.data.tf_data import Dataset, ImageGeneratorDataset, GeneratorDataset, PixelPatternDataset from src.tf_model import Model from src.data.tf_data_global import IIDGlobalDataset, NonIIDGlobalDataset, DirichletDistributionDivider class FederatedAveraging: """Implementation of federated averaging algorithm.""" client_objs: List[Client] federated_dropout: config.definitions.FederatedDropout def __init__(self, config, models, config_path): """ :type config: config_cli.Config """ self.config = config self.num_clients = config.environment.num_clients self.num_selected_clients = config.environment.num_selected_clients self.num_malicious_clients = config.environment.num_malicious_clients self.attack_frequency = config.environment.attack_frequency self.attack_type = Attack(config.client.malicious.attack_type) \ if config.client.malicious is not None else None self.num_rounds = config.server.num_rounds self.batch_size = config.client.benign_training.batch_size self.federated_dropout = config.server.federated_dropout self.attack_dataset = AttackDatasetConfig(**self.config.client.malicious.backdoor) \ if self.config.client.malicious is not None and self.config.client.malicious.backdoor is not None else None self.print_every = config.environment.print_every self.model_name = config.client.model_name 
self.experiment_name = config.environment.experiment_name self.model = models[0] # use first for me self.client_models = models self.global_weights = self.model.get_weights() if config.environment.use_config_dir: self.experiment_dir = os.path.dirname(config_path) self.experiment_root_dir = os.path.dirname(self.experiment_dir) else: self.experiment_root_dir = os.path.join(os.getcwd(), 'experiments') self.experiment_dir = os.path.join(self.experiment_root_dir, self.experiment_name) self.client_updates_dir = os.path.join(self.experiment_dir, 'updates') self.global_model_dir = os.path.join(self.experiment_dir, 'models') self.norms_dir = os.path.join(self.experiment_dir, 'norms') # self.clients_data = [] self.malicious_clients = np.zeros(self.num_clients, dtype=bool) if self.num_malicious_clients > 0: if config.environment.malicious_client_indices is not None: malicious_indices = config.environment.malicious_client_indices else: malicious_indices = np.random.choice(self.num_clients, self.num_malicious_clients, replace=False) assert len(malicious_indices) == self.num_malicious_clients, \ "Malicious indices must equal total number of malicious clients!" 
self.malicious_clients[malicious_indices] = True self.global_dataset = self.build_dataset() # self.malicious_clients[np.random.choice(self.num_clients, self.num_malicious_clients, replace=False)] = True self.client_objs = [] self.client_model = None self.client_config = {} self.writer = None self.keep_history = config.environment.save_history self.parameters_history = [] if self.keep_history else None self.previous_round_weights = None # Holder self.test_accuracy = tf.keras.metrics.Mean(name='test_accuracy') self.test_loss = tf.keras.metrics.Mean(name='test_loss') self.aggregator = aggregators.build_aggregator(config) def _init_log_directories(self): """Initializes directories in which log files are stored""" if not os.path.isdir(self.experiment_root_dir): os.mkdir(self.experiment_root_dir) if not os.path.isdir(self.experiment_dir): os.mkdir(self.experiment_dir) if not os.path.isdir(self.client_updates_dir): os.mkdir(self.client_updates_dir) if not os.path.isdir(self.global_model_dir): os.mkdir(self.global_model_dir) if (self.config.environment.save_norms or self.config.environment.save_weight_outside_bound) and not os.path.isdir(self.norms_dir): os.mkdir(self.norms_dir) # remove everything for this directory, if we do not have set directory if not self.config.environment.use_config_dir: for filename in Path(self.experiment_dir).glob('**/*'): if not os.path.isdir(str(filename)): os.remove(str(filename)) # with open(os.path.join(self.experiment_dir, 'config.json'), 'w') as fp: # self.config.to_yaml() from src.custom_summary_writer import CustomSummaryWriter self.writer = CustomSummaryWriter(self.experiment_dir) def init(self): """ Loads data, creates clients and client configuration.""" self._init_log_directories() # self.client_config = { # 'attack': self.config['attack'], # 'attack_type': self.attack_type, # 'batch_size': self.config['batch_size'], # 'untargeted_after_training': self.config['untargeted_after_training'], # 'targeted_deterministic_attack_objective': 
self.config['targeted_deterministic_attack_objective'], # 'targeted_attack_objective': self.config['targeted_attack_objective'], # 'targeted_attack_benign_first': self.config['targeted_attack_benign_first'], # 'scale_attack': self.config['scale_attack'], # 'scale_attack_weight': self.config['scale_attack_weight'], # 'aggregator': self.config['aggregator'], # 'trimmed_mean_beta': self.config['trimmed_mean_beta'], # 'num_epochs': self.config['num_epochs'], # 'optimizer': self.config['optimizer'], # 'learning_rate': self.config['learning_rate'], # 'lr_decay': self.config['lr_decay'], # 'decay_steps': self.config['decay_steps'], # 'decay_rate': self.config['decay_rate'], # 'decay_boundaries': self.config['decay_boundaries'], # 'decay_values': self.config['decay_values'], # 'mal_learning_rate': self.config['mal_learning_rate'], # 'mal_decay_steps': self.config['mal_decay_steps'], # 'mal_decay_rate': self.config['mal_decay_rate'], # 'poison_samples': self.config['poison_samples'], # 'mal_num_batch': self.config['mal_num_batch'], # 'mal_step_learning_rate': self.config['mal_step_learning_rate'], # 'mal_num_epochs': self.config['mal_num_epochs'], # 'mal_num_epochs_max': self.config['mal_num_epochs_max'], # 'mal_target_loss': self.config['mal_target_loss'], # 'model_name': self.config['model_name'], # 'clip': self.config['clip'], # 'clip_probability': self.config['clip_probability'], # 'clip_l2': self.config['clip_l2'], # 'clip_layers': self.config['clip_layers'], # 'backdoor_stealth': self.config['backdoor_stealth'], # 'estimate_other_updates': self.config['estimate_other_updates'], # 'attack_after': self.config['attack_after'], # 'attack_stop_after': self.config['attack_stop_after'], # 'contamination_model': self.config['contamination_model'], # 'contamination_rate': self.config['contamination_rate'], # 'gaussian_noise': self.config['gaussian_noise'], # 'pgd': self.config['pgd'], # 'pgd_constraint': self.config['pgd_constraint'], # 'pgd_clip_frequency': 
self.config['pgd_clip_frequency'], # 'pgd_adaptive': self.config['pgd_adaptive'], # 'weight_regularization_alpha': self.config['weight_regularization_alpha'], # 'quantization': self.config['quantization'], # 'q_bits': self.config['q_bits'], # 'q_frac': self.config['q_frac'], # 'optimized_training': self.config['optimized_training'] # } self.client_config = self.config.client self.build_clients(self.malicious_clients) def build_clients(self, mal_clients): if self.attack_type == Attack.BACKDOOR: for bid in range(self.num_clients): x, y = self.global_dataset.get_dataset_for_client(bid) if mal_clients[bid] and self.config.environment.attacker_full_dataset: x, y = self.global_dataset.get_full_dataset(x.shape[0] * 20) if mal_clients[bid]: ds = self.get_local_dataset(self.attack_dataset.augment_data, self.attack_dataset, x, y, batch_size=self.batch_size) else: ds = self.get_local_dataset(self.config.dataset.augment_data, self.attack_dataset, x, y, batch_size=self.batch_size) # dataset = self.global_dataset.get_dataset_for_client(bid) # ds = GeneratorDataset(dataset, self.batch_size) if mal_clients[bid]: if self.attack_dataset.type != "pixel_pattern": print(f"Replacing value {self.attack_dataset.type}") # This is very ugly, but we do not want to assign pixel pattern as it uses # training data of the client... 
ds.x_aux, ds.y_aux, ds.mal_aux_labels = self.global_dataset.x_aux_train, \ self.global_dataset.y_aux_train, \ self.global_dataset.mal_aux_labels_train ds.x_aux_test, ds.mal_aux_labels_test = self.global_dataset.x_aux_test, \ self.global_dataset.mal_aux_labels_test self.client_objs.append(Client(bid, self.client_config, ds, mal_clients[bid])) else: for bid in range(self.num_clients): x, y = self.global_dataset.get_dataset_for_client(bid) if mal_clients[bid] and self.config.environment.attacker_full_dataset: x, y = self.global_dataset.get_full_dataset(x.shape[0] * 20) ds = self.get_local_dataset(self.config.dataset.augment_data, self.attack_dataset, x, y, batch_size=self.batch_size) self.client_objs.append(Client(bid, self.client_config, ds, mal_clients[bid])) @staticmethod def get_local_dataset(augment_data, attack_config, x, y, batch_size): if attack_config is not None and attack_config.type == 'pixel_pattern': return PixelPatternDataset(x, y, attack_config.target_label, batch_size=batch_size) if augment_data: return ImageGeneratorDataset(x, y, batch_size=batch_size) else: return Dataset(x, y, batch_size=batch_size) def build_dataset(self): return data_loader.load_global_dataset(self.config, self.malicious_clients, self.attack_dataset) @staticmethod def compute_updates(prev_weights, new_weights): """Compute difference between two model weights. Args: prev_weights (list): Parameters from previous iteration. new_weights (list): New weights. Returns: list: List of gradients. 
""" return [new_weights[i] - prev_weights[i] for i in range(len(prev_weights))] def save_client_updates(self, client_id, malicious, round, new_weights, prev_global_model_weights): """Saves client updates into `self.client_updates_dir` directory.""" if type(malicious) is not str: malicious = 'm' if malicious else 'b' delta_weights = self.compute_updates(prev_global_model_weights, new_weights) file_name = '%i_%s_%i' % (client_id, malicious, round) outfile = os.path.join(self.client_updates_dir, file_name) np.save(outfile, delta_weights) def _create_weights_list(self, selected_clients): """Creates dictionary (client weights for each selected client). Additionally, it sets dropout masks if federated_dropout is < 1.0. Args: selected_clients (np.ndarray): Randomly selected clients without replacement. Returns: dict: Mappings of client id -> model parameters. """ if self.federated_dropout is None: return None, {i: self.model.get_weights() for i in selected_clients} # create dropout mask for each client if self.federated_dropout.nonoverlap: client_dropout_mask = create_dropout_mask(self.model, self.federated_dropout.rate, self.federated_dropout.all_parameters, n_clients=len(selected_clients)) else: client_dropout_mask = [] for _ in selected_clients: client_dropout_mask.append(create_dropout_mask(self.model, self.federated_dropout.rate, self.federated_dropout.all_parameters)[0]) # apply dropout mask weights_list = {} for i, dropout_mask in enumerate(client_dropout_mask): self.client_objs[selected_clients[i]].set_dropout_mask(dropout_mask) model_weights = deepcopy(self.model.get_weights()) if not self.federated_dropout.randommask: for l in range(len(model_weights)): model_weights[l] = model_weights[l]*dropout_mask[l] weights_list[selected_clients[i]] = model_weights return client_dropout_mask, weights_list def fit(self): """Trains the global model.""" # central_optimizer = Model.create_optimizer(self.config['optimizer'], self.config['learning_rate'], # 
self.config['decay_steps'], self.config['decay_rate']) # REMOVE THIS accuracies, rounds, adv_success_list = [], [], [] # loss_object = tf.keras.losses.SparseCategoricalCrossentropy( # from_logits=False) # Our model has a softmax layer! # self.model.compile( # loss=loss_object, # optimizer=tf.keras.optimizers.Adam(0.001), # metrics=['accuracy'] # ) # from src.torch_compat.anticipate import convert_model, evaluate_model # torch_model = convert_model(self.model) # evaluate_model(torch_model, self.global_dataset, # self.config.client.benign_training.batch_size, self.config.server.num_test_batches) logging.info("Starting training...") test_accuracy, adv_success, test_loss = self.evaluate() print('round=', 0, '\ttest_accuracy=', test_accuracy, '\tadv_success=', adv_success, '\ttest_loss=', test_loss, flush=True) import os import psutil for round in range(1, self.num_rounds + 1): process = psutil.Process(os.getpid()) logging.debug("Memory info: " + str(process.memory_info().rss)) # in bytes start_time = time.time() if self.attack_frequency is None: selected_clients = np.random.choice(self.num_clients, self.num_selected_clients, replace=False) else: indexes = np.array([[i, self.client_objs[i].malicious] for i in range(len(self.client_objs))]) np.random.shuffle(indexes) assert len(indexes[indexes[:, 1] == True]) > 0, "There are 0 malicious attackers." if round % (1 / self.attack_frequency) == 0: num_malicious_selected = self.config.environment.num_selected_malicious_clients or self.num_malicious_clients honest = indexes[indexes[:, 1] == False][:self.num_selected_clients - num_malicious_selected, 0] malicious = indexes[indexes[:, 1] == True][0:num_malicious_selected][:, 0] selected_clients = np.concatenate([malicious, honest]) else: honest = indexes[indexes[:, 1] == False][:self.num_selected_clients, 0] selected_clients = honest assert len(selected_clients) == self.num_selected_clients, "There must be enough non-malicious clients to select." 
client_dropout_masks, weights_list = self._create_weights_list(selected_clients) # If attacker has full knowledge of a round intermediate_benign_client_weights = [] if self.config.environment.attacker_full_knowledge else None ################# # TRAINING LOOP # ################# for i in (c for c in selected_clients if not self.client_objs[c].malicious): # Benign # logging.debug(f"Client {i}: Train") self.client_objs[i].last_global_weights_server = self.previous_round_weights self.client_objs[i].set_weights(weights_list[i]) # logging.debug(f"Client {i}: Set weights") self.client_objs[i].set_model(self.model) # logging.debug(f"Client {i}: Set model") self.client_objs[i].train(round) # logging.debug(f"Client {i}: Train") self.client_objs[i].set_model(None) if self.config.environment.attacker_full_knowledge: intermediate_benign_client_weights.append( FederatedAveraging.compute_updates(self.client_objs[i].weights, weights_list[i]) ) single_malicious_update = None for i in (c for c in selected_clients if self.client_objs[c].malicious): # Malicious if self.config.client.malicious.multi_attacker_scale_divide and single_malicious_update is not None: self.client_objs[i].set_weights(single_malicious_update) else: self.client_objs[i].last_global_weights_server = self.previous_round_weights self.client_objs[i].set_weights(weights_list[i]) self.client_objs[i].set_model(self.model) self.client_objs[i].set_benign_updates_this_round(intermediate_benign_client_weights) self.client_objs[i].train(round) self.client_objs[i].set_model(None) if self.config.client.malicious.multi_attacker_scale_divide: print("Setting multi attacker") single_malicious_update = self.client_objs[i].weights if self.config.environment.save_updates: for i in selected_clients: self.save_client_updates(self.client_objs[i].id, self.client_objs[i].malicious, round, self.client_objs[i].weights, weights_list[i]) num_adversaries = np.count_nonzero([self.malicious_clients[i] for i in selected_clients]) 
selected_clients_list = [self.client_objs[i] for i in selected_clients] if client_dropout_masks is not None: # Federated Dropout weights = aggregate_weights_masked(self.global_weights, self.config.server.global_learning_rate, self.num_clients, self.federated_dropout.rate, client_dropout_masks, [client.weights for client in selected_clients_list]) # weights = self.aggregate_weights([client.weights for client in selected_clients_list]) elif self.config.environment.ignore_malicious_update: # Ignore malicious updates temp_weights = [client.weights for client in selected_clients_list if not client.malicious] weights = self.aggregator.aggregate(self.global_weights, temp_weights) else: # weights = selected_clients_list[0].weights # just take one, malicious temp_weights = [client.weights for client in selected_clients_list] if self.config.client.clip is not None and self.config.client.clip.type == "median_l2": # Apply dynamic norm bound temp_weights = self.apply_dynamic_clipping(temp_weights) weights = self.aggregator.aggregate(self.global_weights, temp_weights) if self.config.server.gaussian_noise > 0.0: logging.debug(f"Adding noise to aggregated model {self.config.server.gaussian_noise}") total_norm = tf.norm(tf.concat([tf.reshape(weights[i], [-1]) for i in range(len(weights))], axis=0)) print(f"Global weight norm: {total_norm}") for i, layer in enumerate(weights): noise = self.noise_with_layer(layer) any_noise_nan = np.isnan(noise).any() any_layer_nan = np.isnan(layer).any() import sys if any_noise_nan: print("Noise is NaN!") np.set_printoptions(threshold=sys.maxsize) print(noise) if any_layer_nan: print(f"Layer {i} is NaN1") np.set_printoptions(threshold=sys.maxsize) print(self.global_weights[i]) # print(temp_weights[i]) print(layer) exit(1) sum = layer + noise if np.isnan(sum).any(): print("Sum is NaN!") np.set_printoptions(threshold=sys.maxsize) print(sum) weights[i] = sum # weights = [layer + self.noise_with_layer(layer) for layer in weights] if self.keep_history: 
self.parameters_history.append(deepcopy(weights)) if round % self.print_every == 0: self.previous_round_weights = self.model.get_weights() self.model.set_weights(weights) if Model.model_supports_weight_analysis(self.model_name): self.writer.analyze_weights(self.model, self.global_weights, selected_clients_list, round, self.parameters_history, self.config.environment.save_norms, self.config.environment.save_weight_distributions, self.config.environment.save_weight_outside_bound) self.global_weights = weights test_accuracy, adv_success, test_loss = self.evaluate() duration = time.time() - start_time self.writer.add_test_metric(test_accuracy, adv_success, round) self.writer.add_honest_train_loss(selected_clients_list, round) self.writer.add_adversary_count(num_adversaries, round) accuracies.append(test_accuracy) adv_success_list.append(adv_success) rounds.append(round) print('round=', round, '\ttest_accuracy=', test_accuracy, '\tadv_success=', adv_success, '\ttest_loss=', test_loss, '\tduration=', duration, flush=True) else: self.model.set_weights(weights) self.global_weights = weights if round in self.config.environment.save_model_at: self.save_model(round) for client in self.client_objs: client.weights = None # Release log_data(self.experiment_dir, rounds, accuracies, adv_success_list) self.log_hparams(rounds, accuracies, adv_success_list) def noise_with_layer(self, w): sigma = self.config.server.gaussian_noise gauss = np.random.normal(0, sigma, w.shape) gauss = gauss.reshape(w.shape).astype(w.dtype) return gauss @staticmethod def average_weights(client_weight_list): """Procedure for averaging client weights""" new_weights = deepcopy(client_weight_list[0]) # return new_weights for client in range(1, len(client_weight_list)): for layer in range(len(client_weight_list[client])): new_weights[layer] = new_weights[layer] + client_weight_list[client][layer] for layer in range(len(new_weights)): new_weights[layer] = new_weights[layer] / len(client_weight_list) return 
new_weights @tf.function def optimized_evaluate(self, batch_x, batch_y): prediction_tensor = self.model(batch_x, training=False) loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False)(y_true=batch_y, y_pred=prediction_tensor) # tf.print(loss) prediction = prediction_tensor y_ = tf.cast(tf.argmax(prediction, axis=1), tf.uint8) test_accuracy_batch = tf.equal(y_, batch_y) self.test_accuracy(tf.reduce_mean(tf.cast(test_accuracy_batch, tf.float32))) self.test_loss(loss) if self.config.environment.print_batch_text: self.print_batch_text(batch_x, prediction) def print_batch_text(self, batch_x, prediction): select = 0 ALL_LETTERS = tf.constant(list("\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"), dtype=tf.string) # print(ALL_LETTERS) # tf.print(ALL_LETTERS) selected_letters = tf.strings.join( tf.unstack( tf.gather(ALL_LETTERS, indices=batch_x[select]))) y_ = tf.cast(tf.argmax(prediction, axis=1), tf.int32) y_letter = tf.gather(ALL_LETTERS, y_[select]) tf.print(selected_letters, y_letter) def evaluate(self): """Evaluates model performances; accuracy on test set and adversarial success. 
Returns: tuple of two floats: test accuracy, adversarial success """ # return 0, 0 # Batched because of memory issues # test_accuracies = [] # predictions = [] # loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) self.test_accuracy.reset_states() self.test_loss.reset_states() for batch_x, batch_y in self.global_dataset.get_test_batch( self.config.client.benign_training.batch_size, self.config.server.num_test_batches): self.optimized_evaluate(batch_x, batch_y) test_accuracy = self.test_accuracy.result().numpy() test_loss = self.test_loss.result().numpy() # print(test_accuracy.nump) # self.test_get_correct_indices() # calculating adv success if self.num_malicious_clients == 0: adv_success = 0 elif self.attack_type == Attack.UNTARGETED or self.attack_type == Attack.DEVIATE_MAX_NORM: adv_success = 1 - test_accuracy elif self.attack_type == Attack.BACKDOOR: all_adv_success = [] batches = 0 attack_config: AttackDatasetConfig = self.attack_dataset amount_images = max(attack_config.augment_times, self.global_dataset.x_aux_test.shape[0]) batch_size = min(self.global_dataset.x_aux_test.shape[0], self.config.client.benign_training.batch_size) total_batches = int(amount_images / batch_size) # handle case ? 
for batch_x, batch_y in self.global_dataset.get_aux_generator(self.config.client.benign_training.batch_size, attack_config.augment_times, attack_config.augment_data, attack_config.type, attack_config.max_test_batches): preds = self.model(batch_x, training=False).numpy().argmax(axis=1) pred_inds = preds == batch_y if self.config.environment.print_backdoor_eval: logging.info(f"Backdoor predictions: {preds}") # This may break on large test sets # adv_success = np.mean(pred_inds) all_adv_success.append(pred_inds) batches += 1 if batches > total_batches: break # manually adv_success = np.mean(np.concatenate(all_adv_success)) else: raise Exception('Type not supported') return test_accuracy, adv_success, test_loss def test_get_correct_indices(self): """Debug helper""" from src.backdoor.edge_case_attack import EuropeanSevenEdgeCase # (batch_x, batch_y), (_, _) = EuropeanSevenEdgeCase().load() (_, _), (batch_x, batch_y) = EuropeanSevenEdgeCase().load() as_7s = np.repeat(7, batch_y.shape) preds = self.model(batch_x, training=False).numpy().argmax(axis=1) pred_inds = preds == as_7s print(np.where(preds == as_7s)) # print(f"Correct: {self.global_dataset.y_aux_test[pred_inds]} -> {preds[pred_inds]}") def save_model(self, round): path = os.path.join(self.global_model_dir, f'model_{round}.h5') print(f"Saving model at {path}") self.model.save(path) def write_hparams(self, hparams, metrics): self.writer.write_hparams(hparams, metrics) def log_hparams(self, rounds, accuracies, adv_successes): if self.config.hyperparameters is None: return # for now only log last round's values METRIC_ACCURACY = 'evaluation/test_accuracy' METRIC_ADV_SUCCESS = 'evaluation/adv_success' hparams_dict = flatten(self.config.hyperparameters.args) metrics = { METRIC_ACCURACY: accuracies[-1], METRIC_ADV_SUCCESS: adv_successes[-1] } self.writer.write_hparams(hparams_dict, metrics) def apply_dynamic_clipping(self, weights): if self.config.client.clip.type == "median_l2": client_delta_weights = 
[[client_weights[i] - self.global_weights[i] for i in range(len(client_weights))] \ for client_weights in weights] l2_norms_per_client = [tf.norm(tf.concat([tf.reshape(delta_weights[i], [-1]) \ for i in range(len(delta_weights))], axis=0)) \ for delta_weights in client_delta_weights] # for norm calculation median = np.median(l2_norms_per_client) median_factor = tf.constant(self.config.client.clip.value) bound = median * median_factor print(f"Effective bound: {bound}") multipliers_per_client = [min((bound / norm).numpy(), 1.0) for norm in l2_norms_per_client] # delta_multiplied = [delta_weights[i] * multiply if i in clip_layers else delta_weights[i] for i in # range(len(delta_weights))] delta_multiplied = [[client_weights[i] * multiplier for i in range(len(client_weights))] \ for client_weights, multiplier in zip(client_delta_weights, multipliers_per_client)] # Add back to global model to fit in other calculations return [[self.global_weights[i] + client_weights[i] for i in range(len(client_weights))] \ for client_weights in delta_multiplied] else: return weights
32,412
47.595202
235
py
fl-analysis
fl-analysis-master/src/loss.py
import tensorflow as tf
import numpy as np


def regularized_loss(local_weights, global_weights, alpha):
    """Build a Keras-compatible loss that trades task loss against weight drift.

    The returned closure computes::

        alpha * sparse_categorical_crossentropy
        + (1 - alpha) * max(0, sum_k l2_loss(local_kernel_k - global_kernel_k))

    i.e. a proximal-style penalty on how far each layer's kernel has moved
    away from the supplied global parameters.

    Args:
        local_weights: iterable of Keras layers; for every layer whose weight
            list holds more than one tensor, the first tensor (the kernel) is
            compared against its global counterpart.
        global_weights: flat list of global parameter arrays, laid out in the
            same order as the concatenated layer weight lists.
        alpha: interpolation factor between the two loss terms.

    Returns:
        A callable ``loss(y_true, y_pred)`` suitable for ``model.compile``.
    """
    sparse_ce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)

    def loss(y_true, y_pred):
        data_term = sparse_ce(y_true, y_pred)

        drift_term = 0
        cursor = 0  # index into the flat global parameter list
        for layer in local_weights:
            params = layer.weights
            # Only layers with a kernel/bias pair contribute; compare the
            # kernel (params[0]) against its global counterpart.
            if len(params) > 1:
                global_kernel = tf.convert_to_tensor(global_weights[cursor])
                drift_term += tf.nn.l2_loss(params[0] - global_kernel)
            cursor += len(params)

        # maximum(0, .) is a defensive guard; l2_loss is already non-negative.
        return alpha * data_term + ((1 - alpha) * tf.math.maximum(0, drift_term))

    return loss
1,069
37.214286
110
py
fl-analysis
fl-analysis-master/src/custom_summary_writer.py
import numpy as np
from tensorboardX import SummaryWriter
from os.path import join
from numpy.linalg import norm
from tensorflow.python.keras.layers.convolutional import Conv2D, ZeroPadding2D, DepthwiseConv2D
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.keras.layers.embeddings import Embedding
from tensorflow.python.keras.layers.recurrent_v2 import LSTM
from scipy.linalg import eigh
import pandas as pd

from src.federated_averaging import FederatedAveraging
from src.util import power_iteration


class CustomSummaryWriter:
    """Tensorboard logging helper for federated-learning experiments.

    Wraps a tensorboardX ``SummaryWriter`` and adds experiment-specific
    metrics: test/adversarial performance, per-layer update norms and
    histograms for benign vs. malicious clients, and counts of parameters
    outside an l_inf bound (persisted as CSV).
    """

    def __init__(self, experiment_dir):
        # Events go to <experiment_dir>/events, raw norm dumps to
        # <experiment_dir>/norms (both assumed to exist already).
        self.events_dir = join(experiment_dir, 'events')
        self.norms_dir = join(experiment_dir, 'norms')
        self.writer = SummaryWriter(self.events_dir)
        # Accumulates one row per round with per-client out-of-bound counts.
        self.params_outside_norm_df = pd.DataFrame(columns=['round', 'clients'])

    def add_test_metric(self, test_accuracy, adv_success, step):
        """Adds performance metrics to tensorboard log files.

        Args:
            test_accuracy (float): Accuracy on the unseen set.
            adv_success (float): Adversarial success.
            step (int): Global step (round).
        """
        self.writer.add_scalar(f'evaluation/test_accuracy', test_accuracy, step)
        self.writer.add_scalar(f'evaluation/adv_success', adv_success, step)
        self.writer.flush()

    def add_honest_train_loss(self, selected_clients_list, step):
        """Logs mean train loss/accuracy over the honest (non-malicious) clients."""
        train_loss = []
        train_accuracy = []
        for client in selected_clients_list:
            if not client.malicious:
                train_loss.append(client.train_loss.result())
                train_accuracy.append(client.train_accuracy.result())
        total_train_loss = np.mean(train_loss)
        total_train_accuracy = np.mean(train_accuracy)
        # NOTE(review): the first tag says "accuracy" but logs the mean train
        # loss — kept as-is so existing dashboards stay comparable.
        self.writer.add_scalar(f'evaluation/honest_train_accuracy', total_train_loss, step)
        self.writer.add_scalar(f'evaluation/mean_train_accuracy', total_train_accuracy, step)

    def add_adversary_count(self, num_adversaries_active, step):
        """Logs how many adversaries were selected this round."""
        self.writer.add_scalar(f'adversary/count', num_adversaries_active, step)

    def analyze_weights(self, model, prev_weights, selected_clients, step, parameters_history=None,
                        save_norms=False, save_histograms=False, save_weight_outside_bound=None):
        """Analyzes model updates.

        Computes per-layer and total l1/l2 norms of the averaged benign and
        malicious client updates (deltas vs. ``prev_weights``), optionally
        histograms, principal eigenvalues of the parameter history, and counts
        of update entries outside an l_inf bound.

        Args:
            model (tf model): Current global model (used for layer names only).
            prev_weights (list): Global weights before this round's update.
            selected_clients (list): List of Client objects.
            step (int): Global step (round).
            parameters_history (list): (Optional) List of weights from previous rounds.
            save_norms (bool): Dump raw norm arrays to the norms dir.
            save_histograms (bool): Log per-layer update histograms.
            save_weight_outside_bound (float|None): l_inf bound to count against.
        """
        benign_updates, mal_updates = [], []
        for client in selected_clients:
            if client.malicious:
                mal_updates.append(FederatedAveraging.compute_updates(prev_weights, client.weights))
            else:
                benign_updates.append(FederatedAveraging.compute_updates(prev_weights, client.weights))
        # [None]*len acts as a sentinel so the per-layer loop can skip a group
        # that has no clients this round.
        benign_update = FederatedAveraging.average_weights(benign_updates) if benign_updates != [] else [None] * len(prev_weights)
        mal_update = FederatedAveraging.average_weights(mal_updates) if mal_updates != [] else [None] * len(prev_weights)

        layer_names = self.flatten([self.get_layer_name(layer) for layer in model.layers])
        layer_names = [i for i in layer_names if i is not None]
        if len(layer_names) == 0:
            layer_names = ['Theta']  # e.g. intrinsic-dimension models expose one flat parameter

        # Only multi-dimensional tensors (kernels) are analyzed; biases are skipped.
        printable_weights_index = [i for i in range(len(prev_weights)) if len(prev_weights[i].shape) > 1]
        for i, layer in enumerate(printable_weights_index):
            for update, label in zip([mal_update[layer], benign_update[layer]], ['mal', 'benign']):
                if update is None:
                    continue
                suffix = f'norm_{label}_client_updates/l{layer}_{layer_names[i]}'
                # l2, l1 norm of the averaged update for this layer
                self.writer.add_scalar(f'l2_{suffix}', norm(update, axis=-1).mean(), step)
                self.writer.add_scalar(f'l1_{suffix}', norm(update, ord=1, axis=-1).mean(), step)
                # principal eigenvalue of this layer's parameter trajectory
                if parameters_history is not None and len(parameters_history) > 1:
                    layer_history = [parameters[layer] for parameters in parameters_history]
                    princ_eig = self._principle_eigen_value(layer_history, layer_names[i])
                    self.writer.add_scalar(f'princ_eig_{suffix}', princ_eig, step)

            if save_histograms and len(mal_updates) > 0:
                mal_merged = np.concatenate([update[layer] for update in mal_updates])
                self.writer.add_histogram(f'histogram_mal/l{layer}_{layer_names[i]}', mal_merged.reshape(-1), step)
            if save_histograms and len(benign_updates) > 0:
                ben_merged = np.concatenate([update[layer] for update in benign_updates])
                self.writer.add_histogram(f'histogram_ben/l{layer}_{layer_names[i]}', ben_merged.reshape(-1), step)

                # Per-client histogram summary statistics (mean/std of bin mids,
                # weighted by bin probability) for the benign group.
                ben_for_layer = [update[layer] for update in benign_updates]
                means = []
                stds = []
                for x in ben_for_layer:
                    n, bins = np.histogram(x)
                    mids = 0.5 * (bins[1:] + bins[:-1])
                    probs = n / np.sum(n)
                    mean = np.sum(probs * mids)
                    sd = np.sqrt(np.sum(probs * (mids - mean) ** 2))
                    means.append(mean)
                    stds.append(sd)
                self.writer.add_histogram(f'histogram_ben_mean/l{layer}_{layer_names[i]}', np.array(means), step)
                self.writer.add_histogram(f'histogram_ben_std/l{layer}_{layer_names[i]}', np.array(stds), step)

        # Whole-model (flattened) norms per client.
        benign_norms_l2 = [norm(np.concatenate([np.reshape(b, [-1]) for b in bs], axis=0), axis=-1)
                           for bs in benign_updates]
        benign_norms_l1 = [norm(np.concatenate([np.reshape(b, [-1]) for b in bs], axis=0), axis=-1, ord=1)
                           for bs in benign_updates]
        self.writer.add_scalar(f'l2_total/benign', np.mean(benign_norms_l2), step)
        self.writer.add_scalar(f'l1_total/benign', np.mean(benign_norms_l1), step)
        if len(mal_updates) > 0:
            mal_norms_l2 = [norm(np.concatenate([np.reshape(b, [-1]) for b in bs], axis=0), axis=-1)
                            for bs in mal_updates]
            mal_norms_l1 = [norm(np.concatenate([np.reshape(b, [-1]) for b in bs], axis=0), axis=-1, ord=1)
                            for bs in mal_updates]
            self.writer.add_scalar(f'l2_total/mal', np.mean(mal_norms_l2), step)
            self.writer.add_scalar(f'l1_total/mal', np.mean(mal_norms_l1), step)
            if save_norms:
                self.save_norms_log(step, np.array([benign_norms_l2, benign_norms_l1, mal_norms_l2, mal_norms_l1]))
        elif save_norms:
            self.save_norms_log(step, np.array([benign_norms_l2, benign_norms_l1]))

        if save_weight_outside_bound is not None:
            # Count update entries outside the l_inf bound, per client.
            l_inf_norm = save_weight_outside_bound
            benign_updates_flat = [np.concatenate([np.reshape(l, [-1]) for l in u], axis=0) for u in benign_updates]
            malicious_updates_flat = [np.concatenate([np.reshape(l, [-1]) for l in u], axis=0) for u in mal_updates]
            counts_per_update_benign = [np.sum(u > l_inf_norm) + np.sum(u < -l_inf_norm)
                                        for u in benign_updates_flat]
            counts_per_update_malicious = [np.sum(u > l_inf_norm) + np.sum(u < -l_inf_norm)
                                           for u in malicious_updates_flat]
            self.save_weight_outside_norm(l_inf_norm, step,
                                          counts_per_update_benign + counts_per_update_malicious)

        self.writer.flush()

    def save_norms_log(self, step, array):
        """Dumps a raw norm array for this round as an .npy file."""
        np.save(join(self.norms_dir, f"round_{step}"), array)

    def save_weight_outside_norm(self, linf_norm, step, array):
        """Appends this round's out-of-bound counts and rewrites the CSV.

        @param array: should contain the counts of the norms outside the bound
        """
        # Fixed: DataFrame.append was deprecated in pandas 1.4 and removed in
        # pandas 2.0; build a one-row frame and concat instead.
        new_row = pd.DataFrame([{"round": step, "clients": array}])
        self.params_outside_norm_df = pd.concat([self.params_outside_norm_df, new_row],
                                                ignore_index=True)
        self.params_outside_norm_df.to_csv(join(self.norms_dir, f'params_outside_bound_{linf_norm}.csv'))

    def write_hparams(self, hparams, metrics):
        """Forwards hyperparameters + final metrics to tensorboard's hparams plugin."""
        self.writer.add_hparams(hparams, metrics)

    @staticmethod
    def _principle_eigen_value(layer_weights, layer_type=None):
        """Computes principle eigenvalue.

        Args:
            layer_weights (list): List of np.ndarray parameters (one per round).
            layer_type (str): Unused; kept for signature compatibility.

        Returns:
            float64: Principle eigenvalue of the parameter covariance matrix.
        """
        layer_weights = np.stack([layer_params.reshape(-1) for layer_params in layer_weights], axis=0)  # NxM
        cov_matrix = np.cov(layer_weights.T)  # MxM
        # power_iteration approximates the dominant eigenpair; cheaper than eigh.
        w, v = power_iteration(cov_matrix)
        return w  # largest eigenvalue

    def get_layer_name(self, layer):
        """Maps a Keras layer instance to a short display name (or None to skip)."""
        layers = {
            Conv2D: 'Conv2D',
            Dense: 'Dense',
            ZeroPadding2D: 'ZeroPadding2D',
            DepthwiseConv2D: 'DepthwiseConv2D',
            Embedding: 'Embedding',
            LSTM: ['LSTM', 'LSTM'],  # two sets of weights
        }
        if type(layer) in layers.keys():
            return layers[type(layer)]
        return None

    def flatten(self, list_to_flatten):
        """Flattens one level of nesting (lists inside the list are expanded)."""
        output = []
        for l in list_to_flatten:
            if isinstance(l, list):
                for m in l:
                    output.append(m)
            else:
                output.append(l)
        return output
9,803
47.534653
130
py
fl-analysis
fl-analysis-master/src/tf_model.py
import math import tensorflow as tf from src.model.modelc import build_modelc from src.model.lenet import build_lenet5 from src.model.resnet import resnet_v2, resnet_v1 from src.model.stacked_lstm import build_stacked_lstm from src.model.test_model import build_test_model from src.subspace.builder.model_builders import build_model_mnist_fc, \ build_cnn_model_mnist_bhagoji, build_cnn_model_mnist_dev_conv, build_cnn_model_mnistcnn_conv, build_LeNet_cifar, \ build_cnn_model_cifar_allcnn, build_model_cifar_LeNet_fastfood from src.subspace.builder.resnet import build_LeNet_resnet, build_resnet_fastfood from tensorflow.keras.regularizers import l2 from src.model.mobilenet import mobilenetv2_cifar10 class Model: @staticmethod def create_model(model_name, intrinsic_dimension=None, regularization_rate=None, disable_bn=False): """Creates NN architecture based on a given model name Args: model_name (str): name of a model """ if model_name == 'mnist_cnn': do_fact = 0.3 model = tf.keras.Sequential([ tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=(28, 28, 1), dtype=float), tf.keras.layers.MaxPooling2D(pool_size=2), # tf.keras.layers.Dropout(0.3), tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'), tf.keras.layers.MaxPooling2D(pool_size=2), # tf.keras.layers.Dropout(0.3), tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), # tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10, activation='softmax') ]) elif model_name == 'dev': regularizer = l2(regularization_rate) if regularization_rate is not None else None model = tf.keras.Sequential([ tf.keras.layers.Conv2D(8, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1), kernel_regularizer=regularizer, bias_regularizer=regularizer), tf.keras.layers.Conv2D(4, (3, 3), activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer), tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), 
tf.keras.layers.Flatten(), tf.keras.layers.Dense(32, activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer), tf.keras.layers.Dense(10, activation='softmax'), ]) elif model_name == 'bhagoji': model = tf.keras.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=(5, 5), padding='valid', activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.Conv2D(64, (5, 5), activation='relu'), # tf.keras.layers.Dropout(0.25), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), # tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10, activation='softmax') ]) elif model_name == 'lenet5_cifar': model = build_lenet5(input_shape=(32, 32, 3), l2_reg=regularization_rate) elif model_name == 'lenet5_mnist': model = build_lenet5(input_shape=(28, 28, 1), l2_reg=regularization_rate) model.summary() elif model_name == 'allcnn': model = build_modelc(l2_reg=regularization_rate) model.summary() elif model_name == 'allcnn_intrinsic': model = build_cnn_model_cifar_allcnn(vsize=intrinsic_dimension, weight_decay=regularization_rate) elif model_name == 'resnet18': model = resnet_v1(input_shape=(32, 32, 3), depth=20) model.summary() elif model_name == 'resnet32': model = resnet_v1(input_shape=(32, 32, 3), depth=32) elif model_name == 'resnet44': model = resnet_v1(input_shape=(32, 32, 3), depth=44) elif model_name == 'resnet56': model = resnet_v1(input_shape=(32, 32, 3), depth=56) model.summary() elif model_name == 'resnet110': model = resnet_v1(input_shape=(32, 32, 3), depth=110) elif model_name == 'resnet18_v2': model = resnet_v2(input_shape=(32, 32, 3), depth=20) elif model_name == 'resnet56_v2': model = resnet_v2(input_shape=(32, 32, 3), depth=56) model.summary() print("HI") elif model_name == 'mobilenet': model = mobilenetv2_cifar10() model.summary() elif model_name == 'dev_fc_intrinsic': model, _ = build_model_mnist_fc(vsize=intrinsic_dimension, width=100) elif model_name == 'bhagoji_intrinsic': model = 
build_cnn_model_mnist_bhagoji(vsize=intrinsic_dimension, proj_type='sparse') elif model_name == 'dev_intrinsic': # model = build_model_cifar_LeNet_fastfood(vsize=intrinsic_dimension) model = build_cnn_model_mnist_dev_conv(vsize=intrinsic_dimension, proj_type='sparse', weight_decay=regularization_rate) Model.normalize(model) elif model_name == 'mnistcnn_intrinsic': model = build_cnn_model_mnistcnn_conv(vsize=intrinsic_dimension, proj_type='sparse') elif model_name =='lenet5_intrinsic': # model = build_lenet_cifar_old(intrinsic_dimension) model = build_LeNet_cifar(vsize=intrinsic_dimension, proj_type='sparse', weight_decay=0.001) Model.normalize(model) elif model_name =='resnet18_intrinsic': # model = build_lenet_cifar_old(intrinsic_dimension) model = build_LeNet_resnet(20, vsize=intrinsic_dimension, proj_type='sparse', weight_decay=0.001, disable_bn=disable_bn) # model = build_resnet_fastfood(20, vsize=intrinsic_dimension, proj_type='sparse', weight_decay=0.001) Model.normalize(model) model.summary() elif model_name == 'stacked_lstm': model = build_stacked_lstm() model.summary() return model elif model_name == 'test_model': model = build_test_model() model.summary() else: raise Exception('model `%s` not supported' % model_name) return model @staticmethod def normalize(model, proj_type='sparse'): basis_matrices = [] normalizers = [] for layer in model.layers: try: basis_matrices.extend(layer.offset_creator.basis_matrices) except AttributeError: continue try: normalizers.extend(layer.offset_creator.basis_matrix_normalizers) except AttributeError: continue if proj_type == 'sparse': # Norm of overall basis matrix rows (num elements in each sum == total parameters in model) # bm_row_norms = tf.sqrt(tf.add_n([tf.sparse_reduce_sum(tf.square(bm), 1) for bm in basis_matrices])) # # Assign `normalizer` Variable to these row norms to achieve normalization of the basis matrix # # in the TF computational graph # rescale_basis_matrices = [tf.assign(var, tf.reshape(bm_row_norms, 
var.shape)) for var in normalizers] # _ = sess.run(rescale_basis_matrices) bm_row_norms = tf.sqrt(tf.add_n([tf.sparse.reduce_sum(tf.square(bm), 1) for bm in basis_matrices])) for var in normalizers: var.assign(tf.reshape(bm_row_norms, var.shape)) elif proj_type == 'dense': bm_sums = [tf.reduce_sum(tf.square(bm), 1) for bm in basis_matrices] divisor = tf.expand_dims(tf.sqrt(tf.add_n(bm_sums)), 1) rescale_basis_matrices = [tf.assign(var, var / divisor) for var in basis_matrices] _ = sess.run(rescale_basis_matrices) @staticmethod def model_supported(model_name, dataset_name): supported_types = { "mnist": ["mnist_cnn", "dev", "bhagoji", "dev_fc_intrinsic", "dev_intrinsic", "mnistcnn_intrinsic", "bhagoji_intrinsic", "lenet5_mnist"], "fmnist": ["mnist_cnn", "dev", "bhagoji", "dev_fc_intrinsic", "dev_intrinsic", "mnistcnn_intrinsic", "bhagoji_intrinsic", "lenet5_mnist"], "femnist": ["mnist_cnn", "dev", "bhagoji", "dev_fc_intrinsic", "dev_intrinsic", "mnistcnn_intrinsic", "bhagoji_intrinsic", "lenet5_mnist"], "cifar10": ["resnet18", "resnet32", "resnet44", "resnet56", "resnet110", "resnet18_v2", "resnet56_v2", "lenet5_cifar", "lenet5_intrinsic", "allcnn", "allcnn_intrinsic"] } return model_name in supported_types[dataset_name] @staticmethod def model_supports_weight_analysis(model_name): return model_name not in ["dev_intrinsic", "dev_fc_intrinsic", "bhagoji_intrinsic", "mnistcnn_intrinsic", "allcnn", "allcnn_intrinsic"] @staticmethod def create_optimizer(optimizer_name, learning_rate, decay, steps_per_round): """Creates optimizer based on given parameters Args: optimizer_name (str): name of the optimizer learning_rate (float|object): initial learning rate decay (src.config.definitions.LearningDecay|None): type of decay steps_per_round (int): number of optimizer steps per round Returns: keras optimizer """ if decay is not None: lr_schedule = Model.current_lr(learning_rate, decay.type, decay.decay_steps, decay.decay_rate, decay.decay_boundaries, decay.decay_values, 
decay.step_epochs, steps_per_round) else: lr_schedule = learning_rate if optimizer_name == 'Adam': return tf.keras.optimizers.Adam(lr_schedule) elif optimizer_name == 'SGD': return tf.keras.optimizers.SGD(lr_schedule, 0.9) raise Exception('Optimizer `%s` not supported.' % optimizer_name) @staticmethod def current_lr(learning_rate, decay_type, decay_steps, decay_rate, decay_boundaries, decay_values, steps_epoch, steps_per_batch): # lr = learning_rate * \ # math.pow(decay_rate, math.floor(epoch / decay_steps)) # lr = learning_rate * \ # tf.pow(decay_rate, tf.cast(tf.floor(epoch / decay_steps), dtype=tf.float32)) # lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( # learning_rate, # decay_steps=decay_steps, # decay_rate=decay_rate, # staircase=False) steps_multiplier = 1 if steps_epoch: steps_multiplier = steps_per_batch if decay_type == 'exponential': # exp lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( learning_rate, decay_steps=decay_steps * steps_multiplier, decay_rate=decay_rate, staircase=False) return lr_schedule elif decay_type == 'boundaries': values = [learning_rate * v for v in decay_values] boundaries = [boundary * steps_multiplier for boundary in decay_boundaries] lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay( boundaries, values) return lr_schedule else: return learning_rate # if epoch > 300 * 2: # learning_rate *= 1e-1 # if epoch > 250 * 2: # learning_rate *= 1e-1 # if epoch > 200 * 2: # learning_rate *= 1e-1 # print('Learning rate: ', lr) # return lr_schedule
11,849
48.170124
180
py
fl-analysis
fl-analysis-master/src/client.py
import itertools import random import logging from copy import deepcopy import numpy as np import tensorflow as tf from tensorflow.python.keras.callbacks import LearningRateScheduler import src.prob_clip as prob_clip from src.data.tf_data import Dataset from src.error import ConfigurationError from src.attack.attack import StealthAttack from src.client_attacks import Attack from src.data import image_augmentation from src.learning_rate_decay import StepDecay from src.loss import regularized_loss from src.tf_model import Model from tensorflow.python.keras.layers.convolutional import Conv2D from tensorflow.python.keras.layers.core import Dense class Client: dataset: Dataset def __init__(self, client_id, config, dataset, malicious): """ :type config: config_cli.definitions.ClientConfig """ self.id = client_id self.config = config self.dataset = dataset self.malicious = malicious self.attack_type = Attack(config.malicious.attack_type) \ if config.malicious is not None else None self.weights = None self.model = None # For memory optimization self.benign_updates_this_round = None # Attacker full knowledge self.global_trainable_weight = None self.last_global_weights = None self.last_update_weights = None self.last_global_weights_server = None # Helper to always assume client get latest global weights # print('num of params ', np.sum([np.prod(v.shape) for v in self.model.trainable_variables])) self._dropout_mask = None self.train_loss = tf.keras.metrics.Mean(name='train_loss') self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy') self.loss_object = None self.honest_optimizer = self.create_honest_optimizer() self.pgd_step_counter = 0 def set_dropout_mask(self, dropout_mask): self._dropout_mask = dropout_mask def _apply_dropout(self, grads): """Applies dropout if dropout mask is set. 
Args: grads (list): list of tensors that are modified inplace """ if self._dropout_mask is None: return for i in range(len(grads)): grads[i] = grads[i] * self._dropout_mask[i] def set_weights(self, weights): self.weights = weights def set_model(self, model): self.model = model def set_benign_updates_this_round(self, updates): """To establish whether the attacker has full knowledge this round""" self.benign_updates_this_round = updates def _compute_gradients_honest(self, tape, loss_value): grads = tape.gradient(loss_value, self.model.trainable_variables) self._apply_dropout(grads) return grads def _compute_gradients(self, tape, loss_value): grads = tape.gradient(loss_value, self.model.trainable_variables) self._apply_dropout(grads) return grads def apply_quantization(self, old_weights, new_weights): if self.config.quantization is None: return new_weights update = [new_weights[i] - old_weights[i] for i in range(len(old_weights))] quantization = self.config.quantization if quantization.type == 'deterministic': update = prob_clip.clip(update, quantization.bits, quantization.frac, False) elif quantization.type == 'probabilistic': update = prob_clip.clip(update, quantization.bits, quantization.frac, True) else: raise Exception('Selected quantization method does not exist!') return [old_weights[i] + update[i] for i in range(len(old_weights))] def perform_attack(self): try: attack_config = self.config.malicious from src import attack cls = getattr(attack, attack_config.objective["name"]) attack: StealthAttack = cls() attack.set_stealth_method(self.evasion_factory()) except Exception as e: raise ConfigurationError("Invalid attack configuration", e) args = attack_config.objective['args'].copy() args['loss_object'] = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) args['optimizer'] = self.build_optimizer(args) args.pop('learning_rate', None) args.pop('reduce_lr', None) args.pop('attacker_full_dataset', None) malicious_weights = attack.generate(self.dataset, 
self.model, **args) return malicious_weights def build_optimizer(self, optimizer_config): # return elaborate optimizer, potentially with stepdecay opt = optimizer_config['optimizer'] lr = optimizer_config['learning_rate'] if 'learning_rate' in optimizer_config else None step_decay = optimizer_config['step_decay'] if 'step_decay' in optimizer_config else None if self.attack_type != Attack.UNTARGETED: # for untargeted attacks we dont have an eval set to measure success on _, adv_success = self.eval_aux_test(tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)) print(f"Adv success: {adv_success}") if 'reduce_lr' in optimizer_config and optimizer_config['reduce_lr']: if adv_success > 0.6: lr = lr / 100 elif adv_success > 0.2: lr = lr / 50 if opt == "Adam": if lr is not None: if step_decay is not None and step_decay: decay = StepDecay(lr, optimizer_config['num_epochs'] * optimizer_config['num_batch']) optimizer_config['step_decay'] = decay return tf.keras.optimizers.Adam(learning_rate=decay) return tf.keras.optimizers.Adam(learning_rate=lr) if opt == "SGD": if lr is not None: if step_decay is not None and step_decay: decay = StepDecay(lr, optimizer_config['num_epochs'] * optimizer_config['num_batch']) optimizer_config['step_decay'] = decay return tf.keras.optimizers.SGD(learning_rate=decay) return tf.keras.optimizers.SGD(learning_rate=lr) return tf.keras.optimizers.Adam() def evasion_factory(self): """ :rtype: EvasionMethod|None """ attack_config = self.config.malicious if attack_config.evasion is None: return None evasion_name = attack_config.evasion['name'] args = attack_config.evasion['args'] from src.attack import evasion cls = getattr(evasion, evasion_name) if evasion_name == 'NormBoundPGDEvasion': return cls(old_weights=self.weights, benign_updates=self.benign_updates_this_round, **args) elif evasion_name == 'NormBoundProbabilisticCheckingEvasion': return cls(old_weights=self.weights, benign_updates=self.benign_updates_this_round, **args) elif 
evasion_name == 'NeurotoxinEvasion': return cls(old_weights=self.weights, last_round_weights=self.last_global_weights_server, benign_updates=self.benign_updates_this_round, **args) elif evasion_name == 'TrimmedMeanEvasion': assert self.benign_updates_this_round is not None, "Only full knowledge attack is supported at this moment" return cls(benign_updates_this_round=self.benign_updates_this_round, **args) else: raise NotImplementedError(f"Evasion with name {evasion_name} not supported.") @tf.function def optimized_training(self, batch_x, batch_y): """Uses tf non-eager execution using graph""" self.unoptimized_benign_training(batch_x, batch_y) def unoptimized_benign_training(self, batch_x, batch_y): with tf.GradientTape() as tape: predictions = self.model(batch_x, training=True) loss_value = self.loss_object(y_true=batch_y, y_pred=predictions) reg = tf.reduce_sum(self.model.losses) total_loss = loss_value + reg grads = self._compute_gradients_honest(tape, total_loss) self.honest_optimizer.apply_gradients(zip(grads, self.model.trainable_weights)) self.train_loss(total_loss) self.train_accuracy(batch_y, predictions) def honest_training(self): """Performs local training""" self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False) # Our model has a softmax layer! 
num_iters = 0 # tboard_callback = tf.keras.callbacks.TensorBoard(log_dir='logdir', # histogram_freq=1) if self.config.optimized_training: for i in range(self.config.benign_training.num_epochs): # tf.print(self.honest_optimizer._decayed_lr(tf.float32)) for batch_id, (batch_x, batch_y) in enumerate(self.dataset.get_data()): self.optimized_training(batch_x, batch_y) current_lr = self.honest_optimizer._decayed_lr(var_dtype=tf.float32) # print(f"Current lr: {current_lr}") if self.config.debug_client_training: print(f"Epoch {i}: Train loss={self.train_loss.result()}, acc={self.train_accuracy.result()}, lr={current_lr}", flush=True) self.train_loss.reset_states() self.train_accuracy.reset_states() else: self.honest_optimizer = self.create_honest_optimizer() for i in range(self.config.benign_training.num_epochs): for (batch_x, batch_y) in self.dataset.get_data(): # image_augmentation.debug(batch_x, batch_y) self.unoptimized_benign_training(batch_x, batch_y) self.honest_optimizer = None # release for memory reasons # Set last weights as we might act malicious next round if self.malicious: self.last_global_weights = self.weights # First round, don't estimate self.last_update_weights = self.model.get_weights() return self.model.get_weights() def deviate_max_norm_attack(self): """Builds byzantine attack that has worst case opposite of gradient""" # Currently only l_inf is supported assert self.benign_updates_this_round is not None, "Only full knowledge attack is supported at this moment" aggregation = self.config['aggregator'] if aggregation == "FedAvg": assert self.config["clip"] is not None # Full knowledge next_update = Client.average_weights(self.benign_updates_this_round) clip_value = self.config["clip"] new_weights_opposite_direction = [np.sign(layer) * -clip_value for layer in next_update] return new_weights_opposite_direction elif aggregation == "TrimmedMean": constant_b = 2.0 accumulator = [np.zeros([*layer.shape, len(self.benign_updates_this_round)], layer.dtype) for 
layer in self.benign_updates_this_round[0]] for client in range(0, len(self.benign_updates_this_round)): for layer in range(len(self.benign_updates_this_round[client])): accumulator[layer][..., client] = self.benign_updates_this_round[client][layer] # put the per-client layers in single np array # next_update_check = Client.average_weights(self.benign_updates_this_round) next_update = [np.mean(layer, -1) for layer in accumulator] layer_max = [np.max(layer, -1) for layer in accumulator] layer_min = [np.min(layer, -1) for layer in accumulator] directions = [np.sign(layer) for layer in next_update] new_weights = [] for signs, max, min in zip(directions, layer_max, layer_min): max_interval = np.where(max > 0, (max, max * constant_b), (max, max / constant_b)) min_interval = np.where(min > 0, (min / constant_b, min), (constant_b * min, min)) intervals = np.where(signs < 0, max_interval, min_interval) intervals = np.moveaxis(intervals, 0, -1) randomness = np.random.sample(intervals.shape[0:-1]) weights = (intervals[..., 1] - intervals[..., 0]) * randomness + intervals[..., 0] new_weights.append(weights) return new_weights else: raise NotImplementedError("Aggregation method not supported by this attack.") def add_noise(self, batch_x): if self.config.gaussian_noise is None: return batch_x sigma = self.config.gaussian_noise gauss = np.random.normal(0, sigma, batch_x.shape) gauss = gauss.reshape(batch_x.shape).astype(batch_x.dtype) noisy = np.clip(batch_x + gauss, a_min=0.0, a_max=1.0) return noisy # def contamination_attack(self, optimizer, loss_object): # """This attack modifies only epsilon*n neurons. # # Inspired by: Diakonikolas, Ilias, et al. "Sever: A robust meta-algorithm for stochastic optimization. 
ICML 2019 # # Warning: the convergence parameter is hard-codded as 0.01 # """ # assert self.malicious and self.config['contamination_model'] # # # region contamination mask creation # contamination_mask = [np.zeros_like(self.weights[i]) for i in range(len(self.weights))] # layer_iter, layer_ind = 0, 0 # for ind in range(len(self.model.layers)): # if type(self.model.layers[ind]) in [Conv2D, Dense]: # elems = self.model.layers[ind].weights[0].shape[-1] # elems_to_keep = int(elems * self.config['contamination_rate'][layer_iter]) # keep_inds = np.random.choice(elems, elems_to_keep, replace=False) # contamination_mask[layer_ind][..., keep_inds] = 1 # weights # contamination_mask[layer_ind + 1][keep_inds] = 1 # biases # # layer_iter += 1 # layer_ind += 2 # # endregion # # # backdoor with small noise # batch_x = self.dataset.x_aux # for local_epoch in range(100): # maximum number of local epochs # with tf.GradientTape() as tape: # # add a small noise to data samples # loss_value = loss_object(y_true=self.dataset.mal_aux_labels, # y_pred=self.model(self.add_noise(batch_x), training=True)) # if loss_value < 0.01: # break # grads = self._compute_gradients(tape, loss_value) # # blackout gradients # for i in range(len(grads)): # grads[i] = grads[i] * contamination_mask[i] # # optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) # # # boost weights # new_weights = self.apply_attack(self.weights, self.model.get_weights()) # return new_weights # def minimize_loss_attack(self, optimizer, loss_object, round): # assert self.malicious # # mal_optimizer = self.create_malicious_optimizer() # # refine on auxiliary dataset # loss_value = 100 # epoch = 0 # while epoch < self.config['mal_num_epochs'] or loss_value > self.config['mal_target_loss']: # for mal_batch_x, mal_batch_y in self.dataset.get_aux(self.config['mal_num_batch']): # with tf.GradientTape() as tape: # loss_value = loss_object(y_true=mal_batch_y, # y_pred=self.model(self.add_noise(mal_batch_x), 
training=True)) # # if loss_value > 0.01: # grads = self._compute_gradients(tape, loss_value) # mal_optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) # # self.update_weights_pgd() # # # print(f"Loss value mal {loss_value}") # epoch += 1 # # if epoch > self.config['mal_num_epochs_max']: # logging.debug(f"Client {self.id}: Epoch break ({epoch})") # break # # # boost weights # new_weights = self.apply_attack(self.weights, self.model.get_weights()) # return new_weights # def backdoor_stealth_attack(self, optimizer, loss_object, round): # """Applies alternating minimization strategy, similar to bhagoji # # First, we train the honest model for one batch, and then the malicious samples, if the loss is still high. # Iterates for as many benign batches exist. # # Note: Does not support the l2 norm constraint # Note: Only supports boosting the full gradient, not only the malicious part. # Note: Implemented as by Bhagoji. Trains the malicious samples with full batch size, this may not be desirable. 
# # """ # # mal_optimizer = self.create_malicious_optimizer() # # loss_value_malicious = 100 # epoch = 0 # # current_weights = self.model.get_weights() # # delta_mal_local = [np.zeros(w.shape) for w in current_weights] # while epoch < self.config['mal_num_epochs'] or loss_value_malicious > self.config['mal_target_loss']: # # for (batch_x, batch_y) in self.dataset.get_data(): # with tf.GradientTape() as tape: # pred = self.model(batch_x, training=True) # pred_labels = np.argmax(pred, axis=1) # loss_value = loss_object(y_true=batch_y, y_pred=pred) # acc = np.mean(pred_labels == batch_y) # logging.debug(f"Client {self.id}: Benign loss {loss_value} {acc}") # grads = self._compute_gradients(tape, loss_value) # # optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) # # # self.update_weights_pgd() # # # delta_benign = self.model.get_weights() # # for (x_aux, y_aux) in self.dataset.get_aux(self.config['mal_num_batch']): # pred_mal = self.model(x_aux, training=False) # loss_value_malicious = loss_object(y_true=y_aux, # y_pred=pred_mal) # if loss_value_malicious < self.config['mal_target_loss']: # break # # with tf.GradientTape() as tape: # pred_mal = self.model(x_aux, training=True) # pred_mal_labels = np.argmax(pred_mal, axis=1) # loss_value_malicious = loss_object(y_true=y_aux, # y_pred=pred_mal) # acc_mal = np.mean(pred_mal_labels == y_aux) # self.debug(f"Mal loss {loss_value_malicious} {acc_mal}") # grads = tape.gradient(loss_value_malicious, self.model.trainable_variables) # mal_optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) # # # end_weights = self.model.get_weights() # # delta_mal = [delta_benign[i] - end_weights[i] for i in range(len(end_weights))] # # delta_mal_local = [delta_mal_local[i] + delta_mal[i] for i in range(len(delta_mal))] # # if epoch > self.config['mal_num_epochs_max']: # self.debug(f"Epoch break! 
{loss_value_malicious}") # break # # epoch += 1 # # # end_weights = self.model.get_weights() # # new_weights = [end_weights[i] + (delta_mal_local[i] * (self.config['scale_attack_weight'] - 1)) for i in range(len(delta_mal_local))] # new_weights = self.apply_attack(self.weights, self.model.get_weights()) # return new_weights # # def model_replacement_attack(self, optimizer, loss_object, round): # # Note: Implemented as by `Can you really backdoor federated learning?` baseline attack # # poison_samples = self.config['poison_samples'] # mal_num_batch = self.config['mal_num_batch'] # # # Uses custom StepDecay because we want to step more explicitly # if self.config['mal_step_learning_rate']: # step_decay = StepDecay(self.config['mal_learning_rate'], self.config['mal_num_epochs'] * mal_num_batch) # mal_optimizer = Model.create_optimizer(self.config["optimizer"], step_decay, None, None, None, None, None) # else: # step_decay = None # mal_optimizer = Model.create_optimizer(self.config["optimizer"], self.config['mal_learning_rate'], None, # None, None, None, None) # # loss_object = regularized_loss(self.model.layers, self.weights) # # loss_value_mal = 100 # for epoch in range(self.config['mal_num_epochs']): # for batch_x, batch_y in self.dataset.get_data_with_aux(poison_samples, mal_num_batch): # 10 is ICML # print(f"LR: {mal_optimizer._decayed_lr(var_dtype=tf.float32)}") # # image_augmentation.debug(batch_x, batch_y) # # with tf.GradientTape() as tape: # loss_value = loss_object(y_true=batch_y, y_pred=self.model(batch_x, training=True)) # # print(f"Loss: {loss_value}") # grads = self._compute_gradients(tape, loss_value) # mal_optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) # # if step_decay is not None: # step_decay.apply_step() # # self.update_weights_pgd() # # loss_value_mal, acc_mal = self.eval_aux_test(loss_object) # # # acc_nonmal = tf.reduce_mean((batch_y[21:] == tf.argmax(self.model(batch_x[21:], training=True), axis=1))) # # preds = 
self.model(batch_x[21:], training=False).numpy().argmax(axis=1) # # pred_inds = preds == batch_y[21:].numpy() # # # print(f"Correct: {self.global_dataset.y_aux_test[pred_inds]} -> {preds[pred_inds]}") # # acc_nonmal = np.mean(pred_inds) # logging.debug(f"Client {self.id}: Loss {loss_value_mal} acc {acc_mal}") # # # if step_decay is not None: # # if loss_value_mal < self.config['mal_target_loss']: # # step_decay.mul = 0.01 # # else: # # step_decay.mul = 1.0 # # # if loss_value_mal < self.config['mal_target_loss']: # # self.debug(f"Below target loss {loss_value_mal}") # # break # # self.debug("Epoch") # # new_weights = self.apply_attack(self.weights, self.model.get_weights()) # return new_weights def malicious_training(self, round): assert self.malicious optimizer = self.create_honest_optimizer() if self.config.benign_training.regularization_rate is not None: loss_object = regularized_loss(self.model.layers, self.weights, self.config.benign_training.regularization_rate) else: loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False) # Our model has a softmax layer! 
attack_type = self.attack_type logging.info(f"Client {self.id}: Malicious training") if attack_type == Attack.UNTARGETED: new_weights = self.perform_attack() elif attack_type == Attack.DEVIATE_MAX_NORM: new_weights = self.deviate_max_norm_attack() elif attack_type == Attack.BACKDOOR: new_weights = self.perform_attack() else: raise Exception('Unknown type of attack!') if self.config.malicious.estimate_other_updates: # I guess new_weights should be updated based on this difference new_weights = self.apply_estimation(self.weights, new_weights) return new_weights # @profile def train(self, round): """Performs local training""" self.train_loss.reset_states() self.train_accuracy.reset_states() self.pgd_step_counter = 0 self.model.set_weights(self.weights) # self.global_trainable_weight = [w.numpy() for w in self.model.trainable_weights] if self.acting_malicious(round): new_weights = self.malicious_training(round) else: new_weights = self.honest_training() self.global_trainable_weight = None # Release # loss_object = tf.keras.losses.SparseCategoricalCrossentropy( # from_logits=False) # Our model has a softmax layer! 
# self.eval_train(loss_object) new_weights = self.apply_defense(self.weights, new_weights) new_weights = self.apply_quantization(self.weights, new_weights) # print("Post clip") # self.model.set_weights(new_weights) # self.eval_train(loss_object) # print(f"Train loss: {self.train_loss.result()}, acc: {self.train_accuracy.result()}") self.model = None # Release self.weights = new_weights def apply_estimation(self, old_weights, new_weights): if self.last_global_weights is None: self.last_global_weights = old_weights # First round, don't estimate self.last_update_weights = new_weights return new_weights logging.info(f"Client {self.id}: Global model estimation") # Assume updates of other will be the same next round other_updates = [(old_weights[i] - self.last_update_weights[i]) for i in range(len(old_weights))] new_weights_with_diff = [new_weights[i] - other_updates[i] for i in range(len(old_weights))] self.last_global_weights = old_weights # First round, don't estimate self.last_update_weights = new_weights return new_weights_with_diff def apply_attack(self, old_weights, new_weights): """ Applies attacks based on configuration :param old_weights: the weights of the model before training. 
Will be used to calculate the delta (malicious) weights :param new_weights: the new weights after training :return: new weights after applying data """ if self.config['scale_attack']: return self._boost_weights(old_weights, new_weights) return new_weights def _replace_model(self, old_weights, new_weights): # We could try to implement this return None def _boost_weights(self, old_weights, new_weights): logging.info(f"Client {self.id}: Boosting weights with {self.config['scale_attack_weight']}") delta_weights = [(new_weights[i] - old_weights[i]) * self.config['scale_attack_weight'] for i in range(len(old_weights))] return [old_weights[i] + delta_weights[i] for i in range(len(old_weights))] def apply_defense(self, old_weights, new_weights): """ Applies defenses based on configuration :param clip: :param old_weights: :param new_weights: :return: new weights """ assert old_weights is not None, "Old weights can't be none" assert new_weights is not None, "New weights can't be none" delta_weights = [new_weights[i] - old_weights[i] for i in range(len(old_weights))] # clip_layers = self.config['clip_layers'] if self.config['clip_layers'] != [] else range(len(old_weights)) clip_layers = range(len(old_weights)) clip = self.config.clip if clip is None: return new_weights if clip.type == "linf": if clip.probability is None: delta_weights = [np.clip(delta_weights[i], -clip.value, clip.value) if i in clip_layers else delta_weights[i] for i in range(len(delta_weights))] # # Addition, clip layers less aggressively # delta_weights = [np.clip(delta_weights[i], -clip * 5, clip * 5) if i not in clip_layers else delta_weights[i] # for i in range(len(delta_weights))] else: delta_weights = self.random_clip_l0(delta_weights, clip.value, clip.probability, clip_layers) if clip.type == "l2": delta_weights = self.clip_l2(delta_weights, clip.value, clip_layers) new_weights = [old_weights[i] + delta_weights[i] for i in range(len(old_weights))] return new_weights def random_clip_l0(self, 
delta_weights, clip, prob, clip_layers): """ Clip inf norm randomly :param delta_weights: weights to clip :param clip: clip value :param prob: percentage of weights to clip :return: randomly clipped weights """ new_weights = [ [np.clip(col_weights[i], -clip, clip) if i in clip_layers and random.random() < prob else col_weights[i] for i in range(len(col_weights))] for col_weights in delta_weights] return new_weights def clip_l2(self, delta_weights, l2, clip_layers): """ Calculates the norm per layer. :param delta_weights: current weight update :param l2: l2 bound :param clip_layers: what layers to apply clipping to :return: """ l2_norm_tensor = tf.constant(l2) layers_to_clip = [tf.reshape(delta_weights[i], [-1]) for i in range(len(delta_weights)) if i in clip_layers] # for norm calculation norm = max(tf.norm(tf.concat(layers_to_clip, axis=0)), 0.00001) # print(f"Norm: {norm}") multiply = min((l2_norm_tensor / norm).numpy(), 1.0) return [delta_weights[i] * multiply if i in clip_layers else delta_weights[i] for i in range(len(delta_weights))] def clip_l2_per_layer(self, delta_weights, l2, clip_layers): """ @deprecated Calculates the norm per layer. 
For all layers individually :param delta_weights: current weight update :param l2: l2 bound :param clip_layers: what layers to apply clipping to :return: """ norm = [tf.norm(delta_weights[i]) if i in clip_layers else tf.constant(l2) for i in range(len(delta_weights))] multiply = [tf.constant(l2) / norm[i] for i in range(len(norm))] return [delta_weights[i] * multiply[i] for i in range(len(delta_weights))] def acting_malicious(self, round): return self.malicious and self.config.malicious.attack_start <= round <= self.config.malicious.attack_stop # def apply_pgd_weights(self, old_weights, new_weights): # pgd = self.config['pgd'] # if pgd is not None: # # pgd_constraint = self.config['pgd_constraint'] / self.config['scale_attack_weight'] \ # if self.malicious and self.config['scale_attack'] \ # else self.config['pgd_constraint'] # # self.debug(f"Applying constraint {pgd} with value {pgd_constraint}") # # if pgd == 'l_inf': # new_weights = self.apply_defense(old_weights, new_weights, pgd_constraint, None) # elif pgd == 'l2': # new_weights = self.apply_defense(old_weights, new_weights, None, pgd_constraint) # else: # raise Exception('PGD type not supported') # return new_weights # def update_weights_pgd(self): # self.pgd_step_counter += 1 # if self.pgd_step_counter % self.config['pgd_clip_frequency'] != 0: # # not yet time to clip # return # # new_weights = self.apply_pgd_weights(self.weights, self.model.get_weights()) # self.model.set_weights(new_weights) def eval_train(self, loss_object): total_loss = 0.0 batch_count = 0.0 for batch_x, batch_y in self.dataset.get_data(): loss_value = loss_object(y_true=batch_y, y_pred=self.model(batch_x, training=False)) total_loss += loss_value batch_count += 1 loss = total_loss / batch_count logging.debug(f"Client {self.id}: Training loss {loss}") def eval_aux_test(self, loss_object): for batch_x, batch_y in self.dataset.get_aux_test_generator(1): preds = self.model(batch_x, training=False) loss_value = loss_object(y_true=batch_y, 
y_pred=preds) pred_inds = preds.numpy().argmax(axis=1) == batch_y adv_success = np.mean(pred_inds) return loss_value, adv_success def create_honest_optimizer(self): training = self.config.benign_training num_batches = self.dataset.x_train.shape[0] / training.batch_size steps_per_round = num_batches * training.num_epochs return Model.create_optimizer(training.optimizer, training.learning_rate, training.decay, steps_per_round) def debug(self, v): logging.debug(f"Client {self.id}: {v}") def info(self, v): logging.info(f"Client {self.id}: {v}") @staticmethod def average_weights(client_weight_list): """Procedure for averaging client weights""" new_weights = deepcopy(client_weight_list[0]) # return new_weights for client in range(1, len(client_weight_list)): for layer in range(len(client_weight_list[client])): new_weights[layer] = new_weights[layer] + client_weight_list[client][layer] for layer in range(len(new_weights)): new_weights[layer] = new_weights[layer] / len(client_weight_list) return new_weights
34,219
43.849279
155
py
fl-analysis
fl-analysis-master/src/util.py
import collections from copy import deepcopy import numpy as np import pandas as pd from os.path import join from tensorflow.python.keras.layers.convolutional import Conv2D from tensorflow.python.keras.layers.core import Dense def log_data(experiment_dir, rounds, accuracy, adv_success): """Logs data.""" df = pd.DataFrame() df['round'] = rounds df['accuracy'] = accuracy df['adv_success'] = adv_success df.to_csv(join(experiment_dir, 'log.csv'), index=False) def power_iteration(A): """Computes principle eigenvalue and eigenvector. Args: A (np.ndarray): Square matrix. Returns: tuple: Tuple of eigenvalue and eigenvector of np.ndarray type. """ def eigenvalue(A, v): Av = A.dot(v) return v.dot(Av) n, d = A.shape v = np.ones(d) / np.sqrt(d) ev = eigenvalue(A, v) while True: Av = A.dot(v) v_new = Av / np.linalg.norm(Av) ev_new = eigenvalue(A, v_new) if np.abs(ev - ev_new) < 0.01: break v = v_new ev = ev_new return ev_new, v_new def create_dropout_mask(model, federated_dropout_rate, federated_dropout_all_parameters, n_clients=1): """Applies dropout on model parameters as described in: Caldas, S., Konečny, J., McMahan, H.B. and Talwalkar, A., 2018. Expanding the Reach of Federated Learning by Reducing Client Resource Requirements. arXiv preprint arXiv:1812.07210 The fixed number of neurons for Dense layers (and filters for Conv2D layer) are zeroed out expect for very first and last layers (unless federated_dropout_all_parameters is True). Biases are intact. Args: model (tf.model): Keras model. federated_dropout_rate (float): Federated dropout rate in (0, 1) range. federated_dropout_all_parameters (bool): Program parameter. n_clients (int): How many non-overlapping dropout masks to create. Returns: For each client a list of np.ndarray that represent dropout mask. """ assert 0 < federated_dropout_rate < 1., 'Federated dropout rate must be in (0, 1) range.' 
assert type(model.layers[0]) in [Conv2D, Dense], \ "The implementation assumes that the first layer is Dense or Conv2D" layer_range = 1, len(model.layers) - 1 if federated_dropout_all_parameters: layer_range = 0, len(model.layers) dropout_mask = [[np.ones_like(l, dtype=bool) for l in model.get_weights()] for _ in range(n_clients)] # elems_to_drop = 1.0 - federated_dropout_rate layer_ind = layer_range[0] * 2 # since we skip the first layer for ind in range(layer_range[0], layer_range[1]): if type(model.layers[ind]) in [Conv2D, Dense]: param_shape = model.layers[ind].weights[0].shape if federated_dropout_all_parameters: # partially zeroed out filters assert n_clients * federated_dropout_rate < 1 # param_shape = (kernel w, kernel h, prev layer filters, current layer filters) total_params = np.prod(param_shape) n_select = int(federated_dropout_rate * total_params) * n_clients keep_inds = np.random.choice(total_params, n_select, replace=False) keep_inds = keep_inds.reshape((n_clients, -1)) for client in range(n_clients): layer_mask = np.zeros(np.prod(param_shape), dtype=bool) layer_mask[keep_inds[client]] = True dropout_mask[client][layer_ind] = layer_mask.reshape(param_shape) else: n_select = int(federated_dropout_rate * param_shape[-1]) * n_clients keep_inds = np.random.choice(param_shape[-1], n_select, replace=True) keep_inds = keep_inds.reshape((n_clients, -1)) for client in range(n_clients): layer_mask = np.zeros_like(dropout_mask[client][layer_ind], dtype=bool) layer_mask[..., keep_inds[client]] = True dropout_mask[client][layer_ind] = layer_mask layer_ind += 2 # ind*2 because we zero out only weights (not biases) return dropout_mask def aggregate_weights_masked(current_weights, global_learning_rate, num_clients, dropout_rate, client_dropout_mask, client_weight_list): """Procedure for merging client weights together with `global_learning_rate`.""" assert len(current_weights) == len(client_weight_list[0]) assert len(client_dropout_mask) == len(client_weight_list) 
assert len(client_dropout_mask[0]) == len(client_weight_list[0]) new_weights = deepcopy(current_weights) number_of_clients_participating_this_round = len(client_dropout_mask) # Estimate impact of this update update_coefficient = global_learning_rate / num_clients client_weight_list_masked = [] for mask, w in zip(client_dropout_mask, client_weight_list): client = [] for mask_l, w_l, old_w_l in zip(mask, w, current_weights): update = w_l - old_w_l update[mask_l == False] = float('nan') client.append(update) client_weight_list_masked.append(client) client_weight_list_t = [list(i) for i in zip(*client_weight_list_masked)] update_weight_list = [np.nan_to_num(np.nansum(w, axis=0)) for w in client_weight_list_t] counts = [np.sum(np.array(list(i), dtype=np.int), axis=0) for i in zip(*client_dropout_mask)] update_weight_list = [update_coefficient * w for w, c in zip(update_weight_list, counts)] for layer in range(len(current_weights)): new_weights[layer] = new_weights[layer] + \ update_weight_list[layer] return new_weights def flatten(d, parent_key='', sep='.'): items = [] for k, v in d.items(): new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.MutableMapping): items.extend(flatten(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items)
6,070
38.679739
136
py
fl-analysis
fl-analysis-master/src/test_tf_model.py
from unittest import TestCase from src.tf_model import Model from src.tf_data import Dataset from matplotlib import pyplot import tensorflow as tf import numpy as np class TestModel(TestCase): def test_create_model_weight(self): model = Model.create_model("dev") (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(128) loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) initial_weights = model.get_weights() bins = np.linspace(-0.001, 0.001, 100) stddevs = [] for i in range(10): with tf.GradientTape() as tape: predictions = model(x_train, training=True) loss_value = loss_object(y_true=y_train, y_pred=predictions) grads = tape.gradient(loss_value, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) update = np.concatenate([np.reshape(initial_weights[i] - model.get_weights()[i], [-1]) for i in range(len(initial_weights))]) print(np.std(update)) stddevs.append(np.std(update)) # pyplot.hist(update, bins, alpha=1.0, label=f'Iteration {i+1}') pyplot.plot(range(1, 11), stddevs, 'bo') pyplot.legend(loc='upper right') pyplot.show() def test_create_model_weight_multbatches(self): model = Model.create_model("dev") (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(12800) batch_size = 128 loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) initial_weights = model.get_weights() bins = np.linspace(-0.001, 0.001, 100) stddevs = [] xs = [] total_batches = int(x_train.shape[0] / batch_size) for i in range(5): for bid in range(total_batches): batch_x = x_train[bid * batch_size:(bid + 1) * batch_size] batch_y = y_train[bid * batch_size:(bid + 1) * batch_size] with tf.GradientTape() as tape: predictions = model(batch_x, training=True) loss_value = loss_object(y_true=batch_y, y_pred=predictions) grads = tape.gradient(loss_value, model.trainable_variables) 
optimizer.apply_gradients(zip(grads, model.trainable_variables)) update = np.concatenate( [np.reshape(initial_weights[i] - model.get_weights()[i], [-1]) for i in range(len(initial_weights))]) print(np.std(update)) stddevs.append(np.std(update)) xs.append(i + (bid / float(total_batches))) # pyplot.hist(update, bins, alpha=1.0, label=f'Iteration {i+1}') pyplot.plot(xs, stddevs) pyplot.legend(loc='upper right') pyplot.show()
3,013
36.209877
137
py
fl-analysis
fl-analysis-master/src/config_old.py
import sys import configargparse import logging from src.client_attacks import Attack parser = configargparse.ArgumentParser() parser.add('-c', '--config_filepath', required=False, is_config_file=True, help='Path to config file.') # logging configuration parser.add_argument( '-d', '--debug', help="Print debug statements", action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.WARNING, ) parser.add_argument( '-v', '--verbose', help="Print verbose", action="store_const", dest="loglevel", const=logging.INFO, ) # client configuration parser.add_argument('--num_clients', type=int, default=3, help='Total number of clients.') parser.add_argument('--num_selected_clients', type=int, default=-1, help='The number of selected clients per round; -1 to use all clients.') parser.add_argument('--num_malicious_clients', type=int, default=0, help='Total number of malicious clients.') parser.add_argument('--augment_data', type=str, default='false', help='Whether to augment train/aux datasets', choices=['true', 'false']) # attacks parser.add_argument('--attacks_config', type=str, default=None, help='Path to attack config.') parser.add_argument('--attack_type', type=str, default='untargeted', help='Attack type.', choices=['untargeted', 'backdoor', Attack.DEVIATE_MAX_NORM.value]) parser.add_argument('--estimate_other_updates', type=str, default='false', help='Whether to estimate the update of the others.', choices=['true', 'false']) parser.add_argument('--attack_after', type=int, default=0, help='After which round to start behaving maliciously.') parser.add_argument('--attack_stop_after', type=int, default=10000000, help='After which round to stop behaving maliciously.') parser.add_argument('--attack_frequency', type=float, default=None, help='Frequency of malicious parties being selected. Default is None, for random selection') parser.add_argument('--weight_regularization_alpha', type=float, default=[1], nargs='+', help='Alpha value for weight regularization. 
Keep one for none.') parser.add_argument('--attacker_full_dataset', type=str, default='false', help='Whether the attack can access the full dataset', choices=['true', 'false']) parser.add_argument('--attacker_full_knowledge', type=str, default='false', help='Whether the attacker has access to the benign updates in a specific round', choices=['true', 'false']) parser.add_argument('--permute_dataset', type=int, nargs='+', default=[], help='Use with caution. Run many attacks while permuting items in this list') # attacks - untargeted parser.add_argument('--untargeted_after_training', type=str, default='false', help='Whether local model gradients are flipped in each local training iteration or when the local model is fully trained.', choices=['true', 'false']) # attacks - targeted_deterministic_attack parser.add_argument('--targeted_deterministic_attack_objective', type=int, default=3, help="All malicious clients try to make the model misclassify a given input as this predefined objective. Only Applicable if num_malicious_clients is non-zero value and 'attack_type' is 'targeted_deterministic'.") # attacks - targeted parser.add_argument('--targeted_attack_objective', type=int, default=[5, 7], nargs='+', help="Malicious clients try to make the model classify every sample of a class (first arguments) as a target (second argument). Only applicable if num_malicious_clients is non-zero value and 'attack_type' is 'targeted'.") parser.add_argument('--targeted_attack_benign_first', type=str, default='false', choices=['true', 'false'], help="If set to true, the attack would perform benign training first and fine tune updates on malicious dataset. 
Applicable if attack_type is 'targeted'.") # attacks - min loss parser.add_argument('--aux_samples', type=int, default=-1, help="Size of auxiliary dataset that is used for backdoor attack.") parser.add_argument('--gaussian_noise', type=float, default=0, help="Sigma value for gaussian noise that is added to aux samples if the value is > 0.") parser.add_argument('--backdoor_type', type=str, default='semantic', help='Backdoor type. Semantic = backdoor_feature_*, tasks = Sun et al., edge = edge cases', choices=['semantic', 'tasks', 'edge']) parser.add_argument('--backdoor_stealth', type=str, default='false', help='Whether to use stealth in backdoor.', choices=['true', 'false']) parser.add_argument('--backdoor_attack_objective', type=int, default=[7, 1], nargs='+', help="What class to mispredict `aux_samples` times. Only applicable if num_malicious_clients is non-zero value and 'attack_type' is 'segment_poisoning'.") parser.add_argument('--backdoor_tasks', type=int, default=1, help="Number of backdoor tasks to fill") parser.add_argument('--mal_num_epochs_max', type=int, default=100, help="Maximum number of epochs to run the attack") parser.add_argument('--mal_target_loss', type=float, default=0.1, help="Target threshold for training") # attacks - edge case parser.add_argument('--edge_case_type', type=str, default=None, help='Which edge case class to use') # attacks - data poisoning parser.add_argument('--poison_samples', type=int, default=1, help="How many samples to poison in a batch") parser.add_argument('--mal_num_batch', type=int, default=[200], nargs='+', help="How many batches to run") # attack - backdoor feature parser.add_argument('--backdoor_feature_aux_train', type=int, default=[], nargs='+', help="What samples to use as aux train set. Only applicable 'attack_type' is 'segment_poisoning' or 'model_replacement'.") parser.add_argument('--backdoor_feature_aux_test', type=int, default=[], nargs='+', help="What samples to use as aux test set. 
Only applicable 'attack_type' is 'segment_poisoning' or 'model_replacement'.") parser.add_argument('--backdoor_feature_target', type=int, default=2, help="Malicious target label") parser.add_argument('--backdoor_feature_benign_regular', type=int, default=[], nargs='+', help="Include specific benign samples in training from the dataset") parser.add_argument('--backdoor_feature_remove_malicious', type=str, default='false', help='Whether to remove the malicious samples from the honest clients.', choices=['true', 'false']) parser.add_argument('--backdoor_feature_augment_times', type=int, default=0, help="How many times the eval samples should be augmented. Leave 0 for no augmentation") # attack - backdoor contamination model parser.add_argument('--contamination_model', action='store_true', default=False, help='Whether attackers modify only a subset of neurons') parser.add_argument('--contamination_rate', type=float, default=[None], nargs='+', help='Percentage of neurons (filters) per layer that is modified by adversaries.' 'If only one value is specified, then the same contamination rate is used for all ' 'convolutional and dense layers.') # attacks - PGD parser.add_argument('--pgd', type=str, default=None, choices=['l2', 'l_inf'], help='(Projected Gradient Descent)' 'Weather malicious clients project their gradients onto the feasible set. ' 'Compatible with all implemented attacks.') parser.add_argument('--pgd_constraint', type=float, default=None, help='Projection bound (applicable only if `pgd` is set).') parser.add_argument('--pgd_clip_frequency', type=int, default=1, help='Clip every x steps of SGD. 
Defaults to 1 (after every step).') parser.add_argument('--pgd_adaptive', type=str, default="false", help="Whether to be adaptive in the gradient clipping (not sure if working).") # attacks - boosting supplement parser.add_argument('--scale_attack', type=str, default="false", help="Whether malicious clients scale their updates.") parser.add_argument('--scale_attack_weight', type=float, default=[1.0], nargs='+', help="A scaling factor for malicious clients' updates. Only applicable if scale_attack is set to true.") # defense parser.add_argument("--clip", type=float, default=None, help="A positive float value for absolute update clipping.") parser.add_argument("--clip_l2", type=float, default=None, help="A positive float value for l2 update clipping.") parser.add_argument("--clip_probability", type=float, default=1.0, help="Percentage of weights to clip") parser.add_argument("--clip_layers", type=int, default=[], nargs='+', help="Indexes of layers to clip. Leave empty for all") # data configuration parser.add_argument("--data_distribution", type=str, default='IID', help="IID or non-IID.") parser.add_argument("--number_of_samples", type=int, default=-1, help="How many samples to use for training; default value of -1 indicates to use the full dataset.") parser.add_argument("--dataset", type=str, default='mnist', help="Which dataset to use.", choices=['mnist', 'femnist', 'fmnist', 'cifar10']) # training configuration parser.add_argument("--model_name", type=str, default='dev', help="Which model to use.", choices=['dev', 'mnist_cnn', 'bhagoji', 'resnet18', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet18_v2', 'resnet56_v2', 'dev_intrinsic', 'dev_fc_intrinsic', 'bhagoji_intrinsic', 'mnistcnn_intrinsic', 'lenet5_mnist', 'lenet5_cifar', 'lenet5_intrinsic', 'allcnn', 'allcnn_intrinsic']) parser.add_argument("--num_rounds", type=int, default=40, help="Number of training rounds.") parser.add_argument("--num_epochs", type=int, default=3, help="Number of client 
epochs.") parser.add_argument("--num_test_batches", type=int, default=-1, help="Number of test batches to evaluate. -1 for max.") parser.add_argument("--batch_size", type=int, default=128, help="Clients' batch size.") parser.add_argument('--optimizer', type=str, default='Adam', help='Which optimizer to use.', choices=['Adam', 'SGD']) parser.add_argument('--learning_rate', type=float, default=0.0001, nargs="+", help='Learning rate for selected optimizer.') parser.add_argument('--lr_decay', type=str, default='None', help='Apply decay to the learning rate.', choices=['None', 'exponential', 'boundaries']) parser.add_argument('--decay_steps', type=float, default=None, help='Decay steps for exponential decay.') parser.add_argument('--decay_rate', type=float, default=None, help='Decay rate for exponential decay.') parser.add_argument('--decay_boundaries', type=int, default=[], nargs="+", help='Boundaries for boundaries decay mode') parser.add_argument('--decay_values', type=float, default=[], nargs="+", help='Values for boundaries decay mode') parser.add_argument('--regularization_rate', type=float, default=None, help='Weight regularization rate.') parser.add_argument('--mal_learning_rate', type=float, default=[], nargs="+", help='Malicious learning rate for selected optimizer.') parser.add_argument('--mal_decay_steps', type=float, default=None, help='Malicious decay steps for exponential decay.') parser.add_argument('--mal_decay_rate', type=float, default=None, help='Malicious decay rate for exponential decay.') parser.add_argument('--mal_num_epochs', type=int, default=None, help='How many malicious epochs to run') parser.add_argument('--mal_step_learning_rate', type=str, default='false', help='Whether to step the learning rate.', choices=['true', 'false']) parser.add_argument('--federated_dropout_rate', type=float, default=1.0, help='Percentage of neurons (or filters for convolutional layers) that are kept on each layer.') 
parser.add_argument('--federated_dropout_all_parameters', action='store_true', default=False, help='If set to True, applies dropout on all parameters randomly according to the dropout rate.' 'Applicable only if federated_dropout_rate < 1.0.') parser.add_argument('--federated_dropout_nonoverlap', action='store_true', default=False, help="Each client receives a unique mask that is not overlapped with other clients' masks." 'Applicable only if federated_dropout_rate < 1.0.') parser.add_argument('--federated_dropout_randommask', type=str, default='false', help="Enable low rank mode instead of federated dropout, i.e. only mask the uplink.") parser.add_argument('--global_gaussian_noise', type=float, default=0.0, help='Gaussian noise to add to the global model for the server.') parser.add_argument('--global_learning_rate', type=float, default=-1, help='Global learning rate for the server.') parser.add_argument("--aggregator", type=str, default='FedAvg', help="Aggregator type. Supported: FedAvg, TrimmedMean") parser.add_argument('--trimmed_mean_beta', type=float, default=0.1, help='Beta value of trimmed mean. 0 < beta < 1/2.') parser.add_argument("--intrinsic_dimension", type=int, default=1000, help="Size of intrinsic dimension. 
Only applicable if using subspace machine learning model.") parser.add_argument("--load_model", type=str, default=None, help="Path to load an existing model to initialize the setup.") parser.add_argument('--ignore_malicious_update', type=str, default="false", help="Whether to ignore malicious updates in training.") parser.add_argument('--quantization', type=str, default=None, help='Whether to use (probabilistic) quantization', choices=['deterministic', 'probabilistic', 'd', 'p']) parser.add_argument('--q_bits', type=int, default=None, help='Number of bits of the fixed-point number to represent the weights for quantization') parser.add_argument('--q_frac', type=int, default=None, help='Number of fractional bits of the fixed-point number for quantization') # logging parser.add_argument("--experiment_name", type=str, default='tmp', help="Sub-directory where the log files are stored.") parser.add_argument("--print_every", type=int, default=1, help="After how many rounds to perform and log evaluation on test set.") parser.add_argument("--save_updates", type=str, default='true', help="Whether to save the weight updates. Disable for large models / large number of clients.", choices=['true', 'false']) parser.add_argument("--save_norms", type=str, default='false', help="Whether to save the norms for all clients", choices=['true', 'false']) parser.add_argument("--save_weight_distributions", type=str, default='false', help="Whether to save the weight distributions for all clients", choices=['true', 'false']) parser.add_argument("--keep_history", action='store_true', default=False, help='Whether Server keeps parameter history.' 
'Warning: It slows down the training because of principle eigenvalue computation.') parser.add_argument("--save_model_at", type=int, default=[], nargs='+', help="At what rounds to save model.") # hyperparameter tuning parser.add_argument("--hyperparameter_tuning", type=str, default='false', help="Whether to use hyperparameter tuning", choices=['true', 'false']) parser.add_argument("--tune_attack_clients", type=int, nargs='+', default=[-1], help="Helper for hyperparameter tuning to set the number of clients + scale_attack_weight") parser.add_argument("--tune_attack_clients_selected_frac", type=float, default=None, help="Fraction of clients to be selected") parser.add_argument("--hyperparameters_tuned", type=str, nargs='+', default=[], help="Which hyperparams are being tuned at the moment") # experiment reproducibility parser.add_argument("--seed", type=int, default=0, help="Seed for random functions. Ensures experiment reproducibility.") # computational optimization parser.add_argument("--workers", type=int, default=1, help="How many threads to use for client training simulation.") parser.add_argument("--optimized_training", type=str, default='true', help="Use optimized training loop where possible.", choices=['true', 'false']) def get_config(): args = parser.parse_args() logging.basicConfig(level=args.loglevel) root = logging.getLogger() root.setLevel(logging.DEBUG) # handler = logging.StreamHandler(sys.stdout) # handler.setLevel(logging.DEBUG) # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # handler.setFormatter(formatter) # root.addHandler(handler) config = dict() config['num_clients'] = args.num_clients if args.num_selected_clients == -1: config['num_selected_clients'] = args.num_clients else: config['num_selected_clients'] = args.num_selected_clients config['num_malicious_clients'] = args.num_malicious_clients config['augment_data'] = True if args.augment_data.lower() == "true" else False 
config['weight_regularization_alpha'] = args.weight_regularization_alpha[0] config['attack_type'] = args.attack_type config['untargeted_after_training'] = True if args.untargeted_after_training.lower() == "true" else False config['targeted_deterministic_attack_objective'] = args.targeted_deterministic_attack_objective config['targeted_attack_objective'] = tuple(args.targeted_attack_objective) config['targeted_attack_benign_first'] = True if args.targeted_attack_benign_first.lower() == 'true' else False config['scale_attack'] = True if args.scale_attack.lower() == "true" else False config['scale_attack_weight'] = args.scale_attack_weight[0] config['data_distribution'] = args.data_distribution config['estimate_other_updates'] = True if args.estimate_other_updates.lower() == "true" else False config['num_rounds'] = args.num_rounds config['num_epochs'] = args.num_epochs config['mal_num_epochs'] = args.mal_num_epochs if args.mal_num_epochs is not None else args.num_epochs config['batch_size'] = args.batch_size config['num_test_batches'] = args.num_test_batches if args.num_test_batches > -1 else sys.maxsize config['optimizer'] = args.optimizer config['learning_rate'] = args.learning_rate[0] if isinstance(args.learning_rate, list) and len(args.learning_rate) > 0 else args.learning_rate config['lr_decay'] = args.lr_decay if args.lr_decay != 'None' else None config['decay_steps'] = args.decay_steps config['decay_rate'] = args.decay_rate config['decay_boundaries'] = args.decay_boundaries config['decay_values'] = args.decay_values config['regularization_rate'] = args.regularization_rate config['mal_learning_rate'] = args.mal_learning_rate[0] if len(args.mal_learning_rate) > 0 else config['learning_rate'] config['mal_decay_steps'] = args.mal_decay_steps if args.mal_decay_steps is not None else args.decay_steps config['mal_decay_rate'] = args.mal_decay_rate if args.mal_decay_rate is not None else args.decay_rate config['mal_step_learning_rate'] = True if 
args.mal_step_learning_rate.lower() == "true" else False config['aggregator'] = args.aggregator config['trimmed_mean_beta'] = args.trimmed_mean_beta config['global_learning_rate'] = args.global_learning_rate config['global_gaussian_noise'] = args.global_gaussian_noise config['federated_dropout_rate'] = args.rate config['federated_dropout_all_parameters'] = args.all_parameters config['federated_dropout_nonoverlap'] = args.nonoverlap config['federated_dropout_randommask'] = True if args.randommask.lower() == "true" else False config['intrinsic_dimension'] = args.intrinsic_dimension config['ignore_malicious_update'] = True if args.ignore_malicious_update.lower() == "true" else False config['quantization'] = args.quantization if config['quantization'] == 'p': config['quantization'] = 'probabilistic' elif config['quantization'] == 'd': config['quantization'] = 'deterministic' config['q_bits'] = args.q_bits config['q_frac'] = args.q_frac assert 0 < args.rate <= 1, 'Federated dropout rate must be in (0, 1] range.' config['experiment_name'] = args.experiment_name config['print_every'] = args.print_every config['save_updates'] = True if args.save_updates.lower() == 'true' else False config['keep_history'] = args.keep_history config['save_model_at'] = args.save_model_at config['load_model'] = args.load_model config['save_norms'] = True if args.save_norms.lower() == 'true' else False config['save_weight_distributions'] = True if args.save_weight_distributions.lower() == 'true' else False config['model_name'] = args.model_name if args.clip is not None and args.clip != 0: assert args.clip > 0, '`clip` parameter must be a non-negative float.' 
config['clip'] = args.clip if args.clip is not None and args.clip != 0 else None config['clip_probability'] = args.clip_probability config['clip_l2'] = args.clip_l2 config['clip_layers'] = args.clip_layers config['dataset'] = args.dataset config['workers'] = args.workers config['number_of_samples'] = args.number_of_samples config['aux_samples'] = args.aux_samples if args.aux_samples != -1 else sys.maxsize config['mal_num_epochs_max'] = args.mal_num_epochs_max config['mal_target_loss'] = args.mal_target_loss config['backdoor_type'] = args.backdoor_type config['backdoor_stealth'] = True if args.backdoor_stealth.lower() == 'true' else False config['backdoor_attack_objective'] = None if args.backdoor_attack_objective[0] == -1 else tuple(args.backdoor_attack_objective) config['edge_case_type'] = args.edge_case_type config['attack_after'] = args.attack_after config['attack_stop_after'] = args.attack_stop_after config['attack_frequency'] = args.attack_frequency if args.attack_frequency != -1 else None config['attacker_full_dataset'] = True if args.attacker_full_dataset.lower() == "true" else False config['attacker_full_knowledge'] = True if args.attacker_full_knowledge.lower() == "true" else False config['backdoor_tasks'] = args.backdoor_tasks if args.num_malicious_clients > 0 else 0 config['backdoor_feature_aux_train'] = args.backdoor_feature_aux_train config['backdoor_feature_aux_test'] = args.backdoor_feature_aux_test config['backdoor_feature_target'] = args.backdoor_feature_target config['backdoor_feature_benign_regular'] = args.backdoor_feature_benign_regular config['backdoor_feature_remove_malicious'] = True if args.backdoor_feature_remove_malicious.lower() == "true" else False config['backdoor_feature_augment_times'] = args.backdoor_feature_augment_times config['poison_samples'] = args.poison_samples config['mal_num_batch'] = args.mal_num_batch[0] config['optimized_training'] = True if args.optimized_training.lower() == "true" else False assert args.gaussian_noise 
>= 0. config['gaussian_noise'] = args.gaussian_noise config['contamination_model'] = args.contamination_model config['contamination_rate'] = _preprocess_contamination_rate(args) if args.pgd is not None: assert args.pgd_constraint is not None, "PGD constraint value must be set." config['pgd'] = args.pgd config['pgd_constraint'] = args.pgd_constraint config['pgd_clip_frequency'] = args.pgd_clip_frequency config['pgd_adaptive'] = True if args.pgd_adaptive.lower() == 'true' else False logging.info(config) logging.warning("Can I see this?") return config, args def _preprocess_contamination_rate(args): if not args.contamination_model: return args.contamination_rate assert args.contamination_rate[0] is not None, "Contamination rate must be specified." from src.tf_model import Model from tensorflow.python.keras.layers.convolutional import Conv2D from tensorflow.python.keras.layers.core import Dense model = Model.create_model(args.model_name) n_layers = len([1 for layer in model.layers if type(layer) in [Conv2D, Dense]]) if len(args.contamination_rate) == 1: return tuple(args.contamination_rate * n_layers) assert len(args.contamination_rate) == n_layers, f"The number of specified values does not align with the number " \ f"of layers ({len(args.contamination_rate)} != {n_layers})" return tuple(args.contamination_rate)
24,879
63.455959
313
py
fl-analysis
fl-analysis-master/src/hyperparameter_tuning.py
import os from tensorboard.plugins.hparams import api as hp import tensorflow as tf import numpy as np from src.federated_averaging import FederatedAveraging from src.tf_model import Model def load_model(args, config): if args.load_model is not None: model = tf.keras.models.load_model(args.load_model) # Load with weights else: model = Model.create_model(args.model_name, config['intrinsic_dimension'], config['regularization_rate']) return model def tune_hyper(args, config): learning_rate = args.learning_rate if isinstance(args.learning_rate, list) and len(args.learning_rate) > 0 else [args.learning_rate] HP_LR = hp.HParam('learning_rate', hp.Discrete(learning_rate)) HP_MAL_NUM_BATCH = hp.HParam('mal_num_batch', hp.Discrete(args.mal_num_batch)) mal_lr = args.mal_learning_rate if isinstance(args.mal_learning_rate, list) and len(args.mal_learning_rate) > 0 else [args.learning_rate] HP_MAL_LR = hp.HParam('mal_learning_rate', hp.Discrete(mal_lr)) HP_WEIGHT_REG = hp.HParam('weight_regularization_alpha', hp.Discrete(args.weight_regularization_alpha)) HP_WEIGHT_SCALE = hp.HParam('scale_attack_weight', hp.Discrete(args.scale_attack_weight)) # NUM_ClIENTS = hp.HParam('mal_learning_rate', hp.Discrete(args.mal_learning_rate)) HP_NUM_CLIENTS_SETUP = hp.HParam('num_clients_attack', hp.Discrete(args.tune_attack_clients)) METRIC_ACCURACY = 'evaluation/test_accuracy' METRIC_ADV_SUCCESS = 'evaluation/adv_success' experiment_root_dir = os.path.join(os.getcwd(), 'experiments') experiment_dir = os.path.join(experiment_root_dir, args.experiment_name) with tf.summary.create_file_writer(experiment_dir).as_default(): hp.hparams_config( hparams=[HP_LR, HP_MAL_NUM_BATCH, HP_MAL_LR, HP_WEIGHT_REG, HP_WEIGHT_SCALE, HP_NUM_CLIENTS_SETUP], metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy'), hp.Metric(METRIC_ADV_SUCCESS, display_name='Adversarial Success')], ) session_num = 0 for lr in HP_LR.domain.values: for mal_lr in HP_MAL_LR.domain.values: for mal_num_batch in 
HP_MAL_NUM_BATCH.domain.values: for wr in HP_WEIGHT_REG.domain.values: for scale in HP_WEIGHT_SCALE.domain.values: for num_clients_att in HP_NUM_CLIENTS_SETUP.domain.values: hparams_dict = { HP_MAL_NUM_BATCH.name: mal_num_batch, HP_MAL_LR.name: mal_lr, HP_WEIGHT_REG.name: wr, HP_WEIGHT_SCALE.name: scale, HP_NUM_CLIENTS_SETUP.name: num_clients_att, HP_LR.name: lr } config_run = config config_run["learning_rate"] = lr config_run["mal_num_batch"] = mal_num_batch config_run["mal_learning_rate"] = mal_lr config_run["weight_regularization_alpha"] = wr if num_clients_att != -1: # glob_lr = args.global_learning_rate if args.global_learning_rate == -1 selected = int(num_clients_att * args.tune_attack_clients_selected_frac) config_run["num_selected_clients"] = selected config_run["num_clients"] = num_clients_att config_run["scale_attack_weight"] = num_clients_att / args.global_learning_rate # assumes nom. learning_rate # TODO: Autocalc global lr for full scale # if args.global_learning_rate == -1: # config_run["scale_attack_weight"] = num_clients_att / selected # else: # config_run["scale_attack_weight"] = num_clients_att / selected else: config_run["scale_attack_weight"] = scale run = f"run-{session_num}" run_dir = os.path.join(experiment_dir, run) run_dir = os.path.join(run_dir, "events") with tf.summary.create_file_writer(run_dir).as_default(): hp.hparams(hparams_dict) # record the values used in this trial print(hparams_dict) np.random.seed(args.seed) tf.random.set_seed(args.seed) if not Model.model_supported(args.model_name, args.dataset): raise Exception( f'Model {args.model_name} does not support {args.dataset}! 
Check method Model.model_supported for the valid combinations.') models = [load_model(args, config) for i in range(args.workers)] server_model = FederatedAveraging(config, models, run) server_model.init() server_model.fit() accuracy, adv_success, test_loss = server_model.evaluate() # with tf.summary.create_file_writer(run_dir).as_default(): # tf.summary.scalar(METRIC_ACCURACY, accuracy, server_model.num_rounds) # tf.summary.scalar(METRIC_ADV_SUCCESS, adv_success, server_model.num_rounds) session_num += 1 metrics_dict = { METRIC_ACCURACY: accuracy, METRIC_ADV_SUCCESS: adv_success } server_model.write_hparams(hparams_dict, metrics_dict)
6,049
52.539823
159
py
fl-analysis
fl-analysis-master/src/attack/anticipate_tf_attack.py
from src.attack.attack import LossBasedAttack import logging import numpy as np import tensorflow as tf from copy import copy logger = logging.getLogger(__name__) # Move this into generate later # from src.torch_compat.anticipate import train_anticipate class AnticipateTfAttack(LossBasedAttack): def generate(self, dataset, model, **kwargs): self.parse_params(**kwargs) self.weights = model.get_weights() loss_object_with_reg = self._combine_losses( self.stealth_method.loss_term(model) if self.stealth_method is not None else None, self.stealth_method.alpha if self.stealth_method is not None else None) attack_model = model current_model = copy(model) current_model.set_weights(attack_model.get_weights()) fl_no_models = 10 # from datetime import datetime # stamp = datetime.now().strftime("%Y%m%d-%H%M%S") # logdir = 'logs/func/%s' % stamp # <- Name of this `run` # writer = tf.summary.create_file_writer(logdir) # tf.summary.trace_on(graph=True, profiler=False) for epoch in range(self.num_epochs): batch_counter = 0 for batch_x, batch_y in dataset.get_data_with_aux(self.poison_samples, self.num_batch, self.noise_level): # print(f"LR: {mal_optimizer._decayed_lr(var_dtype=tf.float32)}") loss = None with tf.GradientTape(persistent=True) as tape: for anticipate_i in range(self.anticipate_steps): if anticipate_i == 0: current_model = self.honest_training(tape, dataset, current_model) for att_weight, cur_weight in zip(attack_model.trainable_variables, current_model.trainable_variables): after_avg = (att_weight + (cur_weight * (fl_no_models - 1))) / fl_no_models cur_weight.assign(after_avg) else: current_model = self.honest_training(tape, dataset, current_model) if self.optimization_method == 'A': if anticipate_i == self.anticipate_steps - 1: loss_value = loss_object_with_reg(y_true=batch_y, y_pred=current_model(batch_x, training=True)) loss = loss_value else: loss_value = loss_object_with_reg(y_true=batch_y, y_pred=current_model(batch_x, training=True)) if loss is None: loss = 
loss_value else: loss = loss + loss_value # print(loss_value) # print(batch_y) # image_augmentation.debug(batch_x[0:1], batch_y[0:1]) grads = self._compute_gradients(tape, loss_value, model) self.optimizer.apply_gradients(zip(grads, attack_model.trainable_variables)) # if self.step_decay is not None: # self.step_decay.apply_step() # # if self.stealth_method is not None: # self.stealth_method.update_after_batch(model) batch_counter += 1 # test_success, adv_success = self.eval_aux_test(dataset, model, self.loss_object) # print(test_success, adv_success) logger.info(f"Epoch {epoch}: {batch_counter}") if self.stealth_method is not None: self.stealth_method.update_after_training(attack_model) # with writer.as_default(): # tf.summary.trace_export("attack_graph", step=1) return attack_model.get_weights() def honest_training(self, tape, dataset, model): honest_optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) loss_object_with_reg = self._combine_losses( self.stealth_method.loss_term(model) if self.stealth_method is not None else None, self.stealth_method.alpha if self.stealth_method is not None else None) for epoch in range(self.num_epochs): for batch_x, batch_y in dataset.get_data(): # print(f"LR: {mal_optimizer._decayed_lr(var_dtype=tf.float32)}") predictions = model(batch_x, training=True) total_loss = loss_object_with_reg(y_true=batch_y, y_pred=predictions) grads = tape.gradient(total_loss, model.trainable_variables) honest_optimizer.apply_gradients(zip(grads, model.trainable_weights)) # logger.info(f"Epoch {epoch}: {batch_counter}") # batch_counter = batch_counter + 1 break return model # def local_train_honest(self, tape, dataset, model, num_clients=1): # # TODO: Local fl training steps? 
# for (batch_x, batch_y) in dataset.get_data(): # predictions = model(batch_x, training=True) # loss_value = self.loss_object(y_true=batch_y, y_pred=predictions) # # # reg = tf.reduce_sum(model.losses) # # total_loss = loss_value + reg # # grads = tape.gradient(loss_value, model.trainable_variables) # # honest optimizer? # self.optimizer.apply_gradients(zip(grads, model.trainable_weights)) def parse_params(self, num_epochs, num_batch, poison_samples, optimizer, loss_object, step_decay=None, noise_level=None, anticipate_steps=7, model_type="lenet5_mnist", optimization_method=None, fl_no_models=10, regular_train=False): self.num_epochs = num_epochs self.num_batch = num_batch self.poison_samples = poison_samples self.optimizer = optimizer self.loss_object = loss_object self.step_decay = step_decay self.noise_level = noise_level self.anticipate_steps = anticipate_steps self.model_type = model_type self.optimization_method = optimization_method self.fl_no_models = fl_no_models self.regular_train = regular_train def eval_aux_test(self, dataset, model, loss_object): def calc_acc(ds): counter = 10 adv_ss = [] for batch_x, batch_y in ds: # aux samples preds = model(batch_x, training=False) loss_value = loss_object(y_true=batch_y, y_pred=preds) pred_inds = preds.numpy().argmax(axis=1) == batch_y # print(pred_inds, batch_y) adv_success = np.mean(pred_inds) adv_ss.append(adv_success) counter -= 1 if counter == 0: break return np.mean(adv_ss) return calc_acc(dataset.get_data()), calc_acc(dataset.get_data_with_aux(self.poison_samples, self.num_batch, self.noise_level))
7,110
40.104046
150
py
fl-analysis
fl-analysis-master/src/attack/test/AttackTest.py
import tensorflow as tf import numpy as np from src.data.tf_data_global import IIDGlobalDataset from src.attack.evasion.norm import NormBoundPGDEvasion from src.attack.evasion.trimmed_mean import TrimmedMeanEvasion from src.attack.attack import AttackDatasetBridge from src.attack.untargeted_attack import UntargetedAttack from src.attack.targeted_attack import TargetedAttack from src.data.tf_data import ImageGeneratorDataset, Dataset class AttackTest(tf.test.TestCase): def setUp(self): super(AttackTest, self).setUp() self.model = tf.keras.models.load_model("./../../../models/lenet5_emnist_098.h5") (x_train, y_train), (x_test, y_test) = Dataset.get_emnist_dataset(-1, 1) (x_train, y_train), (x_test, y_test) = (x_train[0], y_train[0]), (x_test[0], y_test[0]) (x_train, y_train) = (x_train[:15000], y_train[:15000]) targets = [1, 2, 3, 4, 5, 6, 7, 8] x_mal, y_mal_orig = x_train[targets], y_train[targets] y_mal = np.repeat(3, len(targets)).astype(y_train.dtype) np.delete(x_train, targets) np.delete(y_train, targets) self.global_dataset = IIDGlobalDataset(x_train, y_train, 30, x_test, y_test) self.dataset = AttackDatasetBridge(Dataset(x_train, y_train)) self.dataset.global_dataset.x_aux = x_mal self.dataset.global_dataset.y_aux = y_mal_orig self.dataset.global_dataset.mal_aux_labels = y_mal self.test_accuracy = tf.keras.metrics.Mean(name='test_accuracy') def _evaluate_targeted(self): batch_x, batch_y = self.dataset.global_dataset.x_aux, self.dataset.global_dataset.mal_aux_labels preds = self.model(batch_x, training=False).numpy().argmax(axis=1) pred_inds = preds == batch_y adv_success = np.mean(pred_inds) print(f"Adv success: {adv_success}") def _evaluate_untargeted(self): for batch_x, batch_y in self.global_dataset.get_test_batch(64, 12): self.optimized_evaluate(batch_x, batch_y) test_accuracy = self.test_accuracy.result().numpy() print(f"Adv success: {1 - test_accuracy}") @tf.function def optimized_evaluate(self, batch_x, batch_y): prediction_tensor = self.model(batch_x, 
training=False) prediction = prediction_tensor y_ = tf.cast(tf.argmax(prediction, axis=1), tf.uint8) test_accuracy_batch = tf.equal(y_, batch_y) self.test_accuracy(tf.reduce_mean(tf.cast(test_accuracy_batch, tf.float32))) def tearDown(self): pass def test_untargeted_attack(self): self._evaluate_untargeted() att = UntargetedAttack() att.set_stealth_method(NormBoundPGDEvasion(self.model.get_weights(), "linf", 0.1, 1, pgd_factor=.1)) weights = att.generate(self.dataset, self.model, num_epochs=1, optimizer=tf.keras.optimizers.Adam(), loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)) self.model.set_weights(weights) self._evaluate_untargeted() def test_untargeted_attack_tootight(self): self._evaluate_untargeted() att = UntargetedAttack() att.set_stealth_method(NormBoundPGDEvasion(self.model.get_weights(), "linf", 0.00001, 1, pgd_factor=0.00001)) weights = att.generate(self.dataset, self.model, num_epochs=1, optimizer=tf.keras.optimizers.Adam(), loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), alpha=0.1) self.model.set_weights(weights) self._evaluate_untargeted() def test_untargeted_attack_trimmedmean(self): self._evaluate_untargeted() att = UntargetedAttack() att.set_stealth_method(TrimmedMeanEvasion(0.5, [self.model.get_weights(), self.model.get_weights(), self.model.get_weights()], 1)) weights = att.generate(self.dataset, self.model, num_epochs=1, optimizer=tf.keras.optimizers.Adam(), loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)) self.model.set_weights(weights) self._evaluate_untargeted() def test_targeted_attack_norm(self): self._evaluate_untargeted() att = TargetedAttack() att.set_stealth_method(NormBoundPGDEvasion(self.model.get_weights(), "linf", 0.1, 1, pgd_factor=.1)) weights = att.generate(self.dataset, self.model, num_epochs=3, num_batch=6, poison_samples=5, optimizer=tf.keras.optimizers.Adam(), loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)) 
self.model.set_weights(weights) self._evaluate_targeted() def test_targeted_attack_norm_l2(self): self._evaluate_untargeted() l2 = 1.0 att = TargetedAttack() att.set_stealth_method(NormBoundPGDEvasion(self.model.get_weights(), "l2", 2, l2)) old_weights = self.model.get_weights() new_weights = att.generate(self.dataset, self.model, num_epochs=3, num_batch=6, poison_samples=5, optimizer=tf.keras.optimizers.Adam(), loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)) delta_weights = [new_weights[i] - old_weights[i] for i in range(len(old_weights))] l2_norm_tensor = tf.constant(l2) layers_to_clip = [tf.reshape(delta_weights[i], [-1]) for i in range(len(delta_weights))] # for norm calculation norm = tf.norm(tf.concat(layers_to_clip, axis=0)) # print(f"Norm: {norm}") multiply = min((l2_norm_tensor / norm).numpy(), 1.0) new_weights_clipped = [delta_weights[i] * multiply for i in range(len(delta_weights))] self.model.set_weights(new_weights) self._evaluate_targeted() if __name__ == '__main__': tf.test.main()
6,328
45.19708
138
py
fl-analysis
fl-analysis-master/src/subspace/builder/resnet.py
import numpy as np import tensorflow as tf from tensorflow.keras import Model from tensorflow.keras.layers import (Flatten, Input, Activation, Reshape, Dropout, Convolution2D, MaxPooling2D, BatchNormalization, Conv2D, GlobalAveragePooling2D, Concatenate, AveragePooling2D, LocallyConnected2D, Dense) # from general.tfutil import hist_summaries_traintest, scalar_summaries_traintest from src.subspace.builder.model_builders import make_and_add_losses from src.subspace.keras_ext.engine import ExtendedModel from src.subspace.keras_ext.layers import (RProjDense, RProjConv2D, RProjBatchNormalization, RProjLocallyConnected2D) from src.subspace.keras_ext.rproj_layers_util import (OffsetCreatorDenseProj, OffsetCreatorSparseProj, OffsetCreatorFastfoodProj, FastWalshHadamardProjector, ThetaPrime, MultiplyLayer) from src.subspace.keras_ext.util import make_image_input_preproc from tensorflow.keras.regularizers import l2 def resnet_layer(inputs, offset_creator_class, vv, num_filters=16, kernel_size=3, strides=1, activation='relu', batch_normalization=True, conv_first=True, kernel_regularizer=l2(1e-4), name=None): """2D Convolution-Batch Normalization-Activation stack builder # Arguments inputs (tensor): input tensor from input image or previous layer num_filters (int): Conv2D number of filters kernel_size (int): Conv2D square kernel dimensions strides (int): Conv2D square stride dimensions activation (string|None): activation name batch_normalization (bool): whether to include batch normalization conv_first (bool): conv-bn-activation (True) or bn-activation-conv (False) # Returns x (tensor): tensor as input to the next layer """ conv = RProjConv2D(offset_creator_class, vv, num_filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer='he_normal', activation=None, kernel_regularizer=kernel_regularizer, name=name) x = inputs if conv_first: x = conv(x) if batch_normalization: x = RProjBatchNormalization(offset_creator_class, vv)(x) # x = 
BatchNormalization()(x) # does this even make sense if activation is not None: x = Activation(activation)(x) else: pass # if batch_normalization: # x = BatchNormalization()(x) # if activation is not None: # x = Activation(activation)(x) # x = conv(x) return x def build_LeNet_resnet(depth, weight_decay=0, vsize=100, shift_in=None, proj_type='sparse', disable_bn=False): im_shape = (32, 32, 3) n_label_vals = 10 im_dtype = 'float32' batch_norm_enabled = not disable_bn if (depth - 2) % 6 != 0: raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])') num_res_blocks = int((depth - 2) / 6) assert proj_type in ('dense', 'sparse') if proj_type == 'dense': offset_creator_class = OffsetCreatorDenseProj else: # sparse offset_creator_class = OffsetCreatorSparseProj with tf.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') with tf.name_scope('net') as scope: vv = ThetaPrime(vsize) num_filters = 16 x = resnet_layer(preproc_images, offset_creator_class, vv, num_filters=num_filters, batch_normalization=batch_norm_enabled) # Instantiate the stack of residual units for stack in range(3): for res_block in range(num_res_blocks): strides = 1 if stack > 0 and res_block == 0: # first layer but not first stack strides = 2 # downsample y = resnet_layer(x, offset_creator_class, vv, num_filters=num_filters, strides=strides, name=f"Conv2D_stack{stack}_res{res_block}_l0", batch_normalization=batch_norm_enabled) y = resnet_layer(y, offset_creator_class, vv, num_filters=num_filters, activation=None, name=f"Conv2D_stack{stack}_res{res_block}_l1", batch_normalization=batch_norm_enabled) if stack > 0 and res_block == 0: # first layer but not first stack # linear projection residual shortcut connection to match # changed dims x = resnet_layer(x, offset_creator_class, vv, num_filters=num_filters, kernel_size=1, strides=strides, activation=None, batch_normalization=False, 
name=f"Conv2D_stack{stack}_res{res_block}_l2") x = tf.keras.layers.add([x, y]) x = Activation('relu')(x) num_filters *= 2 x = AveragePooling2D(pool_size=8)(x) y = Flatten()(x) logits = RProjDense(offset_creator_class, vv, n_label_vals, activation='softmax', kernel_initializer='he_normal')(y) model = ExtendedModel(input=input_images, output=logits) model.add_extra_trainable_weight(vv.var_2d) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model def resnet_layer_ff(inputs, conv2d_class, num_filters=16, kernel_size=3, strides=1, activation='relu', batch_normalization=True, conv_first=True, kernel_regularizer=l2(1e-4), name=None): """2D Convolution-Batch Normalization-Activation stack builder # Arguments inputs (tensor): input tensor from input image or previous layer num_filters (int): Conv2D number of filters kernel_size (int): Conv2D square kernel dimensions strides (int): Conv2D square stride dimensions activation (string|None): activation name batch_normalization (bool): whether to include batch normalization conv_first (bool): conv-bn-activation (True) or bn-activation-conv (False) # Returns x (tensor): tensor as input to the next layer """ conv = conv2d_class(num_filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer='he_normal', activation=None, kernel_regularizer=kernel_regularizer, name=name) x = inputs if conv_first: x = conv(x) if batch_normalization: x = BatchNormalization()(x) # does this even make sense if activation is not None: x = Activation(activation)(x) else: pass # if batch_normalization: # x = BatchNormalization()(x) # if activation is not None: # x = Activation(activation)(x) # x = conv(x) return x def build_resnet_fastfood(depth, weight_decay=0, vsize=100, shift_in=None, proj_type='sparse', DD=None): im_shape = (32, 32, 3) n_label_vals = 10 im_dtype = 'float32' 
if (depth - 2) % 6 != 0: raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])') num_res_blocks = int((depth - 2) / 6) assert proj_type in ('dense', 'sparse') if proj_type == 'dense': offset_creator_class = OffsetCreatorDenseProj else: # sparse offset_creator_class = OffsetCreatorSparseProj with tf.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') def define_model(input_images, DenseLayer, ConvLayer): vv = ThetaPrime(vsize) num_filters = 16 x = resnet_layer_ff(preproc_images, ConvLayer, num_filters=num_filters) # Instantiate the stack of residual units for stack in range(3): for res_block in range(num_res_blocks): strides = 1 if stack > 0 and res_block == 0: # first layer but not first stack strides = 2 # downsample y = resnet_layer_ff(x, ConvLayer, num_filters=num_filters, strides=strides, name=f"Conv2D_stack{stack}_res{res_block}_l0") y = resnet_layer_ff(y, ConvLayer, num_filters=num_filters, activation=None, name=f"Conv2D_stack{stack}_res{res_block}_l1") if stack > 0 and res_block == 0: # first layer but not first stack # linear projection residual shortcut connection to match # changed dims x = resnet_layer_ff(x, ConvLayer, num_filters=num_filters, kernel_size=1, strides=strides, activation=None, batch_normalization=False, name=f"Conv2D_stack{stack}_res{res_block}_l2") x = tf.keras.layers.add([x, y]) x = Activation('relu')(x) num_filters *= 2 x = AveragePooling2D(pool_size=8)(x) y = Flatten()(x) logits = DenseLayer(n_label_vals, activation='softmax', kernel_initializer='he_normal')(y) model = ExtendedModel(input=input_images, output=logits) model.add_extra_trainable_weight(vv.var_2d) return model if not DD: with tf.name_scope('net_disposable'): # Make disposable direct model model_disposable = define_model(input_images, Dense, Conv2D) DD = np.sum([np.prod(var.get_shape().as_list()) for var in 
model_disposable.trainable_weights]).item() print(f"D {DD} {type(DD)}") del model_disposable with tf.name_scope('net'): # Make real RProj FWH model fwh_projector = FastWalshHadamardProjector(vsize, DD) DenseLayer = lambda *args, **kwargs: RProjDense(OffsetCreatorFastfoodProj, fwh_projector, *args, **kwargs) Conv2DLayer = lambda *args, **kwargs: RProjConv2D(OffsetCreatorFastfoodProj, fwh_projector, *args, **kwargs) model = define_model(input_images, DenseLayer, Conv2DLayer) fwh_projector.check_usage() for ww in fwh_projector.trainable_weights: model.add_extra_trainable_weight(ww) for ww in fwh_projector.non_trainable_weights: model.add_extra_non_trainable_weight(ww) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model # Model: "model" # __________________________________________________________________________________________________ # Layer (type) Output Shape Param # Connected to # ================================================================================================== # input_1 (InputLayer) [(None, 32, 32, 3)] 0 # __________________________________________________________________________________________________ # conv2d (Conv2D) (None, 32, 32, 16) 448 input_1[0][0] # __________________________________________________________________________________________________ # batch_normalization (BatchNorma (None, 32, 32, 16) 64 conv2d[0][0] # __________________________________________________________________________________________________ # activation (Activation) (None, 32, 32, 16) 0 batch_normalization[0][0] # __________________________________________________________________________________________________ # Conv2D_stack0_res0_l0 (Conv2D) (None, 32, 32, 16) 2320 activation[0][0] # __________________________________________________________________________________________________ # batch_normalization_1 
(BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res0_l0[0][0] # __________________________________________________________________________________________________ # activation_1 (Activation) (None, 32, 32, 16) 0 batch_normalization_1[0][0] # __________________________________________________________________________________________________ # Conv2D_stack0_res0_l1 (Conv2D) (None, 32, 32, 16) 2320 activation_1[0][0] # __________________________________________________________________________________________________ # batch_normalization_2 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res0_l1[0][0] # __________________________________________________________________________________________________ # add (Add) (None, 32, 32, 16) 0 activation[0][0] # batch_normalization_2[0][0] # __________________________________________________________________________________________________ # activation_2 (Activation) (None, 32, 32, 16) 0 add[0][0] # __________________________________________________________________________________________________ # Conv2D_stack0_res1_l0 (Conv2D) (None, 32, 32, 16) 2320 activation_2[0][0] # __________________________________________________________________________________________________ # batch_normalization_3 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res1_l0[0][0] # __________________________________________________________________________________________________ # activation_3 (Activation) (None, 32, 32, 16) 0 batch_normalization_3[0][0] # __________________________________________________________________________________________________ # Conv2D_stack0_res1_l1 (Conv2D) (None, 32, 32, 16) 2320 activation_3[0][0] # __________________________________________________________________________________________________ # batch_normalization_4 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res1_l1[0][0] # __________________________________________________________________________________________________ # add_1 (Add) (None, 32, 32, 16) 0 activation_2[0][0] # 
batch_normalization_4[0][0] # __________________________________________________________________________________________________ # activation_4 (Activation) (None, 32, 32, 16) 0 add_1[0][0] # __________________________________________________________________________________________________ # Conv2D_stack0_res2_l0 (Conv2D) (None, 32, 32, 16) 2320 activation_4[0][0] # __________________________________________________________________________________________________ # batch_normalization_5 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res2_l0[0][0] # __________________________________________________________________________________________________ # activation_5 (Activation) (None, 32, 32, 16) 0 batch_normalization_5[0][0] # __________________________________________________________________________________________________ # Conv2D_stack0_res2_l1 (Conv2D) (None, 32, 32, 16) 2320 activation_5[0][0] # __________________________________________________________________________________________________ # batch_normalization_6 (BatchNor (None, 32, 32, 16) 64 Conv2D_stack0_res2_l1[0][0] # __________________________________________________________________________________________________ # add_2 (Add) (None, 32, 32, 16) 0 activation_4[0][0] # batch_normalization_6[0][0] # __________________________________________________________________________________________________ # activation_6 (Activation) (None, 32, 32, 16) 0 add_2[0][0] # __________________________________________________________________________________________________ # Conv2D_stack1_res0_l0 (Conv2D) (None, 16, 16, 32) 4640 activation_6[0][0] # __________________________________________________________________________________________________ # batch_normalization_7 (BatchNor (None, 16, 16, 32) 128 Conv2D_stack1_res0_l0[0][0] # __________________________________________________________________________________________________ # activation_7 (Activation) (None, 16, 16, 32) 0 batch_normalization_7[0][0] # 
__________________________________________________________________________________________________ # Conv2D_stack1_res0_l1 (Conv2D) (None, 16, 16, 32) 9248 activation_7[0][0] # __________________________________________________________________________________________________ # Conv2D_stack1_res0_l2 (Conv2D) (None, 16, 16, 32) 544 activation_6[0][0] # __________________________________________________________________________________________________ # batch_normalization_8 (BatchNor (None, 16, 16, 32) 128 Conv2D_stack1_res0_l1[0][0] # __________________________________________________________________________________________________ # add_3 (Add) (None, 16, 16, 32) 0 Conv2D_stack1_res0_l2[0][0] # batch_normalization_8[0][0] # __________________________________________________________________________________________________ # activation_8 (Activation) (None, 16, 16, 32) 0 add_3[0][0] # __________________________________________________________________________________________________ # Conv2D_stack1_res1_l0 (Conv2D) (None, 16, 16, 32) 9248 activation_8[0][0] # __________________________________________________________________________________________________ # batch_normalization_9 (BatchNor (None, 16, 16, 32) 128 Conv2D_stack1_res1_l0[0][0] # __________________________________________________________________________________________________ # activation_9 (Activation) (None, 16, 16, 32) 0 batch_normalization_9[0][0] # __________________________________________________________________________________________________ # Conv2D_stack1_res1_l1 (Conv2D) (None, 16, 16, 32) 9248 activation_9[0][0] # __________________________________________________________________________________________________ # batch_normalization_10 (BatchNo (None, 16, 16, 32) 128 Conv2D_stack1_res1_l1[0][0] # __________________________________________________________________________________________________ # add_4 (Add) (None, 16, 16, 32) 0 activation_8[0][0] # batch_normalization_10[0][0] # 
__________________________________________________________________________________________________ # activation_10 (Activation) (None, 16, 16, 32) 0 add_4[0][0] # __________________________________________________________________________________________________ # Conv2D_stack1_res2_l0 (Conv2D) (None, 16, 16, 32) 9248 activation_10[0][0] # __________________________________________________________________________________________________ # batch_normalization_11 (BatchNo (None, 16, 16, 32) 128 Conv2D_stack1_res2_l0[0][0] # __________________________________________________________________________________________________ # activation_11 (Activation) (None, 16, 16, 32) 0 batch_normalization_11[0][0] # __________________________________________________________________________________________________ # Conv2D_stack1_res2_l1 (Conv2D) (None, 16, 16, 32) 9248 activation_11[0][0] # __________________________________________________________________________________________________ # batch_normalization_12 (BatchNo (None, 16, 16, 32) 128 Conv2D_stack1_res2_l1[0][0] # __________________________________________________________________________________________________ # add_5 (Add) (None, 16, 16, 32) 0 activation_10[0][0] # batch_normalization_12[0][0] # __________________________________________________________________________________________________ # activation_12 (Activation) (None, 16, 16, 32) 0 add_5[0][0] # __________________________________________________________________________________________________ # Conv2D_stack2_res0_l0 (Conv2D) (None, 8, 8, 64) 18496 activation_12[0][0] # __________________________________________________________________________________________________ # batch_normalization_13 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res0_l0[0][0] # __________________________________________________________________________________________________ # activation_13 (Activation) (None, 8, 8, 64) 0 batch_normalization_13[0][0] # 
__________________________________________________________________________________________________ # Conv2D_stack2_res0_l1 (Conv2D) (None, 8, 8, 64) 36928 activation_13[0][0] # __________________________________________________________________________________________________ # Conv2D_stack2_res0_l2 (Conv2D) (None, 8, 8, 64) 2112 activation_12[0][0] # __________________________________________________________________________________________________ # batch_normalization_14 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res0_l1[0][0] # __________________________________________________________________________________________________ # add_6 (Add) (None, 8, 8, 64) 0 Conv2D_stack2_res0_l2[0][0] # batch_normalization_14[0][0] # __________________________________________________________________________________________________ # activation_14 (Activation) (None, 8, 8, 64) 0 add_6[0][0] # __________________________________________________________________________________________________ # Conv2D_stack2_res1_l0 (Conv2D) (None, 8, 8, 64) 36928 activation_14[0][0] # __________________________________________________________________________________________________ # batch_normalization_15 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res1_l0[0][0] # __________________________________________________________________________________________________ # activation_15 (Activation) (None, 8, 8, 64) 0 batch_normalization_15[0][0] # __________________________________________________________________________________________________ # Conv2D_stack2_res1_l1 (Conv2D) (None, 8, 8, 64) 36928 activation_15[0][0] # __________________________________________________________________________________________________ # batch_normalization_16 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res1_l1[0][0] # __________________________________________________________________________________________________ # add_7 (Add) (None, 8, 8, 64) 0 activation_14[0][0] # batch_normalization_16[0][0] # 
__________________________________________________________________________________________________ # activation_16 (Activation) (None, 8, 8, 64) 0 add_7[0][0] # __________________________________________________________________________________________________ # Conv2D_stack2_res2_l0 (Conv2D) (None, 8, 8, 64) 36928 activation_16[0][0] # __________________________________________________________________________________________________ # batch_normalization_17 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res2_l0[0][0] # __________________________________________________________________________________________________ # activation_17 (Activation) (None, 8, 8, 64) 0 batch_normalization_17[0][0] # __________________________________________________________________________________________________ # Conv2D_stack2_res2_l1 (Conv2D) (None, 8, 8, 64) 36928 activation_17[0][0] # __________________________________________________________________________________________________ # batch_normalization_18 (BatchNo (None, 8, 8, 64) 256 Conv2D_stack2_res2_l1[0][0] # __________________________________________________________________________________________________ # add_8 (Add) (None, 8, 8, 64) 0 activation_16[0][0] # batch_normalization_18[0][0] # __________________________________________________________________________________________________ # activation_18 (Activation) (None, 8, 8, 64) 0 add_8[0][0] # __________________________________________________________________________________________________ # average_pooling2d (AveragePooli (None, 1, 1, 64) 0 activation_18[0][0] # __________________________________________________________________________________________________ # flatten (Flatten) (None, 64) 0 average_pooling2d[0][0] # __________________________________________________________________________________________________ # dense (Dense) (None, 10) 650 flatten[0][0] # ================================================================================================== # Total params: 
274,442 # Trainable params: 273,066 # Non-trainable params: 1,376 # __________________________________________________________________________________________________
27,766
57.21174
116
py
fl-analysis
fl-analysis-master/src/subspace/builder/model_builders.py
import numpy as np import tensorflow as tf from tensorflow.keras import Model from tensorflow.keras.layers import (Dense, Flatten, Input, Activation, Reshape, Dropout, Convolution2D, MaxPooling2D, BatchNormalization, Conv2D, GlobalAveragePooling2D, Concatenate, AveragePooling2D, LocallyConnected2D) # from general.tfutil import hist_summaries_traintest, scalar_summaries_traintest from src.subspace.keras_ext.engine import ExtendedModel from src.subspace.keras_ext.layers import (RProjDense, RProjConv2D, RProjBatchNormalization, RProjLocallyConnected2D) from src.subspace.keras_ext.rproj_layers_util import (OffsetCreatorDenseProj, OffsetCreatorSparseProj, OffsetCreatorFastfoodProj, FastWalshHadamardProjector, ThetaPrime, MultiplyLayer) from src.subspace.keras_ext.util import make_image_input_preproc from tensorflow.keras.regularizers import l2 def make_and_add_losses(model, input_labels): '''Add classification and L2 losses''' with tf.compat.v1.name_scope('losses') as scope: prob = tf.nn.softmax(model.v.logits, name='prob') cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model.v.logits, labels=input_labels, name='cross_ent') loss_cross_ent = tf.reduce_mean(input_tensor=cross_ent, name='loss_cross_ent') model.add_trackable('loss_cross_ent', loss_cross_ent) class_prediction = tf.argmax(input=prob, axis=1) prediction_correct = tf.equal(class_prediction, input_labels, name='prediction_correct') accuracy = tf.reduce_mean(input_tensor=tf.cast(prediction_correct, dtype=tf.float32), name='accuracy') model.add_trackable('accuracy', accuracy) # hist_summaries_traintest(prob, cross_ent) # scalar_summaries_traintest(accuracy) model.add_loss_reg() if 'loss_reg' in model.v: loss = tf.add_n(( model.v.loss_cross_ent, model.v.loss_reg, ), name='loss') else: loss = model.v.loss_cross_ent model.add_trackable('loss', loss) nontrackable_fields = ['prob', 'cross_ent', 'class_prediction', 'prediction_correct'] for field in nontrackable_fields: model.add_var(field, 
locals()[field]) def build_model_mnist_fc(weight_decay=0, vsize=100, depth=2, width=100, shift_in=None, proj_type='dense'): im_shape = (28, 28, 1) n_label_vals = 10 im_dtype = 'float32' assert proj_type in ('dense', 'sparse') if proj_type == 'dense': offset_creator_class = OffsetCreatorDenseProj else: # sparse offset_creator_class = OffsetCreatorSparseProj with tf.compat.v1.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') with tf.compat.v1.name_scope('net') as scope: vv = ThetaPrime(vsize) xx = input_images xx = Flatten()(xx) for _ in range(depth): xx = RProjDense(offset_creator_class, vv, width, activation='relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(xx) # xx = Dense(width, activation='relu')(xx) logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), activation='softmax')(xx) # model = Model(input=input_images, output=logits) model = ExtendedModel(input=input_images, output=logits) model.add_extra_trainable_weight(vv.var_2d) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model, vv.var_2d def build_cnn_model_mnist_bhagoji(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'): im_shape = (28, 28, 1) n_label_vals = 10 im_dtype = 'float32' if proj_type == 'dense': offset_creator_class = OffsetCreatorDenseProj else: # sparse offset_creator_class = OffsetCreatorSparseProj with tf.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') with tf.name_scope('net') as scope: vv = ThetaPrime(vsize) xx = RProjConv2D(offset_creator_class, vv, 64, kernel_size=5, strides=1, 
kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images) xx = RProjConv2D(offset_creator_class, vv, 64, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx) # xx = MaxPooling2D((2, 2))(xx) xx = Flatten()(xx) xx = RProjDense(offset_creator_class, vv, 128, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx) logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), activation='softmax')(xx) model = ExtendedModel(input=input_images, output=logits) model.add_extra_trainable_weight(vv.var_2d) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model def build_cnn_model_mnist_dev_conv(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'): im_shape = (28, 28, 1) n_label_vals = 10 im_dtype = 'float32' if proj_type == 'dense': offset_creator_class = OffsetCreatorDenseProj else: # sparse offset_creator_class = OffsetCreatorSparseProj with tf.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') with tf.name_scope('net') as scope: vv = ThetaPrime(vsize) xx = RProjConv2D(offset_creator_class, vv, 8, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images) xx = RProjConv2D(offset_creator_class, vv, 4, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = MaxPooling2D((2, 2))(xx) xx = Flatten()(xx) xx = RProjDense(offset_creator_class, vv, 32, kernel_initializer='he_normal', 
activation='relu', kernel_regularizer=l2(weight_decay))(xx) logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), activation='softmax')(xx) model = ExtendedModel(input=input_images, output=logits) model.add_extra_trainable_weight(vv.var_2d) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model def build_cnn_model_mnistcnn_conv(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'): im_shape = (28, 28, 1) n_label_vals = 10 im_dtype = 'float32' if proj_type == 'dense': offset_creator_class = OffsetCreatorDenseProj else: # sparse offset_creator_class = OffsetCreatorSparseProj with tf.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') with tf.name_scope('net') as scope: vv = ThetaPrime(vsize) xx = RProjConv2D(offset_creator_class, vv, 64, kernel_size=2, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images) xx = MaxPooling2D((2, 2))(xx) xx = RProjConv2D(offset_creator_class, vv, 32, kernel_size=2, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = MaxPooling2D((2, 2))(xx) xx = Flatten()(xx) xx = RProjDense(offset_creator_class, vv, 256, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx) logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), activation='softmax')(xx) model = ExtendedModel(input=input_images, output=logits) model.add_extra_trainable_weight(vv.var_2d) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in 
nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model def build_cnn_model_cifar_allcnn(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'): im_shape = (32, 32, 3) n_label_vals = 10 im_dtype = 'float32' if proj_type == 'dense': offset_creator_class = OffsetCreatorDenseProj else: # sparse offset_creator_class = OffsetCreatorSparseProj with tf.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') with tf.name_scope('net') as scope: vv = ThetaPrime(vsize) xx = RProjConv2D(offset_creator_class, vv, 96, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images) xx = RProjConv2D(offset_creator_class, vv, 96, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = RProjConv2D(offset_creator_class, vv, 96, kernel_size=3, strides=2, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=3, strides=2, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = RProjConv2D(offset_creator_class, vv, 192, kernel_size=1, 
strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = RProjConv2D(offset_creator_class, vv, 10, kernel_size=1, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = GlobalAveragePooling2D()(xx) logits = RProjDense(offset_creator_class, vv, 10, kernel_regularizer=l2(weight_decay), activation='softmax')(xx) model = ExtendedModel(input=input_images, output=logits) model.add_extra_trainable_weight(vv.var_2d) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model vv = None def build_test(): im_shape = (28, 28, 1) n_label_vals = 10 im_dtype = 'float32' input_images = Input(shape=im_shape) global vv if vv is None: vv = ThetaPrime(100) xx = input_images xx = Flatten()(xx) for _ in range(3): xx = Dense(100, activation='relu')(xx) logits = Dense(100)(xx) logits = MultiplyLayer(vv.var)(logits) logits = Dense(10)(logits) model = Model(inputs=input_images, outputs=logits) return model, vv def build_LeNet_cifar(weight_decay=0, vsize=100, shift_in=None, proj_type='sparse'): im_shape = (32, 32, 3) n_label_vals = 10 im_dtype = 'float32' assert proj_type in ('dense', 'sparse') if proj_type == 'dense': offset_creator_class = OffsetCreatorDenseProj else: # sparse offset_creator_class = OffsetCreatorSparseProj with tf.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') with tf.name_scope('net') as scope: vv = ThetaPrime(vsize) xx = RProjConv2D(offset_creator_class, vv, 6, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images) xx = MaxPooling2D((2, 2))(xx) xx = 
RProjConv2D(offset_creator_class, vv, 16, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = MaxPooling2D((2, 2))(xx) xx = Flatten()(xx) # xx = Dropout(0.5)(xx) xx = RProjDense(offset_creator_class, vv, 120, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx) # xx = Dropout(0.5)(xx) xx = RProjDense(offset_creator_class, vv, 84, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx) # xx = Dropout(0.5)(xx) logits = RProjDense(offset_creator_class, vv, 10, kernel_initializer='glorot_uniform', activation='softmax', kernel_regularizer=l2(weight_decay))(xx) model = ExtendedModel(input=input_images, output=logits) model.add_extra_trainable_weight(vv.var_2d) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model def build_model_cifar_LeNet_fastfood(weight_decay=0, vsize=100, shift_in=None, DD=None, d_rate=0.0, c1=6, c2=16, d1=120, d2=84): '''If DD is not specified, it will be computed.''' im_shape = (32, 32, 3) n_label_vals = 10 im_dtype = 'float32' with tf.name_scope('inputs'): input_images, preproc_images = make_image_input_preproc(im_shape, im_dtype, shift_in=shift_in) input_labels = Input(batch_shape=(None,), dtype='int64') def define_model(input_images, DenseLayer, Conv2DLayer): vv = ThetaPrime(vsize) xx = Conv2DLayer(c1, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(preproc_images) xx = MaxPooling2D((2, 2))(xx) xx = Conv2DLayer(c2, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = MaxPooling2D((2, 2))(xx) xx = Flatten()(xx) xx = Dropout(d_rate)(xx) xx = DenseLayer(d1, 
kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = Dropout(d_rate)(xx) xx = DenseLayer(d2, kernel_initializer='he_normal', activation='relu', kernel_regularizer=l2(weight_decay))(xx) xx = Dropout(d_rate)(xx) logits = DenseLayer(10, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(xx) model = ExtendedModel(input=input_images, output=logits) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels', 'logits'] for field in ['logits']: model.add_var(field, locals()[field]) return model if not DD: with tf.name_scope('net_disposable'): # Make disposable direct model model_disposable = define_model(input_images, Dense, Conv2D) DD = np.sum([np.prod(var.get_shape().as_list()) for var in model_disposable.trainable_weights]).item() print(f"D {DD} {type(DD)}") del model_disposable with tf.name_scope('net'): # Make real RProj FWH model fwh_projector = FastWalshHadamardProjector(vsize, DD) DenseLayer = lambda *args, **kwargs: RProjDense(OffsetCreatorFastfoodProj, fwh_projector, *args, **kwargs) Conv2DLayer = lambda *args, **kwargs: RProjConv2D(OffsetCreatorFastfoodProj, fwh_projector, *args, **kwargs) model = define_model(input_images, DenseLayer, Conv2DLayer) fwh_projector.check_usage() for ww in fwh_projector.trainable_weights: model.add_extra_trainable_weight(ww) for ww in fwh_projector.non_trainable_weights: model.add_extra_non_trainable_weight(ww) nontrackable_fields = ['input_images', 'preproc_images', 'input_labels'] for field in nontrackable_fields: model.add_var(field, locals()[field]) make_and_add_losses(model, input_labels) return model
18,121
44.762626
202
py
fl-analysis
fl-analysis-master/src/subspace/builder/test_model_builders.py
from unittest import TestCase import tensorflow as tf import numpy as np from tf_data import Dataset from tf_model import Model from .model_builders import build_model_mnist_fc, build_cnn_model_mnist_bhagoji, build_test, build_cnn_model_mnist_dev_conv from ..keras_ext.rproj_layers_util import ThetaPrime import resource class Test(TestCase): def test_build_model_summary(self): model = build_model_mnist_fc() print('All model weights:') # total_params = summarize_weights(model.trainable_weights) print('Model summary:') model.summary() model.print_trainable_warnings() def test_build_model_run(self): model = build_model_mnist_fc() (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(1) output = model(x_train) accuracy = output == y_train print(output, accuracy) def test_build_model_get_weights(self): model = build_model_mnist_fc() weights = model.get_weights() model.set_weights(weights) # print(weights) def test_build_model_trainable_variables(self): model = build_model_mnist_fc() vars = model.trainable_variables print(vars) def test_build_model_test_bp(self): model, theta = build_model_mnist_fc() (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(24) loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(learning_rate=0.001) # model = Model.create_model("dev") # x = tf.Variable(3.0) # y = x * x for i in range(10): with tf.GradientTape() as tape: # tape.watch(theta) predictions = model(x_train, training=True) loss_value = loss_object(y_true=y_train, y_pred=predictions) # tape.watch(x) # y = x * x # grads = tape.gradient(y, [x]) print(loss_value) grads = tape.gradient(loss_value, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) def test_build_model_test_conv(self): model = build_cnn_model_mnist_dev_conv(proj_type='sparse', vsize=1000) # model, theta = build_model_mnist_fc() (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(12800) 
batch_size = 128 loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) for i in range(10): for bid in range(int(x_train.shape[0] / batch_size)): batch_x = x_train[bid * batch_size:(bid + 1) * batch_size] batch_y = y_train[bid * batch_size:(bid + 1) * batch_size] with tf.GradientTape() as tape: predictions = model(batch_x, training=True) loss_value = loss_object(y_true=batch_y, y_pred=predictions) print(loss_value) grads = tape.gradient(loss_value, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) print(using("Sparse"), flush=True) def test_build_model_test_timing(self): import time start1 = time.time() model = build_cnn_model_mnist_bhagoji() (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(24) loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(learning_rate=0.001) for i in range(10): with tf.GradientTape() as tape: predictions = model(x_train, training=True) loss_value = loss_object(y_true=y_train, y_pred=predictions) print(loss_value) grads = tape.gradient(loss_value, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) duration_sparse = time.time() - start1 start2 = time.time() model = build_cnn_model_mnist_bhagoji(proj_type='dense') (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(24) loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(learning_rate=0.00001) for i in range(10): with tf.GradientTape() as tape: predictions = model(x_train, training=True) loss_value = loss_object(y_true=y_train, y_pred=predictions) print(loss_value) grads = tape.gradient(loss_value, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) duration_dense = time.time() - start2 print(f"Done!") print(f"Dense: {duration_dense}") 
print(f"Sparse: {duration_sparse}") def test_build_model_test_vars(self): def run(): model, theta = build_test() (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(24) loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) # model = Model.create_model("dev") # x = tf.Variable(3.0) # y = x * x with tf.GradientTape() as tape: # tape.watch(theta.var) predictions = model(x_train, training=True) # predictions = predictions * tf.norm(theta.var) loss_value = loss_object(y_true=y_train, y_pred=predictions) vars = model.trainable_variables + [theta.var] grads = tape.gradient(loss_value, vars) optimizer.apply_gradients(zip(grads, vars)) run() def test_build_model_write_graph(self): # tf.compat.v1.disable_eager_execution() tf.summary.trace_on() model = build_model_mnist_fc(depth=1) (x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(1) loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) @tf.function def run(): with tf.GradientTape() as tape: predictions = model(x_train, training=True) loss_value = loss_object(y_true=y_train, y_pred=predictions) run() writer = tf.summary.create_file_writer("graph_debug") with writer.as_default(): tf.summary.trace_export("graph", step=1) # grads = tape.gradient(tf.Variable(5), model.trainable_weights) # optimizer.apply_gradients(zip(grads, model.trainable_variables)) def using(point=""): usage = resource.getrusage(resource.RUSAGE_SELF) return '''%s: usertime=%s systime=%s mem=%s mb ''' % (point, usage[0], usage[1], (usage[2] * resource.getpagesize()) / 1000000.0)
7,121
33.572816
123
py
fl-analysis
fl-analysis-master/src/subspace/keras_ext/test_layers.py
#! /usr/bin/env python # Copyright (c) 2018 Uber Technologies, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os, sys import skimage import skimage.io import skimage.transform import numpy as np import tensorflow as tf from keras.layers import Input import keras.backend as K from keras.models import Sequential, Model #pack_root = os.path.join(os.path.dirname(__file__), '..', '..') #sys.path.insert(1, pack_root) # extended Keras layers from keras_layers import * def sample_box(proposed_box, target_box,high_thresh, low_thresh,batch_size): """ Compute Box IOU and sample positive/negative boxes and targe boxes Input: - proposed_box: tensor, all of the proposed boxes from RPN model. - target_box: tensor, groudtruth box from input dataset. - high_thresh: float, iou threshold to pick positive samples. - low_thresh: float, iou threshold to pick negative samples. - batch_sizes: output sample size. 
Output: - packed_pos_samples: tensor, packed with pos_samples and neg_samples. - negative_samples: tensor. """ # NOTE: this function should goes to model_builder.py later. out_iou = BoxIoU()([proposed_box, target_box]) sample_idx = BoxSamplerPosNeg(high_thresh, low_thresh, batch_size)(out_iou) ## NOTE: pos_samples is packed with pos_samples and tar_samples. Do NOT unpack here, ## otherwise keras cannot recognize the tensor size. #packed_pos_samples = BoxSamplerPositive(high_thresh, batch_size)( # [proposed_box, target_box,out_iou]) #neg_samples = BoxSamplerNegative(low_thresh, batch_size)([proposed_box, out_iou]) model = Model(input=[proposed_box, target_box], output=[ sample_idx]) return model def test_box_sampling(): print('Test box sampling module ...') # build keras model graph in_box1 = Input(batch_shape=(1,3, 4)) # proposed box in_box2 = Input(batch_shape=(1,2, 4)) # target box model = sample_box(in_box1, in_box2, 0.1, 0.1, 2) # create testing input values in_box1_val = np.array([[20., 10., 5., 5.], [80., 10., 5., 20.], [80., 80., 10., 5.]]) in_box1_val = np.tile(in_box1_val, (1,1,1)) in_box2_val = np.array([[20., 10., 20., 10.], [80., 80., 10., 10.]]) in_box2_val = np.tile(in_box2_val, (1,1,1)) # run graph init = tf.compat.v1.initialize_all_variables() sess = tf.compat.v1.Session() sess.run(init) out_vals = sess.run(model.output, feed_dict={ model.input[0]: in_box1_val, model.input[1]: in_box2_val}) print('box sampling OK!') def test_boxiou(): print('Test Box IOU layer...') # build keras model graph in_box1 = Input(batch_shape=(1,3, 4)) # proposed box in_box2 = Input(batch_shape=(1,2, 4)) # target box out_iou = BoxIoU()([in_box1, in_box2]) model = Model(input=[in_box1, in_box2], output=out_iou) # create testing input values in_box1_val = np.array([[20., 10., 5., 5.], [80., 10., 5., 20.], [80., 80., 10., 5.]]) in_box1_val = np.tile(in_box1_val, (1,1,1)) in_box2_val = np.array([[20., 10., 20., 10.], [80., 80., 10., 10.]]) in_box2_val = np.tile(in_box2_val, 
(1,1,1)) # run graph init = tf.compat.v1.initialize_all_variables() sess = tf.compat.v1.Session() sess.run(init) out_iou_val = sess.run(model.output, feed_dict={ model.input[0]: in_box1_val, model.input[1]: in_box2_val}) print('Box IOU OK!') print(out_iou_val) def test_selectpos(): print('Test SelectPosMakeTheta layer...') in_sample_index = Input(batch_shape=(5,3)) # sample index in_box_coords = Input(batch_shape=(6,4)) out_theta = SelectPosMakeTheta(64,64)([in_sample_index, in_box_coords]) model = Model(input=[in_sample_index, in_box_coords], output = out_theta) # create some data sample_index = np.array([[1, 2, 1], [1, 0, 3], [1, 4, 2], [-1,1, -1], [-1,3, -1]]) box_coords = np.array([[0., 0., 12., 14.], [1., 2., 15., 15.], [1.5, 2., 4., 10.], [5., 8., 4., 10.], [5.5, 3., 6., 8.], [3., 4., 9., 9.]]) # run graph init = tf.compat.v1.initialize_all_variables() sess = tf.compat.v1.Session() sess.run(init) out_theta_val = sess.run(model.output, feed_dict={ model.input[0]: sample_index, model.input[1]: box_coords}) print('SelectPosMakeTheta works!') print(out_theta_val) # def test_tile(): # in_x = Input(batch_shape = (1,13,13,5)) # in_y = Input(batch_shape = (12,6)) # # out_x = TileTensorLike()([in_x, in_y]) # model = Model(input=[in_x,in_y], output=out_x) # # in_x_val = np.random.rand(1,13,13,5) # in_y_val = np.random.rand(12,6) # # # run graph # init = tf.compat.v1.initialize_all_variables() # sess = tf.compat.v1.Session() # sess.run(init) # # out_x_val = model([in_x_val, in_y_val]) # # print('Tile works!') # print(out_x_val.shape) def run(model, inputs): return model(inputs) if __name__ == '__main__': test_boxiou() #test_box_sampling() # test_selectpos() # test_tile()
6,558
34.074866
89
py
fl-analysis
fl-analysis-master/src/subspace/keras_ext/rproj_layers_util.py
#! /usr/bin/env python # Copyright (c) 2018 Uber Technologies, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import numpy as np import tensorflow as tf from tensorflow.keras.layers import Layer # from keras.backend.tensorflow_backend import _convert_string_dtype from tensorflow.keras import regularizers, constraints, initializers, activations from sklearn.random_projection import SparseRandomProjection as SRP from scipy.sparse import find import time import os import sys lab_root = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..') sys.path.insert(1, lab_root) # from ops.fwh import fast_walsh_hadamard as c_fast_walsh_hadamard ########### # # A quick fix for the following error # from keras.backend.tensorflow_backend import _convert_string_dtype # Keras 2.0.8 NameError: global name '_convert_string_dtype' is not defined # Also called in rproj_layers.py def _convert_string_dtype(dtype): if dtype == 'float16': return np.float16 if dtype == 'float32': return np.float32 elif dtype == 'float64': return np.float64 elif dtype == 'int16': return np.int16 elif dtype == 'int32': return np.int32 elif dtype == 'int64': return np.int64 elif dtype == 'uint8': return np.int8 elif dtype == 'uint16': return np.uint16 else: raise ValueError('Unsupported dtype:', dtype) ########### class ThetaPrime(object): def __init__(self, size): # self.var = tf.Variable(np.random.randn(size).astype('float32'), trainable=True, name="ThetaPrime") # self.var = tf.Variable(np.zeros((size), dtype='float32'), trainable=True, name="ThetaPrime") self.var_2d = tf.Variable(np.zeros((1, size), dtype='float32'), trainable=True, name="ThetaPrime") self.size = size # class ThetaPrimeLayer(Layer): # def __init__(self, size): # super(ThetaPrimeLayer, self).__init__() # self.size = size # self.var = None # self.var_2d = None # # def build(self, input_shape): # self.var = tf.Variable(np.random.randn(self.size).astype('float32'), trainable=True, name="ThetaPrime") # # self.var = tf.Variable(np.zeros((size), dtype='float32'), trainable=True, name="ThetaPrime") # self.var_2d = tf.expand_dims(self.var, 0) class 
MultiplyLayer(Layer): def __init__(self, var): super(MultiplyLayer, self).__init__() self.var = var def call(self, inputs, **kwargs): return inputs * self.var ########### # # OffsetCreator{Dense,Sparse,Fastfood}Proj # # These classes create offsets. Each layer is given a projector on # construction and uses it as needed to create weight/bias/etc # offsets. # ########### class OffsetCreateDenseProjExec(): def __init__(self, weight_basis, ww, shape, name): self.weight_basis = weight_basis self.ww = ww self.shape = shape self.name = name def __call__(self, *args, **kwargs): return tf.reshape(tf.matmul(self.weight_basis.var_2d, self.ww, name=self.name), self.shape) class OffsetCreatorDenseProj(object): def __init__(self): self.basis_matrices = [] def create_theta_offset(self, weight_basis, shape, dtype, name=None): assert isinstance(weight_basis, ThetaPrime), 'weight_basis should be a ThetaPrime' if isinstance(shape, tf.TensorShape): shape = shape.as_list() # Create projection matrix ww total_dim = 1 for dim in shape: assert dim is not None and dim > 0, 'dimensions must be known' total_dim *= dim seed = np.random.randint(10e8) ww_shape = (weight_basis.size, total_dim) ww_0 = np.random.normal(0.0, 1.0, size=ww_shape) ww = tf.Variable(ww_0, trainable=False, dtype=_convert_string_dtype(dtype), name='%s_ww' % name) return OffsetCreateDenseProjExec(weight_basis, ww, shape, name) # theta_offset = tf.reshape(tf.matmul(weight_basis.var_2d, ww, name="MatMully"), shape) # # self.basis_matrices.append(ww) # # return theta_offset, [ww] class OffsetCreateSparseProjExec(): def __init__(self, weight_basis, normalizer, ww, shape, name): self.weight_basis = weight_basis self.normalizer = normalizer self.ww = ww self.shape = shape self.name = name def __call__(self, *args, **kwargs): # Pre-multiply the normalizer by the low-rank parameter vector to avoid a sparse matrix - sparse matrix product, # which is not well-supported in Tensorflow (instead of theta_full = (P*N^-1)*theta_small 
where P*N^-1 is a row-normalized # projection matrix, do P*(N^-1*theta_small)). (N^-1*theta_small) can be written as simply an element-wise vector division. theta_small_norm = tf.divide(self.weight_basis.var_2d, self.normalizer) # theta_small_norm = self.weight_basis.var_2d # Compute delta from theta_0 using sparse projection # Note: sparse matrix must be first argument delta_theta_flat = tf.sparse.sparse_dense_matmul(self.ww, theta_small_norm, adjoint_a=True, adjoint_b=True) # Create theta theta_offset = tf.reshape(delta_theta_flat, self.shape) return theta_offset class OffsetCreatorSparseProj(object): def __init__(self): self.basis_matrices = [] self.basis_matrix_normalizers = [] def create_theta_offset(self, weight_basis, shape, dtype, name=None): assert isinstance(weight_basis, ThetaPrime), 'weight_basis should be a ThetaPrime' if isinstance(shape, tf.TensorShape): shape = shape.as_list() # Create projection matrix ww total_dim = 1 for dim in shape: assert dim is not None and dim > 0, 'dimensions must be known' total_dim *= dim # Generate location and relative scale of non zero elements M = SRP(weight_basis.size)._make_random_matrix(weight_basis.size, total_dim) fm = find(M) # Create sparse projection matrix from small vv to full theta space ww0 = tf.SparseTensor(indices=np.array([fm[0], fm[1]]).T, values=fm[2], dense_shape=[weight_basis.size, total_dim]) ww = tf.cast(ww0, _convert_string_dtype(dtype), name="SparseyCast") # Create diagonal normalization matrix that will be filled in when all layers are created, so that we can normalize each # row of the projection matrix (with length equal to the total number of parameters in the model) once we have all its elements. # This will hold the norms of the rows of the un-normalized projection matrix. 
norm = tf.sqrt(tf.sparse.reduce_sum(tf.square(ww))) # tf.sqrt(tf.add_n([tf.sparse_reduce_sum(tf.square(bm), 1) for bm in basis_matrices])) normalizer = tf.Variable(tf.tile([norm], [weight_basis.size]), trainable=False, name='%s_normalizer' % name) self.basis_matrices.append(ww) self.basis_matrix_normalizers.append(normalizer) return OffsetCreateSparseProjExec(weight_basis, normalizer, ww, shape, name) # # Pre-multiply the normalizer by the low-rank parameter vector to avoid a sparse matrix - sparse matrix product, # # which is not well-supported in Tensorflow (instead of theta_full = (P*N^-1)*theta_small where P*N^-1 is a row-normalized # # projection matrix, do P*(N^-1*theta_small)). (N^-1*theta_small) can be written as simply an element-wise vector division. # theta_small_norm = tf.divide(weight_basis.var_2d, normalizer) # # # Compute delta from theta_0 using sparse projection # # Note: sparse matrix must be first argument # delta_theta_flat = tf.sparse.sparse_dense_matmul(ww, theta_small_norm, adjoint_a=True, adjoint_b=True) # # # Create theta # theta_offset = tf.reshape(delta_theta_flat, shape) # # # ww0 = tf.sparse.to_dense(ww0, validate_indices=False, name="SparseyDense") # # # ww0 = tf.Variable(ww0, trainable=False) # # self.basis_matrices.append(ww) # self.basis_matrix_normalizers.append(normalizer) # # # Note: previous versions added only ww0 to _non_trainable_weights but skipped normalizer. Here we more correctly return both. 
# # return theta_offset, [ww0] # return theta_offset, [ww0, normalizer] class OffsetCreateFastfoodProjExec(): def __init__(self, weight_basis, shape): self.weight_basis = weight_basis self.ww = [] self.shape = shape def __call__(self, *args, **kwargs): proj_tensor = self.weight_basis.get_projected_tensor(self.shape) return proj_tensor class OffsetCreatorFastfoodProj(object): def __init__(self): pass def create_theta_offset(self, weight_basis, shape, dtype, name=None): # Get offset from theta_0 (offset is initially 0) assert isinstance(weight_basis, FastWalshHadamardProjector), 'weight_basis should be a FastWalshHadamardProjector instance' return OffsetCreateFastfoodProjExec(weight_basis, shape) ########### # # FastWalshHadamardProjector # # This class is instantiated once per network and manages the whole # projection from d to D. # ########### class FastWalshHadamardProjector(Layer): '''FastWalshHadamardProjector owns the d trainable parameters and generates the D projected parameters. FastWalshHadamardProjector must be instantiated before the model is built with d (known) and D (possibly hard to find before model is built). Thus some trickiness is necessary. ''' def __init__(self, dd, DD, **kwargs): super(FastWalshHadamardProjector, self).__init__(**kwargs) self.dd = dd self.DD = DD self.index = 0 self.d_vec = self.add_weight('d_vec', (self.dd,), initializer='zeros') self.project_vars, self.D_vec_exec = tf_fastfood_transform(self.d_vec, self.dd, self.DD) for vv in self.project_vars: self._non_trainable_weights.append(vv) def get_projected_tensor(self, shape): if isinstance(shape, tf.TensorShape): shape = shape.as_list() total_size = np.prod(shape) assert self.index + total_size <= self.DD, 'Overrun D vector; requested too many projected tensors' # ret = self.D_vec[self.index:self.index + total_size] retflat = tf.slice(self.D_vec_exec(), [self.index], [total_size]) # print 'D_vec is', self.D_vec, 'and ret is', retflat ret = tf.reshape(retflat, shape) # print ' ... 
now ret is', ret # print 'Sliced from %d to %d and reshaped to %s' % (self.index, total_size, repr(shape)) self.index += total_size return ret def check_usage(self): if self.index == self.DD: print('FastWalshHadamardProjector usage is perfect: %d out of %d dimensions used' % (self.index, self.DD)) else: raise Exception( 'FastWalshHadamardProjector usage is off: %d out of %d dimensions used' % (self.index, self.DD)) ########### # # Fast Walsh Hadamard functions # ########### def np_fast_walsh_hadamard(x, axis, normalize=True): '''Compute Fast Walsh-Hadamard transform in numpy. Args: x: tensor of shape (a0, a1, ... aN, L, b0, b1, ..., bN). L must be a power of two. axis: the "L" axis above, aka the axis over which to do the Hadamard transform. All other dimensions are left alone; data on those dimension do not interact. normalize: Whether to normalize the results such that applying the transform twice returns to the original input value. If True, return values are floats even if input was int. 
Returns: ret: transformed tensor with same shape as x Tests: Wikipedia case >>> x = np.array([1,0,1,0,0,1,1,0]) >>> np_fast_walsh_hadamard(x, 0, False) array([ 4, 2, 0, -2, 0, 2, 0, 2]) >>> np_fast_walsh_hadamard(np_fast_walsh_hadamard(x, 0), 0) array([ 1., 0., 1., 0., 0., 1., 1., 0.]) ''' orig_shape = x.shape assert axis >= 0 and axis < len(orig_shape), ( 'For a vector of shape %s, axis must be in [0, %d] but it is %d' % (orig_shape, len(orig_shape) - 1, axis)) h_dim = orig_shape[axis] h_dim_exp = int(round(np.log(h_dim) / np.log(2))) assert h_dim == 2 ** h_dim_exp, ( 'hadamard can only be computed over axis with size that is a power of two, but' ' chosen axis %d has size %d' % (axis, h_dim)) working_shape_pre = [int(np.prod(orig_shape[:axis]))] # prod of empty array is 1 :) working_shape_post = [int(np.prod(orig_shape[axis + 1:]))] # prod of empty array is 1 :) working_shape_mid = [2] * h_dim_exp working_shape = working_shape_pre + working_shape_mid + working_shape_post # print 'working_shape is', working_shape ret = x.reshape(working_shape) for ii in range(h_dim_exp): dim = ii + 1 arrs = np.split(ret, 2, axis=dim) assert len(arrs) == 2 ret = np.concatenate((arrs[0] + arrs[1], arrs[0] - arrs[1]), axis=dim) if normalize: ret = ret / np.sqrt(float(h_dim)) ret = ret.reshape(orig_shape) return ret def _fast_walsh_hadamard_one_step(xx, axis): aa, bb = tf.split(xx, 2, axis=axis) ret = tf.concat((aa + bb, aa - bb), axis=axis) return ret def _fast_walsh_hadamard_one_step_method2(xx, pre, d1, d2, d3, post): working_shape = tf.concat((pre, d1, d2, d3, post), axis=0) xx = tf.reshape(xx, working_shape) aa, bb = tf.split(xx, 2, axis=2) ret = tf.concat((aa + bb, aa - bb), axis=2) return ret def tf_fast_walsh_hadamard(in_x, axis, normalize=True, method='two'): '''Compute Fast Walsh-Hadamard transform in tensorflow. Args: x: tensor of shape (a0, a1, ... aN, L, b0, b1, ..., bN). L must be a power of two. 
axis: the "L" axis above, aka the axis over which to do the Hadamard transform. All other dimensions are left alone; data on those dimension do not interact. normalize: Whether to normalize the results such that applying the transform twice returns to the original input value. method: 'one': Original reshape to [2]*ll version 'two': Deal with TF "UnimplementedError: SliceOp : Unhandled input dimensions" error... 'c': Use C++ FWH Op. Returns: ret: transformed tensor with same shape as x. Returned tensor is always float even if input was int. Tests: >>> in_x = tf.placeholder('float32') >>> in_x <tf.Tensor 'Placeholder:0' shape=<unknown> dtype=float32> >>> sess = tf.InteractiveSession() Wikipedia case: >>> x = np.array([1,0,1,0,0,1,1,0]) >>> sess.run(tf_fast_walsh_hadamard(in_x, 0, False), feed_dict={in_x: x}) array([ 4., 2., 0., -2., 0., 2., 0., 2.], dtype=float32) >>> sess.run(tf_fast_walsh_hadamard(in_x, 0, False, method='two'), feed_dict={in_x: x}) array([ 4., 2., 0., -2., 0., 2., 0., 2.], dtype=float32) >>> sess.run(tf_fast_walsh_hadamard(tf_fast_walsh_hadamard(in_x, 0), 0), feed_dict={in_x: x}) array([ 1., 0., 1., 0., 0., 1., 1., 0.], dtype=float32) Verify equivalence with numpy approach: >>> np.random.seed(123) >>> x = np.random.uniform(0, 1, (3, 64, 5)) >>> h_np = np_fast_walsh_hadamard(x, 1) >>> h_tf_ = tf_fast_walsh_hadamard(in_x, 1) >>> h_tf2_ = tf_fast_walsh_hadamard(in_x, 1, method='two') >>> h_tf = sess.run(h_tf_, feed_dict={in_x: x}) >>> h_tf2 = sess.run(h_tf2_, feed_dict={in_x: x}) >>> x.shape (3, 64, 5) >>> h_np.shape (3, 64, 5) >>> h_tf.shape (3, 64, 5) >>> h_tf2.shape (3, 64, 5) >>> abs(h_np - h_tf).max() < 1e-6 True >>> abs(h_np - h_tf2).max() < 1e-6 True Try a few other shapes / axes >>> sess.run(tf_fast_walsh_hadamard(in_x, 0), feed_dict={in_x: x[0]}).shape == x[0].shape True >>> sess.run(tf_fast_walsh_hadamard(in_x, 1), feed_dict={in_x: x[:, :, 0]}).shape == x[:, :, 0].shape True >>> sess.run(tf_fast_walsh_hadamard(in_x, 0), feed_dict={in_x: 
x[0, :, 0]}).shape == x[0, :, 0].shape True ''' orig_shape = tf.shape(input=in_x) h_dim = orig_shape[axis] h_dim_exp = tf.cast(tf.round(tf.math.log(tf.cast(h_dim, dtype=tf.float32)) / np.log(2)), 'int32') assert_pow2 = tf.compat.v1.assert_equal(h_dim, tf.pow(2, h_dim_exp), message='hadamard can only be computed over axis with size that is a power of two') with tf.control_dependencies([assert_pow2]): working_shape_pre = tf.expand_dims(tf.reduce_prod(input_tensor=orig_shape[:axis]), axis=0) # reduce_prod of empty array is 1 working_shape_post = tf.expand_dims(tf.reduce_prod(input_tensor=orig_shape[axis + 1:]), axis=0) # reduce_prod of empty array is 1 ii = tf.constant(0) assert method in ('one', 'two', 'c') if method == 'one': # expand to working dims [pre, 2, 2, 2, ..., 2, 2, post] working_shape_mid = tf.tile([2], [h_dim_exp]) working_shape = tf.concat((working_shape_pre, working_shape_mid, working_shape_post), axis=0) ret_0 = tf.reshape(in_x, working_shape) cond = lambda i, x: tf.less(i, h_dim_exp) body = lambda i, x: (tf.add(i, 1), _fast_walsh_hadamard_one_step(x, i + 1)) ii_final, ret = tf.while_loop( cond=cond, body=body, loop_vars=[ii, ret_0], parallel_iterations=1 # check on this? ) elif method == 'two': # Never expand to high rank. Roll dimensions instead. This is # needed because backprop through the slice operator only # supports up to rank 7 tensors in TF 1.3 # [pre, 1, 2, h_dim/2, post] -> # [pre, 2, 2, h_dim/4, post] -> ... 
# [pre, h_dim/2, 2, 1, post] d1 = tf.expand_dims(tf.constant(1), axis=0) d2 = tf.expand_dims(tf.constant(2), axis=0) # always 2 d3 = tf.expand_dims(tf.math.floordiv(h_dim, 2), axis=0) working_shape_0 = tf.concat((working_shape_pre, d1, d2, d3, working_shape_post), axis=0) ret_0 = tf.reshape(in_x, working_shape_0) cond = lambda i, d1, d3, x: tf.less(i, h_dim_exp) body = lambda i, d1, d3, x: (tf.add(i, 1), d1 * 2, tf.math.floordiv(d3, 2), _fast_walsh_hadamard_one_step_method2(x, working_shape_pre, d1, d2, d3, working_shape_post)) ii_final, d1_final, d3_final, ret = tf.while_loop( cond=cond, body=body, loop_vars=[ii, d1, d3, ret_0], parallel_iterations=1 # check on this? ) else: # 'c' version # Only works for rank-1 (vector) input assert False, 'c version disabled for now' assert axis == 0, 'axis must be 0 for the c version of tf_fast_walsh_hadamard' assert normalize, 'for c version normalize must be True' assert_rank1 = tf.compat.v1.assert_rank(in_x, 1) with tf.control_dependencies([assert_rank1, assert_pow2]): ret = c_fast_walsh_hadamard(in_x) if normalize and method != 'c': ret = ret / tf.sqrt(tf.cast(h_dim, dtype=tf.float32)) ret = tf.reshape(ret, orig_shape) return ret def tf_fastfood_transform(in_x, dd, DD, use_get=False, use_C=False): '''Transform from d to D. Pads as necessary. 
For now: assume dd and DD are known in python.''' # Tensor d and D # assert_D_big = tf.assert_greater_equal(DD, dd, message='d cannot be larger than D') # with tf.control_dependencies([assert_D_big]): # ll = tf.cast(tf.round(tf.log(tf.to_float(DD)) / np.log(2)), 'int32') # LL = tf.pow(2, ll) # Python d and D assert isinstance(dd, int), 'd should be int' assert isinstance(DD, int), 'D should be int' assert DD >= dd, 'd cannot be larger than D' assert dd > 0, 'd and D must be positive' ll = int(np.ceil(np.log(DD) / np.log(2))) LL = 2 ** ll # Make vars init_BB = tf.cast(tf.random.uniform((LL,), 0, 2, dtype='int32'), dtype=tf.float32) * 2 - 1 init_Pi = tf.random.shuffle(tf.range(LL)) init_GG = tf.random.normal((LL,)) init_divisor = lambda GG: tf.sqrt(LL * tf.reduce_sum(input_tensor=tf.pow(GG.initialized_value(), 2))) if use_get: BB = tf.compat.v1.get_variable('B', initializer=init_BB, trainable=False) Pi = tf.compat.v1.get_variable('Pi', initializer=init_Pi, trainable=False) GG = tf.compat.v1.get_variable('G', initializer=init_GG, trainable=False) divisor = tf.compat.v1.get_variable('divisor', initializer=init_divisor(GG), trainable=False) else: BB = tf.Variable(init_BB, name='B', trainable=False) Pi = tf.Variable(init_Pi, name='Pi', trainable=False) GG = tf.Variable(init_GG, name='G', trainable=False) divisor = tf.Variable(init_divisor(GG), name='divisor', trainable=False) fastfood_vars = [BB, Pi, GG, divisor] ret = FastfoodExec(in_x, BB, Pi, GG, LL, ll, DD, dd, divisor, use_C) return fastfood_vars, ret class FastfoodExec(): def __init__(self, in_x, BB, Pi, GG, LL, ll, DD, dd, divisor, use_C): self.in_x = in_x self.BB = BB self.Pi = Pi self.GG = GG self.LL = LL self.ll = ll self.DD = DD self.dd = dd self.divisor = divisor self.use_C = use_C def __call__(self, *args, **kwargs): # Implement transform dd_pad = tf.pad(tensor=self.in_x, paddings=[[0, self.LL - self.dd]]) mul_1 = tf.multiply(self.BB, dd_pad) if self.use_C: mul_2 = tf_fast_walsh_hadamard(mul_1, 0, 
method='c', normalize=True) else: mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='two', normalize=False) mul_3 = tf.gather(mul_2, self.Pi) mul_4 = tf.multiply(mul_3, self.GG) if self.use_C: mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='c', normalize=True) print('\nWARNING: check normalization on this next line more carefully\n') ret = tf.divide(tf.slice(mul_5, [0], [self.DD]), self.divisor * np.sqrt(float(self.DD) / self.LL / self.ll)) else: mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='two', normalize=False) ret = tf.divide(tf.slice(mul_5, [0], [self.DD]), self.divisor * np.sqrt(float(self.DD) / self.LL)) return ret def test_timing(): N = 29 in_x = tf.compat.v1.placeholder('float32') sum_x = tf.reduce_sum(input_tensor=in_x) hh = tf_fast_walsh_hadamard(in_x, 1, True) sum_h = tf.reduce_sum(input_tensor=hh) sess = tf.compat.v1.InteractiveSession() for ll in range(1, N): L = 2 ** ll print('\n%d, H dim %d' % (ll, L)) x = np.random.uniform(0, 1, (1, L, 1)) if L < 33554432: start = time.time() np_fast_walsh_hadamard(x, 1) end = time.time() print(' np %14s elems: %16s' % ('%d' % L, '%f' % (end - start))) else: print(' np <skipped>') start = time.time() sess.run(sum_h, feed_dict={in_x: x}) end = time.time() print(' tf %14s elems: %16s' % ('%d' % L, '%f' % (end - start))) # Time each op the third time (ignore CUDA tuning time) then subtract data transfer time sess.run(sum_x, feed_dict={in_x: x}) sess.run(sum_x, feed_dict={in_x: x}) start = time.time() sess.run(sum_x, feed_dict={in_x: x}) elap_data = time.time() - start sess.run(sum_h, feed_dict={in_x: x}) sess.run(sum_h, feed_dict={in_x: x}) start = time.time() sess.run(sum_h, feed_dict={in_x: x}) elap_had = time.time() - start print(' tf just H %14s elems: %16s' % ('%d' % (L), '%f' % (elap_had - elap_data))) DD = max(5, int(np.ceil(L * .8))) dd = max(3, int(np.ceil(DD * .001))) if x.shape[1] >= dd: for use_C in [False, True]: st = '(C) ' if use_C else '(TF)' ffvars, xform = tf_fastfood_transform(in_x, dd, DD, 
use_C=use_C) sum_xf = tf.reduce_sum(input_tensor=xform) sess.run(tf.compat.v1.global_variables_initializer()) sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]}) start = time.time() sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]}) end = time.time() print(' tf %s fastf %14s elems: %16s' % (st, '%d' % L, '%f' % (end - start))) sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]}) sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]}) start = time.time() sess.run(sum_x, feed_dict={in_x: x[0, :dd, 0]}) elap_data = time.time() - start sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]}) sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]}) start = time.time() sess.run(sum_xf, feed_dict={in_x: x[0, :dd, 0]}) elap_had = time.time() - start print(' tf %s just fastf%14s elems: %16s' % (st, '%d' % (L), '%f' % (elap_had - elap_data))) else: print(' tf fastfood %14s elems: <skipped, too small>' % ('%d' % L)) if L > 32768: print(' <skipped large batch cases>') continue x2 = np.random.uniform(0, 1, (10, L, 100)) start = time.time() np_fast_walsh_hadamard(x2, 1) end = time.time() print(' np %14s elems: %16s' % ('%d' % (L * 1000), '%f' % (end - start))) start = time.time() sess.run(sum_h, feed_dict={in_x: x2}) end = time.time() print(' tf %14s elems: %16s' % ('%d' % (L * 1000), '%f' % (end - start))) # Time each op the third time (ignore CUDA tuning time) then subtract data transfer time sess.run(sum_x, feed_dict={in_x: x2}) sess.run(sum_x, feed_dict={in_x: x2}) start = time.time() sess.run(sum_x, feed_dict={in_x: x2}) elap_data = time.time() - start sess.run(sum_h, feed_dict={in_x: x2}) sess.run(sum_h, feed_dict={in_x: x2}) start = time.time() sess.run(sum_h, feed_dict={in_x: x2}) elap_had = time.time() - start print(' tf just H %14s elems: %16s' % ('%d' % (L * 1000), '%f' % (elap_had - elap_data))) print('The next dim, 2**29 ==', 2 ** 29, 'crashes with OOM on a TitanX') if __name__ == '__main__': import doctest doctest.testmod() test_timing()
28,439
36.970628
136
py
fl-analysis
fl-analysis-master/src/subspace/keras_ext/engine_training.py
# Copyright (c) 2018 Uber Technologies, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras import Input
from tensorflow.python.keras import backend

from src.subspace.general.util import DotDict
from .util import full_static_shape


class ExtendedModel(Model):
    '''Slight extensions of the Keras model class.

    Adds a registry of named tensors/variables (``self.v``), a subset of
    which may be flagged as "trackable" (intended to be fetched every
    training step), plus lists of extra trainable / non-trainable weights
    that live outside the normal Keras layer machinery (used for the
    subspace "thetaprime" parameter — see :meth:`set_weights`).
    '''

    def __init__(self, input, output, name=None):
        # NOTE(review): `input` shadows the builtin, but it is part of the
        # public constructor signature, so the name is kept for callers.
        super(ExtendedModel, self).__init__(input, output, name=name)
        self.v = DotDict()       # name -> tensor/variable registry
        #self._vars = OrderedDict()
        self._trackable = set()  # subset of self.v keys marked trackable
        self._extra_trainable_weights = []
        self._extra_non_trainable_weights = []

    def add_loss_reg(self):
        '''Adds losses for all attached regularizers.

        Collects every non-None, non-zero entry of ``self.losses`` into a
        single summed tensor registered as the trackable ``loss_reg``.
        '''
        # New Keras interface for regularization / etc layer losses
        losses = []
        for loss in self.losses:
            if loss is None or loss == 0 or loss == 0.0:
                continue
            losses.append(loss)
        if len(losses) > 0:
            print('Regularizer and other internal losses from model: %d losses' % len(losses))
            for loss in losses:
                print('  loss var=%s' % loss)
            self.add_trackable('loss_reg', tf.add_n(losses, name='loss_reg'))
        if 'loss_reg' not in self.v:
            print('Regularizer and other internal losses from model: none to add.')

    def add_var(self, name_or_var, var=None, trackable=False):
        '''Call like self.add_var('name', var) or self.add_var(var) to use var.name as name.'''
        if var is None:
            var = name_or_var
            name = var.name
        else:
            name = name_or_var
        self.v[name] = var
        # Re-registering with trackable=False demotes a previously
        # trackable name.
        if trackable:
            self._trackable.add(name)
        elif name in self._trackable:
            self._trackable.remove(name)

    def add_vars(self, names_or_vars, varss=None, trackable=False):
        '''Call with:
        - one list of vars
        - equal length lists of names and vars
        - dict of name: var pairs
        '''
        if isinstance(names_or_vars, dict):
            for name, var in names_or_vars.items():
                self.add_var(name, var, trackable=trackable)
        elif varss is None:
            for var in names_or_vars:
                self.add_var(var, var=None, trackable=trackable)
        else:
            assert len(names_or_vars) == len(varss), 'should be two lists of equal length'
            for name, var in zip(names_or_vars, varss):
                self.add_var(name, var, trackable=trackable)

    def add_trackable(self, name_or_var, var=None):
        '''Register a var (see add_var) and mark it trackable.'''
        self.add_var(name_or_var, var=var, trackable=True)

    def add_trackables(self, names_or_vars, varss=None):
        '''Register several vars (see add_vars) and mark them trackable.'''
        self.add_vars(names_or_vars, varss=varss, trackable=True)

    def del_var(self, name):
        '''Remove var if it exists.'''
        # Fix: clean up the trackable flag independently of registry
        # membership, so a stale trackable name cannot linger.
        if name in self.v:
            del self.v[name]
        if name in self._trackable:
            self._trackable.remove(name)

    @property
    def var_names(self):
        return list(self.v.keys())

    @property
    def trackable_names(self):
        return [k for k in self.var_names if k in self._trackable]

    @property
    def vars(self):
        return self.get_vars()

    def get_vars(self, var_names=None):
        '''Return registered vars; all of them when var_names is None.'''
        if var_names is None:
            var_names = self.var_names
        return [self.v[name] for name in var_names]

    @property
    def tensors(self):
        return self.get_tensors()

    def get_tensors(self, tensor_names=None):
        '''Like get_vars but filtered to entries that are tf.Tensor.'''
        return [vv for vv in self.get_vars(var_names=tensor_names) if isinstance(vv, tf.Tensor)]

    def get_weights(self):
        # Overrides Keras: returns only the extra trainable weights
        # (the subspace parameter), not the full layer weights.
        return backend.batch_get_value(self.extra_trainable_weights)

    def set_weights(self, weights):
        '''Set the single extra trainable weight ("thetaprime").

        Overrides Keras set_weights; use set_all_weights for the normal
        full-model behavior.
        '''
        # super(Model, self).set_weights(weights)
        assert len(weights) == 1, f"Can only have single weight for thetaprime! {weights}"
        theta_prime = self.extra_trainable_weights[0]
        # Fix: previous local was named `tuple`, shadowing the builtin.
        assignments = [(theta_prime, weights[0])]
        backend.batch_set_value(assignments)

    def set_all_weights(self, weights):
        '''Standard Keras set_weights over every model weight.'''
        super(Model, self).set_weights(weights)

    @property
    def trackable_vars(self):
        return [self.v[k] for k in self.var_names if k in self._trackable]

    @property
    def trackable_dict(self):
        return self.get_tensor_dict(self.trackable_names)

    @property
    def update_dict(self):
        return {'update__%d' % ii: update for ii, update in enumerate(self.updates)}

    @property
    def trackable_and_update_dict(self):
        '''Returns a dict of all trackables and updates.

        Useful for training when you want to fetch all trackables and also
        ensure any updates (e.g. for rolling average BatchNormalization
        layers) are fetched.
        '''
        ret = self.trackable_dict
        ret.update(self.update_dict)
        return ret

    def get_tensor_dict(self, tensor_names=None):
        '''Dict of name -> tensor for registered entries that are tf.Tensor.'''
        if tensor_names is None:
            tensor_names = self.var_names
        filtered_names = [nn for nn in tensor_names if isinstance(self.v[nn], tf.Tensor)]
        return {kk: self.v[kk] for kk in filtered_names}

    def print_trainable_warnings(self, graph=None):
        '''Print warnings for any vars marked as trainable in the model but
        not graph, and vice versa.

        A common case where this occurs is in BatchNormalization layers,
        where internal variables are updated but not marked as trainable.
        '''
        if graph is None:
            try:
                graph = tf.python.get_default_graph()
            except AttributeError:
                # Newer TF moved the function under compat.v1.
                graph = tf.compat.v1.get_default_graph()

        def tag(name):
            # "." marks expected mismatches (BatchNorm internals),
            # "***" marks surprising ones.
            if 'batchnormalization' in name and 'running' in name:
                # Keras 1.2.2
                return ' . '
            elif 'batch_normalization' in name and 'moving' in name:
                # Keras 2+
                return ' . '
            else:
                return '***'

        # Check which vars are trainable
        trainable_vars_from_graph = graph.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
        trainable_vars_from_model = self.trainable_weights

        in_graph_not_model = set(trainable_vars_from_graph).difference(set(trainable_vars_from_model))
        if in_graph_not_model:
            print('Warning: the following vars are marked as trainable in the graph but not in model.trainable_weights (typical for BatchNormalization layers. "." if expected, "***" if not):')
            print('\n'.join(['  %4s %s: %s' % (tag(vv.name), vv.name, vv) for vv in in_graph_not_model]))
        in_model_not_graph = set(trainable_vars_from_model).difference(set(trainable_vars_from_graph))
        if in_model_not_graph:
            print('Warning: the following vars are in model.trainable_weights but not marked as trainable in the graph:')
            print('\n'.join(['  %4s %s: %s' % (tag(vv.name), vv.name, vv) for vv in in_model_not_graph]))

    def add_extra_trainable_weight(self, weight):
        self._extra_trainable_weights.append(weight)

    @property
    def extra_trainable_weights(self):
        return self._extra_trainable_weights

    @property
    def trainable_weights(self):
        tw = super(ExtendedModel, self).trainable_weights
        # NOTE(review): the extension below is deliberately commented out,
        # so extra weights are excluded from Keras' trainable_weights —
        # confirm this is intended before re-enabling.
        # tw.extend(self.extra_trainable_weights)
        return tw

    def add_extra_non_trainable_weight(self, weight):
        self._extra_non_trainable_weights.append(weight)

    @property
    def extra_non_trainable_weights(self):
        return self._extra_non_trainable_weights

    @property
    def non_trainable_weights(self):
        ntw = super(ExtendedModel, self).non_trainable_weights
        ntw.extend(self.extra_non_trainable_weights)
        return ntw


class LazyModel(ExtendedModel):
    '''Like ExtendedModel. But lazy and nestable.

    In general, we would like to be able to encapsulate functionality in
    larger containers than single layers. However, this is difficult because
    when using the standard Model (and ExtendedModel), you must know the
    input shape in order to make a placeholder Input layer. This is far less
    convenient than, say, just being able to call a Dense(123) layer on an
    input of unknown width and having the shape inferred at build time.

    LazyModel solves this problem by delaying the model build until the
    first time it is actually called on a real node in the graph, at which
    point an internal Input layer is constructed on the fly (and generally
    then not used).

    Known issues:
    - BatchNormalization layers fail in mode 0 (because they are called
      twice). Workaround: use in mode 1 or 2 or outside LazyModel.
    - Layer activity_regularizers do not work well, because then there end
      up being two copies (one on the activation resulting from the internal
      Input layer). Workaround: use activity_regularizers only outside the
      LazyModel.
    - There still ends up being a dangling tf.placeholder in the graph. See
      notes in exp/model_keras_hacking/ for failed more elegant solutions.
    '''

    def __init__(self, model_function):
        # Intentionally does NOT call super().__init__: construction is
        # deferred until the first __call__, when input shapes are known.
        self._model_function = model_function
        self._lazy_has_run = False
        # Delay rest of construction until first call

    def __call__(self, inputs, mask=None):
        if not self._lazy_has_run:
            input_was_list_tuple = isinstance(inputs, list) or isinstance(inputs, tuple)
            if input_was_list_tuple:
                input_list = inputs
            else:
                input_list = [inputs]

            # Make short-lived Input Layers for each x this was called with
            input_layers = []
            warn_prefix = 'if_you_get_a_must_feed_placeholder_error_here_it_is_because_you_used_an_activity_regularizer._ask_jason'
            for inp in input_list:
                #ll = Input(tensor=inp, batch_shape=inp._keras_shape, dtype=inp.dtype, name='real_input_from__%s' % inp.name.replace('/','_').replace(':','_'))
                #ll = Input(batch_shape=inp.get_shape().as_list(), dtype=inp.dtype, name='%s.%s' % (warn_prefix, inp.name.replace('/','_').replace(':','_')))
                shape = full_static_shape(inp)
                ll = Input(batch_shape=shape, dtype=inp.dtype, name='%s.%s' % (warn_prefix, inp.name.replace('/','_').replace(':','_')))
                input_layers.append(ll)
            if not input_was_list_tuple:
                input_layers = input_layers[0]

            # Call function of inputs to get output tensors
            # And then initialize the entire model.
            outputs = self._model_function(input_layers)
            super(LazyModel, self).__init__(input_layers, outputs)
            self._lazy_has_run = True

        # Now actually call the model and return the outputs
        return super(LazyModel, self).__call__(inputs, mask=mask)
12,035
39.12
192
py